diff --git a/.coderabbit.yml b/.coderabbit.yml deleted file mode 100644 index 17855753fc3..00000000000 --- a/.coderabbit.yml +++ /dev/null @@ -1,35 +0,0 @@ -language: "en" -early_access: false -reviews: - request_changes_workflow: false - high_level_summary: true - poem: false - review_status: false - collapse_walkthrough: true - path_filters: - - "!**/*.pb.go" - - "!**/*.pb.gw.go" - - "!**/*.mod" - - "!**/*.sum" - path_instructions: - - path: "**/*.go" - instructions: "Review the Golang code for conformity with the Uber Golang style guide, highlighting any deviations." - - path: "e2e/**/*" - instructions: | - "Assess the integration and e2e test code assessing sufficient code coverage for the changes associated in the pull request" - - path: "**/*_test.go" - instructions: | - "Assess the unit test code assessing sufficient code coverage for the changes associated in the pull request" - - path: "**/*.md" - instructions: | - "Assess the documentation for misspellings, grammatical errors, missing documentation and correctness" - auto_review: - enabled: false - ignore_title_keywords: - - "WIP" - - "DO NOT MERGE" - drafts: false - base_branches: - - "main" -chat: - auto_reply: true diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS deleted file mode 100644 index 7973db43841..00000000000 --- a/.github/CODEOWNERS +++ /dev/null @@ -1,6 +0,0 @@ -# CODEOWNERS: https://help.github.com/articles/about-codeowners/ - -* @AdityaSripal @gjermundgaraba @srdtrk -/docs/ @womensrights - -# Our appreciation and gratitude to past code owners: @fedekunze @seantking @tmsdkeys @charleenfei @crodriguezvega @colin-axner @chatton @bznein @DimitrisJim @damiannolan diff --git a/.github/dependabot.yml b/.github/dependabot.yml index cfee720a20e..20da0f73a8e 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -7,33 +7,12 @@ updates: open-pull-requests-limit: 10 - package-ecosystem: gomod - directory: "/" - schedule: - interval: daily - open-pull-requests-limit: 10 - labels: - - dependencies - - - package-ecosystem: gomod - directory: "/e2e" + directories: + - "/" + - "/modules/light-clients/08-wasm" + - "/e2e" + - "/simapp" schedule: interval: daily - open-pull-requests-limit: 10 - labels: - - dependencies - - - package-ecosystem: gomod - directory: "/modules/light-clients/08-wasm" - schedule: - interval: daily - open-pull-requests-limit: 10 - labels: - - dependencies - - - package-ecosystem: gomod - directory: "/simapp" - schedule: - interval: daily - open-pull-requests-limit: 10 labels: - dependencies diff --git a/.github/mergify.yml b/.github/mergify.yml index d22fe47f228..a622909bf48 100644 --- a/.github/mergify.yml +++ b/.github/mergify.yml @@ -14,38 +14,6 @@ queue_rules: merge_method: squash pull_request_rules: - - name: backport patches to v0.2.x callbacks ibc-go v7.3.x branch - conditions: - - base=main - - label=backport-callbacks-to-v0.2.x+ibc-go-v7.3.x - actions: - backport: - branches: - - callbacks/release/v0.2.x+ibc-go-v7.3.x - - name: backport patches to v0.2.x callbacks ibc-go v8.0.x branch - conditions: - - base=main - - label=backport-callbacks-to-v0.2.x+ibc-go-v8.0.x - actions: - backport: - branches: - - callbacks/release/v0.2.x+ibc-go-v8.0.x - - name: backport patches to v0.4.x wasm ibc-go v7.4.x & wasmvm 1.5.x branch - conditions: - - base=main - - label=backport-wasm-v0.4.x+ibc-go-v7.4.x-wasmvm-v1.5.x - actions: - backport: - branches: - - 08-wasm/release/v0.4.x+ibc-go-v7.4.x-wasmvm-v1.5.x - - name: backport patches to v0.5.x wasm ibc-go v8.4.x & wasmvm 2.1.x branch - conditions: - 
- base=main - - label=backport-wasm-v0.5.x+ibc-go-v8.4.x-wasmvm-v2.1.x - actions: - backport: - branches: - - 08-wasm/release/v0.5.x+ibc-go-v8.4.x-wasmvm-v2.1.x - name: backport patches to v7.10.x branch conditions: - base=main @@ -54,30 +22,22 @@ pull_request_rules: backport: branches: - release/v7.10.x - - name: backport patches to v8.7.x branch - conditions: - - base=main - - label=backport-to-v8.7.x - actions: - backport: - branches: - - release/v8.7.x - - name: backport patches to v10.2.x branch + - name: backport patches to v8.8.x branch conditions: - base=main - - label=backport-to-v10.2.x + - label=backport-to-v8.8.x actions: backport: branches: - - release/v10.2.x - - name: backport patches to v10.3.x branch + - release/v8.8.x + - name: backport patches to v10.4.x branch conditions: - base=main - - label=backport-to-v10.3.x + - label=backport-to-v10.4.x actions: backport: branches: - - release/v10.3.x + - release/v10.4.x - name: automerge to main with label automerge and branch protection passing conditions: [] actions: diff --git a/.github/workflows/build-simd-image-from-tag.yml b/.github/workflows/build-simd-image-from-tag.yml index fc32653394f..7d43f2c2ad9 100644 --- a/.github/workflows/build-simd-image-from-tag.yml +++ b/.github/workflows/build-simd-image-from-tag.yml @@ -19,17 +19,17 @@ env: jobs: build-image-at-tag: - runs-on: depot-ubuntu-22.04-4 + runs-on: depot-ubuntu-24.04-4 permissions: packages: write contents: read steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@v5 with: ref: "${{ env.GIT_TAG }}" fetch-depth: 0 - name: Log in to the Container registry - uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 + uses: docker/login-action@v3 with: registry: ${{ env.REGISTRY }} username: ${{ github.actor }} diff --git a/.github/workflows/build-wasm-simd-image-from-tag.yml b/.github/workflows/build-wasm-simd-image-from-tag.yml index 72173a3a642..247800c9ada 100644 --- a/.github/workflows/build-wasm-simd-image-from-tag.yml +++ b/.github/workflows/build-wasm-simd-image-from-tag.yml @@ -28,13 +28,13 @@ jobs: platform: linux/arm64 runs-on: ${{ matrix.os }} steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@v5 with: ref: "${{ env.GIT_TAG }}" fetch-depth: 0 # TODO: #7885 Get rid of this script, it is super unecessary and can probably be done in the Dockerfile or a bash script - - uses: actions/setup-python@v5 + - uses: actions/setup-python@v6 with: python-version: '3.10' - name: Install dependencies @@ -46,7 +46,7 @@ jobs: uses: docker/setup-buildx-action@v3 - name: Log in to the Container registry - uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 + uses: docker/login-action@v3 with: registry: ${{ env.REGISTRY }} username: ${{ github.actor }} @@ -76,7 +76,7 @@ jobs: retention-days: 1 merge: - runs-on: depot-ubuntu-22.04-4 + runs-on: depot-ubuntu-24.04-4 permissions: packages: write contents: read @@ -84,7 +84,7 @@ jobs: - build-image-at-tag steps: - name: Download digests - uses: actions/download-artifact@v4 + uses: actions/download-artifact@v5 with: path: ${{ runner.temp }}/digests pattern: digests-* @@ -96,7 +96,7 @@ jobs: run: echo "DOCKER_TAG=$(echo $GIT_TAG | sed 's/[^a-zA-Z0-9\.]/-/g')" >> $GITHUB_ENV - name: Log in to the Container registry - uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 + uses: docker/login-action@v3 with: registry: ${{ env.REGISTRY }} username: ${{ github.actor }} diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 
b2fa896f5c0..304c0aa7f15 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -23,7 +23,7 @@ on: jobs: analyze: name: Analyze - runs-on: depot-ubuntu-22.04-4 + runs-on: depot-ubuntu-24.04-4 permissions: actions: read contents: read @@ -39,7 +39,7 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@v4 + uses: actions/checkout@v5 - uses: technote-space/get-diff-action@v6.1.2 with: PATTERNS: | diff --git a/.github/workflows/dependabot-tidy.yml b/.github/workflows/dependabot-tidy.yml new file mode 100644 index 00000000000..8e1ebf5b237 --- /dev/null +++ b/.github/workflows/dependabot-tidy.yml @@ -0,0 +1,47 @@ +name: Dependabot Tidy + +on: + pull_request: + types: + - opened + - synchronize + +jobs: + tidy: + if: github.actor == 'dependabot[bot]' + permissions: + contents: write + pull-requests: write + runs-on: depot-ubuntu-24.04-4 + steps: + - name: Check out PR + uses: actions/checkout@v5 + with: + ref: ${{ github.event.pull_request.head.ref }} + token: ${{ secrets.DEPENDABOT_PUSH_PAT }} + fetch-depth: 0 # otherwise, there would be errors pushing refs to the destination repository. + + - name: Run make tidy-all + run: make tidy-all + + - name: Commit changes if any + id: commit + run: | + if ! git diff --quiet; then + git config user.name "github-actions[bot]" + git config user.email "github-actions[bot]@users.noreply.github.com" + git add . + git commit -m "chore: run make tidy-all" + echo "changed=true" >> $GITHUB_OUTPUT + else + echo "No changes after make tidy-all" + echo "changed=false" >> $GITHUB_OUTPUT + fi + + - name: Push changes if any + if: ${{ steps.commit.outputs.changed == 'true' }} + uses: ad-m/github-push-action@master + with: + github_token: ${{ secrets.DEPENDABOT_PUSH_PAT }} + branch: ${{ github.event.pull_request.head.ref }} + force_with_lease: true diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index 3f74a130d9d..6bd9ef8d861 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -6,6 +6,7 @@ on: push: branches: - main + - release/v* paths: - '.github/workflows/docker.yml' - '**.go' @@ -16,43 +17,54 @@ env: jobs: docker-build: - runs-on: depot-ubuntu-22.04-4 + runs-on: depot-ubuntu-24.04-4 permissions: packages: write contents: read steps: - name: Checkout - uses: actions/checkout@v4 + uses: actions/checkout@v5 - name: Extract metadata (tags, labels) for Docker id: meta - uses: docker/metadata-action@902fa8ec7d6ecbf8d84d538b9b233a880e428804 + uses: docker/metadata-action@v5 with: images: ${{ env.REGISTRY }}/cosmos/${{ env.IMAGE_NAME }} + - name: Compute release branch tag + id: reltag + if: startsWith(github.ref_name, 'release/v') + shell: bash + run: | + RAW="${GITHUB_REF_NAME}" + SANITIZED="${RAW//\//-}" + echo "full_tag=${{ env.REGISTRY }}/cosmos/${{ env.IMAGE_NAME }}:branch-${SANITIZED}" >> "$GITHUB_OUTPUT" + - name: Build Docker image - uses: docker/build-push-action@ca877d9245402d1537745e0e356eab47c3520991 + uses: docker/build-push-action@v6 with: context: . 
tags: ${{ steps.meta.outputs.tags }} build-args: | - IBC_GO_VERSION=main + IBC_GO_VERSION=${{ github.ref_name }} - name: Test simd is runnable run: | docker run --rm ${{ steps.meta.outputs.tags }} - name: Log in to the Container registry - uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 + uses: docker/login-action@v3 with: registry: ${{ env.REGISTRY }} username: ${{ github.actor }} password: ${{ secrets.GITHUB_TOKEN }} - name: Push Docker image - uses: docker/build-push-action@ca877d9245402d1537745e0e356eab47c3520991 + uses: docker/build-push-action@v6 with: context: . push: true - tags: ${{ steps.meta.outputs.tags }} + tags: | + ${{ steps.meta.outputs.tags }} + ${{ steps.reltag.outputs.full_tag }} build-args: | - IBC_GO_VERSION=main + IBC_GO_VERSION=${{ github.ref_name }} diff --git a/.github/workflows/docs-check.yml b/.github/workflows/docs-check.yml index 0b90a0565ab..56982b0c329 100644 --- a/.github/workflows/docs-check.yml +++ b/.github/workflows/docs-check.yml @@ -10,10 +10,10 @@ on: jobs: build: - runs-on: depot-ubuntu-22.04-4 + runs-on: depot-ubuntu-24.04-4 steps: - - uses: actions/checkout@v4 - - uses: actions/setup-node@v4 + - uses: actions/checkout@v5 + - uses: actions/setup-node@v5 with: node-version: 22 cache: npm @@ -25,12 +25,12 @@ jobs: run: cd docs && npm run build lint: - runs-on: depot-ubuntu-22.04-4 + runs-on: depot-ubuntu-24.04-4 steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@v5 with: fetch-depth: 0 - - uses: DavidAnson/markdownlint-cli2-action@v19 + - uses: DavidAnson/markdownlint-cli2-action@v20 with: globs: ./docs/docs/**/*.md diff --git a/.github/workflows/docs-deploy.yml b/.github/workflows/docs-deploy.yml index 4f380957516..1722ed10e73 100644 --- a/.github/workflows/docs-deploy.yml +++ b/.github/workflows/docs-deploy.yml @@ -14,10 +14,10 @@ on: jobs: deploy: name: Deploy to GitHub Pages - runs-on: depot-ubuntu-22.04-4 + runs-on: depot-ubuntu-24.04-4 steps: - - uses: actions/checkout@v4 - - uses: actions/setup-node@v4 + - uses: actions/checkout@v5 + - uses: actions/setup-node@v5 with: node-version: 22 cache: npm diff --git a/.github/workflows/e2e-compatibility-workflow-call.yaml b/.github/workflows/e2e-compatibility-workflow-call.yaml index 2e459ed7614..a52da8c08bc 100644 --- a/.github/workflows/e2e-compatibility-workflow-call.yaml +++ b/.github/workflows/e2e-compatibility-workflow-call.yaml @@ -19,10 +19,10 @@ jobs: load-test-matrix: outputs: test-matrix: ${{ steps.set-test-matrix.outputs.test-matrix }} - runs-on: depot-ubuntu-22.04-4 + runs-on: depot-ubuntu-24.04-4 steps: - - uses: actions/checkout@v4 - - uses: actions/setup-python@v5 + - uses: actions/checkout@v5 + - uses: actions/setup-python@v6 with: python-version: '3.10' - run: pip install -r requirements.txt @@ -34,7 +34,7 @@ jobs: id: set-test-matrix e2e: - runs-on: depot-ubuntu-22.04-4 + runs-on: depot-ubuntu-24.04-4 needs: load-test-matrix # this job is skipped if the test-matrix generated is empty. i.e. if the file was not present. # this allows us to not have to handle special case versions which may not have certain tests run against them. 
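
For orientation, an illustrative aside (not taken from the repository): the e2e jobs in these workflows expand a JSON matrix emitted by helper tools, such as the Python compatibility script above and `cmd/build_test_matrix` further down in this diff. Below is a minimal Go sketch of the shape that output plausibly takes; the type names, JSON tags, and the example test name are assumptions chosen only to match how the workflows reference `matrix.entrypoint` and `matrix.test`.

```go
package main

import (
	"encoding/json"
	"fmt"
)

// testSuitePair mirrors one matrix entry: a suite entrypoint plus a single test.
// The JSON tags are assumptions, chosen to line up with the `matrix.entrypoint`
// and `matrix.test` references in the workflow files in this diff.
type testSuitePair struct {
	EntryPoint string `json:"entrypoint"`
	Test       string `json:"test"`
}

// githubActionTestMatrix is the top-level object written to $GITHUB_OUTPUT as
// `matrix=...` and consumed via `matrix: ${{ fromJSON(...) }}`.
type githubActionTestMatrix struct {
	Include []testSuitePair `json:"include"`
}

func main() {
	// The suite name appears in this diff; the test name is a placeholder.
	m := githubActionTestMatrix{
		Include: []testSuitePair{
			{EntryPoint: "TestTransferTestSuite", Test: "TestPlaceholderTransfer"},
		},
	}

	out, err := json.Marshal(m)
	if err != nil {
		panic(err)
	}

	// Each element of `include` expands into one CI job.
	fmt.Println(string(out))
}
```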
@@ -44,12 +44,12 @@ jobs: matrix: ${{ fromJSON(needs.load-test-matrix.outputs.test-matrix) }} steps: - name: Checkout the ibc-go repo - uses: actions/checkout@v4 + uses: actions/checkout@v5 with: repository: cosmos/ibc-go - - uses: actions/setup-go@v5 + - uses: actions/setup-go@v6 with: - go-version: '1.23' + go-version: '1.24' cache-dependency-path: 'e2e/go.sum' - name: Run e2e Test run: | diff --git a/.github/workflows/e2e-compatibility.yaml b/.github/workflows/e2e-compatibility.yaml index 9ab9912d157..f51881f7100 100644 --- a/.github/workflows/e2e-compatibility.yaml +++ b/.github/workflows/e2e-compatibility.yaml @@ -15,7 +15,8 @@ on: options: - release/v7.10.x - release/v8.7.x - - release/v10.3.x + - release/v8.8.x + - release/v10.4.x - main ibc-go-version: description: 'The version of ibc-go that is going to be released' @@ -31,7 +32,7 @@ env: jobs: determine-image-tag: - runs-on: depot-ubuntu-22.04-4 + runs-on: depot-ubuntu-24.04-4 outputs: release-version: ${{ steps.set-release-version.outputs.release-version }} steps: @@ -45,7 +46,7 @@ jobs: # build-release-images builds all docker images that are relevant for the compatibility tests. If a single release # branch is specified, only that image will be built, e.g. release-v6.0.x. build-release-images: - runs-on: depot-ubuntu-22.04-4 + runs-on: depot-ubuntu-24.04-4 permissions: packages: write contents: read @@ -53,18 +54,18 @@ jobs: matrix: release-branch: - release/v7.10.x - - release/v8.7.x - - release/v10.3.x + - release/v8.8.x + - release/v10.4.x - main steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@v5 if: env.RELEASE_BRANCH == matrix.release-branch with: ref: "${{ matrix.release-branch }}" fetch-depth: 0 - name: Log in to the Container registry if: env.RELEASE_BRANCH == matrix.release-branch - uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 + uses: docker/login-action@v3 with: registry: ${{ env.REGISTRY }} username: ${{ github.actor }} @@ -128,15 +129,6 @@ jobs: test-file: "e2e/tests/interchain_accounts/gov_test.go" release-version: "${{ needs.determine-image-tag.outputs.release-version }}" - ica-groups-test: - needs: - - build-release-images - - determine-image-tag - uses: ./.github/workflows/e2e-compatibility-workflow-call.yaml - with: - test-file: "e2e/tests/interchain_accounts/groups_test.go" - release-version: "${{ needs.determine-image-tag.outputs.release-version }}" - ica-localhost-test: needs: - build-release-images diff --git a/.github/workflows/e2e-test-workflow-call.yml b/.github/workflows/e2e-test-workflow-call.yml deleted file mode 100644 index 7d8b2e643cb..00000000000 --- a/.github/workflows/e2e-test-workflow-call.yml +++ /dev/null @@ -1,279 +0,0 @@ -on: - workflow_call: - inputs: - test-entry-point: - description: 'Test entry point' - required: false - type: string - default: '' # empty string means run all tests - temp-run-full-suite: - description: 'This flag exists to run a hard coded set of tests and will be phased out' - required: false - type: boolean - default: false - test: - description: 'test name to run as standalone' - required: false - type: string - default: '' - test-exclusions: - description: 'Comma separated list of tests to skip' - required: false - type: string - default: '' # empty string means don't skip any test. 
- chain-image: - description: 'The image to use for chains' - required: false - type: string - default: 'ghcr.io/cosmos/ibc-go-simd' - chain-a-tag: - description: 'The tag to use for chain A' - required: true - type: string - default: main - chain-b-tag: - default: main - description: 'The tag to use for chain B' - required: true - type: string - # upgrade-plan-name is only required during upgrade tests, and is otherwise ignored. - upgrade-plan-name: - default: '' - description: 'The upgrade plan name' - required: false - type: string - build-and-push-docker-image: - description: 'Flag to specify if the docker image should be built and pushed beforehand' - required: false - type: boolean - default: false - build-and-push-docker-image-wasm: - description: 'Flag to specify if the wasm docker image should be built and pushed beforehand' - required: false - type: boolean - default: false - upload-logs: - description: 'Specify flag to indicate that logs should be uploaded on failure' - required: false - type: boolean - default: false - e2e-config-path: - description: 'Specify relative or absolute path of config file for test' - required: false - type: string - default: 'ci-e2e-config.yaml' - -env: - REGISTRY: ghcr.io - IMAGE_NAME: ibc-go-simd - IMAGE_NAME_WASM: ibc-go-wasm-simd - -jobs: - # test-details exists to provide an easy way to see the inputs for the e2e test. - test-details: - runs-on: depot-ubuntu-22.04-4 - steps: - - name: Display Inputs - run: | - echo "Chain Image: ${{ inputs.chain-image }}" - echo "Chain A Tag: ${{ inputs.chain-a-tag }}" - echo "Chain B Tag: ${{ inputs.chain-b-tag }}" - echo "Upgrade Plan Name: ${{ inputs.upgrade-plan-name }}" - echo "Test Entry Point: ${{ inputs.test-entry-point }}" - echo "Test: ${{ inputs.test }}" - echo "Github Ref Name: ${{ github.ref_name }}" - - # we skip individual steps rather than the full job as e2e-tests will not run if this task - # is skipped. But will run if every individual task is skipped. There is no current way of conditionally needing - # a job. - docker-build: - runs-on: depot-ubuntu-22.04-4 - steps: - - uses: actions/checkout@v4 - if: ${{ inputs.build-and-push-docker-image }} - - name: Log in to the Container registry - if: ${{ inputs.build-and-push-docker-image }} - uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 - with: - registry: ${{ env.REGISTRY }} - username: ${{ github.actor }} - password: ${{ secrets.GITHUB_TOKEN }} - - - name: Extract metadata (tags, labels) for Docker - if: ${{ inputs.build-and-push-docker-image }} - id: meta - uses: docker/metadata-action@902fa8ec7d6ecbf8d84d538b9b233a880e428804 - with: - images: ${{ env.REGISTRY }}/cosmos/${{ env.IMAGE_NAME }} - - - name: Build and push Docker image - if: ${{ inputs.build-and-push-docker-image }} - uses: docker/build-push-action@ca877d9245402d1537745e0e356eab47c3520991 - with: - context: . 
- push: true - tags: ${{ steps.meta.outputs.tags }} - build-args: | - IBC_GO_VERSION=${{ github.ref_name }} - - docker-build-wasm: - runs-on: depot-ubuntu-22.04-4 - steps: - - uses: actions/checkout@v4 - if: ${{ inputs.build-and-push-docker-image-wasm }} - - - uses: actions/setup-python@v5 - if: ${{ inputs.build-and-push-docker-image-wasm }} - with: - python-version: '3.10' - - - name: Install dependencies - if: ${{ inputs.build-and-push-docker-image-wasm }} - run: make python-install-deps - - - name: Determine Build arguments - if: ${{ inputs.build-and-push-docker-image-wasm }} - id: build-args - run: | - echo "version=$(scripts/get-libwasm-version.py --get-version)" >> $GITHUB_OUTPUT - echo "checksum=$(scripts/get-libwasm-version.py --get-checksum)" >> $GITHUB_OUTPUT - - - name: Log in to the Container registry - if: ${{ inputs.build-and-push-docker-image-wasm }} - uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 - with: - registry: ${{ env.REGISTRY }} - username: ${{ github.actor }} - password: ${{ secrets.GITHUB_TOKEN }} - - - name: Extract metadata (tags, labels) for Docker - if: ${{ inputs.build-and-push-docker-image-wasm }} - id: meta - uses: docker/metadata-action@902fa8ec7d6ecbf8d84d538b9b233a880e428804 - with: - images: ${{ env.REGISTRY }}/cosmos/${{ env.IMAGE_NAME_WASM }} - - - name: Build and push Docker image - if: ${{ inputs.build-and-push-docker-image-wasm }} - uses: docker/build-push-action@ca877d9245402d1537745e0e356eab47c3520991 - with: - context: . - push: true - tags: ${{ steps.meta.outputs.tags }} - file: modules/light-clients/08-wasm/Dockerfile - build-args: | - LIBWASM_VERSION=${{ steps.build-args.outputs.version }} - LIBWASM_CHECKSUM=${{ steps.build-args.outputs.checksum }} - - - # dynamically build a matrix of test/test suite pairs to run. - # this job runs a go tool located at cmd/build_test_matrix/main.go. - # it walks the e2e/test directory in order to locate all test suite / test name - # pairs. The output of this job can be fed in as input to a workflow matrix and - # will expand to jobs which will run all tests present. - build-test-matrix: - runs-on: depot-ubuntu-22.04-4 - outputs: - matrix: ${{ steps.set-matrix.outputs.matrix }} - steps: - - uses: actions/checkout@v4 - with: - repository: cosmos/ibc-go - - uses: actions/setup-go@v5 - with: - go-version: '1.23' - - id: set-matrix - run: | - output=$(go run cmd/build_test_matrix/main.go) - echo "matrix=$output" >> $GITHUB_OUTPUT - env: - TEST_ENTRYPOINT: '${{ inputs.test-entry-point }}' - TEST_EXCLUSIONS: '${{ inputs.test-exclusions }}' - TEST_NAME: '${{ inputs.test }}' - - # e2e-tests runs the actual go test command to trigger the test. - # the tests themselves are configured via environment variables to specify - # things like chain and relayer images and tags. 
- e2e-tests: - runs-on: depot-ubuntu-22.04-4 - needs: - - build-test-matrix - - docker-build - - docker-build-wasm - env: - CHAIN_IMAGE: '${{ inputs.chain-image }}' - CHAIN_UPGRADE_PLAN: '${{ inputs.upgrade-plan-name }}' - CHAIN_A_TAG: '${{ inputs.chain-a-tag }}' - CHAIN_B_TAG: '${{ inputs.chain-b-tag }}' - E2E_CONFIG_PATH: '${{ inputs.e2e-config-path }}' - strategy: - fail-fast: false - matrix: ${{ fromJSON(needs.build-test-matrix.outputs.matrix) }} - steps: - - uses: actions/checkout@v4 - with: - repository: cosmos/ibc-go - - uses: actions/setup-go@v5 - with: - go-version: '1.23' - cache-dependency-path: 'e2e/go.sum' - - name: Run e2e Test - id: e2e_test - run: | - cd e2e - make e2e-test test=${{ matrix.test }} - - name: Upload Diagnostics - uses: actions/upload-artifact@v4 - if: ${{ failure() && inputs.upload-logs }} - continue-on-error: true - with: - name: '${{ matrix.entrypoint }}-${{ matrix.test }}' - path: e2e/diagnostics - retention-days: 5 - - e2e-test-suites: - # temporary flag. eventually this field will not exist and this will be the default. - if: ${{ inputs.temp-run-full-suite }} - runs-on: depot-ubuntu-22.04-4 - needs: - - build-test-matrix - - docker-build - - docker-build-wasm - env: - CHAIN_IMAGE: '${{ inputs.chain-image }}' - CHAIN_A_TAG: '${{ inputs.chain-a-tag }}' - CHAIN_B_TAG: '${{ inputs.chain-b-tag }}' - E2E_CONFIG_PATH: '${{ inputs.e2e-config-path }}' - strategy: - fail-fast: false - matrix: - include: - # for now we explicitly specify this test suite. - - entrypoint: TestTransferTestSuite - - entrypoint: TestAuthzTransferTestSuite - - entrypoint: TestTransferTestSuiteSendReceive - - entrypoint: TestTransferTestSuiteSendEnabled - - entrypoint: TestTransferLocalhostTestSuite - - entrypoint: TestConnectionTestSuite - - entrypoint: TestInterchainAccountsGovTestSuite - steps: - - uses: actions/checkout@v4 - with: - repository: cosmos/ibc-go - - uses: actions/setup-go@v5 - with: - go-version: '1.23' - cache-dependency-path: 'e2e/go.sum' - - name: Run e2e Test - id: e2e_test - run: | - cd e2e - make e2e-suite entrypoint=${{ matrix.entrypoint }} - - name: Upload Diagnostics - uses: actions/upload-artifact@v4 - if: ${{ failure() && inputs.upload-logs }} - continue-on-error: true - with: - name: '${{ matrix.entrypoint }}-${{ matrix.test }}' - path: e2e/diagnostics - retention-days: 5 diff --git a/.github/workflows/e2e-upgrade.yaml b/.github/workflows/e2e-upgrade.yaml index 610362e1b86..5886ff7f4c1 100644 --- a/.github/workflows/e2e-upgrade.yaml +++ b/.github/workflows/e2e-upgrade.yaml @@ -17,8 +17,7 @@ env: jobs: e2e-upgrade-tests: - runs-on: depot-ubuntu-22.04-4 - needs: + runs-on: depot-ubuntu-24.04-4 strategy: fail-fast: false matrix: @@ -55,12 +54,12 @@ jobs: }, ] steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@v5 with: repository: cosmos/ibc-go - - uses: actions/setup-go@v5 + - uses: actions/setup-go@v6 with: - go-version: '1.23' + go-version: '1.24' cache-dependency-path: 'e2e/go.sum' - name: Run e2e Test id: e2e_test @@ -68,16 +67,18 @@ jobs: CHAIN_IMAGE: '${{ env.DOCKER_IMAGE_NAME }}' CHAIN_A_TAG: '${{ matrix.test-config.tag }}' CHAIN_B_TAG: '${{ matrix.test-config.tag }}' + CHAIN_C_TAG: '${{ matrix.test-config.tag }}' + CHAIN_D_TAG: '${{ matrix.test-config.tag }}' CHAIN_UPGRADE_PLAN: '${{ matrix.test-config.upgrade-plan }}' E2E_CONFIG_PATH: 'ci-e2e-config.yaml' run: | cd e2e - make e2e-test test=${{ matrix.test }} + make e2e-test test=${{ matrix.test-config.test }} - name: Upload Diagnostics uses: actions/upload-artifact@v4 if: ${{ failure() }} 
continue-on-error: true with: - name: '${{ matrix.entrypoint }}-${{ matrix.test }}' + name: '${{ matrix.test-config.entrypoint }}-${{ matrix.test-config.test }}' path: e2e/diagnostics retention-days: 5 diff --git a/.github/workflows/e2e.yaml b/.github/workflows/e2e.yaml index 5362c431b76..d76933279f0 100644 --- a/.github/workflows/e2e.yaml +++ b/.github/workflows/e2e.yaml @@ -3,6 +3,7 @@ on: workflow_dispatch: pull_request: paths: + - '**/go.mod' - '**/*.go' - '.github/workflows/e2e.yaml' concurrency: @@ -13,66 +14,58 @@ env: DOCKER_IMAGE_NAME: ghcr.io/cosmos/ibc-go-simd jobs: - determine-image-tag: - runs-on: depot-ubuntu-22.04-4 + docker-build: + runs-on: depot-ubuntu-24.04-4 outputs: simd-tag: ${{ steps.get-tag.outputs.simd-tag }} steps: - - uses: actions/checkout@v4 - - uses: actions/setup-go@v5 + - uses: actions/checkout@v5 + - name: Login to GitHub Container Registry + uses: docker/login-action@v3 with: - go-version: '1.23' + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + - id: get-tag run: | if [ -z "${{ github.event.pull_request.number }}" ] then - echo "simd-tag=main" >> $GITHUB_OUTPUT + echo "simd-tag=e2e-${{ github.sha }}" >> $GITHUB_OUTPUT else - tag="pr-${{ github.event.pull_request.number }}" + tag="e2e-pr-${{ github.event.pull_request.number }}" echo "Using tag $tag" echo "simd-tag=$tag" >> $GITHUB_OUTPUT fi - docker-build: - runs-on: depot-ubuntu-22.04-4 - permissions: - packages: write - contents: read - steps: - - uses: actions/checkout@v4 - - name: Login to GitHub Container Registry - uses: docker/login-action@v3 - with: - registry: ghcr.io - username: ${{ github.actor }} - password: ${{ secrets.GITHUB_TOKEN }} - - - name: Extract metadata (tags, labels) for Docker - id: meta - uses: docker/metadata-action@v5 - with: - images: ${{ env.DOCKER_IMAGE_NAME }} - - - name: Build and push Docker image + - name: Build Docker image uses: docker/build-push-action@v6 with: context: . 
- push: true - tags: ${{ steps.meta.outputs.tags }} + tags: ${{ env.DOCKER_IMAGE_NAME }}:${{ steps.get-tag.outputs.simd-tag }} + push: false build-args: | IBC_GO_VERSION=${{ github.ref_name }} + # To avoid having to push to the registry, making it possible for external contributors to run this + - name: Save Docker image as artifact + run: docker save $(echo ${{ env.DOCKER_IMAGE_NAME }}:${{ steps.get-tag.outputs.simd-tag }} | cut -d',' -f1) | gzip > simd-image.tar.gz + - uses: actions/upload-artifact@v4 + with: + name: simd-image + path: simd-image.tar.gz + build-test-matrix: - runs-on: depot-ubuntu-22.04-4 + runs-on: depot-ubuntu-24.04-4 outputs: matrix: ${{ steps.set-matrix.outputs.matrix }} steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@v5 with: repository: cosmos/ibc-go - - uses: actions/setup-go@v5 + - uses: actions/setup-go@v6 with: - go-version: '1.23' + go-version: '1.24' cache-dependency-path: 'go.sum' - id: set-matrix run: | @@ -82,32 +75,41 @@ jobs: TEST_EXCLUSIONS: 'TestUpgradeTestSuite' e2e-tests: - runs-on: depot-ubuntu-22.04-4 + runs-on: depot-ubuntu-24.04-4 needs: - - determine-image-tag - build-test-matrix - docker-build strategy: fail-fast: false matrix: ${{ fromJSON(needs.build-test-matrix.outputs.matrix) }} steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@v5 with: repository: cosmos/ibc-go - - uses: actions/setup-go@v5 + - uses: actions/setup-go@v6 with: - go-version: '1.23' + go-version: '1.24' cache-dependency-path: 'e2e/go.sum' + + - name: Download image from docker build + uses: actions/download-artifact@v5 + with: + name: simd-image + - run: docker load -i simd-image.tar.gz + - name: Run e2e Test id: e2e_test env: CHAIN_IMAGE: '${{ env.DOCKER_IMAGE_NAME }}' - CHAIN_A_TAG: '${{ needs.determine-image-tag.outputs.simd-tag }}' - CHAIN_B_TAG: '${{ needs.determine-image-tag.outputs.simd-tag }}' + CHAIN_A_TAG: '${{ needs.docker-build.outputs.simd-tag }}' + CHAIN_B_TAG: '${{ needs.docker-build.outputs.simd-tag }}' + CHAIN_C_TAG: '${{ needs.docker-build.outputs.simd-tag }}' + CHAIN_D_TAG: '${{ needs.docker-build.outputs.simd-tag }}' E2E_CONFIG_PATH: 'ci-e2e-config.yaml' run: | cd e2e make e2e-test test=${{ matrix.test }} + - name: Upload Diagnostics uses: actions/upload-artifact@v4 if: ${{ failure() }} diff --git a/.github/workflows/golangci.yml b/.github/workflows/golangci.yml index 4a5a277ca42..40e98d9a3e6 100644 --- a/.github/workflows/golangci.yml +++ b/.github/workflows/golangci.yml @@ -10,22 +10,22 @@ permissions: jobs: golangci: name: lint - runs-on: depot-ubuntu-22.04-4 + runs-on: depot-ubuntu-24.04-4 strategy: matrix: working-directory: ['.', 'modules/light-clients/08-wasm', 'e2e'] steps: - - uses: actions/setup-go@v5 + - uses: actions/setup-go@v6 with: - go-version: '1.23' - - uses: actions/checkout@v4 + go-version: '1.24' + - uses: actions/checkout@v5 with: fetch-depth: 0 - name: golangci-lint uses: golangci/golangci-lint-action@v8.0.0 with: - version: v2.1 + version: v2.3 only-new-issues: true args: --timeout 10m working-directory: ${{ matrix.working-directory }} diff --git a/.github/workflows/proto-breaking-check.yml b/.github/workflows/proto-breaking-check.yml index 9ddcf83bdb5..1c90286f0b2 100644 --- a/.github/workflows/proto-breaking-check.yml +++ b/.github/workflows/proto-breaking-check.yml @@ -9,8 +9,8 @@ on: jobs: proto-breaking-check: - runs-on: depot-ubuntu-22.04-4 + runs-on: depot-ubuntu-24.04-4 steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@v5 - name: Run proto-breaking check run: make 
proto-check-breaking \ No newline at end of file diff --git a/.github/workflows/proto-registry.yml b/.github/workflows/proto-registry.yml index 8425cd9447f..f03e17385c2 100644 --- a/.github/workflows/proto-registry.yml +++ b/.github/workflows/proto-registry.yml @@ -13,9 +13,9 @@ on: jobs: push: - runs-on: depot-ubuntu-22.04-4 + runs-on: depot-ubuntu-24.04-4 steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@v5 - uses: bufbuild/buf-action@v1 with: token: ${{ secrets.BUF_TOKEN }} diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 9c883314869..451a7668fb4 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -14,15 +14,15 @@ concurrency: jobs: build: - runs-on: depot-ubuntu-22.04-4 + runs-on: depot-ubuntu-24.04-4 strategy: matrix: go-arch: ['amd64', 'arm64'] steps: - - uses: actions/checkout@v4 - - uses: actions/setup-go@v5 + - uses: actions/checkout@v5 + - uses: actions/setup-go@v6 with: - go-version: '1.23' + go-version: '1.24' - name: Install compiler for arm64. if: matrix.go-arch == 'arm64' run: | @@ -49,7 +49,7 @@ jobs: done unit-tests: - runs-on: depot-ubuntu-22.04-4 + runs-on: depot-ubuntu-24.04-4 strategy: matrix: module: [ @@ -68,10 +68,10 @@ jobs: } ] steps: - - uses: actions/checkout@v4 - - uses: actions/setup-go@v5 + - uses: actions/checkout@v5 + - uses: actions/setup-go@v6 with: - go-version: '1.23' + go-version: '1.24' cache-dependency-path: '${{ matrix.module.path }}/go.sum' - name: test & coverage report creation @@ -80,7 +80,7 @@ jobs: - uses: codecov/codecov-action@v5 with: - fail_ci_if_error: true + fail_ci_if_error: false files: ${{ matrix.module.path }}/profile.out flags: ${{ matrix.module.name }} token: ${{ secrets.CODECOV_TOKEN }} diff --git a/.golangci.yml b/.golangci.yml index 4195e0d6fb5..86409db0e83 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -5,6 +5,7 @@ linters: default: none enable: - errcheck + - errorlint - goconst - gocritic - gosec @@ -18,73 +19,84 @@ linters: - unconvert - unparam - unused + - mirror + - copyloopvar + - iface + - importas + - intrange + - nakedret + - nilnesserr + - nilnil + - nonamedreturns + - prealloc + - predeclared + - reassign + - recvcheck + - testifylint + - testpackage + - usestdlibvars + - usetesting + - wastedassign settings: - gocritic: - disabled-checks: - - appendAssign gosec: excludes: - - G101 - - G107 - - G115 - - G404 - confidence: medium + - G107 # Url provided to HTTP request as taint input + - G115 # Potential integer overflow when converting between integer types + - G404 # Insecure random number source (rand) + confidence: low + nakedret: + max-func-lines: 0 revive: enable-all-rules: true + # See the rule descriptions here: https://github.com/mgechev/revive/blob/HEAD/RULES_DESCRIPTIONS.md rules: - - name: redundant-import-alias - disabled: true - - name: use-any + # Rules with configuration + - name: receiver-naming + disabled: false + arguments: + - max-length: 4 + - name: unhandled-error + disabled: false + arguments: # Disable for these ones only + - fmt.Printf + - fmt.Print + - fmt.Println + - name: var-naming + disabled: false + arguments: + - [] + - [] + - [ skipPackageNameChecks: true ] + # Disabled rules + # TODO: Some of these are kinda code smelly, should reconsider some of them + - name: add-constant disabled: true - - name: if-return + - name: argument-limit disabled: true - - name: max-public-structs + - name: banned-characters disabled: true - name: cognitive-complexity disabled: true - - name: argument-limit + - name: confusing-naming + 
disabled: true + - name: confusing-results disabled: true - name: cyclomatic disabled: true - - name: file-header + - name: deep-exit disabled: true - name: function-length disabled: true - - name: function-result-limit - disabled: true - - name: line-length-limit - disabled: true - name: flag-parameter disabled: true - - name: add-constant - disabled: true - - name: empty-lines - disabled: true - - name: banned-characters - disabled: true - - name: deep-exit - disabled: true - - name: confusing-results - disabled: true - - name: unused-parameter - disabled: true - - name: modifies-value-receiver - disabled: true - - name: early-return + - name: function-result-limit disabled: true - - name: confusing-naming + - name: line-length-limit disabled: true - - name: defer + - name: max-public-structs disabled: true - name: unused-parameter disabled: true - - name: unhandled-error - arguments: - - fmt.Printf - - fmt.Print - - fmt.Println - - myFunction - disabled: false exclusions: generated: lax presets: @@ -93,25 +105,24 @@ linters: - legacy - std-error-handling rules: - - linters: - - revive - text: differs only by capitalization to method - - linters: - - gosec - text: Use of weak random number generator - - linters: - - gosec - text: 'G115: integer overflow conversion' - linters: - staticcheck - text: 'SA1019:' - - linters: - - gosec - text: 'G115: integer overflow conversion' + text: 'SA1019:' # TODO: This should really be removed! + - path: _test\.go + linters: + - prealloc + - path: e2e/* + linters: + - prealloc + - testpackage + - path: testing/* + linters: + - prealloc paths: - third_party$ - builtin$ - examples$ + - testing/mock/* issues: max-issues-per-linter: 10000 max-same-issues: 10000 diff --git a/AGENTS.md b/AGENTS.md new file mode 100644 index 00000000000..9cadb163a9b --- /dev/null +++ b/AGENTS.md @@ -0,0 +1,50 @@ +# Repository guidelines for coding agents + +This file provides guidance for automated agents contributing to this repository. + +## Repository Overview + +- This is a Cosmos SDK Go implementation of the Inter Blockchain Communication protocol. +- It implements both IBC v1 and IBC v2. +- The project is written in Go, and implements a set of Cosmos SDK modules that are organized under `modules/`. + - `core/` implements IBC Core: + - Common components for both IBC v1 and v2: + - keeper: Root IBC module msg server + - 02-client: light-client handling and routing + - 05-port: application port binding and routing + - 23-commitment: merkle tree types for provable commitments (such as packet commitments) + - 24-host: host state-machine and related keys + - IBC v1 only: + - 03-connection: connection handling and routing, including connection setup handshakes + - 04-channel: channel handling and routing, including channel setup handshakes + - IBC v2 only: + - api: port module router + - 02-client/v2 + - 04-channel/v2 + - `apps/` contains application level modules and middlewares: + - `light-clients/` provides implementations of IBC light clients. +- Protobuf definitions live under `proto/`. +- Unit and integration tests reside throughout the repo and rely on the `testing/` package. +- End to end tests live under `e2e/`, but agents are **not** expected to run them. + +## Development Workflow + +1. **Formatting and linting** + - Run `make lint` to lint all modules + - Run `make lint-fix` to automatically fix lint issues via `golangci-lint`. + - Run `make format` to format Go code with `gofumpt`. +2. **Testing** + - Execute all unit and integration tests with `make test-unit`. 
+ - Do not run the e2e tests under `e2e/`. +3. **After making changes to dependencies** + - Run `make tidy-all` to tidy dependencies across all modules + +## Commit Messages + +- Follow the Conventional Commits specification. Examples of valid types are + `feat`, `fix`, `docs`, `test`, `deps`, and `chore`. +- Breaking changes must use the `(api)!` or `(statemachine)!` suffix. +- Include the proposed commit message in the pull request description. + +Refer to `docs/dev/pull-requests.md` for more details on commit conventions and +pull request guidelines. diff --git a/CHANGELOG.md b/CHANGELOG.md index cc70e396549..6c675cc18bb 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -36,6 +36,40 @@ Ref: https://keepachangelog.com/en/1.0.0/ ## [Unreleased] +### Features +* [\#8573](https://github.com/cosmos/ibc-go/pull/8573) Support custom address codecs in transfer, PFM, and rate limiting. +* [\#8285](https://github.com/cosmos/ibc-go/pull/8285) Packet forward middleware. +* [\#8545](https://github.com/cosmos/ibc-go/pull/8545) Support sending multiple payloads in the same packet for atomic payload execution. +* [\#8473](https://github.com/cosmos/ibc-go/pull/8473) Support sending v2 packets on v1 channel identifiers using aliasing. + +### Dependencies + +* [\#8451](https://github.com/cosmos/ibc-go/pull/8451) Bump **go** to **1.24** +* (light-clients/08-wasm)[\#8500](https://github.com/cosmos/ibc-go/pull/8500) Bump **github.com/prysmaticlabs/prysm/v5** to **github.com/OffchainLabs/prysm/v6@v6.0.4** + +### API Breaking +* (apps) [\#8476](https://github.com/cosmos/ibc-go/pull/8476) Remove `ParamSubspace` from all `Keeper` constructors +* (light-clients/08-wasm) [\#8511](https://github.com/cosmos/ibc-go/pull/8511) Remove deprecated `Checksums` type +* (core/02-client) [\#8516](https://github.com/cosmos/ibc-go/pull/8516) Remove deprecated `SubmitMisbehaviour` message handler + +### State Machine Breaking + +### Improvements + +### Bug Fixes + +### Testing API + +* [\#8366](https://github.com/cosmos/ibc-go/pull/8366) - Replaced the deprecated `codec.ProtoMarshaler` interface with `proto.Message`. + +## [v10.4.0](https://github.com/cosmos/ibc-go/releases/tag/v10.4.0) - 2025-10-10 + +### Improvements + +* [\#8615](https://github.com/cosmos/ibc-go/pull/8615) Support custom address codecs in transfer. + +## [v10.3.0](https://github.com/cosmos/ibc-go/releases/tag/v10.3.0) - 2025-06-06 + ### Features ### Dependencies @@ -49,7 +83,8 @@ Ref: https://keepachangelog.com/en/1.0.0/ ### Improvements -* [\#8303](https://github.com/cosmos/ibc-go/pull/8303) Prefix-based routing in IBCv2 Router +* (core/api) [\#8303](https://github.com/cosmos/ibc-go/pull/8303) Prefix-based routing in IBCv2 Router +- (apps/callbacks) [\#8353](https://github.com/cosmos/ibc-go/pull/8353) Add field in callbacks data for custom calldata ### Bug Fixes @@ -58,7 +93,7 @@ Ref: https://keepachangelog.com/en/1.0.0/ * [\#8371](https://github.com/cosmos/ibc-go/pull/8371) e2e: Create only necessary number of chains for e2e suite. 
* [\#8375](https://github.com/cosmos/ibc-go/pull/8375) feat: parse IBC v2 packets from ABCI events -## [v10.2.0](https://github.com/cosmos/ibc-go/releases/tag/v10.2.0) - 2022-04-30 +## [v10.2.0](https://github.com/cosmos/ibc-go/releases/tag/v10.2.0) - 2025-04-30 ### Features @@ -82,7 +117,7 @@ Ref: https://keepachangelog.com/en/1.0.0/ * [\#8287](https://github.com/cosmos/ibc-go/pull/8287) rename total_escrow REST query from `denoms` to `total_escrow` -## [v10.1.0](https://github.com/cosmos/ibc-go/releases/tag/v10.1.0) - 2022-03-14 +## [v10.1.0](https://github.com/cosmos/ibc-go/releases/tag/v10.1.0) - 2025-03-14 ### Security Fixes diff --git a/Dockerfile b/Dockerfile index 6c01d89a1ce..3e4d1edb8cb 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.23.8-alpine AS builder +FROM golang:1.24-alpine AS builder ARG IBC_GO_VERSION RUN set -eux; apk add --no-cache gcc git libusb-dev linux-headers make musl-dev; diff --git a/Makefile b/Makefile index 15dba16462f..2c1f0c37fff 100644 --- a/Makefile +++ b/Makefile @@ -201,9 +201,11 @@ check-docs-links: './docs/docs' lint-docs: - @command -v markdownlint-cli2 >/dev/null 2>&1 || { echo "ERROR: markdownlint-cli2 is not installed (https://github.com/DavidAnson/markdownlint-cli2#install)" >&2; exit 1; } + @command -v markdownlint-cli2 >/dev/null 2>&1 || { \ + echo "ERROR: markdownlint-cli2 is not installed" >&2; exit 1; \ + } @echo "Linting documentation..." - @markdownlint-cli2 ./docs/docs/**/*.md + @find docs/docs -type f -name '*.md' | xargs markdownlint-cli2 .PHONY: build-docs serve-docs tag-docs-version @@ -335,13 +337,6 @@ benchmark: ### Linting ### ############################################################################### -#? setup-pre-commit: Set pre commit git hook -setup-pre-commit: - @cp .git/hooks/pre-commit .git/hooks/pre-commit.bak 2>/dev/null || true - @echo "Installing pre-commit hook..." - @ln -sf ../../scripts/hooks/pre-commit.sh .git/hooks/pre-commit - @echo "Pre-commit hook was installed at .git/hooks/pre-commit" - #? lint: Run golangci-lint on all modules lint: @echo "--> Running linter" @@ -352,10 +347,9 @@ lint-fix: @echo "--> Running linter" @./scripts/go-lint-all.sh --fix -#? format: Run gofumpt and misspell +#? format: Run gofumpt format: - find . -name '*.go' -type f -not -path "./vendor*" -not -path "*.git*" -not -path "./docs/client/statik/statik.go" -not -path "./tests/mocks/*" -not -name '*.pb.go' -not -name '*.pb.gw.go' | xargs gofumpt -w - find . -name '*.go' -type f -not -path "./vendor*" -not -path "*.git*" -not -path "./docs/client/statik/statik.go" -not -path "./tests/mocks/*" -not -name '*.pb.go' -not -name '*.pb.gw.go' | xargs misspell -w + find . -name '*.go' -type f -not -path "./vendor*" -not -path "*.git*" -not -path "./tests/mocks/*" -not -name '*.pb.go' -not -name '*.pb.gw.go' | xargs gofumpt -w .PHONY: format .PHONY: lint lint-fix format @@ -364,7 +358,7 @@ format: ### Protobuf ### ############################################################################### -protoVer=0.14.0 +protoVer=0.17.1 protoImageName=ghcr.io/cosmos/proto-builder:$(protoVer) protoImage=$(DOCKER) run --rm -v $(CURDIR):/workspace --workdir /workspace $(protoImageName) diff --git a/README.md b/README.md index 770730c6e77..954a521665b 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,4 @@ -
[README header: centered "ibc-go" title block (the +/- lines in this hunk change only HTML markup)]
@@ -30,50 +30,28 @@ [badge row: Tests / Code Coverage Status, E2E Status] + [Ask DeepWiki badge]
-The [Inter-Blockchain Communication protocol (IBC)](https://ibcprotocol.dev/) allows blockchains to talk to each other. This end-to-end, connection-oriented, stateful protocol provides reliable, ordered, and authenticated communication between heterogeneous blockchains. For a high-level explanation of what IBC is and how it works, please read [this blog post](https://medium.com/the-interchain-foundation/eli5-what-is-ibc-def44d7b5b4c). - -This IBC implementation in Golang is built as a Cosmos SDK module. To understand more about how to use the `ibc-go` module as well as about the IBC protocol, please check out the Interchain Developer Academy [section on IBC](https://tutorials.cosmos.network/academy/3-ibc/), or [our docs](./docs/docs/01-ibc/01-overview.md). +The [Inter-Blockchain Communication protocol (IBC)](https://ibcprotocol.dev/) is a blockchain interoperability solution that allows blockchains to talk to each other. Blockchains that speak IBC can transfer any kind of data encoded in bytes — including tokens, messages, and arbitrary application logic. IBC is secure, permissionless, and designed to connect sovereign blockchains into a single interoperable network. For a high-level explanation of what IBC is and how it works, please read [this article](https://ibcprotocol.dev/how-ibc-works). -## Roadmap - -For an overview of upcoming changes to ibc-go take a look at the [roadmap project board](https://github.com/orgs/cosmos/projects/38/views/14). +The IBC implementation in Golang `ibc-go` has been used in production by the majority of the 200+ chains that have utilized IBC. It is built as a Cosmos SDK module. To understand more about how to use the `ibc-go` module as well as learn more about the IBC Protocol, please check out [our docs](./docs/docs/01-ibc/01-overview.md). ## Releases -The release lines currently supported are v7, and v8 (Note: v9 has been retracted and will be replaced by v10). - -Please refer to the [Stable Release Policy section of RELEASES.md](https://github.com/cosmos/ibc-go/blob/main/RELEASES.md#stable-release-policy) for more details. +The release lines currently supported are v7, v8, and v10. Please note that v9 has been retracted and has been replaced by v10. Please refer to our [versioning guide](https://github.com/cosmos/ibc-go/blob/main/RELEASES.md) for more information on how to understand our release versioning. -## Ecosystem - -Discover more applications and middleware in the [cosmos/ibc-apps repository](https://github.com/cosmos/ibc-apps#-bonus-content). - -## Community - -We have active, helpful communities on Discord and Telegram. +## Applications, Middleware, and Tools -For questions and support please use the `developers` channel in the [Cosmos Network Discord server](https://discord.com/channels/669268347736686612/1019978171367559208) or join the [Interchain Discord server](https://discord.com/invite/interchain). The issue list of this repo is exclusively for bug reports and feature requests. - -To receive announcements of new releases or other technical updates, please join the [Telegram group that Interchain Labs administers](https://t.me/cosmostechstack/1). - -## Contributing - -If you're interested in contributing to ibc-go, please take a look at the [contributing guidelines](./CONTRIBUTING.md). We welcome and appreciate community contributions! - -This project adheres to ibc-go's [code of conduct](./CODE_OF_CONDUCT.md). By participating, you are expected to uphold this code. 
- -To help contributors understand which issues are good to pick up, we have the following two categories: +IBC has an extensive list of applications, middleware, and tools, including relayers. View the list on the [IBC technical resource catalogue](https://ibcprotocol.dev/technical-resource-catalog) on our website. -- Issues with the label [`good first issue`](https://github.com/cosmos/ibc-go/issues?q=is%3Aopen+is%3Aissue+label%3A%22good+first+issue%22) should be pretty well defined and are best suited for developers new to ibc-go. -- Issues with the label [`help wanted`](https://github.com/cosmos/ibc-go/issues?q=is%3Aopen+is%3Aissue+label%3A%22help+wanted%22) are a bit more involved and they usually require some familiarity already with the codebase. +## Developer Community and Support -If you are interested in working on an issue, please comment on it; then we will be able to assign it to you. We will be happy to answer any questions you may have and help you out while you work on the issue. +The issue list of this repo is exclusively for bug reports and feature requests. We have active, helpful communities on Discord, Telegram, and Slack. -If you have any general questions or feedback, please reach out to us in the [Interchain Discord server](https://discord.com/invite/interchain). +**| Need Help? | Support & Community: [Discord](https://discord.com/invite/interchain) - [Telegram](https://t.me/CosmosOG) - [Talk to an Expert](https://cosmos.network/interest-form) - [Join the #Cosmos-tech Slack Channel](https://forms.gle/A8jawLgB8zuL1FN36) |** ## Security @@ -89,6 +67,24 @@ The following audits have been performed on the `ibc-go` source code: - [ICS08 Wasm Clients](https://github.com/cosmos/ibc-go/blob/main/docs/audits/08-wasm/Ethan%20Frey%20-%20Wasm%20Client%20Review.pdf) by Ethan Frey/Confio. - [ICS04 Channel upgradability](https://github.com/cosmos/ibc-go/blob/main/docs/audits/04-channel-upgrades/Atredis%20Partners%20-%20Interchain%20Foundation%20IBC-Go%20Channel%20Upgrade%20Feature%20Assessment%20-%20Report%20v1.1.pdf) by Atredis Partners. +## Maintainers +[Cosmos Labs](https://cosmoslabs.io/) maintains the core components of the stack: Cosmos SDK, CometBFT, IBC, Cosmos EVM, and various developer tools and frameworks. The detailed maintenance policy can be found [here](https://github.com/cosmos/security/blob/main/POLICY.md). In addition to developing and maintaining the Cosmos Stack, Cosmos Labs provides advisory and engineering services for blockchain solutions. [Get in touch with Cosmos Labs](https://www.cosmoslabs.io/contact). + +Cosmos Labs is a wholly-owned subsidiary of the [Interchain Foundation](https://interchain.io/), the Swiss nonprofit responsible for treasury management, funding public goods, and supporting governance for Cosmos. + +The Cosmos Stack is supported by a robust community of open-source contributors. + +## Contributing to ibc-go + +If you're interested in contributing to ibc-go, please take a look at the [contributing guidelines](./CONTRIBUTING.md). We welcome and appreciate community contributions! + +To help contributors understand which issues are good to pick up, we have the following two categories: + +- Issues with the label [`good first issue`](https://github.com/cosmos/ibc-go/issues?q=is%3Aopen+is%3Aissue+label%3A%22good+first+issue%22) should be pretty well defined and are best suited for developers new to ibc-go. 
+- Issues with the label [`help wanted`](https://github.com/cosmos/ibc-go/issues?q=is%3Aopen+is%3Aissue+label%3A%22help+wanted%22) are a bit more involved and they usually require some familiarity already with the codebase. + +If you are interested in working on an issue, please comment on it. We will be happy to answer any questions you may have and help you out while you work on the issue. + ## Quick Navigation 1. **[Core IBC Implementation](https://github.com/cosmos/ibc-go/tree/main/modules/core)** @@ -127,11 +123,14 @@ The following audits have been performed on the `ibc-go` source code: ## Documentation and Resources +### IBC Information - [IBC Website](https://ibcprotocol.dev/) -- [IBC Protocol Specification](https://github.com/cosmos/ibc) +- [IBC Protocol Specification and Standards](https://github.com/cosmos/ibc) - [Documentation](./docs/docs/01-ibc/01-overview.md) -- [Interchain Developer Academy](https://tutorials.cosmos.network/academy/3-ibc/) ---- +### Cosmos Stack Libraries -The development of ibc-go is led primarily by Interchain GmbH. Funding for this development comes primarily from the [Interchain Foundation](https://interchain.io), a Swiss non-profit. +- [Cosmos SDK](http://github.com/cosmos/cosmos-sdk) - A framework for building + applications in Golang +- [CometBFT](https://github.com/cometbft/cometbft) - High-performance, 10k+ TPS configurable BFT consensus engine. +- [Cosmos EVM](https://github.com/cosmos/evm) - Native EVM layer for Cosmos SDK chains. diff --git a/SECURITY.md b/SECURITY.md index 485d39559b3..e83eaba8f0b 100644 --- a/SECURITY.md +++ b/SECURITY.md @@ -14,3 +14,7 @@ Please DO NOT file a public issue in this repository to report a security vulner For the most up-to-date version of the policies that govern vulnerability disclosure, please consult the [HackerOne program page](https://hackerone.com/cosmos?type=team&view_policy=true). The policy hosted on HackerOne is the official Coordinated Vulnerability Disclosure policy and Safe Harbor for the Interchain Stack, and the teams and infrastructure it supports, and it supersedes previous security policies that have been used in the past by individual teams and projects with targets in scope of the program. + +### More information + +* See [Maintenance and Security](https://github.com/cosmos/security) for detailed policies. 
diff --git a/cmd/build_test_matrix/main.go b/cmd/build_test_matrix/main.go index 3acb402e50f..18cc99ad047 100644 --- a/cmd/build_test_matrix/main.go +++ b/cmd/build_test_matrix/main.go @@ -90,7 +90,7 @@ func getGithubActionMatrixForTests(e2eRootDirectory, testName string, suite stri fset := token.NewFileSet() err := filepath.Walk(e2eRootDirectory, func(path string, info fs.FileInfo, err error) error { if err != nil { - return fmt.Errorf("error walking e2e directory: %s", err) + return fmt.Errorf("error walking e2e directory: %w", err) } // only look at test files @@ -100,7 +100,7 @@ func getGithubActionMatrixForTests(e2eRootDirectory, testName string, suite stri f, err := parser.ParseFile(fset, path, nil, 0) if err != nil { - return fmt.Errorf("failed parsing file: %s", err) + return fmt.Errorf("failed parsing file: %w", err) } suiteNameForFile, testCases, err := extractSuiteAndTestNames(f) diff --git a/cmd/build_test_matrix/main_test.go b/cmd/build_test_matrix/main_test.go index 280a4f620be..83bf3a151de 100644 --- a/cmd/build_test_matrix/main_test.go +++ b/cmd/build_test_matrix/main_test.go @@ -7,7 +7,7 @@ import ( "strings" "testing" - "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) const ( @@ -20,7 +20,7 @@ func TestGetGithubActionMatrixForTests(t *testing.T) { t.Run("empty dir with no test cases fails", func(t *testing.T) { testingDir := t.TempDir() _, err := getGithubActionMatrixForTests(testingDir, "", "", nil) - assert.Error(t, err) + require.Error(t, err) }) t.Run("only test functions are picked up", func(t *testing.T) { @@ -28,7 +28,7 @@ func TestGetGithubActionMatrixForTests(t *testing.T) { createFileWithTestSuiteAndTests(t, "FeeMiddlewareTestSuite", "TestA", "TestB", testingDir, goTestFileNameOne) gh, err := getGithubActionMatrixForTests(testingDir, "", "", nil) - assert.NoError(t, err) + require.NoError(t, err) expected := GithubActionTestMatrix{ Include: []TestSuitePair{ @@ -51,7 +51,7 @@ func TestGetGithubActionMatrixForTests(t *testing.T) { createFileWithTestSuiteAndTests(t, "TransferTestSuite", "TestC", "TestD", testingDir, goTestFileNameTwo) gh, err := getGithubActionMatrixForTests(testingDir, "", "", nil) - assert.NoError(t, err) + require.NoError(t, err) expected := GithubActionTestMatrix{ Include: []TestSuitePair{ @@ -83,7 +83,7 @@ func TestGetGithubActionMatrixForTests(t *testing.T) { createFileWithTestSuiteAndTests(t, "TransferTestSuite", "TestC", "TestD", testingDir, goTestFileNameTwo) gh, err := getGithubActionMatrixForTests(testingDir, "TestA", "TestFeeMiddlewareTestSuite", nil) - assert.NoError(t, err) + require.NoError(t, err) expected := GithubActionTestMatrix{ Include: []TestSuitePair{ @@ -102,7 +102,7 @@ func TestGetGithubActionMatrixForTests(t *testing.T) { createFileWithTestSuiteAndTests(t, "FeeMiddlewareTestSuite", "TestA", "TestB", testingDir, goTestFileNameOne) _, err := getGithubActionMatrixForTests(testingDir, "TestThatDoesntExist", "TestFeeMiddlewareTestSuite", nil) - assert.Error(t, err) + require.Error(t, err) }) t.Run("non test files are skipped", func(t *testing.T) { @@ -110,8 +110,8 @@ func TestGetGithubActionMatrixForTests(t *testing.T) { createFileWithTestSuiteAndTests(t, "FeeMiddlewareTestSuite", "TestA", "TestB", testingDir, nonTestFile) gh, err := getGithubActionMatrixForTests(testingDir, "", "", nil) - assert.Error(t, err) - assert.Empty(t, gh.Include) + require.Error(t, err) + require.Empty(t, gh.Include) }) t.Run("fails when there are multiple suite runs", func(t *testing.T) { @@ -131,10 +131,10 @@ type 
FeeMiddlewareTestSuite struct {} ` err := os.WriteFile(path.Join(testingDir, goTestFileNameOne), []byte(fileWithTwoSuites), os.FileMode(0o777)) - assert.NoError(t, err) + require.NoError(t, err) _, err = getGithubActionMatrixForTests(testingDir, "", "", nil) - assert.Error(t, err) + require.Error(t, err) }) } @@ -159,7 +159,7 @@ func assertGithubActionTestMatricesEqual(t *testing.T, expected, actual GithubAc } return memberI.EntryPoint < memberJ.EntryPoint }) - assert.Equal(t, expected.Include, actual.Include) + require.Equal(t, expected.Include, actual.Include) } func goTestFileContents(suiteName, fnName1, fnName2 string) string { @@ -187,5 +187,5 @@ func createFileWithTestSuiteAndTests(t *testing.T, suiteName, fn1Name, fn2Name, t.Helper() goFileContents := goTestFileContents(suiteName, fn1Name, fn2Name) err := os.WriteFile(path.Join(dir, filename), []byte(goFileContents), os.FileMode(0o777)) - assert.NoError(t, err) + require.NoError(t, err) } diff --git a/codecov.yml b/codecov.yml index dbad545cada..90643b9dd06 100644 --- a/codecov.yml +++ b/codecov.yml @@ -1,3 +1,6 @@ +codecov: + notify: + wait_for_ci: true coverage: precision: 2 range: @@ -8,17 +11,21 @@ coverage: project: default: target: auto - threshold: 0% + threshold: 5 base: auto + informational: true + flags: + - ibc-go + - 08-wasm +comment: + require_changes: "coverage_drop OR uncovered_patch" # Only comment when coverage drops or there is uncovered code in the commit ignore: -- ^docs.* -- (?s:[^\/]*\.md.*)\Z -- (?s:.*/[^\/]*\.pb\.go.*)\Z -- (?s:.*/[^\/]*\.pb\.gw\.go.*)\Z -- (?s:modules/.*/.*/.*/[^\/]*\.pb\.go.*)\Z -- (?s:modules/.*/.*/.*/[^\/]*\.pb\.gw\.go.*)\Z -- ^testing/.* -- ^scripts/.* -- ^contrib/.* -- ^e2e/.* -- ^cmd/.* +- "**/*.pb.go" +- "**/*.pb.gw.go" +- "docs" +- "simapp" +- "testing" +- "modules/light-clients/08-wasm/testing" +- "scripts" +- "contrib" +- "cmd" diff --git a/docs/.markdownlint.jsonc b/docs/.markdownlint.jsonc index 35b40d4ef8a..2039e5cbce0 100644 --- a/docs/.markdownlint.jsonc +++ b/docs/.markdownlint.jsonc @@ -23,5 +23,6 @@ }, // https://github.com/DavidAnson/markdownlint/blob/main/doc/Rules.md#md049---emphasis-style-should-be-consistent "MD050": { "style": "asterisk" - } // https://github.com/DavidAnson/markdownlint/blob/main/doc/Rules.md#md050---strong-style-should-be-consistent + }, // https://github.com/DavidAnson/markdownlint/blob/main/doc/Rules.md#md050---strong-style-should-be-consistent + "MD059": false, //https://github.com/DavidAnson/markdownlint/blob/main/doc/md059.md---undescriptive link names } diff --git a/docs/dev/go-style-guide.md b/docs/dev/go-style-guide.md index b4212e21fd5..e94eef9d940 100644 --- a/docs/dev/go-style-guide.md +++ b/docs/dev/go-style-guide.md @@ -3,7 +3,7 @@ In order to keep our code looking good with lots of programmers working on it, it helps to have a "style guide", so all the code generally looks quite similar. This doesn't mean there is only one "right way" to write code, or even that this standard is better than your style. But if we agree to a number of stylistic practices, it makes it much easier to read and modify new code. Please feel free to make suggestions if there's something you would like to add or modify. -We expect all contributors to be familiar with [Effective Go](https://golang.org/doc/effective_go.html) (and it's recommended reading for all Go programmers anyways). Additionally, we generally agree with the suggestions in [Uber's style guide](https://github.com/uber-go/guide/blob/master/style.md) and use that as a starting point. 
+We expect all contributors to be familiar with [Effective Go](https://golang.org/doc/effective_go.html) (and it's recommended reading for all Go programmers anyways). Additionally, we generally agree with the suggestions in [Google's style guide](https://google.github.io/styleguide/go/index) and use that as a starting point. ## Code Structure @@ -50,7 +50,7 @@ b := f ## Linting -- Run `make lint-fix` to fix any linting errors. +- Run `make lint` to see linting errors and `make lint-fix` to fix many issues (some linters do not support auto-fix). ## Various @@ -65,6 +65,7 @@ type middleware struct { - Acronyms are all capitalized, like "RPC", "gRPC", "API". "MyID", rather than "MyId". - Whenever it is safe to use Go's built-in `error` instantiation functions (as opposed to Cosmos SDK's error instantiation functions), prefer `errors.New()` instead of `fmt.Errorf()` unless you're actually using the format feature with arguments. +- As a general guideline, prefer to make the methods for a type [either all pointer methods or all value methods.](https://google.github.io/styleguide/go/decisions#receiver-type) ## Importing libraries @@ -117,3 +118,237 @@ sdkerrors.Wrapf( ) ``` + +## Common mistakes + +This is a compilation of some of the common mistakes we see in the repo that should be avoided. + +--- +Keep receiver names short [Details here](https://google.github.io/styleguide/go/decisions#receiver-names) + +```go +// bad +func (chain *TestChain) NextBlock() { + res, err := chain.App.FinalizeBlock(&abci.RequestFinalizeBlock{ + Height: chain.ProposedHeader.Height, + Time: chain.ProposedHeader.GetTime(), + NextValidatorsHash: chain.NextVals.Hash(), + }) + require.NoError(chain.TB, err) + chain.commitBlock(res) +} +``` + +```go +// good +func (c *TestChain) NextBlock() { + // Ommitted +``` + +--- +**Naked returns** + +We should always try to avoid naked returns. [Reference](https://google.github.io/styleguide/go/decisions#named-result-parameters) + +--- +**Function and method calls should not be separated based solely on line length** + +The signature of a function or method declaration [should remain on a single line](https://google.github.io/styleguide/go/decisions#function-formatting) to avoid indentation confusion. + +```go +// bad +func (im IBCMiddleware) OnRecvPacket( + ctx sdk.Context, + channelVersion string, + packet channeltypes.Packet, + relayer sdk.AccAddress, +) ibcexported.Acknowledgement { + +// good +func (im IBCMiddleware) OnRecvPacket(ctx sdk.Context, channelVersion string, packet channeltypes.Packet, relayer sdk.AccAddress) ibcexported.Acknowledgement { +``` + +--- +**Don't Use Get in function/Method names** +[Reference](https://google.github.io/styleguide/go/decisions#getters) + +```go +// bad + +// GetChainBID returns the chain-id for chain B. 
+func (tc TestConfig) GetChainBID() string { + if tc.ChainConfigs[1].ChainID != "" { + return tc.ChainConfigs[1].ChainID + } + return "chainB-1" +} + +// good +func (tc TestConfig) ChainID(i int) string { + if tc.ChainConfigs[i].ChainID != "" { + return tc.ChainConfigs[i].ChainID + } + return "chainB-1" +} +``` + +--- +**Do not make confusing indentation for saving vertical spaces** + +```go +// Bad +cases := []struct { + name string + malleate func() + expErr error + }{ + {"verification success", func() {}, nil}, + {"verification success: delay period passed", func() { + delayTimePeriod = uint64(1 * time.Second.Nanoseconds()) + }, nil}, + {"delay time period has not passed", func() { + delayTimePeriod = uint64(1 * time.Hour.Nanoseconds()) + }, errorsmod.Wrap(ibctm.ErrDelayPeriodNotPassed, "failed packet commitment verification for client (07-tendermint-0): cannot verify packet until time: 1577926940000000000, current time: 1577923345000000000")}, + {"client status is not active - client is expired", func() { + clientState, ok := path.EndpointB.GetClientState().(*ibctm.ClientState) + suite.Require().True(ok) + clientState.FrozenHeight = clienttypes.NewHeight(0, 1) + path.EndpointB.SetClientState(clientState) + }, errorsmod.Wrap(clienttypes.ErrClientNotActive, "client (07-tendermint-0) status is Frozen")}, + } +``` + +```go +// Bad +{ + "nil underlying app", func() { + isNilApp = true + }, nil, +}, +``` + +```go +// Good +{ + "nil underlying app", + func() { + isNilApp = true + }, + nil, +}, +``` + +## Good Practices + +**Testing context** + +Go 1.24 added a (testing.TB).Context() method. In tests, prefer using (testing.TB).Context() over context.Background() to provide the initial context.Context used by the test. Helper functions, environment or test double setup, and other functions called from the test function body that require a context should have one explicitly passed. [Reference](https://google.github.io/styleguide/go/decisions#contexts) + +--- +**Error Logging** + +If you return an error, it’s usually better not to log it yourself but rather let the caller handle it. +[Reference](https://google.github.io/styleguide/go/best-practices.html#error-logging) + +--- +**Struct defined outside of the package** + +Must have fields specified. [Reference](https://google.github.io/styleguide/go/decisions#field-names) + +```go +// Good: +r := csv.Reader{ + Comma: ',', + Comment: '#', + FieldsPerRecord: 4, +} +``` + +```go +// Bad: +r := csv.Reader{',', '#', 4, false, false, false, false} +``` + +--- +**Naming struct fields in tabular tests** + +If tabular test struct has more than two fields, consider explicitly naming them. If the test struct has one name and one error field, then we can allow upto three fields. If test struct has more fields, consider naming them when writing test cases. 
+ +```go +// Good + +tests := []struct { + name string + memo string + expectedPass bool + message string + registerInterfaceFn func(registry codectypes.InterfaceRegistry) + assertionFn func(t *testing.T, msgs []sdk.Msg) + }{ + { + name: "packet data generation succeeds (MsgDelegate & MsgSend)", + memo: "", + expectedPass: true, + message: multiMsg, + registerInterfaceFn: func(registry codectypes.InterfaceRegistry) { + stakingtypes.RegisterInterfaces(registry) + banktypes.RegisterInterfaces(registry) + }, + assertionFn: func(t *testing.T, msgs []sdk.Msg) { + t.Helper() + assertMsgDelegate(t, msgs[0]) + assertMsgBankSend(t, msgs[1]) + }, + }, + } +``` + +```go +// Bad +testCases := []struct { + name string + malleate func() + callbackFn func( + ctx sdk.Context, + packetDataUnmarshaler porttypes.PacketDataUnmarshaler, + packet channeltypes.Packet, + maxGas uint64, + ) (types.CallbackData, bool, error) + getSrc bool + }{ + { + "success: src_callback v1", + func() { + packetData = transfertypes.FungibleTokenPacketData{ + Denom: ibctesting.TestCoin.Denom, + Amount: ibctesting.TestCoin.Amount.String(), + Sender: sender, + Receiver: receiver, + Memo: fmt.Sprintf(`{"src_callback": {"address": "%s"}}`, sender), + } + + expCallbackData = expSrcCallBack + + s.path.EndpointA.ChannelConfig.Version = transfertypes.V1 + s.path.EndpointA.ChannelConfig.PortID = transfertypes.ModuleName + s.path.EndpointB.ChannelConfig.Version = transfertypes.V1 + s.path.EndpointB.ChannelConfig.PortID = transfertypes.ModuleName + }, + types.GetSourceCallbackData, + true, + }, + } +``` + +## Known Anti Patterns + +It's strongly recommended [not to create a custom context](https://google.github.io/styleguide/go/decisions#custom-contexts). The Cosmos SDK has it's own context that is passed around, and we should not try to work against that pattern to avoid confusion. + +--- +Test outputs should include the actual value that the function returned before printing the value that was expected. A standard format for printing test outputs is YourFunc(%v) = %v, want %v. Where you would write “actual” and “expected”, prefer using the words “got” and “want”, respectively. [Reference](https://google.github.io/styleguide/go/decisions#got-before-want) + +But testify has it other way around. + +`Require.Equal(Expected, Actual)` + +This is a known anti pattern that we allow as the testify package is used heavily in tests. diff --git a/docs/dev/release-management.md b/docs/dev/release-management.md index 668e85a7c5c..24df7d9bfc9 100644 --- a/docs/dev/release-management.md +++ b/docs/dev/release-management.md @@ -1,8 +1,8 @@ # Tagging a release -Before tagging a new release, please run the [compatibility e2e test suite](https://github.com/cosmos/ibc-go/actions/workflows/e2e-compatibility.yaml) for the corresponding release line. +## New release branch setup -## New major release branch +### New major release branch Pre-requisites for creating a release branch for a new major version: @@ -14,16 +14,79 @@ Once the above pre-requisites are satisfied: 1. Start on `main`. 2. Create the release branch (`release/vX.XX.X`). For example: `release/v3.0.x`. -## New minor release branch +### New minor release branch 1. Start on the latest release branch in the same major release line. For example: the latest release branch in the `v3` release line is `v3.2.x`. 2. Create branch from the release branch. For example: create branch `release/v3.3.x` from `v3.2.x`. +### Post branch creation Post-requisites for both new major and minor release branches: -1. 
Add branch protection rules to new release branch. -2. Add backport task to [`mergify.yml`](https://github.com/cosmos/ibc-go/blob/main/.github/mergify.yml). -3. Create label for backport (e.g.`backport-to-v3.0.x`). +1. Add backport task to [`mergify.yml`](https://github.com/cosmos/ibc-go/blob/main/.github/mergify.yml). +2. Create label for backport (e.g. `backport-to-v3.0.x`). +3. Add the branch to `e2e-compatibility.yaml` in the `workflow_dispatch.inputs.release-branch.options` list. + +## General release procedure + +For specifics around the point release procedure, see [the documentation for that below](#point-release-procedure). + +NOTE: Since ibc-go v10, we release sub-modules (e.g. `modules/light-clients/08-wasm/v10.3.0`) from the same release branch as `ibc-go`. + +### 0: Before release + +1. Before tagging a new release, make sure to run the [compatibility e2e test suite](https://github.com/cosmos/ibc-go/actions/workflows/e2e-compatibility.yaml) for the corresponding release line. +2. Make sure the CHANGELOG.md is updated. + +### 1: Release ibc-go + +1. Create a new release in [GitHub](https://github.com/cosmos/ibc-go/releases) with "Draft new release". +2. Select the release branch (e.g. `release/v10.4.x`) +3. Set the correct tag (e.g. `v10.4.0`) +4. Write release notes +5. Check the `This is a pre-release` checkbox if needed (this applies for alpha, beta and release candidates). + +### 2: Release sub-modules + +For each sub-module, from the release branch: +1. Remove any ibc-go `replace` directives in the sub-module's `go.mod`. + ```diff + replace ( +- github.com/cosmos/ibc-go/v10 => ../../../ + ``` +2. Update the ibc-go version in the sub-module's `go.mod` to the version released in the previous step. + ```diff +- github.com/cosmos/ibc-go/v10 v10.3.0 ++ github.com/cosmos/ibc-go/v10 v10.4.0 + ``` +3. Create a PR to the release branch, and after CI finishes, merge it. +4. Create a new release in [GitHub](https://github.com/cosmos/ibc-go/releases) with "Draft new release". +5. Select the release branch (e.g. `release/v10.4.x`) +6. Set the correct tag (e.g. `modules/light-clients/08-wasm/v10.4.0`) +7. Write release notes +8. Check the `This is a pre-release` checkbox if needed (this applies for alpha, beta and release candidates). + +### 3: Post-release procedure + +1. Clean up the release branch + - Put back the sub-module `replace` directives in the sub-module's `go.mod`. + - Put back the `[Unreleased]` section in the release branch (e.g. `release/v1.0.x`) with clean sections for each of the types of changelog entries, so that entries will be added for the PRs that are backported for the next release. +2. Clean up the main branch + - Update [`CHANGELOG.md`](../../CHANGELOG.md) in `main` (remove from the `[Unreleased]` section any items that are part of the release). + - Update [version matrix](../../RELEASES.md#version-matrix) in `RELEASES.md`: add the new release and remove any tags that might not be recommended anymore. + - Additionally, for the first point release of a new major or minor release branch: + - Update the table of supported release lines (and End of Life dates) in [`RELEASES.md`](../../RELEASES.md): add the new release line and remove any release lines that might have become discontinued. + - Update the [list of supported release lines in README.md](../../RELEASES.md#releases), if necessary.
+ - Update the manual [e2e `simd`](https://github.com/cosmos/ibc-go/blob/main/.github/workflows/e2e-manual-simd.yaml) test workflow: + - Remove any tags that might not be recommended anymore. + - Update docs site: + - If the release is occurring on the main branch, on the latest version, then run `npm run docusaurus docs:version vX.Y.Z` in the `docs/` directory. (where `X.Y.Z` is the new version number) + - If the release is occurring on an older release branch, then make a PR to the main branch called `docs: new release vX.Y.Z` doing the following: + - Update the content of the docs found in `docs/versioned_docs/version-vx.y.z` if needed. (where `x.y.z` is the previous version number) + - Update the version number of the older release branch by changing the version number of the older release branch in: + - In `docs/versions.json`. + - Rename `docs/versioned_sidebars/version-vx.y.z-sidebars.json` + - Rename `docs/versioned_docs/version-vx.y.z` + - After changes to docs site are deployed, check [ibc.cosmos.network](https://ibc.cosmos.network) is updated. ## Point release procedure @@ -60,27 +123,3 @@ Finally, when a point release is ready to be made: - Write release notes. - Check the `This is a pre-release` checkbox if needed (this applies for alpha, beta and release candidates). -### Post-release procedure - -- Update [`CHANGELOG.md`](../../CHANGELOG.md) in `main` (remove from the `[Unreleased]` section any items that are part of the release).` -- Put back the `[Unreleased]` section in the release branch (e.g. `release/v1.0.x`) with clean sections for each of the types of changelog entries, so that entries will be added for the PRs that are backported for the next release. -- Update [version matrix](../../RELEASES.md#version-matrix) in `RELEASES.md`: add the new release and remove any tags that might not be recommended anymore. - -Additionally, for the first point release of a new major or minor release branch: - -- Update the table of supported release lines (and End of Life dates) in [`RELEASES.md`](../../RELEASES.md): add the new release line and remove any release lines that might have become discontinued. -- Update the [list of supported release lines in README.md](../../RELEASES.md#releases), if necessary. -- Update the manual [e2e `simd`](https://github.com/cosmos/ibc-go/blob/main/.github/workflows/e2e-manual-simd.yaml) test workflow: - - Remove any tags that might not be recommended anymore. -- Update docs site: - - If the release is occurring on the main branch, on the latest version, then run `npm run docusaurus docs:version vX.Y.Z` in the `docs/` directory. (where `X.Y.Z` is the new version number) - - If the release is occurring on an older release branch, then make a PR to the main branch called `docs: new release vX.Y.Z` doing the following: - - Update the content of the docs found in `docs/versioned_docs/version-vx.y.z` if needed. (where `x.y.z` is the previous version number) - - Update the version number of the older release branch by changing the version number of the older release branch in: - - In `docs/versions.json`. - - Rename `docs/versioned_sidebars/version-vx.y.z-sidebars.json` - - Rename `docs/versioned_docs/version-vx.y.z` -- After changes to docs site are deployed, check [ibc.cosmos.network](https://ibc.cosmos.network) is updated. -- Open issue in [SDK tutorials repo](https://github.com/cosmos/sdk-tutorials) to update tutorials to the released version of ibc-go. 
- -See [this PR](https://github.com/cosmos/ibc-go/pull/2919) for an example of the involved changes. diff --git a/docs/docs/01-ibc/02-integration.md b/docs/docs/01-ibc/02-integration.md index 104909aad60..26d14383a2c 100644 --- a/docs/docs/01-ibc/02-integration.md +++ b/docs/docs/01-ibc/02-integration.md @@ -184,7 +184,10 @@ func NewApp(...args) *App { #### IBC v2 Router -With IBC v2, there is a new [router](https://github.com/cosmos/ibc-go/blob/main/modules/core/api/router.go) that needs to register the routes for a portID to a given IBCModule. It routes IBCv2 messages based on the prefixes of port IDs. For example, if a route named `someModule` exists, messages addressed to port IDs like `someModuleRandomPort1`, `someModuleRandomPort2`, etc., will be passed to the corresponding module. +With IBC v2, there is a new [router](https://github.com/cosmos/ibc-go/blob/main/modules/core/api/router.go) that needs to register the routes for a portID to a given IBCModule. It supports two kinds of routes: direct routes and prefix-based routes. The direct routes match one specific port ID to a module, while the prefix-based routes match any port ID with a specific prefix to a module. +For example, if a direct route named `someModule` exists, only messages addressed to exactly that port ID will be passed to the corresponding module. +However, if instead, `someModule` is a prefix-based route, port IDs like `someModuleRandomPort1`, `someModuleRandomPort2`, etc., will be passed to the module. +Note that the router will panic when you add a route that conflicts with an already existing route. This is also the case if you add a prefix-based route that conflicts with an existing direct route or vice versa. ```go // IBC v2 router creation diff --git a/docs/docs/01-ibc/03-apps/01-apps.md b/docs/docs/01-ibc/03-apps/01-apps.md index 2b7089171dd..02db6ef1639 100644 --- a/docs/docs/01-ibc/03-apps/01-apps.md +++ b/docs/docs/01-ibc/03-apps/01-apps.md @@ -178,6 +178,23 @@ encoded version into each handhshake call as necessary. ICS20 currently implements basic string matching with a single supported version. +### ICS4Wrapper + +The IBC application interacts with core IBC through the `ICS4Wrapper` interface for any application-initiated actions like: `SendPacket` and `WriteAcknowledgement`. This may be directly the IBCChannelKeeper or a middleware that sits between the application and the IBC ChannelKeeper. + +If the application is being wired with a custom middleware, the application **must** have its ICS4Wrapper set to the middleware directly above it on the stack through the following call: + +```go +// SetICS4Wrapper sets the ICS4Wrapper. This function may be used after +// the module's initialization to set the middleware which is above this +// module in the IBC application stack. +// The ICS4Wrapper **must** be used for sending packets and writing acknowledgements +// to ensure that the middleware can intercept and process these calls. +// Do not use the channel keeper directly to send packets or write acknowledgements +// as this will bypass the middleware. 
+SetICS4Wrapper(wrapper ICS4Wrapper) +``` + ### Custom Packets Modules connected by a channel must agree on what application data they are sending over the diff --git a/docs/docs/01-ibc/03-apps/07-address-codec.md b/docs/docs/01-ibc/03-apps/07-address-codec.md new file mode 100644 index 00000000000..cedaebf7b3b --- /dev/null +++ b/docs/docs/01-ibc/03-apps/07-address-codec.md @@ -0,0 +1,92 @@ +--- +title: Address Codec +sidebar_label: Address Codec +sidebar_position: 7 +slug: /ibc/apps/address-codec +--- + +# Custom Address Codec + +## Overview + +Starting in ibc-go `v10.4.0`, the IBC transfer module uses the application's configured address codec to parse sender and receiver addresses. This enables chains to accept multiple address formats in IBC packets—for example, both standard Cosmos bech32 addresses (`cosmos1...`) and Ethereum hex addresses (`0x...`). + +## Interface + +The Cosmos SDK defines a simple interface for converting between address representations: + +```go +type Codec interface { + StringToBytes(text string) ([]byte, error) + BytesToString(bz []byte) (string, error) +} +``` + +Applications configure a codec implementation on the `AccountKeeper`. The IBC transfer module retrieves this codec via `accountKeeper.AddressCodec()` and uses it throughout packet processing—validating sender addresses when creating packets and parsing receiver addresses when delivering funds. + +**Chain independence:** Each chain applies its own codec independently. The sending chain validates senders with its codec, the receiving chain validates receivers with its codec. This works seamlessly across chains with different codec configurations without any protocol changes. + +## Implementation + +A typical implementation composes the SDK's standard bech32 codec and extends it to parse hex addresses: + +```go +type EvmCodec struct { + bech32Codec address.Codec +} + +func (c *EvmCodec) StringToBytes(text string) ([]byte, error) { + if strings.HasPrefix(text, "0x") { + // Validate and parse hex address using go-ethereum/common + if !common.IsHexAddress(text) { + return nil, errors.New("invalid hex address") + } + addr := common.HexToAddress(text) + return addr.Bytes(), nil + } + // Default to bech32 parsing + return c.bech32Codec.StringToBytes(text) +} + +func (c *EvmCodec) BytesToString(bz []byte) (string, error) { + // Always return bech32 format + return c.bech32Codec.BytesToString(bz) +} +``` + +This pattern accepts both address formats as input while consistently outputting bech32. This makes the codec a drop-in replacement for the standard codec—existing tooling continues to work unchanged while users gain the ability to specify hex addresses where convenient. + +**Note:** A recommended address codec implementation is available in the [cosmos/evm repository](https://github.com/cosmos/evm/blob/main/encoding/address/address_codec.go). + +### Application Wiring + +After initializing your transfer keeper, configure the codec using the `SetAddressCodec` method: + +```go +app.TransferKeeper.SetAddressCodec(evmaddress.NewEvmCodec(sdk.GetConfig().GetBech32AccountAddrPrefix())) +``` + +For a complete example showing the transfer keeper initialization and address codec configuration, see [evmd app.go](https://github.com/cosmos/evm/blob/720ba9cf908a20a29b7401b19a136caeb8c4092f/evmd/app.go#L483-L494). 
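As a quick sanity check of the behaviour described above, the hypothetical helper below (not part of ibc-go or cosmos/evm) illustrates the round-trip property of such a dual-format codec: both the hex and bech32 forms of the same account decode to identical bytes, and the string output is always bech32. The `Codec` interface is simply restated from earlier in this page to keep the sketch self-contained.

```go
// Codec mirrors the interface shown above.
type Codec interface {
	StringToBytes(text string) ([]byte, error)
	BytesToString(bz []byte) (string, error)
}

// demoRoundTrip is a hypothetical helper that checks the dual-format
// behaviour described above. Requires "bytes" and "fmt" from the standard library.
func demoRoundTrip(c Codec, hexAddr, bech32Addr string) error {
	fromHex, err := c.StringToBytes(hexAddr)
	if err != nil {
		return fmt.Errorf("parse hex address: %w", err)
	}
	fromBech32, err := c.StringToBytes(bech32Addr)
	if err != nil {
		return fmt.Errorf("parse bech32 address: %w", err)
	}
	if !bytes.Equal(fromHex, fromBech32) {
		return fmt.Errorf("addresses do not resolve to the same account bytes")
	}
	canonical, err := c.BytesToString(fromHex)
	if err != nil {
		return err
	}
	fmt.Println("canonical form:", canonical) // always rendered as bech32, e.g. "cosmos1..."
	return nil
}
```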
+ +## Usage + +Once configured, the chain accepts IBC transfers with receiver addresses in either format: + +```bash +# Standard bech32 address +gaiad tx ibc-transfer transfer transfer channel-0 \ +cosmos1p9p6h9m8jcn8f7l6h3k2wq9g6yx0l8a9u2n4lr 1000uatom --from sender + +# Ethereum hex address +gaiad tx ibc-transfer transfer transfer channel-0 \ +0x742d35Cc6634C0532925a3b844Bc9e7595f0bEb 1000uatom --from sender +``` + +Both formats resolve to the same on-chain account when derived from the same private key. The codec handles conversion to the internal byte representation transparently. + +## Reference Implementation + +The cosmos/evm repository provides a complete implementation in `utils/address_codec.go` with integration examples in the `evmd` reference chain: + +- [**Implementation PR**](https://github.com/cosmos/evm/pull/665) +- [**Reference Chain "evmd"**](https://github.com/cosmos/evm/tree/main/evmd) diff --git a/docs/docs/01-ibc/04-middleware/02-develop.md b/docs/docs/01-ibc/04-middleware/02-develop.md index 88042caa0a3..409d021e90d 100644 --- a/docs/docs/01-ibc/04-middleware/02-develop.md +++ b/docs/docs/01-ibc/04-middleware/02-develop.md @@ -31,6 +31,10 @@ The interfaces a middleware must implement are found [here](https://github.com/c type Middleware interface { IBCModule // middleware has access to an underlying application which may be wrapped by more middleware ICS4Wrapper // middleware has access to ICS4Wrapper which may be core IBC Channel Handler or a higher-level middleware that wraps this middleware. + + // SetUnderlyingModule sets the underlying IBC module. This function may be used after + // the middleware's initialization to set the ibc module which is below this middleware. + SetUnderlyingApplication(IBCModule) } ``` @@ -42,14 +46,12 @@ An `IBCMiddleware` struct implementing the `Middleware` interface, can be define // IBCMiddleware implements the ICS26 callbacks and ICS4Wrapper for the fee middleware given the // fee keeper and the underlying application. type IBCMiddleware struct { - app porttypes.IBCModule - keeper keeper.Keeper + keeper *keeper.Keeper } // NewIBCMiddleware creates a new IBCMiddleware given the keeper and underlying application -func NewIBCMiddleware(app porttypes.IBCModule, k keeper.Keeper) IBCMiddleware { +func NewIBCMiddleware(k *keeper.Keeper) IBCMiddleware { return IBCMiddleware{ - app: app, keeper: k, } } @@ -476,3 +478,26 @@ func GetAppVersion( ``` See [here](https://github.com/cosmos/ibc-go/blob/v7.0.0/modules/apps/29-fee/keeper/relay.go#L58-L74) an example implementation of this function for the ICS-29 Fee Middleware module. + +## Wiring Interface Requirements + +Middleware must also implement the following functions so that they can be called in the stack builder in order to correctly wire the application stack together: `SetUnderlyingApplication` and `SetICS4Wrapper`. + +```go +// SetUnderlyingModule sets the underlying IBC module. This function may be used after +// the middleware's initialization to set the ibc module which is below this middleware. +SetUnderlyingApplication(IBCModule) + +// SetICS4Wrapper sets the ICS4Wrapper. This function may be used after +// the module's initialization to set the middleware which is above this +// module in the IBC application stack. +// The ICS4Wrapper **must** be used for sending packets and writing acknowledgements +// to ensure that the middleware can intercept and process these calls. 
+// Do not use the channel keeper directly to send packets or write acknowledgements +// as this will bypass the middleware. +SetICS4Wrapper(wrapper ICS4Wrapper) +``` + +The middleware itself should have access to the `underlying app` (note this may be a base app or an application wrapped by layers of lower-level middleware(s)) and access to the higher layer `ICS4wrapper`. The `underlying app` gets called during the relayer initiated actions: `recvPacket`, `acknowledgePacket`, and `timeoutPacket`. The `ics4Wrapper` gets called on user-initiated actions like `sendPacket` and `writeAcknowledgement`. + +The functions above are used by the `StackBuilder` during application setup to wire the stack correctly. The stack must be wired first and have all of the wrappers and applications set correctly before transaction execution starts and packet processing begins. diff --git a/docs/docs/01-ibc/04-middleware/03-integration.md b/docs/docs/01-ibc/04-middleware/03-integration.md index 3d1ed88b83a..f1b13273dd7 100644 --- a/docs/docs/01-ibc/04-middleware/03-integration.md +++ b/docs/docs/01-ibc/04-middleware/03-integration.md @@ -25,10 +25,13 @@ The order of middleware **matters**, function calls from IBC to the application // middleware 1 and middleware 3 are stateful middleware, // perhaps implementing separate sdk.Msg and Handlers -mw1Keeper := mw1.NewKeeper(storeKey1, ..., ics4Wrapper: channelKeeper, ...) // in stack 1 & 3 +// NOTE: NewKeeper returns a pointer so that we can modify +// the keepers later after initialization +// They are all initialized to use the channelKeeper directly at the start +mw1Keeper := mw1.NewKeeper(storeKey1, ..., channelKeeper) // in stack 1 & 3 // middleware 2 is stateless -mw3Keeper1 := mw3.NewKeeper(storeKey3,..., ics4Wrapper: mw1Keeper, ...) // in stack 1 -mw3Keeper2 := mw3.NewKeeper(storeKey3,..., ics4Wrapper: channelKeeper, ...) // in stack 2 +mw3Keeper1 := mw3.NewKeeper(storeKey3,..., channelKeeper) // in stack 1 +mw3Keeper2 := mw3.NewKeeper(storeKey3,..., channelKeeper) // in stack 2 // Only create App Module **once** and register in app module // if the module maintains independent state and/or processes sdk.Msgs @@ -55,13 +58,26 @@ customIBCModule1 := custom.NewIBCModule(customKeeper1, "portCustom1") customIBCModule2 := custom.NewIBCModule(customKeeper2, "portCustom2") // create IBC stacks by combining middleware with base application +// IBC Stack builders are initialized with the IBC ChannelKeeper which is the top-level ICS4Wrapper // NOTE: since middleware2 is stateless it does not require a Keeper // stack 1 contains mw1 -> mw3 -> transfer -stack1 := mw1.NewIBCMiddleware(mw3.NewIBCMiddleware(transferIBCModule, mw3Keeper1), mw1Keeper) +stack1 := porttypes.NewStackBuilder(ibcChannelKeeper). + Base(transferIBCModule). + Next(mw3). + Next(mw1). + Build() // stack 2 contains mw3 -> mw2 -> custom1 -stack2 := mw3.NewIBCMiddleware(mw2.NewIBCMiddleware(customIBCModule1), mw3Keeper2) +stack2 := porttypes.NewStackBuilder(ibcChannelKeeper). + Base(customIBCModule1). + Next(mw2). + Next(mw3). + Build() // stack 3 contains mw2 -> mw1 -> custom2 -stack3 := mw2.NewIBCMiddleware(mw1.NewIBCMiddleware(customIBCModule2, mw1Keeper)) +stack3 := porttypes.NewStackBuilder(ibcChannelKeeper). + Base(customIBCModule2). + Next(mw1). + Next(mw2). 
+ Build() // associate each stack with the moduleName provided by the underlying Keeper ibcRouter := porttypes.NewRouter() diff --git a/docs/docs/02-apps/01-transfer/10-IBCv2-transfer.md b/docs/docs/02-apps/01-transfer/10-IBCv2-transfer.md index eb8de4aefb8..a943a61c3e7 100644 --- a/docs/docs/02-apps/01-transfer/10-IBCv2-transfer.md +++ b/docs/docs/02-apps/01-transfer/10-IBCv2-transfer.md @@ -63,3 +63,27 @@ Because IBC v2 no longer uses channels, it is no longer possible to rely on a fi ## Changes to the application module interface Instead of implementing token transfer for `port.IBCModule`, IBC v2 uses the new application interface `api.IBCModule`. More information on the interface differences can be found in the [application section](../../01-ibc/03-apps/00-ibcv2apps.md). + +## MsgTransfer Entrypoint + +The `MsgTransfer` entrypoint has been retained in order to preserve support for the common entrypoint integrated in most existing frontends. + +If `MsgTransfer` is used with a clientID as the `msg.SourceChannel`, then the handler will automatically use the IBC v2 protocol. It will internally call the `MsgSendPacket` endpoint so that the execution flow is the same in the state machine for all IBC v2 packets while still presenting the same endpoint for users. + +Of course, we still want to retain support for sending v2 packets on existing channels. The denominations of tokens once they leave the origin chain are prefixed by the port and channel ID in IBC v1. Moreover, the transfer escrow accounts holding the original tokens are generated from the channel IDs. Thus, if we wish to interact with these remote tokens using IBC v2, we must still use the v1 channel identifiers that they were originally sent with. + +To support this, `MsgTransfer` has an additional `UseAliasing` boolean field to indicate that we wish to use the IBC v2 protocol while still using the old v1 channel identifiers. This enables users to interact with the same tokens, DEX pools, and cross-chain DeFi protocols using the same denominations that they had previously, now over the IBC v2 protocol. To use `MsgTransfer` with aliasing, we can submit the message like so: + +```go +MsgTransfer{ + SourcePort: "transfer", + SourceChannel: "channel-4", // note: we are using an existing v1 channel identifier + Token: "uatom", + Sender: {senderAddr}, + Receiver: {receiverAddr}, + TimeoutHeight: ZeroHeight, // note: IBC v2 does not use timeout height + TimeoutTimestamp: 100_000_000, + Memo: "", + UseAliasing: true, // set aliasing to true so the handler uses IBC v2 instead of IBC v1 +} +```
-Interchain Account authentication modules (both custom or generic, such as the `x/gov`, `x/group` or `x/auth` Cosmos SDK modules) can send messages to the controller submodule's [`MsgServer`](05-messages.md) to register interchain accounts and send packets to the interchain account. To accomplish this, the authentication module needs to be composed with `baseapp`'s `MsgServiceRouter`. +Interchain Account authentication modules (both custom or generic, such as the `x/gov` or `x/auth` Cosmos SDK modules) can send messages to the controller submodule's [`MsgServer`](05-messages.md) to register interchain accounts and send packets to the interchain account. To accomplish this, the authentication module needs to be composed with `baseapp`'s `MsgServiceRouter`. ![ica-v6.png](./images/ica-v6.png) diff --git a/docs/docs/04-middleware/02-packet-forward-middleware/01-overview.md b/docs/docs/04-middleware/02-packet-forward-middleware/01-overview.md new file mode 100644 index 00000000000..4b2b1647319 --- /dev/null +++ b/docs/docs/04-middleware/02-packet-forward-middleware/01-overview.md @@ -0,0 +1,39 @@ +--- +title: Overview +sidebar_label: Overview +sidebar_position: 1 +slug: /apps/packet-forward-middleware/overview +--- + +:::warning +Packet forward middleware is only compatible with IBC classic, not IBC v2. +::: + +# Overview + +Learn about packet forward middleware, a middleware that can be used in combination with token transfers (ICS-20). + +## What is Packet Forward Middleware? + +Packet Forward Middleware enables multi-hop token transfers by forwarding IBC packets through intermediate chains, which may not be directly connected. It supports: + +- **Path-Unwinding Functionality:** + Because the fungibility of tokens transferred between chains is determined by [the path the tokens have travelled](/02-apps/01-transfer/01-overview/#denomination-trace), i.e. the same token sent from chain A to chain B is not fungible with the same token sent from chain A to chain C and then to chain B, packet forward middleware also enables routing tokens back through their source before sending them on to the final destination. +- **Asynchronous Acknowledgements:** + Acknowledgements are only written to the origin chain after all forwarding steps succeed or fail, so users only need to monitor the source chain for the result. +- **Retry and Timeout Handling:** +The middleware can be configured to retry forwarding in the case of a timeout. +- **Forwarding across multiple chains with nested memos:** +Instructions on which route to take to forward a packet across more than one chain can be set within a nested JSON object in the memo field. +- **Configurable Fee Deduction on Receive:** +Integrators of PFM can choose to deduct a percentage of tokens forwarded through their chain and distribute these tokens to the community pool. + +## How it works + +1. User initiates a `MsgTransfer` with a memo JSON payload containing forwarding instructions. + +2. Intermediate chains (with PFM enabled) parse the memo and forward the packet to the destination specified. + +3. Acknowledgements are passed back step-by-step to the origin chain after the final hop succeeds or fails, along the same path used for forwarding. + +In practice, it can be challenging to correctly format the memo for the desired route. It is recommended to use the Skip API to generate the correctly formatted memo needed in `MsgTransfer`.
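To make step 1 above concrete, the hypothetical helper below (not part of PFM) assembles the minimal memo for a single forward hop. The `receiver`, `port` and `channel` keys mirror the memo JSON shown later in the example flows page; the receiver address and channel ID here are placeholders, and the canonical memo format is defined by the packet-forward-middleware itself.

```go
// forwardMetadata mirrors the minimal forward memo fields used by PFM:
// the receiver, port and channel of the next hop.
type forwardMetadata struct {
	Receiver string `json:"receiver"`
	Port     string `json:"port"`
	Channel  string `json:"channel"`
}

// buildForwardMemo marshals a single-hop forward memo. Requires "encoding/json".
func buildForwardMemo(receiver, channel string) (string, error) {
	memo := map[string]forwardMetadata{
		"forward": {
			Receiver: receiver,   // final receiver on the destination chain (e.g. chain C)
			Port:     "transfer", // port to forward over on the intermediate chain
			Channel:  channel,    // channel from the intermediate chain to the destination
		},
	}
	bz, err := json.Marshal(memo)
	if err != nil {
		return "", err
	}
	// The resulting string is what the user places in MsgTransfer's memo field
	// when sending from chain A to the intermediate chain B.
	return string(bz), nil
}
```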
diff --git a/docs/docs/04-middleware/02-packet-forward-middleware/02-integration.md b/docs/docs/04-middleware/02-packet-forward-middleware/02-integration.md new file mode 100644 index 00000000000..9b25d9cc080 --- /dev/null +++ b/docs/docs/04-middleware/02-packet-forward-middleware/02-integration.md @@ -0,0 +1,177 @@ +--- +title: Integration +sidebar_label: Integration +sidebar_position: 1 +slug: /apps/packet-forward-middleware/integration +--- + +# Integration + +This document provides instructions on integrating and configuring the Packet Forward Middleware (PFM) within your +existing chain implementation. +The integration steps include the following: + +1. [Import the PFM, initialize the PFM Module & Keeper, initialize the store keys and module params, and initialize the Begin/End Block logic and InitGenesis order](#example-integration-of-the-packet-forward-middleware) +2. [Configure the IBC application stack including the transfer module](#configuring-the-transfer-application-stack-with-packet-forward-middleware) +3. [Configuration of additional options such as timeout period, number of retries on timeout, refund timeout period, and fee percentage](#configurable-options-in-the-packet-forward-middleware) + +Integration of the PFM should take approximately 20 minutes. + +## Example integration of the Packet Forward Middleware + +```go +// app.go + +// Import the packet forward middleware +import ( + "github.com/cosmos/ibc-apps/middleware/packet-forward-middleware/v10/packetforward" + packetforwardkeeper "github.com/cosmos/ibc-apps/middleware/packet-forward-middleware/v10/packetforward/keeper" + packetforwardtypes "github.com/cosmos/ibc-apps/middleware/packet-forward-middleware/v10/packetforward/types" +) + +... + +// Register the AppModule for the packet forward middleware module +ModuleBasics = module.NewBasicManager( + ... + packetforward.AppModuleBasic{}, + ... +) + +... + +// Add packet forward middleware Keeper +type App struct { + ... + PacketForwardKeeper *packetforwardkeeper.Keeper + ... +} + +... + +// Create store keys +keys := sdk.NewKVStoreKeys( + ... + packetforwardtypes.StoreKey, + ... +) + +... + +// Initialize the packet forward middleware Keeper +// It's important to note that the PFM Keeper must be initialized before the Transfer Keeper +app.PacketForwardKeeper = packetforwardkeeper.NewKeeper( + appCodec, + keys[packetforwardtypes.StoreKey], + nil, // will be zero-value here, reference is set later on with SetTransferKeeper. + app.IBCKeeper.ChannelKeeper, + appKeepers.DistrKeeper, + app.BankKeeper, + app.IBCKeeper.ChannelKeeper, + authtypes.NewModuleAddress(govtypes.ModuleName).String(), +) + +// Initialize the transfer module Keeper +app.TransferKeeper = ibctransferkeeper.NewKeeper( + appCodec, + keys[ibctransfertypes.StoreKey], + app.GetSubspace(ibctransfertypes.ModuleName), + app.PacketForwardKeeper, + app.IBCKeeper.ChannelKeeper, + &app.IBCKeeper.PortKeeper, + app.AccountKeeper, + app.BankKeeper, + scopedTransferKeeper, +) + +app.PacketForwardKeeper.SetTransferKeeper(app.TransferKeeper) + +// See the section below for configuring an application stack with the packet forward middleware + +... + +// Register packet forward middleware AppModule +app.moduleManager = module.NewManager( + ... + packetforward.NewAppModule(app.PacketForwardKeeper, app.GetSubspace(packetforwardtypes.ModuleName)), +) + +... + +// Add packet forward middleware to begin blocker logic +app.moduleManager.SetOrderBeginBlockers( + ... + packetforwardtypes.ModuleName, + ... 
+) + +// Add packet forward middleware to end blocker logic +app.moduleManager.SetOrderEndBlockers( + ... + packetforwardtypes.ModuleName, + ... +) + +// Add packet forward middleware to init genesis logic +app.moduleManager.SetOrderInitGenesis( + ... + packetforwardtypes.ModuleName, + ... +) + +// Add packet forward middleware to init params keeper +func initParamsKeeper(appCodec codec.BinaryCodec, legacyAmino *codec.LegacyAmino, key, tkey storetypes.StoreKey) paramskeeper.Keeper { + ... + paramsKeeper.Subspace(packetforwardtypes.ModuleName).WithKeyTable(packetforwardtypes.ParamKeyTable()) + ... +} +``` + +## Configuring the transfer application stack with Packet Forward Middleware + +Here is an example of how to create an application stack using `transfer` and `packet-forward-middleware`. +The following `transferStack` is configured in `app/app.go` and added to the IBC `Router`. +The in-line comments describe the execution flow of packets between the application stack and IBC core. + +For more information on configuring an IBC application stack see the ibc-go docs [here](https://github.com/cosmos/ibc-go/blob/e69a833de764fa0f5bdf0338d9452fd6e579a675/docs/docs/04-middleware/01-ics29-fee/02-integration.md#configuring-an-application-stack-with-fee-middleware). + +```go +// Create Transfer Stack +// SendPacket, since it is originating from the application to core IBC: +// transferKeeper.SendPacket -> packetforward.SendPacket -> channel.SendPacket + +// RecvPacket, message that originates from core IBC and goes down to app, the flow is the other way +// channel.RecvPacket -> packetforward.OnRecvPacket -> transfer.OnRecvPacket + +// transfer stack contains (from top to bottom): +// - Packet Forward Middleware +// - Transfer +var transferStack ibcporttypes.IBCModule +transferStack = transfer.NewIBCModule(app.TransferKeeper) +transferStack = packetforward.NewIBCMiddleware( + transferStack, + app.PacketForwardKeeper, + 0, // retries on timeout + packetforwardkeeper.DefaultForwardTransferPacketTimeoutTimestamp, // forward timeout +) + +// Add transfer stack to IBC Router +ibcRouter.AddRoute(ibctransfertypes.ModuleName, transferStack) +``` + +## Configurable options in the Packet Forward Middleware + +The Packet Forward Middleware has several configurable options available when initializing the IBC application stack. +You can see these passed in as arguments to `packetforward.NewIBCMiddleware` and they include the number of retries that +will be performed on a forward timeout, the timeout period that will be used for a forward, and the timeout period that +will be used for performing refunds in the case that a forward is taking too long. + +Additionally, there is a fee percentage parameter that can be set in `InitGenesis`, this is an optional parameter that +can be used to take a fee from each forwarded packet which will then be distributed to the community pool. In the +`OnRecvPacket` callback `ForwardTransferPacket` is invoked which will attempt to subtract a fee from the forwarded +packet amount if the fee percentage is non-zero. + +- Retries On Timeout - how many times will a forward be re-attempted in the case of a timeout. +- Timeout Period - how long can a forward be in progress before giving up. +- Refund Timeout - how long can a forward be in progress before issuing a refund back to the original source chain. +- Fee Percentage - % of the forwarded packet amount which will be subtracted and distributed to the community pool. 
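To make these knobs concrete, the constructor call from the stack above could be tuned as sketched below. The values are illustrative only, not recommendations, and the sketch assumes the same constructor signature shown earlier (retry count followed by a `time.Duration` forward timeout) with `time` imported.

```go
// Same wiring as above, but with non-default, illustrative values:
// retry a timed-out forward up to 2 times, and allow each forward
// up to 12 hours before it times out.
transferStack = packetforward.NewIBCMiddleware(
	transferStack,
	app.PacketForwardKeeper,
	2,            // retries on timeout
	12*time.Hour, // forward timeout
)
```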
diff --git a/docs/docs/04-middleware/02-packet-forward-middleware/03-example-usage.md b/docs/docs/04-middleware/02-packet-forward-middleware/03-example-usage.md new file mode 100644 index 00000000000..95b8e8aa377 --- /dev/null +++ b/docs/docs/04-middleware/02-packet-forward-middleware/03-example-usage.md @@ -0,0 +1,140 @@ +--- +title: Example Flows +sidebar_label: Example Flows +sidebar_position: 3 +slug: /apps/packet-forward-middleware/example-flows +--- + +# Packet Forward Middleware Flows + +This document outlines some example flows leveraging packet forward middleware and formats of the memo field. + +## Example Scenarios + +### Successful Transfer forwarding through chain B + +```mermaid +sequenceDiagram + autonumber + Chain A ->> Chain B: Send PFM transfer + Chain B ->> Chain C: Forward + Chain C ->> Chain B: ACK + Chain B ->> Chain A: ACK +``` + +### Memo for simple forward + +- The packet-forward-middleware integrated on Chain B. +- The packet data `receiver` for the `MsgTransfer` on Chain A is set to `"pfm"` or some other invalid bech32 string.* +- The packet `memo` is included in `MsgTransfer` by user on Chain A. + +```json +{ + "forward": { + "receiver": "chain-c-bech32-address", + "port": "transfer", + "channel": "channel-123" + } +} +``` + +### Error on Forwarding Hop, Refund to A + +```mermaid +sequenceDiagram + autonumber + Chain A ->> Chain B: PFM transfer + Chain B ->> Chain C: Forward + Chain B ->> Chain C: Forward (errors) + Chain C ->> Chain B: ☠️ ACK error + Chain B ->> Chain A: ☠️ ACK error +``` + +### Forwarding with Retry and Timeout Logic + +```mermaid +sequenceDiagram + autonumber + Chain A ->> Chain B: PFM transfer + Chain B ->> Chain C: Forward + Chain C --x Chain B: Timeout + Chain B ->> Chain C: Retry forward + Chain C --x Chain B: Timeout + Chain B ->> Chain A: ☠️ ACK error +``` + +### A -> B -> C full success + +1. `A` This sends packet over underlying ICS-004 wrapper with memo as is. +2. `B` This receives packet and parses it into ICS-020 packet. +3. `B` Validates `forward` packet on this step, return `ACK` error if fails. +4. `B` If other middleware not yet called ICS-020, call it and ACK error on fail. Tokens minted or unescrowed here. +5. `B` Handle denom. If denom prefix is from `B`, remove it. If denom prefix is other chain - add `B` prefix. +6. `B` Take fee, create new ICS-004 packet with timeout from forward for next step, and remaining inner `memo`. +7. `B` Send transfer to `C` with parameters obtained from `memo`. Tokens burnt or escrowed here. +8. `B` Store tracking `in flight packet` under next `(channel, port, ICS-20 transfer sequence)`, do not `ACK` packet yet. +9. `C` Handle ICS-020 packet as usual. +10. `B` On ICS-020 ACK from `C` find `in flight packet`, delete it and write `ACK` for original packet from `A`. +11. `A` Handle ICS-020 `ACK` as usual + +[Example](https://mintscan.io/osmosis-testnet/txs/FAB912347B8729FFCA92AC35E6B1E83BC8169DE7CC2C254A5A3F70C8EC35D771?height=3788973) of USDC transfer from Osmosis -> Noble -> Sei + +### A -> B -> C with C error ACK + +10. `B` On ICS-020 ACK from `C` find `in flight packet`, delete it +11. `B` Burns or escrows tokens. +12. `B` And write error `ACK` for original packet from `A`. +13. `A` Handle ICS-020 timeout as usual +14. `C` writes success `ACK` for packet from `B` + +Same behavior in case of timeout on `C` + +### A packet timeouts on B before C timeouts packet from B + +10. `A` Cannot timeout because `in flight packet` has proof on `B` of packet inclusion. +11. 
`B` waits for ACK or timeout from `C`. +12. `B` converts a timeout from `C` into a fail `ACK` for `A` +13. `A` receives a success or fail `ACK`, but never a timeout + +In this case, the assets on `A` are stuck (`hang`) until the final hop times out or ACKs. + +### Memo for Retry and Timeout Logic, with Nested Memo (2 forwards) + +- The packet-forward-middleware is integrated on Chain B and Chain C. +- The packet data `receiver` for the `MsgTransfer` on Chain A is set to `"pfm"` or some other invalid bech32 string. +- The forward metadata `receiver` for the hop from Chain B to Chain C is set to `"pfm"` or some other invalid bech32 string. +- The packet `memo` is included in `MsgTransfer` by the user on Chain A. +- A packet timeout of 10 minutes and 2 retries is set for both forwards. + +In the case of a timeout after 10 minutes for either forward, the packet would be retried up to 2 times; afterwards, an error ack would be written to issue a refund on the prior chain. + +`next` is the `memo` to pass for the next transfer hop. Since the `memo` is intended to be used as a JSON string, it should be either JSON (which will be marshaled retaining key order) or an escaped JSON string (which will be passed through directly). + +`next` as JSON + +```json +{ + "forward": { + "receiver": "pfm", // intentionally invalid + "port": "transfer", + "channel": "channel-123", + "timeout": "10m", + "retries": 2, + "next": { + "forward": { + "receiver": "chain-d-bech32-address", + "port": "transfer", + "channel": "channel-234", + "timeout": "10m", + "retries": 2 + } + } + } +} +``` + +## Intermediate Address Security + +Intermediate chains don’t need a valid receiver address. Instead, they derive a secure address from the packet’s sender and channel, preventing users from forwarding tokens to arbitrary accounts. + +To avoid accidental transfers to chains without PFM, use an invalid bech32 address (e.g., "pfm") for intermediate receivers. diff --git a/docs/docs/04-middleware/02-packet-forward-middleware/_category_.json b/docs/docs/04-middleware/02-packet-forward-middleware/_category_.json new file mode 100644 index 00000000000..c341825b742 --- /dev/null +++ b/docs/docs/04-middleware/02-packet-forward-middleware/_category_.json @@ -0,0 +1,5 @@ +{ + "label": "Packet Forward Middleware", + "position": 2, + "link": null +} \ No newline at end of file diff --git a/docs/docs/04-middleware/03-rate-limit-middleware/01-overview.md b/docs/docs/04-middleware/03-rate-limit-middleware/01-overview.md new file mode 100644 index 00000000000..fe842b372a4 --- /dev/null +++ b/docs/docs/04-middleware/03-rate-limit-middleware/01-overview.md @@ -0,0 +1,30 @@ +--- +title: Overview +sidebar_label: Overview +sidebar_position: 1 +slug: /apps/rate-limit-middleware/overview +--- + +# Overview + +Learn about rate limit middleware, a middleware that can be used in combination with token transfers (ICS-20) to control the amount of inflows and outflows of assets in a certain time period. + +## What is Rate Limit Middleware? + +The rate limit middleware enforces rate limits on IBC token transfers coming into and out of a chain. It supports: + +- **Risk Mitigation:** In case of a bug exploit, attack or economic failure of a connected chain, it limits the impact to the in/outflow specified for a given time period. +- **Token Filtering:** Through the use of a blacklist, the middleware can completely block tokens entering or leaving a domain, relevant for compliance or for giving asset issuers greater control over the domains tokens can be sent to.
+- **Uninterrupted Packet Flow:** When desired, rate limits can be bypassed by using the whitelist to avoid any restriction on asset inflows or outflows. + +## How it works + +The rate limiting middleware determines whether tokens can flow into or out of a chain. The middleware does this by: + +1. Checking transfer limits for an asset (quota): when tokens are received or sent, the middleware determines whether the amount of tokens flowing in or out has exceeded the limit. + +2. Tracking inflow and outflow: when tokens enter or leave the chain, the amount transferred is tracked in state. + +3. Blocking or allowing token flow: depending on the limit, the middleware will either allow the tokens to pass through or block them. + +4. Handling failures: if the packet times out or fails to be delivered, the middleware ensures limits are correctly recorded. diff --git a/docs/docs/04-middleware/03-rate-limit-middleware/02-integration.md b/docs/docs/04-middleware/03-rate-limit-middleware/02-integration.md new file mode 100644 index 00000000000..ca3b3a97266 --- /dev/null +++ b/docs/docs/04-middleware/03-rate-limit-middleware/02-integration.md @@ -0,0 +1,10 @@ +--- +title: Integration +sidebar_label: Integration +sidebar_position: 2 +slug: /apps/rate-limit-middleware/integration +--- + +# Integration + +This section should be completed once the middleware wiring approach is finalised. diff --git a/docs/docs/04-middleware/03-rate-limit-middleware/03-setting-limits.md b/docs/docs/04-middleware/03-rate-limit-middleware/03-setting-limits.md new file mode 100644 index 00000000000..6ce0f236379 --- /dev/null +++ b/docs/docs/04-middleware/03-rate-limit-middleware/03-setting-limits.md @@ -0,0 +1,22 @@ +--- +title: Setting Rate Limits +sidebar_label: Setting Rate Limits +sidebar_position: 3 +slug: /apps/rate-limit-middleware/setting-rate-limits +--- + +# Setting Rate Limits + +Rate limits are set through a governance-gated authority on a per-denom and per-channel/client basis. To add a rate limit, the [`MsgAddRateLimit`](https://github.com/cosmos/ibc-go/blob/main/modules/apps/rate-limiting/types/msgs.go#L26-L34) message must be executed, which includes: + +- Denom: the asset that the rate limit should be applied to +- ChannelOrClientId: the channelID for use with IBC classic connections, or the clientID for use with IBC v2 connections +- MaxPercentSend: the outflow threshold as a percentage of the `channelValue`. More explicitly, a packet being sent would exceed the threshold quota if: (Outflow - Inflow + Packet Amount) / channelValue is greater than MaxPercentSend +- MaxPercentRecv: the inflow threshold as a percentage of the `channelValue` +- DurationHours: the length of time after which the rate limits reset + +A rough sketch of constructing this message is included at the end of this page. + +## Updating, Removing or Resetting Rate Limits + +- If rate limits were set too low or too high for a given channel/client, they can be updated with [`MsgUpdateRateLimit`](https://github.com/cosmos/ibc-go/blob/main/modules/apps/rate-limiting/types/msgs.go#L81-L89). +- If rate limits are no longer needed, they can be removed with [`MsgRemoveRateLimit`](https://github.com/cosmos/ibc-go/blob/main/modules/apps/rate-limiting/types/msgs.go#L136-L141). +- If the flow counter needs to be reset for a given rate limit, it is possible to do so with [`MsgResetRateLimit`](https://github.com/cosmos/ibc-go/blob/main/modules/apps/rate-limiting/types/msgs.go#L169-L174).
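Going back to `MsgAddRateLimit` above, a rough sketch of constructing the message is shown below. The field names follow the list above, but the exact Go types and the name of the authority/signer field are assumptions made for illustration; consult `modules/apps/rate-limiting/types/msgs.go` for the authoritative definition.

```go
// Hypothetical sketch: a 24-hour quota for uatom on channel-0 that allows at
// most 10% of channelValue to flow out and 10% to flow in per period.
// Field types and the Signer field name are assumptions.
msg := &ratelimittypes.MsgAddRateLimit{
	Signer:            authtypes.NewModuleAddress(govtypes.ModuleName).String(), // governance authority
	Denom:             "uatom",
	ChannelOrClientId: "channel-0",
	MaxPercentSend:    sdkmath.NewInt(10), // outflow threshold, as a percent of channelValue
	MaxPercentRecv:    sdkmath.NewInt(10), // inflow threshold, as a percent of channelValue
	DurationHours:     24,                 // quota window; limits reset after 24 hours
}
```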
diff --git a/docs/docs/04-middleware/03-rate-limit-middleware/_category_.json b/docs/docs/04-middleware/03-rate-limit-middleware/_category_.json
new file mode 100644
index 00000000000..8ea2f530c41
--- /dev/null
+++ b/docs/docs/04-middleware/03-rate-limit-middleware/_category_.json
@@ -0,0 +1,5 @@
+{
+  "label": "Rate Limit Middleware",
+  "position": 3,
+  "link": null
+}
\ No newline at end of file
diff --git a/docs/docs/05-migrations/14-v10-to-v11.md b/docs/docs/05-migrations/14-v10-to-v11.md
new file mode 100644
index 00000000000..f740404f9ec
--- /dev/null
+++ b/docs/docs/05-migrations/14-v10-to-v11.md
@@ -0,0 +1,64 @@
+---
+title: IBC-Go v10 to v11
+sidebar_label: IBC-Go v10 to v11
+sidebar_position: 14
+slug: /migrations/v10-to-v11
+---
+
+# Migrating from v10 to v11
+
+This guide provides instructions for migrating to a new version of ibc-go.
+
+**Note:** ibc-go supports Go semantic versioning and therefore all imports must be updated on major version releases.
+
+Diff examples are shown after the list of overall changes:
+
+- Chains will need to remove the `ParamSubspace` argument from all calls to `Keeper` constructors.
+
+```diff
+ app.IBCKeeper = ibckeeper.NewKeeper(
+ appCodec,
+ runtime.NewKVStoreService(keys[ibcexported.StoreKey]),
+- app.GetSubspace(ibcexported.ModuleName),
+ app.UpgradeKeeper,
+ authtypes.NewModuleAddress(govtypes.ModuleName).String(),
+ )
+```
+
+The transfer module, the packet forward middleware, and the rate limiting middleware support custom address codecs. This feature is primarily added to support Cosmos EVM for IBC transfers. In a standard Cosmos SDK app, they are wired as follows:
+
+```diff
+ app.TransferKeeper = ibctransferkeeper.NewKeeper(
+ appCodec,
++ app.AccountKeeper.AddressCodec(),
+ runtime.NewKVStoreService(keys[ibctransfertypes.StoreKey]),
+ app.IBCKeeper.ChannelKeeper,
+ app.MsgServiceRouter(),
+ app.AccountKeeper, app.BankKeeper,
+ authtypes.NewModuleAddress(govtypes.ModuleName).String(),
+ )
+```
+
+```diff
+ app.RateLimitKeeper = ratelimitkeeper.NewKeeper(
+ appCodec,
++ app.AccountKeeper.AddressCodec(),
+ runtime.NewKVStoreService(keys[ratelimittypes.StoreKey]),
+ app.IBCKeeper.ChannelKeeper,
+ app.IBCKeeper.ClientKeeper,
+ app.BankKeeper,
+ authtypes.NewModuleAddress(govtypes.ModuleName).String(),
+ )
+```
+
+```diff
+ app.PFMKeeper = packetforwardkeeper.NewKeeper(
+ appCodec,
++ app.AccountKeeper.AddressCodec(),
+ runtime.NewKVStoreService(keys[packetforwardtypes.StoreKey]),
+ app.TransferKeeper,
+ app.IBCKeeper.ChannelKeeper,
+ app.BankKeeper,
+ authtypes.NewModuleAddress(govtypes.ModuleName).String(),
+ )
+```
diff --git a/docs/docs/05-migrations/15-support-stackbuilder.md b/docs/docs/05-migrations/15-support-stackbuilder.md
new file mode 100644
index 00000000000..a06c65b6439
--- /dev/null
+++ b/docs/docs/05-migrations/15-support-stackbuilder.md
@@ -0,0 +1,148 @@
+---
+title: Support the new StackBuilder primitive for Wiring Middlewares in the chain application
+sidebar_label: Support StackBuilder Wiring
+sidebar_position: 1
+slug: /migrations/support-stackbuilder
+---
+
+# Migration for Chains wishing to use StackBuilder
+
+The StackBuilder struct is a new primitive for wiring middleware in a simpler and less error-prone manner. It is not a breaking change; thus, the existing method of wiring middleware still works, though it is highly recommended to transition to the new wiring method.
+
+Refer to the [integration guide](../01-ibc/04-middleware/03-integration.md) to understand how to use this new primitive to improve middleware wiring in the chain application setup.
+
+# Migrations for Application Developers
+
+In order to be wired with the new StackBuilder primitive, applications and middlewares must implement new methods as part of their respective interfaces.
+
+IBC applications must implement a new `SetICS4Wrapper` method, which will set the `ICS4Wrapper` through which the application will call `SendPacket` and `WriteAcknowledgement`. It is recommended that IBC applications are initialized first with the IBC ChannelKeeper directly, and then modified with a middleware ICS4Wrapper during the stack wiring.
+
+```go
+// SetICS4Wrapper sets the ICS4Wrapper. This function may be used after
+// the module's initialization to set the middleware which is above this
+// module in the IBC application stack.
+// The ICS4Wrapper **must** be used for sending packets and writing acknowledgements
+// to ensure that the middleware can intercept and process these calls.
+// Do not use the channel keeper directly to send packets or write acknowledgements
+// as this will bypass the middleware.
+SetICS4Wrapper(wrapper ICS4Wrapper)
+```
+
+Many applications have a stateful keeper that executes the logic for sending packets and writing acknowledgements. In this case, the keeper in the application must be a **pointer** reference so that it can be modified in place after initialization.
+
+The initialization should be modified to no longer take in an additional `ics4Wrapper`, as this gets set later by `SetICS4Wrapper`. The constructor function must also return a **pointer** reference so that it may be modified in-place by the stack builder.
+
+Below is an example IBCModule that supports the stack builder wiring:
+
+```go
+type IBCModule struct {
+    keeper *keeper.Keeper
+}
+
+// NewIBCModule creates a new IBCModule given the keeper
+func NewIBCModule(k *keeper.Keeper) *IBCModule {
+    return &IBCModule{
+        keeper: k,
+    }
+}
+
+// SetICS4Wrapper sets the ICS4Wrapper. This function may be used after
+// the module's initialization to set the middleware which is above this
+// module in the IBC application stack.
+func (im IBCModule) SetICS4Wrapper(wrapper porttypes.ICS4Wrapper) {
+    if wrapper == nil {
+        panic("ICS4Wrapper cannot be nil")
+    }
+
+    im.keeper.WithICS4Wrapper(wrapper)
+}
+
+// Keeper file that has the ICS4Wrapper internal to its own struct
+
+// Keeper defines the IBC fungible transfer keeper
+type Keeper struct {
+    ...
+    ics4Wrapper porttypes.ICS4Wrapper
+
+    // Keeper is initialized with ICS4Wrapper
+    // being equal to the top-level channelKeeper.
+    // This can be changed by calling WithICS4Wrapper
+    // with a different middleware ICS4Wrapper.
+    channelKeeper types.ChannelKeeper
+    ...
+}
+
+// WithICS4Wrapper sets the ICS4Wrapper. This function may be used after
+// the keeper's creation to set the middleware which is above this module
+// in the IBC application stack.
+func (k *Keeper) WithICS4Wrapper(wrapper porttypes.ICS4Wrapper) {
+    k.ics4Wrapper = wrapper
+}
+```
+
+# Migration for Middleware Developers
+
+Since middleware itself implements the IBC application interface, it must also implement `SetICS4Wrapper` in the same way as IBC applications.
+
+Additionally, IBC middleware has an underlying IBC application that it calls into as well. Previously this application would be set in the middleware upon construction.
+With the stack builder primitive, the application is only set upon calling `stack.Build()`. Thus, middleware is additionally responsible for implementing the new method `SetUnderlyingApplication`:
+
+```go
+// SetUnderlyingApplication sets the underlying IBC application. This function may be used after
+// the middleware's initialization to set the ibc module which is below this middleware.
+SetUnderlyingApplication(IBCModule)
+```
+
+The initialization should not include the ICS4Wrapper and application, as these get set later. The constructor function for middlewares **must** be modified to return a **pointer** reference so that it can be modified in place by the stack builder.
+
+Below is an example middleware setup:
+
+```go
+// IBCMiddleware implements the ICS26 callbacks
+type IBCMiddleware struct {
+    app         porttypes.IBCModule
+    ics4Wrapper porttypes.ICS4Wrapper
+
+    // this is a stateful middleware with its own internal keeper
+    mwKeeper *keeper.MiddlewareKeeper
+
+    // this is a middleware specific field
+    mwField any
+}
+
+// NewIBCMiddleware creates a new IBCMiddleware given the middleware keeper.
+// NOTE: It **must** return a pointer reference so it can be
+// modified in place by the stack builder.
+// NOTE: We do not pass in the underlying app and ICS4Wrapper here as these are set later.
+func NewIBCMiddleware(
+    mwKeeper *keeper.MiddlewareKeeper, mwField any,
+) *IBCMiddleware {
+    return &IBCMiddleware{
+        mwKeeper: mwKeeper,
+        mwField:  mwField,
+    }
+}
+
+// SetICS4Wrapper sets the ICS4Wrapper. This function may be used after the
+// middleware's creation to set the middleware which is above this module in
+// the IBC application stack.
+func (im *IBCMiddleware) SetICS4Wrapper(wrapper porttypes.ICS4Wrapper) {
+    if wrapper == nil {
+        panic("ICS4Wrapper cannot be nil")
+    }
+    im.mwKeeper.WithICS4Wrapper(wrapper)
+}
+
+// SetUnderlyingApplication sets the underlying IBC module. This function may be used after
+// the middleware's creation to set the ibc module which is below this middleware.
+func (im *IBCMiddleware) SetUnderlyingApplication(app porttypes.IBCModule) { + if app == nil { + panic(errors.New("underlying application cannot be nil")) + } + if im.app != nil { + panic(errors.New("underlying application already set")) + } + im.app = app +} +``` diff --git a/docs/docusaurus.config.js b/docs/docusaurus.config.js index b01ef016282..668a92925bd 100644 --- a/docs/docusaurus.config.js +++ b/docs/docusaurus.config.js @@ -49,14 +49,14 @@ const config = { // Exclude template markdown files from the docs exclude: ["**/*.template.md"], // Select the latest version - lastVersion: "v10.1.x", + lastVersion: "v10.4.x", // Assign banners to specific versions versions: { current: { path: "main", banner: "unreleased", }, - "v10.1.x": { + "v10.4.x": { path: "v10", banner: "none", }, @@ -98,6 +98,10 @@ const config = { ], ], + markdown: { + mermaid: true, +}, + themeConfig: /** @type {import('@docusaurus/preset-classic').ThemeConfig} */ ({ @@ -194,7 +198,7 @@ const config = { }, { label: "interchaintest", - href: "https://github.com/strangelove-ventures/interchaintest", + href: "https://github.com/cosmos/interchaintest", }, { label: "CosmWasm", @@ -215,7 +219,7 @@ const config = { }, { label: "Privacy Policy", - href: "https://v1.cosmos.network/privacy", + href: "https://cosmos.network/privacy/", }, ], }, @@ -252,7 +256,10 @@ const config = { ], }, }), - themes: ["docusaurus-theme-github-codeblock"], + themes: [ + "docusaurus-theme-github-codeblock", + "@docusaurus/theme-mermaid" + ], plugins: [ [ 'docusaurus-pushfeedback', { diff --git a/docs/package-lock.json b/docs/package-lock.json index 1db551d1423..eefc49096d9 100644 --- a/docs/package-lock.json +++ b/docs/package-lock.json @@ -14,6 +14,7 @@ "@docusaurus/plugin-google-gtag": "^3.4.0", "@docusaurus/plugin-sitemap": "^3.4.0", "@docusaurus/preset-classic": "^3.4.0", + "@docusaurus/theme-mermaid": "^3.4.0", "@easyops-cn/docusaurus-search-local": "^0.40.1", "@gracefullight/docusaurus-plugin-microsoft-clarity": "^1.0.0", "@mdx-js/react": "^3.0.0", @@ -258,104 +259,44 @@ } }, "node_modules/@babel/code-frame": { - "version": "7.23.5", - "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.23.5.tgz", - "integrity": "sha512-CgH3s1a96LipHCmSUmYFPwY7MNx8C3avkq7i4Wl3cfa662ldtUe4VM1TPXX70pfmrlWTb6jLqTYrZyT2ZTJBgA==", + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.27.1.tgz", + "integrity": "sha512-cjQ7ZlQ0Mv3b47hABuTevyTuYN4i+loJKGeV9flcCgIK37cCXRh+L1bd3iBHlynerhQ7BhCkn2BPbQUL+rGqFg==", + "license": "MIT", "dependencies": { - "@babel/highlight": "^7.23.4", - "chalk": "^2.4.2" + "@babel/helper-validator-identifier": "^7.27.1", + "js-tokens": "^4.0.0", + "picocolors": "^1.1.1" }, "engines": { "node": ">=6.9.0" } }, - "node_modules/@babel/code-frame/node_modules/ansi-styles": { - "version": "3.2.1", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz", - "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==", - "dependencies": { - "color-convert": "^1.9.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/@babel/code-frame/node_modules/chalk": { - "version": "2.4.2", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.2.tgz", - "integrity": "sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ==", - "dependencies": { - "ansi-styles": "^3.2.1", - "escape-string-regexp": "^1.0.5", - "supports-color": "^5.3.0" - }, - "engines": { - "node": 
">=4" - } - }, - "node_modules/@babel/code-frame/node_modules/color-convert": { - "version": "1.9.3", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-1.9.3.tgz", - "integrity": "sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==", - "dependencies": { - "color-name": "1.1.3" - } - }, - "node_modules/@babel/code-frame/node_modules/color-name": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz", - "integrity": "sha512-72fSenhMw2HZMTVHeCA9KCmpEIbzWiQsjN+BHcBbS9vr1mtt+vJjPdksIBNUmKAW8TFUDPJK5SUU3QhE9NEXDw==" - }, - "node_modules/@babel/code-frame/node_modules/escape-string-regexp": { - "version": "1.0.5", - "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz", - "integrity": "sha512-vbRorB5FUQWvla16U8R/qgaFIya2qGzwDrNmCZuYKrbdSUMG6I1ZCGQRefkRVhuOkIGVne7BQ35DSfo1qvJqFg==", - "engines": { - "node": ">=0.8.0" - } - }, - "node_modules/@babel/code-frame/node_modules/has-flag": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-3.0.0.tgz", - "integrity": "sha512-sKJf1+ceQBr4SMkvQnBDNDtf4TXpVhVGateu0t918bl30FnbE2m4vNLX+VWe/dpjlb+HugGYzW7uQXH98HPEYw==", - "engines": { - "node": ">=4" - } - }, - "node_modules/@babel/code-frame/node_modules/supports-color": { - "version": "5.5.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.5.0.tgz", - "integrity": "sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow==", - "dependencies": { - "has-flag": "^3.0.0" - }, - "engines": { - "node": ">=4" - } - }, "node_modules/@babel/compat-data": { - "version": "7.23.5", - "resolved": "https://registry.npmjs.org/@babel/compat-data/-/compat-data-7.23.5.tgz", - "integrity": "sha512-uU27kfDRlhfKl+w1U6vp16IuvSLtjAxdArVXPa9BvLkrr7CYIsxH5adpHObeAGY/41+syctUWOZ140a2Rvkgjw==", + "version": "7.27.5", + "resolved": "https://registry.npmjs.org/@babel/compat-data/-/compat-data-7.27.5.tgz", + "integrity": "sha512-KiRAp/VoJaWkkte84TvUd9qjdbZAdiqyvMxrGl1N6vzFogKmaLgoM3L1kgtLicp2HP5fBJS8JrZKLVIZGVJAVg==", + "license": "MIT", "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/core": { - "version": "7.23.7", - "resolved": "https://registry.npmjs.org/@babel/core/-/core-7.23.7.tgz", - "integrity": "sha512-+UpDgowcmqe36d4NwqvKsyPMlOLNGMsfMmQ5WGCu+siCe3t3dfe9njrzGfdN4qq+bcNUt0+Vw6haRxBOycs4dw==", + "version": "7.27.4", + "resolved": "https://registry.npmjs.org/@babel/core/-/core-7.27.4.tgz", + "integrity": "sha512-bXYxrXFubeYdvB0NhD/NBB3Qi6aZeV20GOWVI47t2dkecCEoneR4NPVcb7abpXDEvejgrUfFtG6vG/zxAKmg+g==", + "license": "MIT", "dependencies": { "@ampproject/remapping": "^2.2.0", - "@babel/code-frame": "^7.23.5", - "@babel/generator": "^7.23.6", - "@babel/helper-compilation-targets": "^7.23.6", - "@babel/helper-module-transforms": "^7.23.3", - "@babel/helpers": "^7.23.7", - "@babel/parser": "^7.23.6", - "@babel/template": "^7.22.15", - "@babel/traverse": "^7.23.7", - "@babel/types": "^7.23.6", + "@babel/code-frame": "^7.27.1", + "@babel/generator": "^7.27.3", + "@babel/helper-compilation-targets": "^7.27.2", + "@babel/helper-module-transforms": "^7.27.3", + "@babel/helpers": "^7.27.4", + "@babel/parser": "^7.27.4", + "@babel/template": "^7.27.2", + "@babel/traverse": "^7.27.4", + "@babel/types": "^7.27.3", "convert-source-map": "^2.0.0", "debug": "^4.1.0", "gensync": "^1.0.0-beta.2", @@ -379,49 +320,42 @@ } }, "node_modules/@babel/generator": { - "version": "7.23.6", - "resolved": 
"https://registry.npmjs.org/@babel/generator/-/generator-7.23.6.tgz", - "integrity": "sha512-qrSfCYxYQB5owCmGLbl8XRpX1ytXlpueOb0N0UmQwA073KZxejgQTzAmJezxvpwQD9uGtK2shHdi55QT+MbjIw==", + "version": "7.27.5", + "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.27.5.tgz", + "integrity": "sha512-ZGhA37l0e/g2s1Cnzdix0O3aLYm66eF8aufiVteOgnwxgnRP8GoyMj7VWsgWnQbVKXyge7hqrFh2K2TQM6t1Hw==", + "license": "MIT", "dependencies": { - "@babel/types": "^7.23.6", - "@jridgewell/gen-mapping": "^0.3.2", - "@jridgewell/trace-mapping": "^0.3.17", - "jsesc": "^2.5.1" + "@babel/parser": "^7.27.5", + "@babel/types": "^7.27.3", + "@jridgewell/gen-mapping": "^0.3.5", + "@jridgewell/trace-mapping": "^0.3.25", + "jsesc": "^3.0.2" }, "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/helper-annotate-as-pure": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/helper-annotate-as-pure/-/helper-annotate-as-pure-7.22.5.tgz", - "integrity": "sha512-LvBTxu8bQSQkcyKOU+a1btnNFQ1dMAd0R6PyW3arXes06F6QLWLIrd681bxRPIXlrMGR3XYnW9JyML7dP3qgxg==", - "dependencies": { - "@babel/types": "^7.22.5" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/helper-builder-binary-assignment-operator-visitor": { - "version": "7.22.15", - "resolved": "https://registry.npmjs.org/@babel/helper-builder-binary-assignment-operator-visitor/-/helper-builder-binary-assignment-operator-visitor-7.22.15.tgz", - "integrity": "sha512-QkBXwGgaoC2GtGZRoma6kv7Szfv06khvhFav67ZExau2RaXzy8MpHSMO2PNoP2XtmQphJQRHFfg77Bq731Yizw==", + "version": "7.27.3", + "resolved": "https://registry.npmjs.org/@babel/helper-annotate-as-pure/-/helper-annotate-as-pure-7.27.3.tgz", + "integrity": "sha512-fXSwMQqitTGeHLBC08Eq5yXz2m37E4pJX1qAU1+2cNedz/ifv/bVXft90VeSav5nFO61EcNgwr0aJxbyPaWBPg==", + "license": "MIT", "dependencies": { - "@babel/types": "^7.22.15" + "@babel/types": "^7.27.3" }, "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/helper-compilation-targets": { - "version": "7.23.6", - "resolved": "https://registry.npmjs.org/@babel/helper-compilation-targets/-/helper-compilation-targets-7.23.6.tgz", - "integrity": "sha512-9JB548GZoQVmzrFgp8o7KxdgkTGm6xs9DW0o/Pim72UDjzr5ObUQ6ZzYPqA+g9OTS2bBQoctLJrky0RDCAWRgQ==", + "version": "7.27.2", + "resolved": "https://registry.npmjs.org/@babel/helper-compilation-targets/-/helper-compilation-targets-7.27.2.tgz", + "integrity": "sha512-2+1thGUUWWjLTYTHZWK1n8Yga0ijBz1XAhUXcKy81rd5g6yh7hGqMp45v7cadSbEHc9G3OTv45SyneRN3ps4DQ==", + "license": "MIT", "dependencies": { - "@babel/compat-data": "^7.23.5", - "@babel/helper-validator-option": "^7.23.5", - "browserslist": "^4.22.2", + "@babel/compat-data": "^7.27.2", + "@babel/helper-validator-option": "^7.27.1", + "browserslist": "^4.24.0", "lru-cache": "^5.1.1", "semver": "^6.3.1" }, @@ -433,23 +367,23 @@ "version": "6.3.1", "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", + "license": "ISC", "bin": { "semver": "bin/semver.js" } }, "node_modules/@babel/helper-create-class-features-plugin": { - "version": "7.23.7", - "resolved": "https://registry.npmjs.org/@babel/helper-create-class-features-plugin/-/helper-create-class-features-plugin-7.23.7.tgz", - "integrity": "sha512-xCoqR/8+BoNnXOY7RVSgv6X+o7pmT5q1d+gGcRlXYkI+9B31glE4jeejhKVpA04O1AtzOt7OSQ6VYKP5FcRl9g==", - "dependencies": { - "@babel/helper-annotate-as-pure": "^7.22.5", - "@babel/helper-environment-visitor": "^7.22.20", - 
"@babel/helper-function-name": "^7.23.0", - "@babel/helper-member-expression-to-functions": "^7.23.0", - "@babel/helper-optimise-call-expression": "^7.22.5", - "@babel/helper-replace-supers": "^7.22.20", - "@babel/helper-skip-transparent-expression-wrappers": "^7.22.5", - "@babel/helper-split-export-declaration": "^7.22.6", + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/helper-create-class-features-plugin/-/helper-create-class-features-plugin-7.27.1.tgz", + "integrity": "sha512-QwGAmuvM17btKU5VqXfb+Giw4JcN0hjuufz3DYnpeVDvZLAObloM77bhMXiqry3Iio+Ai4phVRDwl6WU10+r5A==", + "license": "MIT", + "dependencies": { + "@babel/helper-annotate-as-pure": "^7.27.1", + "@babel/helper-member-expression-to-functions": "^7.27.1", + "@babel/helper-optimise-call-expression": "^7.27.1", + "@babel/helper-replace-supers": "^7.27.1", + "@babel/helper-skip-transparent-expression-wrappers": "^7.27.1", + "@babel/traverse": "^7.27.1", "semver": "^6.3.1" }, "engines": { @@ -463,17 +397,19 @@ "version": "6.3.1", "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", + "license": "ISC", "bin": { "semver": "bin/semver.js" } }, "node_modules/@babel/helper-create-regexp-features-plugin": { - "version": "7.22.15", - "resolved": "https://registry.npmjs.org/@babel/helper-create-regexp-features-plugin/-/helper-create-regexp-features-plugin-7.22.15.tgz", - "integrity": "sha512-29FkPLFjn4TPEa3RE7GpW+qbE8tlsu3jntNYNfcGsc49LphF1PQIiD+vMZ1z1xVOKt+93khA9tc2JBs3kBjA7w==", + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/helper-create-regexp-features-plugin/-/helper-create-regexp-features-plugin-7.27.1.tgz", + "integrity": "sha512-uVDC72XVf8UbrH5qQTc18Agb8emwjTiZrQE11Nv3CuBEZmVvTwwE9CBUEvHku06gQCAyYf8Nv6ja1IN+6LMbxQ==", + "license": "MIT", "dependencies": { - "@babel/helper-annotate-as-pure": "^7.22.5", - "regexpu-core": "^5.3.1", + "@babel/helper-annotate-as-pure": "^7.27.1", + "regexpu-core": "^6.2.0", "semver": "^6.3.1" }, "engines": { @@ -492,9 +428,10 @@ } }, "node_modules/@babel/helper-define-polyfill-provider": { - "version": "0.4.4", - "resolved": "https://registry.npmjs.org/@babel/helper-define-polyfill-provider/-/helper-define-polyfill-provider-0.4.4.tgz", - "integrity": "sha512-QcJMILQCu2jm5TFPGA3lCpJJTeEP+mqeXooG/NZbg/h5FTFi6V0+99ahlRsW8/kRLyb24LZVCCiclDedhLKcBA==", + "version": "0.6.4", + "resolved": "https://registry.npmjs.org/@babel/helper-define-polyfill-provider/-/helper-define-polyfill-provider-0.6.4.tgz", + "integrity": "sha512-jljfR1rGnXXNWnmQg2K3+bvhkxB51Rl32QRaOTuwwjviGrHzIbSc8+x9CpraDtbT7mfyjXObULP4w/adunNwAw==", + "license": "MIT", "dependencies": { "@babel/helper-compilation-targets": "^7.22.6", "@babel/helper-plugin-utils": "^7.22.5", @@ -506,69 +443,41 @@ "@babel/core": "^7.4.0 || ^8.0.0-0 <8.0.0" } }, - "node_modules/@babel/helper-environment-visitor": { - "version": "7.22.20", - "resolved": "https://registry.npmjs.org/@babel/helper-environment-visitor/-/helper-environment-visitor-7.22.20.tgz", - "integrity": "sha512-zfedSIzFhat/gFhWfHtgWvlec0nqB9YEIVrpuwjruLlXfUSnA8cJB0miHKwqDnQ7d32aKo2xt88/xZptwxbfhA==", - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/helper-function-name": { - "version": "7.23.0", - "resolved": "https://registry.npmjs.org/@babel/helper-function-name/-/helper-function-name-7.23.0.tgz", - "integrity": "sha512-OErEqsrxjZTJciZ4Oo+eoZqeW9UIiOcuYKRJA4ZAgV9myA+pOXhhmpfNCKjEH/auVfEYVFJ6y1Tc4r0eIApqiw==", - 
"dependencies": { - "@babel/template": "^7.22.15", - "@babel/types": "^7.23.0" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/helper-hoist-variables": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/helper-hoist-variables/-/helper-hoist-variables-7.22.5.tgz", - "integrity": "sha512-wGjk9QZVzvknA6yKIUURb8zY3grXCcOZt+/7Wcy8O2uctxhplmUPkOdlgoNhmdVee2c92JXbf1xpMtVNbfoxRw==", - "dependencies": { - "@babel/types": "^7.22.5" - }, - "engines": { - "node": ">=6.9.0" - } - }, "node_modules/@babel/helper-member-expression-to-functions": { - "version": "7.23.0", - "resolved": "https://registry.npmjs.org/@babel/helper-member-expression-to-functions/-/helper-member-expression-to-functions-7.23.0.tgz", - "integrity": "sha512-6gfrPwh7OuT6gZyJZvd6WbTfrqAo7vm4xCzAXOusKqq/vWdKXphTpj5klHKNmRUU6/QRGlBsyU9mAIPaWHlqJA==", + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/helper-member-expression-to-functions/-/helper-member-expression-to-functions-7.27.1.tgz", + "integrity": "sha512-E5chM8eWjTp/aNoVpcbfM7mLxu9XGLWYise2eBKGQomAk/Mb4XoxyqXTZbuTohbsl8EKqdlMhnDI2CCLfcs9wA==", + "license": "MIT", "dependencies": { - "@babel/types": "^7.23.0" + "@babel/traverse": "^7.27.1", + "@babel/types": "^7.27.1" }, "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/helper-module-imports": { - "version": "7.22.15", - "resolved": "https://registry.npmjs.org/@babel/helper-module-imports/-/helper-module-imports-7.22.15.tgz", - "integrity": "sha512-0pYVBnDKZO2fnSPCrgM/6WMc7eS20Fbok+0r88fp+YtWVLZrp4CkafFGIp+W0VKw4a22sgebPT99y+FDNMdP4w==", + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/helper-module-imports/-/helper-module-imports-7.27.1.tgz", + "integrity": "sha512-0gSFWUPNXNopqtIPQvlD5WgXYI5GY2kP2cCvoT8kczjbfcfuIljTbcWrulD1CIPIX2gt1wghbDy08yE1p+/r3w==", + "license": "MIT", "dependencies": { - "@babel/types": "^7.22.15" + "@babel/traverse": "^7.27.1", + "@babel/types": "^7.27.1" }, "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/helper-module-transforms": { - "version": "7.23.3", - "resolved": "https://registry.npmjs.org/@babel/helper-module-transforms/-/helper-module-transforms-7.23.3.tgz", - "integrity": "sha512-7bBs4ED9OmswdfDzpz4MpWgSrV7FXlc3zIagvLFjS5H+Mk7Snr21vQ6QwrsoCGMfNC4e4LQPdoULEt4ykz0SRQ==", + "version": "7.27.3", + "resolved": "https://registry.npmjs.org/@babel/helper-module-transforms/-/helper-module-transforms-7.27.3.tgz", + "integrity": "sha512-dSOvYwvyLsWBeIRyOeHXp5vPj5l1I011r52FM1+r1jCERv+aFXYk4whgQccYEGYxK2H3ZAIA8nuPkQ0HaUo3qg==", + "license": "MIT", "dependencies": { - "@babel/helper-environment-visitor": "^7.22.20", - "@babel/helper-module-imports": "^7.22.15", - "@babel/helper-simple-access": "^7.22.5", - "@babel/helper-split-export-declaration": "^7.22.6", - "@babel/helper-validator-identifier": "^7.22.20" + "@babel/helper-module-imports": "^7.27.1", + "@babel/helper-validator-identifier": "^7.27.1", + "@babel/traverse": "^7.27.3" }, "engines": { "node": ">=6.9.0" @@ -578,32 +487,35 @@ } }, "node_modules/@babel/helper-optimise-call-expression": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/helper-optimise-call-expression/-/helper-optimise-call-expression-7.22.5.tgz", - "integrity": "sha512-HBwaojN0xFRx4yIvpwGqxiV2tUfl7401jlok564NgB9EHS1y6QT17FmKWm4ztqjeVdXLuC4fSvHc5ePpQjoTbw==", + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/helper-optimise-call-expression/-/helper-optimise-call-expression-7.27.1.tgz", + "integrity": 
"sha512-URMGH08NzYFhubNSGJrpUEphGKQwMQYBySzat5cAByY1/YgIRkULnIy3tAMeszlL/so2HbeilYloUmSpd7GdVw==", + "license": "MIT", "dependencies": { - "@babel/types": "^7.22.5" + "@babel/types": "^7.27.1" }, "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/helper-plugin-utils": { - "version": "7.24.5", - "resolved": "https://registry.npmjs.org/@babel/helper-plugin-utils/-/helper-plugin-utils-7.24.5.tgz", - "integrity": "sha512-xjNLDopRzW2o6ba0gKbkZq5YWEBaK3PCyTOY1K2P/O07LGMhMqlMXPxwN4S5/RhWuCobT8z0jrlKGlYmeR1OhQ==", + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/helper-plugin-utils/-/helper-plugin-utils-7.27.1.tgz", + "integrity": "sha512-1gn1Up5YXka3YYAHGKpbideQ5Yjf1tDa9qYcgysz+cNCXukyLl6DjPXhD3VRwSb8c0J9tA4b2+rHEZtc6R0tlw==", + "license": "MIT", "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/helper-remap-async-to-generator": { - "version": "7.22.20", - "resolved": "https://registry.npmjs.org/@babel/helper-remap-async-to-generator/-/helper-remap-async-to-generator-7.22.20.tgz", - "integrity": "sha512-pBGyV4uBqOns+0UvhsTO8qgl8hO89PmiDYv+/COyp1aeMcmfrfruz+/nCMFiYyFF/Knn0yfrC85ZzNFjembFTw==", + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/helper-remap-async-to-generator/-/helper-remap-async-to-generator-7.27.1.tgz", + "integrity": "sha512-7fiA521aVw8lSPeI4ZOD3vRFkoqkJcS+z4hFo82bFSH/2tNd6eJ5qCVMS5OzDmZh/kaHQeBaeyxK6wljcPtveA==", + "license": "MIT", "dependencies": { - "@babel/helper-annotate-as-pure": "^7.22.5", - "@babel/helper-environment-visitor": "^7.22.20", - "@babel/helper-wrap-function": "^7.22.20" + "@babel/helper-annotate-as-pure": "^7.27.1", + "@babel/helper-wrap-function": "^7.27.1", + "@babel/traverse": "^7.27.1" }, "engines": { "node": ">=6.9.0" @@ -613,13 +525,14 @@ } }, "node_modules/@babel/helper-replace-supers": { - "version": "7.22.20", - "resolved": "https://registry.npmjs.org/@babel/helper-replace-supers/-/helper-replace-supers-7.22.20.tgz", - "integrity": "sha512-qsW0In3dbwQUbK8kejJ4R7IHVGwHJlV6lpG6UA7a9hSa2YEiAib+N1T2kr6PEeUT+Fl7najmSOS6SmAwCHK6Tw==", + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/helper-replace-supers/-/helper-replace-supers-7.27.1.tgz", + "integrity": "sha512-7EHz6qDZc8RYS5ElPoShMheWvEgERonFCs7IAonWLLUTXW59DP14bCZt89/GKyreYn8g3S83m21FelHKbeDCKA==", + "license": "MIT", "dependencies": { - "@babel/helper-environment-visitor": "^7.22.20", - "@babel/helper-member-expression-to-functions": "^7.22.15", - "@babel/helper-optimise-call-expression": "^7.22.5" + "@babel/helper-member-expression-to-functions": "^7.27.1", + "@babel/helper-optimise-call-expression": "^7.27.1", + "@babel/traverse": "^7.27.1" }, "engines": { "node": ">=6.9.0" @@ -628,183 +541,126 @@ "@babel/core": "^7.0.0" } }, - "node_modules/@babel/helper-simple-access": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/helper-simple-access/-/helper-simple-access-7.22.5.tgz", - "integrity": "sha512-n0H99E/K+Bika3++WNL17POvo4rKWZ7lZEp1Q+fStVbUi8nxPQEBOlTmCOxW/0JsS56SKKQ+ojAe2pHKJHN35w==", - "dependencies": { - "@babel/types": "^7.22.5" - }, - "engines": { - "node": ">=6.9.0" - } - }, "node_modules/@babel/helper-skip-transparent-expression-wrappers": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/helper-skip-transparent-expression-wrappers/-/helper-skip-transparent-expression-wrappers-7.22.5.tgz", - "integrity": "sha512-tK14r66JZKiC43p8Ki33yLBVJKlQDFoA8GYN67lWCDCqoL6EMMSuM9b+Iff2jHaM/RRFYl7K+iiru7hbRqNx8Q==", - "dependencies": { - "@babel/types": "^7.22.5" - }, - 
"engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/helper-split-export-declaration": { - "version": "7.22.6", - "resolved": "https://registry.npmjs.org/@babel/helper-split-export-declaration/-/helper-split-export-declaration-7.22.6.tgz", - "integrity": "sha512-AsUnxuLhRYsisFiaJwvp1QF+I3KjD5FOxut14q/GzovUe6orHLesW2C7d754kRm53h5gqrz6sFl6sxc4BVtE/g==", + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/helper-skip-transparent-expression-wrappers/-/helper-skip-transparent-expression-wrappers-7.27.1.tgz", + "integrity": "sha512-Tub4ZKEXqbPjXgWLl2+3JpQAYBJ8+ikpQ2Ocj/q/r0LwE3UhENh7EUabyHjz2kCEsrRY83ew2DQdHluuiDQFzg==", + "license": "MIT", "dependencies": { - "@babel/types": "^7.22.5" + "@babel/traverse": "^7.27.1", + "@babel/types": "^7.27.1" }, "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/helper-string-parser": { - "version": "7.23.4", - "resolved": "https://registry.npmjs.org/@babel/helper-string-parser/-/helper-string-parser-7.23.4.tgz", - "integrity": "sha512-803gmbQdqwdf4olxrX4AJyFBV/RTr3rSmOj0rKwesmzlfhYNDEs+/iOcznzpNWlJlIlTJC2QfPFcHB6DlzdVLQ==", + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/helper-string-parser/-/helper-string-parser-7.27.1.tgz", + "integrity": "sha512-qMlSxKbpRlAridDExk92nSobyDdpPijUq2DW6oDnUqd0iOGxmQjyqhMIihI9+zv4LPyZdRje2cavWPbCbWm3eA==", + "license": "MIT", "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/helper-validator-identifier": { - "version": "7.22.20", - "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.22.20.tgz", - "integrity": "sha512-Y4OZ+ytlatR8AI+8KZfKuL5urKp7qey08ha31L8b3BwewJAoJamTzyvxPR/5D+KkdJCGPq/+8TukHBlY10FX9A==", + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.27.1.tgz", + "integrity": "sha512-D2hP9eA+Sqx1kBZgzxZh0y1trbuU+JoDkiEwqhQ36nodYqJwyEIhPSdMNd7lOm/4io72luTPWH20Yda0xOuUow==", + "license": "MIT", "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/helper-validator-option": { - "version": "7.23.5", - "resolved": "https://registry.npmjs.org/@babel/helper-validator-option/-/helper-validator-option-7.23.5.tgz", - "integrity": "sha512-85ttAOMLsr53VgXkTbkx8oA6YTfT4q7/HzXSLEYmjcSTJPMPQtvq1BD79Byep5xMUYbGRzEpDsjUf3dyp54IKw==", + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/helper-validator-option/-/helper-validator-option-7.27.1.tgz", + "integrity": "sha512-YvjJow9FxbhFFKDSuFnVCe2WxXk1zWc22fFePVNEaWJEu8IrZVlda6N0uHwzZrUM1il7NC9Mlp4MaJYbYd9JSg==", + "license": "MIT", "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/helper-wrap-function": { - "version": "7.22.20", - "resolved": "https://registry.npmjs.org/@babel/helper-wrap-function/-/helper-wrap-function-7.22.20.tgz", - "integrity": "sha512-pms/UwkOpnQe/PDAEdV/d7dVCoBbB+R4FvYoHGZz+4VPcg7RtYy2KP7S2lbuWM6FCSgob5wshfGESbC/hzNXZw==", + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/helper-wrap-function/-/helper-wrap-function-7.27.1.tgz", + "integrity": "sha512-NFJK2sHUvrjo8wAU/nQTWU890/zB2jj0qBcCbZbbf+005cAsv6tMjXz31fBign6M5ov1o0Bllu+9nbqkfsjjJQ==", + "license": "MIT", "dependencies": { - "@babel/helper-function-name": "^7.22.5", - "@babel/template": "^7.22.15", - "@babel/types": "^7.22.19" + "@babel/template": "^7.27.1", + "@babel/traverse": "^7.27.1", + "@babel/types": "^7.27.1" }, "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/helpers": { - "version": "7.23.7", - "resolved": 
"https://registry.npmjs.org/@babel/helpers/-/helpers-7.23.7.tgz", - "integrity": "sha512-6AMnjCoC8wjqBzDHkuqpa7jAKwvMo4dC+lr/TFBz+ucfulO1XMpDnwWPGBNwClOKZ8h6xn5N81W/R5OrcKtCbQ==", + "version": "7.27.4", + "resolved": "https://registry.npmjs.org/@babel/helpers/-/helpers-7.27.4.tgz", + "integrity": "sha512-Y+bO6U+I7ZKaM5G5rDUZiYfUvQPUibYmAFe7EnKdnKBbVXDZxvp+MWOH5gYciY0EPk4EScsuFMQBbEfpdRKSCQ==", + "license": "MIT", "dependencies": { - "@babel/template": "^7.22.15", - "@babel/traverse": "^7.23.7", - "@babel/types": "^7.23.6" + "@babel/template": "^7.27.2", + "@babel/types": "^7.27.3" }, "engines": { "node": ">=6.9.0" } }, - "node_modules/@babel/highlight": { - "version": "7.23.4", - "resolved": "https://registry.npmjs.org/@babel/highlight/-/highlight-7.23.4.tgz", - "integrity": "sha512-acGdbYSfp2WheJoJm/EBBBLh/ID8KDc64ISZ9DYtBmC8/Q204PZJLHyzeB5qMzJ5trcOkybd78M4x2KWsUq++A==", + "node_modules/@babel/parser": { + "version": "7.27.5", + "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.27.5.tgz", + "integrity": "sha512-OsQd175SxWkGlzbny8J3K8TnnDD0N3lrIUtB92xwyRpzaenGZhxDvxN/JgU00U3CDZNj9tPuDJ5H0WS4Nt3vKg==", + "license": "MIT", "dependencies": { - "@babel/helper-validator-identifier": "^7.22.20", - "chalk": "^2.4.2", - "js-tokens": "^4.0.0" + "@babel/types": "^7.27.3" }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/highlight/node_modules/ansi-styles": { - "version": "3.2.1", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz", - "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==", - "dependencies": { - "color-convert": "^1.9.0" + "bin": { + "parser": "bin/babel-parser.js" }, "engines": { - "node": ">=4" + "node": ">=6.0.0" } }, - "node_modules/@babel/highlight/node_modules/chalk": { - "version": "2.4.2", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.2.tgz", - "integrity": "sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ==", + "node_modules/@babel/plugin-bugfix-firefox-class-in-computed-class-key": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-bugfix-firefox-class-in-computed-class-key/-/plugin-bugfix-firefox-class-in-computed-class-key-7.27.1.tgz", + "integrity": "sha512-QPG3C9cCVRQLxAVwmefEmwdTanECuUBMQZ/ym5kiw3XKCGA7qkuQLcjWWHcrD/GKbn/WmJwaezfuuAOcyKlRPA==", + "license": "MIT", "dependencies": { - "ansi-styles": "^3.2.1", - "escape-string-regexp": "^1.0.5", - "supports-color": "^5.3.0" + "@babel/helper-plugin-utils": "^7.27.1", + "@babel/traverse": "^7.27.1" }, "engines": { - "node": ">=4" - } - }, - "node_modules/@babel/highlight/node_modules/color-convert": { - "version": "1.9.3", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-1.9.3.tgz", - "integrity": "sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==", - "dependencies": { - "color-name": "1.1.3" - } - }, - "node_modules/@babel/highlight/node_modules/color-name": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz", - "integrity": "sha512-72fSenhMw2HZMTVHeCA9KCmpEIbzWiQsjN+BHcBbS9vr1mtt+vJjPdksIBNUmKAW8TFUDPJK5SUU3QhE9NEXDw==" - }, - "node_modules/@babel/highlight/node_modules/escape-string-regexp": { - "version": "1.0.5", - "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz", - "integrity": 
"sha512-vbRorB5FUQWvla16U8R/qgaFIya2qGzwDrNmCZuYKrbdSUMG6I1ZCGQRefkRVhuOkIGVne7BQ35DSfo1qvJqFg==", - "engines": { - "node": ">=0.8.0" - } - }, - "node_modules/@babel/highlight/node_modules/has-flag": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-3.0.0.tgz", - "integrity": "sha512-sKJf1+ceQBr4SMkvQnBDNDtf4TXpVhVGateu0t918bl30FnbE2m4vNLX+VWe/dpjlb+HugGYzW7uQXH98HPEYw==", - "engines": { - "node": ">=4" + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" } }, - "node_modules/@babel/highlight/node_modules/supports-color": { - "version": "5.5.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.5.0.tgz", - "integrity": "sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow==", + "node_modules/@babel/plugin-bugfix-safari-class-field-initializer-scope": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-bugfix-safari-class-field-initializer-scope/-/plugin-bugfix-safari-class-field-initializer-scope-7.27.1.tgz", + "integrity": "sha512-qNeq3bCKnGgLkEXUuFry6dPlGfCdQNZbn7yUAPCInwAJHMU7THJfrBSozkcWq5sNM6RcF3S8XyQL2A52KNR9IA==", + "license": "MIT", "dependencies": { - "has-flag": "^3.0.0" + "@babel/helper-plugin-utils": "^7.27.1" }, "engines": { - "node": ">=4" - } - }, - "node_modules/@babel/parser": { - "version": "7.23.6", - "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.23.6.tgz", - "integrity": "sha512-Z2uID7YJ7oNvAI20O9X0bblw7Qqs8Q2hFy0R9tAfnfLkp5MW0UH9eUvnDSnFwKZ0AvgS1ucqR4KzvVHgnke1VQ==", - "bin": { - "parser": "bin/babel-parser.js" + "node": ">=6.9.0" }, - "engines": { - "node": ">=6.0.0" + "peerDependencies": { + "@babel/core": "^7.0.0" } }, "node_modules/@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression": { - "version": "7.23.3", - "resolved": "https://registry.npmjs.org/@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression/-/plugin-bugfix-safari-id-destructuring-collision-in-function-expression-7.23.3.tgz", - "integrity": "sha512-iRkKcCqb7iGnq9+3G6rZ+Ciz5VywC4XNRHe57lKM+jOeYAoR0lVqdeeDRfh0tQcTfw/+vBhHn926FmQhLtlFLQ==", + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression/-/plugin-bugfix-safari-id-destructuring-collision-in-function-expression-7.27.1.tgz", + "integrity": "sha512-g4L7OYun04N1WyqMNjldFwlfPCLVkgB54A/YCXICZYBsvJJE3kByKv9c9+R/nAfmIfjl2rKYLNyMHboYbZaWaA==", + "license": "MIT", "dependencies": { - "@babel/helper-plugin-utils": "^7.22.5" + "@babel/helper-plugin-utils": "^7.27.1" }, "engines": { "node": ">=6.9.0" @@ -814,13 +670,14 @@ } }, "node_modules/@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining": { - "version": "7.23.3", - "resolved": "https://registry.npmjs.org/@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining/-/plugin-bugfix-v8-spread-parameters-in-optional-chaining-7.23.3.tgz", - "integrity": "sha512-WwlxbfMNdVEpQjZmK5mhm7oSwD3dS6eU+Iwsi4Knl9wAletWem7kaRsGOG+8UEbRyqxY4SS5zvtfXwX+jMxUwQ==", + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining/-/plugin-bugfix-v8-spread-parameters-in-optional-chaining-7.27.1.tgz", + "integrity": "sha512-oO02gcONcD5O1iTLi/6frMJBIwWEHceWGSGqrpCmEL8nogiS6J9PBlE48CaK20/Jx1LuRml9aDftLgdjXT8+Cw==", + "license": "MIT", "dependencies": { - "@babel/helper-plugin-utils": "^7.22.5", - "@babel/helper-skip-transparent-expression-wrappers": 
"^7.22.5", - "@babel/plugin-transform-optional-chaining": "^7.23.3" + "@babel/helper-plugin-utils": "^7.27.1", + "@babel/helper-skip-transparent-expression-wrappers": "^7.27.1", + "@babel/plugin-transform-optional-chaining": "^7.27.1" }, "engines": { "node": ">=6.9.0" @@ -830,12 +687,13 @@ } }, "node_modules/@babel/plugin-bugfix-v8-static-class-fields-redefine-readonly": { - "version": "7.23.7", - "resolved": "https://registry.npmjs.org/@babel/plugin-bugfix-v8-static-class-fields-redefine-readonly/-/plugin-bugfix-v8-static-class-fields-redefine-readonly-7.23.7.tgz", - "integrity": "sha512-LlRT7HgaifEpQA1ZgLVOIJZZFVPWN5iReq/7/JixwBtwcoeVGDBD53ZV28rrsLYOZs1Y/EHhA8N/Z6aazHR8cw==", + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-bugfix-v8-static-class-fields-redefine-readonly/-/plugin-bugfix-v8-static-class-fields-redefine-readonly-7.27.1.tgz", + "integrity": "sha512-6BpaYGDavZqkI6yT+KSPdpZFfpnd68UKXbcjI9pJ13pvHhPrCKWOOLp+ysvMeA+DxnhuPpgIaRpxRxo5A9t5jw==", + "license": "MIT", "dependencies": { - "@babel/helper-environment-visitor": "^7.22.20", - "@babel/helper-plugin-utils": "^7.22.5" + "@babel/helper-plugin-utils": "^7.27.1", + "@babel/traverse": "^7.27.1" }, "engines": { "node": ">=6.9.0" @@ -855,10 +713,10 @@ "@babel/core": "^7.0.0-0" } }, - "node_modules/@babel/plugin-syntax-async-generators": { - "version": "7.8.4", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-async-generators/-/plugin-syntax-async-generators-7.8.4.tgz", - "integrity": "sha512-tycmZxkGfZaxhMRbXlPXuVFpdWlXpir2W4AMhSJgRKzk/eDlIXOhb2LHWoLpDF7TEHylV5zNhykX6KAgHJmTNw==", + "node_modules/@babel/plugin-syntax-dynamic-import": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-dynamic-import/-/plugin-syntax-dynamic-import-7.8.3.tgz", + "integrity": "sha512-5gdGbFon+PszYzqs83S3E5mpi7/y/8M9eC90MRTZfduQOYW76ig6SOSPNe41IG5LoP3FGBn2N0RjVDSQiS94kQ==", "dependencies": { "@babel/helper-plugin-utils": "^7.8.0" }, @@ -866,23 +724,28 @@ "@babel/core": "^7.0.0-0" } }, - "node_modules/@babel/plugin-syntax-class-properties": { - "version": "7.12.13", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-class-properties/-/plugin-syntax-class-properties-7.12.13.tgz", - "integrity": "sha512-fm4idjKla0YahUNgFNLCB0qySdsoPiZP3iQE3rky0mBUtMZ23yDJ9SJdg6dXTSDnulOVqiF3Hgr9nbXvXTQZYA==", + "node_modules/@babel/plugin-syntax-import-assertions": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-import-assertions/-/plugin-syntax-import-assertions-7.27.1.tgz", + "integrity": "sha512-UT/Jrhw57xg4ILHLFnzFpPDlMbcdEicaAtjPQpbj9wa8T4r5KVWCimHcL/460g8Ht0DMxDyjsLgiWSkVjnwPFg==", + "license": "MIT", "dependencies": { - "@babel/helper-plugin-utils": "^7.12.13" + "@babel/helper-plugin-utils": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" }, "peerDependencies": { "@babel/core": "^7.0.0-0" } }, - "node_modules/@babel/plugin-syntax-class-static-block": { - "version": "7.14.5", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-class-static-block/-/plugin-syntax-class-static-block-7.14.5.tgz", - "integrity": "sha512-b+YyPmr6ldyNnM6sqYeMWE+bgJcJpO6yS4QD7ymxgH34GBPNDM/THBh8iunyvKIZztiwLH4CJZ0RxTk9emgpjw==", + "node_modules/@babel/plugin-syntax-import-attributes": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-import-attributes/-/plugin-syntax-import-attributes-7.27.1.tgz", + "integrity": "sha512-oFT0FrKHgF53f4vOsZGi2Hh3I35PfSmVs4IBFLFj4dnafP+hIWDLg3VyKmUHfLoLHlyxY4C7DGtmHuJgn+IGww==", + 
"license": "MIT", "dependencies": { - "@babel/helper-plugin-utils": "^7.14.5" + "@babel/helper-plugin-utils": "^7.27.1" }, "engines": { "node": ">=6.9.0" @@ -891,34 +754,28 @@ "@babel/core": "^7.0.0-0" } }, - "node_modules/@babel/plugin-syntax-dynamic-import": { - "version": "7.8.3", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-dynamic-import/-/plugin-syntax-dynamic-import-7.8.3.tgz", - "integrity": "sha512-5gdGbFon+PszYzqs83S3E5mpi7/y/8M9eC90MRTZfduQOYW76ig6SOSPNe41IG5LoP3FGBn2N0RjVDSQiS94kQ==", + "node_modules/@babel/plugin-syntax-jsx": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-jsx/-/plugin-syntax-jsx-7.27.1.tgz", + "integrity": "sha512-y8YTNIeKoyhGd9O0Jiyzyyqk8gdjnumGTQPsz0xOZOQ2RmkVJeZ1vmmfIvFEKqucBG6axJGBZDE/7iI5suUI/w==", + "license": "MIT", "dependencies": { - "@babel/helper-plugin-utils": "^7.8.0" + "@babel/helper-plugin-utils": "^7.27.1" }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-syntax-export-namespace-from": { - "version": "7.8.3", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-export-namespace-from/-/plugin-syntax-export-namespace-from-7.8.3.tgz", - "integrity": "sha512-MXf5laXo6c1IbEbegDmzGPwGNTsHZmEy6QGznu5Sh2UCWvueywb2ee+CCE4zQiZstxU9BMoQO9i6zUFSY0Kj0Q==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.8.3" + "engines": { + "node": ">=6.9.0" }, "peerDependencies": { "@babel/core": "^7.0.0-0" } }, - "node_modules/@babel/plugin-syntax-import-assertions": { - "version": "7.23.3", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-import-assertions/-/plugin-syntax-import-assertions-7.23.3.tgz", - "integrity": "sha512-lPgDSU+SJLK3xmFDTV2ZRQAiM7UuUjGidwBywFavObCiZc1BeAAcMtHJKUya92hPHO+at63JJPLygilZard8jw==", + "node_modules/@babel/plugin-syntax-typescript": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-typescript/-/plugin-syntax-typescript-7.27.1.tgz", + "integrity": "sha512-xfYCBMxveHrRMnAWl1ZlPXOZjzkN82THFvLhQhFXFt81Z5HnN+EtUkZhv/zcKpmT3fzmWZB0ywiBrbC3vogbwQ==", + "license": "MIT", "dependencies": { - "@babel/helper-plugin-utils": "^7.22.5" + "@babel/helper-plugin-utils": "^7.27.1" }, "engines": { "node": ">=6.9.0" @@ -927,48 +784,62 @@ "@babel/core": "^7.0.0-0" } }, - "node_modules/@babel/plugin-syntax-import-attributes": { - "version": "7.23.3", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-import-attributes/-/plugin-syntax-import-attributes-7.23.3.tgz", - "integrity": "sha512-pawnE0P9g10xgoP7yKr6CK63K2FMsTE+FZidZO/1PwRdzmAPVs+HS1mAURUsgaoxammTJvULUdIkEK0gOcU2tA==", + "node_modules/@babel/plugin-syntax-unicode-sets-regex": { + "version": "7.18.6", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-unicode-sets-regex/-/plugin-syntax-unicode-sets-regex-7.18.6.tgz", + "integrity": "sha512-727YkEAPwSIQTv5im8QHz3upqp92JTWhidIC81Tdx4VJYIte/VndKf1qKrfnnhPLiPghStWfvC/iFaMCQu7Nqg==", "dependencies": { - "@babel/helper-plugin-utils": "^7.22.5" + "@babel/helper-create-regexp-features-plugin": "^7.18.6", + "@babel/helper-plugin-utils": "^7.18.6" }, "engines": { "node": ">=6.9.0" }, "peerDependencies": { - "@babel/core": "^7.0.0-0" + "@babel/core": "^7.0.0" } }, - "node_modules/@babel/plugin-syntax-import-meta": { - "version": "7.10.4", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-import-meta/-/plugin-syntax-import-meta-7.10.4.tgz", - "integrity": "sha512-Yqfm+XDx0+Prh3VSeEQCPU81yC+JWZ2pDPFSS4ZdpfZhp4MkFMaDC1UqseovEKwSUpnIL7+vK+Clp7bfh0iD7g==", + 
"node_modules/@babel/plugin-transform-arrow-functions": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-arrow-functions/-/plugin-transform-arrow-functions-7.27.1.tgz", + "integrity": "sha512-8Z4TGic6xW70FKThA5HYEKKyBpOOsucTOD1DjU3fZxDg+K3zBJcXMFnt/4yQiZnf5+MiOMSXQ9PaEK/Ilh1DeA==", + "license": "MIT", "dependencies": { - "@babel/helper-plugin-utils": "^7.10.4" + "@babel/helper-plugin-utils": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" }, "peerDependencies": { "@babel/core": "^7.0.0-0" } }, - "node_modules/@babel/plugin-syntax-json-strings": { - "version": "7.8.3", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-json-strings/-/plugin-syntax-json-strings-7.8.3.tgz", - "integrity": "sha512-lY6kdGpWHvjoe2vk4WrAapEuBR69EMxZl+RoGRhrFGNYVK8mOPAW8VfbT/ZgrFbXlDNiiaxQnAtgVCZ6jv30EA==", + "node_modules/@babel/plugin-transform-async-generator-functions": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-async-generator-functions/-/plugin-transform-async-generator-functions-7.27.1.tgz", + "integrity": "sha512-eST9RrwlpaoJBDHShc+DS2SG4ATTi2MYNb4OxYkf3n+7eb49LWpnS+HSpVfW4x927qQwgk8A2hGNVaajAEw0EA==", + "license": "MIT", "dependencies": { - "@babel/helper-plugin-utils": "^7.8.0" + "@babel/helper-plugin-utils": "^7.27.1", + "@babel/helper-remap-async-to-generator": "^7.27.1", + "@babel/traverse": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" }, "peerDependencies": { "@babel/core": "^7.0.0-0" } }, - "node_modules/@babel/plugin-syntax-jsx": { - "version": "7.23.3", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-jsx/-/plugin-syntax-jsx-7.23.3.tgz", - "integrity": "sha512-EB2MELswq55OHUoRZLGg/zC7QWUKfNLpE57m/S2yr1uEneIgsTgrSzXP3NXEsMkVn76OlaVVnzN+ugObuYGwhg==", + "node_modules/@babel/plugin-transform-async-to-generator": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-async-to-generator/-/plugin-transform-async-to-generator-7.27.1.tgz", + "integrity": "sha512-NREkZsZVJS4xmTr8qzE5y8AfIPqsdQfRuUiLRTEzb7Qii8iFWCyDKaUV2c0rCuh4ljDZ98ALHP/PetiBV2nddA==", + "license": "MIT", "dependencies": { - "@babel/helper-plugin-utils": "^7.22.5" + "@babel/helper-module-imports": "^7.27.1", + "@babel/helper-plugin-utils": "^7.27.1", + "@babel/helper-remap-async-to-generator": "^7.27.1" }, "engines": { "node": ">=6.9.0" @@ -977,78 +848,111 @@ "@babel/core": "^7.0.0-0" } }, - "node_modules/@babel/plugin-syntax-logical-assignment-operators": { - "version": "7.10.4", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-logical-assignment-operators/-/plugin-syntax-logical-assignment-operators-7.10.4.tgz", - "integrity": "sha512-d8waShlpFDinQ5MtvGU9xDAOzKH47+FFoney2baFIoMr952hKOLp1HR7VszoZvOsV/4+RRszNY7D17ba0te0ig==", + "node_modules/@babel/plugin-transform-block-scoped-functions": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-block-scoped-functions/-/plugin-transform-block-scoped-functions-7.27.1.tgz", + "integrity": "sha512-cnqkuOtZLapWYZUYM5rVIdv1nXYuFVIltZ6ZJ7nIj585QsjKM5dhL2Fu/lICXZ1OyIAFc7Qy+bvDAtTXqGrlhg==", + "license": "MIT", "dependencies": { - "@babel/helper-plugin-utils": "^7.10.4" + "@babel/helper-plugin-utils": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" }, "peerDependencies": { "@babel/core": "^7.0.0-0" } }, - "node_modules/@babel/plugin-syntax-nullish-coalescing-operator": { - "version": "7.8.3", - "resolved": 
"https://registry.npmjs.org/@babel/plugin-syntax-nullish-coalescing-operator/-/plugin-syntax-nullish-coalescing-operator-7.8.3.tgz", - "integrity": "sha512-aSff4zPII1u2QD7y+F8oDsz19ew4IGEJg9SVW+bqwpwtfFleiQDMdzA/R+UlWDzfnHFCxxleFT0PMIrR36XLNQ==", + "node_modules/@babel/plugin-transform-block-scoping": { + "version": "7.27.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-block-scoping/-/plugin-transform-block-scoping-7.27.5.tgz", + "integrity": "sha512-JF6uE2s67f0y2RZcm2kpAUEbD50vH62TyWVebxwHAlbSdM49VqPz8t4a1uIjp4NIOIZ4xzLfjY5emt/RCyC7TQ==", + "license": "MIT", "dependencies": { - "@babel/helper-plugin-utils": "^7.8.0" + "@babel/helper-plugin-utils": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" }, "peerDependencies": { "@babel/core": "^7.0.0-0" } }, - "node_modules/@babel/plugin-syntax-numeric-separator": { - "version": "7.10.4", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-numeric-separator/-/plugin-syntax-numeric-separator-7.10.4.tgz", - "integrity": "sha512-9H6YdfkcK/uOnY/K7/aA2xpzaAgkQn37yzWUMRK7OaPOqOpGS1+n0H5hxT9AUw9EsSjPW8SVyMJwYRtWs3X3ug==", + "node_modules/@babel/plugin-transform-class-properties": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-class-properties/-/plugin-transform-class-properties-7.27.1.tgz", + "integrity": "sha512-D0VcalChDMtuRvJIu3U/fwWjf8ZMykz5iZsg77Nuj821vCKI3zCyRLwRdWbsuJ/uRwZhZ002QtCqIkwC/ZkvbA==", + "license": "MIT", "dependencies": { - "@babel/helper-plugin-utils": "^7.10.4" + "@babel/helper-create-class-features-plugin": "^7.27.1", + "@babel/helper-plugin-utils": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" }, "peerDependencies": { "@babel/core": "^7.0.0-0" } }, - "node_modules/@babel/plugin-syntax-object-rest-spread": { - "version": "7.8.3", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-object-rest-spread/-/plugin-syntax-object-rest-spread-7.8.3.tgz", - "integrity": "sha512-XoqMijGZb9y3y2XskN+P1wUGiVwWZ5JmoDRwx5+3GmEplNyVM2s2Dg8ILFQm8rWM48orGy5YpI5Bl8U1y7ydlA==", + "node_modules/@babel/plugin-transform-class-static-block": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-class-static-block/-/plugin-transform-class-static-block-7.27.1.tgz", + "integrity": "sha512-s734HmYU78MVzZ++joYM+NkJusItbdRcbm+AGRgJCt3iA+yux0QpD9cBVdz3tKyrjVYWRl7j0mHSmv4lhV0aoA==", + "license": "MIT", "dependencies": { - "@babel/helper-plugin-utils": "^7.8.0" + "@babel/helper-create-class-features-plugin": "^7.27.1", + "@babel/helper-plugin-utils": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" }, "peerDependencies": { - "@babel/core": "^7.0.0-0" + "@babel/core": "^7.12.0" } }, - "node_modules/@babel/plugin-syntax-optional-catch-binding": { - "version": "7.8.3", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-optional-catch-binding/-/plugin-syntax-optional-catch-binding-7.8.3.tgz", - "integrity": "sha512-6VPD0Pc1lpTqw0aKoeRTMiB+kWhAoT24PA+ksWSBrFtl5SIRVpZlwN3NNPQjehA2E/91FV3RjLWoVTglWcSV3Q==", + "node_modules/@babel/plugin-transform-classes": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-classes/-/plugin-transform-classes-7.27.1.tgz", + "integrity": "sha512-7iLhfFAubmpeJe/Wo2TVuDrykh/zlWXLzPNdL0Jqn/Xu8R3QQ8h9ff8FQoISZOsw74/HFqFI7NX63HN7QFIHKA==", + "license": "MIT", "dependencies": { - "@babel/helper-plugin-utils": "^7.8.0" + "@babel/helper-annotate-as-pure": "^7.27.1", + "@babel/helper-compilation-targets": "^7.27.1", + "@babel/helper-plugin-utils": "^7.27.1", + 
"@babel/helper-replace-supers": "^7.27.1", + "@babel/traverse": "^7.27.1", + "globals": "^11.1.0" + }, + "engines": { + "node": ">=6.9.0" }, "peerDependencies": { "@babel/core": "^7.0.0-0" } }, - "node_modules/@babel/plugin-syntax-optional-chaining": { - "version": "7.8.3", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-optional-chaining/-/plugin-syntax-optional-chaining-7.8.3.tgz", - "integrity": "sha512-KoK9ErH1MBlCPxV0VANkXW2/dw4vlbGDrFgz8bmUsBGYkFRcbRwMh6cIJubdPrkxRwuGdtCk0v/wPTKbQgBjkg==", + "node_modules/@babel/plugin-transform-computed-properties": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-computed-properties/-/plugin-transform-computed-properties-7.27.1.tgz", + "integrity": "sha512-lj9PGWvMTVksbWiDT2tW68zGS/cyo4AkZ/QTp0sQT0mjPopCmrSkzxeXkznjqBxzDI6TclZhOJbBmbBLjuOZUw==", + "license": "MIT", "dependencies": { - "@babel/helper-plugin-utils": "^7.8.0" + "@babel/helper-plugin-utils": "^7.27.1", + "@babel/template": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" }, "peerDependencies": { "@babel/core": "^7.0.0-0" } }, - "node_modules/@babel/plugin-syntax-private-property-in-object": { - "version": "7.14.5", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-private-property-in-object/-/plugin-syntax-private-property-in-object-7.14.5.tgz", - "integrity": "sha512-0wVnp9dxJ72ZUJDV27ZfbSj6iHLoytYZmh3rFcxNnvsJF3ktkzLDZPy/mA17HGsaQT3/DQsWYX1f1QGWkCoVUg==", + "node_modules/@babel/plugin-transform-destructuring": { + "version": "7.27.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-destructuring/-/plugin-transform-destructuring-7.27.3.tgz", + "integrity": "sha512-s4Jrok82JpiaIprtY2nHsYmrThKvvwgHwjgd7UMiYhZaN0asdXNLr0y+NjTfkA7SyQE5i2Fb7eawUOZmLvyqOA==", + "license": "MIT", "dependencies": { - "@babel/helper-plugin-utils": "^7.14.5" + "@babel/helper-plugin-utils": "^7.27.1" }, "engines": { "node": ">=6.9.0" @@ -1057,12 +961,14 @@ "@babel/core": "^7.0.0-0" } }, - "node_modules/@babel/plugin-syntax-top-level-await": { - "version": "7.14.5", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-top-level-await/-/plugin-syntax-top-level-await-7.14.5.tgz", - "integrity": "sha512-hx++upLv5U1rgYfwe1xBQUhRmU41NEvpUvrp8jkrSCdvGSnM5/qdRMtylJ6PG5OFkBaHkbTAKTnd3/YyESRHFw==", + "node_modules/@babel/plugin-transform-dotall-regex": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-dotall-regex/-/plugin-transform-dotall-regex-7.27.1.tgz", + "integrity": "sha512-gEbkDVGRvjj7+T1ivxrfgygpT7GUd4vmODtYpbs0gZATdkX8/iSnOtZSxiZnsgm1YjTgjI6VKBGSJJevkrclzw==", + "license": "MIT", "dependencies": { - "@babel/helper-plugin-utils": "^7.14.5" + "@babel/helper-create-regexp-features-plugin": "^7.27.1", + "@babel/helper-plugin-utils": "^7.27.1" }, "engines": { "node": ">=6.9.0" @@ -1071,12 +977,13 @@ "@babel/core": "^7.0.0-0" } }, - "node_modules/@babel/plugin-syntax-typescript": { - "version": "7.23.3", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-typescript/-/plugin-syntax-typescript-7.23.3.tgz", - "integrity": "sha512-9EiNjVJOMwCO+43TqoTrgQ8jMwcAd0sWyXi9RPfIsLTj4R2MADDDQXELhffaUx/uJv2AYcxBgPwH6j4TIA4ytQ==", + "node_modules/@babel/plugin-transform-duplicate-keys": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-duplicate-keys/-/plugin-transform-duplicate-keys-7.27.1.tgz", + "integrity": "sha512-MTyJk98sHvSs+cvZ4nOauwTTG1JeonDjSGvGGUNHreGQns+Mpt6WX/dVzWBHgg+dYZhkC4X+zTDfkTU+Vy9y7Q==", + "license": "MIT", "dependencies": 
{ - "@babel/helper-plugin-utils": "^7.22.5" + "@babel/helper-plugin-utils": "^7.27.1" }, "engines": { "node": ">=6.9.0" @@ -1085,13 +992,14 @@ "@babel/core": "^7.0.0-0" } }, - "node_modules/@babel/plugin-syntax-unicode-sets-regex": { - "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-unicode-sets-regex/-/plugin-syntax-unicode-sets-regex-7.18.6.tgz", - "integrity": "sha512-727YkEAPwSIQTv5im8QHz3upqp92JTWhidIC81Tdx4VJYIte/VndKf1qKrfnnhPLiPghStWfvC/iFaMCQu7Nqg==", + "node_modules/@babel/plugin-transform-duplicate-named-capturing-groups-regex": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-duplicate-named-capturing-groups-regex/-/plugin-transform-duplicate-named-capturing-groups-regex-7.27.1.tgz", + "integrity": "sha512-hkGcueTEzuhB30B3eJCbCYeCaaEQOmQR0AdvzpD4LoN0GXMWzzGSuRrxR2xTnCrvNbVwK9N6/jQ92GSLfiZWoQ==", + "license": "MIT", "dependencies": { - "@babel/helper-create-regexp-features-plugin": "^7.18.6", - "@babel/helper-plugin-utils": "^7.18.6" + "@babel/helper-create-regexp-features-plugin": "^7.27.1", + "@babel/helper-plugin-utils": "^7.27.1" }, "engines": { "node": ">=6.9.0" @@ -1100,199 +1008,13 @@ "@babel/core": "^7.0.0" } }, - "node_modules/@babel/plugin-transform-arrow-functions": { - "version": "7.23.3", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-arrow-functions/-/plugin-transform-arrow-functions-7.23.3.tgz", - "integrity": "sha512-NzQcQrzaQPkaEwoTm4Mhyl8jI1huEL/WWIEvudjTCMJ9aBZNpsJbMASx7EQECtQQPS/DcnFpo0FIh3LvEO9cxQ==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.22.5" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-async-generator-functions": { - "version": "7.23.7", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-async-generator-functions/-/plugin-transform-async-generator-functions-7.23.7.tgz", - "integrity": "sha512-PdxEpL71bJp1byMG0va5gwQcXHxuEYC/BgI/e88mGTtohbZN28O5Yit0Plkkm/dBzCF/BxmbNcses1RH1T+urA==", - "dependencies": { - "@babel/helper-environment-visitor": "^7.22.20", - "@babel/helper-plugin-utils": "^7.22.5", - "@babel/helper-remap-async-to-generator": "^7.22.20", - "@babel/plugin-syntax-async-generators": "^7.8.4" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-async-to-generator": { - "version": "7.23.3", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-async-to-generator/-/plugin-transform-async-to-generator-7.23.3.tgz", - "integrity": "sha512-A7LFsKi4U4fomjqXJlZg/u0ft/n8/7n7lpffUP/ZULx/DtV9SGlNKZolHH6PE8Xl1ngCc0M11OaeZptXVkfKSw==", - "dependencies": { - "@babel/helper-module-imports": "^7.22.15", - "@babel/helper-plugin-utils": "^7.22.5", - "@babel/helper-remap-async-to-generator": "^7.22.20" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-block-scoped-functions": { - "version": "7.23.3", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-block-scoped-functions/-/plugin-transform-block-scoped-functions-7.23.3.tgz", - "integrity": "sha512-vI+0sIaPIO6CNuM9Kk5VmXcMVRiOpDh7w2zZt9GXzmE/9KD70CUEVhvPR/etAeNK/FAEkhxQtXOzVF3EuRL41A==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.22.5" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - 
"node_modules/@babel/plugin-transform-block-scoping": { - "version": "7.23.4", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-block-scoping/-/plugin-transform-block-scoping-7.23.4.tgz", - "integrity": "sha512-0QqbP6B6HOh7/8iNR4CQU2Th/bbRtBp4KS9vcaZd1fZ0wSh5Fyssg0UCIHwxh+ka+pNDREbVLQnHCMHKZfPwfw==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.22.5" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-class-properties": { - "version": "7.23.3", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-class-properties/-/plugin-transform-class-properties-7.23.3.tgz", - "integrity": "sha512-uM+AN8yCIjDPccsKGlw271xjJtGii+xQIF/uMPS8H15L12jZTsLfF4o5vNO7d/oUguOyfdikHGc/yi9ge4SGIg==", - "dependencies": { - "@babel/helper-create-class-features-plugin": "^7.22.15", - "@babel/helper-plugin-utils": "^7.22.5" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-class-static-block": { - "version": "7.23.4", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-class-static-block/-/plugin-transform-class-static-block-7.23.4.tgz", - "integrity": "sha512-nsWu/1M+ggti1SOALj3hfx5FXzAY06fwPJsUZD4/A5e1bWi46VUIWtD+kOX6/IdhXGsXBWllLFDSnqSCdUNydQ==", - "dependencies": { - "@babel/helper-create-class-features-plugin": "^7.22.15", - "@babel/helper-plugin-utils": "^7.22.5", - "@babel/plugin-syntax-class-static-block": "^7.14.5" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.12.0" - } - }, - "node_modules/@babel/plugin-transform-classes": { - "version": "7.23.5", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-classes/-/plugin-transform-classes-7.23.5.tgz", - "integrity": "sha512-jvOTR4nicqYC9yzOHIhXG5emiFEOpappSJAl73SDSEDcybD+Puuze8Tnpb9p9qEyYup24tq891gkaygIFvWDqg==", - "dependencies": { - "@babel/helper-annotate-as-pure": "^7.22.5", - "@babel/helper-compilation-targets": "^7.22.15", - "@babel/helper-environment-visitor": "^7.22.20", - "@babel/helper-function-name": "^7.23.0", - "@babel/helper-optimise-call-expression": "^7.22.5", - "@babel/helper-plugin-utils": "^7.22.5", - "@babel/helper-replace-supers": "^7.22.20", - "@babel/helper-split-export-declaration": "^7.22.6", - "globals": "^11.1.0" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-computed-properties": { - "version": "7.23.3", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-computed-properties/-/plugin-transform-computed-properties-7.23.3.tgz", - "integrity": "sha512-dTj83UVTLw/+nbiHqQSFdwO9CbTtwq1DsDqm3CUEtDrZNET5rT5E6bIdTlOftDTDLMYxvxHNEYO4B9SLl8SLZw==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.22.5", - "@babel/template": "^7.22.15" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-destructuring": { - "version": "7.23.3", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-destructuring/-/plugin-transform-destructuring-7.23.3.tgz", - "integrity": "sha512-n225npDqjDIr967cMScVKHXJs7rout1q+tt50inyBCPkyZ8KxeI6d+GIbSBTT/w/9WdlWDOej3V9HE5Lgk57gw==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.22.5" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - 
"node_modules/@babel/plugin-transform-dotall-regex": { - "version": "7.23.3", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-dotall-regex/-/plugin-transform-dotall-regex-7.23.3.tgz", - "integrity": "sha512-vgnFYDHAKzFaTVp+mneDsIEbnJ2Np/9ng9iviHw3P/KVcgONxpNULEW/51Z/BaFojG2GI2GwwXck5uV1+1NOYQ==", - "dependencies": { - "@babel/helper-create-regexp-features-plugin": "^7.22.15", - "@babel/helper-plugin-utils": "^7.22.5" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-transform-duplicate-keys": { - "version": "7.23.3", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-duplicate-keys/-/plugin-transform-duplicate-keys-7.23.3.tgz", - "integrity": "sha512-RrqQ+BQmU3Oyav3J+7/myfvRCq7Tbz+kKLLshUmMwNlDHExbGL7ARhajvoBJEvc+fCguPPu887N+3RRXBVKZUA==", - "dependencies": { - "@babel/helper-plugin-utils": "^7.22.5" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, "node_modules/@babel/plugin-transform-dynamic-import": { - "version": "7.23.4", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-dynamic-import/-/plugin-transform-dynamic-import-7.23.4.tgz", - "integrity": "sha512-V6jIbLhdJK86MaLh4Jpghi8ho5fGzt3imHOBu/x0jlBaPYqDoWz4RDXjmMOfnh+JWNaQleEAByZLV0QzBT4YQQ==", + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-dynamic-import/-/plugin-transform-dynamic-import-7.27.1.tgz", + "integrity": "sha512-MHzkWQcEmjzzVW9j2q8LGjwGWpG2mjwaaB0BNQwst3FIjqsg8Ct/mIZlvSPJvfi9y2AC8mi/ktxbFVL9pZ1I4A==", + "license": "MIT", "dependencies": { - "@babel/helper-plugin-utils": "^7.22.5", - "@babel/plugin-syntax-dynamic-import": "^7.8.3" + "@babel/helper-plugin-utils": "^7.27.1" }, "engines": { "node": ">=6.9.0" @@ -1302,12 +1024,12 @@ } }, "node_modules/@babel/plugin-transform-exponentiation-operator": { - "version": "7.23.3", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-exponentiation-operator/-/plugin-transform-exponentiation-operator-7.23.3.tgz", - "integrity": "sha512-5fhCsl1odX96u7ILKHBj4/Y8vipoqwsJMh4csSA8qFfxrZDEA4Ssku2DyNvMJSmZNOEBT750LfFPbtrnTP90BQ==", + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-exponentiation-operator/-/plugin-transform-exponentiation-operator-7.27.1.tgz", + "integrity": "sha512-uspvXnhHvGKf2r4VVtBpeFnuDWsJLQ6MF6lGJLC89jBR1uoVeqM416AZtTuhTezOfgHicpJQmoD5YUakO/YmXQ==", + "license": "MIT", "dependencies": { - "@babel/helper-builder-binary-assignment-operator-visitor": "^7.22.15", - "@babel/helper-plugin-utils": "^7.22.5" + "@babel/helper-plugin-utils": "^7.27.1" }, "engines": { "node": ">=6.9.0" @@ -1317,12 +1039,12 @@ } }, "node_modules/@babel/plugin-transform-export-namespace-from": { - "version": "7.23.4", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-export-namespace-from/-/plugin-transform-export-namespace-from-7.23.4.tgz", - "integrity": "sha512-GzuSBcKkx62dGzZI1WVgTWvkkz84FZO5TC5T8dl/Tht/rAla6Dg/Mz9Yhypg+ezVACf/rgDuQt3kbWEv7LdUDQ==", + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-export-namespace-from/-/plugin-transform-export-namespace-from-7.27.1.tgz", + "integrity": "sha512-tQvHWSZ3/jH2xuq/vZDy0jNn+ZdXJeM8gHvX4lnJmsc3+50yPlWdZXIc5ay+umX+2/tJIqHqiEqcJvxlmIvRvQ==", + "license": "MIT", "dependencies": { - "@babel/helper-plugin-utils": "^7.22.5", - "@babel/plugin-syntax-export-namespace-from": "^7.8.3" + "@babel/helper-plugin-utils": "^7.27.1" 
}, "engines": { "node": ">=6.9.0" @@ -1332,12 +1054,13 @@ } }, "node_modules/@babel/plugin-transform-for-of": { - "version": "7.23.6", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-for-of/-/plugin-transform-for-of-7.23.6.tgz", - "integrity": "sha512-aYH4ytZ0qSuBbpfhuofbg/e96oQ7U2w1Aw/UQmKT+1l39uEhUPoFS3fHevDc1G0OvewyDudfMKY1OulczHzWIw==", + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-for-of/-/plugin-transform-for-of-7.27.1.tgz", + "integrity": "sha512-BfbWFFEJFQzLCQ5N8VocnCtA8J1CLkNTe2Ms2wocj75dd6VpiqS5Z5quTYcUoo4Yq+DN0rtikODccuv7RU81sw==", + "license": "MIT", "dependencies": { - "@babel/helper-plugin-utils": "^7.22.5", - "@babel/helper-skip-transparent-expression-wrappers": "^7.22.5" + "@babel/helper-plugin-utils": "^7.27.1", + "@babel/helper-skip-transparent-expression-wrappers": "^7.27.1" }, "engines": { "node": ">=6.9.0" @@ -1347,13 +1070,14 @@ } }, "node_modules/@babel/plugin-transform-function-name": { - "version": "7.23.3", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-function-name/-/plugin-transform-function-name-7.23.3.tgz", - "integrity": "sha512-I1QXp1LxIvt8yLaib49dRW5Okt7Q4oaxao6tFVKS/anCdEOMtYwWVKoiOA1p34GOWIZjUK0E+zCp7+l1pfQyiw==", + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-function-name/-/plugin-transform-function-name-7.27.1.tgz", + "integrity": "sha512-1bQeydJF9Nr1eBCMMbC+hdwmRlsv5XYOMu03YSWFwNs0HsAmtSxxF1fyuYPqemVldVyFmlCU7w8UE14LupUSZQ==", + "license": "MIT", "dependencies": { - "@babel/helper-compilation-targets": "^7.22.15", - "@babel/helper-function-name": "^7.23.0", - "@babel/helper-plugin-utils": "^7.22.5" + "@babel/helper-compilation-targets": "^7.27.1", + "@babel/helper-plugin-utils": "^7.27.1", + "@babel/traverse": "^7.27.1" }, "engines": { "node": ">=6.9.0" @@ -1363,12 +1087,12 @@ } }, "node_modules/@babel/plugin-transform-json-strings": { - "version": "7.23.4", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-json-strings/-/plugin-transform-json-strings-7.23.4.tgz", - "integrity": "sha512-81nTOqM1dMwZ/aRXQ59zVubN9wHGqk6UtqRK+/q+ciXmRy8fSolhGVvG09HHRGo4l6fr/c4ZhXUQH0uFW7PZbg==", + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-json-strings/-/plugin-transform-json-strings-7.27.1.tgz", + "integrity": "sha512-6WVLVJiTjqcQauBhn1LkICsR2H+zm62I3h9faTDKt1qP4jn2o72tSvqMwtGFKGTpojce0gJs+76eZ2uCHRZh0Q==", + "license": "MIT", "dependencies": { - "@babel/helper-plugin-utils": "^7.22.5", - "@babel/plugin-syntax-json-strings": "^7.8.3" + "@babel/helper-plugin-utils": "^7.27.1" }, "engines": { "node": ">=6.9.0" @@ -1378,11 +1102,12 @@ } }, "node_modules/@babel/plugin-transform-literals": { - "version": "7.23.3", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-literals/-/plugin-transform-literals-7.23.3.tgz", - "integrity": "sha512-wZ0PIXRxnwZvl9AYpqNUxpZ5BiTGrYt7kueGQ+N5FiQ7RCOD4cm8iShd6S6ggfVIWaJf2EMk8eRzAh52RfP4rQ==", + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-literals/-/plugin-transform-literals-7.27.1.tgz", + "integrity": "sha512-0HCFSepIpLTkLcsi86GG3mTUzxV5jpmbv97hTETW3yzrAij8aqlD36toB1D0daVFJM8NK6GvKO0gslVQmm+zZA==", + "license": "MIT", "dependencies": { - "@babel/helper-plugin-utils": "^7.22.5" + "@babel/helper-plugin-utils": "^7.27.1" }, "engines": { "node": ">=6.9.0" @@ -1392,12 +1117,12 @@ } }, "node_modules/@babel/plugin-transform-logical-assignment-operators": { - "version": "7.23.4", - "resolved": 
"https://registry.npmjs.org/@babel/plugin-transform-logical-assignment-operators/-/plugin-transform-logical-assignment-operators-7.23.4.tgz", - "integrity": "sha512-Mc/ALf1rmZTP4JKKEhUwiORU+vcfarFVLfcFiolKUo6sewoxSEgl36ak5t+4WamRsNr6nzjZXQjM35WsU+9vbg==", + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-logical-assignment-operators/-/plugin-transform-logical-assignment-operators-7.27.1.tgz", + "integrity": "sha512-SJvDs5dXxiae4FbSL1aBJlG4wvl594N6YEVVn9e3JGulwioy6z3oPjx/sQBO3Y4NwUu5HNix6KJ3wBZoewcdbw==", + "license": "MIT", "dependencies": { - "@babel/helper-plugin-utils": "^7.22.5", - "@babel/plugin-syntax-logical-assignment-operators": "^7.10.4" + "@babel/helper-plugin-utils": "^7.27.1" }, "engines": { "node": ">=6.9.0" @@ -1407,11 +1132,12 @@ } }, "node_modules/@babel/plugin-transform-member-expression-literals": { - "version": "7.23.3", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-member-expression-literals/-/plugin-transform-member-expression-literals-7.23.3.tgz", - "integrity": "sha512-sC3LdDBDi5x96LA+Ytekz2ZPk8i/Ck+DEuDbRAll5rknJ5XRTSaPKEYwomLcs1AA8wg9b3KjIQRsnApj+q51Ag==", + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-member-expression-literals/-/plugin-transform-member-expression-literals-7.27.1.tgz", + "integrity": "sha512-hqoBX4dcZ1I33jCSWcXrP+1Ku7kdqXf1oeah7ooKOIiAdKQ+uqftgCFNOSzA5AMS2XIHEYeGFg4cKRCdpxzVOQ==", + "license": "MIT", "dependencies": { - "@babel/helper-plugin-utils": "^7.22.5" + "@babel/helper-plugin-utils": "^7.27.1" }, "engines": { "node": ">=6.9.0" @@ -1421,12 +1147,13 @@ } }, "node_modules/@babel/plugin-transform-modules-amd": { - "version": "7.23.3", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-amd/-/plugin-transform-modules-amd-7.23.3.tgz", - "integrity": "sha512-vJYQGxeKM4t8hYCKVBlZX/gtIY2I7mRGFNcm85sgXGMTBcoV3QdVtdpbcWEbzbfUIUZKwvgFT82mRvaQIebZzw==", + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-amd/-/plugin-transform-modules-amd-7.27.1.tgz", + "integrity": "sha512-iCsytMg/N9/oFq6n+gFTvUYDZQOMK5kEdeYxmxt91fcJGycfxVP9CnrxoliM0oumFERba2i8ZtwRUCMhvP1LnA==", + "license": "MIT", "dependencies": { - "@babel/helper-module-transforms": "^7.23.3", - "@babel/helper-plugin-utils": "^7.22.5" + "@babel/helper-module-transforms": "^7.27.1", + "@babel/helper-plugin-utils": "^7.27.1" }, "engines": { "node": ">=6.9.0" @@ -1436,13 +1163,13 @@ } }, "node_modules/@babel/plugin-transform-modules-commonjs": { - "version": "7.23.3", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-commonjs/-/plugin-transform-modules-commonjs-7.23.3.tgz", - "integrity": "sha512-aVS0F65LKsdNOtcz6FRCpE4OgsP2OFnW46qNxNIX9h3wuzaNcSQsJysuMwqSibC98HPrf2vCgtxKNwS0DAlgcA==", + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-commonjs/-/plugin-transform-modules-commonjs-7.27.1.tgz", + "integrity": "sha512-OJguuwlTYlN0gBZFRPqwOGNWssZjfIUdS7HMYtN8c1KmwpwHFBwTeFZrg9XZa+DFTitWOW5iTAG7tyCUPsCCyw==", + "license": "MIT", "dependencies": { - "@babel/helper-module-transforms": "^7.23.3", - "@babel/helper-plugin-utils": "^7.22.5", - "@babel/helper-simple-access": "^7.22.5" + "@babel/helper-module-transforms": "^7.27.1", + "@babel/helper-plugin-utils": "^7.27.1" }, "engines": { "node": ">=6.9.0" @@ -1452,14 +1179,15 @@ } }, "node_modules/@babel/plugin-transform-modules-systemjs": { - "version": "7.23.3", - "resolved": 
"https://registry.npmjs.org/@babel/plugin-transform-modules-systemjs/-/plugin-transform-modules-systemjs-7.23.3.tgz", - "integrity": "sha512-ZxyKGTkF9xT9YJuKQRo19ewf3pXpopuYQd8cDXqNzc3mUNbOME0RKMoZxviQk74hwzfQsEe66dE92MaZbdHKNQ==", + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-systemjs/-/plugin-transform-modules-systemjs-7.27.1.tgz", + "integrity": "sha512-w5N1XzsRbc0PQStASMksmUeqECuzKuTJer7kFagK8AXgpCMkeDMO5S+aaFb7A51ZYDF7XI34qsTX+fkHiIm5yA==", + "license": "MIT", "dependencies": { - "@babel/helper-hoist-variables": "^7.22.5", - "@babel/helper-module-transforms": "^7.23.3", - "@babel/helper-plugin-utils": "^7.22.5", - "@babel/helper-validator-identifier": "^7.22.20" + "@babel/helper-module-transforms": "^7.27.1", + "@babel/helper-plugin-utils": "^7.27.1", + "@babel/helper-validator-identifier": "^7.27.1", + "@babel/traverse": "^7.27.1" }, "engines": { "node": ">=6.9.0" @@ -1469,12 +1197,13 @@ } }, "node_modules/@babel/plugin-transform-modules-umd": { - "version": "7.23.3", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-umd/-/plugin-transform-modules-umd-7.23.3.tgz", - "integrity": "sha512-zHsy9iXX2nIsCBFPud3jKn1IRPWg3Ing1qOZgeKV39m1ZgIdpJqvlWVeiHBZC6ITRG0MfskhYe9cLgntfSFPIg==", + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-umd/-/plugin-transform-modules-umd-7.27.1.tgz", + "integrity": "sha512-iQBE/xC5BV1OxJbp6WG7jq9IWiD+xxlZhLrdwpPkTX3ydmXdvoCpyfJN7acaIBZaOqTfr76pgzqBJflNbeRK+w==", + "license": "MIT", "dependencies": { - "@babel/helper-module-transforms": "^7.23.3", - "@babel/helper-plugin-utils": "^7.22.5" + "@babel/helper-module-transforms": "^7.27.1", + "@babel/helper-plugin-utils": "^7.27.1" }, "engines": { "node": ">=6.9.0" @@ -1484,12 +1213,13 @@ } }, "node_modules/@babel/plugin-transform-named-capturing-groups-regex": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-named-capturing-groups-regex/-/plugin-transform-named-capturing-groups-regex-7.22.5.tgz", - "integrity": "sha512-YgLLKmS3aUBhHaxp5hi1WJTgOUb/NCuDHzGT9z9WTt3YG+CPRhJs6nprbStx6DnWM4dh6gt7SU3sZodbZ08adQ==", + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-named-capturing-groups-regex/-/plugin-transform-named-capturing-groups-regex-7.27.1.tgz", + "integrity": "sha512-SstR5JYy8ddZvD6MhV0tM/j16Qds4mIpJTOd1Yu9J9pJjH93bxHECF7pgtc28XvkzTD6Pxcm/0Z73Hvk7kb3Ng==", + "license": "MIT", "dependencies": { - "@babel/helper-create-regexp-features-plugin": "^7.22.5", - "@babel/helper-plugin-utils": "^7.22.5" + "@babel/helper-create-regexp-features-plugin": "^7.27.1", + "@babel/helper-plugin-utils": "^7.27.1" }, "engines": { "node": ">=6.9.0" @@ -1499,11 +1229,12 @@ } }, "node_modules/@babel/plugin-transform-new-target": { - "version": "7.23.3", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-new-target/-/plugin-transform-new-target-7.23.3.tgz", - "integrity": "sha512-YJ3xKqtJMAT5/TIZnpAR3I+K+WaDowYbN3xyxI8zxx/Gsypwf9B9h0VB+1Nh6ACAAPRS5NSRje0uVv5i79HYGQ==", + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-new-target/-/plugin-transform-new-target-7.27.1.tgz", + "integrity": "sha512-f6PiYeqXQ05lYq3TIfIDu/MtliKUbNwkGApPUvyo6+tc7uaR4cPjPe7DFPr15Uyycg2lZU6btZ575CuQoYh7MQ==", + "license": "MIT", "dependencies": { - "@babel/helper-plugin-utils": "^7.22.5" + "@babel/helper-plugin-utils": "^7.27.1" }, "engines": { "node": ">=6.9.0" @@ -1513,12 +1244,12 @@ } }, 
"node_modules/@babel/plugin-transform-nullish-coalescing-operator": { - "version": "7.23.4", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-nullish-coalescing-operator/-/plugin-transform-nullish-coalescing-operator-7.23.4.tgz", - "integrity": "sha512-jHE9EVVqHKAQx+VePv5LLGHjmHSJR76vawFPTdlxR/LVJPfOEGxREQwQfjuZEOPTwG92X3LINSh3M40Rv4zpVA==", + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-nullish-coalescing-operator/-/plugin-transform-nullish-coalescing-operator-7.27.1.tgz", + "integrity": "sha512-aGZh6xMo6q9vq1JGcw58lZ1Z0+i0xB2x0XaauNIUXd6O1xXc3RwoWEBlsTQrY4KQ9Jf0s5rgD6SiNkaUdJegTA==", + "license": "MIT", "dependencies": { - "@babel/helper-plugin-utils": "^7.22.5", - "@babel/plugin-syntax-nullish-coalescing-operator": "^7.8.3" + "@babel/helper-plugin-utils": "^7.27.1" }, "engines": { "node": ">=6.9.0" @@ -1528,12 +1259,12 @@ } }, "node_modules/@babel/plugin-transform-numeric-separator": { - "version": "7.23.4", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-numeric-separator/-/plugin-transform-numeric-separator-7.23.4.tgz", - "integrity": "sha512-mps6auzgwjRrwKEZA05cOwuDc9FAzoyFS4ZsG/8F43bTLf/TgkJg7QXOrPO1JO599iA3qgK9MXdMGOEC8O1h6Q==", + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-numeric-separator/-/plugin-transform-numeric-separator-7.27.1.tgz", + "integrity": "sha512-fdPKAcujuvEChxDBJ5c+0BTaS6revLV7CJL08e4m3de8qJfNIuCc2nc7XJYOjBoTMJeqSmwXJ0ypE14RCjLwaw==", + "license": "MIT", "dependencies": { - "@babel/helper-plugin-utils": "^7.22.5", - "@babel/plugin-syntax-numeric-separator": "^7.10.4" + "@babel/helper-plugin-utils": "^7.27.1" }, "engines": { "node": ">=6.9.0" @@ -1543,15 +1274,15 @@ } }, "node_modules/@babel/plugin-transform-object-rest-spread": { - "version": "7.23.4", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-object-rest-spread/-/plugin-transform-object-rest-spread-7.23.4.tgz", - "integrity": "sha512-9x9K1YyeQVw0iOXJlIzwm8ltobIIv7j2iLyP2jIhEbqPRQ7ScNgwQufU2I0Gq11VjyG4gI4yMXt2VFags+1N3g==", + "version": "7.27.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-object-rest-spread/-/plugin-transform-object-rest-spread-7.27.3.tgz", + "integrity": "sha512-7ZZtznF9g4l2JCImCo5LNKFHB5eXnN39lLtLY5Tg+VkR0jwOt7TBciMckuiQIOIW7L5tkQOCh3bVGYeXgMx52Q==", + "license": "MIT", "dependencies": { - "@babel/compat-data": "^7.23.3", - "@babel/helper-compilation-targets": "^7.22.15", - "@babel/helper-plugin-utils": "^7.22.5", - "@babel/plugin-syntax-object-rest-spread": "^7.8.3", - "@babel/plugin-transform-parameters": "^7.23.3" + "@babel/helper-compilation-targets": "^7.27.2", + "@babel/helper-plugin-utils": "^7.27.1", + "@babel/plugin-transform-destructuring": "^7.27.3", + "@babel/plugin-transform-parameters": "^7.27.1" }, "engines": { "node": ">=6.9.0" @@ -1561,12 +1292,13 @@ } }, "node_modules/@babel/plugin-transform-object-super": { - "version": "7.23.3", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-object-super/-/plugin-transform-object-super-7.23.3.tgz", - "integrity": "sha512-BwQ8q0x2JG+3lxCVFohg+KbQM7plfpBwThdW9A6TMtWwLsbDA01Ek2Zb/AgDN39BiZsExm4qrXxjk+P1/fzGrA==", + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-object-super/-/plugin-transform-object-super-7.27.1.tgz", + "integrity": "sha512-SFy8S9plRPbIcxlJ8A6mT/CxFdJx/c04JEctz4jf8YZaVS2px34j7NXRrlGlHkN/M2gnpL37ZpGRGVFLd3l8Ng==", + "license": "MIT", "dependencies": { - "@babel/helper-plugin-utils": "^7.22.5", - 
"@babel/helper-replace-supers": "^7.22.20" + "@babel/helper-plugin-utils": "^7.27.1", + "@babel/helper-replace-supers": "^7.27.1" }, "engines": { "node": ">=6.9.0" @@ -1576,12 +1308,12 @@ } }, "node_modules/@babel/plugin-transform-optional-catch-binding": { - "version": "7.23.4", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-optional-catch-binding/-/plugin-transform-optional-catch-binding-7.23.4.tgz", - "integrity": "sha512-XIq8t0rJPHf6Wvmbn9nFxU6ao4c7WhghTR5WyV8SrJfUFzyxhCm4nhC+iAp3HFhbAKLfYpgzhJ6t4XCtVwqO5A==", + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-optional-catch-binding/-/plugin-transform-optional-catch-binding-7.27.1.tgz", + "integrity": "sha512-txEAEKzYrHEX4xSZN4kJ+OfKXFVSWKB2ZxM9dpcE3wT7smwkNmXo5ORRlVzMVdJbD+Q8ILTgSD7959uj+3Dm3Q==", + "license": "MIT", "dependencies": { - "@babel/helper-plugin-utils": "^7.22.5", - "@babel/plugin-syntax-optional-catch-binding": "^7.8.3" + "@babel/helper-plugin-utils": "^7.27.1" }, "engines": { "node": ">=6.9.0" @@ -1591,13 +1323,13 @@ } }, "node_modules/@babel/plugin-transform-optional-chaining": { - "version": "7.23.4", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-optional-chaining/-/plugin-transform-optional-chaining-7.23.4.tgz", - "integrity": "sha512-ZU8y5zWOfjM5vZ+asjgAPwDaBjJzgufjES89Rs4Lpq63O300R/kOz30WCLo6BxxX6QVEilwSlpClnG5cZaikTA==", + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-optional-chaining/-/plugin-transform-optional-chaining-7.27.1.tgz", + "integrity": "sha512-BQmKPPIuc8EkZgNKsv0X4bPmOoayeu4F1YCwx2/CfmDSXDbp7GnzlUH+/ul5VGfRg1AoFPsrIThlEBj2xb4CAg==", + "license": "MIT", "dependencies": { - "@babel/helper-plugin-utils": "^7.22.5", - "@babel/helper-skip-transparent-expression-wrappers": "^7.22.5", - "@babel/plugin-syntax-optional-chaining": "^7.8.3" + "@babel/helper-plugin-utils": "^7.27.1", + "@babel/helper-skip-transparent-expression-wrappers": "^7.27.1" }, "engines": { "node": ">=6.9.0" @@ -1607,11 +1339,12 @@ } }, "node_modules/@babel/plugin-transform-parameters": { - "version": "7.23.3", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-parameters/-/plugin-transform-parameters-7.23.3.tgz", - "integrity": "sha512-09lMt6UsUb3/34BbECKVbVwrT9bO6lILWln237z7sLaWnMsTi7Yc9fhX5DLpkJzAGfaReXI22wP41SZmnAA3Vw==", + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-parameters/-/plugin-transform-parameters-7.27.1.tgz", + "integrity": "sha512-018KRk76HWKeZ5l4oTj2zPpSh+NbGdt0st5S6x0pga6HgrjBOJb24mMDHorFopOOd6YHkLgOZ+zaCjZGPO4aKg==", + "license": "MIT", "dependencies": { - "@babel/helper-plugin-utils": "^7.22.5" + "@babel/helper-plugin-utils": "^7.27.1" }, "engines": { "node": ">=6.9.0" @@ -1621,12 +1354,13 @@ } }, "node_modules/@babel/plugin-transform-private-methods": { - "version": "7.23.3", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-private-methods/-/plugin-transform-private-methods-7.23.3.tgz", - "integrity": "sha512-UzqRcRtWsDMTLrRWFvUBDwmw06tCQH9Rl1uAjfh6ijMSmGYQ+fpdB+cnqRC8EMh5tuuxSv0/TejGL+7vyj+50g==", + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-private-methods/-/plugin-transform-private-methods-7.27.1.tgz", + "integrity": "sha512-10FVt+X55AjRAYI9BrdISN9/AQWHqldOeZDUoLyif1Kn05a56xVBXb8ZouL8pZ9jem8QpXaOt8TS7RHUIS+GPA==", + "license": "MIT", "dependencies": { - "@babel/helper-create-class-features-plugin": "^7.22.15", - "@babel/helper-plugin-utils": "^7.22.5" + 
"@babel/helper-create-class-features-plugin": "^7.27.1", + "@babel/helper-plugin-utils": "^7.27.1" }, "engines": { "node": ">=6.9.0" @@ -1636,14 +1370,14 @@ } }, "node_modules/@babel/plugin-transform-private-property-in-object": { - "version": "7.23.4", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-private-property-in-object/-/plugin-transform-private-property-in-object-7.23.4.tgz", - "integrity": "sha512-9G3K1YqTq3F4Vt88Djx1UZ79PDyj+yKRnUy7cZGSMe+a7jkwD259uKKuUzQlPkGam7R+8RJwh5z4xO27fA1o2A==", + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-private-property-in-object/-/plugin-transform-private-property-in-object-7.27.1.tgz", + "integrity": "sha512-5J+IhqTi1XPa0DXF83jYOaARrX+41gOewWbkPyjMNRDqgOCqdffGh8L3f/Ek5utaEBZExjSAzcyjmV9SSAWObQ==", + "license": "MIT", "dependencies": { - "@babel/helper-annotate-as-pure": "^7.22.5", - "@babel/helper-create-class-features-plugin": "^7.22.15", - "@babel/helper-plugin-utils": "^7.22.5", - "@babel/plugin-syntax-private-property-in-object": "^7.14.5" + "@babel/helper-annotate-as-pure": "^7.27.1", + "@babel/helper-create-class-features-plugin": "^7.27.1", + "@babel/helper-plugin-utils": "^7.27.1" }, "engines": { "node": ">=6.9.0" @@ -1653,11 +1387,12 @@ } }, "node_modules/@babel/plugin-transform-property-literals": { - "version": "7.23.3", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-property-literals/-/plugin-transform-property-literals-7.23.3.tgz", - "integrity": "sha512-jR3Jn3y7cZp4oEWPFAlRsSWjxKe4PZILGBSd4nis1TsC5qeSpb+nrtihJuDhNI7QHiVbUaiXa0X2RZY3/TI6Nw==", + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-property-literals/-/plugin-transform-property-literals-7.27.1.tgz", + "integrity": "sha512-oThy3BCuCha8kDZ8ZkgOg2exvPYUlprMukKQXI1r1pJ47NCvxfkEy8vK+r/hT9nF0Aa4H1WUPZZjHTFtAhGfmQ==", + "license": "MIT", "dependencies": { - "@babel/helper-plugin-utils": "^7.22.5" + "@babel/helper-plugin-utils": "^7.27.1" }, "engines": { "node": ">=6.9.0" @@ -1681,11 +1416,12 @@ } }, "node_modules/@babel/plugin-transform-react-display-name": { - "version": "7.23.3", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-display-name/-/plugin-transform-react-display-name-7.23.3.tgz", - "integrity": "sha512-GnvhtVfA2OAtzdX58FJxU19rhoGeQzyVndw3GgtdECQvQFXPEZIOVULHVZGAYmOgmqjXpVpfocAbSjh99V/Fqw==", + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-display-name/-/plugin-transform-react-display-name-7.27.1.tgz", + "integrity": "sha512-p9+Vl3yuHPmkirRrg021XiP+EETmPMQTLr6Ayjj85RLNEbb3Eya/4VI0vAdzQG9SEAl2Lnt7fy5lZyMzjYoZQQ==", + "license": "MIT", "dependencies": { - "@babel/helper-plugin-utils": "^7.22.5" + "@babel/helper-plugin-utils": "^7.27.1" }, "engines": { "node": ">=6.9.0" @@ -1695,15 +1431,16 @@ } }, "node_modules/@babel/plugin-transform-react-jsx": { - "version": "7.23.4", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-jsx/-/plugin-transform-react-jsx-7.23.4.tgz", - "integrity": "sha512-5xOpoPguCZCRbo/JeHlloSkTA8Bld1J/E1/kLfD1nsuiW1m8tduTA1ERCgIZokDflX/IBzKcqR3l7VlRgiIfHA==", + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-jsx/-/plugin-transform-react-jsx-7.27.1.tgz", + "integrity": "sha512-2KH4LWGSrJIkVf5tSiBFYuXDAoWRq2MMwgivCf+93dd0GQi8RXLjKA/0EvRnVV5G0hrHczsquXuD01L8s6dmBw==", + "license": "MIT", "dependencies": { - "@babel/helper-annotate-as-pure": "^7.22.5", - "@babel/helper-module-imports": "^7.22.15", - 
"@babel/helper-plugin-utils": "^7.22.5", - "@babel/plugin-syntax-jsx": "^7.23.3", - "@babel/types": "^7.23.4" + "@babel/helper-annotate-as-pure": "^7.27.1", + "@babel/helper-module-imports": "^7.27.1", + "@babel/helper-plugin-utils": "^7.27.1", + "@babel/plugin-syntax-jsx": "^7.27.1", + "@babel/types": "^7.27.1" }, "engines": { "node": ">=6.9.0" @@ -1713,11 +1450,12 @@ } }, "node_modules/@babel/plugin-transform-react-jsx-development": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-jsx-development/-/plugin-transform-react-jsx-development-7.22.5.tgz", - "integrity": "sha512-bDhuzwWMuInwCYeDeMzyi7TaBgRQei6DqxhbyniL7/VG4RSS7HtSL2QbY4eESy1KJqlWt8g3xeEBGPuo+XqC8A==", + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-jsx-development/-/plugin-transform-react-jsx-development-7.27.1.tgz", + "integrity": "sha512-ykDdF5yI4f1WrAolLqeF3hmYU12j9ntLQl/AOG1HAS21jxyg1Q0/J/tpREuYLfatGdGmXp/3yS0ZA76kOlVq9Q==", + "license": "MIT", "dependencies": { - "@babel/plugin-transform-react-jsx": "^7.22.5" + "@babel/plugin-transform-react-jsx": "^7.27.1" }, "engines": { "node": ">=6.9.0" @@ -1727,12 +1465,13 @@ } }, "node_modules/@babel/plugin-transform-react-pure-annotations": { - "version": "7.23.3", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-pure-annotations/-/plugin-transform-react-pure-annotations-7.23.3.tgz", - "integrity": "sha512-qMFdSS+TUhB7Q/3HVPnEdYJDQIk57jkntAwSuz9xfSE4n+3I+vHYCli3HoHawN1Z3RfCz/y1zXA/JXjG6cVImQ==", + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-pure-annotations/-/plugin-transform-react-pure-annotations-7.27.1.tgz", + "integrity": "sha512-JfuinvDOsD9FVMTHpzA/pBLisxpv1aSf+OIV8lgH3MuWrks19R27e6a6DipIg4aX1Zm9Wpb04p8wljfKrVSnPA==", + "license": "MIT", "dependencies": { - "@babel/helper-annotate-as-pure": "^7.22.5", - "@babel/helper-plugin-utils": "^7.22.5" + "@babel/helper-annotate-as-pure": "^7.27.1", + "@babel/helper-plugin-utils": "^7.27.1" }, "engines": { "node": ">=6.9.0" @@ -1742,12 +1481,12 @@ } }, "node_modules/@babel/plugin-transform-regenerator": { - "version": "7.23.3", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-regenerator/-/plugin-transform-regenerator-7.23.3.tgz", - "integrity": "sha512-KP+75h0KghBMcVpuKisx3XTu9Ncut8Q8TuvGO4IhY+9D5DFEckQefOuIsB/gQ2tG71lCke4NMrtIPS8pOj18BQ==", + "version": "7.27.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-regenerator/-/plugin-transform-regenerator-7.27.5.tgz", + "integrity": "sha512-uhB8yHerfe3MWnuLAhEbeQ4afVoqv8BQsPqrTv7e/jZ9y00kJL6l9a/f4OWaKxotmjzewfEyXE1vgDJenkQ2/Q==", + "license": "MIT", "dependencies": { - "@babel/helper-plugin-utils": "^7.22.5", - "regenerator-transform": "^0.15.2" + "@babel/helper-plugin-utils": "^7.27.1" }, "engines": { "node": ">=6.9.0" @@ -1756,12 +1495,29 @@ "@babel/core": "^7.0.0-0" } }, + "node_modules/@babel/plugin-transform-regexp-modifiers": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-regexp-modifiers/-/plugin-transform-regexp-modifiers-7.27.1.tgz", + "integrity": "sha512-TtEciroaiODtXvLZv4rmfMhkCv8jx3wgKpL68PuiPh2M4fvz5jhsA7697N1gMvkvr/JTF13DrFYyEbY9U7cVPA==", + "license": "MIT", + "dependencies": { + "@babel/helper-create-regexp-features-plugin": "^7.27.1", + "@babel/helper-plugin-utils": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, 
"node_modules/@babel/plugin-transform-reserved-words": { - "version": "7.23.3", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-reserved-words/-/plugin-transform-reserved-words-7.23.3.tgz", - "integrity": "sha512-QnNTazY54YqgGxwIexMZva9gqbPa15t/x9VS+0fsEFWplwVpXYZivtgl43Z1vMpc1bdPP2PP8siFeVcnFvA3Cg==", + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-reserved-words/-/plugin-transform-reserved-words-7.27.1.tgz", + "integrity": "sha512-V2ABPHIJX4kC7HegLkYoDpfg9PVmuWy/i6vUM5eGK22bx4YVFD3M5F0QQnWQoDs6AGsUWTVOopBiMFQgHaSkVw==", + "license": "MIT", "dependencies": { - "@babel/helper-plugin-utils": "^7.22.5" + "@babel/helper-plugin-utils": "^7.27.1" }, "engines": { "node": ">=6.9.0" @@ -1771,15 +1527,16 @@ } }, "node_modules/@babel/plugin-transform-runtime": { - "version": "7.23.7", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-runtime/-/plugin-transform-runtime-7.23.7.tgz", - "integrity": "sha512-fa0hnfmiXc9fq/weK34MUV0drz2pOL/vfKWvN7Qw127hiUPabFCUMgAbYWcchRzMJit4o5ARsK/s+5h0249pLw==", + "version": "7.27.4", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-runtime/-/plugin-transform-runtime-7.27.4.tgz", + "integrity": "sha512-D68nR5zxU64EUzV8i7T3R5XP0Xhrou/amNnddsRQssx6GrTLdZl1rLxyjtVZBd+v/NVX4AbTPOB5aU8thAZV1A==", + "license": "MIT", "dependencies": { - "@babel/helper-module-imports": "^7.22.15", - "@babel/helper-plugin-utils": "^7.22.5", - "babel-plugin-polyfill-corejs2": "^0.4.7", - "babel-plugin-polyfill-corejs3": "^0.8.7", - "babel-plugin-polyfill-regenerator": "^0.5.4", + "@babel/helper-module-imports": "^7.27.1", + "@babel/helper-plugin-utils": "^7.27.1", + "babel-plugin-polyfill-corejs2": "^0.4.10", + "babel-plugin-polyfill-corejs3": "^0.11.0", + "babel-plugin-polyfill-regenerator": "^0.6.1", "semver": "^6.3.1" }, "engines": { @@ -1798,11 +1555,12 @@ } }, "node_modules/@babel/plugin-transform-shorthand-properties": { - "version": "7.23.3", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-shorthand-properties/-/plugin-transform-shorthand-properties-7.23.3.tgz", - "integrity": "sha512-ED2fgqZLmexWiN+YNFX26fx4gh5qHDhn1O2gvEhreLW2iI63Sqm4llRLCXALKrCnbN4Jy0VcMQZl/SAzqug/jg==", + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-shorthand-properties/-/plugin-transform-shorthand-properties-7.27.1.tgz", + "integrity": "sha512-N/wH1vcn4oYawbJ13Y/FxcQrWk63jhfNa7jef0ih7PHSIHX2LB7GWE1rkPrOnka9kwMxb6hMl19p7lidA+EHmQ==", + "license": "MIT", "dependencies": { - "@babel/helper-plugin-utils": "^7.22.5" + "@babel/helper-plugin-utils": "^7.27.1" }, "engines": { "node": ">=6.9.0" @@ -1812,12 +1570,13 @@ } }, "node_modules/@babel/plugin-transform-spread": { - "version": "7.23.3", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-spread/-/plugin-transform-spread-7.23.3.tgz", - "integrity": "sha512-VvfVYlrlBVu+77xVTOAoxQ6mZbnIq5FM0aGBSFEcIh03qHf+zNqA4DC/3XMUozTg7bZV3e3mZQ0i13VB6v5yUg==", + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-spread/-/plugin-transform-spread-7.27.1.tgz", + "integrity": "sha512-kpb3HUqaILBJcRFVhFUs6Trdd4mkrzcGXss+6/mxUd273PfbWqSDHRzMT2234gIg2QYfAjvXLSquP1xECSg09Q==", + "license": "MIT", "dependencies": { - "@babel/helper-plugin-utils": "^7.22.5", - "@babel/helper-skip-transparent-expression-wrappers": "^7.22.5" + "@babel/helper-plugin-utils": "^7.27.1", + "@babel/helper-skip-transparent-expression-wrappers": "^7.27.1" }, "engines": { "node": ">=6.9.0" @@ -1827,11 +1586,12 @@ } }, 
"node_modules/@babel/plugin-transform-sticky-regex": { - "version": "7.23.3", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-sticky-regex/-/plugin-transform-sticky-regex-7.23.3.tgz", - "integrity": "sha512-HZOyN9g+rtvnOU3Yh7kSxXrKbzgrm5X4GncPY1QOquu7epga5MxKHVpYu2hvQnry/H+JjckSYRb93iNfsioAGg==", + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-sticky-regex/-/plugin-transform-sticky-regex-7.27.1.tgz", + "integrity": "sha512-lhInBO5bi/Kowe2/aLdBAawijx+q1pQzicSgnkB6dUPc1+RC8QmJHKf2OjvU+NZWitguJHEaEmbV6VWEouT58g==", + "license": "MIT", "dependencies": { - "@babel/helper-plugin-utils": "^7.22.5" + "@babel/helper-plugin-utils": "^7.27.1" }, "engines": { "node": ">=6.9.0" @@ -1841,11 +1601,12 @@ } }, "node_modules/@babel/plugin-transform-template-literals": { - "version": "7.23.3", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-template-literals/-/plugin-transform-template-literals-7.23.3.tgz", - "integrity": "sha512-Flok06AYNp7GV2oJPZZcP9vZdszev6vPBkHLwxwSpaIqx75wn6mUd3UFWsSsA0l8nXAKkyCmL/sR02m8RYGeHg==", + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-template-literals/-/plugin-transform-template-literals-7.27.1.tgz", + "integrity": "sha512-fBJKiV7F2DxZUkg5EtHKXQdbsbURW3DZKQUWphDum0uRP6eHGGa/He9mc0mypL680pb+e/lDIthRohlv8NCHkg==", + "license": "MIT", "dependencies": { - "@babel/helper-plugin-utils": "^7.22.5" + "@babel/helper-plugin-utils": "^7.27.1" }, "engines": { "node": ">=6.9.0" @@ -1855,11 +1616,12 @@ } }, "node_modules/@babel/plugin-transform-typeof-symbol": { - "version": "7.23.3", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-typeof-symbol/-/plugin-transform-typeof-symbol-7.23.3.tgz", - "integrity": "sha512-4t15ViVnaFdrPC74be1gXBSMzXk3B4Us9lP7uLRQHTFpV5Dvt33pn+2MyyNxmN3VTTm3oTrZVMUmuw3oBnQ2oQ==", + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-typeof-symbol/-/plugin-transform-typeof-symbol-7.27.1.tgz", + "integrity": "sha512-RiSILC+nRJM7FY5srIyc4/fGIwUhyDuuBSdWn4y6yT6gm652DpCHZjIipgn6B7MQ1ITOUnAKWixEUjQRIBIcLw==", + "license": "MIT", "dependencies": { - "@babel/helper-plugin-utils": "^7.22.5" + "@babel/helper-plugin-utils": "^7.27.1" }, "engines": { "node": ">=6.9.0" @@ -1869,14 +1631,16 @@ } }, "node_modules/@babel/plugin-transform-typescript": { - "version": "7.23.6", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-typescript/-/plugin-transform-typescript-7.23.6.tgz", - "integrity": "sha512-6cBG5mBvUu4VUD04OHKnYzbuHNP8huDsD3EDqqpIpsswTDoqHCjLoHb6+QgsV1WsT2nipRqCPgxD3LXnEO7XfA==", + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-typescript/-/plugin-transform-typescript-7.27.1.tgz", + "integrity": "sha512-Q5sT5+O4QUebHdbwKedFBEwRLb02zJ7r4A5Gg2hUoLuU3FjdMcyqcywqUrLCaDsFCxzokf7u9kuy7qz51YUuAg==", + "license": "MIT", "dependencies": { - "@babel/helper-annotate-as-pure": "^7.22.5", - "@babel/helper-create-class-features-plugin": "^7.23.6", - "@babel/helper-plugin-utils": "^7.22.5", - "@babel/plugin-syntax-typescript": "^7.23.3" + "@babel/helper-annotate-as-pure": "^7.27.1", + "@babel/helper-create-class-features-plugin": "^7.27.1", + "@babel/helper-plugin-utils": "^7.27.1", + "@babel/helper-skip-transparent-expression-wrappers": "^7.27.1", + "@babel/plugin-syntax-typescript": "^7.27.1" }, "engines": { "node": ">=6.9.0" @@ -1886,11 +1650,12 @@ } }, "node_modules/@babel/plugin-transform-unicode-escapes": { - "version": "7.23.3", - "resolved": 
"https://registry.npmjs.org/@babel/plugin-transform-unicode-escapes/-/plugin-transform-unicode-escapes-7.23.3.tgz", - "integrity": "sha512-OMCUx/bU6ChE3r4+ZdylEqAjaQgHAgipgW8nsCfu5pGqDcFytVd91AwRvUJSBZDz0exPGgnjoqhgRYLRjFZc9Q==", + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-unicode-escapes/-/plugin-transform-unicode-escapes-7.27.1.tgz", + "integrity": "sha512-Ysg4v6AmF26k9vpfFuTZg8HRfVWzsh1kVfowA23y9j/Gu6dOuahdUVhkLqpObp3JIv27MLSii6noRnuKN8H0Mg==", + "license": "MIT", "dependencies": { - "@babel/helper-plugin-utils": "^7.22.5" + "@babel/helper-plugin-utils": "^7.27.1" }, "engines": { "node": ">=6.9.0" @@ -1900,12 +1665,13 @@ } }, "node_modules/@babel/plugin-transform-unicode-property-regex": { - "version": "7.23.3", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-unicode-property-regex/-/plugin-transform-unicode-property-regex-7.23.3.tgz", - "integrity": "sha512-KcLIm+pDZkWZQAFJ9pdfmh89EwVfmNovFBcXko8szpBeF8z68kWIPeKlmSOkT9BXJxs2C0uk+5LxoxIv62MROA==", + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-unicode-property-regex/-/plugin-transform-unicode-property-regex-7.27.1.tgz", + "integrity": "sha512-uW20S39PnaTImxp39O5qFlHLS9LJEmANjMG7SxIhap8rCHqu0Ik+tLEPX5DKmHn6CsWQ7j3lix2tFOa5YtL12Q==", + "license": "MIT", "dependencies": { - "@babel/helper-create-regexp-features-plugin": "^7.22.15", - "@babel/helper-plugin-utils": "^7.22.5" + "@babel/helper-create-regexp-features-plugin": "^7.27.1", + "@babel/helper-plugin-utils": "^7.27.1" }, "engines": { "node": ">=6.9.0" @@ -1915,12 +1681,13 @@ } }, "node_modules/@babel/plugin-transform-unicode-regex": { - "version": "7.23.3", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-unicode-regex/-/plugin-transform-unicode-regex-7.23.3.tgz", - "integrity": "sha512-wMHpNA4x2cIA32b/ci3AfwNgheiva2W0WUKWTK7vBHBhDKfPsc5cFGNWm69WBqpwd86u1qwZ9PWevKqm1A3yAw==", + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-unicode-regex/-/plugin-transform-unicode-regex-7.27.1.tgz", + "integrity": "sha512-xvINq24TRojDuyt6JGtHmkVkrfVV3FPT16uytxImLeBZqW3/H52yN+kM1MGuyPkIQxrzKwPHs5U/MP3qKyzkGw==", + "license": "MIT", "dependencies": { - "@babel/helper-create-regexp-features-plugin": "^7.22.15", - "@babel/helper-plugin-utils": "^7.22.5" + "@babel/helper-create-regexp-features-plugin": "^7.27.1", + "@babel/helper-plugin-utils": "^7.27.1" }, "engines": { "node": ">=6.9.0" @@ -1930,12 +1697,13 @@ } }, "node_modules/@babel/plugin-transform-unicode-sets-regex": { - "version": "7.23.3", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-unicode-sets-regex/-/plugin-transform-unicode-sets-regex-7.23.3.tgz", - "integrity": "sha512-W7lliA/v9bNR83Qc3q1ip9CQMZ09CcHDbHfbLRDNuAhn1Mvkr1ZNF7hPmztMQvtTGVLJ9m8IZqWsTkXOml8dbw==", + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-unicode-sets-regex/-/plugin-transform-unicode-sets-regex-7.27.1.tgz", + "integrity": "sha512-EtkOujbc4cgvb0mlpQefi4NTPBzhSIevblFevACNLUspmrALgmEBdL/XfnyyITfd8fKBZrZys92zOWcik7j9Tw==", + "license": "MIT", "dependencies": { - "@babel/helper-create-regexp-features-plugin": "^7.22.15", - "@babel/helper-plugin-utils": "^7.22.5" + "@babel/helper-create-regexp-features-plugin": "^7.27.1", + "@babel/helper-plugin-utils": "^7.27.1" }, "engines": { "node": ">=6.9.0" @@ -1945,89 +1713,79 @@ } }, "node_modules/@babel/preset-env": { - "version": "7.23.7", - "resolved": 
"https://registry.npmjs.org/@babel/preset-env/-/preset-env-7.23.7.tgz", - "integrity": "sha512-SY27X/GtTz/L4UryMNJ6p4fH4nsgWbz84y9FE0bQeWJP6O5BhgVCt53CotQKHCOeXJel8VyhlhujhlltKms/CA==", + "version": "7.27.2", + "resolved": "https://registry.npmjs.org/@babel/preset-env/-/preset-env-7.27.2.tgz", + "integrity": "sha512-Ma4zSuYSlGNRlCLO+EAzLnCmJK2vdstgv+n7aUP+/IKZrOfWHOJVdSJtuub8RzHTj3ahD37k5OKJWvzf16TQyQ==", + "license": "MIT", "dependencies": { - "@babel/compat-data": "^7.23.5", - "@babel/helper-compilation-targets": "^7.23.6", - "@babel/helper-plugin-utils": "^7.22.5", - "@babel/helper-validator-option": "^7.23.5", - "@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression": "^7.23.3", - "@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining": "^7.23.3", - "@babel/plugin-bugfix-v8-static-class-fields-redefine-readonly": "^7.23.7", + "@babel/compat-data": "^7.27.2", + "@babel/helper-compilation-targets": "^7.27.2", + "@babel/helper-plugin-utils": "^7.27.1", + "@babel/helper-validator-option": "^7.27.1", + "@babel/plugin-bugfix-firefox-class-in-computed-class-key": "^7.27.1", + "@babel/plugin-bugfix-safari-class-field-initializer-scope": "^7.27.1", + "@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression": "^7.27.1", + "@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining": "^7.27.1", + "@babel/plugin-bugfix-v8-static-class-fields-redefine-readonly": "^7.27.1", "@babel/plugin-proposal-private-property-in-object": "7.21.0-placeholder-for-preset-env.2", - "@babel/plugin-syntax-async-generators": "^7.8.4", - "@babel/plugin-syntax-class-properties": "^7.12.13", - "@babel/plugin-syntax-class-static-block": "^7.14.5", - "@babel/plugin-syntax-dynamic-import": "^7.8.3", - "@babel/plugin-syntax-export-namespace-from": "^7.8.3", - "@babel/plugin-syntax-import-assertions": "^7.23.3", - "@babel/plugin-syntax-import-attributes": "^7.23.3", - "@babel/plugin-syntax-import-meta": "^7.10.4", - "@babel/plugin-syntax-json-strings": "^7.8.3", - "@babel/plugin-syntax-logical-assignment-operators": "^7.10.4", - "@babel/plugin-syntax-nullish-coalescing-operator": "^7.8.3", - "@babel/plugin-syntax-numeric-separator": "^7.10.4", - "@babel/plugin-syntax-object-rest-spread": "^7.8.3", - "@babel/plugin-syntax-optional-catch-binding": "^7.8.3", - "@babel/plugin-syntax-optional-chaining": "^7.8.3", - "@babel/plugin-syntax-private-property-in-object": "^7.14.5", - "@babel/plugin-syntax-top-level-await": "^7.14.5", + "@babel/plugin-syntax-import-assertions": "^7.27.1", + "@babel/plugin-syntax-import-attributes": "^7.27.1", "@babel/plugin-syntax-unicode-sets-regex": "^7.18.6", - "@babel/plugin-transform-arrow-functions": "^7.23.3", - "@babel/plugin-transform-async-generator-functions": "^7.23.7", - "@babel/plugin-transform-async-to-generator": "^7.23.3", - "@babel/plugin-transform-block-scoped-functions": "^7.23.3", - "@babel/plugin-transform-block-scoping": "^7.23.4", - "@babel/plugin-transform-class-properties": "^7.23.3", - "@babel/plugin-transform-class-static-block": "^7.23.4", - "@babel/plugin-transform-classes": "^7.23.5", - "@babel/plugin-transform-computed-properties": "^7.23.3", - "@babel/plugin-transform-destructuring": "^7.23.3", - "@babel/plugin-transform-dotall-regex": "^7.23.3", - "@babel/plugin-transform-duplicate-keys": "^7.23.3", - "@babel/plugin-transform-dynamic-import": "^7.23.4", - "@babel/plugin-transform-exponentiation-operator": "^7.23.3", - "@babel/plugin-transform-export-namespace-from": "^7.23.4", - "@babel/plugin-transform-for-of": 
"^7.23.6", - "@babel/plugin-transform-function-name": "^7.23.3", - "@babel/plugin-transform-json-strings": "^7.23.4", - "@babel/plugin-transform-literals": "^7.23.3", - "@babel/plugin-transform-logical-assignment-operators": "^7.23.4", - "@babel/plugin-transform-member-expression-literals": "^7.23.3", - "@babel/plugin-transform-modules-amd": "^7.23.3", - "@babel/plugin-transform-modules-commonjs": "^7.23.3", - "@babel/plugin-transform-modules-systemjs": "^7.23.3", - "@babel/plugin-transform-modules-umd": "^7.23.3", - "@babel/plugin-transform-named-capturing-groups-regex": "^7.22.5", - "@babel/plugin-transform-new-target": "^7.23.3", - "@babel/plugin-transform-nullish-coalescing-operator": "^7.23.4", - "@babel/plugin-transform-numeric-separator": "^7.23.4", - "@babel/plugin-transform-object-rest-spread": "^7.23.4", - "@babel/plugin-transform-object-super": "^7.23.3", - "@babel/plugin-transform-optional-catch-binding": "^7.23.4", - "@babel/plugin-transform-optional-chaining": "^7.23.4", - "@babel/plugin-transform-parameters": "^7.23.3", - "@babel/plugin-transform-private-methods": "^7.23.3", - "@babel/plugin-transform-private-property-in-object": "^7.23.4", - "@babel/plugin-transform-property-literals": "^7.23.3", - "@babel/plugin-transform-regenerator": "^7.23.3", - "@babel/plugin-transform-reserved-words": "^7.23.3", - "@babel/plugin-transform-shorthand-properties": "^7.23.3", - "@babel/plugin-transform-spread": "^7.23.3", - "@babel/plugin-transform-sticky-regex": "^7.23.3", - "@babel/plugin-transform-template-literals": "^7.23.3", - "@babel/plugin-transform-typeof-symbol": "^7.23.3", - "@babel/plugin-transform-unicode-escapes": "^7.23.3", - "@babel/plugin-transform-unicode-property-regex": "^7.23.3", - "@babel/plugin-transform-unicode-regex": "^7.23.3", - "@babel/plugin-transform-unicode-sets-regex": "^7.23.3", + "@babel/plugin-transform-arrow-functions": "^7.27.1", + "@babel/plugin-transform-async-generator-functions": "^7.27.1", + "@babel/plugin-transform-async-to-generator": "^7.27.1", + "@babel/plugin-transform-block-scoped-functions": "^7.27.1", + "@babel/plugin-transform-block-scoping": "^7.27.1", + "@babel/plugin-transform-class-properties": "^7.27.1", + "@babel/plugin-transform-class-static-block": "^7.27.1", + "@babel/plugin-transform-classes": "^7.27.1", + "@babel/plugin-transform-computed-properties": "^7.27.1", + "@babel/plugin-transform-destructuring": "^7.27.1", + "@babel/plugin-transform-dotall-regex": "^7.27.1", + "@babel/plugin-transform-duplicate-keys": "^7.27.1", + "@babel/plugin-transform-duplicate-named-capturing-groups-regex": "^7.27.1", + "@babel/plugin-transform-dynamic-import": "^7.27.1", + "@babel/plugin-transform-exponentiation-operator": "^7.27.1", + "@babel/plugin-transform-export-namespace-from": "^7.27.1", + "@babel/plugin-transform-for-of": "^7.27.1", + "@babel/plugin-transform-function-name": "^7.27.1", + "@babel/plugin-transform-json-strings": "^7.27.1", + "@babel/plugin-transform-literals": "^7.27.1", + "@babel/plugin-transform-logical-assignment-operators": "^7.27.1", + "@babel/plugin-transform-member-expression-literals": "^7.27.1", + "@babel/plugin-transform-modules-amd": "^7.27.1", + "@babel/plugin-transform-modules-commonjs": "^7.27.1", + "@babel/plugin-transform-modules-systemjs": "^7.27.1", + "@babel/plugin-transform-modules-umd": "^7.27.1", + "@babel/plugin-transform-named-capturing-groups-regex": "^7.27.1", + "@babel/plugin-transform-new-target": "^7.27.1", + "@babel/plugin-transform-nullish-coalescing-operator": "^7.27.1", + 
"@babel/plugin-transform-numeric-separator": "^7.27.1", + "@babel/plugin-transform-object-rest-spread": "^7.27.2", + "@babel/plugin-transform-object-super": "^7.27.1", + "@babel/plugin-transform-optional-catch-binding": "^7.27.1", + "@babel/plugin-transform-optional-chaining": "^7.27.1", + "@babel/plugin-transform-parameters": "^7.27.1", + "@babel/plugin-transform-private-methods": "^7.27.1", + "@babel/plugin-transform-private-property-in-object": "^7.27.1", + "@babel/plugin-transform-property-literals": "^7.27.1", + "@babel/plugin-transform-regenerator": "^7.27.1", + "@babel/plugin-transform-regexp-modifiers": "^7.27.1", + "@babel/plugin-transform-reserved-words": "^7.27.1", + "@babel/plugin-transform-shorthand-properties": "^7.27.1", + "@babel/plugin-transform-spread": "^7.27.1", + "@babel/plugin-transform-sticky-regex": "^7.27.1", + "@babel/plugin-transform-template-literals": "^7.27.1", + "@babel/plugin-transform-typeof-symbol": "^7.27.1", + "@babel/plugin-transform-unicode-escapes": "^7.27.1", + "@babel/plugin-transform-unicode-property-regex": "^7.27.1", + "@babel/plugin-transform-unicode-regex": "^7.27.1", + "@babel/plugin-transform-unicode-sets-regex": "^7.27.1", "@babel/preset-modules": "0.1.6-no-external-plugins", - "babel-plugin-polyfill-corejs2": "^0.4.7", - "babel-plugin-polyfill-corejs3": "^0.8.7", - "babel-plugin-polyfill-regenerator": "^0.5.4", - "core-js-compat": "^3.31.0", + "babel-plugin-polyfill-corejs2": "^0.4.10", + "babel-plugin-polyfill-corejs3": "^0.11.0", + "babel-plugin-polyfill-regenerator": "^0.6.1", + "core-js-compat": "^3.40.0", "semver": "^6.3.1" }, "engines": { @@ -2059,16 +1817,17 @@ } }, "node_modules/@babel/preset-react": { - "version": "7.23.3", - "resolved": "https://registry.npmjs.org/@babel/preset-react/-/preset-react-7.23.3.tgz", - "integrity": "sha512-tbkHOS9axH6Ysf2OUEqoSZ6T3Fa2SrNH6WTWSPBboxKzdxNc9qOICeLXkNG0ZEwbQ1HY8liwOce4aN/Ceyuq6w==", + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/preset-react/-/preset-react-7.27.1.tgz", + "integrity": "sha512-oJHWh2gLhU9dW9HHr42q0cI0/iHHXTLGe39qvpAZZzagHy0MzYLCnCVV0symeRvzmjHyVU7mw2K06E6u/JwbhA==", + "license": "MIT", "dependencies": { - "@babel/helper-plugin-utils": "^7.22.5", - "@babel/helper-validator-option": "^7.22.15", - "@babel/plugin-transform-react-display-name": "^7.23.3", - "@babel/plugin-transform-react-jsx": "^7.22.15", - "@babel/plugin-transform-react-jsx-development": "^7.22.5", - "@babel/plugin-transform-react-pure-annotations": "^7.23.3" + "@babel/helper-plugin-utils": "^7.27.1", + "@babel/helper-validator-option": "^7.27.1", + "@babel/plugin-transform-react-display-name": "^7.27.1", + "@babel/plugin-transform-react-jsx": "^7.27.1", + "@babel/plugin-transform-react-jsx-development": "^7.27.1", + "@babel/plugin-transform-react-pure-annotations": "^7.27.1" }, "engines": { "node": ">=6.9.0" @@ -2078,15 +1837,16 @@ } }, "node_modules/@babel/preset-typescript": { - "version": "7.23.3", - "resolved": "https://registry.npmjs.org/@babel/preset-typescript/-/preset-typescript-7.23.3.tgz", - "integrity": "sha512-17oIGVlqz6CchO9RFYn5U6ZpWRZIngayYCtrPRSgANSwC2V1Jb+iP74nVxzzXJte8b8BYxrL1yY96xfhTBrNNQ==", + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/preset-typescript/-/preset-typescript-7.27.1.tgz", + "integrity": "sha512-l7WfQfX0WK4M0v2RudjuQK4u99BS6yLHYEmdtVPP7lKV013zr9DygFuWNlnbvQ9LR+LS0Egz/XAvGx5U9MX0fQ==", + "license": "MIT", "dependencies": { - "@babel/helper-plugin-utils": "^7.22.5", - "@babel/helper-validator-option": "^7.22.15", - 
"@babel/plugin-syntax-jsx": "^7.23.3", - "@babel/plugin-transform-modules-commonjs": "^7.23.3", - "@babel/plugin-transform-typescript": "^7.23.3" + "@babel/helper-plugin-utils": "^7.27.1", + "@babel/helper-validator-option": "^7.27.1", + "@babel/plugin-syntax-jsx": "^7.27.1", + "@babel/plugin-transform-modules-commonjs": "^7.27.1", + "@babel/plugin-transform-typescript": "^7.27.1" }, "engines": { "node": ">=6.9.0" @@ -2095,60 +1855,52 @@ "@babel/core": "^7.0.0-0" } }, - "node_modules/@babel/regjsgen": { - "version": "0.8.0", - "resolved": "https://registry.npmjs.org/@babel/regjsgen/-/regjsgen-0.8.0.tgz", - "integrity": "sha512-x/rqGMdzj+fWZvCOYForTghzbtqPDZ5gPwaoNGHdgDfF2QA/XZbCBp4Moo5scrkAMPhB7z26XM/AaHuIJdgauA==" - }, "node_modules/@babel/runtime": { - "version": "7.23.7", - "resolved": "https://registry.npmjs.org/@babel/runtime/-/runtime-7.23.7.tgz", - "integrity": "sha512-w06OXVOFso7LcbzMiDGt+3X7Rh7Ho8MmgPoWU3rarH+8upf+wSU/grlGbWzQyr3DkdN6ZeuMFjpdwW0Q+HxobA==", - "dependencies": { - "regenerator-runtime": "^0.14.0" - }, + "version": "7.27.4", + "resolved": "https://registry.npmjs.org/@babel/runtime/-/runtime-7.27.4.tgz", + "integrity": "sha512-t3yaEOuGu9NlIZ+hIeGbBjFtZT7j2cb2tg0fuaJKeGotchRjjLfrBA9Kwf8quhpP1EUuxModQg04q/mBwyg8uA==", + "license": "MIT", "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/runtime-corejs3": { - "version": "7.23.7", - "resolved": "https://registry.npmjs.org/@babel/runtime-corejs3/-/runtime-corejs3-7.23.7.tgz", - "integrity": "sha512-ER55qzLREVA5YxeyQ3Qu48tgsF2ZrFjFjUS6V6wF0cikSw+goBJgB9PBRM1T6+Ah4iiM+sxmfS/Sy/jdzFfhiQ==", + "version": "7.27.4", + "resolved": "https://registry.npmjs.org/@babel/runtime-corejs3/-/runtime-corejs3-7.27.4.tgz", + "integrity": "sha512-H7QhL0ucCGOObsUETNbB2PuzF4gAvN8p32P6r91bX7M/hk4bx+3yz2hTwHL9d/Efzwu1upeb4/cd7oSxCzup3w==", + "license": "MIT", "dependencies": { - "core-js-pure": "^3.30.2", - "regenerator-runtime": "^0.14.0" + "core-js-pure": "^3.30.2" }, "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/template": { - "version": "7.22.15", - "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.22.15.tgz", - "integrity": "sha512-QPErUVm4uyJa60rkI73qneDacvdvzxshT3kksGqlGWYdOTIUOwJ7RDUL8sGqslY1uXWSL6xMFKEXDS3ox2uF0w==", + "version": "7.27.2", + "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.27.2.tgz", + "integrity": "sha512-LPDZ85aEJyYSd18/DkjNh4/y1ntkE5KwUHWTiqgRxruuZL2F1yuHligVHLvcHY2vMHXttKFpJn6LwfI7cw7ODw==", + "license": "MIT", "dependencies": { - "@babel/code-frame": "^7.22.13", - "@babel/parser": "^7.22.15", - "@babel/types": "^7.22.15" + "@babel/code-frame": "^7.27.1", + "@babel/parser": "^7.27.2", + "@babel/types": "^7.27.1" }, "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/traverse": { - "version": "7.23.7", - "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.23.7.tgz", - "integrity": "sha512-tY3mM8rH9jM0YHFGyfC0/xf+SB5eKUu7HPj7/k3fpi9dAlsMc5YbQvDi0Sh2QTPXqMhyaAtzAr807TIyfQrmyg==", - "dependencies": { - "@babel/code-frame": "^7.23.5", - "@babel/generator": "^7.23.6", - "@babel/helper-environment-visitor": "^7.22.20", - "@babel/helper-function-name": "^7.23.0", - "@babel/helper-hoist-variables": "^7.22.5", - "@babel/helper-split-export-declaration": "^7.22.6", - "@babel/parser": "^7.23.6", - "@babel/types": "^7.23.6", + "version": "7.27.4", + "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.27.4.tgz", + "integrity": "sha512-oNcu2QbHqts9BtOWJosOVJapWjBDSxGCpFvikNR5TGDYDQf3JwpIoMzIKrvfoti93cLfPJEG4tH9SPVeyCGgdA==", + "license": 
"MIT", + "dependencies": { + "@babel/code-frame": "^7.27.1", + "@babel/generator": "^7.27.3", + "@babel/parser": "^7.27.4", + "@babel/template": "^7.27.2", + "@babel/types": "^7.27.3", "debug": "^4.3.1", "globals": "^11.1.0" }, @@ -2157,18 +1909,24 @@ } }, "node_modules/@babel/types": { - "version": "7.23.6", - "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.23.6.tgz", - "integrity": "sha512-+uarb83brBzPKN38NX1MkB6vb6+mwvR6amUulqAE7ccQw1pEl+bCia9TbdG1lsnFP7lZySvUn37CHyXQdfTwzg==", + "version": "7.27.3", + "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.27.3.tgz", + "integrity": "sha512-Y1GkI4ktrtvmawoSq+4FCVHNryea6uR+qUQy0AGxLSsjCX0nVmkYQMBLHDkXZuo5hGx7eYdnIaslsdBFm7zbUw==", + "license": "MIT", "dependencies": { - "@babel/helper-string-parser": "^7.23.4", - "@babel/helper-validator-identifier": "^7.22.20", - "to-fast-properties": "^2.0.0" + "@babel/helper-string-parser": "^7.27.1", + "@babel/helper-validator-identifier": "^7.27.1" }, "engines": { "node": ">=6.9.0" } }, + "node_modules/@braintree/sanitize-url": { + "version": "6.0.4", + "resolved": "https://registry.npmjs.org/@braintree/sanitize-url/-/sanitize-url-6.0.4.tgz", + "integrity": "sha512-s3jaWicZd0pkP0jf5ysyHUI/RE7MHos6qlToFcGWXVp+ykHOy77OUMrfbgJ9it2C5bow7OIQwYYaHjk9XlBQ2A==", + "license": "MIT" + }, "node_modules/@colors/colors": { "version": "1.5.0", "resolved": "https://registry.npmjs.org/@colors/colors/-/colors-1.5.0.tgz", @@ -2707,6 +2465,28 @@ "react-dom": "^18.0.0" } }, + "node_modules/@docusaurus/theme-mermaid": { + "version": "3.4.0", + "resolved": "https://registry.npmjs.org/@docusaurus/theme-mermaid/-/theme-mermaid-3.4.0.tgz", + "integrity": "sha512-3w5QW0HEZ2O6x2w6lU3ZvOe1gNXP2HIoKDMJBil1VmLBc9PmpAG17VmfhI/p3L2etNmOiVs5GgniUqvn8AFEGQ==", + "license": "MIT", + "dependencies": { + "@docusaurus/core": "3.4.0", + "@docusaurus/module-type-aliases": "3.4.0", + "@docusaurus/theme-common": "3.4.0", + "@docusaurus/types": "3.4.0", + "@docusaurus/utils-validation": "3.4.0", + "mermaid": "^10.4.0", + "tslib": "^2.6.0" + }, + "engines": { + "node": ">=18.0" + }, + "peerDependencies": { + "react": "^18.0.0", + "react-dom": "^18.0.0" + } + }, "node_modules/@docusaurus/theme-search-algolia": { "version": "3.4.0", "resolved": "https://registry.npmjs.org/@docusaurus/theme-search-algolia/-/theme-search-algolia-3.4.0.tgz", @@ -3019,13 +2799,14 @@ } }, "node_modules/@jridgewell/gen-mapping": { - "version": "0.3.3", - "resolved": "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.3.3.tgz", - "integrity": "sha512-HLhSWOLRi875zjjMG/r+Nv0oCW8umGb0BgEhyX3dDX3egwZtB8PqLnjz3yedt8R5StBrzcg4aBpnh8UA9D1BoQ==", + "version": "0.3.8", + "resolved": "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.3.8.tgz", + "integrity": "sha512-imAbBGkb+ebQyxKgzv5Hu2nmROxoDOXHh80evxdoXNOrvAnVx7zimzc1Oo5h9RlfV4vPXaE2iM5pOFbvOCClWA==", + "license": "MIT", "dependencies": { - "@jridgewell/set-array": "^1.0.1", + "@jridgewell/set-array": "^1.2.1", "@jridgewell/sourcemap-codec": "^1.4.10", - "@jridgewell/trace-mapping": "^0.3.9" + "@jridgewell/trace-mapping": "^0.3.24" }, "engines": { "node": ">=6.0.0" @@ -3040,9 +2821,10 @@ } }, "node_modules/@jridgewell/set-array": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/@jridgewell/set-array/-/set-array-1.1.2.tgz", - "integrity": "sha512-xnkseuNADM0gt2bs+BvhO0p78Mk762YnZdsuzFV018NoG1Sj1SCQvpSqa7XUaTam5vAGasABV9qXASMKnFMwMw==", + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/@jridgewell/set-array/-/set-array-1.2.1.tgz", + 
"integrity": "sha512-R8gLRTZeyp03ymzP/6Lil/28tGeGEzhx1q2k703KGWRAI1VdvPIXdG70VJc2pAMw3NA6JKL5hhFu1sJX0Mnn/A==", + "license": "MIT", "engines": { "node": ">=6.0.0" } @@ -3062,9 +2844,10 @@ "integrity": "sha512-eF2rxCRulEKXHTRiDrDy6erMYWqNw4LPdQ8UQA4huuxaQsVeRPFl2oM8oDGxMFhJUWZf9McpLtJasDDZb/Bpeg==" }, "node_modules/@jridgewell/trace-mapping": { - "version": "0.3.20", - "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.20.tgz", - "integrity": "sha512-R8LcPeWZol2zR8mmH3JeKQ6QRCFb7XgUhV9ZlGhHLGyg4wpPiPZNQOOWhFZhxKw8u//yTbNGI42Bx/3paXEQ+Q==", + "version": "0.3.25", + "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.25.tgz", + "integrity": "sha512-vNk6aEwybGtawWmy/PzwnGDOjCkLWSD2wqvjGGAgOAwCGWySYXfYoxt00IJkTF+8Lb57DwOb3Aa0o9CApepiYQ==", + "license": "MIT", "dependencies": { "@jridgewell/resolve-uri": "^3.1.0", "@jridgewell/sourcemap-codec": "^1.4.14" @@ -3795,6 +3578,27 @@ "@types/node": "*" } }, + "node_modules/@types/d3-scale": { + "version": "4.0.9", + "resolved": "https://registry.npmjs.org/@types/d3-scale/-/d3-scale-4.0.9.tgz", + "integrity": "sha512-dLmtwB8zkAeO/juAMfnV+sItKjlsw2lKdZVVy6LRr0cBmegxSABiLEpGVmSJJ8O08i4+sGR6qQtb6WtuwJdvVw==", + "license": "MIT", + "dependencies": { + "@types/d3-time": "*" + } + }, + "node_modules/@types/d3-scale-chromatic": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/@types/d3-scale-chromatic/-/d3-scale-chromatic-3.1.0.tgz", + "integrity": "sha512-iWMJgwkK7yTRmWqRB5plb1kadXyQ5Sj8V/zYlFGMUBbIPKQScw+Dku9cAAMgJG+z5GYDoMjWGLVOvjghDEFnKQ==", + "license": "MIT" + }, + "node_modules/@types/d3-time": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/@types/d3-time/-/d3-time-3.0.4.tgz", + "integrity": "sha512-yuzZug1nkAAaBlBBikKZTgzCeA+k1uy4ZFwWANOfKw5z5LRhV0gNA7gNkKm7HoK+HRN0wX3EkxGk0fpbWhmB7g==", + "license": "MIT" + }, "node_modules/@types/debug": { "version": "4.1.12", "resolved": "https://registry.npmjs.org/@types/debug/-/debug-4.1.12.tgz", @@ -3822,9 +3626,10 @@ } }, "node_modules/@types/estree": { - "version": "1.0.5", - "resolved": "https://registry.npmjs.org/@types/estree/-/estree-1.0.5.tgz", - "integrity": "sha512-/kYRxGDLWzHOB7q+wtSUQlFrtcdUccpfy+X+9iMBpHK8QLLhx2wIPYuS5DYtR9Wa/YlZAbIovy7qVdB1Aq6Lyw==" + "version": "1.0.7", + "resolved": "https://registry.npmjs.org/@types/estree/-/estree-1.0.7.tgz", + "integrity": "sha512-w28IoSUCJpidD/TGviZwwMJckNESJZXFu7NBZ5YJ4mEUnNraUn9Pm8HSZm/jDF1pDWYKspWE7oVphigUPRakIQ==", + "license": "MIT" }, "node_modules/@types/estree-jsx": { "version": "1.0.3", @@ -4111,145 +3916,162 @@ "integrity": "sha512-zuVdFrMJiuCDQUMCzQaD6KL28MjnqqN8XnAqiEq9PNm/hCPTSGfrXCOfwj1ow4LFb/tNymJPwsNbVePc1xFqrQ==" }, "node_modules/@webassemblyjs/ast": { - "version": "1.11.6", - "resolved": "https://registry.npmjs.org/@webassemblyjs/ast/-/ast-1.11.6.tgz", - "integrity": "sha512-IN1xI7PwOvLPgjcf180gC1bqn3q/QaOCwYUahIOhbYUu8KA/3tw2RT/T0Gidi1l7Hhj5D/INhJxiICObqpMu4Q==", + "version": "1.14.1", + "resolved": "https://registry.npmjs.org/@webassemblyjs/ast/-/ast-1.14.1.tgz", + "integrity": "sha512-nuBEDgQfm1ccRp/8bCQrx1frohyufl4JlbMMZ4P1wpeOfDhF6FQkxZJ1b/e+PLwr6X1Nhw6OLme5usuBWYBvuQ==", + "license": "MIT", "dependencies": { - "@webassemblyjs/helper-numbers": "1.11.6", - "@webassemblyjs/helper-wasm-bytecode": "1.11.6" + "@webassemblyjs/helper-numbers": "1.13.2", + "@webassemblyjs/helper-wasm-bytecode": "1.13.2" } }, "node_modules/@webassemblyjs/floating-point-hex-parser": { - "version": "1.11.6", - "resolved": 
"https://registry.npmjs.org/@webassemblyjs/floating-point-hex-parser/-/floating-point-hex-parser-1.11.6.tgz", - "integrity": "sha512-ejAj9hfRJ2XMsNHk/v6Fu2dGS+i4UaXBXGemOfQ/JfQ6mdQg/WXtwleQRLLS4OvfDhv8rYnVwH27YJLMyYsxhw==" + "version": "1.13.2", + "resolved": "https://registry.npmjs.org/@webassemblyjs/floating-point-hex-parser/-/floating-point-hex-parser-1.13.2.tgz", + "integrity": "sha512-6oXyTOzbKxGH4steLbLNOu71Oj+C8Lg34n6CqRvqfS2O71BxY6ByfMDRhBytzknj9yGUPVJ1qIKhRlAwO1AovA==", + "license": "MIT" }, "node_modules/@webassemblyjs/helper-api-error": { - "version": "1.11.6", - "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-api-error/-/helper-api-error-1.11.6.tgz", - "integrity": "sha512-o0YkoP4pVu4rN8aTJgAyj9hC2Sv5UlkzCHhxqWj8butaLvnpdc2jOwh4ewE6CX0txSfLn/UYaV/pheS2Txg//Q==" + "version": "1.13.2", + "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-api-error/-/helper-api-error-1.13.2.tgz", + "integrity": "sha512-U56GMYxy4ZQCbDZd6JuvvNV/WFildOjsaWD3Tzzvmw/mas3cXzRJPMjP83JqEsgSbyrmaGjBfDtV7KDXV9UzFQ==", + "license": "MIT" }, "node_modules/@webassemblyjs/helper-buffer": { - "version": "1.11.6", - "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-buffer/-/helper-buffer-1.11.6.tgz", - "integrity": "sha512-z3nFzdcp1mb8nEOFFk8DrYLpHvhKC3grJD2ardfKOzmbmJvEf/tPIqCY+sNcwZIY8ZD7IkB2l7/pqhUhqm7hLA==" + "version": "1.14.1", + "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-buffer/-/helper-buffer-1.14.1.tgz", + "integrity": "sha512-jyH7wtcHiKssDtFPRB+iQdxlDf96m0E39yb0k5uJVhFGleZFoNw1c4aeIcVUPPbXUVJ94wwnMOAqUHyzoEPVMA==", + "license": "MIT" }, "node_modules/@webassemblyjs/helper-numbers": { - "version": "1.11.6", - "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-numbers/-/helper-numbers-1.11.6.tgz", - "integrity": "sha512-vUIhZ8LZoIWHBohiEObxVm6hwP034jwmc9kuq5GdHZH0wiLVLIPcMCdpJzG4C11cHoQ25TFIQj9kaVADVX7N3g==", + "version": "1.13.2", + "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-numbers/-/helper-numbers-1.13.2.tgz", + "integrity": "sha512-FE8aCmS5Q6eQYcV3gI35O4J789wlQA+7JrqTTpJqn5emA4U2hvwJmvFRC0HODS+3Ye6WioDklgd6scJ3+PLnEA==", + "license": "MIT", "dependencies": { - "@webassemblyjs/floating-point-hex-parser": "1.11.6", - "@webassemblyjs/helper-api-error": "1.11.6", + "@webassemblyjs/floating-point-hex-parser": "1.13.2", + "@webassemblyjs/helper-api-error": "1.13.2", "@xtuc/long": "4.2.2" } }, "node_modules/@webassemblyjs/helper-wasm-bytecode": { - "version": "1.11.6", - "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-wasm-bytecode/-/helper-wasm-bytecode-1.11.6.tgz", - "integrity": "sha512-sFFHKwcmBprO9e7Icf0+gddyWYDViL8bpPjJJl0WHxCdETktXdmtWLGVzoHbqUcY4Be1LkNfwTmXOJUFZYSJdA==" + "version": "1.13.2", + "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-wasm-bytecode/-/helper-wasm-bytecode-1.13.2.tgz", + "integrity": "sha512-3QbLKy93F0EAIXLh0ogEVR6rOubA9AoZ+WRYhNbFyuB70j3dRdwH9g+qXhLAO0kiYGlg3TxDV+I4rQTr/YNXkA==", + "license": "MIT" }, "node_modules/@webassemblyjs/helper-wasm-section": { - "version": "1.11.6", - "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-wasm-section/-/helper-wasm-section-1.11.6.tgz", - "integrity": "sha512-LPpZbSOwTpEC2cgn4hTydySy1Ke+XEu+ETXuoyvuyezHO3Kjdu90KK95Sh9xTbmjrCsUwvWwCOQQNta37VrS9g==", + "version": "1.14.1", + "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-wasm-section/-/helper-wasm-section-1.14.1.tgz", + "integrity": "sha512-ds5mXEqTJ6oxRoqjhWDU83OgzAYjwsCV8Lo/N+oRsNDmx/ZDpqalmrtgOMkHwxsG0iI//3BwWAErYRHtgn0dZw==", + "license": "MIT", 
"dependencies": { - "@webassemblyjs/ast": "1.11.6", - "@webassemblyjs/helper-buffer": "1.11.6", - "@webassemblyjs/helper-wasm-bytecode": "1.11.6", - "@webassemblyjs/wasm-gen": "1.11.6" + "@webassemblyjs/ast": "1.14.1", + "@webassemblyjs/helper-buffer": "1.14.1", + "@webassemblyjs/helper-wasm-bytecode": "1.13.2", + "@webassemblyjs/wasm-gen": "1.14.1" } }, "node_modules/@webassemblyjs/ieee754": { - "version": "1.11.6", - "resolved": "https://registry.npmjs.org/@webassemblyjs/ieee754/-/ieee754-1.11.6.tgz", - "integrity": "sha512-LM4p2csPNvbij6U1f19v6WR56QZ8JcHg3QIJTlSwzFcmx6WSORicYj6I63f9yU1kEUtrpG+kjkiIAkevHpDXrg==", + "version": "1.13.2", + "resolved": "https://registry.npmjs.org/@webassemblyjs/ieee754/-/ieee754-1.13.2.tgz", + "integrity": "sha512-4LtOzh58S/5lX4ITKxnAK2USuNEvpdVV9AlgGQb8rJDHaLeHciwG4zlGr0j/SNWlr7x3vO1lDEsuePvtcDNCkw==", + "license": "MIT", "dependencies": { "@xtuc/ieee754": "^1.2.0" } }, "node_modules/@webassemblyjs/leb128": { - "version": "1.11.6", - "resolved": "https://registry.npmjs.org/@webassemblyjs/leb128/-/leb128-1.11.6.tgz", - "integrity": "sha512-m7a0FhE67DQXgouf1tbN5XQcdWoNgaAuoULHIfGFIEVKA6tu/edls6XnIlkmS6FrXAquJRPni3ZZKjw6FSPjPQ==", + "version": "1.13.2", + "resolved": "https://registry.npmjs.org/@webassemblyjs/leb128/-/leb128-1.13.2.tgz", + "integrity": "sha512-Lde1oNoIdzVzdkNEAWZ1dZ5orIbff80YPdHx20mrHwHrVNNTjNr8E3xz9BdpcGqRQbAEa+fkrCb+fRFTl/6sQw==", + "license": "Apache-2.0", "dependencies": { "@xtuc/long": "4.2.2" } }, "node_modules/@webassemblyjs/utf8": { - "version": "1.11.6", - "resolved": "https://registry.npmjs.org/@webassemblyjs/utf8/-/utf8-1.11.6.tgz", - "integrity": "sha512-vtXf2wTQ3+up9Zsg8sa2yWiQpzSsMyXj0qViVP6xKGCUT8p8YJ6HqI7l5eCnWx1T/FYdsv07HQs2wTFbbof/RA==" + "version": "1.13.2", + "resolved": "https://registry.npmjs.org/@webassemblyjs/utf8/-/utf8-1.13.2.tgz", + "integrity": "sha512-3NQWGjKTASY1xV5m7Hr0iPeXD9+RDobLll3T9d2AO+g3my8xy5peVyjSag4I50mR1bBSN/Ct12lo+R9tJk0NZQ==", + "license": "MIT" }, "node_modules/@webassemblyjs/wasm-edit": { - "version": "1.11.6", - "resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-edit/-/wasm-edit-1.11.6.tgz", - "integrity": "sha512-Ybn2I6fnfIGuCR+Faaz7YcvtBKxvoLV3Lebn1tM4o/IAJzmi9AWYIPWpyBfU8cC+JxAO57bk4+zdsTjJR+VTOw==", + "version": "1.14.1", + "resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-edit/-/wasm-edit-1.14.1.tgz", + "integrity": "sha512-RNJUIQH/J8iA/1NzlE4N7KtyZNHi3w7at7hDjvRNm5rcUXa00z1vRz3glZoULfJ5mpvYhLybmVcwcjGrC1pRrQ==", + "license": "MIT", "dependencies": { - "@webassemblyjs/ast": "1.11.6", - "@webassemblyjs/helper-buffer": "1.11.6", - "@webassemblyjs/helper-wasm-bytecode": "1.11.6", - "@webassemblyjs/helper-wasm-section": "1.11.6", - "@webassemblyjs/wasm-gen": "1.11.6", - "@webassemblyjs/wasm-opt": "1.11.6", - "@webassemblyjs/wasm-parser": "1.11.6", - "@webassemblyjs/wast-printer": "1.11.6" + "@webassemblyjs/ast": "1.14.1", + "@webassemblyjs/helper-buffer": "1.14.1", + "@webassemblyjs/helper-wasm-bytecode": "1.13.2", + "@webassemblyjs/helper-wasm-section": "1.14.1", + "@webassemblyjs/wasm-gen": "1.14.1", + "@webassemblyjs/wasm-opt": "1.14.1", + "@webassemblyjs/wasm-parser": "1.14.1", + "@webassemblyjs/wast-printer": "1.14.1" } }, "node_modules/@webassemblyjs/wasm-gen": { - "version": "1.11.6", - "resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-gen/-/wasm-gen-1.11.6.tgz", - "integrity": "sha512-3XOqkZP/y6B4F0PBAXvI1/bky7GryoogUtfwExeP/v7Nzwo1QLcq5oQmpKlftZLbT+ERUOAZVQjuNVak6UXjPA==", + "version": "1.14.1", + "resolved": 
"https://registry.npmjs.org/@webassemblyjs/wasm-gen/-/wasm-gen-1.14.1.tgz", + "integrity": "sha512-AmomSIjP8ZbfGQhumkNvgC33AY7qtMCXnN6bL2u2Js4gVCg8fp735aEiMSBbDR7UQIj90n4wKAFUSEd0QN2Ukg==", + "license": "MIT", "dependencies": { - "@webassemblyjs/ast": "1.11.6", - "@webassemblyjs/helper-wasm-bytecode": "1.11.6", - "@webassemblyjs/ieee754": "1.11.6", - "@webassemblyjs/leb128": "1.11.6", - "@webassemblyjs/utf8": "1.11.6" + "@webassemblyjs/ast": "1.14.1", + "@webassemblyjs/helper-wasm-bytecode": "1.13.2", + "@webassemblyjs/ieee754": "1.13.2", + "@webassemblyjs/leb128": "1.13.2", + "@webassemblyjs/utf8": "1.13.2" } }, "node_modules/@webassemblyjs/wasm-opt": { - "version": "1.11.6", - "resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-opt/-/wasm-opt-1.11.6.tgz", - "integrity": "sha512-cOrKuLRE7PCe6AsOVl7WasYf3wbSo4CeOk6PkrjS7g57MFfVUF9u6ysQBBODX0LdgSvQqRiGz3CXvIDKcPNy4g==", + "version": "1.14.1", + "resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-opt/-/wasm-opt-1.14.1.tgz", + "integrity": "sha512-PTcKLUNvBqnY2U6E5bdOQcSM+oVP/PmrDY9NzowJjislEjwP/C4an2303MCVS2Mg9d3AJpIGdUFIQQWbPds0Sw==", + "license": "MIT", "dependencies": { - "@webassemblyjs/ast": "1.11.6", - "@webassemblyjs/helper-buffer": "1.11.6", - "@webassemblyjs/wasm-gen": "1.11.6", - "@webassemblyjs/wasm-parser": "1.11.6" + "@webassemblyjs/ast": "1.14.1", + "@webassemblyjs/helper-buffer": "1.14.1", + "@webassemblyjs/wasm-gen": "1.14.1", + "@webassemblyjs/wasm-parser": "1.14.1" } }, "node_modules/@webassemblyjs/wasm-parser": { - "version": "1.11.6", - "resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-parser/-/wasm-parser-1.11.6.tgz", - "integrity": "sha512-6ZwPeGzMJM3Dqp3hCsLgESxBGtT/OeCvCZ4TA1JUPYgmhAx38tTPR9JaKy0S5H3evQpO/h2uWs2j6Yc/fjkpTQ==", + "version": "1.14.1", + "resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-parser/-/wasm-parser-1.14.1.tgz", + "integrity": "sha512-JLBl+KZ0R5qB7mCnud/yyX08jWFw5MsoalJ1pQ4EdFlgj9VdXKGuENGsiCIjegI1W7p91rUlcB/LB5yRJKNTcQ==", + "license": "MIT", "dependencies": { - "@webassemblyjs/ast": "1.11.6", - "@webassemblyjs/helper-api-error": "1.11.6", - "@webassemblyjs/helper-wasm-bytecode": "1.11.6", - "@webassemblyjs/ieee754": "1.11.6", - "@webassemblyjs/leb128": "1.11.6", - "@webassemblyjs/utf8": "1.11.6" + "@webassemblyjs/ast": "1.14.1", + "@webassemblyjs/helper-api-error": "1.13.2", + "@webassemblyjs/helper-wasm-bytecode": "1.13.2", + "@webassemblyjs/ieee754": "1.13.2", + "@webassemblyjs/leb128": "1.13.2", + "@webassemblyjs/utf8": "1.13.2" } }, "node_modules/@webassemblyjs/wast-printer": { - "version": "1.11.6", - "resolved": "https://registry.npmjs.org/@webassemblyjs/wast-printer/-/wast-printer-1.11.6.tgz", - "integrity": "sha512-JM7AhRcE+yW2GWYaKeHL5vt4xqee5N2WcezptmgyhNS+ScggqcT1OtXykhAb13Sn5Yas0j2uv9tHgrjwvzAP4A==", + "version": "1.14.1", + "resolved": "https://registry.npmjs.org/@webassemblyjs/wast-printer/-/wast-printer-1.14.1.tgz", + "integrity": "sha512-kPSSXE6De1XOR820C90RIo2ogvZG+c3KiHzqUoO/F34Y2shGzesfqv7o57xrxovZJH/MetF5UjroJ/R/3isoiw==", + "license": "MIT", "dependencies": { - "@webassemblyjs/ast": "1.11.6", + "@webassemblyjs/ast": "1.14.1", "@xtuc/long": "4.2.2" } }, "node_modules/@xtuc/ieee754": { "version": "1.2.0", "resolved": "https://registry.npmjs.org/@xtuc/ieee754/-/ieee754-1.2.0.tgz", - "integrity": "sha512-DX8nKgqcGwsc0eJSqYt5lwP4DH5FlHnmuWWBRy7X0NcaGR0ZtuyeESgMwTYVEtxmsNGY+qit4QYT/MIYTOTPeA==" + "integrity": "sha512-DX8nKgqcGwsc0eJSqYt5lwP4DH5FlHnmuWWBRy7X0NcaGR0ZtuyeESgMwTYVEtxmsNGY+qit4QYT/MIYTOTPeA==", + "license": "BSD-3-Clause" }, 
"node_modules/@xtuc/long": { "version": "4.2.2", "resolved": "https://registry.npmjs.org/@xtuc/long/-/long-4.2.2.tgz", - "integrity": "sha512-NuHqBY1PB/D8xU6s/thBgOAiAP7HOYDQ32+BFZILJ8ivkUkAHQnWfn6WhL79Owj1qmUnoN/YPhktdIoucipkAQ==" + "integrity": "sha512-NuHqBY1PB/D8xU6s/thBgOAiAP7HOYDQ32+BFZILJ8ivkUkAHQnWfn6WhL79Owj1qmUnoN/YPhktdIoucipkAQ==", + "license": "Apache-2.0" }, "node_modules/accepts": { "version": "1.3.8", @@ -4283,9 +4105,10 @@ } }, "node_modules/acorn": { - "version": "8.11.3", - "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.11.3.tgz", - "integrity": "sha512-Y9rRfJG5jcKOE0CLisYbojUjIrIEE7AGMzA/Sm4BslANhbS+cDMpgBdcPT91oJ7OuJ9hYJBx59RjbhxVnrF8Xg==", + "version": "8.14.1", + "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.14.1.tgz", + "integrity": "sha512-OvQ/2pUDKmgfCg++xsTX1wGxfTaszcHVcTctW4UJB4hibJx2HXxxO5UmVgyjMa+ZDsiaf5wWLXYpRWMmBI0QHg==", + "license": "MIT", "bin": { "acorn": "bin/acorn" }, @@ -4293,14 +4116,6 @@ "node": ">=0.4.0" } }, - "node_modules/acorn-import-assertions": { - "version": "1.9.0", - "resolved": "https://registry.npmjs.org/acorn-import-assertions/-/acorn-import-assertions-1.9.0.tgz", - "integrity": "sha512-cmMwop9x+8KFhxvKrKfPYmN6/pKTYYHBqLa0DfvVZcKMJWNyWLnaqND7dx/qn66R7ewM1UX5XMaDVP5wlVTaVA==", - "peerDependencies": { - "acorn": "^8" - } - }, "node_modules/acorn-jsx": { "version": "5.3.2", "resolved": "https://registry.npmjs.org/acorn-jsx/-/acorn-jsx-5.3.2.tgz", @@ -4530,9 +4345,9 @@ } }, "node_modules/autoprefixer": { - "version": "10.4.19", - "resolved": "https://registry.npmjs.org/autoprefixer/-/autoprefixer-10.4.19.tgz", - "integrity": "sha512-BaENR2+zBZ8xXhM4pUaKUxlVdxZ0EZhjvbopwnXmxRUfqDmwSpC2lAi/QXvx7NRdPCo1WKEcEF6mV64si1z4Ew==", + "version": "10.4.21", + "resolved": "https://registry.npmjs.org/autoprefixer/-/autoprefixer-10.4.21.tgz", + "integrity": "sha512-O+A6LWV5LDHSJD3LjHYoNi4VLsj/Whi7k6zG12xTYaU4cQ8oxQGckXNX8cRHK5yOZ/ppVHe0ZBXGzSV9jXdVbQ==", "funding": [ { "type": "opencollective", @@ -4547,12 +4362,13 @@ "url": "https://github.com/sponsors/ai" } ], + "license": "MIT", "dependencies": { - "browserslist": "^4.23.0", - "caniuse-lite": "^1.0.30001599", + "browserslist": "^4.24.4", + "caniuse-lite": "^1.0.30001702", "fraction.js": "^4.3.7", "normalize-range": "^0.1.2", - "picocolors": "^1.0.0", + "picocolors": "^1.1.1", "postcss-value-parser": "^4.2.0" }, "bin": { @@ -4566,9 +4382,10 @@ } }, "node_modules/babel-loader": { - "version": "9.1.3", - "resolved": "https://registry.npmjs.org/babel-loader/-/babel-loader-9.1.3.tgz", - "integrity": "sha512-xG3ST4DglodGf8qSwv0MdeWLhrDsw/32QMdTO5T1ZIp9gQur0HkCyFs7Awskr10JKXFXwpAhiCuYX5oGXnRGbw==", + "version": "9.2.1", + "resolved": "https://registry.npmjs.org/babel-loader/-/babel-loader-9.2.1.tgz", + "integrity": "sha512-fqe8naHt46e0yIdkjUZYqddSXfej3AHajX+CSO5X7oy0EmPc6o5Xh+RClNoHjnieWz9AW4kZxW9yyFMhVB1QLA==", + "license": "MIT", "dependencies": { "find-cache-dir": "^4.0.0", "schema-utils": "^4.0.0" @@ -4590,12 +4407,13 @@ } }, "node_modules/babel-plugin-polyfill-corejs2": { - "version": "0.4.7", - "resolved": "https://registry.npmjs.org/babel-plugin-polyfill-corejs2/-/babel-plugin-polyfill-corejs2-0.4.7.tgz", - "integrity": "sha512-LidDk/tEGDfuHW2DWh/Hgo4rmnw3cduK6ZkOI1NPFceSK3n/yAGeOsNT7FLnSGHkXj3RHGSEVkN3FsCTY6w2CQ==", + "version": "0.4.13", + "resolved": "https://registry.npmjs.org/babel-plugin-polyfill-corejs2/-/babel-plugin-polyfill-corejs2-0.4.13.tgz", + "integrity": "sha512-3sX/eOms8kd3q2KZ6DAhKPc0dgm525Gqq5NtWKZ7QYYZEv57OQ54KtblzJzH1lQF/eQxO8KjWGIK9IPUJNus5g==", + "license": 
"MIT", "dependencies": { "@babel/compat-data": "^7.22.6", - "@babel/helper-define-polyfill-provider": "^0.4.4", + "@babel/helper-define-polyfill-provider": "^0.6.4", "semver": "^6.3.1" }, "peerDependencies": { @@ -4606,28 +4424,31 @@ "version": "6.3.1", "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", + "license": "ISC", "bin": { "semver": "bin/semver.js" } }, "node_modules/babel-plugin-polyfill-corejs3": { - "version": "0.8.7", - "resolved": "https://registry.npmjs.org/babel-plugin-polyfill-corejs3/-/babel-plugin-polyfill-corejs3-0.8.7.tgz", - "integrity": "sha512-KyDvZYxAzkC0Aj2dAPyDzi2Ym15e5JKZSK+maI7NAwSqofvuFglbSsxE7wUOvTg9oFVnHMzVzBKcqEb4PJgtOA==", + "version": "0.11.1", + "resolved": "https://registry.npmjs.org/babel-plugin-polyfill-corejs3/-/babel-plugin-polyfill-corejs3-0.11.1.tgz", + "integrity": "sha512-yGCqvBT4rwMczo28xkH/noxJ6MZ4nJfkVYdoDaC/utLtWrXxv27HVrzAeSbqR8SxDsp46n0YF47EbHoixy6rXQ==", + "license": "MIT", "dependencies": { - "@babel/helper-define-polyfill-provider": "^0.4.4", - "core-js-compat": "^3.33.1" + "@babel/helper-define-polyfill-provider": "^0.6.3", + "core-js-compat": "^3.40.0" }, "peerDependencies": { "@babel/core": "^7.4.0 || ^8.0.0-0 <8.0.0" } }, "node_modules/babel-plugin-polyfill-regenerator": { - "version": "0.5.4", - "resolved": "https://registry.npmjs.org/babel-plugin-polyfill-regenerator/-/babel-plugin-polyfill-regenerator-0.5.4.tgz", - "integrity": "sha512-S/x2iOCvDaCASLYsOOgWOq4bCfKYVqvO/uxjkaYyZ3rVsVE3CeAI/c84NpyuBBymEgNvHgjEot3a9/Z/kXvqsg==", + "version": "0.6.4", + "resolved": "https://registry.npmjs.org/babel-plugin-polyfill-regenerator/-/babel-plugin-polyfill-regenerator-0.6.4.tgz", + "integrity": "sha512-7gD3pRadPrbjhjLyxebmx/WrFYcuSjZ0XbdUujQMZ/fcE9oeewk2U/7PCvez84UeuK3oSjmPZ0Ch0dlupQvGzw==", + "license": "MIT", "dependencies": { - "@babel/helper-define-polyfill-provider": "^0.4.4" + "@babel/helper-define-polyfill-provider": "^0.6.4" }, "peerDependencies": { "@babel/core": "^7.4.0 || ^8.0.0-0 <8.0.0" @@ -4669,20 +4490,21 @@ } }, "node_modules/body-parser": { - "version": "1.20.1", - "resolved": "https://registry.npmjs.org/body-parser/-/body-parser-1.20.1.tgz", - "integrity": "sha512-jWi7abTbYwajOytWCQc37VulmWiRae5RyTpaCyDcS5/lMdtwSz5lOpDE67srw/HYe35f1z3fDQw+3txg7gNtWw==", + "version": "1.20.3", + "resolved": "https://registry.npmjs.org/body-parser/-/body-parser-1.20.3.tgz", + "integrity": "sha512-7rAxByjUMqQ3/bHJy7D6OGXvx/MMc4IqBn/X0fcM1QUcAItpZrBEYhWGem+tzXH90c+G01ypMcYJBO9Y30203g==", + "license": "MIT", "dependencies": { "bytes": "3.1.2", - "content-type": "~1.0.4", + "content-type": "~1.0.5", "debug": "2.6.9", "depd": "2.0.0", "destroy": "1.2.0", "http-errors": "2.0.0", "iconv-lite": "0.4.24", "on-finished": "2.4.1", - "qs": "6.11.0", - "raw-body": "2.5.1", + "qs": "6.13.0", + "raw-body": "2.5.2", "type-is": "~1.6.18", "unpipe": "1.0.0" }, @@ -4695,6 +4517,7 @@ "version": "3.1.2", "resolved": "https://registry.npmjs.org/bytes/-/bytes-3.1.2.tgz", "integrity": "sha512-/Nf7TyzTx6S3yRJObOAV7956r8cr2+Oj8AC5dt8wSP3BQAoeX58NoHyCU8P8zGkNXStjTSi6fzO6F0pBdcYbEg==", + "license": "MIT", "engines": { "node": ">= 0.8" } @@ -4703,6 +4526,7 @@ "version": "2.6.9", "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", + "license": "MIT", "dependencies": { "ms": "2.0.0" } @@ -4710,7 +4534,8 @@ 
"node_modules/body-parser/node_modules/ms": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", - "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==" + "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==", + "license": "MIT" }, "node_modules/bonjour-service": { "version": "1.1.1", @@ -4759,20 +4584,21 @@ } }, "node_modules/braces": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.2.tgz", - "integrity": "sha512-b8um+L1RzM3WDSzvhm6gIz1yfTbBt6YTlcEKAvsmqCZZFw46z626lVj9j1yEPW33H5H+lBQpZMP1k8l+78Ha0A==", + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.3.tgz", + "integrity": "sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA==", + "license": "MIT", "dependencies": { - "fill-range": "^7.0.1" + "fill-range": "^7.1.1" }, "engines": { "node": ">=8" } }, "node_modules/browserslist": { - "version": "4.23.0", - "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.23.0.tgz", - "integrity": "sha512-QW8HiM1shhT2GuzkvklfjcKDiWFXHOeFCIA/huJPwHsslwcydgk7X+z2zXpEijP98UCY7HbubZt5J2Zgvf0CaQ==", + "version": "4.25.0", + "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.25.0.tgz", + "integrity": "sha512-PJ8gYKeS5e/whHBh8xrwYK+dAvEj7JXtz6uTucnMRB8OiGTsKccFekoRrjajPBHV8oOY+2tI4uxeceSimKwMFA==", "funding": [ { "type": "opencollective", @@ -4787,11 +4613,12 @@ "url": "https://github.com/sponsors/ai" } ], + "license": "MIT", "dependencies": { - "caniuse-lite": "^1.0.30001587", - "electron-to-chromium": "^1.4.668", - "node-releases": "^2.0.14", - "update-browserslist-db": "^1.0.13" + "caniuse-lite": "^1.0.30001718", + "electron-to-chromium": "^1.5.160", + "node-releases": "^2.0.19", + "update-browserslist-db": "^1.1.3" }, "bin": { "browserslist": "cli.js" @@ -4862,6 +4689,35 @@ "url": "https://github.com/sponsors/ljharb" } }, + "node_modules/call-bind-apply-helpers": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/call-bind-apply-helpers/-/call-bind-apply-helpers-1.0.2.tgz", + "integrity": "sha512-Sp1ablJ0ivDkSzjcaJdxEunN5/XvksFJ2sMBFfq6x0ryhQV/2b/KwFe21cMpmHtPOSij8K99/wSfoEuTObmuMQ==", + "license": "MIT", + "dependencies": { + "es-errors": "^1.3.0", + "function-bind": "^1.1.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/call-bound": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/call-bound/-/call-bound-1.0.4.tgz", + "integrity": "sha512-+ys997U96po4Kx/ABpBCqhA9EuxJaQWDQg7295H4hBphv3IZg0boBKuwYpt4YXp6MZ5AmZQnU/tyMTlRpaSejg==", + "license": "MIT", + "dependencies": { + "call-bind-apply-helpers": "^1.0.2", + "get-intrinsic": "^1.3.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, "node_modules/callsites": { "version": "3.1.0", "resolved": "https://registry.npmjs.org/callsites/-/callsites-3.1.0.tgz", @@ -4910,9 +4766,9 @@ } }, "node_modules/caniuse-lite": { - "version": "1.0.30001617", - "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001617.tgz", - "integrity": "sha512-mLyjzNI9I+Pix8zwcrpxEbGlfqOkF9kM3ptzmKNw5tizSyYwMe+nGLTqMK9cO+0E+Bh6TsBxNAaHWEM8xwSsmA==", + "version": "1.0.30001721", + "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001721.tgz", + "integrity": 
"sha512-cOuvmUVtKrtEaoKiO0rSc29jcjwMwX5tOHDy4MgVFEWiUXj4uBMJkwI8MDySkgXidpMiHUcviogAvFi4pA2hDQ==", "funding": [ { "type": "opencollective", @@ -4926,7 +4782,8 @@ "type": "github", "url": "https://github.com/sponsors/ai" } - ] + ], + "license": "CC-BY-4.0" }, "node_modules/ccount": { "version": "2.0.1", @@ -5215,7 +5072,8 @@ "node_modules/colorette": { "version": "2.0.20", "resolved": "https://registry.npmjs.org/colorette/-/colorette-2.0.20.tgz", - "integrity": "sha512-IfEDxwoWIjkeXL1eXcDiow4UbKjhLdq6/EuSVR9GMN7KVH3r9gQ83e73hsz1Nd1T3ijd5xv1wcWRYO+D6kCI2w==" + "integrity": "sha512-IfEDxwoWIjkeXL1eXcDiow4UbKjhLdq6/EuSVR9GMN7KVH3r9gQ83e73hsz1Nd1T3ijd5xv1wcWRYO+D6kCI2w==", + "license": "MIT" }, "node_modules/combine-promises": { "version": "1.2.0", @@ -5358,6 +5216,7 @@ "version": "1.0.5", "resolved": "https://registry.npmjs.org/content-type/-/content-type-1.0.5.tgz", "integrity": "sha512-nTjqfcBFEipKdXCv4YDQWCfmcLZKm81ldF0pAopTvyrFGVbcR6P/VAAd5G7N+0tTr8QqiU0tFadD6FK4NtJwOA==", + "license": "MIT", "engines": { "node": ">= 0.6" } @@ -5368,9 +5227,10 @@ "integrity": "sha512-Kvp459HrV2FEJ1CAsi1Ku+MY3kasH19TFykTz2xWmMeq6bk2NU3XXvfJ+Q61m0xktWwt+1HSYf3JZsTms3aRJg==" }, "node_modules/cookie": { - "version": "0.5.0", - "resolved": "https://registry.npmjs.org/cookie/-/cookie-0.5.0.tgz", - "integrity": "sha512-YZ3GUyn/o8gfKJlnlX7g7xq4gyO6OSuhGPKaaGssGB2qgDUS0gPgtTvoyZLTt9Ab6dC4hfc9dV5arkvc/OCmrw==", + "version": "0.7.1", + "resolved": "https://registry.npmjs.org/cookie/-/cookie-0.7.1.tgz", + "integrity": "sha512-6DnInpx7SJ2AK3+CTUE/ZM0vWTUboZCegxhC2xiIydHR9jNuTAASBrfEpHhiGOZw/nX51bHt6YQl8jsGo4y/0w==", + "license": "MIT", "engines": { "node": ">= 0.6" } @@ -5466,11 +5326,12 @@ } }, "node_modules/core-js-compat": { - "version": "3.35.0", - "resolved": "https://registry.npmjs.org/core-js-compat/-/core-js-compat-3.35.0.tgz", - "integrity": "sha512-5blwFAddknKeNgsjBzilkdQ0+YK8L1PfqPYq40NOYMYFSS38qj+hpTcLLWwpIwA2A5bje/x5jmVn2tzUMg9IVw==", + "version": "3.42.0", + "resolved": "https://registry.npmjs.org/core-js-compat/-/core-js-compat-3.42.0.tgz", + "integrity": "sha512-bQasjMfyDGyaeWKBIu33lHh9qlSR0MFE/Nmc6nMjf/iU9b3rSMdAYz1Baxrv4lPdGUsTqZudHA4jIGSJy0SWZQ==", + "license": "MIT", "dependencies": { - "browserslist": "^4.22.2" + "browserslist": "^4.24.4" }, "funding": { "type": "opencollective", @@ -5492,6 +5353,15 @@ "resolved": "https://registry.npmjs.org/core-util-is/-/core-util-is-1.0.3.tgz", "integrity": "sha512-ZQBvi1DcpJ4GDqanjucZ2Hj3wEO5pZDS89BWbkcrvdxksJorwUDDZamX9ldFkp9aw2lmBDLgkObEA4DWNJ9FYQ==" }, + "node_modules/cose-base": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/cose-base/-/cose-base-1.0.3.tgz", + "integrity": "sha512-s9whTXInMSgAp/NVXVNuVxVKzGH2qck3aQlVHxDCdAEPgtMKwc4Wq6/QKhgdEdgbLSi9rBTAcPoRa6JpiG4ksg==", + "license": "MIT", + "dependencies": { + "layout-base": "^1.0.0" + } + }, "node_modules/cosmiconfig": { "version": "8.3.6", "resolved": "https://registry.npmjs.org/cosmiconfig/-/cosmiconfig-8.3.6.tgz", @@ -5518,9 +5388,10 @@ } }, "node_modules/cross-spawn": { - "version": "7.0.3", - "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.3.tgz", - "integrity": "sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w==", + "version": "7.0.6", + "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.6.tgz", + "integrity": "sha512-uV2QOWP2nWzsy2aMp8aRibhi9dlzF5Hgh5SHaB9OiTGEyDTiJJyx0uy51QXdyWbtAHNua4XJzUKca3OzKUd3vA==", + "license": "MIT", "dependencies": { "path-key": "^3.1.0", "shebang-command": "^2.0.0", 
@@ -5823,503 +5694,557 @@ "resolved": "https://registry.npmjs.org/csstype/-/csstype-3.1.3.tgz", "integrity": "sha512-M1uQkMl8rQK/szD0LNhtqxIPLpimGm8sOBwU7lLnCpSbTyY3yeU1Vc7l4KT5zT4s/yOxHH5O7tIuuLOCnLADRw==" }, - "node_modules/debounce": { - "version": "1.2.1", - "resolved": "https://registry.npmjs.org/debounce/-/debounce-1.2.1.tgz", - "integrity": "sha512-XRRe6Glud4rd/ZGQfiV1ruXSfbvfJedlV9Y6zOlP+2K04vBYiJEte6stfFkCP03aMnY5tsipamumUjL14fofug==" + "node_modules/cytoscape": { + "version": "3.32.0", + "resolved": "https://registry.npmjs.org/cytoscape/-/cytoscape-3.32.0.tgz", + "integrity": "sha512-5JHBC9n75kz5851jeklCPmZWcg3hUe6sjqJvyk3+hVqFaKcHwHgxsjeN1yLmggoUc6STbtm9/NQyabQehfjvWQ==", + "license": "MIT", + "engines": { + "node": ">=0.10" + } }, - "node_modules/debug": { - "version": "4.3.4", - "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.4.tgz", - "integrity": "sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ==", + "node_modules/cytoscape-cose-bilkent": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/cytoscape-cose-bilkent/-/cytoscape-cose-bilkent-4.1.0.tgz", + "integrity": "sha512-wgQlVIUJF13Quxiv5e1gstZ08rnZj2XaLHGoFMYXz7SkNfCDOOteKBE6SYRfA9WxxI/iBc3ajfDoc6hb/MRAHQ==", + "license": "MIT", "dependencies": { - "ms": "2.1.2" + "cose-base": "^1.0.0" }, - "engines": { - "node": ">=6.0" + "peerDependencies": { + "cytoscape": "^3.2.0" + } + }, + "node_modules/d3": { + "version": "7.9.0", + "resolved": "https://registry.npmjs.org/d3/-/d3-7.9.0.tgz", + "integrity": "sha512-e1U46jVP+w7Iut8Jt8ri1YsPOvFpg46k+K8TpCb0P+zjCkjkPnV7WzfDJzMHy1LnA+wj5pLT1wjO901gLXeEhA==", + "license": "ISC", + "dependencies": { + "d3-array": "3", + "d3-axis": "3", + "d3-brush": "3", + "d3-chord": "3", + "d3-color": "3", + "d3-contour": "4", + "d3-delaunay": "6", + "d3-dispatch": "3", + "d3-drag": "3", + "d3-dsv": "3", + "d3-ease": "3", + "d3-fetch": "3", + "d3-force": "3", + "d3-format": "3", + "d3-geo": "3", + "d3-hierarchy": "3", + "d3-interpolate": "3", + "d3-path": "3", + "d3-polygon": "3", + "d3-quadtree": "3", + "d3-random": "3", + "d3-scale": "4", + "d3-scale-chromatic": "3", + "d3-selection": "3", + "d3-shape": "3", + "d3-time": "3", + "d3-time-format": "4", + "d3-timer": "3", + "d3-transition": "3", + "d3-zoom": "3" }, - "peerDependenciesMeta": { - "supports-color": { - "optional": true - } + "engines": { + "node": ">=12" } }, - "node_modules/decode-named-character-reference": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/decode-named-character-reference/-/decode-named-character-reference-1.0.2.tgz", - "integrity": "sha512-O8x12RzrUF8xyVcY0KJowWsmaJxQbmy0/EtnNtHRpsOcT7dFk5W598coHqBVpmWo1oQQfsCqfCmkZN5DJrZVdg==", + "node_modules/d3-array": { + "version": "3.2.4", + "resolved": "https://registry.npmjs.org/d3-array/-/d3-array-3.2.4.tgz", + "integrity": "sha512-tdQAmyA18i4J7wprpYq8ClcxZy3SC31QMeByyCFyRt7BVHdREQZ5lpzoe5mFEYZUWe+oq8HBvk9JjpibyEV4Jg==", + "license": "ISC", "dependencies": { - "character-entities": "^2.0.0" + "internmap": "1 - 2" }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/wooorm" + "engines": { + "node": ">=12" } }, - "node_modules/decompress-response": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/decompress-response/-/decompress-response-6.0.0.tgz", - "integrity": "sha512-aW35yZM6Bb/4oJlZncMH2LCoZtJXTRxES17vE3hoRiowU2kWHaJKFkSBDnDR+cm9J+9QhXmREyIfv0pji9ejCQ==", + "node_modules/d3-axis": { + "version": "3.0.0", + "resolved": 
"https://registry.npmjs.org/d3-axis/-/d3-axis-3.0.0.tgz", + "integrity": "sha512-IH5tgjV4jE/GhHkRV0HiVYPDtvfjHQlQfJHs0usq7M30XcSBvOotpmH1IgkcXsO/5gEQZD43B//fc7SRT5S+xw==", + "license": "ISC", + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-brush": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/d3-brush/-/d3-brush-3.0.0.tgz", + "integrity": "sha512-ALnjWlVYkXsVIGlOsuWH1+3udkYFI48Ljihfnh8FZPF2QS9o+PzGLBslO0PjzVoHLZ2KCVgAM8NVkXPJB2aNnQ==", + "license": "ISC", "dependencies": { - "mimic-response": "^3.1.0" + "d3-dispatch": "1 - 3", + "d3-drag": "2 - 3", + "d3-interpolate": "1 - 3", + "d3-selection": "3", + "d3-transition": "3" }, "engines": { - "node": ">=10" + "node": ">=12" + } + }, + "node_modules/d3-chord": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/d3-chord/-/d3-chord-3.0.1.tgz", + "integrity": "sha512-VE5S6TNa+j8msksl7HwjxMHDM2yNK3XCkusIlpX5kwauBfXuyLAtNg9jCp/iHH61tgI4sb6R/EIMWCqEIdjT/g==", + "license": "ISC", + "dependencies": { + "d3-path": "1 - 3" }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" + "engines": { + "node": ">=12" } }, - "node_modules/decompress-response/node_modules/mimic-response": { + "node_modules/d3-color": { "version": "3.1.0", - "resolved": "https://registry.npmjs.org/mimic-response/-/mimic-response-3.1.0.tgz", - "integrity": "sha512-z0yWI+4FDrrweS8Zmt4Ej5HdJmky15+L2e6Wgn3+iK5fWzb6T3fhNFq2+MeTRb064c6Wr4N/wv0DzQTjNzHNGQ==", + "resolved": "https://registry.npmjs.org/d3-color/-/d3-color-3.1.0.tgz", + "integrity": "sha512-zg/chbXyeBtMQ1LbD/WSoW2DpC3I0mpmPdW+ynRTj/x2DAWYrIY7qeZIHidozwV24m4iavr15lNwIwLxRmOxhA==", + "license": "ISC", "engines": { - "node": ">=10" + "node": ">=12" + } + }, + "node_modules/d3-contour": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/d3-contour/-/d3-contour-4.0.2.tgz", + "integrity": "sha512-4EzFTRIikzs47RGmdxbeUvLWtGedDUNkTcmzoeyg4sP/dvCexO47AaQL7VKy/gul85TOxw+IBgA8US2xwbToNA==", + "license": "ISC", + "dependencies": { + "d3-array": "^3.2.0" }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" + "engines": { + "node": ">=12" } }, - "node_modules/deep-extend": { - "version": "0.6.0", - "resolved": "https://registry.npmjs.org/deep-extend/-/deep-extend-0.6.0.tgz", - "integrity": "sha512-LOHxIOaPYdHlJRtCQfDIVZtfw/ufM8+rVj649RIHzcm/vGwQRXFt6OPqIFWsm2XEMrNIEtWR64sY1LEKD2vAOA==", + "node_modules/d3-delaunay": { + "version": "6.0.4", + "resolved": "https://registry.npmjs.org/d3-delaunay/-/d3-delaunay-6.0.4.tgz", + "integrity": "sha512-mdjtIZ1XLAM8bm/hx3WwjfHt6Sggek7qH043O8KEjDXN40xi3vx/6pYSVTwLjEgiXQTbvaouWKynLBiUZ6SK6A==", + "license": "ISC", + "dependencies": { + "delaunator": "5" + }, "engines": { - "node": ">=4.0.0" + "node": ">=12" } }, - "node_modules/deepmerge": { - "version": "4.3.1", - "resolved": "https://registry.npmjs.org/deepmerge/-/deepmerge-4.3.1.tgz", - "integrity": "sha512-3sUqbMEc77XqpdNO7FRyRog+eW3ph+GYCbj+rK+uYyRMuwsVy0rMiVtPn+QJlKFvWP/1PYpapqYn0Me2knFn+A==", + "node_modules/d3-dispatch": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/d3-dispatch/-/d3-dispatch-3.0.1.tgz", + "integrity": "sha512-rzUyPU/S7rwUflMyLc1ETDeBj0NRuHKKAcvukozwhshr6g6c5d8zh4c2gQjY2bZ0dXeGLWc1PF174P2tVvKhfg==", + "license": "ISC", "engines": { - "node": ">=0.10.0" + "node": ">=12" } }, - "node_modules/default-gateway": { - "version": "6.0.3", - "resolved": "https://registry.npmjs.org/default-gateway/-/default-gateway-6.0.3.tgz", - "integrity": 
"sha512-fwSOJsbbNzZ/CUFpqFBqYfYNLj1NbMPm8MMCIzHjC83iSJRBEGmDUxU+WP661BaBQImeC2yHwXtz+P/O9o+XEg==", + "node_modules/d3-drag": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/d3-drag/-/d3-drag-3.0.0.tgz", + "integrity": "sha512-pWbUJLdETVA8lQNJecMxoXfH6x+mO2UQo8rSmZ+QqxcbyA3hfeprFgIT//HW2nlHChWeIIMwS2Fq+gEARkhTkg==", + "license": "ISC", "dependencies": { - "execa": "^5.0.0" + "d3-dispatch": "1 - 3", + "d3-selection": "3" }, "engines": { - "node": ">= 10" + "node": ">=12" } }, - "node_modules/defer-to-connect": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/defer-to-connect/-/defer-to-connect-2.0.1.tgz", - "integrity": "sha512-4tvttepXG1VaYGrRibk5EwJd1t4udunSOVMdLSAL6mId1ix438oPwPZMALY41FCijukO1L0twNcGsdzS7dHgDg==", + "node_modules/d3-dsv": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/d3-dsv/-/d3-dsv-3.0.1.tgz", + "integrity": "sha512-UG6OvdI5afDIFP9w4G0mNq50dSOsXHJaRE8arAS5o9ApWnIElp8GZw1Dun8vP8OyHOZ/QJUKUJwxiiCCnUwm+Q==", + "license": "ISC", + "dependencies": { + "commander": "7", + "iconv-lite": "0.6", + "rw": "1" + }, + "bin": { + "csv2json": "bin/dsv2json.js", + "csv2tsv": "bin/dsv2dsv.js", + "dsv2dsv": "bin/dsv2dsv.js", + "dsv2json": "bin/dsv2json.js", + "json2csv": "bin/json2dsv.js", + "json2dsv": "bin/json2dsv.js", + "json2tsv": "bin/json2dsv.js", + "tsv2csv": "bin/dsv2dsv.js", + "tsv2json": "bin/dsv2json.js" + }, "engines": { - "node": ">=10" + "node": ">=12" } }, - "node_modules/define-data-property": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/define-data-property/-/define-data-property-1.1.1.tgz", - "integrity": "sha512-E7uGkTzkk1d0ByLeSc6ZsFS79Axg+m1P/VsgYsxHgiuc3tFSj+MjMIwe90FC4lOAZzNBdY7kkO2P2wKdsQ1vgQ==", + "node_modules/d3-dsv/node_modules/commander": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/commander/-/commander-7.2.0.tgz", + "integrity": "sha512-QrWXB+ZQSVPmIWIhtEO9H+gwHaMGYiF5ChvoJ+K9ZGHG/sVsa6yiesAD1GC/x46sET00Xlwo1u49RVVVzvcSkw==", + "license": "MIT", + "engines": { + "node": ">= 10" + } + }, + "node_modules/d3-dsv/node_modules/iconv-lite": { + "version": "0.6.3", + "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.6.3.tgz", + "integrity": "sha512-4fCk79wshMdzMp2rH06qWrJE4iolqLhCUH+OiuIgU++RB0+94NlDL81atO7GX55uUKueo0txHNtvEyI6D7WdMw==", + "license": "MIT", "dependencies": { - "get-intrinsic": "^1.2.1", - "gopd": "^1.0.1", - "has-property-descriptors": "^1.0.0" + "safer-buffer": ">= 2.1.2 < 3.0.0" }, "engines": { - "node": ">= 0.4" + "node": ">=0.10.0" } }, - "node_modules/define-lazy-prop": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/define-lazy-prop/-/define-lazy-prop-2.0.0.tgz", - "integrity": "sha512-Ds09qNh8yw3khSjiJjiUInaGX9xlqZDY7JVryGxdxV7NPeuqQfplOpQ66yJFZut3jLa5zOwkXw1g9EI2uKh4Og==", + "node_modules/d3-ease": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/d3-ease/-/d3-ease-3.0.1.tgz", + "integrity": "sha512-wR/XK3D3XcLIZwpbvQwQ5fK+8Ykds1ip7A2Txe0yxncXSdq1L9skcG7blcedkOX+ZcgxGAmLX1FrRGbADwzi0w==", + "license": "BSD-3-Clause", "engines": { - "node": ">=8" + "node": ">=12" } }, - "node_modules/define-properties": { - "version": "1.2.1", - "resolved": "https://registry.npmjs.org/define-properties/-/define-properties-1.2.1.tgz", - "integrity": "sha512-8QmQKqEASLd5nx0U1B1okLElbUuuttJ/AnYmRXbbbGDWh6uS208EjD4Xqq/I9wK7u0v6O08XhTWnt5XtEbR6Dg==", + "node_modules/d3-fetch": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/d3-fetch/-/d3-fetch-3.0.1.tgz", + "integrity": 
"sha512-kpkQIM20n3oLVBKGg6oHrUchHM3xODkTzjMoj7aWQFq5QEM+R6E4WkzT5+tojDY7yjez8KgCBRoj4aEr99Fdqw==", + "license": "ISC", "dependencies": { - "define-data-property": "^1.0.1", - "has-property-descriptors": "^1.0.0", - "object-keys": "^1.1.1" + "d3-dsv": "1 - 3" }, "engines": { - "node": ">= 0.4" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" + "node": ">=12" } }, - "node_modules/del": { - "version": "6.1.1", - "resolved": "https://registry.npmjs.org/del/-/del-6.1.1.tgz", - "integrity": "sha512-ua8BhapfP0JUJKC/zV9yHHDW/rDoDxP4Zhn3AkA6/xT6gY7jYXJiaeyBZznYVujhZZET+UgcbZiQ7sN3WqcImg==", + "node_modules/d3-force": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/d3-force/-/d3-force-3.0.0.tgz", + "integrity": "sha512-zxV/SsA+U4yte8051P4ECydjD/S+qeYtnaIyAs9tgHCqfguma/aAQDjo85A9Z6EKhBirHRJHXIgJUlffT4wdLg==", + "license": "ISC", "dependencies": { - "globby": "^11.0.1", - "graceful-fs": "^4.2.4", - "is-glob": "^4.0.1", - "is-path-cwd": "^2.2.0", - "is-path-inside": "^3.0.2", - "p-map": "^4.0.0", - "rimraf": "^3.0.2", - "slash": "^3.0.0" + "d3-dispatch": "1 - 3", + "d3-quadtree": "1 - 3", + "d3-timer": "1 - 3" }, "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" + "node": ">=12" } }, - "node_modules/depd": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/depd/-/depd-2.0.0.tgz", - "integrity": "sha512-g7nH6P6dyDioJogAAGprGpCtVImJhpPk/roCzdb3fIh61/s/nPsfR6onyMwkCAR/OlC3yBC0lESvUoQEAssIrw==", + "node_modules/d3-format": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/d3-format/-/d3-format-3.1.0.tgz", + "integrity": "sha512-YyUI6AEuY/Wpt8KWLgZHsIU86atmikuoOmCfommt0LYHiQSPjvX2AcFc38PX0CBpr2RCyZhjex+NS/LPOv6YqA==", + "license": "ISC", "engines": { - "node": ">= 0.8" + "node": ">=12" } }, - "node_modules/dequal": { - "version": "2.0.3", - "resolved": "https://registry.npmjs.org/dequal/-/dequal-2.0.3.tgz", - "integrity": "sha512-0je+qPKHEMohvfRTCEo3CrPG6cAzAYgmzKyxRiYSSDkS6eGJdyVJm7WaYA5ECaAD9wLB2T4EEeymA5aFVcYXCA==", + "node_modules/d3-geo": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/d3-geo/-/d3-geo-3.1.1.tgz", + "integrity": "sha512-637ln3gXKXOwhalDzinUgY83KzNWZRKbYubaG+fGVuc/dxO64RRljtCTnf5ecMyE1RIdtqpkVcq0IbtU2S8j2Q==", + "license": "ISC", + "dependencies": { + "d3-array": "2.5.0 - 3" + }, "engines": { - "node": ">=6" + "node": ">=12" } }, - "node_modules/destroy": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/destroy/-/destroy-1.2.0.tgz", - "integrity": "sha512-2sJGJTaXIIaR1w4iJSNoN0hnMY7Gpc/n8D4qSCJw8QqFWXf7cuAgnEHxBpweaVcPevC2l3KpjYCx3NypQQgaJg==", + "node_modules/d3-hierarchy": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/d3-hierarchy/-/d3-hierarchy-3.1.2.tgz", + "integrity": "sha512-FX/9frcub54beBdugHjDCdikxThEqjnR93Qt7PvQTOHxyiNCAlvMrHhclk3cD5VeAaq9fxmfRp+CnWw9rEMBuA==", + "license": "ISC", "engines": { - "node": ">= 0.8", - "npm": "1.2.8000 || >= 1.4.16" + "node": ">=12" } }, - "node_modules/detect-node": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/detect-node/-/detect-node-2.1.0.tgz", - "integrity": "sha512-T0NIuQpnTvFDATNuHN5roPwSBG83rFsuO+MXXH9/3N1eFbn4wcPjttvjMLEPWJ0RGUYgQE7cGgS3tNxbqCGM7g==" - }, - "node_modules/detect-port": { - "version": "1.5.1", - "resolved": "https://registry.npmjs.org/detect-port/-/detect-port-1.5.1.tgz", - "integrity": "sha512-aBzdj76lueB6uUst5iAs7+0H/oOjqI5D16XUWxlWMIMROhcM0rfsNVk93zTngq1dDNpoXRr++Sus7ETAExppAQ==", + "node_modules/d3-interpolate": { + "version": 
"3.0.1", + "resolved": "https://registry.npmjs.org/d3-interpolate/-/d3-interpolate-3.0.1.tgz", + "integrity": "sha512-3bYs1rOD33uo8aqJfKP3JWPAibgw8Zm2+L9vBKEHJ2Rg+viTR7o5Mmv5mZcieN+FRYaAOWX5SJATX6k1PWz72g==", + "license": "ISC", "dependencies": { - "address": "^1.0.1", - "debug": "4" + "d3-color": "1 - 3" }, - "bin": { - "detect": "bin/detect-port.js", - "detect-port": "bin/detect-port.js" + "engines": { + "node": ">=12" } }, - "node_modules/detect-port-alt": { - "version": "1.1.6", - "resolved": "https://registry.npmjs.org/detect-port-alt/-/detect-port-alt-1.1.6.tgz", - "integrity": "sha512-5tQykt+LqfJFBEYaDITx7S7cR7mJ/zQmLXZ2qt5w04ainYZw6tBf9dBunMjVeVOdYVRUzUOE4HkY5J7+uttb5Q==", - "dependencies": { - "address": "^1.0.1", - "debug": "^2.6.0" - }, - "bin": { - "detect": "bin/detect-port", - "detect-port": "bin/detect-port" - }, + "node_modules/d3-path": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/d3-path/-/d3-path-3.1.0.tgz", + "integrity": "sha512-p3KP5HCf/bvjBSSKuXid6Zqijx7wIfNW+J/maPs+iwR35at5JCbLUT0LzF1cnjbCHWhqzQTIN2Jpe8pRebIEFQ==", + "license": "ISC", "engines": { - "node": ">= 4.2.1" + "node": ">=12" } }, - "node_modules/detect-port-alt/node_modules/debug": { - "version": "2.6.9", - "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", - "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", - "dependencies": { - "ms": "2.0.0" + "node_modules/d3-polygon": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/d3-polygon/-/d3-polygon-3.0.1.tgz", + "integrity": "sha512-3vbA7vXYwfe1SYhED++fPUQlWSYTTGmFmQiany/gdbiWgU/iEyQzyymwL9SkJjFFuCS4902BSzewVGsHHmHtXg==", + "license": "ISC", + "engines": { + "node": ">=12" } }, - "node_modules/detect-port-alt/node_modules/ms": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", - "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==" - }, - "node_modules/devlop": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/devlop/-/devlop-1.1.0.tgz", - "integrity": "sha512-RWmIqhcFf1lRYBvNmr7qTNuyCt/7/ns2jbpp1+PalgE/rDQcBT0fioSMUpJ93irlUhC5hrg4cYqe6U+0ImW0rA==", - "dependencies": { - "dequal": "^2.0.0" - }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/wooorm" + "node_modules/d3-quadtree": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/d3-quadtree/-/d3-quadtree-3.0.1.tgz", + "integrity": "sha512-04xDrxQTDTCFwP5H6hRhsRcb9xxv2RzkcsygFzmkSIOJy3PeRJP7sNk3VRIbKXcog561P9oU0/rVH6vDROAgUw==", + "license": "ISC", + "engines": { + "node": ">=12" } }, - "node_modules/didyoumean": { - "version": "1.2.2", - "resolved": "https://registry.npmjs.org/didyoumean/-/didyoumean-1.2.2.tgz", - "integrity": "sha512-gxtyfqMg7GKyhQmb056K7M3xszy/myH8w+B4RT+QXBQsvAOdc3XymqDDPHx1BgPgsdAA5SIifona89YtRATDzw==" - }, - "node_modules/dir-glob": { + "node_modules/d3-random": { "version": "3.0.1", - "resolved": "https://registry.npmjs.org/dir-glob/-/dir-glob-3.0.1.tgz", - "integrity": "sha512-WkrWp9GR4KXfKGYzOLmTuGVi1UWFfws377n9cc55/tb6DuqyF6pcQ5AbiHEshaDpY9v6oaSr2XCDidGmMwdzIA==", - "dependencies": { - "path-type": "^4.0.0" - }, + "resolved": "https://registry.npmjs.org/d3-random/-/d3-random-3.0.1.tgz", + "integrity": "sha512-FXMe9GfxTxqd5D6jFsQ+DJ8BJS4E/fT5mqqdjovykEB2oFbTMDVdg1MGFxfQW+FBOGoB++k8swBrgwSHT1cUXQ==", + "license": "ISC", "engines": { - "node": ">=8" + "node": ">=12" } }, - "node_modules/dlv": { - "version": "1.1.3", - "resolved": 
"https://registry.npmjs.org/dlv/-/dlv-1.1.3.tgz", - "integrity": "sha512-+HlytyjlPKnIG8XuRG8WvmBP8xs8P71y+SKKS6ZXWoEgLuePxtDoUEiH7WkdePWrQ5JBpE6aoVqfZfJUQkjXwA==" - }, - "node_modules/dns-equal": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/dns-equal/-/dns-equal-1.0.0.tgz", - "integrity": "sha512-z+paD6YUQsk+AbGCEM4PrOXSss5gd66QfcVBFTKR/HpFL9jCqikS94HYwKww6fQyO7IxrIIyUu+g0Ka9tUS2Cg==" - }, - "node_modules/dns-packet": { - "version": "5.6.1", - "resolved": "https://registry.npmjs.org/dns-packet/-/dns-packet-5.6.1.tgz", - "integrity": "sha512-l4gcSouhcgIKRvyy99RNVOgxXiicE+2jZoNmaNmZ6JXiGajBOJAesk1OBlJuM5k2c+eudGdLxDqXuPCKIj6kpw==", + "node_modules/d3-sankey": { + "version": "0.12.3", + "resolved": "https://registry.npmjs.org/d3-sankey/-/d3-sankey-0.12.3.tgz", + "integrity": "sha512-nQhsBRmM19Ax5xEIPLMY9ZmJ/cDvd1BG3UVvt5h3WRxKg5zGRbvnteTyWAbzeSvlh3tW7ZEmq4VwR5mB3tutmQ==", + "license": "BSD-3-Clause", "dependencies": { - "@leichtgewicht/ip-codec": "^2.0.1" - }, - "engines": { - "node": ">=6" + "d3-array": "1 - 2", + "d3-shape": "^1.2.0" } }, - "node_modules/docusaurus-pushfeedback": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/docusaurus-pushfeedback/-/docusaurus-pushfeedback-1.0.0.tgz", - "integrity": "sha512-caf9C8bRpGhzd7foJ0WqSLTSZonPgAFLPEN0U+lKYpiFLVJOHnEK3gOn26kibhhqrVx7FrF2fRBHLO5kJbnkhg==", - "peerDependencies": { - "@docusaurus/core": "3.x" + "node_modules/d3-sankey/node_modules/d3-array": { + "version": "2.12.1", + "resolved": "https://registry.npmjs.org/d3-array/-/d3-array-2.12.1.tgz", + "integrity": "sha512-B0ErZK/66mHtEsR1TkPEEkwdy+WDesimkM5gpZr5Dsg54BiTA5RXtYW5qTLIAcekaS9xfZrzBLF/OAkB3Qn1YQ==", + "license": "BSD-3-Clause", + "dependencies": { + "internmap": "^1.0.0" } }, - "node_modules/docusaurus-theme-github-codeblock": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/docusaurus-theme-github-codeblock/-/docusaurus-theme-github-codeblock-2.0.1.tgz", - "integrity": "sha512-qwLgDVThQidqBZLyEmyV0MpOkQHY7XOwAkPZv05feTezeywtmLL+BC6czEl4poEZJf5YQXI3bAkF0UW/3Mv8Ng==", + "node_modules/d3-sankey/node_modules/d3-path": { + "version": "1.0.9", + "resolved": "https://registry.npmjs.org/d3-path/-/d3-path-1.0.9.tgz", + "integrity": "sha512-VLaYcn81dtHVTjEHd8B+pbe9yHWpXKZUC87PzoFmsFrJqgFwDe/qxfp5MlfsfM1V5E/iVt0MmEbWQ7FVIXh/bg==", + "license": "BSD-3-Clause" + }, + "node_modules/d3-sankey/node_modules/d3-shape": { + "version": "1.3.7", + "resolved": "https://registry.npmjs.org/d3-shape/-/d3-shape-1.3.7.tgz", + "integrity": "sha512-EUkvKjqPFUAZyOlhY5gzCxCeI0Aep04LwIRpsZ/mLFelJiUfnK56jo5JMDSE7yyP2kLSb6LtF+S5chMk7uqPqw==", + "license": "BSD-3-Clause", "dependencies": { - "@docusaurus/types": "^3.0.0" + "d3-path": "1" } }, - "node_modules/dom-converter": { - "version": "0.2.0", - "resolved": "https://registry.npmjs.org/dom-converter/-/dom-converter-0.2.0.tgz", - "integrity": "sha512-gd3ypIPfOMr9h5jIKq8E3sHOTCjeirnl0WK5ZdS1AW0Odt0b1PaWaHdJ4Qk4klv+YB9aJBS7mESXjFoDQPu6DA==", + "node_modules/d3-sankey/node_modules/internmap": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/internmap/-/internmap-1.0.1.tgz", + "integrity": "sha512-lDB5YccMydFBtasVtxnZ3MRBHuaoE8GKsppq+EchKL2U4nK/DmEpPHNH8MZe5HkMtpSiTSOZwfN0tzYjO/lJEw==", + "license": "ISC" + }, + "node_modules/d3-scale": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/d3-scale/-/d3-scale-4.0.2.tgz", + "integrity": "sha512-GZW464g1SH7ag3Y7hXjf8RoUuAFIqklOAq3MRl4OaWabTFJY9PN/E1YklhXLh+OQ3fM9yS2nOkCoS+WLZ6kvxQ==", + "license": "ISC", "dependencies": { - "utila": 
"~0.4" + "d3-array": "2.10.0 - 3", + "d3-format": "1 - 3", + "d3-interpolate": "1.2.0 - 3", + "d3-time": "2.1.1 - 3", + "d3-time-format": "2 - 4" + }, + "engines": { + "node": ">=12" } }, - "node_modules/dom-serializer": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/dom-serializer/-/dom-serializer-2.0.0.tgz", - "integrity": "sha512-wIkAryiqt/nV5EQKqQpo3SToSOV9J0DnbJqwK7Wv/Trc92zIAYZ4FlMu+JPFW1DfGFt81ZTCGgDEabffXeLyJg==", + "node_modules/d3-scale-chromatic": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/d3-scale-chromatic/-/d3-scale-chromatic-3.1.0.tgz", + "integrity": "sha512-A3s5PWiZ9YCXFye1o246KoscMWqf8BsD9eRiJ3He7C9OBaxKhAd5TFCdEx/7VbKtxxTsu//1mMJFrEt572cEyQ==", + "license": "ISC", "dependencies": { - "domelementtype": "^2.3.0", - "domhandler": "^5.0.2", - "entities": "^4.2.0" + "d3-color": "1 - 3", + "d3-interpolate": "1 - 3" }, - "funding": { - "url": "https://github.com/cheeriojs/dom-serializer?sponsor=1" + "engines": { + "node": ">=12" } }, - "node_modules/domelementtype": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/domelementtype/-/domelementtype-2.3.0.tgz", - "integrity": "sha512-OLETBj6w0OsagBwdXnPdN0cnMfF9opN69co+7ZrbfPGrdpPVNBUj02spi6B1N7wChLQiPn4CSH/zJvXw56gmHw==", - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/fb55" - } - ] + "node_modules/d3-selection": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/d3-selection/-/d3-selection-3.0.0.tgz", + "integrity": "sha512-fmTRWbNMmsmWq6xJV8D19U/gw/bwrHfNXxrIN+HfZgnzqTHp9jOmKMhsTUjXOJnZOdZY9Q28y4yebKzqDKlxlQ==", + "license": "ISC", + "engines": { + "node": ">=12" + } }, - "node_modules/domhandler": { - "version": "5.0.3", - "resolved": "https://registry.npmjs.org/domhandler/-/domhandler-5.0.3.tgz", - "integrity": "sha512-cgwlv/1iFQiFnU96XXgROh8xTeetsnJiDsTc7TYCLFd9+/WNkIqPTxiM/8pSd8VIrhXGTf1Ny1q1hquVqDJB5w==", + "node_modules/d3-shape": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/d3-shape/-/d3-shape-3.2.0.tgz", + "integrity": "sha512-SaLBuwGm3MOViRq2ABk3eLoxwZELpH6zhl3FbAoJ7Vm1gofKx6El1Ib5z23NUEhF9AsGl7y+dzLe5Cw2AArGTA==", + "license": "ISC", "dependencies": { - "domelementtype": "^2.3.0" + "d3-path": "^3.1.0" }, "engines": { - "node": ">= 4" - }, - "funding": { - "url": "https://github.com/fb55/domhandler?sponsor=1" + "node": ">=12" } }, - "node_modules/domutils": { + "node_modules/d3-time": { "version": "3.1.0", - "resolved": "https://registry.npmjs.org/domutils/-/domutils-3.1.0.tgz", - "integrity": "sha512-H78uMmQtI2AhgDJjWeQmHwJJ2bLPD3GMmO7Zja/ZZh84wkm+4ut+IUnUdRa8uCGX88DiVx1j6FRe1XfxEgjEZA==", + "resolved": "https://registry.npmjs.org/d3-time/-/d3-time-3.1.0.tgz", + "integrity": "sha512-VqKjzBLejbSMT4IgbmVgDjpkYrNWUYJnbCGo874u7MMKIWsILRX+OpX/gTk8MqjpT1A/c6HY2dCA77ZN0lkQ2Q==", + "license": "ISC", "dependencies": { - "dom-serializer": "^2.0.0", - "domelementtype": "^2.3.0", - "domhandler": "^5.0.3" + "d3-array": "2 - 3" }, - "funding": { - "url": "https://github.com/fb55/domutils?sponsor=1" + "engines": { + "node": ">=12" } }, - "node_modules/dot-case": { - "version": "3.0.4", - "resolved": "https://registry.npmjs.org/dot-case/-/dot-case-3.0.4.tgz", - "integrity": "sha512-Kv5nKlh6yRrdrGvxeJ2e5y2eRUpkUosIW4A2AS38zwSz27zu7ufDwQPi5Jhs3XAlGNetl3bmnGhQsMtkKJnj3w==", + "node_modules/d3-time-format": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/d3-time-format/-/d3-time-format-4.1.0.tgz", + "integrity": 
"sha512-dJxPBlzC7NugB2PDLwo9Q8JiTR3M3e4/XANkreKSUxF8vvXKqm1Yfq4Q5dl8budlunRVlUUaDUgFt7eA8D6NLg==", + "license": "ISC", "dependencies": { - "no-case": "^3.0.4", - "tslib": "^2.0.3" + "d3-time": "1 - 3" + }, + "engines": { + "node": ">=12" } }, - "node_modules/dot-prop": { - "version": "6.0.1", - "resolved": "https://registry.npmjs.org/dot-prop/-/dot-prop-6.0.1.tgz", - "integrity": "sha512-tE7ztYzXHIeyvc7N+hR3oi7FIbf/NIjVP9hmAt3yMXzrQ072/fpjGLx2GxNxGxUl5V73MEqYzioOMoVhGMJ5cA==", + "node_modules/d3-timer": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/d3-timer/-/d3-timer-3.0.1.tgz", + "integrity": "sha512-ndfJ/JxxMd3nw31uyKoY2naivF+r29V+Lc0svZxe1JvvIRmi8hUsrMvdOwgS1o6uBHmiz91geQ0ylPP0aj1VUA==", + "license": "ISC", + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-transition": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/d3-transition/-/d3-transition-3.0.1.tgz", + "integrity": "sha512-ApKvfjsSR6tg06xrL434C0WydLr7JewBB3V+/39RMHsaXTOG0zmt/OAXeng5M5LBm0ojmxJrpomQVZ1aPvBL4w==", + "license": "ISC", "dependencies": { - "is-obj": "^2.0.0" + "d3-color": "1 - 3", + "d3-dispatch": "1 - 3", + "d3-ease": "1 - 3", + "d3-interpolate": "1 - 3", + "d3-timer": "1 - 3" }, "engines": { - "node": ">=10" + "node": ">=12" }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" + "peerDependencies": { + "d3-selection": "2 - 3" } }, - "node_modules/dot-prop/node_modules/is-obj": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/is-obj/-/is-obj-2.0.0.tgz", - "integrity": "sha512-drqDG3cbczxxEJRoOXcOjtdp1J/lyp1mNn0xaznRs8+muBhgQcrnbspox5X5fOw0HnMnbfDzvnEMEtqDEJEo8w==", + "node_modules/d3-zoom": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/d3-zoom/-/d3-zoom-3.0.0.tgz", + "integrity": "sha512-b8AmV3kfQaqWAuacbPuNbL6vahnOJflOhexLzMMNLga62+/nh0JzvJ0aO/5a5MVgUFGS7Hu1P9P03o3fJkDCyw==", + "license": "ISC", + "dependencies": { + "d3-dispatch": "1 - 3", + "d3-drag": "2 - 3", + "d3-interpolate": "1 - 3", + "d3-selection": "2 - 3", + "d3-transition": "2 - 3" + }, "engines": { - "node": ">=8" + "node": ">=12" } }, - "node_modules/duplexer": { - "version": "0.1.2", - "resolved": "https://registry.npmjs.org/duplexer/-/duplexer-0.1.2.tgz", - "integrity": "sha512-jtD6YG370ZCIi/9GTaJKQxWTZD045+4R4hTk/x1UyoqadyJ9x9CgSi1RlVDQF8U2sxLLSnFkCaMihqljHIWgMg==" - }, - "node_modules/eastasianwidth": { - "version": "0.2.0", - "resolved": "https://registry.npmjs.org/eastasianwidth/-/eastasianwidth-0.2.0.tgz", - "integrity": "sha512-I88TYZWc9XiYHRQ4/3c5rjjfgkjhLyW2luGIheGERbNQ6OY7yTybanSpDXZa8y7VUP9YmDcYa+eyq4ca7iLqWA==" - }, - "node_modules/ee-first": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/ee-first/-/ee-first-1.1.1.tgz", - "integrity": "sha512-WMwm9LhRUo+WUaRN+vRuETqG89IgZphVSNkdFgeb6sS/E4OrDIN7t48CAewSHXc6C8lefD8KKfr5vY61brQlow==" - }, - "node_modules/electron-to-chromium": { - "version": "1.4.763", - "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.4.763.tgz", - "integrity": "sha512-k4J8NrtJ9QrvHLRo8Q18OncqBCB7tIUyqxRcJnlonQ0ioHKYB988GcDFF3ZePmnb8eHEopDs/wPHR/iGAFgoUQ==" + "node_modules/dagre-d3-es": { + "version": "7.0.10", + "resolved": "https://registry.npmjs.org/dagre-d3-es/-/dagre-d3-es-7.0.10.tgz", + "integrity": "sha512-qTCQmEhcynucuaZgY5/+ti3X/rnszKZhEQH/ZdWdtP1tA/y3VoHJzcVrO9pjjJCNpigfscAtoUB5ONcd2wNn0A==", + "license": "MIT", + "dependencies": { + "d3": "^7.8.2", + "lodash-es": "^4.17.21" + } }, - "node_modules/emoji-regex": { - "version": "9.2.2", - "resolved": 
"https://registry.npmjs.org/emoji-regex/-/emoji-regex-9.2.2.tgz", - "integrity": "sha512-L18DaJsXSUk2+42pv8mLs5jJT2hqFkFE4j21wOmgbUqsZ2hL72NsUU785g9RXgo3s0ZNgVl42TiHp3ZtOv/Vyg==" + "node_modules/dayjs": { + "version": "1.11.13", + "resolved": "https://registry.npmjs.org/dayjs/-/dayjs-1.11.13.tgz", + "integrity": "sha512-oaMBel6gjolK862uaPQOVTA7q3TZhuSvuMQAAglQDOWYO9A91IrAOUJEyKVlqJlHE0vq5p5UXxzdPfMH/x6xNg==", + "license": "MIT" }, - "node_modules/emojilib": { - "version": "2.4.0", - "resolved": "https://registry.npmjs.org/emojilib/-/emojilib-2.4.0.tgz", - "integrity": "sha512-5U0rVMU5Y2n2+ykNLQqMoqklN9ICBT/KsvC1Gz6vqHbz2AXXGkG+Pm5rMWk/8Vjrr/mY9985Hi8DYzn1F09Nyw==" + "node_modules/debounce": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/debounce/-/debounce-1.2.1.tgz", + "integrity": "sha512-XRRe6Glud4rd/ZGQfiV1ruXSfbvfJedlV9Y6zOlP+2K04vBYiJEte6stfFkCP03aMnY5tsipamumUjL14fofug==" }, - "node_modules/emojis-list": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/emojis-list/-/emojis-list-3.0.0.tgz", - "integrity": "sha512-/kyM18EfinwXZbno9FyUGeFh87KC8HRQBQGildHZbEuRyWFOmv1U10o9BBp8XVZDVNNuQKyIGIu5ZYAAXJ0V2Q==", + "node_modules/debug": { + "version": "4.4.1", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.1.tgz", + "integrity": "sha512-KcKCqiftBJcZr++7ykoDIEwSa3XWowTfNPo92BYxjXiyYEVrUQh2aLyhxBCwww+heortUFxEJYcRzosstTEBYQ==", + "license": "MIT", + "dependencies": { + "ms": "^2.1.3" + }, "engines": { - "node": ">= 4" + "node": ">=6.0" + }, + "peerDependenciesMeta": { + "supports-color": { + "optional": true + } } }, - "node_modules/emoticon": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/emoticon/-/emoticon-4.0.1.tgz", - "integrity": "sha512-dqx7eA9YaqyvYtUhJwT4rC1HIp82j5ybS1/vQ42ur+jBe17dJMwZE4+gvL1XadSFfxaPFFGt3Xsw+Y8akThDlw==", + "node_modules/decode-named-character-reference": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/decode-named-character-reference/-/decode-named-character-reference-1.0.2.tgz", + "integrity": "sha512-O8x12RzrUF8xyVcY0KJowWsmaJxQbmy0/EtnNtHRpsOcT7dFk5W598coHqBVpmWo1oQQfsCqfCmkZN5DJrZVdg==", + "dependencies": { + "character-entities": "^2.0.0" + }, "funding": { "type": "github", "url": "https://github.com/sponsors/wooorm" } }, - "node_modules/encodeurl": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/encodeurl/-/encodeurl-1.0.2.tgz", - "integrity": "sha512-TPJXq8JqFaVYm2CWmPvnP2Iyo4ZSM7/QKcSmuMLDObfpH5fi7RUGmd/rTDf+rut/saiDiQEeVTNgAmJEdAOx0w==", - "engines": { - "node": ">= 0.8" - } - }, - "node_modules/enhanced-resolve": { - "version": "5.15.0", - "resolved": "https://registry.npmjs.org/enhanced-resolve/-/enhanced-resolve-5.15.0.tgz", - "integrity": "sha512-LXYT42KJ7lpIKECr2mAXIaMldcNCh/7E0KBKOu4KSfkHmP+mZmSs+8V5gBAqisWBy0OO4W5Oyys0GO1Y8KtdKg==", + "node_modules/decompress-response": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/decompress-response/-/decompress-response-6.0.0.tgz", + "integrity": "sha512-aW35yZM6Bb/4oJlZncMH2LCoZtJXTRxES17vE3hoRiowU2kWHaJKFkSBDnDR+cm9J+9QhXmREyIfv0pji9ejCQ==", "dependencies": { - "graceful-fs": "^4.2.4", - "tapable": "^2.2.0" - }, - "engines": { - "node": ">=10.13.0" - } - }, - "node_modules/entities": { - "version": "4.5.0", - "resolved": "https://registry.npmjs.org/entities/-/entities-4.5.0.tgz", - "integrity": "sha512-V0hjH4dGPh9Ao5p0MoRY6BVqtwCjhz6vI5LT8AJ55H+4g9/4vbHx1I54fS0XuclLhDHArPQCiMjDxjaL8fPxhw==", - "engines": { - "node": ">=0.12" + "mimic-response": "^3.1.0" }, - "funding": { - "url": 
"https://github.com/fb55/entities?sponsor=1" - } - }, - "node_modules/error-ex": { - "version": "1.3.2", - "resolved": "https://registry.npmjs.org/error-ex/-/error-ex-1.3.2.tgz", - "integrity": "sha512-7dFHNmqeFSEt2ZBsCriorKnn3Z2pj+fd9kmI6QoWw4//DL+icEBfc0U7qJCisqrTsKTjw4fNFy2pW9OqStD84g==", - "dependencies": { - "is-arrayish": "^0.2.1" - } - }, - "node_modules/es-module-lexer": { - "version": "1.4.1", - "resolved": "https://registry.npmjs.org/es-module-lexer/-/es-module-lexer-1.4.1.tgz", - "integrity": "sha512-cXLGjP0c4T3flZJKQSuziYoq7MlT+rnvfZjfp7h+I7K9BNX54kP9nyWvdbwjQ4u1iWbOL4u96fgeZLToQlZC7w==" - }, - "node_modules/escalade": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.1.1.tgz", - "integrity": "sha512-k0er2gUkLf8O0zKJiAhmkTnJlTvINGv7ygDNPbeIsX/TJjGJZHuh9B2UxbsaEkmlEo9MfhrSzmhIlhRlI2GXnw==", "engines": { - "node": ">=6" - } - }, - "node_modules/escape-goat": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/escape-goat/-/escape-goat-4.0.0.tgz", - "integrity": "sha512-2Sd4ShcWxbx6OY1IHyla/CVNwvg7XwZVoXZHcSu9w9SReNP1EzzD5T8NWKIR38fIqEns9kDWKUQTXXAmlDrdPg==", - "engines": { - "node": ">=12" + "node": ">=10" }, "funding": { "url": "https://github.com/sponsors/sindresorhus" } }, - "node_modules/escape-html": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/escape-html/-/escape-html-1.0.3.tgz", - "integrity": "sha512-NiSupZ4OeuGwr68lGIeym/ksIZMJodUGOSCZ/FSnTxcrekbvqrgdUxlJOMpijaKZVjAJrWrGs/6Jy8OMuyj9ow==" - }, - "node_modules/escape-string-regexp": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-4.0.0.tgz", - "integrity": "sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA==", + "node_modules/decompress-response/node_modules/mimic-response": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/mimic-response/-/mimic-response-3.1.0.tgz", + "integrity": "sha512-z0yWI+4FDrrweS8Zmt4Ej5HdJmky15+L2e6Wgn3+iK5fWzb6T3fhNFq2+MeTRb064c6Wr4N/wv0DzQTjNzHNGQ==", "engines": { "node": ">=10" }, @@ -6327,275 +6252,170 @@ "url": "https://github.com/sponsors/sindresorhus" } }, - "node_modules/eslint-scope": { - "version": "5.1.1", - "resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-5.1.1.tgz", - "integrity": "sha512-2NxwbF/hZ0KpepYN0cNbo+FN6XoK7GaHlQhgx/hIZl6Va0bF45RQOOwhLIy8lQDbuCiadSLCBnH2CFYquit5bw==", - "dependencies": { - "esrecurse": "^4.3.0", - "estraverse": "^4.1.1" - }, + "node_modules/deep-extend": { + "version": "0.6.0", + "resolved": "https://registry.npmjs.org/deep-extend/-/deep-extend-0.6.0.tgz", + "integrity": "sha512-LOHxIOaPYdHlJRtCQfDIVZtfw/ufM8+rVj649RIHzcm/vGwQRXFt6OPqIFWsm2XEMrNIEtWR64sY1LEKD2vAOA==", "engines": { - "node": ">=8.0.0" + "node": ">=4.0.0" } }, - "node_modules/esprima": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/esprima/-/esprima-4.0.1.tgz", - "integrity": "sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A==", - "bin": { - "esparse": "bin/esparse.js", - "esvalidate": "bin/esvalidate.js" - }, + "node_modules/deepmerge": { + "version": "4.3.1", + "resolved": "https://registry.npmjs.org/deepmerge/-/deepmerge-4.3.1.tgz", + "integrity": "sha512-3sUqbMEc77XqpdNO7FRyRog+eW3ph+GYCbj+rK+uYyRMuwsVy0rMiVtPn+QJlKFvWP/1PYpapqYn0Me2knFn+A==", "engines": { - "node": ">=4" + "node": ">=0.10.0" } }, - "node_modules/esrecurse": { - "version": "4.3.0", - "resolved": 
"https://registry.npmjs.org/esrecurse/-/esrecurse-4.3.0.tgz", - "integrity": "sha512-KmfKL3b6G+RXvP8N1vr3Tq1kL/oCFgn2NYXEtqP8/L3pKapUA4G8cFVaoF3SU323CD4XypR/ffioHmkti6/Tag==", + "node_modules/default-gateway": { + "version": "6.0.3", + "resolved": "https://registry.npmjs.org/default-gateway/-/default-gateway-6.0.3.tgz", + "integrity": "sha512-fwSOJsbbNzZ/CUFpqFBqYfYNLj1NbMPm8MMCIzHjC83iSJRBEGmDUxU+WP661BaBQImeC2yHwXtz+P/O9o+XEg==", "dependencies": { - "estraverse": "^5.2.0" + "execa": "^5.0.0" }, "engines": { - "node": ">=4.0" - } - }, - "node_modules/esrecurse/node_modules/estraverse": { - "version": "5.3.0", - "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-5.3.0.tgz", - "integrity": "sha512-MMdARuVEQziNTeJD8DgMqmhwR11BRQ/cBP+pLtYdSTnf3MIO8fFeiINEbX36ZdNlfU/7A9f3gUw49B3oQsvwBA==", - "engines": { - "node": ">=4.0" + "node": ">= 10" } }, - "node_modules/estraverse": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-4.3.0.tgz", - "integrity": "sha512-39nnKffWz8xN1BU/2c79n9nB9HDzo0niYUqx6xyqUnyoAnQyyWpOTdZEeiCch8BBu515t4wp9ZmgVfVhn9EBpw==", + "node_modules/defer-to-connect": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/defer-to-connect/-/defer-to-connect-2.0.1.tgz", + "integrity": "sha512-4tvttepXG1VaYGrRibk5EwJd1t4udunSOVMdLSAL6mId1ix438oPwPZMALY41FCijukO1L0twNcGsdzS7dHgDg==", "engines": { - "node": ">=4.0" - } - }, - "node_modules/estree-util-attach-comments": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/estree-util-attach-comments/-/estree-util-attach-comments-3.0.0.tgz", - "integrity": "sha512-cKUwm/HUcTDsYh/9FgnuFqpfquUbwIqwKM26BVCGDPVgvaCl/nDCCjUfiLlx6lsEZ3Z4RFxNbOQ60pkaEwFxGw==", - "dependencies": { - "@types/estree": "^1.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" + "node": ">=10" } }, - "node_modules/estree-util-build-jsx": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/estree-util-build-jsx/-/estree-util-build-jsx-3.0.1.tgz", - "integrity": "sha512-8U5eiL6BTrPxp/CHbs2yMgP8ftMhR5ww1eIKoWRMlqvltHF8fZn5LRDvTKuxD3DUn+shRbLGqXemcP51oFCsGQ==", + "node_modules/define-data-property": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/define-data-property/-/define-data-property-1.1.1.tgz", + "integrity": "sha512-E7uGkTzkk1d0ByLeSc6ZsFS79Axg+m1P/VsgYsxHgiuc3tFSj+MjMIwe90FC4lOAZzNBdY7kkO2P2wKdsQ1vgQ==", "dependencies": { - "@types/estree-jsx": "^1.0.0", - "devlop": "^1.0.0", - "estree-util-is-identifier-name": "^3.0.0", - "estree-walker": "^3.0.0" + "get-intrinsic": "^1.2.1", + "gopd": "^1.0.1", + "has-property-descriptors": "^1.0.0" }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - } - }, - "node_modules/estree-util-is-identifier-name": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/estree-util-is-identifier-name/-/estree-util-is-identifier-name-3.0.0.tgz", - "integrity": "sha512-hFtqIDZTIUZ9BXLb8y4pYGyk6+wekIivNVTcmvk8NoOh+VeRn5y6cEHzbURrWbfp1fIqdVipilzj+lfaadNZmg==", - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" + "engines": { + "node": ">= 0.4" } }, - "node_modules/estree-util-to-js": { + "node_modules/define-lazy-prop": { "version": "2.0.0", - "resolved": "https://registry.npmjs.org/estree-util-to-js/-/estree-util-to-js-2.0.0.tgz", - "integrity": "sha512-WDF+xj5rRWmD5tj6bIqRi6CkLIXbbNQUcxQHzGysQzvHmdYG2G7p/Tf0J0gpxGgkeMZNTIjT/AoSvC9Xehcgdg==", - "dependencies": { - "@types/estree-jsx": "^1.0.0", - 
"astring": "^1.8.0", - "source-map": "^0.7.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" + "resolved": "https://registry.npmjs.org/define-lazy-prop/-/define-lazy-prop-2.0.0.tgz", + "integrity": "sha512-Ds09qNh8yw3khSjiJjiUInaGX9xlqZDY7JVryGxdxV7NPeuqQfplOpQ66yJFZut3jLa5zOwkXw1g9EI2uKh4Og==", + "engines": { + "node": ">=8" } }, - "node_modules/estree-util-value-to-estree": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/estree-util-value-to-estree/-/estree-util-value-to-estree-3.0.1.tgz", - "integrity": "sha512-b2tdzTurEIbwRh+mKrEcaWfu1wgb8J1hVsgREg7FFiecWwK/PhO8X0kyc+0bIcKNtD4sqxIdNoRy6/p/TvECEA==", + "node_modules/define-properties": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/define-properties/-/define-properties-1.2.1.tgz", + "integrity": "sha512-8QmQKqEASLd5nx0U1B1okLElbUuuttJ/AnYmRXbbbGDWh6uS208EjD4Xqq/I9wK7u0v6O08XhTWnt5XtEbR6Dg==", "dependencies": { - "@types/estree": "^1.0.0", - "is-plain-obj": "^4.0.0" + "define-data-property": "^1.0.1", + "has-property-descriptors": "^1.0.0", + "object-keys": "^1.1.1" }, "engines": { - "node": ">=16.0.0" + "node": ">= 0.4" }, "funding": { - "url": "https://github.com/sponsors/remcohaszing" + "url": "https://github.com/sponsors/ljharb" } }, - "node_modules/estree-util-visit": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/estree-util-visit/-/estree-util-visit-2.0.0.tgz", - "integrity": "sha512-m5KgiH85xAhhW8Wta0vShLcUvOsh3LLPI2YVwcbio1l7E09NTLL1EyMZFM1OyWowoH0skScNbhOPl4kcBgzTww==", + "node_modules/del": { + "version": "6.1.1", + "resolved": "https://registry.npmjs.org/del/-/del-6.1.1.tgz", + "integrity": "sha512-ua8BhapfP0JUJKC/zV9yHHDW/rDoDxP4Zhn3AkA6/xT6gY7jYXJiaeyBZznYVujhZZET+UgcbZiQ7sN3WqcImg==", "dependencies": { - "@types/estree-jsx": "^1.0.0", - "@types/unist": "^3.0.0" + "globby": "^11.0.1", + "graceful-fs": "^4.2.4", + "is-glob": "^4.0.1", + "is-path-cwd": "^2.2.0", + "is-path-inside": "^3.0.2", + "p-map": "^4.0.0", + "rimraf": "^3.0.2", + "slash": "^3.0.0" + }, + "engines": { + "node": ">=10" }, "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" + "url": "https://github.com/sponsors/sindresorhus" } }, - "node_modules/estree-walker": { - "version": "3.0.3", - "resolved": "https://registry.npmjs.org/estree-walker/-/estree-walker-3.0.3.tgz", - "integrity": "sha512-7RUKfXgSMMkzt6ZuXmqapOurLGPPfgj6l9uRZ7lRGolvk0y2yocc35LdcxKC5PQZdn2DMqioAQ2NoWcrTKmm6g==", + "node_modules/delaunator": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/delaunator/-/delaunator-5.0.1.tgz", + "integrity": "sha512-8nvh+XBe96aCESrGOqMp/84b13H9cdKbG5P2ejQCh4d4sK9RL4371qou9drQjMhvnPmhWl5hnmqbEE0fXr9Xnw==", + "license": "ISC", "dependencies": { - "@types/estree": "^1.0.0" + "robust-predicates": "^3.0.2" } }, - "node_modules/esutils": { - "version": "2.0.3", - "resolved": "https://registry.npmjs.org/esutils/-/esutils-2.0.3.tgz", - "integrity": "sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g==", + "node_modules/depd": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/depd/-/depd-2.0.0.tgz", + "integrity": "sha512-g7nH6P6dyDioJogAAGprGpCtVImJhpPk/roCzdb3fIh61/s/nPsfR6onyMwkCAR/OlC3yBC0lESvUoQEAssIrw==", + "license": "MIT", "engines": { - "node": ">=0.10.0" + "node": ">= 0.8" } }, - "node_modules/eta": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/eta/-/eta-2.2.0.tgz", - "integrity": 
"sha512-UVQ72Rqjy/ZKQalzV5dCCJP80GrmPrMxh6NlNf+erV6ObL0ZFkhCstWRawS85z3smdr3d2wXPsZEY7rDPfGd2g==", + "node_modules/dequal": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/dequal/-/dequal-2.0.3.tgz", + "integrity": "sha512-0je+qPKHEMohvfRTCEo3CrPG6cAzAYgmzKyxRiYSSDkS6eGJdyVJm7WaYA5ECaAD9wLB2T4EEeymA5aFVcYXCA==", "engines": { - "node": ">=6.0.0" - }, - "funding": { - "url": "https://github.com/eta-dev/eta?sponsor=1" + "node": ">=6" } }, - "node_modules/etag": { - "version": "1.8.1", - "resolved": "https://registry.npmjs.org/etag/-/etag-1.8.1.tgz", - "integrity": "sha512-aIL5Fx7mawVa300al2BnEE4iNvo1qETxLrPI/o05L7z6go7fCw1J6EQmbK4FmJ2AS7kgVF/KEZWufBfdClMcPg==", + "node_modules/destroy": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/destroy/-/destroy-1.2.0.tgz", + "integrity": "sha512-2sJGJTaXIIaR1w4iJSNoN0hnMY7Gpc/n8D4qSCJw8QqFWXf7cuAgnEHxBpweaVcPevC2l3KpjYCx3NypQQgaJg==", + "license": "MIT", "engines": { - "node": ">= 0.6" + "node": ">= 0.8", + "npm": "1.2.8000 || >= 1.4.16" } }, - "node_modules/eval": { - "version": "0.1.8", - "resolved": "https://registry.npmjs.org/eval/-/eval-0.1.8.tgz", - "integrity": "sha512-EzV94NYKoO09GLXGjXj9JIlXijVck4ONSr5wiCWDvhsvj5jxSrzTmRU/9C1DyB6uToszLs8aifA6NQ7lEQdvFw==", - "dependencies": { - "@types/node": "*", - "require-like": ">= 0.1.1" - }, - "engines": { - "node": ">= 0.8" - } - }, - "node_modules/eventemitter3": { - "version": "4.0.7", - "resolved": "https://registry.npmjs.org/eventemitter3/-/eventemitter3-4.0.7.tgz", - "integrity": "sha512-8guHBZCwKnFhYdHr2ysuRWErTwhoN2X8XELRlrRwpmfeY2jjuUN4taQMsULKUVo1K4DvZl+0pgfyoysHxvmvEw==" - }, - "node_modules/events": { - "version": "3.3.0", - "resolved": "https://registry.npmjs.org/events/-/events-3.3.0.tgz", - "integrity": "sha512-mQw+2fkQbALzQ7V0MY0IqdnXNOeTtP4r0lN9z7AAawCXgqea7bDii20AYrIBrFd/Hx0M2Ocz6S111CaFkUcb0Q==", - "engines": { - "node": ">=0.8.x" - } + "node_modules/detect-node": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/detect-node/-/detect-node-2.1.0.tgz", + "integrity": "sha512-T0NIuQpnTvFDATNuHN5roPwSBG83rFsuO+MXXH9/3N1eFbn4wcPjttvjMLEPWJ0RGUYgQE7cGgS3tNxbqCGM7g==" }, - "node_modules/execa": { - "version": "5.1.1", - "resolved": "https://registry.npmjs.org/execa/-/execa-5.1.1.tgz", - "integrity": "sha512-8uSpZZocAZRBAPIEINJj3Lo9HyGitllczc27Eh5YYojjMFMn8yHMDMaUHE2Jqfq05D/wucwI4JGURyXt1vchyg==", + "node_modules/detect-port": { + "version": "1.5.1", + "resolved": "https://registry.npmjs.org/detect-port/-/detect-port-1.5.1.tgz", + "integrity": "sha512-aBzdj76lueB6uUst5iAs7+0H/oOjqI5D16XUWxlWMIMROhcM0rfsNVk93zTngq1dDNpoXRr++Sus7ETAExppAQ==", "dependencies": { - "cross-spawn": "^7.0.3", - "get-stream": "^6.0.0", - "human-signals": "^2.1.0", - "is-stream": "^2.0.0", - "merge-stream": "^2.0.0", - "npm-run-path": "^4.0.1", - "onetime": "^5.1.2", - "signal-exit": "^3.0.3", - "strip-final-newline": "^2.0.0" - }, - "engines": { - "node": ">=10" + "address": "^1.0.1", + "debug": "4" }, - "funding": { - "url": "https://github.com/sindresorhus/execa?sponsor=1" + "bin": { + "detect": "bin/detect-port.js", + "detect-port": "bin/detect-port.js" } }, - "node_modules/express": { - "version": "4.18.2", - "resolved": "https://registry.npmjs.org/express/-/express-4.18.2.tgz", - "integrity": "sha512-5/PsL6iGPdfQ/lKM1UuielYgv3BUoJfz1aUwU9vHZ+J7gyvwdQXFEBIEIaxeGf0GIcreATNyBExtalisDbuMqQ==", + "node_modules/detect-port-alt": { + "version": "1.1.6", + "resolved": "https://registry.npmjs.org/detect-port-alt/-/detect-port-alt-1.1.6.tgz", + "integrity": 
"sha512-5tQykt+LqfJFBEYaDITx7S7cR7mJ/zQmLXZ2qt5w04ainYZw6tBf9dBunMjVeVOdYVRUzUOE4HkY5J7+uttb5Q==", "dependencies": { - "accepts": "~1.3.8", - "array-flatten": "1.1.1", - "body-parser": "1.20.1", - "content-disposition": "0.5.4", - "content-type": "~1.0.4", - "cookie": "0.5.0", - "cookie-signature": "1.0.6", - "debug": "2.6.9", - "depd": "2.0.0", - "encodeurl": "~1.0.2", - "escape-html": "~1.0.3", - "etag": "~1.8.1", - "finalhandler": "1.2.0", - "fresh": "0.5.2", - "http-errors": "2.0.0", - "merge-descriptors": "1.0.1", - "methods": "~1.1.2", - "on-finished": "2.4.1", - "parseurl": "~1.3.3", - "path-to-regexp": "0.1.7", - "proxy-addr": "~2.0.7", - "qs": "6.11.0", - "range-parser": "~1.2.1", - "safe-buffer": "5.2.1", - "send": "0.18.0", - "serve-static": "1.15.0", - "setprototypeof": "1.2.0", - "statuses": "2.0.1", - "type-is": "~1.6.18", - "utils-merge": "1.0.1", - "vary": "~1.1.2" + "address": "^1.0.1", + "debug": "^2.6.0" }, - "engines": { - "node": ">= 0.10.0" - } - }, - "node_modules/express/node_modules/array-flatten": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/array-flatten/-/array-flatten-1.1.1.tgz", - "integrity": "sha512-PCVAQswWemu6UdxsDFFX/+gVeYqKAod3D3UVm91jHwynguOwAvYPhx8nNlM++NqRcK6CxxpUafjmhIdKiHibqg==" - }, - "node_modules/express/node_modules/content-disposition": { - "version": "0.5.4", - "resolved": "https://registry.npmjs.org/content-disposition/-/content-disposition-0.5.4.tgz", - "integrity": "sha512-FveZTNuGw04cxlAiWbzi6zTAL/lhehaWbTtgluJh4/E95DqMwTmha3KZN1aAWA8cFIhHzMZUvLevkw5Rqk+tSQ==", - "dependencies": { - "safe-buffer": "5.2.1" + "bin": { + "detect": "bin/detect-port", + "detect-port": "bin/detect-port" }, "engines": { - "node": ">= 0.6" + "node": ">= 4.2.1" } }, - "node_modules/express/node_modules/debug": { + "node_modules/detect-port-alt/node_modules/debug": { "version": "2.6.9", "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", @@ -6603,2271 +6423,3340 @@ "ms": "2.0.0" } }, - "node_modules/express/node_modules/ms": { + "node_modules/detect-port-alt/node_modules/ms": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==" }, - "node_modules/express/node_modules/path-to-regexp": { - "version": "0.1.7", - "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-0.1.7.tgz", - "integrity": "sha512-5DFkuoqlv1uYQKxy8omFBeJPQcdoE07Kv2sferDCrAq1ohOU+MSDswDIbnx3YAM60qIOnYa53wBhXW0EbMonrQ==" + "node_modules/devlop": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/devlop/-/devlop-1.1.0.tgz", + "integrity": "sha512-RWmIqhcFf1lRYBvNmr7qTNuyCt/7/ns2jbpp1+PalgE/rDQcBT0fioSMUpJ93irlUhC5hrg4cYqe6U+0ImW0rA==", + "dependencies": { + "dequal": "^2.0.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } }, - "node_modules/express/node_modules/range-parser": { - "version": "1.2.1", - "resolved": "https://registry.npmjs.org/range-parser/-/range-parser-1.2.1.tgz", - "integrity": "sha512-Hrgsx+orqoygnmhFbKaHE6c296J+HTAQXoxEF6gNupROmmGJRoyzfG3ccAveqCBrwr/2yxQ5BVd/GTl5agOwSg==", + "node_modules/didyoumean": { + "version": "1.2.2", + "resolved": "https://registry.npmjs.org/didyoumean/-/didyoumean-1.2.2.tgz", + "integrity": "sha512-gxtyfqMg7GKyhQmb056K7M3xszy/myH8w+B4RT+QXBQsvAOdc3XymqDDPHx1BgPgsdAA5SIifona89YtRATDzw==" + }, + 
"node_modules/diff": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/diff/-/diff-5.2.0.tgz", + "integrity": "sha512-uIFDxqpRZGZ6ThOk84hEfqWoHx2devRFvpTZcTHur85vImfaxUbTW9Ryh4CpCuDnToOP1CEtXKIgytHBPVff5A==", + "license": "BSD-3-Clause", "engines": { - "node": ">= 0.6" + "node": ">=0.3.1" } }, - "node_modules/extend": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/extend/-/extend-3.0.2.tgz", - "integrity": "sha512-fjquC59cD7CyW6urNXK0FBufkZcoiGG80wTuPujX590cB5Ttln20E2UB4S/WARVqhXffZl2LNgS+gQdPIIim/g==" - }, - "node_modules/extend-shallow": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-2.0.1.tgz", - "integrity": "sha512-zCnTtlxNoAiDc3gqY2aYAWFx7XWWiasuF2K8Me5WbN8otHKTUKBwjPtNpRs/rbUZm7KxWAaNj7P1a/p52GbVug==", + "node_modules/dir-glob": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/dir-glob/-/dir-glob-3.0.1.tgz", + "integrity": "sha512-WkrWp9GR4KXfKGYzOLmTuGVi1UWFfws377n9cc55/tb6DuqyF6pcQ5AbiHEshaDpY9v6oaSr2XCDidGmMwdzIA==", "dependencies": { - "is-extendable": "^0.1.0" + "path-type": "^4.0.0" }, "engines": { - "node": ">=0.10.0" + "node": ">=8" } }, - "node_modules/fast-deep-equal": { - "version": "3.1.3", - "resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz", - "integrity": "sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==" + "node_modules/dlv": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/dlv/-/dlv-1.1.3.tgz", + "integrity": "sha512-+HlytyjlPKnIG8XuRG8WvmBP8xs8P71y+SKKS6ZXWoEgLuePxtDoUEiH7WkdePWrQ5JBpE6aoVqfZfJUQkjXwA==" }, - "node_modules/fast-glob": { - "version": "3.3.2", - "resolved": "https://registry.npmjs.org/fast-glob/-/fast-glob-3.3.2.tgz", - "integrity": "sha512-oX2ruAFQwf/Orj8m737Y5adxDQO0LAB7/S5MnxCdTNDd4p6BsyIVsv9JQsATbTSq8KHRpLwIHbVlUNatxd+1Ow==", + "node_modules/dns-equal": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/dns-equal/-/dns-equal-1.0.0.tgz", + "integrity": "sha512-z+paD6YUQsk+AbGCEM4PrOXSss5gd66QfcVBFTKR/HpFL9jCqikS94HYwKww6fQyO7IxrIIyUu+g0Ka9tUS2Cg==" + }, + "node_modules/dns-packet": { + "version": "5.6.1", + "resolved": "https://registry.npmjs.org/dns-packet/-/dns-packet-5.6.1.tgz", + "integrity": "sha512-l4gcSouhcgIKRvyy99RNVOgxXiicE+2jZoNmaNmZ6JXiGajBOJAesk1OBlJuM5k2c+eudGdLxDqXuPCKIj6kpw==", "dependencies": { - "@nodelib/fs.stat": "^2.0.2", - "@nodelib/fs.walk": "^1.2.3", - "glob-parent": "^5.1.2", - "merge2": "^1.3.0", - "micromatch": "^4.0.4" + "@leichtgewicht/ip-codec": "^2.0.1" }, "engines": { - "node": ">=8.6.0" + "node": ">=6" } }, - "node_modules/fast-json-stable-stringify": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/fast-json-stable-stringify/-/fast-json-stable-stringify-2.1.0.tgz", - "integrity": "sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw==" + "node_modules/docusaurus-pushfeedback": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/docusaurus-pushfeedback/-/docusaurus-pushfeedback-1.0.0.tgz", + "integrity": "sha512-caf9C8bRpGhzd7foJ0WqSLTSZonPgAFLPEN0U+lKYpiFLVJOHnEK3gOn26kibhhqrVx7FrF2fRBHLO5kJbnkhg==", + "peerDependencies": { + "@docusaurus/core": "3.x" + } }, - "node_modules/fast-url-parser": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/fast-url-parser/-/fast-url-parser-1.1.3.tgz", - "integrity": "sha512-5jOCVXADYNuRkKFzNJ0dCCewsZiYo0dz8QNYljkOpFC6r2U4OBmKtvm/Tsuh4w1YYdDqDb31a8TVhBJ2OJKdqQ==", + 
"node_modules/docusaurus-theme-github-codeblock": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/docusaurus-theme-github-codeblock/-/docusaurus-theme-github-codeblock-2.0.1.tgz", + "integrity": "sha512-qwLgDVThQidqBZLyEmyV0MpOkQHY7XOwAkPZv05feTezeywtmLL+BC6czEl4poEZJf5YQXI3bAkF0UW/3Mv8Ng==", "dependencies": { - "punycode": "^1.3.2" + "@docusaurus/types": "^3.0.0" } }, - "node_modules/fastq": { - "version": "1.16.0", - "resolved": "https://registry.npmjs.org/fastq/-/fastq-1.16.0.tgz", - "integrity": "sha512-ifCoaXsDrsdkWTtiNJX5uzHDsrck5TzfKKDcuFFTIrrc/BS076qgEIfoIy1VeZqViznfKiysPYTh/QeHtnIsYA==", + "node_modules/dom-converter": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/dom-converter/-/dom-converter-0.2.0.tgz", + "integrity": "sha512-gd3ypIPfOMr9h5jIKq8E3sHOTCjeirnl0WK5ZdS1AW0Odt0b1PaWaHdJ4Qk4klv+YB9aJBS7mESXjFoDQPu6DA==", "dependencies": { - "reusify": "^1.0.4" + "utila": "~0.4" } }, - "node_modules/fault": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/fault/-/fault-2.0.1.tgz", - "integrity": "sha512-WtySTkS4OKev5JtpHXnib4Gxiurzh5NCGvWrFaZ34m6JehfTUhKZvn9njTfw48t6JumVQOmrKqpmGcdwxnhqBQ==", + "node_modules/dom-serializer": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/dom-serializer/-/dom-serializer-2.0.0.tgz", + "integrity": "sha512-wIkAryiqt/nV5EQKqQpo3SToSOV9J0DnbJqwK7Wv/Trc92zIAYZ4FlMu+JPFW1DfGFt81ZTCGgDEabffXeLyJg==", "dependencies": { - "format": "^0.2.0" + "domelementtype": "^2.3.0", + "domhandler": "^5.0.2", + "entities": "^4.2.0" }, "funding": { - "type": "github", - "url": "https://github.com/sponsors/wooorm" + "url": "https://github.com/cheeriojs/dom-serializer?sponsor=1" } }, - "node_modules/faye-websocket": { - "version": "0.11.4", - "resolved": "https://registry.npmjs.org/faye-websocket/-/faye-websocket-0.11.4.tgz", - "integrity": "sha512-CzbClwlXAuiRQAlUyfqPgvPoNKTckTPGfwZV4ZdAhVcP2lh9KUxJg2b5GkE7XbjKQ3YJnQ9z6D9ntLAlB+tP8g==", + "node_modules/domelementtype": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/domelementtype/-/domelementtype-2.3.0.tgz", + "integrity": "sha512-OLETBj6w0OsagBwdXnPdN0cnMfF9opN69co+7ZrbfPGrdpPVNBUj02spi6B1N7wChLQiPn4CSH/zJvXw56gmHw==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/fb55" + } + ] + }, + "node_modules/domhandler": { + "version": "5.0.3", + "resolved": "https://registry.npmjs.org/domhandler/-/domhandler-5.0.3.tgz", + "integrity": "sha512-cgwlv/1iFQiFnU96XXgROh8xTeetsnJiDsTc7TYCLFd9+/WNkIqPTxiM/8pSd8VIrhXGTf1Ny1q1hquVqDJB5w==", "dependencies": { - "websocket-driver": ">=0.5.1" + "domelementtype": "^2.3.0" }, "engines": { - "node": ">=0.8.0" + "node": ">= 4" + }, + "funding": { + "url": "https://github.com/fb55/domhandler?sponsor=1" } }, - "node_modules/feed": { - "version": "4.2.2", - "resolved": "https://registry.npmjs.org/feed/-/feed-4.2.2.tgz", - "integrity": "sha512-u5/sxGfiMfZNtJ3OvQpXcvotFpYkL0n9u9mM2vkui2nGo8b4wvDkJ8gAkYqbA8QpGyFCv3RK0Z+Iv+9veCS9bQ==", + "node_modules/dompurify": { + "version": "3.1.6", + "resolved": "https://registry.npmjs.org/dompurify/-/dompurify-3.1.6.tgz", + "integrity": "sha512-cTOAhc36AalkjtBpfG6O8JimdTMWNXjiePT2xQH/ppBGi/4uIpmj8eKyIkMJErXWARyINV/sB38yf8JCLF5pbQ==", + "license": "(MPL-2.0 OR Apache-2.0)" + }, + "node_modules/domutils": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/domutils/-/domutils-3.1.0.tgz", + "integrity": "sha512-H78uMmQtI2AhgDJjWeQmHwJJ2bLPD3GMmO7Zja/ZZh84wkm+4ut+IUnUdRa8uCGX88DiVx1j6FRe1XfxEgjEZA==", "dependencies": { - "xml-js": 
"^1.6.11" + "dom-serializer": "^2.0.0", + "domelementtype": "^2.3.0", + "domhandler": "^5.0.3" }, - "engines": { - "node": ">=0.4.0" + "funding": { + "url": "https://github.com/fb55/domutils?sponsor=1" } }, - "node_modules/file-loader": { - "version": "6.2.0", - "resolved": "https://registry.npmjs.org/file-loader/-/file-loader-6.2.0.tgz", - "integrity": "sha512-qo3glqyTa61Ytg4u73GultjHGjdRyig3tG6lPtyX/jOEJvHif9uB0/OCI2Kif6ctF3caQTW2G5gym21oAsI4pw==", + "node_modules/dot-case": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/dot-case/-/dot-case-3.0.4.tgz", + "integrity": "sha512-Kv5nKlh6yRrdrGvxeJ2e5y2eRUpkUosIW4A2AS38zwSz27zu7ufDwQPi5Jhs3XAlGNetl3bmnGhQsMtkKJnj3w==", "dependencies": { - "loader-utils": "^2.0.0", - "schema-utils": "^3.0.0" + "no-case": "^3.0.4", + "tslib": "^2.0.3" + } + }, + "node_modules/dot-prop": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/dot-prop/-/dot-prop-6.0.1.tgz", + "integrity": "sha512-tE7ztYzXHIeyvc7N+hR3oi7FIbf/NIjVP9hmAt3yMXzrQ072/fpjGLx2GxNxGxUl5V73MEqYzioOMoVhGMJ5cA==", + "dependencies": { + "is-obj": "^2.0.0" }, "engines": { - "node": ">= 10.13.0" + "node": ">=10" }, "funding": { - "type": "opencollective", - "url": "https://opencollective.com/webpack" - }, - "peerDependencies": { - "webpack": "^4.0.0 || ^5.0.0" + "url": "https://github.com/sponsors/sindresorhus" } }, - "node_modules/file-loader/node_modules/ajv": { - "version": "6.12.6", - "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", - "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==", + "node_modules/dot-prop/node_modules/is-obj": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/is-obj/-/is-obj-2.0.0.tgz", + "integrity": "sha512-drqDG3cbczxxEJRoOXcOjtdp1J/lyp1mNn0xaznRs8+muBhgQcrnbspox5X5fOw0HnMnbfDzvnEMEtqDEJEo8w==", + "engines": { + "node": ">=8" + } + }, + "node_modules/dunder-proto": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/dunder-proto/-/dunder-proto-1.0.1.tgz", + "integrity": "sha512-KIN/nDJBQRcXw0MLVhZE9iQHmG68qAVIBg9CqmUYjmQIhgij9U5MFvrqkUL5FbtyyzZuOeOt0zdeRe4UY7ct+A==", + "license": "MIT", "dependencies": { - "fast-deep-equal": "^3.1.1", - "fast-json-stable-stringify": "^2.0.0", - "json-schema-traverse": "^0.4.1", - "uri-js": "^4.2.2" + "call-bind-apply-helpers": "^1.0.1", + "es-errors": "^1.3.0", + "gopd": "^1.2.0" }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/epoberezkin" + "engines": { + "node": ">= 0.4" } }, - "node_modules/file-loader/node_modules/ajv-keywords": { - "version": "3.5.2", - "resolved": "https://registry.npmjs.org/ajv-keywords/-/ajv-keywords-3.5.2.tgz", - "integrity": "sha512-5p6WTN0DdTGVQk6VjcEju19IgaHudalcfabD7yhDGeA6bcQnmL+CpveLJq/3hvfwd1aof6L386Ougkx6RfyMIQ==", - "peerDependencies": { - "ajv": "^6.9.1" - } + "node_modules/duplexer": { + "version": "0.1.2", + "resolved": "https://registry.npmjs.org/duplexer/-/duplexer-0.1.2.tgz", + "integrity": "sha512-jtD6YG370ZCIi/9GTaJKQxWTZD045+4R4hTk/x1UyoqadyJ9x9CgSi1RlVDQF8U2sxLLSnFkCaMihqljHIWgMg==" }, - "node_modules/file-loader/node_modules/json-schema-traverse": { - "version": "0.4.1", - "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz", - "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==" + "node_modules/eastasianwidth": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/eastasianwidth/-/eastasianwidth-0.2.0.tgz", + 
"integrity": "sha512-I88TYZWc9XiYHRQ4/3c5rjjfgkjhLyW2luGIheGERbNQ6OY7yTybanSpDXZa8y7VUP9YmDcYa+eyq4ca7iLqWA==" }, - "node_modules/file-loader/node_modules/schema-utils": { - "version": "3.3.0", - "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-3.3.0.tgz", - "integrity": "sha512-pN/yOAvcC+5rQ5nERGuwrjLlYvLTbCibnZ1I7B1LaiAz9BRBlE9GMgE/eqV30P7aJQUf7Ddimy/RsbYO/GrVGg==", - "dependencies": { - "@types/json-schema": "^7.0.8", - "ajv": "^6.12.5", - "ajv-keywords": "^3.5.2" - }, + "node_modules/ee-first": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/ee-first/-/ee-first-1.1.1.tgz", + "integrity": "sha512-WMwm9LhRUo+WUaRN+vRuETqG89IgZphVSNkdFgeb6sS/E4OrDIN7t48CAewSHXc6C8lefD8KKfr5vY61brQlow==", + "license": "MIT" + }, + "node_modules/electron-to-chromium": { + "version": "1.5.165", + "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.5.165.tgz", + "integrity": "sha512-naiMx1Z6Nb2TxPU6fiFrUrDTjyPMLdTtaOd2oLmG8zVSg2hCWGkhPyxwk+qRmZ1ytwVqUv0u7ZcDA5+ALhaUtw==", + "license": "ISC" + }, + "node_modules/elkjs": { + "version": "0.9.3", + "resolved": "https://registry.npmjs.org/elkjs/-/elkjs-0.9.3.tgz", + "integrity": "sha512-f/ZeWvW/BCXbhGEf1Ujp29EASo/lk1FDnETgNKwJrsVvGZhUWCZyg3xLJjAsxfOmt8KjswHmI5EwCQcPMpOYhQ==", + "license": "EPL-2.0" + }, + "node_modules/emoji-regex": { + "version": "9.2.2", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-9.2.2.tgz", + "integrity": "sha512-L18DaJsXSUk2+42pv8mLs5jJT2hqFkFE4j21wOmgbUqsZ2hL72NsUU785g9RXgo3s0ZNgVl42TiHp3ZtOv/Vyg==" + }, + "node_modules/emojilib": { + "version": "2.4.0", + "resolved": "https://registry.npmjs.org/emojilib/-/emojilib-2.4.0.tgz", + "integrity": "sha512-5U0rVMU5Y2n2+ykNLQqMoqklN9ICBT/KsvC1Gz6vqHbz2AXXGkG+Pm5rMWk/8Vjrr/mY9985Hi8DYzn1F09Nyw==" + }, + "node_modules/emojis-list": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/emojis-list/-/emojis-list-3.0.0.tgz", + "integrity": "sha512-/kyM18EfinwXZbno9FyUGeFh87KC8HRQBQGildHZbEuRyWFOmv1U10o9BBp8XVZDVNNuQKyIGIu5ZYAAXJ0V2Q==", "engines": { - "node": ">= 10.13.0" - }, + "node": ">= 4" + } + }, + "node_modules/emoticon": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/emoticon/-/emoticon-4.0.1.tgz", + "integrity": "sha512-dqx7eA9YaqyvYtUhJwT4rC1HIp82j5ybS1/vQ42ur+jBe17dJMwZE4+gvL1XadSFfxaPFFGt3Xsw+Y8akThDlw==", "funding": { - "type": "opencollective", - "url": "https://opencollective.com/webpack" + "type": "github", + "url": "https://github.com/sponsors/wooorm" } }, - "node_modules/filesize": { - "version": "8.0.7", - "resolved": "https://registry.npmjs.org/filesize/-/filesize-8.0.7.tgz", - "integrity": "sha512-pjmC+bkIF8XI7fWaH8KxHcZL3DPybs1roSKP4rKDvy20tAWwIObE4+JIseG2byfGKhud5ZnM4YSGKBz7Sh0ndQ==", + "node_modules/encodeurl": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/encodeurl/-/encodeurl-2.0.0.tgz", + "integrity": "sha512-Q0n9HRi4m6JuGIV1eFlmvJB7ZEVxu93IrMyiMsGC0lrMJMWzRgx6WGquyfQgZVb31vhGgXnfmPNNXmxnOkRBrg==", + "license": "MIT", "engines": { - "node": ">= 0.4.0" + "node": ">= 0.8" } }, - "node_modules/fill-range": { - "version": "7.0.1", - "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.0.1.tgz", - "integrity": "sha512-qOo9F+dMUmC2Lcb4BbVvnKJxTPjCm+RRpe4gDuGrzkL7mEVl/djYSu2OdQ2Pa302N4oqkSg9ir6jaLWJ2USVpQ==", + "node_modules/enhanced-resolve": { + "version": "5.18.1", + "resolved": "https://registry.npmjs.org/enhanced-resolve/-/enhanced-resolve-5.18.1.tgz", + "integrity": 
"sha512-ZSW3ma5GkcQBIpwZTSRAI8N71Uuwgs93IezB7mf7R60tC8ZbJideoDNKjHn2O9KIlx6rkGTTEk1xUCK2E1Y2Yg==", + "license": "MIT", "dependencies": { - "to-regex-range": "^5.0.1" + "graceful-fs": "^4.2.4", + "tapable": "^2.2.0" }, "engines": { - "node": ">=8" + "node": ">=10.13.0" } }, - "node_modules/finalhandler": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/finalhandler/-/finalhandler-1.2.0.tgz", - "integrity": "sha512-5uXcUVftlQMFnWC9qu/svkWv3GTd2PfUhK/3PLkYNAe7FbqJMt3515HaxE6eRL74GdsriiwujiawdaB1BpEISg==", - "dependencies": { - "debug": "2.6.9", - "encodeurl": "~1.0.2", - "escape-html": "~1.0.3", - "on-finished": "2.4.1", - "parseurl": "~1.3.3", - "statuses": "2.0.1", - "unpipe": "~1.0.0" - }, + "node_modules/entities": { + "version": "4.5.0", + "resolved": "https://registry.npmjs.org/entities/-/entities-4.5.0.tgz", + "integrity": "sha512-V0hjH4dGPh9Ao5p0MoRY6BVqtwCjhz6vI5LT8AJ55H+4g9/4vbHx1I54fS0XuclLhDHArPQCiMjDxjaL8fPxhw==", "engines": { - "node": ">= 0.8" + "node": ">=0.12" + }, + "funding": { + "url": "https://github.com/fb55/entities?sponsor=1" } }, - "node_modules/finalhandler/node_modules/debug": { - "version": "2.6.9", - "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", - "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", + "node_modules/error-ex": { + "version": "1.3.2", + "resolved": "https://registry.npmjs.org/error-ex/-/error-ex-1.3.2.tgz", + "integrity": "sha512-7dFHNmqeFSEt2ZBsCriorKnn3Z2pj+fd9kmI6QoWw4//DL+icEBfc0U7qJCisqrTsKTjw4fNFy2pW9OqStD84g==", "dependencies": { - "ms": "2.0.0" + "is-arrayish": "^0.2.1" } }, - "node_modules/finalhandler/node_modules/ms": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", - "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==" + "node_modules/es-define-property": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/es-define-property/-/es-define-property-1.0.1.tgz", + "integrity": "sha512-e3nRfgfUZ4rNGL232gUgX06QNyyez04KdjFrF+LTRoOXmrOgFKDg4BCdsjW8EnT69eqdYGmRpJwiPVYNrCaW3g==", + "license": "MIT", + "engines": { + "node": ">= 0.4" + } }, - "node_modules/find-cache-dir": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/find-cache-dir/-/find-cache-dir-4.0.0.tgz", - "integrity": "sha512-9ZonPT4ZAK4a+1pUPVPZJapbi7O5qbbJPdYw/NOQWZZbVLdDTYM3A4R9z/DpAM08IDaFGsvPgiGZ82WEwUDWjg==", - "dependencies": { - "common-path-prefix": "^3.0.0", - "pkg-dir": "^7.0.0" - }, + "node_modules/es-errors": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/es-errors/-/es-errors-1.3.0.tgz", + "integrity": "sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw==", + "license": "MIT", "engines": { - "node": ">=14.16" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" + "node": ">= 0.4" } }, - "node_modules/find-up": { - "version": "6.3.0", - "resolved": "https://registry.npmjs.org/find-up/-/find-up-6.3.0.tgz", - "integrity": "sha512-v2ZsoEuVHYy8ZIlYqwPe/39Cy+cFDzp4dXPaxNvkEuouymu+2Jbz0PxpKarJHYJTmv2HWT3O382qY8l4jMWthw==", + "node_modules/es-module-lexer": { + "version": "1.4.1", + "resolved": "https://registry.npmjs.org/es-module-lexer/-/es-module-lexer-1.4.1.tgz", + "integrity": "sha512-cXLGjP0c4T3flZJKQSuziYoq7MlT+rnvfZjfp7h+I7K9BNX54kP9nyWvdbwjQ4u1iWbOL4u96fgeZLToQlZC7w==" + }, + "node_modules/es-object-atoms": { + "version": "1.1.1", + "resolved": 
"https://registry.npmjs.org/es-object-atoms/-/es-object-atoms-1.1.1.tgz", + "integrity": "sha512-FGgH2h8zKNim9ljj7dankFPcICIK9Cp5bm+c2gQSYePhpaG5+esrLODihIorn+Pe6FGJzWhXQotPv73jTaldXA==", + "license": "MIT", "dependencies": { - "locate-path": "^7.1.0", - "path-exists": "^5.0.0" + "es-errors": "^1.3.0" }, "engines": { - "node": "^12.20.0 || ^14.13.1 || >=16.0.0" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" + "node": ">= 0.4" } }, - "node_modules/flat": { - "version": "5.0.2", - "resolved": "https://registry.npmjs.org/flat/-/flat-5.0.2.tgz", - "integrity": "sha512-b6suED+5/3rTpUBdG1gupIl8MPFCAMA0QXwmljLhvCUKcUvdE4gWky9zpuGCcXHOsz4J9wPGNWq6OKpmIzz3hQ==", - "bin": { - "flat": "cli.js" + "node_modules/escalade": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.2.0.tgz", + "integrity": "sha512-WUj2qlxaQtO4g6Pq5c29GTcWGDyd8itL8zTlipgECz3JesAiiOKotd8JU6otB3PACgG6xkJUyVhboMS+bje/jA==", + "license": "MIT", + "engines": { + "node": ">=6" } }, - "node_modules/follow-redirects": { - "version": "1.15.4", - "resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.15.4.tgz", - "integrity": "sha512-Cr4D/5wlrb0z9dgERpUL3LrmPKVDsETIJhaCMeDfuFYcqa5bldGV6wBsAN6X/vxlXQtFBMrXdXxdL8CbDTGniw==", - "funding": [ - { - "type": "individual", - "url": "https://github.com/sponsors/RubenVerborgh" - } - ], + "node_modules/escape-goat": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/escape-goat/-/escape-goat-4.0.0.tgz", + "integrity": "sha512-2Sd4ShcWxbx6OY1IHyla/CVNwvg7XwZVoXZHcSu9w9SReNP1EzzD5T8NWKIR38fIqEns9kDWKUQTXXAmlDrdPg==", "engines": { - "node": ">=4.0" + "node": ">=12" }, - "peerDependenciesMeta": { - "debug": { - "optional": true - } + "funding": { + "url": "https://github.com/sponsors/sindresorhus" } }, - "node_modules/foreground-child": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/foreground-child/-/foreground-child-3.1.1.tgz", - "integrity": "sha512-TMKDUnIte6bfb5nWv7V/caI169OHgvwjb7V4WkeUvbQQdjr5rWKqHFiKWb/fcOwB+CzBT+qbWjvj+DVwRskpIg==", - "dependencies": { - "cross-spawn": "^7.0.0", - "signal-exit": "^4.0.1" - }, + "node_modules/escape-html": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/escape-html/-/escape-html-1.0.3.tgz", + "integrity": "sha512-NiSupZ4OeuGwr68lGIeym/ksIZMJodUGOSCZ/FSnTxcrekbvqrgdUxlJOMpijaKZVjAJrWrGs/6Jy8OMuyj9ow==" + }, + "node_modules/escape-string-regexp": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-4.0.0.tgz", + "integrity": "sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA==", "engines": { - "node": ">=14" + "node": ">=10" }, "funding": { - "url": "https://github.com/sponsors/isaacs" + "url": "https://github.com/sponsors/sindresorhus" } }, - "node_modules/foreground-child/node_modules/signal-exit": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-4.1.0.tgz", - "integrity": "sha512-bzyZ1e88w9O1iNJbKnOlvYTrWPDl46O1bG0D3XInv+9tkPrxrN8jUUTiFlDkkmKWgn1M6CfIA13SuGqOa9Korw==", - "engines": { - "node": ">=14" + "node_modules/eslint-scope": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-5.1.1.tgz", + "integrity": "sha512-2NxwbF/hZ0KpepYN0cNbo+FN6XoK7GaHlQhgx/hIZl6Va0bF45RQOOwhLIy8lQDbuCiadSLCBnH2CFYquit5bw==", + "dependencies": { + "esrecurse": "^4.3.0", + "estraverse": "^4.1.1" }, - "funding": { - "url": "https://github.com/sponsors/isaacs" + 
"engines": { + "node": ">=8.0.0" } }, - "node_modules/fork-ts-checker-webpack-plugin": { - "version": "6.5.3", - "resolved": "https://registry.npmjs.org/fork-ts-checker-webpack-plugin/-/fork-ts-checker-webpack-plugin-6.5.3.tgz", - "integrity": "sha512-SbH/l9ikmMWycd5puHJKTkZJKddF4iRLyW3DeZ08HTI7NGyLS38MXd/KGgeWumQO7YNQbW2u/NtPT2YowbPaGQ==", - "dependencies": { - "@babel/code-frame": "^7.8.3", - "@types/json-schema": "^7.0.5", - "chalk": "^4.1.0", - "chokidar": "^3.4.2", - "cosmiconfig": "^6.0.0", - "deepmerge": "^4.2.2", - "fs-extra": "^9.0.0", - "glob": "^7.1.6", - "memfs": "^3.1.2", - "minimatch": "^3.0.4", - "schema-utils": "2.7.0", - "semver": "^7.3.2", - "tapable": "^1.0.0" + "node_modules/esprima": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/esprima/-/esprima-4.0.1.tgz", + "integrity": "sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A==", + "bin": { + "esparse": "bin/esparse.js", + "esvalidate": "bin/esvalidate.js" }, "engines": { - "node": ">=10", - "yarn": ">=1.0.0" - }, - "peerDependencies": { - "eslint": ">= 6", - "typescript": ">= 2.7", - "vue-template-compiler": "*", - "webpack": ">= 4" - }, - "peerDependenciesMeta": { - "eslint": { - "optional": true - }, - "vue-template-compiler": { - "optional": true - } + "node": ">=4" } }, - "node_modules/fork-ts-checker-webpack-plugin/node_modules/ajv": { - "version": "6.12.6", - "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", - "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==", + "node_modules/esrecurse": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/esrecurse/-/esrecurse-4.3.0.tgz", + "integrity": "sha512-KmfKL3b6G+RXvP8N1vr3Tq1kL/oCFgn2NYXEtqP8/L3pKapUA4G8cFVaoF3SU323CD4XypR/ffioHmkti6/Tag==", "dependencies": { - "fast-deep-equal": "^3.1.1", - "fast-json-stable-stringify": "^2.0.0", - "json-schema-traverse": "^0.4.1", - "uri-js": "^4.2.2" + "estraverse": "^5.2.0" }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/epoberezkin" + "engines": { + "node": ">=4.0" } }, - "node_modules/fork-ts-checker-webpack-plugin/node_modules/ajv-keywords": { - "version": "3.5.2", - "resolved": "https://registry.npmjs.org/ajv-keywords/-/ajv-keywords-3.5.2.tgz", - "integrity": "sha512-5p6WTN0DdTGVQk6VjcEju19IgaHudalcfabD7yhDGeA6bcQnmL+CpveLJq/3hvfwd1aof6L386Ougkx6RfyMIQ==", - "peerDependencies": { - "ajv": "^6.9.1" + "node_modules/esrecurse/node_modules/estraverse": { + "version": "5.3.0", + "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-5.3.0.tgz", + "integrity": "sha512-MMdARuVEQziNTeJD8DgMqmhwR11BRQ/cBP+pLtYdSTnf3MIO8fFeiINEbX36ZdNlfU/7A9f3gUw49B3oQsvwBA==", + "engines": { + "node": ">=4.0" } }, - "node_modules/fork-ts-checker-webpack-plugin/node_modules/cosmiconfig": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/cosmiconfig/-/cosmiconfig-6.0.0.tgz", - "integrity": "sha512-xb3ZL6+L8b9JLLCx3ZdoZy4+2ECphCMo2PwqgP1tlfVq6M6YReyzBJtvWWtbDSpNr9hn96pkCiZqUcFEc+54Qg==", + "node_modules/estraverse": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-4.3.0.tgz", + "integrity": "sha512-39nnKffWz8xN1BU/2c79n9nB9HDzo0niYUqx6xyqUnyoAnQyyWpOTdZEeiCch8BBu515t4wp9ZmgVfVhn9EBpw==", + "engines": { + "node": ">=4.0" + } + }, + "node_modules/estree-util-attach-comments": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/estree-util-attach-comments/-/estree-util-attach-comments-3.0.0.tgz", + 
"integrity": "sha512-cKUwm/HUcTDsYh/9FgnuFqpfquUbwIqwKM26BVCGDPVgvaCl/nDCCjUfiLlx6lsEZ3Z4RFxNbOQ60pkaEwFxGw==", "dependencies": { - "@types/parse-json": "^4.0.0", - "import-fresh": "^3.1.0", - "parse-json": "^5.0.0", - "path-type": "^4.0.0", - "yaml": "^1.7.2" + "@types/estree": "^1.0.0" }, - "engines": { - "node": ">=8" + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" } }, - "node_modules/fork-ts-checker-webpack-plugin/node_modules/fs-extra": { - "version": "9.1.0", - "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-9.1.0.tgz", - "integrity": "sha512-hcg3ZmepS30/7BSFqRvoo3DOMQu7IjqxO5nCDt+zM9XWjb33Wg7ziNT+Qvqbuc3+gWpzO02JubVyk2G4Zvo1OQ==", + "node_modules/estree-util-build-jsx": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/estree-util-build-jsx/-/estree-util-build-jsx-3.0.1.tgz", + "integrity": "sha512-8U5eiL6BTrPxp/CHbs2yMgP8ftMhR5ww1eIKoWRMlqvltHF8fZn5LRDvTKuxD3DUn+shRbLGqXemcP51oFCsGQ==", "dependencies": { - "at-least-node": "^1.0.0", - "graceful-fs": "^4.2.0", - "jsonfile": "^6.0.1", - "universalify": "^2.0.0" + "@types/estree-jsx": "^1.0.0", + "devlop": "^1.0.0", + "estree-util-is-identifier-name": "^3.0.0", + "estree-walker": "^3.0.0" }, - "engines": { - "node": ">=10" + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" } }, - "node_modules/fork-ts-checker-webpack-plugin/node_modules/json-schema-traverse": { - "version": "0.4.1", - "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz", - "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==" + "node_modules/estree-util-is-identifier-name": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/estree-util-is-identifier-name/-/estree-util-is-identifier-name-3.0.0.tgz", + "integrity": "sha512-hFtqIDZTIUZ9BXLb8y4pYGyk6+wekIivNVTcmvk8NoOh+VeRn5y6cEHzbURrWbfp1fIqdVipilzj+lfaadNZmg==", + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } }, - "node_modules/fork-ts-checker-webpack-plugin/node_modules/schema-utils": { - "version": "2.7.0", - "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-2.7.0.tgz", - "integrity": "sha512-0ilKFI6QQF5nxDZLFn2dMjvc4hjg/Wkg7rHd3jK6/A4a1Hl9VFdQWvgB1UMGoU94pad1P/8N7fMcEnLnSiju8A==", + "node_modules/estree-util-to-js": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/estree-util-to-js/-/estree-util-to-js-2.0.0.tgz", + "integrity": "sha512-WDF+xj5rRWmD5tj6bIqRi6CkLIXbbNQUcxQHzGysQzvHmdYG2G7p/Tf0J0gpxGgkeMZNTIjT/AoSvC9Xehcgdg==", "dependencies": { - "@types/json-schema": "^7.0.4", - "ajv": "^6.12.2", - "ajv-keywords": "^3.4.1" - }, - "engines": { - "node": ">= 8.9.0" + "@types/estree-jsx": "^1.0.0", + "astring": "^1.8.0", + "source-map": "^0.7.0" }, "funding": { "type": "opencollective", - "url": "https://opencollective.com/webpack" + "url": "https://opencollective.com/unified" } }, - "node_modules/fork-ts-checker-webpack-plugin/node_modules/tapable": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/tapable/-/tapable-1.1.3.tgz", - "integrity": "sha512-4WK/bYZmj8xLr+HUCODHGF1ZFzsYffasLUgEiMBY4fgtltdO6B4WJtlSbPaDTLpYTcGVwM2qLnFTICEcNxs3kA==", - "engines": { - "node": ">=6" + "node_modules/estree-util-value-to-estree": { + "version": "3.4.0", + "resolved": "https://registry.npmjs.org/estree-util-value-to-estree/-/estree-util-value-to-estree-3.4.0.tgz", + "integrity": 
"sha512-Zlp+gxis+gCfK12d3Srl2PdX2ybsEA8ZYy6vQGVQTNNYLEGRQQ56XB64bjemN8kxIKXP1nC9ip4Z+ILy9LGzvQ==", + "license": "MIT", + "dependencies": { + "@types/estree": "^1.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/remcohaszing" } }, - "node_modules/form-data-encoder": { - "version": "2.1.4", - "resolved": "https://registry.npmjs.org/form-data-encoder/-/form-data-encoder-2.1.4.tgz", - "integrity": "sha512-yDYSgNMraqvnxiEXO4hi88+YZxaHC6QKzb5N84iRCTDeRO7ZALpir/lVmf/uXUhnwUr2O4HU8s/n6x+yNjQkHw==", - "engines": { - "node": ">= 14.17" + "node_modules/estree-util-visit": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/estree-util-visit/-/estree-util-visit-2.0.0.tgz", + "integrity": "sha512-m5KgiH85xAhhW8Wta0vShLcUvOsh3LLPI2YVwcbio1l7E09NTLL1EyMZFM1OyWowoH0skScNbhOPl4kcBgzTww==", + "dependencies": { + "@types/estree-jsx": "^1.0.0", + "@types/unist": "^3.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" } }, - "node_modules/format": { - "version": "0.2.2", - "resolved": "https://registry.npmjs.org/format/-/format-0.2.2.tgz", - "integrity": "sha512-wzsgA6WOq+09wrU1tsJ09udeR/YZRaeArL9e1wPbFg3GG2yDnC2ldKpxs4xunpFF9DgqCqOIra3bc1HWrJ37Ww==", - "engines": { - "node": ">=0.4.x" + "node_modules/estree-walker": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/estree-walker/-/estree-walker-3.0.3.tgz", + "integrity": "sha512-7RUKfXgSMMkzt6ZuXmqapOurLGPPfgj6l9uRZ7lRGolvk0y2yocc35LdcxKC5PQZdn2DMqioAQ2NoWcrTKmm6g==", + "dependencies": { + "@types/estree": "^1.0.0" } }, - "node_modules/forwarded": { - "version": "0.2.0", - "resolved": "https://registry.npmjs.org/forwarded/-/forwarded-0.2.0.tgz", - "integrity": "sha512-buRG0fpBtRHSTCOASe6hD258tEubFoRLb4ZNA6NxMVHNw2gOcwHo9wyablzMzOA5z9xA9L1KNjk/Nt6MT9aYow==", + "node_modules/esutils": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/esutils/-/esutils-2.0.3.tgz", + "integrity": "sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g==", "engines": { - "node": ">= 0.6" + "node": ">=0.10.0" } }, - "node_modules/fraction.js": { - "version": "4.3.7", - "resolved": "https://registry.npmjs.org/fraction.js/-/fraction.js-4.3.7.tgz", - "integrity": "sha512-ZsDfxO51wGAXREY55a7la9LScWpwv9RxIrYABrlvOFBlH/ShPnrtsXeuUIfXKKOVicNxQ+o8JTbJvjS4M89yew==", + "node_modules/eta": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/eta/-/eta-2.2.0.tgz", + "integrity": "sha512-UVQ72Rqjy/ZKQalzV5dCCJP80GrmPrMxh6NlNf+erV6ObL0ZFkhCstWRawS85z3smdr3d2wXPsZEY7rDPfGd2g==", "engines": { - "node": "*" + "node": ">=6.0.0" }, "funding": { - "type": "patreon", - "url": "https://github.com/sponsors/rawify" + "url": "https://github.com/eta-dev/eta?sponsor=1" } }, - "node_modules/fresh": { - "version": "0.5.2", - "resolved": "https://registry.npmjs.org/fresh/-/fresh-0.5.2.tgz", - "integrity": "sha512-zJ2mQYM18rEFOudeV4GShTGIQ7RbzA7ozbU9I/XBpm7kqgMywgmylMwXHxZJmkVoYkna9d2pVXVXPdYTP9ej8Q==", + "node_modules/etag": { + "version": "1.8.1", + "resolved": "https://registry.npmjs.org/etag/-/etag-1.8.1.tgz", + "integrity": "sha512-aIL5Fx7mawVa300al2BnEE4iNvo1qETxLrPI/o05L7z6go7fCw1J6EQmbK4FmJ2AS7kgVF/KEZWufBfdClMcPg==", + "license": "MIT", "engines": { "node": ">= 0.6" } }, - "node_modules/fs-extra": { - "version": "11.2.0", - "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-11.2.0.tgz", - "integrity": "sha512-PmDi3uwK5nFuXh7XDTlVnS17xJS7vW36is2+w3xcv8SVxiB4NyATf4ctkVY5bkSjX0Y4nbvZCq1/EjtEyr9ktw==", + "node_modules/eval": { + "version": 
"0.1.8", + "resolved": "https://registry.npmjs.org/eval/-/eval-0.1.8.tgz", + "integrity": "sha512-EzV94NYKoO09GLXGjXj9JIlXijVck4ONSr5wiCWDvhsvj5jxSrzTmRU/9C1DyB6uToszLs8aifA6NQ7lEQdvFw==", "dependencies": { - "graceful-fs": "^4.2.0", - "jsonfile": "^6.0.1", - "universalify": "^2.0.0" + "@types/node": "*", + "require-like": ">= 0.1.1" }, "engines": { - "node": ">=14.14" - } - }, - "node_modules/fs-monkey": { - "version": "1.0.5", - "resolved": "https://registry.npmjs.org/fs-monkey/-/fs-monkey-1.0.5.tgz", - "integrity": "sha512-8uMbBjrhzW76TYgEV27Y5E//W2f/lTFmx78P2w19FZSxarhI/798APGQyuGCwmkNxgwGRhrLfvWyLBvNtuOmew==" - }, - "node_modules/fs.realpath": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz", - "integrity": "sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw==" - }, - "node_modules/fsevents": { - "version": "2.3.3", - "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.3.tgz", - "integrity": "sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==", - "hasInstallScript": true, - "optional": true, - "os": [ - "darwin" - ], - "engines": { - "node": "^8.16.0 || ^10.6.0 || >=11.0.0" + "node": ">= 0.8" } }, - "node_modules/function-bind": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.2.tgz", - "integrity": "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==", - "funding": { - "url": "https://github.com/sponsors/ljharb" - } + "node_modules/eventemitter3": { + "version": "4.0.7", + "resolved": "https://registry.npmjs.org/eventemitter3/-/eventemitter3-4.0.7.tgz", + "integrity": "sha512-8guHBZCwKnFhYdHr2ysuRWErTwhoN2X8XELRlrRwpmfeY2jjuUN4taQMsULKUVo1K4DvZl+0pgfyoysHxvmvEw==" }, - "node_modules/gensync": { - "version": "1.0.0-beta.2", - "resolved": "https://registry.npmjs.org/gensync/-/gensync-1.0.0-beta.2.tgz", - "integrity": "sha512-3hN7NaskYvMDLQY55gnW3NQ+mesEAepTqlg+VEbj7zzqEMBVNhzcGYYeqFo/TlYz6eQiFcp1HcsCZO+nGgS8zg==", + "node_modules/events": { + "version": "3.3.0", + "resolved": "https://registry.npmjs.org/events/-/events-3.3.0.tgz", + "integrity": "sha512-mQw+2fkQbALzQ7V0MY0IqdnXNOeTtP4r0lN9z7AAawCXgqea7bDii20AYrIBrFd/Hx0M2Ocz6S111CaFkUcb0Q==", "engines": { - "node": ">=6.9.0" + "node": ">=0.8.x" } }, - "node_modules/get-intrinsic": { - "version": "1.2.2", - "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.2.2.tgz", - "integrity": "sha512-0gSo4ml/0j98Y3lngkFEot/zhiCeWsbYIlZ+uZOVgzLyLaUw7wxUL+nCTP0XJvJg1AXulJRI3UJi8GsbDuxdGA==", + "node_modules/execa": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/execa/-/execa-5.1.1.tgz", + "integrity": "sha512-8uSpZZocAZRBAPIEINJj3Lo9HyGitllczc27Eh5YYojjMFMn8yHMDMaUHE2Jqfq05D/wucwI4JGURyXt1vchyg==", "dependencies": { - "function-bind": "^1.1.2", - "has-proto": "^1.0.1", - "has-symbols": "^1.0.3", - "hasown": "^2.0.0" + "cross-spawn": "^7.0.3", + "get-stream": "^6.0.0", + "human-signals": "^2.1.0", + "is-stream": "^2.0.0", + "merge-stream": "^2.0.0", + "npm-run-path": "^4.0.1", + "onetime": "^5.1.2", + "signal-exit": "^3.0.3", + "strip-final-newline": "^2.0.0" }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/get-own-enumerable-property-symbols": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/get-own-enumerable-property-symbols/-/get-own-enumerable-property-symbols-3.0.2.tgz", - "integrity": 
"sha512-I0UBV/XOz1XkIJHEUDMZAbzCThU/H8DxmSfmdGcKPnVhu2VfFqr34jr9777IyaTYvxjedWhqVIilEDsCdP5G6g==" - }, - "node_modules/get-stream": { - "version": "6.0.1", - "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-6.0.1.tgz", - "integrity": "sha512-ts6Wi+2j3jQjqi70w5AlN8DFnkSwC+MqmxEzdEALB2qXZYV3X/b1CTfgPLGJNMeAWxdPfU8FO1ms3NUfaHCPYg==", "engines": { "node": ">=10" }, "funding": { - "url": "https://github.com/sponsors/sindresorhus" + "url": "https://github.com/sindresorhus/execa?sponsor=1" } }, - "node_modules/github-slugger": { - "version": "1.5.0", - "resolved": "https://registry.npmjs.org/github-slugger/-/github-slugger-1.5.0.tgz", - "integrity": "sha512-wIh+gKBI9Nshz2o46B0B3f5k/W+WI9ZAv6y5Dn5WJ5SK1t0TnDimB4WE5rmTD05ZAIn8HALCZVmCsvj0w0v0lw==" - }, - "node_modules/glob": { - "version": "7.2.3", - "resolved": "https://registry.npmjs.org/glob/-/glob-7.2.3.tgz", - "integrity": "sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==", + "node_modules/express": { + "version": "4.21.2", + "resolved": "https://registry.npmjs.org/express/-/express-4.21.2.tgz", + "integrity": "sha512-28HqgMZAmih1Czt9ny7qr6ek2qddF4FclbMzwhCREB6OFfH+rXAnuNCwo1/wFvrtbgsQDb4kSbX9de9lFbrXnA==", + "license": "MIT", "dependencies": { - "fs.realpath": "^1.0.0", - "inflight": "^1.0.4", - "inherits": "2", - "minimatch": "^3.1.1", - "once": "^1.3.0", - "path-is-absolute": "^1.0.0" + "accepts": "~1.3.8", + "array-flatten": "1.1.1", + "body-parser": "1.20.3", + "content-disposition": "0.5.4", + "content-type": "~1.0.4", + "cookie": "0.7.1", + "cookie-signature": "1.0.6", + "debug": "2.6.9", + "depd": "2.0.0", + "encodeurl": "~2.0.0", + "escape-html": "~1.0.3", + "etag": "~1.8.1", + "finalhandler": "1.3.1", + "fresh": "0.5.2", + "http-errors": "2.0.0", + "merge-descriptors": "1.0.3", + "methods": "~1.1.2", + "on-finished": "2.4.1", + "parseurl": "~1.3.3", + "path-to-regexp": "0.1.12", + "proxy-addr": "~2.0.7", + "qs": "6.13.0", + "range-parser": "~1.2.1", + "safe-buffer": "5.2.1", + "send": "0.19.0", + "serve-static": "1.16.2", + "setprototypeof": "1.2.0", + "statuses": "2.0.1", + "type-is": "~1.6.18", + "utils-merge": "1.0.1", + "vary": "~1.1.2" }, "engines": { - "node": "*" + "node": ">= 0.10.0" }, "funding": { - "url": "https://github.com/sponsors/isaacs" + "type": "opencollective", + "url": "https://opencollective.com/express" } }, - "node_modules/glob-parent": { - "version": "5.1.2", - "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz", - "integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==", + "node_modules/express/node_modules/array-flatten": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/array-flatten/-/array-flatten-1.1.1.tgz", + "integrity": "sha512-PCVAQswWemu6UdxsDFFX/+gVeYqKAod3D3UVm91jHwynguOwAvYPhx8nNlM++NqRcK6CxxpUafjmhIdKiHibqg==" + }, + "node_modules/express/node_modules/content-disposition": { + "version": "0.5.4", + "resolved": "https://registry.npmjs.org/content-disposition/-/content-disposition-0.5.4.tgz", + "integrity": "sha512-FveZTNuGw04cxlAiWbzi6zTAL/lhehaWbTtgluJh4/E95DqMwTmha3KZN1aAWA8cFIhHzMZUvLevkw5Rqk+tSQ==", "dependencies": { - "is-glob": "^4.0.1" + "safe-buffer": "5.2.1" }, "engines": { - "node": ">= 6" + "node": ">= 0.6" } }, - "node_modules/glob-to-regexp": { - "version": "0.4.1", - "resolved": "https://registry.npmjs.org/glob-to-regexp/-/glob-to-regexp-0.4.1.tgz", - "integrity": 
"sha512-lkX1HJXwyMcprw/5YUZc2s7DrpAiHB21/V+E1rHUrVNokkvB6bqMzT0VfV6/86ZNabt1k14YOIaT7nDvOX3Iiw==" - }, - "node_modules/global-dirs": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/global-dirs/-/global-dirs-3.0.1.tgz", - "integrity": "sha512-NBcGGFbBA9s1VzD41QXDG+3++t9Mn5t1FpLdhESY6oKY4gYTFpX4wO3sqGUa0Srjtbfj3szX0RnemmrVRUdULA==", + "node_modules/express/node_modules/debug": { + "version": "2.6.9", + "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", + "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", "dependencies": { - "ini": "2.0.0" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" + "ms": "2.0.0" } }, - "node_modules/global-dirs/node_modules/ini": { + "node_modules/express/node_modules/ms": { "version": "2.0.0", - "resolved": "https://registry.npmjs.org/ini/-/ini-2.0.0.tgz", - "integrity": "sha512-7PnF4oN3CvZF23ADhA5wRaYEQpJ8qygSkbtTXWBeXWXmEVRXK+1ITciHWwHhsjv1TmW0MgacIv6hEi5pX5NQdA==", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", + "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==" + }, + "node_modules/express/node_modules/path-to-regexp": { + "version": "0.1.12", + "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-0.1.12.tgz", + "integrity": "sha512-RA1GjUVMnvYFxuqovrEqZoxxW5NUZqbwKtYz/Tt7nXerk0LbLblQmrsgdeOxV5SFHf0UDggjS/bSeOZwt1pmEQ==", + "license": "MIT" + }, + "node_modules/express/node_modules/range-parser": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/range-parser/-/range-parser-1.2.1.tgz", + "integrity": "sha512-Hrgsx+orqoygnmhFbKaHE6c296J+HTAQXoxEF6gNupROmmGJRoyzfG3ccAveqCBrwr/2yxQ5BVd/GTl5agOwSg==", "engines": { - "node": ">=10" + "node": ">= 0.6" } }, - "node_modules/global-modules": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/global-modules/-/global-modules-2.0.0.tgz", - "integrity": "sha512-NGbfmJBp9x8IxyJSd1P+otYK8vonoJactOogrVfFRIAEY1ukil8RSKDz2Yo7wh1oihl51l/r6W4epkeKJHqL8A==", + "node_modules/extend": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/extend/-/extend-3.0.2.tgz", + "integrity": "sha512-fjquC59cD7CyW6urNXK0FBufkZcoiGG80wTuPujX590cB5Ttln20E2UB4S/WARVqhXffZl2LNgS+gQdPIIim/g==" + }, + "node_modules/extend-shallow": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-2.0.1.tgz", + "integrity": "sha512-zCnTtlxNoAiDc3gqY2aYAWFx7XWWiasuF2K8Me5WbN8otHKTUKBwjPtNpRs/rbUZm7KxWAaNj7P1a/p52GbVug==", "dependencies": { - "global-prefix": "^3.0.0" + "is-extendable": "^0.1.0" }, "engines": { - "node": ">=6" + "node": ">=0.10.0" } }, - "node_modules/global-prefix": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/global-prefix/-/global-prefix-3.0.0.tgz", - "integrity": "sha512-awConJSVCHVGND6x3tmMaKcQvwXLhjdkmomy2W+Goaui8YPgYgXJZewhg3fWC+DlfqqQuWg8AwqjGTD2nAPVWg==", + "node_modules/fast-deep-equal": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz", + "integrity": "sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==" + }, + "node_modules/fast-glob": { + "version": "3.3.2", + "resolved": "https://registry.npmjs.org/fast-glob/-/fast-glob-3.3.2.tgz", + "integrity": "sha512-oX2ruAFQwf/Orj8m737Y5adxDQO0LAB7/S5MnxCdTNDd4p6BsyIVsv9JQsATbTSq8KHRpLwIHbVlUNatxd+1Ow==", "dependencies": { - "ini": "^1.3.5", - 
"kind-of": "^6.0.2", - "which": "^1.3.1" + "@nodelib/fs.stat": "^2.0.2", + "@nodelib/fs.walk": "^1.2.3", + "glob-parent": "^5.1.2", + "merge2": "^1.3.0", + "micromatch": "^4.0.4" }, "engines": { - "node": ">=6" + "node": ">=8.6.0" } }, - "node_modules/global-prefix/node_modules/which": { - "version": "1.3.1", - "resolved": "https://registry.npmjs.org/which/-/which-1.3.1.tgz", - "integrity": "sha512-HxJdYWq1MTIQbJ3nw0cqssHoTNU267KlrDuGZ1WYlxDStUtKUhOaJmh112/TZmHxxUfuJqPXSOm7tDyas0OSIQ==", + "node_modules/fast-json-stable-stringify": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/fast-json-stable-stringify/-/fast-json-stable-stringify-2.1.0.tgz", + "integrity": "sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw==" + }, + "node_modules/fastq": { + "version": "1.16.0", + "resolved": "https://registry.npmjs.org/fastq/-/fastq-1.16.0.tgz", + "integrity": "sha512-ifCoaXsDrsdkWTtiNJX5uzHDsrck5TzfKKDcuFFTIrrc/BS076qgEIfoIy1VeZqViznfKiysPYTh/QeHtnIsYA==", "dependencies": { - "isexe": "^2.0.0" + "reusify": "^1.0.4" + } + }, + "node_modules/fault": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/fault/-/fault-2.0.1.tgz", + "integrity": "sha512-WtySTkS4OKev5JtpHXnib4Gxiurzh5NCGvWrFaZ34m6JehfTUhKZvn9njTfw48t6JumVQOmrKqpmGcdwxnhqBQ==", + "dependencies": { + "format": "^0.2.0" }, - "bin": { - "which": "bin/which" + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" } }, - "node_modules/globals": { - "version": "11.12.0", - "resolved": "https://registry.npmjs.org/globals/-/globals-11.12.0.tgz", - "integrity": "sha512-WOBp/EEGUiIsJSp7wcv/y6MO+lV9UoncWqxuFfm8eBwzWNgyfBd6Gz+IeKQ9jCmyhoH99g15M3T+QaVHFjizVA==", + "node_modules/faye-websocket": { + "version": "0.11.4", + "resolved": "https://registry.npmjs.org/faye-websocket/-/faye-websocket-0.11.4.tgz", + "integrity": "sha512-CzbClwlXAuiRQAlUyfqPgvPoNKTckTPGfwZV4ZdAhVcP2lh9KUxJg2b5GkE7XbjKQ3YJnQ9z6D9ntLAlB+tP8g==", + "dependencies": { + "websocket-driver": ">=0.5.1" + }, "engines": { - "node": ">=4" + "node": ">=0.8.0" } }, - "node_modules/globby": { - "version": "11.1.0", - "resolved": "https://registry.npmjs.org/globby/-/globby-11.1.0.tgz", - "integrity": "sha512-jhIXaOzy1sb8IyocaruWSn1TjmnBVs8Ayhcy83rmxNJ8q2uWKCAj3CnJY+KpGSXCueAPc0i05kVvVKtP1t9S3g==", + "node_modules/feed": { + "version": "4.2.2", + "resolved": "https://registry.npmjs.org/feed/-/feed-4.2.2.tgz", + "integrity": "sha512-u5/sxGfiMfZNtJ3OvQpXcvotFpYkL0n9u9mM2vkui2nGo8b4wvDkJ8gAkYqbA8QpGyFCv3RK0Z+Iv+9veCS9bQ==", "dependencies": { - "array-union": "^2.1.0", - "dir-glob": "^3.0.1", - "fast-glob": "^3.2.9", - "ignore": "^5.2.0", - "merge2": "^1.4.1", - "slash": "^3.0.0" + "xml-js": "^1.6.11" }, "engines": { - "node": ">=10" + "node": ">=0.4.0" + } + }, + "node_modules/file-loader": { + "version": "6.2.0", + "resolved": "https://registry.npmjs.org/file-loader/-/file-loader-6.2.0.tgz", + "integrity": "sha512-qo3glqyTa61Ytg4u73GultjHGjdRyig3tG6lPtyX/jOEJvHif9uB0/OCI2Kif6ctF3caQTW2G5gym21oAsI4pw==", + "dependencies": { + "loader-utils": "^2.0.0", + "schema-utils": "^3.0.0" + }, + "engines": { + "node": ">= 10.13.0" }, "funding": { - "url": "https://github.com/sponsors/sindresorhus" + "type": "opencollective", + "url": "https://opencollective.com/webpack" + }, + "peerDependencies": { + "webpack": "^4.0.0 || ^5.0.0" } }, - "node_modules/gopd": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/gopd/-/gopd-1.0.1.tgz", - "integrity": 
"sha512-d65bNlIadxvpb/A2abVdlqKqV563juRnZ1Wtk6s1sIR8uNsXR70xqIzVqxVf1eTqDunwT2MkczEeaezCKTZhwA==", + "node_modules/file-loader/node_modules/ajv": { + "version": "6.12.6", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", + "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==", "dependencies": { - "get-intrinsic": "^1.1.3" + "fast-deep-equal": "^3.1.1", + "fast-json-stable-stringify": "^2.0.0", + "json-schema-traverse": "^0.4.1", + "uri-js": "^4.2.2" }, "funding": { - "url": "https://github.com/sponsors/ljharb" + "type": "github", + "url": "https://github.com/sponsors/epoberezkin" } }, - "node_modules/got": { - "version": "12.6.1", - "resolved": "https://registry.npmjs.org/got/-/got-12.6.1.tgz", - "integrity": "sha512-mThBblvlAF1d4O5oqyvN+ZxLAYwIJK7bpMxgYqPD9okW0C3qm5FFn7k811QrcuEBwaogR3ngOFoCfs6mRv7teQ==", + "node_modules/file-loader/node_modules/ajv-keywords": { + "version": "3.5.2", + "resolved": "https://registry.npmjs.org/ajv-keywords/-/ajv-keywords-3.5.2.tgz", + "integrity": "sha512-5p6WTN0DdTGVQk6VjcEju19IgaHudalcfabD7yhDGeA6bcQnmL+CpveLJq/3hvfwd1aof6L386Ougkx6RfyMIQ==", + "peerDependencies": { + "ajv": "^6.9.1" + } + }, + "node_modules/file-loader/node_modules/json-schema-traverse": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz", + "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==" + }, + "node_modules/file-loader/node_modules/schema-utils": { + "version": "3.3.0", + "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-3.3.0.tgz", + "integrity": "sha512-pN/yOAvcC+5rQ5nERGuwrjLlYvLTbCibnZ1I7B1LaiAz9BRBlE9GMgE/eqV30P7aJQUf7Ddimy/RsbYO/GrVGg==", "dependencies": { - "@sindresorhus/is": "^5.2.0", - "@szmarczak/http-timer": "^5.0.1", - "cacheable-lookup": "^7.0.0", - "cacheable-request": "^10.2.8", - "decompress-response": "^6.0.0", - "form-data-encoder": "^2.1.2", - "get-stream": "^6.0.1", - "http2-wrapper": "^2.1.10", - "lowercase-keys": "^3.0.0", - "p-cancelable": "^3.0.0", - "responselike": "^3.0.0" + "@types/json-schema": "^7.0.8", + "ajv": "^6.12.5", + "ajv-keywords": "^3.5.2" }, "engines": { - "node": ">=14.16" + "node": ">= 10.13.0" }, "funding": { - "url": "https://github.com/sindresorhus/got?sponsor=1" + "type": "opencollective", + "url": "https://opencollective.com/webpack" } }, - "node_modules/got/node_modules/@sindresorhus/is": { - "version": "5.6.0", - "resolved": "https://registry.npmjs.org/@sindresorhus/is/-/is-5.6.0.tgz", - "integrity": "sha512-TV7t8GKYaJWsn00tFDqBw8+Uqmr8A0fRU1tvTQhyZzGv0sJCGRQL3JGMI3ucuKo3XIZdUP+Lx7/gh2t3lewy7g==", + "node_modules/filesize": { + "version": "8.0.7", + "resolved": "https://registry.npmjs.org/filesize/-/filesize-8.0.7.tgz", + "integrity": "sha512-pjmC+bkIF8XI7fWaH8KxHcZL3DPybs1roSKP4rKDvy20tAWwIObE4+JIseG2byfGKhud5ZnM4YSGKBz7Sh0ndQ==", "engines": { - "node": ">=14.16" - }, - "funding": { - "url": "https://github.com/sindresorhus/is?sponsor=1" + "node": ">= 0.4.0" } }, - "node_modules/graceful-fs": { - "version": "4.2.11", - "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.11.tgz", - "integrity": "sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ==" - }, - "node_modules/gray-matter": { - "version": "4.0.3", - "resolved": "https://registry.npmjs.org/gray-matter/-/gray-matter-4.0.3.tgz", - "integrity": 
"sha512-5v6yZd4JK3eMI3FqqCouswVqwugaA9r4dNZB1wwcmrD02QkV5H0y7XBQW8QwQqEaZY1pM9aqORSORhJRdNK44Q==", + "node_modules/fill-range": { + "version": "7.1.1", + "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.1.1.tgz", + "integrity": "sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg==", + "license": "MIT", "dependencies": { - "js-yaml": "^3.13.1", - "kind-of": "^6.0.2", - "section-matter": "^1.0.0", - "strip-bom-string": "^1.0.0" + "to-regex-range": "^5.0.1" }, "engines": { - "node": ">=6.0" + "node": ">=8" } }, - "node_modules/gray-matter/node_modules/argparse": { - "version": "1.0.10", - "resolved": "https://registry.npmjs.org/argparse/-/argparse-1.0.10.tgz", - "integrity": "sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg==", + "node_modules/finalhandler": { + "version": "1.3.1", + "resolved": "https://registry.npmjs.org/finalhandler/-/finalhandler-1.3.1.tgz", + "integrity": "sha512-6BN9trH7bp3qvnrRyzsBz+g3lZxTNZTbVO2EV1CS0WIcDbawYVdYvGflME/9QP0h0pYlCDBCTjYa9nZzMDpyxQ==", + "license": "MIT", "dependencies": { - "sprintf-js": "~1.0.2" + "debug": "2.6.9", + "encodeurl": "~2.0.0", + "escape-html": "~1.0.3", + "on-finished": "2.4.1", + "parseurl": "~1.3.3", + "statuses": "2.0.1", + "unpipe": "~1.0.0" + }, + "engines": { + "node": ">= 0.8" } }, - "node_modules/gray-matter/node_modules/js-yaml": { - "version": "3.14.1", - "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.14.1.tgz", - "integrity": "sha512-okMH7OXXJ7YrN9Ok3/SXrnu4iX9yOk+25nqX4imS2npuvTYDmo/QEZoqwZkYaIDk3jVvBOTOIEgEhaLOynBS9g==", + "node_modules/finalhandler/node_modules/debug": { + "version": "2.6.9", + "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", + "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", + "license": "MIT", "dependencies": { - "argparse": "^1.0.7", - "esprima": "^4.0.0" - }, - "bin": { - "js-yaml": "bin/js-yaml.js" + "ms": "2.0.0" } }, - "node_modules/gzip-size": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/gzip-size/-/gzip-size-6.0.0.tgz", - "integrity": "sha512-ax7ZYomf6jqPTQ4+XCpUGyXKHk5WweS+e05MBO4/y3WJ5RkmPXNKvX+bx1behVILVwr6JSQvZAku021CHPXG3Q==", + "node_modules/finalhandler/node_modules/ms": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", + "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==", + "license": "MIT" + }, + "node_modules/find-cache-dir": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/find-cache-dir/-/find-cache-dir-4.0.0.tgz", + "integrity": "sha512-9ZonPT4ZAK4a+1pUPVPZJapbi7O5qbbJPdYw/NOQWZZbVLdDTYM3A4R9z/DpAM08IDaFGsvPgiGZ82WEwUDWjg==", "dependencies": { - "duplexer": "^0.1.2" + "common-path-prefix": "^3.0.0", + "pkg-dir": "^7.0.0" }, "engines": { - "node": ">=10" + "node": ">=14.16" }, "funding": { "url": "https://github.com/sponsors/sindresorhus" } }, - "node_modules/handle-thing": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/handle-thing/-/handle-thing-2.0.1.tgz", - "integrity": "sha512-9Qn4yBxelxoh2Ow62nP+Ka/kMnOXRi8BXnRaUwezLNhqelnN49xKz4F/dPP8OYLxLxq6JDtZb2i9XznUQbNPTg==" - }, - "node_modules/has-flag": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", - "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", - "engines": { - "node": ">=8" - } 
- }, - "node_modules/has-property-descriptors": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/has-property-descriptors/-/has-property-descriptors-1.0.1.tgz", - "integrity": "sha512-VsX8eaIewvas0xnvinAe9bw4WfIeODpGYikiWYLH+dma0Jw6KHYqWiWfhQlgOVK8D6PvjubK5Uc4P0iIhIcNVg==", + "node_modules/find-up": { + "version": "6.3.0", + "resolved": "https://registry.npmjs.org/find-up/-/find-up-6.3.0.tgz", + "integrity": "sha512-v2ZsoEuVHYy8ZIlYqwPe/39Cy+cFDzp4dXPaxNvkEuouymu+2Jbz0PxpKarJHYJTmv2HWT3O382qY8l4jMWthw==", "dependencies": { - "get-intrinsic": "^1.2.2" + "locate-path": "^7.1.0", + "path-exists": "^5.0.0" + }, + "engines": { + "node": "^12.20.0 || ^14.13.1 || >=16.0.0" }, "funding": { - "url": "https://github.com/sponsors/ljharb" + "url": "https://github.com/sponsors/sindresorhus" } }, - "node_modules/has-proto": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/has-proto/-/has-proto-1.0.1.tgz", - "integrity": "sha512-7qE+iP+O+bgF9clE5+UoBFzE65mlBiVj3tKCrlNQ0Ogwm0BjpT/gK4SlLYDMybDh5I3TCTKnPPa0oMG7JDYrhg==", + "node_modules/flat": { + "version": "5.0.2", + "resolved": "https://registry.npmjs.org/flat/-/flat-5.0.2.tgz", + "integrity": "sha512-b6suED+5/3rTpUBdG1gupIl8MPFCAMA0QXwmljLhvCUKcUvdE4gWky9zpuGCcXHOsz4J9wPGNWq6OKpmIzz3hQ==", + "bin": { + "flat": "cli.js" + } + }, + "node_modules/follow-redirects": { + "version": "1.15.9", + "resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.15.9.tgz", + "integrity": "sha512-gew4GsXizNgdoRyqmyfMHyAmXsZDk6mHkSxZFCzW9gwlbtOW44CDtYavM+y+72qD/Vq2l550kMF52DT8fOLJqQ==", + "funding": [ + { + "type": "individual", + "url": "https://github.com/sponsors/RubenVerborgh" + } + ], + "license": "MIT", "engines": { - "node": ">= 0.4" + "node": ">=4.0" }, - "funding": { - "url": "https://github.com/sponsors/ljharb" + "peerDependenciesMeta": { + "debug": { + "optional": true + } } }, - "node_modules/has-symbols": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.0.3.tgz", - "integrity": "sha512-l3LCuF6MgDNwTDKkdYGEihYjt5pRPbEg46rtlmnSPlUbgmB8LOIrKJbYYFBSbnPaJexMKtiPO8hmeRjRz2Td+A==", + "node_modules/foreground-child": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/foreground-child/-/foreground-child-3.1.1.tgz", + "integrity": "sha512-TMKDUnIte6bfb5nWv7V/caI169OHgvwjb7V4WkeUvbQQdjr5rWKqHFiKWb/fcOwB+CzBT+qbWjvj+DVwRskpIg==", + "dependencies": { + "cross-spawn": "^7.0.0", + "signal-exit": "^4.0.1" + }, "engines": { - "node": ">= 0.4" + "node": ">=14" }, "funding": { - "url": "https://github.com/sponsors/ljharb" + "url": "https://github.com/sponsors/isaacs" } }, - "node_modules/has-yarn": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/has-yarn/-/has-yarn-3.0.0.tgz", - "integrity": "sha512-IrsVwUHhEULx3R8f/aA8AHuEzAorplsab/v8HBzEiIukwq5i/EC+xmOW+HfP1OaDP+2JkgT1yILHN2O3UFIbcA==", + "node_modules/foreground-child/node_modules/signal-exit": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-4.1.0.tgz", + "integrity": "sha512-bzyZ1e88w9O1iNJbKnOlvYTrWPDl46O1bG0D3XInv+9tkPrxrN8jUUTiFlDkkmKWgn1M6CfIA13SuGqOa9Korw==", "engines": { - "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + "node": ">=14" }, "funding": { - "url": "https://github.com/sponsors/sindresorhus" + "url": "https://github.com/sponsors/isaacs" } }, - "node_modules/hasown": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/hasown/-/hasown-2.0.0.tgz", - "integrity": 
"sha512-vUptKVTpIJhcczKBbgnS+RtcuYMB8+oNzPK2/Hp3hanz8JmpATdmmgLgSaadVREkDm+e2giHwY3ZRkyjSIDDFA==", + "node_modules/fork-ts-checker-webpack-plugin": { + "version": "6.5.3", + "resolved": "https://registry.npmjs.org/fork-ts-checker-webpack-plugin/-/fork-ts-checker-webpack-plugin-6.5.3.tgz", + "integrity": "sha512-SbH/l9ikmMWycd5puHJKTkZJKddF4iRLyW3DeZ08HTI7NGyLS38MXd/KGgeWumQO7YNQbW2u/NtPT2YowbPaGQ==", "dependencies": { - "function-bind": "^1.1.2" + "@babel/code-frame": "^7.8.3", + "@types/json-schema": "^7.0.5", + "chalk": "^4.1.0", + "chokidar": "^3.4.2", + "cosmiconfig": "^6.0.0", + "deepmerge": "^4.2.2", + "fs-extra": "^9.0.0", + "glob": "^7.1.6", + "memfs": "^3.1.2", + "minimatch": "^3.0.4", + "schema-utils": "2.7.0", + "semver": "^7.3.2", + "tapable": "^1.0.0" }, "engines": { - "node": ">= 0.4" + "node": ">=10", + "yarn": ">=1.0.0" + }, + "peerDependencies": { + "eslint": ">= 6", + "typescript": ">= 2.7", + "vue-template-compiler": "*", + "webpack": ">= 4" + }, + "peerDependenciesMeta": { + "eslint": { + "optional": true + }, + "vue-template-compiler": { + "optional": true + } } }, - "node_modules/hast-util-from-parse5": { - "version": "8.0.1", - "resolved": "https://registry.npmjs.org/hast-util-from-parse5/-/hast-util-from-parse5-8.0.1.tgz", - "integrity": "sha512-Er/Iixbc7IEa7r/XLtuG52zoqn/b3Xng/w6aZQ0xGVxzhw5xUFxcRqdPzP6yFi/4HBYRaifaI5fQ1RH8n0ZeOQ==", + "node_modules/fork-ts-checker-webpack-plugin/node_modules/ajv": { + "version": "6.12.6", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", + "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==", "dependencies": { - "@types/hast": "^3.0.0", - "@types/unist": "^3.0.0", - "devlop": "^1.0.0", - "hastscript": "^8.0.0", - "property-information": "^6.0.0", - "vfile": "^6.0.0", - "vfile-location": "^5.0.0", - "web-namespaces": "^2.0.0" + "fast-deep-equal": "^3.1.1", + "fast-json-stable-stringify": "^2.0.0", + "json-schema-traverse": "^0.4.1", + "uri-js": "^4.2.2" }, "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" + "type": "github", + "url": "https://github.com/sponsors/epoberezkin" } }, - "node_modules/hast-util-parse-selector": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/hast-util-parse-selector/-/hast-util-parse-selector-4.0.0.tgz", - "integrity": "sha512-wkQCkSYoOGCRKERFWcxMVMOcYE2K1AaNLU8DXS9arxnLOUEWbOXKXiJUNzEpqZ3JOKpnha3jkFrumEjVliDe7A==", - "dependencies": { - "@types/hast": "^3.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" + "node_modules/fork-ts-checker-webpack-plugin/node_modules/ajv-keywords": { + "version": "3.5.2", + "resolved": "https://registry.npmjs.org/ajv-keywords/-/ajv-keywords-3.5.2.tgz", + "integrity": "sha512-5p6WTN0DdTGVQk6VjcEju19IgaHudalcfabD7yhDGeA6bcQnmL+CpveLJq/3hvfwd1aof6L386Ougkx6RfyMIQ==", + "peerDependencies": { + "ajv": "^6.9.1" } }, - "node_modules/hast-util-raw": { - "version": "9.0.1", - "resolved": "https://registry.npmjs.org/hast-util-raw/-/hast-util-raw-9.0.1.tgz", - "integrity": "sha512-5m1gmba658Q+lO5uqL5YNGQWeh1MYWZbZmWrM5lncdcuiXuo5E2HT/CIOp0rLF8ksfSwiCVJ3twlgVRyTGThGA==", + "node_modules/fork-ts-checker-webpack-plugin/node_modules/cosmiconfig": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/cosmiconfig/-/cosmiconfig-6.0.0.tgz", + "integrity": "sha512-xb3ZL6+L8b9JLLCx3ZdoZy4+2ECphCMo2PwqgP1tlfVq6M6YReyzBJtvWWtbDSpNr9hn96pkCiZqUcFEc+54Qg==", "dependencies": { - "@types/hast": "^3.0.0", - 
"@types/unist": "^3.0.0", - "@ungap/structured-clone": "^1.0.0", - "hast-util-from-parse5": "^8.0.0", - "hast-util-to-parse5": "^8.0.0", - "html-void-elements": "^3.0.0", - "mdast-util-to-hast": "^13.0.0", - "parse5": "^7.0.0", - "unist-util-position": "^5.0.0", - "unist-util-visit": "^5.0.0", - "vfile": "^6.0.0", - "web-namespaces": "^2.0.0", - "zwitch": "^2.0.0" + "@types/parse-json": "^4.0.0", + "import-fresh": "^3.1.0", + "parse-json": "^5.0.0", + "path-type": "^4.0.0", + "yaml": "^1.7.2" }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" + "engines": { + "node": ">=8" } }, - "node_modules/hast-util-to-estree": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/hast-util-to-estree/-/hast-util-to-estree-3.1.0.tgz", - "integrity": "sha512-lfX5g6hqVh9kjS/B9E2gSkvHH4SZNiQFiqWS0x9fENzEl+8W12RqdRxX6d/Cwxi30tPQs3bIO+aolQJNp1bIyw==", + "node_modules/fork-ts-checker-webpack-plugin/node_modules/fs-extra": { + "version": "9.1.0", + "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-9.1.0.tgz", + "integrity": "sha512-hcg3ZmepS30/7BSFqRvoo3DOMQu7IjqxO5nCDt+zM9XWjb33Wg7ziNT+Qvqbuc3+gWpzO02JubVyk2G4Zvo1OQ==", "dependencies": { - "@types/estree": "^1.0.0", - "@types/estree-jsx": "^1.0.0", - "@types/hast": "^3.0.0", - "comma-separated-tokens": "^2.0.0", - "devlop": "^1.0.0", - "estree-util-attach-comments": "^3.0.0", - "estree-util-is-identifier-name": "^3.0.0", - "hast-util-whitespace": "^3.0.0", - "mdast-util-mdx-expression": "^2.0.0", - "mdast-util-mdx-jsx": "^3.0.0", - "mdast-util-mdxjs-esm": "^2.0.0", - "property-information": "^6.0.0", - "space-separated-tokens": "^2.0.0", - "style-to-object": "^0.4.0", - "unist-util-position": "^5.0.0", - "zwitch": "^2.0.0" + "at-least-node": "^1.0.0", + "graceful-fs": "^4.2.0", + "jsonfile": "^6.0.1", + "universalify": "^2.0.0" }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" + "engines": { + "node": ">=10" } }, - "node_modules/hast-util-to-jsx-runtime": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/hast-util-to-jsx-runtime/-/hast-util-to-jsx-runtime-2.3.0.tgz", - "integrity": "sha512-H/y0+IWPdsLLS738P8tDnrQ8Z+dj12zQQ6WC11TIM21C8WFVoIxcqWXf2H3hiTVZjF1AWqoimGwrTWecWrnmRQ==", + "node_modules/fork-ts-checker-webpack-plugin/node_modules/json-schema-traverse": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz", + "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==" + }, + "node_modules/fork-ts-checker-webpack-plugin/node_modules/schema-utils": { + "version": "2.7.0", + "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-2.7.0.tgz", + "integrity": "sha512-0ilKFI6QQF5nxDZLFn2dMjvc4hjg/Wkg7rHd3jK6/A4a1Hl9VFdQWvgB1UMGoU94pad1P/8N7fMcEnLnSiju8A==", "dependencies": { - "@types/estree": "^1.0.0", - "@types/hast": "^3.0.0", - "@types/unist": "^3.0.0", - "comma-separated-tokens": "^2.0.0", - "devlop": "^1.0.0", - "estree-util-is-identifier-name": "^3.0.0", - "hast-util-whitespace": "^3.0.0", - "mdast-util-mdx-expression": "^2.0.0", - "mdast-util-mdx-jsx": "^3.0.0", - "mdast-util-mdxjs-esm": "^2.0.0", - "property-information": "^6.0.0", - "space-separated-tokens": "^2.0.0", - "style-to-object": "^1.0.0", - "unist-util-position": "^5.0.0", - "vfile-message": "^4.0.0" + "@types/json-schema": "^7.0.4", + "ajv": "^6.12.2", + "ajv-keywords": "^3.4.1" + }, + "engines": { + "node": ">= 8.9.0" }, "funding": { "type": 
"opencollective", - "url": "https://opencollective.com/unified" + "url": "https://opencollective.com/webpack" } }, - "node_modules/hast-util-to-jsx-runtime/node_modules/inline-style-parser": { + "node_modules/fork-ts-checker-webpack-plugin/node_modules/tapable": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/tapable/-/tapable-1.1.3.tgz", + "integrity": "sha512-4WK/bYZmj8xLr+HUCODHGF1ZFzsYffasLUgEiMBY4fgtltdO6B4WJtlSbPaDTLpYTcGVwM2qLnFTICEcNxs3kA==", + "engines": { + "node": ">=6" + } + }, + "node_modules/form-data-encoder": { + "version": "2.1.4", + "resolved": "https://registry.npmjs.org/form-data-encoder/-/form-data-encoder-2.1.4.tgz", + "integrity": "sha512-yDYSgNMraqvnxiEXO4hi88+YZxaHC6QKzb5N84iRCTDeRO7ZALpir/lVmf/uXUhnwUr2O4HU8s/n6x+yNjQkHw==", + "engines": { + "node": ">= 14.17" + } + }, + "node_modules/format": { "version": "0.2.2", - "resolved": "https://registry.npmjs.org/inline-style-parser/-/inline-style-parser-0.2.2.tgz", - "integrity": "sha512-EcKzdTHVe8wFVOGEYXiW9WmJXPjqi1T+234YpJr98RiFYKHV3cdy1+3mkTE+KHTHxFFLH51SfaGOoUdW+v7ViQ==" + "resolved": "https://registry.npmjs.org/format/-/format-0.2.2.tgz", + "integrity": "sha512-wzsgA6WOq+09wrU1tsJ09udeR/YZRaeArL9e1wPbFg3GG2yDnC2ldKpxs4xunpFF9DgqCqOIra3bc1HWrJ37Ww==", + "engines": { + "node": ">=0.4.x" + } }, - "node_modules/hast-util-to-jsx-runtime/node_modules/style-to-object": { - "version": "1.0.5", - "resolved": "https://registry.npmjs.org/style-to-object/-/style-to-object-1.0.5.tgz", - "integrity": "sha512-rDRwHtoDD3UMMrmZ6BzOW0naTjMsVZLIjsGleSKS/0Oz+cgCfAPRspaqJuE8rDzpKha/nEvnM0IF4seEAZUTKQ==", - "dependencies": { - "inline-style-parser": "0.2.2" + "node_modules/forwarded": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/forwarded/-/forwarded-0.2.0.tgz", + "integrity": "sha512-buRG0fpBtRHSTCOASe6hD258tEubFoRLb4ZNA6NxMVHNw2gOcwHo9wyablzMzOA5z9xA9L1KNjk/Nt6MT9aYow==", + "engines": { + "node": ">= 0.6" } }, - "node_modules/hast-util-to-parse5": { - "version": "8.0.0", - "resolved": "https://registry.npmjs.org/hast-util-to-parse5/-/hast-util-to-parse5-8.0.0.tgz", - "integrity": "sha512-3KKrV5ZVI8if87DVSi1vDeByYrkGzg4mEfeu4alwgmmIeARiBLKCZS2uw5Gb6nU9x9Yufyj3iudm6i7nl52PFw==", - "dependencies": { - "@types/hast": "^3.0.0", - "comma-separated-tokens": "^2.0.0", - "devlop": "^1.0.0", - "property-information": "^6.0.0", - "space-separated-tokens": "^2.0.0", - "web-namespaces": "^2.0.0", - "zwitch": "^2.0.0" + "node_modules/fraction.js": { + "version": "4.3.7", + "resolved": "https://registry.npmjs.org/fraction.js/-/fraction.js-4.3.7.tgz", + "integrity": "sha512-ZsDfxO51wGAXREY55a7la9LScWpwv9RxIrYABrlvOFBlH/ShPnrtsXeuUIfXKKOVicNxQ+o8JTbJvjS4M89yew==", + "engines": { + "node": "*" }, "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" + "type": "patreon", + "url": "https://github.com/sponsors/rawify" } }, - "node_modules/hast-util-whitespace": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/hast-util-whitespace/-/hast-util-whitespace-3.0.0.tgz", - "integrity": "sha512-88JUN06ipLwsnv+dVn+OIYOvAuvBMy/Qoi6O7mQHxdPXpjy+Cd6xRkWwux7DKO+4sYILtLBRIKgsdpS2gQc7qw==", + "node_modules/fresh": { + "version": "0.5.2", + "resolved": "https://registry.npmjs.org/fresh/-/fresh-0.5.2.tgz", + "integrity": "sha512-zJ2mQYM18rEFOudeV4GShTGIQ7RbzA7ozbU9I/XBpm7kqgMywgmylMwXHxZJmkVoYkna9d2pVXVXPdYTP9ej8Q==", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/fs-extra": { + "version": "11.2.0", + "resolved": 
"https://registry.npmjs.org/fs-extra/-/fs-extra-11.2.0.tgz", + "integrity": "sha512-PmDi3uwK5nFuXh7XDTlVnS17xJS7vW36is2+w3xcv8SVxiB4NyATf4ctkVY5bkSjX0Y4nbvZCq1/EjtEyr9ktw==", "dependencies": { - "@types/hast": "^3.0.0" + "graceful-fs": "^4.2.0", + "jsonfile": "^6.0.1", + "universalify": "^2.0.0" }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" + "engines": { + "node": ">=14.14" } }, - "node_modules/hastscript": { - "version": "8.0.0", - "resolved": "https://registry.npmjs.org/hastscript/-/hastscript-8.0.0.tgz", - "integrity": "sha512-dMOtzCEd3ABUeSIISmrETiKuyydk1w0pa+gE/uormcTpSYuaNJPbX1NU3JLyscSLjwAQM8bWMhhIlnCqnRvDTw==", - "dependencies": { - "@types/hast": "^3.0.0", - "comma-separated-tokens": "^2.0.0", - "hast-util-parse-selector": "^4.0.0", - "property-information": "^6.0.0", - "space-separated-tokens": "^2.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - } + "node_modules/fs-monkey": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/fs-monkey/-/fs-monkey-1.0.5.tgz", + "integrity": "sha512-8uMbBjrhzW76TYgEV27Y5E//W2f/lTFmx78P2w19FZSxarhI/798APGQyuGCwmkNxgwGRhrLfvWyLBvNtuOmew==" }, - "node_modules/he": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/he/-/he-1.2.0.tgz", - "integrity": "sha512-F/1DnUGPopORZi0ni+CvrCgHQ5FyEAHRLSApuYWMmrbSwoN2Mn/7k+Gl38gJnR7yyDZk6WLXwiGod1JOWNDKGw==", - "bin": { - "he": "bin/he" - } + "node_modules/fs.realpath": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz", + "integrity": "sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw==" }, - "node_modules/history": { - "version": "4.10.1", - "resolved": "https://registry.npmjs.org/history/-/history-4.10.1.tgz", - "integrity": "sha512-36nwAD620w12kuzPAsyINPWJqlNbij+hpK1k9XRloDtym8mxzGYl2c17LnV6IAGB2Dmg4tEa7G7DlawS0+qjew==", - "dependencies": { - "@babel/runtime": "^7.1.2", - "loose-envify": "^1.2.0", - "resolve-pathname": "^3.0.0", - "tiny-invariant": "^1.0.2", - "tiny-warning": "^1.0.0", - "value-equal": "^1.0.1" + "node_modules/fsevents": { + "version": "2.3.3", + "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.3.tgz", + "integrity": "sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==", + "hasInstallScript": true, + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": "^8.16.0 || ^10.6.0 || >=11.0.0" } }, - "node_modules/hoist-non-react-statics": { - "version": "3.3.2", - "resolved": "https://registry.npmjs.org/hoist-non-react-statics/-/hoist-non-react-statics-3.3.2.tgz", - "integrity": "sha512-/gGivxi8JPKWNm/W0jSmzcMPpfpPLc3dY/6GxhX2hQ9iGj3aDfklV4ET7NjKpSinLpJ5vafa9iiGIEZg10SfBw==", - "dependencies": { - "react-is": "^16.7.0" + "node_modules/function-bind": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.2.tgz", + "integrity": "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==", + "funding": { + "url": "https://github.com/sponsors/ljharb" } }, - "node_modules/hpack.js": { - "version": "2.1.6", - "resolved": "https://registry.npmjs.org/hpack.js/-/hpack.js-2.1.6.tgz", - "integrity": "sha512-zJxVehUdMGIKsRaNt7apO2Gqp0BdqW5yaiGHXXmbpvxgBYVZnAql+BJb4RO5ad2MgpbZKn5G6nMnegrH1FcNYQ==", - "dependencies": { - "inherits": "^2.0.1", - "obuf": "^1.0.0", - "readable-stream": "^2.0.1", - "wbuf": "^1.1.0" + "node_modules/gensync": { 
+ "version": "1.0.0-beta.2", + "resolved": "https://registry.npmjs.org/gensync/-/gensync-1.0.0-beta.2.tgz", + "integrity": "sha512-3hN7NaskYvMDLQY55gnW3NQ+mesEAepTqlg+VEbj7zzqEMBVNhzcGYYeqFo/TlYz6eQiFcp1HcsCZO+nGgS8zg==", + "engines": { + "node": ">=6.9.0" } }, - "node_modules/hpack.js/node_modules/isarray": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/isarray/-/isarray-1.0.0.tgz", - "integrity": "sha512-VLghIWNM6ELQzo7zwmcg0NmTVyWKYjvIeM83yjp0wRDTmUnrM678fQbcKBo6n2CJEF0szoG//ytg+TKla89ALQ==" - }, - "node_modules/hpack.js/node_modules/readable-stream": { - "version": "2.3.8", - "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.8.tgz", - "integrity": "sha512-8p0AUk4XODgIewSi0l8Epjs+EVnWiK7NoDIEGU0HhE7+ZyY8D1IMY7odu5lRrFXGg71L15KG8QrPmum45RTtdA==", + "node_modules/get-intrinsic": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.3.0.tgz", + "integrity": "sha512-9fSjSaos/fRIVIp+xSJlE6lfwhES7LNtKaCBIamHsjr2na1BiABJPo0mOjjz8GJDURarmCPGqaiVg5mfjb98CQ==", + "license": "MIT", "dependencies": { - "core-util-is": "~1.0.0", - "inherits": "~2.0.3", - "isarray": "~1.0.0", - "process-nextick-args": "~2.0.0", - "safe-buffer": "~5.1.1", - "string_decoder": "~1.1.1", - "util-deprecate": "~1.0.1" + "call-bind-apply-helpers": "^1.0.2", + "es-define-property": "^1.0.1", + "es-errors": "^1.3.0", + "es-object-atoms": "^1.1.1", + "function-bind": "^1.1.2", + "get-proto": "^1.0.1", + "gopd": "^1.2.0", + "has-symbols": "^1.1.0", + "hasown": "^2.0.2", + "math-intrinsics": "^1.1.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" } }, - "node_modules/hpack.js/node_modules/safe-buffer": { - "version": "5.1.2", - "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz", - "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==" + "node_modules/get-own-enumerable-property-symbols": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/get-own-enumerable-property-symbols/-/get-own-enumerable-property-symbols-3.0.2.tgz", + "integrity": "sha512-I0UBV/XOz1XkIJHEUDMZAbzCThU/H8DxmSfmdGcKPnVhu2VfFqr34jr9777IyaTYvxjedWhqVIilEDsCdP5G6g==" }, - "node_modules/hpack.js/node_modules/string_decoder": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.1.1.tgz", - "integrity": "sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg==", + "node_modules/get-proto": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/get-proto/-/get-proto-1.0.1.tgz", + "integrity": "sha512-sTSfBjoXBp89JvIKIefqw7U2CCebsc74kiY6awiGogKtoSGbgjYE/G/+l9sF3MWFPNc9IcoOC4ODfKHfxFmp0g==", + "license": "MIT", "dependencies": { - "safe-buffer": "~5.1.0" + "dunder-proto": "^1.0.1", + "es-object-atoms": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" } }, - "node_modules/html-entities": { - "version": "2.4.0", - "resolved": "https://registry.npmjs.org/html-entities/-/html-entities-2.4.0.tgz", - "integrity": "sha512-igBTJcNNNhvZFRtm8uA6xMY6xYleeDwn3PeBCkDz7tHttv4F2hsDI2aPgNERWzvRcNYHNT3ymRaQzllmXj4YsQ==", - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/mdevils" - }, - { - "type": "patreon", - "url": "https://patreon.com/mdevils" - } - ] + "node_modules/get-stream": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-6.0.1.tgz", + "integrity": 
"sha512-ts6Wi+2j3jQjqi70w5AlN8DFnkSwC+MqmxEzdEALB2qXZYV3X/b1CTfgPLGJNMeAWxdPfU8FO1ms3NUfaHCPYg==", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } }, - "node_modules/html-escaper": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/html-escaper/-/html-escaper-2.0.2.tgz", - "integrity": "sha512-H2iMtd0I4Mt5eYiapRdIDjp+XzelXQ0tFE4JS7YFwFevXXMmOp9myNrUvCg0D6ws8iqkRPBfKHgbwig1SmlLfg==" + "node_modules/github-slugger": { + "version": "1.5.0", + "resolved": "https://registry.npmjs.org/github-slugger/-/github-slugger-1.5.0.tgz", + "integrity": "sha512-wIh+gKBI9Nshz2o46B0B3f5k/W+WI9ZAv6y5Dn5WJ5SK1t0TnDimB4WE5rmTD05ZAIn8HALCZVmCsvj0w0v0lw==" }, - "node_modules/html-minifier-terser": { - "version": "7.2.0", - "resolved": "https://registry.npmjs.org/html-minifier-terser/-/html-minifier-terser-7.2.0.tgz", - "integrity": "sha512-tXgn3QfqPIpGl9o+K5tpcj3/MN4SfLtsx2GWwBC3SSd0tXQGyF3gsSqad8loJgKZGM3ZxbYDd5yhiBIdWpmvLA==", + "node_modules/glob": { + "version": "7.2.3", + "resolved": "https://registry.npmjs.org/glob/-/glob-7.2.3.tgz", + "integrity": "sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==", "dependencies": { - "camel-case": "^4.1.2", - "clean-css": "~5.3.2", - "commander": "^10.0.0", - "entities": "^4.4.0", - "param-case": "^3.0.4", - "relateurl": "^0.2.7", - "terser": "^5.15.1" - }, - "bin": { - "html-minifier-terser": "cli.js" + "fs.realpath": "^1.0.0", + "inflight": "^1.0.4", + "inherits": "2", + "minimatch": "^3.1.1", + "once": "^1.3.0", + "path-is-absolute": "^1.0.0" }, "engines": { - "node": "^14.13.1 || >=16.0.0" + "node": "*" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" } }, - "node_modules/html-minifier-terser/node_modules/commander": { - "version": "10.0.1", - "resolved": "https://registry.npmjs.org/commander/-/commander-10.0.1.tgz", - "integrity": "sha512-y4Mg2tXshplEbSGzx7amzPwKKOCGuoSRP/CjEdwwk0FOGlUbq6lKuoyDZTNZkmxHdJtp54hdfY/JUrdL7Xfdug==", + "node_modules/glob-parent": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz", + "integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==", + "dependencies": { + "is-glob": "^4.0.1" + }, "engines": { - "node": ">=14" + "node": ">= 6" } }, - "node_modules/html-tags": { - "version": "3.3.1", - "resolved": "https://registry.npmjs.org/html-tags/-/html-tags-3.3.1.tgz", - "integrity": "sha512-ztqyC3kLto0e9WbNp0aeP+M3kTt+nbaIveGmUxAtZa+8iFgKLUOD4YKM5j+f3QD89bra7UeumolZHKuOXnTmeQ==", + "node_modules/glob-to-regexp": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/glob-to-regexp/-/glob-to-regexp-0.4.1.tgz", + "integrity": "sha512-lkX1HJXwyMcprw/5YUZc2s7DrpAiHB21/V+E1rHUrVNokkvB6bqMzT0VfV6/86ZNabt1k14YOIaT7nDvOX3Iiw==", + "license": "BSD-2-Clause" + }, + "node_modules/global-dirs": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/global-dirs/-/global-dirs-3.0.1.tgz", + "integrity": "sha512-NBcGGFbBA9s1VzD41QXDG+3++t9Mn5t1FpLdhESY6oKY4gYTFpX4wO3sqGUa0Srjtbfj3szX0RnemmrVRUdULA==", + "dependencies": { + "ini": "2.0.0" + }, "engines": { - "node": ">=8" + "node": ">=10" }, "funding": { "url": "https://github.com/sponsors/sindresorhus" } }, - "node_modules/html-void-elements": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/html-void-elements/-/html-void-elements-3.0.0.tgz", - "integrity": 
"sha512-bEqo66MRXsUGxWHV5IP0PUiAWwoEjba4VCzg0LjFJBpchPaTfyfCKTG6bc5F8ucKec3q5y6qOdGyYTSBEvhCrg==", - "funding": { - "type": "github", - "url": "https://github.com/sponsors/wooorm" + "node_modules/global-dirs/node_modules/ini": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/ini/-/ini-2.0.0.tgz", + "integrity": "sha512-7PnF4oN3CvZF23ADhA5wRaYEQpJ8qygSkbtTXWBeXWXmEVRXK+1ITciHWwHhsjv1TmW0MgacIv6hEi5pX5NQdA==", + "engines": { + "node": ">=10" } }, - "node_modules/html-webpack-plugin": { - "version": "5.6.0", - "resolved": "https://registry.npmjs.org/html-webpack-plugin/-/html-webpack-plugin-5.6.0.tgz", - "integrity": "sha512-iwaY4wzbe48AfKLZ/Cc8k0L+FKG6oSNRaZ8x5A/T/IVDGyXcbHncM9TdDa93wn0FsSm82FhTKW7f3vS61thXAw==", + "node_modules/global-modules": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/global-modules/-/global-modules-2.0.0.tgz", + "integrity": "sha512-NGbfmJBp9x8IxyJSd1P+otYK8vonoJactOogrVfFRIAEY1ukil8RSKDz2Yo7wh1oihl51l/r6W4epkeKJHqL8A==", "dependencies": { - "@types/html-minifier-terser": "^6.0.0", - "html-minifier-terser": "^6.0.2", - "lodash": "^4.17.21", - "pretty-error": "^4.0.0", - "tapable": "^2.0.0" + "global-prefix": "^3.0.0" }, "engines": { - "node": ">=10.13.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/html-webpack-plugin" + "node": ">=6" + } + }, + "node_modules/global-prefix": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/global-prefix/-/global-prefix-3.0.0.tgz", + "integrity": "sha512-awConJSVCHVGND6x3tmMaKcQvwXLhjdkmomy2W+Goaui8YPgYgXJZewhg3fWC+DlfqqQuWg8AwqjGTD2nAPVWg==", + "dependencies": { + "ini": "^1.3.5", + "kind-of": "^6.0.2", + "which": "^1.3.1" }, - "peerDependencies": { - "@rspack/core": "0.x || 1.x", - "webpack": "^5.20.0" - }, - "peerDependenciesMeta": { - "@rspack/core": { - "optional": true - }, - "webpack": { - "optional": true - } - } - }, - "node_modules/html-webpack-plugin/node_modules/commander": { - "version": "8.3.0", - "resolved": "https://registry.npmjs.org/commander/-/commander-8.3.0.tgz", - "integrity": "sha512-OkTL9umf+He2DZkUq8f8J9of7yL6RJKI24dVITBmNfZBmri9zYZQrKkuXiKhyfPSu8tUhnVBB1iKXevvnlR4Ww==", "engines": { - "node": ">= 12" + "node": ">=6" } }, - "node_modules/html-webpack-plugin/node_modules/html-minifier-terser": { - "version": "6.1.0", - "resolved": "https://registry.npmjs.org/html-minifier-terser/-/html-minifier-terser-6.1.0.tgz", - "integrity": "sha512-YXxSlJBZTP7RS3tWnQw74ooKa6L9b9i9QYXY21eUEvhZ3u9XLfv6OnFsQq6RxkhHygsaUMvYsZRV5rU/OVNZxw==", + "node_modules/global-prefix/node_modules/which": { + "version": "1.3.1", + "resolved": "https://registry.npmjs.org/which/-/which-1.3.1.tgz", + "integrity": "sha512-HxJdYWq1MTIQbJ3nw0cqssHoTNU267KlrDuGZ1WYlxDStUtKUhOaJmh112/TZmHxxUfuJqPXSOm7tDyas0OSIQ==", "dependencies": { - "camel-case": "^4.1.2", - "clean-css": "^5.2.2", - "commander": "^8.3.0", - "he": "^1.2.0", - "param-case": "^3.0.4", - "relateurl": "^0.2.7", - "terser": "^5.10.0" + "isexe": "^2.0.0" }, "bin": { - "html-minifier-terser": "cli.js" - }, - "engines": { - "node": ">=12" + "which": "bin/which" } }, - "node_modules/htmlparser2": { - "version": "8.0.2", - "resolved": "https://registry.npmjs.org/htmlparser2/-/htmlparser2-8.0.2.tgz", - "integrity": "sha512-GYdjWKDkbRLkZ5geuHs5NY1puJ+PXwP7+fHPRz06Eirsb9ugf6d8kkXav6ADhcODhFFPMIXyxkxSuMf3D6NCFA==", - "funding": [ - "https://github.com/fb55/htmlparser2?sponsor=1", - { - "type": "github", - "url": "https://github.com/sponsors/fb55" - } - ], - "dependencies": { - "domelementtype": "^2.3.0", 
- "domhandler": "^5.0.3", - "domutils": "^3.0.1", - "entities": "^4.4.0" + "node_modules/globals": { + "version": "11.12.0", + "resolved": "https://registry.npmjs.org/globals/-/globals-11.12.0.tgz", + "integrity": "sha512-WOBp/EEGUiIsJSp7wcv/y6MO+lV9UoncWqxuFfm8eBwzWNgyfBd6Gz+IeKQ9jCmyhoH99g15M3T+QaVHFjizVA==", + "engines": { + "node": ">=4" } }, - "node_modules/http-cache-semantics": { - "version": "4.1.1", - "resolved": "https://registry.npmjs.org/http-cache-semantics/-/http-cache-semantics-4.1.1.tgz", - "integrity": "sha512-er295DKPVsV82j5kw1Gjt+ADA/XYHsajl82cGNQG2eyoPkvgUhX+nDIyelzhIWbbsXP39EHcI6l5tYs2FYqYXQ==" - }, - "node_modules/http-deceiver": { - "version": "1.2.7", - "resolved": "https://registry.npmjs.org/http-deceiver/-/http-deceiver-1.2.7.tgz", - "integrity": "sha512-LmpOGxTfbpgtGVxJrj5k7asXHCgNZp5nLfp+hWc8QQRqtb7fUy6kRY3BO1h9ddF6yIPYUARgxGOwB42DnxIaNw==" - }, - "node_modules/http-errors": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/http-errors/-/http-errors-2.0.0.tgz", - "integrity": "sha512-FtwrG/euBzaEjYeRqOgly7G0qviiXoJWnvEH2Z1plBdXgbyjv34pHTSb9zoeHMyDy33+DWy5Wt9Wo+TURtOYSQ==", + "node_modules/globby": { + "version": "11.1.0", + "resolved": "https://registry.npmjs.org/globby/-/globby-11.1.0.tgz", + "integrity": "sha512-jhIXaOzy1sb8IyocaruWSn1TjmnBVs8Ayhcy83rmxNJ8q2uWKCAj3CnJY+KpGSXCueAPc0i05kVvVKtP1t9S3g==", "dependencies": { - "depd": "2.0.0", - "inherits": "2.0.4", - "setprototypeof": "1.2.0", - "statuses": "2.0.1", - "toidentifier": "1.0.1" + "array-union": "^2.1.0", + "dir-glob": "^3.0.1", + "fast-glob": "^3.2.9", + "ignore": "^5.2.0", + "merge2": "^1.4.1", + "slash": "^3.0.0" }, "engines": { - "node": ">= 0.8" + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" } }, - "node_modules/http-parser-js": { - "version": "0.5.8", - "resolved": "https://registry.npmjs.org/http-parser-js/-/http-parser-js-0.5.8.tgz", - "integrity": "sha512-SGeBX54F94Wgu5RH3X5jsDtf4eHyRogWX1XGT3b4HuW3tQPM4AaBzoUji/4AAJNXCEOWZ5O0DgZmJw1947gD5Q==" - }, - "node_modules/http-proxy": { - "version": "1.18.1", - "resolved": "https://registry.npmjs.org/http-proxy/-/http-proxy-1.18.1.tgz", - "integrity": "sha512-7mz/721AbnJwIVbnaSv1Cz3Am0ZLT/UBwkC92VlxhXv/k/BBQfM2fXElQNC27BVGr0uwUpplYPQM9LnaBMR5NQ==", - "dependencies": { - "eventemitter3": "^4.0.0", - "follow-redirects": "^1.0.0", - "requires-port": "^1.0.0" - }, + "node_modules/gopd": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/gopd/-/gopd-1.2.0.tgz", + "integrity": "sha512-ZUKRh6/kUFoAiTAtTYPZJ3hw9wNxx+BIBOijnlG9PnrJsCcSjs1wyyD6vJpaYtgnzDrKYRSqf3OO6Rfa93xsRg==", + "license": "MIT", "engines": { - "node": ">=8.0.0" + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" } }, - "node_modules/http-proxy-middleware": { - "version": "2.0.6", - "resolved": "https://registry.npmjs.org/http-proxy-middleware/-/http-proxy-middleware-2.0.6.tgz", - "integrity": "sha512-ya/UeJ6HVBYxrgYotAZo1KvPWlgB48kUJLDePFeneHsVujFaW5WNj2NgWCAE//B1Dl02BIfYlpNgBy8Kf8Rjmw==", + "node_modules/got": { + "version": "12.6.1", + "resolved": "https://registry.npmjs.org/got/-/got-12.6.1.tgz", + "integrity": "sha512-mThBblvlAF1d4O5oqyvN+ZxLAYwIJK7bpMxgYqPD9okW0C3qm5FFn7k811QrcuEBwaogR3ngOFoCfs6mRv7teQ==", "dependencies": { - "@types/http-proxy": "^1.17.8", - "http-proxy": "^1.18.1", - "is-glob": "^4.0.1", - "is-plain-obj": "^3.0.0", - "micromatch": "^4.0.2" + "@sindresorhus/is": "^5.2.0", + "@szmarczak/http-timer": "^5.0.1", + "cacheable-lookup": "^7.0.0", + "cacheable-request": 
"^10.2.8", + "decompress-response": "^6.0.0", + "form-data-encoder": "^2.1.2", + "get-stream": "^6.0.1", + "http2-wrapper": "^2.1.10", + "lowercase-keys": "^3.0.0", + "p-cancelable": "^3.0.0", + "responselike": "^3.0.0" }, "engines": { - "node": ">=12.0.0" - }, - "peerDependencies": { - "@types/express": "^4.17.13" + "node": ">=14.16" }, - "peerDependenciesMeta": { - "@types/express": { - "optional": true - } + "funding": { + "url": "https://github.com/sindresorhus/got?sponsor=1" } }, - "node_modules/http-proxy-middleware/node_modules/is-plain-obj": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-3.0.0.tgz", - "integrity": "sha512-gwsOE28k+23GP1B6vFl1oVh/WOzmawBrKwo5Ev6wMKzPkaXaCDIQKzLnvsA42DRlbVTWorkgTKIviAKCWkfUwA==", + "node_modules/got/node_modules/@sindresorhus/is": { + "version": "5.6.0", + "resolved": "https://registry.npmjs.org/@sindresorhus/is/-/is-5.6.0.tgz", + "integrity": "sha512-TV7t8GKYaJWsn00tFDqBw8+Uqmr8A0fRU1tvTQhyZzGv0sJCGRQL3JGMI3ucuKo3XIZdUP+Lx7/gh2t3lewy7g==", "engines": { - "node": ">=10" + "node": ">=14.16" }, "funding": { - "url": "https://github.com/sponsors/sindresorhus" + "url": "https://github.com/sindresorhus/is?sponsor=1" } }, - "node_modules/http2-wrapper": { - "version": "2.2.1", - "resolved": "https://registry.npmjs.org/http2-wrapper/-/http2-wrapper-2.2.1.tgz", - "integrity": "sha512-V5nVw1PAOgfI3Lmeaj2Exmeg7fenjhRUgz1lPSezy1CuhPYbgQtbQj4jZfEAEMlaL+vupsvhjqCyjzob0yxsmQ==", + "node_modules/graceful-fs": { + "version": "4.2.11", + "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.11.tgz", + "integrity": "sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ==" + }, + "node_modules/gray-matter": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/gray-matter/-/gray-matter-4.0.3.tgz", + "integrity": "sha512-5v6yZd4JK3eMI3FqqCouswVqwugaA9r4dNZB1wwcmrD02QkV5H0y7XBQW8QwQqEaZY1pM9aqORSORhJRdNK44Q==", "dependencies": { - "quick-lru": "^5.1.1", - "resolve-alpn": "^1.2.0" + "js-yaml": "^3.13.1", + "kind-of": "^6.0.2", + "section-matter": "^1.0.0", + "strip-bom-string": "^1.0.0" }, "engines": { - "node": ">=10.19.0" + "node": ">=6.0" } }, - "node_modules/human-signals": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/human-signals/-/human-signals-2.1.0.tgz", - "integrity": "sha512-B4FFZ6q/T2jhhksgkbEW3HBvWIfDW85snkQgawt07S7J5QXTk6BkNV+0yAeZrM5QpMAdYlocGoljn0sJ/WQkFw==", - "engines": { - "node": ">=10.17.0" + "node_modules/gray-matter/node_modules/argparse": { + "version": "1.0.10", + "resolved": "https://registry.npmjs.org/argparse/-/argparse-1.0.10.tgz", + "integrity": "sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg==", + "dependencies": { + "sprintf-js": "~1.0.2" } }, - "node_modules/iconv-lite": { - "version": "0.4.24", - "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.4.24.tgz", - "integrity": "sha512-v3MXnZAcvnywkTUEZomIActle7RXXeedOR31wwl7VlyoXO4Qi9arvSenNQWne1TcRwhCL1HwLI21bEqdpj8/rA==", + "node_modules/gray-matter/node_modules/js-yaml": { + "version": "3.14.1", + "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.14.1.tgz", + "integrity": "sha512-okMH7OXXJ7YrN9Ok3/SXrnu4iX9yOk+25nqX4imS2npuvTYDmo/QEZoqwZkYaIDk3jVvBOTOIEgEhaLOynBS9g==", "dependencies": { - "safer-buffer": ">= 2.1.2 < 3" + "argparse": "^1.0.7", + "esprima": "^4.0.0" }, - "engines": { - "node": ">=0.10.0" + "bin": { + "js-yaml": "bin/js-yaml.js" } }, - "node_modules/icss-utils": { - 
"version": "5.1.0", - "resolved": "https://registry.npmjs.org/icss-utils/-/icss-utils-5.1.0.tgz", - "integrity": "sha512-soFhflCVWLfRNOPU3iv5Z9VUdT44xFRbzjLsEzSr5AQmgqPMTHdU3PMT1Cf1ssx8fLNJDA1juftYl+PUcv3MqA==", + "node_modules/gzip-size": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/gzip-size/-/gzip-size-6.0.0.tgz", + "integrity": "sha512-ax7ZYomf6jqPTQ4+XCpUGyXKHk5WweS+e05MBO4/y3WJ5RkmPXNKvX+bx1behVILVwr6JSQvZAku021CHPXG3Q==", + "dependencies": { + "duplexer": "^0.1.2" + }, "engines": { - "node": "^10 || ^12 || >= 14" + "node": ">=10" }, - "peerDependencies": { - "postcss": "^8.1.0" + "funding": { + "url": "https://github.com/sponsors/sindresorhus" } }, - "node_modules/ignore": { - "version": "5.3.0", - "resolved": "https://registry.npmjs.org/ignore/-/ignore-5.3.0.tgz", - "integrity": "sha512-g7dmpshy+gD7mh88OC9NwSGTKoc3kyLAZQRU1mt53Aw/vnvfXnbC+F/7F7QoYVKbV+KNvJx8wArewKy1vXMtlg==", + "node_modules/handle-thing": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/handle-thing/-/handle-thing-2.0.1.tgz", + "integrity": "sha512-9Qn4yBxelxoh2Ow62nP+Ka/kMnOXRi8BXnRaUwezLNhqelnN49xKz4F/dPP8OYLxLxq6JDtZb2i9XznUQbNPTg==" + }, + "node_modules/has-flag": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", + "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", "engines": { - "node": ">= 4" + "node": ">=8" } }, - "node_modules/image-size": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/image-size/-/image-size-1.1.1.tgz", - "integrity": "sha512-541xKlUw6jr/6gGuk92F+mYM5zaFAc5ahphvkqvNe2bQ6gVBkd6bfrmVJ2t4KDAfikAYZyIqTnktX3i6/aQDrQ==", + "node_modules/has-property-descriptors": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/has-property-descriptors/-/has-property-descriptors-1.0.1.tgz", + "integrity": "sha512-VsX8eaIewvas0xnvinAe9bw4WfIeODpGYikiWYLH+dma0Jw6KHYqWiWfhQlgOVK8D6PvjubK5Uc4P0iIhIcNVg==", "dependencies": { - "queue": "6.0.2" - }, - "bin": { - "image-size": "bin/image-size.js" + "get-intrinsic": "^1.2.2" }, - "engines": { - "node": ">=16.x" - } - }, - "node_modules/immediate": { - "version": "3.3.0", - "resolved": "https://registry.npmjs.org/immediate/-/immediate-3.3.0.tgz", - "integrity": "sha512-HR7EVodfFUdQCTIeySw+WDRFJlPcLOJbXfwwZ7Oom6tjsvZ3bOkCDJHehQC3nxJrv7+f9XecwazynjU8e4Vw3Q==" - }, - "node_modules/immer": { - "version": "9.0.21", - "resolved": "https://registry.npmjs.org/immer/-/immer-9.0.21.tgz", - "integrity": "sha512-bc4NBHqOqSfRW7POMkHd51LvClaeMXpm8dx0e8oE2GORbq5aRK7Bxl4FyzVLdGtLmvLKL7BTDBG5ACQm4HWjTA==", "funding": { - "type": "opencollective", - "url": "https://opencollective.com/immer" + "url": "https://github.com/sponsors/ljharb" } }, - "node_modules/import-fresh": { - "version": "3.3.0", - "resolved": "https://registry.npmjs.org/import-fresh/-/import-fresh-3.3.0.tgz", - "integrity": "sha512-veYYhQa+D1QBKznvhUHxb8faxlrwUnxseDAbAp457E0wLNio2bOSKnjYDhMj+YiAq61xrMGhQk9iXVk5FzgQMw==", - "dependencies": { - "parent-module": "^1.0.0", - "resolve-from": "^4.0.0" - }, + "node_modules/has-symbols": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.1.0.tgz", + "integrity": "sha512-1cDNdwJ2Jaohmb3sg4OmKaMBwuC48sYni5HUw2DvsC8LjGTLK9h+eb1X6RyuOHe4hT0ULCW68iomhjUoKUqlPQ==", + "license": "MIT", "engines": { - "node": ">=6" + "node": ">= 0.4" }, "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/import-lazy": { - "version": "4.0.0", - 
"resolved": "https://registry.npmjs.org/import-lazy/-/import-lazy-4.0.0.tgz", - "integrity": "sha512-rKtvo6a868b5Hu3heneU+L4yEQ4jYKLtjpnPeUdK7h0yzXGmyBTypknlkCvHFBqfX9YlorEiMM6Dnq/5atfHkw==", - "engines": { - "node": ">=8" - } - }, - "node_modules/imurmurhash": { - "version": "0.1.4", - "resolved": "https://registry.npmjs.org/imurmurhash/-/imurmurhash-0.1.4.tgz", - "integrity": "sha512-JmXMZ6wuvDmLiHEml9ykzqO6lwFbof0GG4IkcGaENdCRDDmMVnny7s5HsIgHCbaq0w2MyPhDqkhTUgS2LU2PHA==", - "engines": { - "node": ">=0.8.19" + "url": "https://github.com/sponsors/ljharb" } }, - "node_modules/indent-string": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/indent-string/-/indent-string-4.0.0.tgz", - "integrity": "sha512-EdDDZu4A2OyIK7Lr/2zG+w5jmbuk1DVBnEwREQvBzspBJkCEbRa8GxU1lghYcaGJCnRWibjDXlq779X1/y5xwg==", + "node_modules/has-yarn": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/has-yarn/-/has-yarn-3.0.0.tgz", + "integrity": "sha512-IrsVwUHhEULx3R8f/aA8AHuEzAorplsab/v8HBzEiIukwq5i/EC+xmOW+HfP1OaDP+2JkgT1yILHN2O3UFIbcA==", "engines": { - "node": ">=8" + "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" } }, - "node_modules/infima": { - "version": "0.2.0-alpha.43", - "resolved": "https://registry.npmjs.org/infima/-/infima-0.2.0-alpha.43.tgz", - "integrity": "sha512-2uw57LvUqW0rK/SWYnd/2rRfxNA5DDNOh33jxF7fy46VWoNhGxiUQyVZHbBMjQ33mQem0cjdDVwgWVAmlRfgyQ==", + "node_modules/hasown": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/hasown/-/hasown-2.0.2.tgz", + "integrity": "sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==", "license": "MIT", - "engines": { - "node": ">=12" - } - }, - "node_modules/inflight": { - "version": "1.0.6", - "resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz", - "integrity": "sha512-k92I/b08q4wvFscXCLvqfsHCrjrF7yiXsQuIVvVE7N82W3+aqpzuUdBbfhWcy/FZR3/4IgflMgKLOsvPDrGCJA==", "dependencies": { - "once": "^1.3.0", - "wrappy": "1" - } - }, - "node_modules/inherits": { - "version": "2.0.4", - "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz", - "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==" - }, - "node_modules/ini": { - "version": "1.3.8", - "resolved": "https://registry.npmjs.org/ini/-/ini-1.3.8.tgz", - "integrity": "sha512-JV/yugV2uzW5iMRSiZAyDtQd+nxtUnjeLt0acNdw98kKLrvuRVyB80tsREOE7yvGVgalhZ6RNXCmEHkUKBKxew==" - }, - "node_modules/inline-style-parser": { - "version": "0.1.1", - "resolved": "https://registry.npmjs.org/inline-style-parser/-/inline-style-parser-0.1.1.tgz", - "integrity": "sha512-7NXolsK4CAS5+xvdj5OMMbI962hU/wvwoxk+LWR9Ek9bVtyuuYScDN6eS0rUm6TxApFpw7CX1o4uJzcd4AyD3Q==" - }, - "node_modules/interpret": { - "version": "1.4.0", - "resolved": "https://registry.npmjs.org/interpret/-/interpret-1.4.0.tgz", - "integrity": "sha512-agE4QfB2Lkp9uICn7BAqoscw4SZP9kTE2hxiFI3jBPmXJfdqiahTbUuKGsMoN2GtqL9AxhYioAcVvgsb1HvRbA==", + "function-bind": "^1.1.2" + }, "engines": { - "node": ">= 0.10" + "node": ">= 0.4" } }, - "node_modules/invariant": { - "version": "2.2.4", - "resolved": "https://registry.npmjs.org/invariant/-/invariant-2.2.4.tgz", - "integrity": "sha512-phJfQVBuaJM5raOpJjSfkiD6BpbCE4Ns//LaXl6wGYtUBY83nWS6Rf9tXm2e8VaK60JEjYldbPif/A2B1C2gNA==", + "node_modules/hast-util-from-parse5": { + "version": "8.0.1", + "resolved": "https://registry.npmjs.org/hast-util-from-parse5/-/hast-util-from-parse5-8.0.1.tgz", + 
"integrity": "sha512-Er/Iixbc7IEa7r/XLtuG52zoqn/b3Xng/w6aZQ0xGVxzhw5xUFxcRqdPzP6yFi/4HBYRaifaI5fQ1RH8n0ZeOQ==", "dependencies": { - "loose-envify": "^1.0.0" - } - }, - "node_modules/ipaddr.js": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/ipaddr.js/-/ipaddr.js-2.1.0.tgz", - "integrity": "sha512-LlbxQ7xKzfBusov6UMi4MFpEg0m+mAm9xyNGEduwXMEDuf4WfzB/RZwMVYEd7IKGvh4IUkEXYxtAVu9T3OelJQ==", - "engines": { - "node": ">= 10" - } - }, - "node_modules/is-alphabetical": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/is-alphabetical/-/is-alphabetical-2.0.1.tgz", - "integrity": "sha512-FWyyY60MeTNyeSRpkM2Iry0G9hpr7/9kD40mD/cGQEuilcZYS4okz8SN2Q6rLCJ8gbCt6fN+rC+6tMGS99LaxQ==", + "@types/hast": "^3.0.0", + "@types/unist": "^3.0.0", + "devlop": "^1.0.0", + "hastscript": "^8.0.0", + "property-information": "^6.0.0", + "vfile": "^6.0.0", + "vfile-location": "^5.0.0", + "web-namespaces": "^2.0.0" + }, "funding": { - "type": "github", - "url": "https://github.com/sponsors/wooorm" + "type": "opencollective", + "url": "https://opencollective.com/unified" } }, - "node_modules/is-alphanumerical": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/is-alphanumerical/-/is-alphanumerical-2.0.1.tgz", - "integrity": "sha512-hmbYhX/9MUMF5uh7tOXyK/n0ZvWpad5caBA17GsC6vyuCqaWliRG5K1qS9inmUhEMaOBIW7/whAnSwveW/LtZw==", + "node_modules/hast-util-parse-selector": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/hast-util-parse-selector/-/hast-util-parse-selector-4.0.0.tgz", + "integrity": "sha512-wkQCkSYoOGCRKERFWcxMVMOcYE2K1AaNLU8DXS9arxnLOUEWbOXKXiJUNzEpqZ3JOKpnha3jkFrumEjVliDe7A==", "dependencies": { - "is-alphabetical": "^2.0.0", - "is-decimal": "^2.0.0" + "@types/hast": "^3.0.0" }, "funding": { - "type": "github", - "url": "https://github.com/sponsors/wooorm" + "type": "opencollective", + "url": "https://opencollective.com/unified" } }, - "node_modules/is-arrayish": { - "version": "0.2.1", - "resolved": "https://registry.npmjs.org/is-arrayish/-/is-arrayish-0.2.1.tgz", - "integrity": "sha512-zz06S8t0ozoDXMG+ube26zeCTNXcKIPJZJi8hBrF4idCLms4CG9QtK7qBl1boi5ODzFpjswb5JPmHCbMpjaYzg==" - }, - "node_modules/is-binary-path": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/is-binary-path/-/is-binary-path-2.1.0.tgz", - "integrity": "sha512-ZMERYes6pDydyuGidse7OsHxtbI7WVeUEozgR/g7rd0xUimYNlvZRE/K2MgZTjWy725IfelLeVcEM97mmtRGXw==", + "node_modules/hast-util-raw": { + "version": "9.0.1", + "resolved": "https://registry.npmjs.org/hast-util-raw/-/hast-util-raw-9.0.1.tgz", + "integrity": "sha512-5m1gmba658Q+lO5uqL5YNGQWeh1MYWZbZmWrM5lncdcuiXuo5E2HT/CIOp0rLF8ksfSwiCVJ3twlgVRyTGThGA==", "dependencies": { - "binary-extensions": "^2.0.0" + "@types/hast": "^3.0.0", + "@types/unist": "^3.0.0", + "@ungap/structured-clone": "^1.0.0", + "hast-util-from-parse5": "^8.0.0", + "hast-util-to-parse5": "^8.0.0", + "html-void-elements": "^3.0.0", + "mdast-util-to-hast": "^13.0.0", + "parse5": "^7.0.0", + "unist-util-position": "^5.0.0", + "unist-util-visit": "^5.0.0", + "vfile": "^6.0.0", + "web-namespaces": "^2.0.0", + "zwitch": "^2.0.0" }, - "engines": { - "node": ">=8" + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" } }, - "node_modules/is-ci": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/is-ci/-/is-ci-3.0.1.tgz", - "integrity": "sha512-ZYvCgrefwqoQ6yTyYUbQu64HsITZ3NfKX1lzaEYdkTDcfKzzCI/wthRRYKkdjHKFVgNiXKAKm65Zo1pk2as/QQ==", + "node_modules/hast-util-to-estree": { + "version": "3.1.0", + "resolved": 
"https://registry.npmjs.org/hast-util-to-estree/-/hast-util-to-estree-3.1.0.tgz", + "integrity": "sha512-lfX5g6hqVh9kjS/B9E2gSkvHH4SZNiQFiqWS0x9fENzEl+8W12RqdRxX6d/Cwxi30tPQs3bIO+aolQJNp1bIyw==", "dependencies": { - "ci-info": "^3.2.0" + "@types/estree": "^1.0.0", + "@types/estree-jsx": "^1.0.0", + "@types/hast": "^3.0.0", + "comma-separated-tokens": "^2.0.0", + "devlop": "^1.0.0", + "estree-util-attach-comments": "^3.0.0", + "estree-util-is-identifier-name": "^3.0.0", + "hast-util-whitespace": "^3.0.0", + "mdast-util-mdx-expression": "^2.0.0", + "mdast-util-mdx-jsx": "^3.0.0", + "mdast-util-mdxjs-esm": "^2.0.0", + "property-information": "^6.0.0", + "space-separated-tokens": "^2.0.0", + "style-to-object": "^0.4.0", + "unist-util-position": "^5.0.0", + "zwitch": "^2.0.0" }, - "bin": { - "is-ci": "bin.js" + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" } }, - "node_modules/is-core-module": { - "version": "2.13.1", - "resolved": "https://registry.npmjs.org/is-core-module/-/is-core-module-2.13.1.tgz", - "integrity": "sha512-hHrIjvZsftOsvKSn2TRYl63zvxsgE0K+0mYMoH6gD4omR5IWB2KynivBQczo3+wF1cCkjzvptnI9Q0sPU66ilw==", + "node_modules/hast-util-to-jsx-runtime": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/hast-util-to-jsx-runtime/-/hast-util-to-jsx-runtime-2.3.0.tgz", + "integrity": "sha512-H/y0+IWPdsLLS738P8tDnrQ8Z+dj12zQQ6WC11TIM21C8WFVoIxcqWXf2H3hiTVZjF1AWqoimGwrTWecWrnmRQ==", "dependencies": { - "hasown": "^2.0.0" + "@types/estree": "^1.0.0", + "@types/hast": "^3.0.0", + "@types/unist": "^3.0.0", + "comma-separated-tokens": "^2.0.0", + "devlop": "^1.0.0", + "estree-util-is-identifier-name": "^3.0.0", + "hast-util-whitespace": "^3.0.0", + "mdast-util-mdx-expression": "^2.0.0", + "mdast-util-mdx-jsx": "^3.0.0", + "mdast-util-mdxjs-esm": "^2.0.0", + "property-information": "^6.0.0", + "space-separated-tokens": "^2.0.0", + "style-to-object": "^1.0.0", + "unist-util-position": "^5.0.0", + "vfile-message": "^4.0.0" }, "funding": { - "url": "https://github.com/sponsors/ljharb" + "type": "opencollective", + "url": "https://opencollective.com/unified" } }, - "node_modules/is-decimal": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/is-decimal/-/is-decimal-2.0.1.tgz", - "integrity": "sha512-AAB9hiomQs5DXWcRB1rqsxGUstbRroFOPPVAomNk/3XHR5JyEZChOyTWe2oayKnsSsr/kcGqF+z6yuH6HHpN0A==", - "funding": { - "type": "github", - "url": "https://github.com/sponsors/wooorm" - } + "node_modules/hast-util-to-jsx-runtime/node_modules/inline-style-parser": { + "version": "0.2.2", + "resolved": "https://registry.npmjs.org/inline-style-parser/-/inline-style-parser-0.2.2.tgz", + "integrity": "sha512-EcKzdTHVe8wFVOGEYXiW9WmJXPjqi1T+234YpJr98RiFYKHV3cdy1+3mkTE+KHTHxFFLH51SfaGOoUdW+v7ViQ==" }, - "node_modules/is-docker": { - "version": "2.2.1", - "resolved": "https://registry.npmjs.org/is-docker/-/is-docker-2.2.1.tgz", - "integrity": "sha512-F+i2BKsFrH66iaUFc0woD8sLy8getkwTwtOBjvs56Cx4CgJDeKQeqfz8wAYiSb8JOprWhHH5p77PbmYCvvUuXQ==", - "bin": { - "is-docker": "cli.js" - }, - "engines": { - "node": ">=8" + "node_modules/hast-util-to-jsx-runtime/node_modules/style-to-object": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/style-to-object/-/style-to-object-1.0.5.tgz", + "integrity": "sha512-rDRwHtoDD3UMMrmZ6BzOW0naTjMsVZLIjsGleSKS/0Oz+cgCfAPRspaqJuE8rDzpKha/nEvnM0IF4seEAZUTKQ==", + "dependencies": { + "inline-style-parser": "0.2.2" + } + }, + "node_modules/hast-util-to-parse5": { + "version": "8.0.0", + "resolved": 
"https://registry.npmjs.org/hast-util-to-parse5/-/hast-util-to-parse5-8.0.0.tgz", + "integrity": "sha512-3KKrV5ZVI8if87DVSi1vDeByYrkGzg4mEfeu4alwgmmIeARiBLKCZS2uw5Gb6nU9x9Yufyj3iudm6i7nl52PFw==", + "dependencies": { + "@types/hast": "^3.0.0", + "comma-separated-tokens": "^2.0.0", + "devlop": "^1.0.0", + "property-information": "^6.0.0", + "space-separated-tokens": "^2.0.0", + "web-namespaces": "^2.0.0", + "zwitch": "^2.0.0" }, "funding": { - "url": "https://github.com/sponsors/sindresorhus" + "type": "opencollective", + "url": "https://opencollective.com/unified" } }, - "node_modules/is-extendable": { - "version": "0.1.1", - "resolved": "https://registry.npmjs.org/is-extendable/-/is-extendable-0.1.1.tgz", - "integrity": "sha512-5BMULNob1vgFX6EjQw5izWDxrecWK9AM72rugNr0TFldMOi0fj6Jk+zeKIt0xGj4cEfQIJth4w3OKWOJ4f+AFw==", - "engines": { - "node": ">=0.10.0" + "node_modules/hast-util-whitespace": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/hast-util-whitespace/-/hast-util-whitespace-3.0.0.tgz", + "integrity": "sha512-88JUN06ipLwsnv+dVn+OIYOvAuvBMy/Qoi6O7mQHxdPXpjy+Cd6xRkWwux7DKO+4sYILtLBRIKgsdpS2gQc7qw==", + "dependencies": { + "@types/hast": "^3.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" } }, - "node_modules/is-extglob": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz", - "integrity": "sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ==", - "engines": { - "node": ">=0.10.0" + "node_modules/hastscript": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/hastscript/-/hastscript-8.0.0.tgz", + "integrity": "sha512-dMOtzCEd3ABUeSIISmrETiKuyydk1w0pa+gE/uormcTpSYuaNJPbX1NU3JLyscSLjwAQM8bWMhhIlnCqnRvDTw==", + "dependencies": { + "@types/hast": "^3.0.0", + "comma-separated-tokens": "^2.0.0", + "hast-util-parse-selector": "^4.0.0", + "property-information": "^6.0.0", + "space-separated-tokens": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" } }, - "node_modules/is-fullwidth-code-point": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", - "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==", - "engines": { - "node": ">=8" + "node_modules/he": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/he/-/he-1.2.0.tgz", + "integrity": "sha512-F/1DnUGPopORZi0ni+CvrCgHQ5FyEAHRLSApuYWMmrbSwoN2Mn/7k+Gl38gJnR7yyDZk6WLXwiGod1JOWNDKGw==", + "bin": { + "he": "bin/he" } }, - "node_modules/is-glob": { - "version": "4.0.3", - "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.3.tgz", - "integrity": "sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==", + "node_modules/history": { + "version": "4.10.1", + "resolved": "https://registry.npmjs.org/history/-/history-4.10.1.tgz", + "integrity": "sha512-36nwAD620w12kuzPAsyINPWJqlNbij+hpK1k9XRloDtym8mxzGYl2c17LnV6IAGB2Dmg4tEa7G7DlawS0+qjew==", "dependencies": { - "is-extglob": "^2.1.1" - }, - "engines": { - "node": ">=0.10.0" + "@babel/runtime": "^7.1.2", + "loose-envify": "^1.2.0", + "resolve-pathname": "^3.0.0", + "tiny-invariant": "^1.0.2", + "tiny-warning": "^1.0.0", + "value-equal": "^1.0.1" } }, - "node_modules/is-hexadecimal": { - "version": "2.0.1", - "resolved": 
"https://registry.npmjs.org/is-hexadecimal/-/is-hexadecimal-2.0.1.tgz", - "integrity": "sha512-DgZQp241c8oO6cA1SbTEWiXeoxV42vlcJxgH+B3hi1AiqqKruZR3ZGF8In3fj4+/y/7rHvlOZLZtgJ/4ttYGZg==", - "funding": { - "type": "github", - "url": "https://github.com/sponsors/wooorm" + "node_modules/hoist-non-react-statics": { + "version": "3.3.2", + "resolved": "https://registry.npmjs.org/hoist-non-react-statics/-/hoist-non-react-statics-3.3.2.tgz", + "integrity": "sha512-/gGivxi8JPKWNm/W0jSmzcMPpfpPLc3dY/6GxhX2hQ9iGj3aDfklV4ET7NjKpSinLpJ5vafa9iiGIEZg10SfBw==", + "dependencies": { + "react-is": "^16.7.0" } }, - "node_modules/is-installed-globally": { - "version": "0.4.0", - "resolved": "https://registry.npmjs.org/is-installed-globally/-/is-installed-globally-0.4.0.tgz", - "integrity": "sha512-iwGqO3J21aaSkC7jWnHP/difazwS7SFeIqxv6wEtLU8Y5KlzFTjyqcSIT0d8s4+dDhKytsk9PJZ2BkS5eZwQRQ==", + "node_modules/hpack.js": { + "version": "2.1.6", + "resolved": "https://registry.npmjs.org/hpack.js/-/hpack.js-2.1.6.tgz", + "integrity": "sha512-zJxVehUdMGIKsRaNt7apO2Gqp0BdqW5yaiGHXXmbpvxgBYVZnAql+BJb4RO5ad2MgpbZKn5G6nMnegrH1FcNYQ==", "dependencies": { - "global-dirs": "^3.0.0", - "is-path-inside": "^3.0.2" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" + "inherits": "^2.0.1", + "obuf": "^1.0.0", + "readable-stream": "^2.0.1", + "wbuf": "^1.1.0" } }, - "node_modules/is-npm": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/is-npm/-/is-npm-6.0.0.tgz", - "integrity": "sha512-JEjxbSmtPSt1c8XTkVrlujcXdKV1/tvuQ7GwKcAlyiVLeYFQ2VHat8xfrDJsIkhCdF/tZ7CiIR3sy141c6+gPQ==", - "engines": { - "node": "^12.20.0 || ^14.13.1 || >=16.0.0" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } + "node_modules/hpack.js/node_modules/isarray": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/isarray/-/isarray-1.0.0.tgz", + "integrity": "sha512-VLghIWNM6ELQzo7zwmcg0NmTVyWKYjvIeM83yjp0wRDTmUnrM678fQbcKBo6n2CJEF0szoG//ytg+TKla89ALQ==" }, - "node_modules/is-number": { - "version": "7.0.0", - "resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz", - "integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==", - "engines": { - "node": ">=0.12.0" + "node_modules/hpack.js/node_modules/readable-stream": { + "version": "2.3.8", + "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.8.tgz", + "integrity": "sha512-8p0AUk4XODgIewSi0l8Epjs+EVnWiK7NoDIEGU0HhE7+ZyY8D1IMY7odu5lRrFXGg71L15KG8QrPmum45RTtdA==", + "dependencies": { + "core-util-is": "~1.0.0", + "inherits": "~2.0.3", + "isarray": "~1.0.0", + "process-nextick-args": "~2.0.0", + "safe-buffer": "~5.1.1", + "string_decoder": "~1.1.1", + "util-deprecate": "~1.0.1" } }, - "node_modules/is-obj": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/is-obj/-/is-obj-1.0.1.tgz", - "integrity": "sha512-l4RyHgRqGN4Y3+9JHVrNqO+tN0rV5My76uW5/nuO4K1b6vw5G8d/cmFjP9tRfEsdhZNt0IFdZuK/c2Vr4Nb+Qg==", - "engines": { - "node": ">=0.10.0" + "node_modules/hpack.js/node_modules/safe-buffer": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz", + "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==" + }, + "node_modules/hpack.js/node_modules/string_decoder": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.1.1.tgz", + "integrity": 
"sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg==", + "dependencies": { + "safe-buffer": "~5.1.0" } }, - "node_modules/is-path-cwd": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/is-path-cwd/-/is-path-cwd-2.2.0.tgz", - "integrity": "sha512-w942bTcih8fdJPJmQHFzkS76NEP8Kzzvmw92cXsazb8intwLqPibPPdXf4ANdKV3rYMuuQYGIWtvz9JilB3NFQ==", + "node_modules/html-entities": { + "version": "2.4.0", + "resolved": "https://registry.npmjs.org/html-entities/-/html-entities-2.4.0.tgz", + "integrity": "sha512-igBTJcNNNhvZFRtm8uA6xMY6xYleeDwn3PeBCkDz7tHttv4F2hsDI2aPgNERWzvRcNYHNT3ymRaQzllmXj4YsQ==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/mdevils" + }, + { + "type": "patreon", + "url": "https://patreon.com/mdevils" + } + ] + }, + "node_modules/html-escaper": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/html-escaper/-/html-escaper-2.0.2.tgz", + "integrity": "sha512-H2iMtd0I4Mt5eYiapRdIDjp+XzelXQ0tFE4JS7YFwFevXXMmOp9myNrUvCg0D6ws8iqkRPBfKHgbwig1SmlLfg==" + }, + "node_modules/html-minifier-terser": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/html-minifier-terser/-/html-minifier-terser-7.2.0.tgz", + "integrity": "sha512-tXgn3QfqPIpGl9o+K5tpcj3/MN4SfLtsx2GWwBC3SSd0tXQGyF3gsSqad8loJgKZGM3ZxbYDd5yhiBIdWpmvLA==", + "dependencies": { + "camel-case": "^4.1.2", + "clean-css": "~5.3.2", + "commander": "^10.0.0", + "entities": "^4.4.0", + "param-case": "^3.0.4", + "relateurl": "^0.2.7", + "terser": "^5.15.1" + }, + "bin": { + "html-minifier-terser": "cli.js" + }, "engines": { - "node": ">=6" + "node": "^14.13.1 || >=16.0.0" } }, - "node_modules/is-path-inside": { - "version": "3.0.3", - "resolved": "https://registry.npmjs.org/is-path-inside/-/is-path-inside-3.0.3.tgz", - "integrity": "sha512-Fd4gABb+ycGAmKou8eMftCupSir5lRxqf4aD/vd0cD2qc4HL07OjCeuHMr8Ro4CoMaeCKDB0/ECBOVWjTwUvPQ==", + "node_modules/html-minifier-terser/node_modules/commander": { + "version": "10.0.1", + "resolved": "https://registry.npmjs.org/commander/-/commander-10.0.1.tgz", + "integrity": "sha512-y4Mg2tXshplEbSGzx7amzPwKKOCGuoSRP/CjEdwwk0FOGlUbq6lKuoyDZTNZkmxHdJtp54hdfY/JUrdL7Xfdug==", "engines": { - "node": ">=8" + "node": ">=14" } }, - "node_modules/is-plain-obj": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-4.1.0.tgz", - "integrity": "sha512-+Pgi+vMuUNkJyExiMBt5IlFoMyKnr5zhJ4Uspz58WOhBF5QoIZkFyNHIbBAtHwzVAgk5RtndVNsDRN61/mmDqg==", + "node_modules/html-tags": { + "version": "3.3.1", + "resolved": "https://registry.npmjs.org/html-tags/-/html-tags-3.3.1.tgz", + "integrity": "sha512-ztqyC3kLto0e9WbNp0aeP+M3kTt+nbaIveGmUxAtZa+8iFgKLUOD4YKM5j+f3QD89bra7UeumolZHKuOXnTmeQ==", "engines": { - "node": ">=12" + "node": ">=8" }, "funding": { "url": "https://github.com/sponsors/sindresorhus" } }, - "node_modules/is-plain-object": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/is-plain-object/-/is-plain-object-5.0.0.tgz", - "integrity": "sha512-VRSzKkbMm5jMDoKLbltAkFQ5Qr7VDiTFGXxYFXXowVj387GeGNOCsOH6Msy00SGZ3Fp84b1Naa1psqgcCIEP5Q==", - "engines": { - "node": ">=0.10.0" + "node_modules/html-void-elements": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/html-void-elements/-/html-void-elements-3.0.0.tgz", + "integrity": "sha512-bEqo66MRXsUGxWHV5IP0PUiAWwoEjba4VCzg0LjFJBpchPaTfyfCKTG6bc5F8ucKec3q5y6qOdGyYTSBEvhCrg==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" } }, - "node_modules/is-reference": { - 
"version": "3.0.2", - "resolved": "https://registry.npmjs.org/is-reference/-/is-reference-3.0.2.tgz", - "integrity": "sha512-v3rht/LgVcsdZa3O2Nqs+NMowLOxeOm7Ay9+/ARQ2F+qEoANRcqrjAZKGN0v8ymUetZGgkp26LTnGT7H0Qo9Pg==", + "node_modules/html-webpack-plugin": { + "version": "5.6.0", + "resolved": "https://registry.npmjs.org/html-webpack-plugin/-/html-webpack-plugin-5.6.0.tgz", + "integrity": "sha512-iwaY4wzbe48AfKLZ/Cc8k0L+FKG6oSNRaZ8x5A/T/IVDGyXcbHncM9TdDa93wn0FsSm82FhTKW7f3vS61thXAw==", "dependencies": { - "@types/estree": "*" + "@types/html-minifier-terser": "^6.0.0", + "html-minifier-terser": "^6.0.2", + "lodash": "^4.17.21", + "pretty-error": "^4.0.0", + "tapable": "^2.0.0" + }, + "engines": { + "node": ">=10.13.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/html-webpack-plugin" + }, + "peerDependencies": { + "@rspack/core": "0.x || 1.x", + "webpack": "^5.20.0" + }, + "peerDependenciesMeta": { + "@rspack/core": { + "optional": true + }, + "webpack": { + "optional": true + } } }, - "node_modules/is-regexp": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/is-regexp/-/is-regexp-1.0.0.tgz", - "integrity": "sha512-7zjFAPO4/gwyQAAgRRmqeEeyIICSdmCqa3tsVHMdBzaXXRiqopZL4Cyghg/XulGWrtABTpbnYYzzIRffLkP4oA==", + "node_modules/html-webpack-plugin/node_modules/commander": { + "version": "8.3.0", + "resolved": "https://registry.npmjs.org/commander/-/commander-8.3.0.tgz", + "integrity": "sha512-OkTL9umf+He2DZkUq8f8J9of7yL6RJKI24dVITBmNfZBmri9zYZQrKkuXiKhyfPSu8tUhnVBB1iKXevvnlR4Ww==", "engines": { - "node": ">=0.10.0" + "node": ">= 12" } }, - "node_modules/is-root": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/is-root/-/is-root-2.1.0.tgz", - "integrity": "sha512-AGOriNp96vNBd3HtU+RzFEc75FfR5ymiYv8E553I71SCeXBiMsVDUtdio1OEFvrPyLIQ9tVR5RxXIFe5PUFjMg==", + "node_modules/html-webpack-plugin/node_modules/html-minifier-terser": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/html-minifier-terser/-/html-minifier-terser-6.1.0.tgz", + "integrity": "sha512-YXxSlJBZTP7RS3tWnQw74ooKa6L9b9i9QYXY21eUEvhZ3u9XLfv6OnFsQq6RxkhHygsaUMvYsZRV5rU/OVNZxw==", + "dependencies": { + "camel-case": "^4.1.2", + "clean-css": "^5.2.2", + "commander": "^8.3.0", + "he": "^1.2.0", + "param-case": "^3.0.4", + "relateurl": "^0.2.7", + "terser": "^5.10.0" + }, + "bin": { + "html-minifier-terser": "cli.js" + }, "engines": { - "node": ">=6" + "node": ">=12" } }, - "node_modules/is-stream": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/is-stream/-/is-stream-2.0.1.tgz", - "integrity": "sha512-hFoiJiTl63nn+kstHGBtewWSKnQLpyb155KHheA1l39uvtO9nWIop1p3udqPcUd/xbF1VLMO4n7OI6p7RbngDg==", - "engines": { - "node": ">=8" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" + "node_modules/htmlparser2": { + "version": "8.0.2", + "resolved": "https://registry.npmjs.org/htmlparser2/-/htmlparser2-8.0.2.tgz", + "integrity": "sha512-GYdjWKDkbRLkZ5geuHs5NY1puJ+PXwP7+fHPRz06Eirsb9ugf6d8kkXav6ADhcODhFFPMIXyxkxSuMf3D6NCFA==", + "funding": [ + "https://github.com/fb55/htmlparser2?sponsor=1", + { + "type": "github", + "url": "https://github.com/sponsors/fb55" + } + ], + "dependencies": { + "domelementtype": "^2.3.0", + "domhandler": "^5.0.3", + "domutils": "^3.0.1", + "entities": "^4.4.0" } }, - "node_modules/is-typedarray": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/is-typedarray/-/is-typedarray-1.0.0.tgz", - "integrity": 
"sha512-cyA56iCMHAh5CdzjJIa4aohJyeO1YbwLi3Jc35MmRU6poroFjIGZzUzupGiRPOjgHg9TLu43xbpwXk523fMxKA==" + "node_modules/http-cache-semantics": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/http-cache-semantics/-/http-cache-semantics-4.1.1.tgz", + "integrity": "sha512-er295DKPVsV82j5kw1Gjt+ADA/XYHsajl82cGNQG2eyoPkvgUhX+nDIyelzhIWbbsXP39EHcI6l5tYs2FYqYXQ==" }, - "node_modules/is-wsl": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/is-wsl/-/is-wsl-2.2.0.tgz", - "integrity": "sha512-fKzAra0rGJUUBwGBgNkHZuToZcn+TtXHpeCgmkMJMMYx1sQDYaCSyjJBSCa2nH1DGm7s3n1oBnohoVTBaN7Lww==", + "node_modules/http-deceiver": { + "version": "1.2.7", + "resolved": "https://registry.npmjs.org/http-deceiver/-/http-deceiver-1.2.7.tgz", + "integrity": "sha512-LmpOGxTfbpgtGVxJrj5k7asXHCgNZp5nLfp+hWc8QQRqtb7fUy6kRY3BO1h9ddF6yIPYUARgxGOwB42DnxIaNw==" + }, + "node_modules/http-errors": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/http-errors/-/http-errors-2.0.0.tgz", + "integrity": "sha512-FtwrG/euBzaEjYeRqOgly7G0qviiXoJWnvEH2Z1plBdXgbyjv34pHTSb9zoeHMyDy33+DWy5Wt9Wo+TURtOYSQ==", + "license": "MIT", "dependencies": { - "is-docker": "^2.0.0" + "depd": "2.0.0", + "inherits": "2.0.4", + "setprototypeof": "1.2.0", + "statuses": "2.0.1", + "toidentifier": "1.0.1" }, "engines": { - "node": ">=8" - } - }, - "node_modules/is-yarn-global": { - "version": "0.4.1", - "resolved": "https://registry.npmjs.org/is-yarn-global/-/is-yarn-global-0.4.1.tgz", - "integrity": "sha512-/kppl+R+LO5VmhYSEWARUFjodS25D68gvj8W7z0I7OWhUla5xWu8KL6CtB2V0R6yqhnRgbcaREMr4EEM6htLPQ==", - "engines": { - "node": ">=12" + "node": ">= 0.8" } }, - "node_modules/isarray": { - "version": "0.0.1", - "resolved": "https://registry.npmjs.org/isarray/-/isarray-0.0.1.tgz", - "integrity": "sha512-D2S+3GLxWH+uhrNEcoh/fnmYeP8E8/zHl644d/jdA0g2uyXvy3sb0qxotE+ne0LtccHknQzWwZEzhak7oJ0COQ==" - }, - "node_modules/isexe": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz", - "integrity": "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==" + "node_modules/http-parser-js": { + "version": "0.5.8", + "resolved": "https://registry.npmjs.org/http-parser-js/-/http-parser-js-0.5.8.tgz", + "integrity": "sha512-SGeBX54F94Wgu5RH3X5jsDtf4eHyRogWX1XGT3b4HuW3tQPM4AaBzoUji/4AAJNXCEOWZ5O0DgZmJw1947gD5Q==" }, - "node_modules/isobject": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/isobject/-/isobject-3.0.1.tgz", - "integrity": "sha512-WhB9zCku7EGTj/HQQRz5aUQEUeoQZH2bWcltRErOpymJ4boYE6wL9Tbr23krRPSZ+C5zqNSrSw+Cc7sZZ4b7vg==", + "node_modules/http-proxy": { + "version": "1.18.1", + "resolved": "https://registry.npmjs.org/http-proxy/-/http-proxy-1.18.1.tgz", + "integrity": "sha512-7mz/721AbnJwIVbnaSv1Cz3Am0ZLT/UBwkC92VlxhXv/k/BBQfM2fXElQNC27BVGr0uwUpplYPQM9LnaBMR5NQ==", + "dependencies": { + "eventemitter3": "^4.0.0", + "follow-redirects": "^1.0.0", + "requires-port": "^1.0.0" + }, "engines": { - "node": ">=0.10.0" + "node": ">=8.0.0" } }, - "node_modules/jackspeak": { - "version": "2.3.6", - "resolved": "https://registry.npmjs.org/jackspeak/-/jackspeak-2.3.6.tgz", - "integrity": "sha512-N3yCS/NegsOBokc8GAdM8UcmfsKiSS8cipheD/nivzr700H+nsMOxJjQnvwOcRYVuFkdH0wGUvW2WbXGmrZGbQ==", + "node_modules/http-proxy-middleware": { + "version": "2.0.9", + "resolved": "https://registry.npmjs.org/http-proxy-middleware/-/http-proxy-middleware-2.0.9.tgz", + "integrity": 
"sha512-c1IyJYLYppU574+YI7R4QyX2ystMtVXZwIdzazUIPIJsHuWNd+mho2j+bKoHftndicGj9yh+xjd+l0yj7VeT1Q==", + "license": "MIT", "dependencies": { - "@isaacs/cliui": "^8.0.2" + "@types/http-proxy": "^1.17.8", + "http-proxy": "^1.18.1", + "is-glob": "^4.0.1", + "is-plain-obj": "^3.0.0", + "micromatch": "^4.0.2" }, "engines": { - "node": ">=14" + "node": ">=12.0.0" }, - "funding": { - "url": "https://github.com/sponsors/isaacs" + "peerDependencies": { + "@types/express": "^4.17.13" }, - "optionalDependencies": { - "@pkgjs/parseargs": "^0.11.0" + "peerDependenciesMeta": { + "@types/express": { + "optional": true + } } }, - "node_modules/jest-util": { - "version": "29.7.0", - "resolved": "https://registry.npmjs.org/jest-util/-/jest-util-29.7.0.tgz", - "integrity": "sha512-z6EbKajIpqGKU56y5KBUgy1dt1ihhQJgWzUlZHArA/+X2ad7Cb5iF+AK1EWVL/Bo7Rz9uurpqw6SiBCefUbCGA==", - "dependencies": { - "@jest/types": "^29.6.3", - "@types/node": "*", - "chalk": "^4.0.0", - "ci-info": "^3.2.0", - "graceful-fs": "^4.2.9", - "picomatch": "^2.2.3" - }, + "node_modules/http-proxy-middleware/node_modules/is-plain-obj": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-3.0.0.tgz", + "integrity": "sha512-gwsOE28k+23GP1B6vFl1oVh/WOzmawBrKwo5Ev6wMKzPkaXaCDIQKzLnvsA42DRlbVTWorkgTKIviAKCWkfUwA==", "engines": { - "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" } }, - "node_modules/jest-worker": { - "version": "29.7.0", - "resolved": "https://registry.npmjs.org/jest-worker/-/jest-worker-29.7.0.tgz", - "integrity": "sha512-eIz2msL/EzL9UFTFFx7jBTkeZfku0yUAyZZZmJ93H2TYEiroIx2PQjEXcwYtYl8zXCxb+PAmA2hLIt/6ZEkPHw==", + "node_modules/http2-wrapper": { + "version": "2.2.1", + "resolved": "https://registry.npmjs.org/http2-wrapper/-/http2-wrapper-2.2.1.tgz", + "integrity": "sha512-V5nVw1PAOgfI3Lmeaj2Exmeg7fenjhRUgz1lPSezy1CuhPYbgQtbQj4jZfEAEMlaL+vupsvhjqCyjzob0yxsmQ==", "dependencies": { - "@types/node": "*", - "jest-util": "^29.7.0", - "merge-stream": "^2.0.0", - "supports-color": "^8.0.0" + "quick-lru": "^5.1.1", + "resolve-alpn": "^1.2.0" }, "engines": { - "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + "node": ">=10.19.0" } }, - "node_modules/jest-worker/node_modules/supports-color": { - "version": "8.1.1", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-8.1.1.tgz", - "integrity": "sha512-MpUEN2OodtUzxvKQl72cUF7RQ5EiHsGvSsVG0ia9c5RbWGL2CI4C7EpPS8UTBIplnlzZiNuV56w+FuNxy3ty2Q==", + "node_modules/human-signals": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/human-signals/-/human-signals-2.1.0.tgz", + "integrity": "sha512-B4FFZ6q/T2jhhksgkbEW3HBvWIfDW85snkQgawt07S7J5QXTk6BkNV+0yAeZrM5QpMAdYlocGoljn0sJ/WQkFw==", + "engines": { + "node": ">=10.17.0" + } + }, + "node_modules/iconv-lite": { + "version": "0.4.24", + "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.4.24.tgz", + "integrity": "sha512-v3MXnZAcvnywkTUEZomIActle7RXXeedOR31wwl7VlyoXO4Qi9arvSenNQWne1TcRwhCL1HwLI21bEqdpj8/rA==", + "license": "MIT", "dependencies": { - "has-flag": "^4.0.0" + "safer-buffer": ">= 2.1.2 < 3" }, "engines": { - "node": ">=10" + "node": ">=0.10.0" + } + }, + "node_modules/icss-utils": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/icss-utils/-/icss-utils-5.1.0.tgz", + "integrity": "sha512-soFhflCVWLfRNOPU3iv5Z9VUdT44xFRbzjLsEzSr5AQmgqPMTHdU3PMT1Cf1ssx8fLNJDA1juftYl+PUcv3MqA==", + "engines": { + "node": "^10 || ^12 || >= 14" }, - "funding": { - "url": 
"https://github.com/chalk/supports-color?sponsor=1" + "peerDependencies": { + "postcss": "^8.1.0" } }, - "node_modules/jiti": { - "version": "1.21.0", - "resolved": "https://registry.npmjs.org/jiti/-/jiti-1.21.0.tgz", - "integrity": "sha512-gFqAIbuKyyso/3G2qhiO2OM6shY6EPP/R0+mkDbyspxKazh8BXDC5FiFsUjlczgdNz/vfra0da2y+aHrusLG/Q==", - "bin": { - "jiti": "bin/jiti.js" + "node_modules/ignore": { + "version": "5.3.0", + "resolved": "https://registry.npmjs.org/ignore/-/ignore-5.3.0.tgz", + "integrity": "sha512-g7dmpshy+gD7mh88OC9NwSGTKoc3kyLAZQRU1mt53Aw/vnvfXnbC+F/7F7QoYVKbV+KNvJx8wArewKy1vXMtlg==", + "engines": { + "node": ">= 4" } }, - "node_modules/joi": { - "version": "17.11.0", - "resolved": "https://registry.npmjs.org/joi/-/joi-17.11.0.tgz", - "integrity": "sha512-NgB+lZLNoqISVy1rZocE9PZI36bL/77ie924Ri43yEvi9GUUMPeyVIr8KdFTMUlby1p0PBYMk9spIxEUQYqrJQ==", - "dependencies": { - "@hapi/hoek": "^9.0.0", - "@hapi/topo": "^5.0.0", - "@sideway/address": "^4.1.3", - "@sideway/formula": "^3.0.1", - "@sideway/pinpoint": "^2.0.0" - } - }, - "node_modules/js-tokens": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz", - "integrity": "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==" - }, - "node_modules/js-yaml": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.0.tgz", - "integrity": "sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==", + "node_modules/image-size": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/image-size/-/image-size-1.2.1.tgz", + "integrity": "sha512-rH+46sQJ2dlwfjfhCyNx5thzrv+dtmBIhPHk0zgRUukHzZ/kRueTJXoYYsclBaKcSMBWuGbOFXtioLpzTb5euw==", + "license": "MIT", "dependencies": { - "argparse": "^2.0.1" + "queue": "6.0.2" }, "bin": { - "js-yaml": "bin/js-yaml.js" - } - }, - "node_modules/jsesc": { - "version": "2.5.2", - "resolved": "https://registry.npmjs.org/jsesc/-/jsesc-2.5.2.tgz", - "integrity": "sha512-OYu7XEzjkCQ3C5Ps3QIZsQfNpqoJyZZA99wd9aWd05NCtC5pWOkShK2mkL6HXQR6/Cy2lbNdPlZBpuQHXE63gA==", - "bin": { - "jsesc": "bin/jsesc" + "image-size": "bin/image-size.js" }, "engines": { - "node": ">=4" + "node": ">=16.x" } }, - "node_modules/json-buffer": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/json-buffer/-/json-buffer-3.0.1.tgz", - "integrity": "sha512-4bV5BfR2mqfQTJm+V5tPPdf+ZpuhiIvTuAB5g8kcrXOZpTT/QwwVRWBywX1ozr6lEuPdbHxwaJlm9G6mI2sfSQ==" - }, - "node_modules/json-parse-even-better-errors": { - "version": "2.3.1", - "resolved": "https://registry.npmjs.org/json-parse-even-better-errors/-/json-parse-even-better-errors-2.3.1.tgz", - "integrity": "sha512-xyFwyhro/JEof6Ghe2iz2NcXoj2sloNsWr/XsERDK/oiPCfaNhl5ONfp+jQdAZRQQ0IJWNzH9zIZF7li91kh2w==" + "node_modules/immediate": { + "version": "3.3.0", + "resolved": "https://registry.npmjs.org/immediate/-/immediate-3.3.0.tgz", + "integrity": "sha512-HR7EVodfFUdQCTIeySw+WDRFJlPcLOJbXfwwZ7Oom6tjsvZ3bOkCDJHehQC3nxJrv7+f9XecwazynjU8e4Vw3Q==" }, - "node_modules/json-schema-traverse": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-1.0.0.tgz", - "integrity": "sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug==" + "node_modules/immer": { + "version": "9.0.21", + "resolved": "https://registry.npmjs.org/immer/-/immer-9.0.21.tgz", + "integrity": 
"sha512-bc4NBHqOqSfRW7POMkHd51LvClaeMXpm8dx0e8oE2GORbq5aRK7Bxl4FyzVLdGtLmvLKL7BTDBG5ACQm4HWjTA==", + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/immer" + } }, - "node_modules/json5": { - "version": "2.2.3", - "resolved": "https://registry.npmjs.org/json5/-/json5-2.2.3.tgz", - "integrity": "sha512-XmOWe7eyHYH14cLdVPoyg+GOH3rYX++KpzrylJwSW98t3Nk+U8XOl8FWKOgwtzdb8lXGf6zYwDUzeHMWfxasyg==", - "bin": { - "json5": "lib/cli.js" + "node_modules/import-fresh": { + "version": "3.3.0", + "resolved": "https://registry.npmjs.org/import-fresh/-/import-fresh-3.3.0.tgz", + "integrity": "sha512-veYYhQa+D1QBKznvhUHxb8faxlrwUnxseDAbAp457E0wLNio2bOSKnjYDhMj+YiAq61xrMGhQk9iXVk5FzgQMw==", + "dependencies": { + "parent-module": "^1.0.0", + "resolve-from": "^4.0.0" }, "engines": { "node": ">=6" - } - }, - "node_modules/jsonfile": { - "version": "6.1.0", - "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-6.1.0.tgz", - "integrity": "sha512-5dgndWOriYSm5cnYaJNhalLNDKOqFwyDB/rr1E9ZsGciGvKPs8R2xYGCacuf3z6K1YKDz182fd+fY3cn3pMqXQ==", - "dependencies": { - "universalify": "^2.0.0" }, - "optionalDependencies": { - "graceful-fs": "^4.1.6" - } - }, - "node_modules/keyv": { - "version": "4.5.4", - "resolved": "https://registry.npmjs.org/keyv/-/keyv-4.5.4.tgz", - "integrity": "sha512-oxVHkHR/EJf2CNXnWxRLW6mg7JyCCUcG0DtEGmL2ctUo1PNTin1PUil+r/+4r5MpVgC/fn1kjsx7mjSujKqIpw==", - "dependencies": { - "json-buffer": "3.0.1" + "funding": { + "url": "https://github.com/sponsors/sindresorhus" } }, - "node_modules/kind-of": { - "version": "6.0.3", - "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-6.0.3.tgz", - "integrity": "sha512-dcS1ul+9tmeD95T+x28/ehLgd9mENa3LsvDTtzm3vyBEO7RPptvAD+t44WVXaUjTBRcrpFeFlC8WCruUR456hw==", + "node_modules/import-lazy": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/import-lazy/-/import-lazy-4.0.0.tgz", + "integrity": "sha512-rKtvo6a868b5Hu3heneU+L4yEQ4jYKLtjpnPeUdK7h0yzXGmyBTypknlkCvHFBqfX9YlorEiMM6Dnq/5atfHkw==", "engines": { - "node": ">=0.10.0" + "node": ">=8" } }, - "node_modules/klaw-sync": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/klaw-sync/-/klaw-sync-6.0.0.tgz", - "integrity": "sha512-nIeuVSzdCCs6TDPTqI8w1Yre34sSq7AkZ4B3sfOBbI2CgVSB4Du4aLQijFU2+lhAFCwt9+42Hel6lQNIv6AntQ==", - "dependencies": { - "graceful-fs": "^4.1.11" + "node_modules/imurmurhash": { + "version": "0.1.4", + "resolved": "https://registry.npmjs.org/imurmurhash/-/imurmurhash-0.1.4.tgz", + "integrity": "sha512-JmXMZ6wuvDmLiHEml9ykzqO6lwFbof0GG4IkcGaENdCRDDmMVnny7s5HsIgHCbaq0w2MyPhDqkhTUgS2LU2PHA==", + "engines": { + "node": ">=0.8.19" } }, - "node_modules/kleur": { - "version": "3.0.3", - "resolved": "https://registry.npmjs.org/kleur/-/kleur-3.0.3.tgz", - "integrity": "sha512-eTIzlVOSUR+JxdDFepEYcBMtZ9Qqdef+rnzWdRZuMbOywu5tO2w2N7rqjoANZ5k9vywhL6Br1VRjUIgTQx4E8w==", + "node_modules/indent-string": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/indent-string/-/indent-string-4.0.0.tgz", + "integrity": "sha512-EdDDZu4A2OyIK7Lr/2zG+w5jmbuk1DVBnEwREQvBzspBJkCEbRa8GxU1lghYcaGJCnRWibjDXlq779X1/y5xwg==", "engines": { - "node": ">=6" + "node": ">=8" } }, - "node_modules/latest-version": { - "version": "7.0.0", - "resolved": "https://registry.npmjs.org/latest-version/-/latest-version-7.0.0.tgz", - "integrity": "sha512-KvNT4XqAMzdcL6ka6Tl3i2lYeFDgXNCuIX+xNx6ZMVR1dFq+idXd9FLKNMOIx0t9mJ9/HudyX4oZWXZQ0UJHeg==", - "dependencies": { - "package-json": "^8.1.0" - }, + "node_modules/infima": { + "version": "0.2.0-alpha.43", + "resolved": 
"https://registry.npmjs.org/infima/-/infima-0.2.0-alpha.43.tgz", + "integrity": "sha512-2uw57LvUqW0rK/SWYnd/2rRfxNA5DDNOh33jxF7fy46VWoNhGxiUQyVZHbBMjQ33mQem0cjdDVwgWVAmlRfgyQ==", + "license": "MIT", "engines": { - "node": ">=14.16" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" + "node": ">=12" } }, - "node_modules/launch-editor": { - "version": "2.6.1", - "resolved": "https://registry.npmjs.org/launch-editor/-/launch-editor-2.6.1.tgz", - "integrity": "sha512-eB/uXmFVpY4zezmGp5XtU21kwo7GBbKB+EQ+UZeWtGb9yAM5xt/Evk+lYH3eRNAtId+ej4u7TYPFZ07w4s7rRw==", + "node_modules/inflight": { + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz", + "integrity": "sha512-k92I/b08q4wvFscXCLvqfsHCrjrF7yiXsQuIVvVE7N82W3+aqpzuUdBbfhWcy/FZR3/4IgflMgKLOsvPDrGCJA==", "dependencies": { - "picocolors": "^1.0.0", - "shell-quote": "^1.8.1" + "once": "^1.3.0", + "wrappy": "1" } }, - "node_modules/leven": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/leven/-/leven-3.1.0.tgz", - "integrity": "sha512-qsda+H8jTaUaN/x5vzW2rzc+8Rw4TAQ/4KjB46IwK5VH+IlVeeeje/EoZRpiXvIqjFgK84QffqPztGI3VBLG1A==", + "node_modules/inherits": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz", + "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==" + }, + "node_modules/ini": { + "version": "1.3.8", + "resolved": "https://registry.npmjs.org/ini/-/ini-1.3.8.tgz", + "integrity": "sha512-JV/yugV2uzW5iMRSiZAyDtQd+nxtUnjeLt0acNdw98kKLrvuRVyB80tsREOE7yvGVgalhZ6RNXCmEHkUKBKxew==" + }, + "node_modules/inline-style-parser": { + "version": "0.1.1", + "resolved": "https://registry.npmjs.org/inline-style-parser/-/inline-style-parser-0.1.1.tgz", + "integrity": "sha512-7NXolsK4CAS5+xvdj5OMMbI962hU/wvwoxk+LWR9Ek9bVtyuuYScDN6eS0rUm6TxApFpw7CX1o4uJzcd4AyD3Q==" + }, + "node_modules/internmap": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/internmap/-/internmap-2.0.3.tgz", + "integrity": "sha512-5Hh7Y1wQbvY5ooGgPbDaL5iYLAPzMTUrjMulskHLH6wnv/A+1q5rgEaiuqEjB+oxGXIVZs1FF+R/KPN3ZSQYYg==", + "license": "ISC", "engines": { - "node": ">=6" + "node": ">=12" } }, - "node_modules/lilconfig": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/lilconfig/-/lilconfig-2.1.0.tgz", - "integrity": "sha512-utWOt/GHzuUxnLKxB6dk81RoOeoNeHgbrXiuGk4yyF5qlRz+iIVWu56E2fqGHFrXz0QNUhLB/8nKqvRH66JKGQ==", + "node_modules/interpret": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/interpret/-/interpret-1.4.0.tgz", + "integrity": "sha512-agE4QfB2Lkp9uICn7BAqoscw4SZP9kTE2hxiFI3jBPmXJfdqiahTbUuKGsMoN2GtqL9AxhYioAcVvgsb1HvRbA==", "engines": { - "node": ">=10" + "node": ">= 0.10" } }, - "node_modules/lines-and-columns": { - "version": "1.2.4", - "resolved": "https://registry.npmjs.org/lines-and-columns/-/lines-and-columns-1.2.4.tgz", - "integrity": "sha512-7ylylesZQ/PV29jhEDl3Ufjo6ZX7gCqJr5F7PKrqc93v7fzSymt1BpwEU8nAUXs8qzzvqhbjhK5QZg6Mt/HkBg==" + "node_modules/invariant": { + "version": "2.2.4", + "resolved": "https://registry.npmjs.org/invariant/-/invariant-2.2.4.tgz", + "integrity": "sha512-phJfQVBuaJM5raOpJjSfkiD6BpbCE4Ns//LaXl6wGYtUBY83nWS6Rf9tXm2e8VaK60JEjYldbPif/A2B1C2gNA==", + "dependencies": { + "loose-envify": "^1.0.0" + } }, - "node_modules/loader-runner": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/loader-runner/-/loader-runner-4.3.0.tgz", - "integrity": 
"sha512-3R/1M+yS3j5ou80Me59j7F9IMs4PXs3VqRrm0TU3AbKPxlmpoY1TNscJV/oGJXo8qCatFGTfDbY6W6ipGOYXfg==", + "node_modules/ipaddr.js": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/ipaddr.js/-/ipaddr.js-2.1.0.tgz", + "integrity": "sha512-LlbxQ7xKzfBusov6UMi4MFpEg0m+mAm9xyNGEduwXMEDuf4WfzB/RZwMVYEd7IKGvh4IUkEXYxtAVu9T3OelJQ==", "engines": { - "node": ">=6.11.5" + "node": ">= 10" } }, - "node_modules/loader-utils": { - "version": "2.0.4", - "resolved": "https://registry.npmjs.org/loader-utils/-/loader-utils-2.0.4.tgz", - "integrity": "sha512-xXqpXoINfFhgua9xiqD8fPFHgkoq1mmmpE92WlDbm9rNRd/EbRb+Gqf908T2DMfuHjjJlksiK2RbHVOdD/MqSw==", + "node_modules/is-alphabetical": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/is-alphabetical/-/is-alphabetical-2.0.1.tgz", + "integrity": "sha512-FWyyY60MeTNyeSRpkM2Iry0G9hpr7/9kD40mD/cGQEuilcZYS4okz8SN2Q6rLCJ8gbCt6fN+rC+6tMGS99LaxQ==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/is-alphanumerical": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/is-alphanumerical/-/is-alphanumerical-2.0.1.tgz", + "integrity": "sha512-hmbYhX/9MUMF5uh7tOXyK/n0ZvWpad5caBA17GsC6vyuCqaWliRG5K1qS9inmUhEMaOBIW7/whAnSwveW/LtZw==", "dependencies": { - "big.js": "^5.2.2", - "emojis-list": "^3.0.0", - "json5": "^2.1.2" + "is-alphabetical": "^2.0.0", + "is-decimal": "^2.0.0" }, - "engines": { - "node": ">=8.9.0" + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" } }, - "node_modules/locate-path": { - "version": "7.2.0", - "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-7.2.0.tgz", - "integrity": "sha512-gvVijfZvn7R+2qyPX8mAuKcFGDf6Nc61GdvGafQsHL0sBIxfKzA+usWn4GFC/bk+QdwPUD4kWFJLhElipq+0VA==", + "node_modules/is-arrayish": { + "version": "0.2.1", + "resolved": "https://registry.npmjs.org/is-arrayish/-/is-arrayish-0.2.1.tgz", + "integrity": "sha512-zz06S8t0ozoDXMG+ube26zeCTNXcKIPJZJi8hBrF4idCLms4CG9QtK7qBl1boi5ODzFpjswb5JPmHCbMpjaYzg==" + }, + "node_modules/is-binary-path": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/is-binary-path/-/is-binary-path-2.1.0.tgz", + "integrity": "sha512-ZMERYes6pDydyuGidse7OsHxtbI7WVeUEozgR/g7rd0xUimYNlvZRE/K2MgZTjWy725IfelLeVcEM97mmtRGXw==", "dependencies": { - "p-locate": "^6.0.0" + "binary-extensions": "^2.0.0" }, "engines": { - "node": "^12.20.0 || ^14.13.1 || >=16.0.0" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" + "node": ">=8" } }, - "node_modules/lodash": { - "version": "4.17.21", - "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.21.tgz", - "integrity": "sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg==" + "node_modules/is-ci": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/is-ci/-/is-ci-3.0.1.tgz", + "integrity": "sha512-ZYvCgrefwqoQ6yTyYUbQu64HsITZ3NfKX1lzaEYdkTDcfKzzCI/wthRRYKkdjHKFVgNiXKAKm65Zo1pk2as/QQ==", + "dependencies": { + "ci-info": "^3.2.0" + }, + "bin": { + "is-ci": "bin.js" + } + }, + "node_modules/is-core-module": { + "version": "2.13.1", + "resolved": "https://registry.npmjs.org/is-core-module/-/is-core-module-2.13.1.tgz", + "integrity": "sha512-hHrIjvZsftOsvKSn2TRYl63zvxsgE0K+0mYMoH6gD4omR5IWB2KynivBQczo3+wF1cCkjzvptnI9Q0sPU66ilw==", + "dependencies": { + "hasown": "^2.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-decimal": { + "version": "2.0.1", + "resolved": 
"https://registry.npmjs.org/is-decimal/-/is-decimal-2.0.1.tgz", + "integrity": "sha512-AAB9hiomQs5DXWcRB1rqsxGUstbRroFOPPVAomNk/3XHR5JyEZChOyTWe2oayKnsSsr/kcGqF+z6yuH6HHpN0A==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/is-docker": { + "version": "2.2.1", + "resolved": "https://registry.npmjs.org/is-docker/-/is-docker-2.2.1.tgz", + "integrity": "sha512-F+i2BKsFrH66iaUFc0woD8sLy8getkwTwtOBjvs56Cx4CgJDeKQeqfz8wAYiSb8JOprWhHH5p77PbmYCvvUuXQ==", + "bin": { + "is-docker": "cli.js" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/is-extendable": { + "version": "0.1.1", + "resolved": "https://registry.npmjs.org/is-extendable/-/is-extendable-0.1.1.tgz", + "integrity": "sha512-5BMULNob1vgFX6EjQw5izWDxrecWK9AM72rugNr0TFldMOi0fj6Jk+zeKIt0xGj4cEfQIJth4w3OKWOJ4f+AFw==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-extglob": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz", + "integrity": "sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-fullwidth-code-point": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", + "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==", + "engines": { + "node": ">=8" + } + }, + "node_modules/is-glob": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.3.tgz", + "integrity": "sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==", + "dependencies": { + "is-extglob": "^2.1.1" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-hexadecimal": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/is-hexadecimal/-/is-hexadecimal-2.0.1.tgz", + "integrity": "sha512-DgZQp241c8oO6cA1SbTEWiXeoxV42vlcJxgH+B3hi1AiqqKruZR3ZGF8In3fj4+/y/7rHvlOZLZtgJ/4ttYGZg==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/is-installed-globally": { + "version": "0.4.0", + "resolved": "https://registry.npmjs.org/is-installed-globally/-/is-installed-globally-0.4.0.tgz", + "integrity": "sha512-iwGqO3J21aaSkC7jWnHP/difazwS7SFeIqxv6wEtLU8Y5KlzFTjyqcSIT0d8s4+dDhKytsk9PJZ2BkS5eZwQRQ==", + "dependencies": { + "global-dirs": "^3.0.0", + "is-path-inside": "^3.0.2" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/is-npm": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/is-npm/-/is-npm-6.0.0.tgz", + "integrity": "sha512-JEjxbSmtPSt1c8XTkVrlujcXdKV1/tvuQ7GwKcAlyiVLeYFQ2VHat8xfrDJsIkhCdF/tZ7CiIR3sy141c6+gPQ==", + "engines": { + "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/is-number": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz", + "integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==", + "license": "MIT", + "engines": { + "node": ">=0.12.0" + } + }, + "node_modules/is-obj": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/is-obj/-/is-obj-1.0.1.tgz", + "integrity": 
"sha512-l4RyHgRqGN4Y3+9JHVrNqO+tN0rV5My76uW5/nuO4K1b6vw5G8d/cmFjP9tRfEsdhZNt0IFdZuK/c2Vr4Nb+Qg==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-path-cwd": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/is-path-cwd/-/is-path-cwd-2.2.0.tgz", + "integrity": "sha512-w942bTcih8fdJPJmQHFzkS76NEP8Kzzvmw92cXsazb8intwLqPibPPdXf4ANdKV3rYMuuQYGIWtvz9JilB3NFQ==", + "engines": { + "node": ">=6" + } + }, + "node_modules/is-path-inside": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/is-path-inside/-/is-path-inside-3.0.3.tgz", + "integrity": "sha512-Fd4gABb+ycGAmKou8eMftCupSir5lRxqf4aD/vd0cD2qc4HL07OjCeuHMr8Ro4CoMaeCKDB0/ECBOVWjTwUvPQ==", + "engines": { + "node": ">=8" + } + }, + "node_modules/is-plain-obj": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-4.1.0.tgz", + "integrity": "sha512-+Pgi+vMuUNkJyExiMBt5IlFoMyKnr5zhJ4Uspz58WOhBF5QoIZkFyNHIbBAtHwzVAgk5RtndVNsDRN61/mmDqg==", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/is-reference": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/is-reference/-/is-reference-3.0.2.tgz", + "integrity": "sha512-v3rht/LgVcsdZa3O2Nqs+NMowLOxeOm7Ay9+/ARQ2F+qEoANRcqrjAZKGN0v8ymUetZGgkp26LTnGT7H0Qo9Pg==", + "dependencies": { + "@types/estree": "*" + } + }, + "node_modules/is-regexp": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/is-regexp/-/is-regexp-1.0.0.tgz", + "integrity": "sha512-7zjFAPO4/gwyQAAgRRmqeEeyIICSdmCqa3tsVHMdBzaXXRiqopZL4Cyghg/XulGWrtABTpbnYYzzIRffLkP4oA==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-root": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/is-root/-/is-root-2.1.0.tgz", + "integrity": "sha512-AGOriNp96vNBd3HtU+RzFEc75FfR5ymiYv8E553I71SCeXBiMsVDUtdio1OEFvrPyLIQ9tVR5RxXIFe5PUFjMg==", + "engines": { + "node": ">=6" + } + }, + "node_modules/is-stream": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/is-stream/-/is-stream-2.0.1.tgz", + "integrity": "sha512-hFoiJiTl63nn+kstHGBtewWSKnQLpyb155KHheA1l39uvtO9nWIop1p3udqPcUd/xbF1VLMO4n7OI6p7RbngDg==", + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/is-typedarray": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/is-typedarray/-/is-typedarray-1.0.0.tgz", + "integrity": "sha512-cyA56iCMHAh5CdzjJIa4aohJyeO1YbwLi3Jc35MmRU6poroFjIGZzUzupGiRPOjgHg9TLu43xbpwXk523fMxKA==" + }, + "node_modules/is-wsl": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/is-wsl/-/is-wsl-2.2.0.tgz", + "integrity": "sha512-fKzAra0rGJUUBwGBgNkHZuToZcn+TtXHpeCgmkMJMMYx1sQDYaCSyjJBSCa2nH1DGm7s3n1oBnohoVTBaN7Lww==", + "dependencies": { + "is-docker": "^2.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/is-yarn-global": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/is-yarn-global/-/is-yarn-global-0.4.1.tgz", + "integrity": "sha512-/kppl+R+LO5VmhYSEWARUFjodS25D68gvj8W7z0I7OWhUla5xWu8KL6CtB2V0R6yqhnRgbcaREMr4EEM6htLPQ==", + "engines": { + "node": ">=12" + } + }, + "node_modules/isarray": { + "version": "0.0.1", + "resolved": "https://registry.npmjs.org/isarray/-/isarray-0.0.1.tgz", + "integrity": "sha512-D2S+3GLxWH+uhrNEcoh/fnmYeP8E8/zHl644d/jdA0g2uyXvy3sb0qxotE+ne0LtccHknQzWwZEzhak7oJ0COQ==" + }, + "node_modules/isexe": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz", + 
"integrity": "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==" + }, + "node_modules/isobject": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/isobject/-/isobject-3.0.1.tgz", + "integrity": "sha512-WhB9zCku7EGTj/HQQRz5aUQEUeoQZH2bWcltRErOpymJ4boYE6wL9Tbr23krRPSZ+C5zqNSrSw+Cc7sZZ4b7vg==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/jackspeak": { + "version": "2.3.6", + "resolved": "https://registry.npmjs.org/jackspeak/-/jackspeak-2.3.6.tgz", + "integrity": "sha512-N3yCS/NegsOBokc8GAdM8UcmfsKiSS8cipheD/nivzr700H+nsMOxJjQnvwOcRYVuFkdH0wGUvW2WbXGmrZGbQ==", + "dependencies": { + "@isaacs/cliui": "^8.0.2" + }, + "engines": { + "node": ">=14" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + }, + "optionalDependencies": { + "@pkgjs/parseargs": "^0.11.0" + } + }, + "node_modules/jest-util": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-util/-/jest-util-29.7.0.tgz", + "integrity": "sha512-z6EbKajIpqGKU56y5KBUgy1dt1ihhQJgWzUlZHArA/+X2ad7Cb5iF+AK1EWVL/Bo7Rz9uurpqw6SiBCefUbCGA==", + "dependencies": { + "@jest/types": "^29.6.3", + "@types/node": "*", + "chalk": "^4.0.0", + "ci-info": "^3.2.0", + "graceful-fs": "^4.2.9", + "picomatch": "^2.2.3" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-worker": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-worker/-/jest-worker-29.7.0.tgz", + "integrity": "sha512-eIz2msL/EzL9UFTFFx7jBTkeZfku0yUAyZZZmJ93H2TYEiroIx2PQjEXcwYtYl8zXCxb+PAmA2hLIt/6ZEkPHw==", + "dependencies": { + "@types/node": "*", + "jest-util": "^29.7.0", + "merge-stream": "^2.0.0", + "supports-color": "^8.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-worker/node_modules/supports-color": { + "version": "8.1.1", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-8.1.1.tgz", + "integrity": "sha512-MpUEN2OodtUzxvKQl72cUF7RQ5EiHsGvSsVG0ia9c5RbWGL2CI4C7EpPS8UTBIplnlzZiNuV56w+FuNxy3ty2Q==", + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/supports-color?sponsor=1" + } + }, + "node_modules/jiti": { + "version": "1.21.0", + "resolved": "https://registry.npmjs.org/jiti/-/jiti-1.21.0.tgz", + "integrity": "sha512-gFqAIbuKyyso/3G2qhiO2OM6shY6EPP/R0+mkDbyspxKazh8BXDC5FiFsUjlczgdNz/vfra0da2y+aHrusLG/Q==", + "bin": { + "jiti": "bin/jiti.js" + } + }, + "node_modules/joi": { + "version": "17.11.0", + "resolved": "https://registry.npmjs.org/joi/-/joi-17.11.0.tgz", + "integrity": "sha512-NgB+lZLNoqISVy1rZocE9PZI36bL/77ie924Ri43yEvi9GUUMPeyVIr8KdFTMUlby1p0PBYMk9spIxEUQYqrJQ==", + "dependencies": { + "@hapi/hoek": "^9.0.0", + "@hapi/topo": "^5.0.0", + "@sideway/address": "^4.1.3", + "@sideway/formula": "^3.0.1", + "@sideway/pinpoint": "^2.0.0" + } + }, + "node_modules/js-tokens": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz", + "integrity": "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==" + }, + "node_modules/js-yaml": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.0.tgz", + "integrity": "sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==", + "dependencies": { + "argparse": "^2.0.1" + }, + "bin": { + "js-yaml": "bin/js-yaml.js" + } + }, + "node_modules/jsesc": { + "version": 
"3.1.0", + "resolved": "https://registry.npmjs.org/jsesc/-/jsesc-3.1.0.tgz", + "integrity": "sha512-/sM3dO2FOzXjKQhJuo0Q173wf2KOo8t4I8vHy6lF9poUp7bKT0/NHE8fPX23PwfhnykfqnC2xRxOnVw5XuGIaA==", + "license": "MIT", + "bin": { + "jsesc": "bin/jsesc" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/json-buffer": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/json-buffer/-/json-buffer-3.0.1.tgz", + "integrity": "sha512-4bV5BfR2mqfQTJm+V5tPPdf+ZpuhiIvTuAB5g8kcrXOZpTT/QwwVRWBywX1ozr6lEuPdbHxwaJlm9G6mI2sfSQ==" + }, + "node_modules/json-parse-even-better-errors": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/json-parse-even-better-errors/-/json-parse-even-better-errors-2.3.1.tgz", + "integrity": "sha512-xyFwyhro/JEof6Ghe2iz2NcXoj2sloNsWr/XsERDK/oiPCfaNhl5ONfp+jQdAZRQQ0IJWNzH9zIZF7li91kh2w==" + }, + "node_modules/json-schema-traverse": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-1.0.0.tgz", + "integrity": "sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug==" + }, + "node_modules/json5": { + "version": "2.2.3", + "resolved": "https://registry.npmjs.org/json5/-/json5-2.2.3.tgz", + "integrity": "sha512-XmOWe7eyHYH14cLdVPoyg+GOH3rYX++KpzrylJwSW98t3Nk+U8XOl8FWKOgwtzdb8lXGf6zYwDUzeHMWfxasyg==", + "bin": { + "json5": "lib/cli.js" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/jsonfile": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-6.1.0.tgz", + "integrity": "sha512-5dgndWOriYSm5cnYaJNhalLNDKOqFwyDB/rr1E9ZsGciGvKPs8R2xYGCacuf3z6K1YKDz182fd+fY3cn3pMqXQ==", + "dependencies": { + "universalify": "^2.0.0" + }, + "optionalDependencies": { + "graceful-fs": "^4.1.6" + } + }, + "node_modules/katex": { + "version": "0.16.22", + "resolved": "https://registry.npmjs.org/katex/-/katex-0.16.22.tgz", + "integrity": "sha512-XCHRdUw4lf3SKBaJe4EvgqIuWwkPSo9XoeO8GjQW94Bp7TWv9hNhzZjZ+OH9yf1UmLygb7DIT5GSFQiyt16zYg==", + "funding": [ + "https://opencollective.com/katex", + "https://github.com/sponsors/katex" + ], + "license": "MIT", + "dependencies": { + "commander": "^8.3.0" + }, + "bin": { + "katex": "cli.js" + } + }, + "node_modules/katex/node_modules/commander": { + "version": "8.3.0", + "resolved": "https://registry.npmjs.org/commander/-/commander-8.3.0.tgz", + "integrity": "sha512-OkTL9umf+He2DZkUq8f8J9of7yL6RJKI24dVITBmNfZBmri9zYZQrKkuXiKhyfPSu8tUhnVBB1iKXevvnlR4Ww==", + "license": "MIT", + "engines": { + "node": ">= 12" + } + }, + "node_modules/keyv": { + "version": "4.5.4", + "resolved": "https://registry.npmjs.org/keyv/-/keyv-4.5.4.tgz", + "integrity": "sha512-oxVHkHR/EJf2CNXnWxRLW6mg7JyCCUcG0DtEGmL2ctUo1PNTin1PUil+r/+4r5MpVgC/fn1kjsx7mjSujKqIpw==", + "dependencies": { + "json-buffer": "3.0.1" + } + }, + "node_modules/khroma": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/khroma/-/khroma-2.1.0.tgz", + "integrity": "sha512-Ls993zuzfayK269Svk9hzpeGUKob/sIgZzyHYdjQoAdQetRKpOLj+k/QQQ/6Qi0Yz65mlROrfd+Ev+1+7dz9Kw==" + }, + "node_modules/kind-of": { + "version": "6.0.3", + "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-6.0.3.tgz", + "integrity": "sha512-dcS1ul+9tmeD95T+x28/ehLgd9mENa3LsvDTtzm3vyBEO7RPptvAD+t44WVXaUjTBRcrpFeFlC8WCruUR456hw==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/klaw-sync": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/klaw-sync/-/klaw-sync-6.0.0.tgz", + "integrity": 
"sha512-nIeuVSzdCCs6TDPTqI8w1Yre34sSq7AkZ4B3sfOBbI2CgVSB4Du4aLQijFU2+lhAFCwt9+42Hel6lQNIv6AntQ==", + "dependencies": { + "graceful-fs": "^4.1.11" + } + }, + "node_modules/kleur": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/kleur/-/kleur-3.0.3.tgz", + "integrity": "sha512-eTIzlVOSUR+JxdDFepEYcBMtZ9Qqdef+rnzWdRZuMbOywu5tO2w2N7rqjoANZ5k9vywhL6Br1VRjUIgTQx4E8w==", + "engines": { + "node": ">=6" + } + }, + "node_modules/latest-version": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/latest-version/-/latest-version-7.0.0.tgz", + "integrity": "sha512-KvNT4XqAMzdcL6ka6Tl3i2lYeFDgXNCuIX+xNx6ZMVR1dFq+idXd9FLKNMOIx0t9mJ9/HudyX4oZWXZQ0UJHeg==", + "dependencies": { + "package-json": "^8.1.0" + }, + "engines": { + "node": ">=14.16" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/launch-editor": { + "version": "2.6.1", + "resolved": "https://registry.npmjs.org/launch-editor/-/launch-editor-2.6.1.tgz", + "integrity": "sha512-eB/uXmFVpY4zezmGp5XtU21kwo7GBbKB+EQ+UZeWtGb9yAM5xt/Evk+lYH3eRNAtId+ej4u7TYPFZ07w4s7rRw==", + "dependencies": { + "picocolors": "^1.0.0", + "shell-quote": "^1.8.1" + } + }, + "node_modules/layout-base": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/layout-base/-/layout-base-1.0.2.tgz", + "integrity": "sha512-8h2oVEZNktL4BH2JCOI90iD1yXwL6iNW7KcCKT2QZgQJR2vbqDsldCTPRU9NifTCqHZci57XvQQ15YTu+sTYPg==", + "license": "MIT" + }, + "node_modules/leven": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/leven/-/leven-3.1.0.tgz", + "integrity": "sha512-qsda+H8jTaUaN/x5vzW2rzc+8Rw4TAQ/4KjB46IwK5VH+IlVeeeje/EoZRpiXvIqjFgK84QffqPztGI3VBLG1A==", + "engines": { + "node": ">=6" + } + }, + "node_modules/lilconfig": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/lilconfig/-/lilconfig-2.1.0.tgz", + "integrity": "sha512-utWOt/GHzuUxnLKxB6dk81RoOeoNeHgbrXiuGk4yyF5qlRz+iIVWu56E2fqGHFrXz0QNUhLB/8nKqvRH66JKGQ==", + "engines": { + "node": ">=10" + } + }, + "node_modules/lines-and-columns": { + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/lines-and-columns/-/lines-and-columns-1.2.4.tgz", + "integrity": "sha512-7ylylesZQ/PV29jhEDl3Ufjo6ZX7gCqJr5F7PKrqc93v7fzSymt1BpwEU8nAUXs8qzzvqhbjhK5QZg6Mt/HkBg==" + }, + "node_modules/loader-runner": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/loader-runner/-/loader-runner-4.3.0.tgz", + "integrity": "sha512-3R/1M+yS3j5ou80Me59j7F9IMs4PXs3VqRrm0TU3AbKPxlmpoY1TNscJV/oGJXo8qCatFGTfDbY6W6ipGOYXfg==", + "engines": { + "node": ">=6.11.5" + } + }, + "node_modules/loader-utils": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/loader-utils/-/loader-utils-2.0.4.tgz", + "integrity": "sha512-xXqpXoINfFhgua9xiqD8fPFHgkoq1mmmpE92WlDbm9rNRd/EbRb+Gqf908T2DMfuHjjJlksiK2RbHVOdD/MqSw==", + "dependencies": { + "big.js": "^5.2.2", + "emojis-list": "^3.0.0", + "json5": "^2.1.2" + }, + "engines": { + "node": ">=8.9.0" + } + }, + "node_modules/locate-path": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-7.2.0.tgz", + "integrity": "sha512-gvVijfZvn7R+2qyPX8mAuKcFGDf6Nc61GdvGafQsHL0sBIxfKzA+usWn4GFC/bk+QdwPUD4kWFJLhElipq+0VA==", + "dependencies": { + "p-locate": "^6.0.0" + }, + "engines": { + "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/lodash": { + "version": "4.17.21", + "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.21.tgz", + "integrity": 
"sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg==" + }, + "node_modules/lodash-es": { + "version": "4.17.21", + "resolved": "https://registry.npmjs.org/lodash-es/-/lodash-es-4.17.21.tgz", + "integrity": "sha512-mKnC+QJ9pWVzv+C4/U3rRsHapFfHvQFoFB92e52xeyGMcX6/OlIl78je1u8vePzYZSkkogMPJ2yjxxsb89cxyw==", + "license": "MIT" + }, + "node_modules/lodash.debounce": { + "version": "4.0.8", + "resolved": "https://registry.npmjs.org/lodash.debounce/-/lodash.debounce-4.0.8.tgz", + "integrity": "sha512-FT1yDzDYEoYWhnSGnpE/4Kj1fLZkDFyqRb7fNt6FdYOSxlUWAtp42Eh6Wb0rGIv/m9Bgo7x4GhQbm5Ys4SG5ow==", + "license": "MIT" + }, + "node_modules/lodash.memoize": { + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/lodash.memoize/-/lodash.memoize-4.1.2.tgz", + "integrity": "sha512-t7j+NzmgnQzTAYXcsHYLgimltOV1MXHtlOWf6GjL9Kj8GK5FInw5JotxvbOs+IvV1/Dzo04/fCGfLVs7aXb4Ag==" + }, + "node_modules/lodash.uniq": { + "version": "4.5.0", + "resolved": "https://registry.npmjs.org/lodash.uniq/-/lodash.uniq-4.5.0.tgz", + "integrity": "sha512-xfBaXQd9ryd9dlSDvnvI0lvxfLJlYAZzXomUYzLKtUeOQvOP5piqAWuGtrhWeqaXK9hhoM/iyJc5AV+XfsX3HQ==" + }, + "node_modules/longest-streak": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/longest-streak/-/longest-streak-3.1.0.tgz", + "integrity": "sha512-9Ri+o0JYgehTaVBBDoMqIl8GXtbWg711O3srftcHhZ0dqnETqLaoIK0x17fUw9rFSlK/0NlsKe0Ahhyl5pXE2g==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/loose-envify": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/loose-envify/-/loose-envify-1.4.0.tgz", + "integrity": "sha512-lyuxPGr/Wfhrlem2CL/UcnUc1zcqKAImBDzukY7Y5F/yQiNdko6+fRLevlw1HgMySw7f611UIY408EtxRSoK3Q==", + "dependencies": { + "js-tokens": "^3.0.0 || ^4.0.0" + }, + "bin": { + "loose-envify": "cli.js" + } + }, + "node_modules/lower-case": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/lower-case/-/lower-case-2.0.2.tgz", + "integrity": "sha512-7fm3l3NAF9WfN6W3JOmf5drwpVqX78JtoGJ3A6W0a6ZnldM41w2fV5D490psKFTpMds8TJse/eHLFFsNHHjHgg==", + "dependencies": { + "tslib": "^2.0.3" + } + }, + "node_modules/lowercase-keys": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/lowercase-keys/-/lowercase-keys-3.0.0.tgz", + "integrity": "sha512-ozCC6gdQ+glXOQsveKD0YsDy8DSQFjDTz4zyzEHNV5+JP5D62LmfDZ6o1cycFx9ouG940M5dE8C8CTewdj2YWQ==", + "engines": { + "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/lru-cache": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-5.1.1.tgz", + "integrity": "sha512-KpNARQA3Iwv+jTA0utUVVbrh+Jlrr1Fv0e56GGzAFOXN7dk/FviaDW8LHmK52DlcH4WP2n6gI8vN1aesBFgo9w==", + "license": "ISC", + "dependencies": { + "yallist": "^3.0.2" + } + }, + "node_modules/lunr": { + "version": "2.3.9", + "resolved": "https://registry.npmjs.org/lunr/-/lunr-2.3.9.tgz", + "integrity": "sha512-zTU3DaZaF3Rt9rhN3uBMGQD3dD2/vFQqnvZCDv4dl5iOzq2IZQqTxu90r4E5J+nP70J3ilqVCrbho2eWaeW8Ow==" }, - "node_modules/lodash.debounce": { - "version": "4.0.8", - "resolved": "https://registry.npmjs.org/lodash.debounce/-/lodash.debounce-4.0.8.tgz", - "integrity": "sha512-FT1yDzDYEoYWhnSGnpE/4Kj1fLZkDFyqRb7fNt6FdYOSxlUWAtp42Eh6Wb0rGIv/m9Bgo7x4GhQbm5Ys4SG5ow==" + "node_modules/lunr-languages": { + "version": "1.14.0", + "resolved": "https://registry.npmjs.org/lunr-languages/-/lunr-languages-1.14.0.tgz", + "integrity": 
"sha512-hWUAb2KqM3L7J5bcrngszzISY4BxrXn/Xhbb9TTCJYEGqlR1nG67/M14sp09+PTIRklobrn57IAxcdcO/ZFyNA==" }, - "node_modules/lodash.memoize": { - "version": "4.1.2", - "resolved": "https://registry.npmjs.org/lodash.memoize/-/lodash.memoize-4.1.2.tgz", - "integrity": "sha512-t7j+NzmgnQzTAYXcsHYLgimltOV1MXHtlOWf6GjL9Kj8GK5FInw5JotxvbOs+IvV1/Dzo04/fCGfLVs7aXb4Ag==" + "node_modules/mark.js": { + "version": "8.11.1", + "resolved": "https://registry.npmjs.org/mark.js/-/mark.js-8.11.1.tgz", + "integrity": "sha512-1I+1qpDt4idfgLQG+BNWmrqku+7/2bi5nLf4YwF8y8zXvmfiTBY3PV3ZibfrjBueCByROpuBjLLFCajqkgYoLQ==" }, - "node_modules/lodash.uniq": { - "version": "4.5.0", - "resolved": "https://registry.npmjs.org/lodash.uniq/-/lodash.uniq-4.5.0.tgz", - "integrity": "sha512-xfBaXQd9ryd9dlSDvnvI0lvxfLJlYAZzXomUYzLKtUeOQvOP5piqAWuGtrhWeqaXK9hhoM/iyJc5AV+XfsX3HQ==" + "node_modules/markdown-extensions": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/markdown-extensions/-/markdown-extensions-2.0.0.tgz", + "integrity": "sha512-o5vL7aDWatOTX8LzaS1WMoaoxIiLRQJuIKKe2wAw6IeULDHaqbiqiggmx+pKvZDb1Sj+pE46Sn1T7lCqfFtg1Q==", + "engines": { + "node": ">=16" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } }, - "node_modules/longest-streak": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/longest-streak/-/longest-streak-3.1.0.tgz", - "integrity": "sha512-9Ri+o0JYgehTaVBBDoMqIl8GXtbWg711O3srftcHhZ0dqnETqLaoIK0x17fUw9rFSlK/0NlsKe0Ahhyl5pXE2g==", + "node_modules/markdown-table": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/markdown-table/-/markdown-table-3.0.3.tgz", + "integrity": "sha512-Z1NL3Tb1M9wH4XESsCDEksWoKTdlUafKc4pt0GRwjUyXaCFZ+dc3g2erqB6zm3szA2IUSi7VnPI+o/9jnxh9hw==", "funding": { "type": "github", "url": "https://github.com/sponsors/wooorm" } }, - "node_modules/loose-envify": { - "version": "1.4.0", - "resolved": "https://registry.npmjs.org/loose-envify/-/loose-envify-1.4.0.tgz", - "integrity": "sha512-lyuxPGr/Wfhrlem2CL/UcnUc1zcqKAImBDzukY7Y5F/yQiNdko6+fRLevlw1HgMySw7f611UIY408EtxRSoK3Q==", + "node_modules/math-intrinsics": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/math-intrinsics/-/math-intrinsics-1.1.0.tgz", + "integrity": "sha512-/IXtbwEk5HTPyEwyKX6hGkYXxM9nbj64B+ilVJnC/R6B0pH5G4V3b0pVbL7DBj4tkhBAppbQUlf6F6Xl9LHu1g==", + "license": "MIT", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/mdast-util-directive": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/mdast-util-directive/-/mdast-util-directive-3.0.0.tgz", + "integrity": "sha512-JUpYOqKI4mM3sZcNxmF/ox04XYFFkNwr0CFlrQIkCwbvH0xzMCqkMqAde9wRd80VAhaUrwFwKm2nxretdT1h7Q==", "dependencies": { - "js-tokens": "^3.0.0 || ^4.0.0" + "@types/mdast": "^4.0.0", + "@types/unist": "^3.0.0", + "devlop": "^1.0.0", + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0", + "parse-entities": "^4.0.0", + "stringify-entities": "^4.0.0", + "unist-util-visit-parents": "^6.0.0" }, - "bin": { - "loose-envify": "cli.js" + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" } }, - "node_modules/lower-case": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/lower-case/-/lower-case-2.0.2.tgz", - "integrity": "sha512-7fm3l3NAF9WfN6W3JOmf5drwpVqX78JtoGJ3A6W0a6ZnldM41w2fV5D490psKFTpMds8TJse/eHLFFsNHHjHgg==", + "node_modules/mdast-util-find-and-replace": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/mdast-util-find-and-replace/-/mdast-util-find-and-replace-3.0.1.tgz", + 
"integrity": "sha512-SG21kZHGC3XRTSUhtofZkBzZTJNM5ecCi0SK2IMKmSXR8vO3peL+kb1O0z7Zl83jKtutG4k5Wv/W7V3/YHvzPA==", "dependencies": { - "tslib": "^2.0.3" + "@types/mdast": "^4.0.0", + "escape-string-regexp": "^5.0.0", + "unist-util-is": "^6.0.0", + "unist-util-visit-parents": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-find-and-replace/node_modules/escape-string-regexp": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-5.0.0.tgz", + "integrity": "sha512-/veY75JbMK4j1yjvuUxuVsiS/hr/4iHs9FTT6cgTexxdE0Ly/glccBAkloH/DofkjRbZU3bnoj38mOmhkZ0lHw==", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/mdast-util-from-markdown": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/mdast-util-from-markdown/-/mdast-util-from-markdown-2.0.0.tgz", + "integrity": "sha512-n7MTOr/z+8NAX/wmhhDji8O3bRvPTV/U0oTCaZJkjhPSKTPhS3xufVhKGF8s1pJ7Ox4QgoIU7KHseh09S+9rTA==", + "dependencies": { + "@types/mdast": "^4.0.0", + "@types/unist": "^3.0.0", + "decode-named-character-reference": "^1.0.0", + "devlop": "^1.0.0", + "mdast-util-to-string": "^4.0.0", + "micromark": "^4.0.0", + "micromark-util-decode-numeric-character-reference": "^2.0.0", + "micromark-util-decode-string": "^2.0.0", + "micromark-util-normalize-identifier": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0", + "unist-util-stringify-position": "^4.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-from-markdown/node_modules/micromark-util-symbol": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.0.tgz", + "integrity": "sha512-8JZt9ElZ5kyTnO94muPxIGS8oyElRJaiJO8EzV6ZSyGQ1Is8xwl4Q45qU5UOg+bGH4AikWziz0iN4sFLWs8PGw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ] + }, + "node_modules/mdast-util-frontmatter": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/mdast-util-frontmatter/-/mdast-util-frontmatter-2.0.1.tgz", + "integrity": "sha512-LRqI9+wdgC25P0URIJY9vwocIzCcksduHQ9OF2joxQoyTNVduwLAFUzjoopuRJbJAReaKrNQKAZKL3uCMugWJA==", + "dependencies": { + "@types/mdast": "^4.0.0", + "devlop": "^1.0.0", + "escape-string-regexp": "^5.0.0", + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0", + "micromark-extension-frontmatter": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-frontmatter/node_modules/escape-string-regexp": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-5.0.0.tgz", + "integrity": "sha512-/veY75JbMK4j1yjvuUxuVsiS/hr/4iHs9FTT6cgTexxdE0Ly/glccBAkloH/DofkjRbZU3bnoj38mOmhkZ0lHw==", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/mdast-util-gfm": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/mdast-util-gfm/-/mdast-util-gfm-3.0.0.tgz", + "integrity": "sha512-dgQEX5Amaq+DuUqf26jJqSK9qgixgd6rYDHAv4aTBuA92cTknZlKpPfa86Z/s8Dj8xsAQpFfBmPUHWJBWqS4Bw==", + "dependencies": { + "mdast-util-from-markdown": "^2.0.0", + 
"mdast-util-gfm-autolink-literal": "^2.0.0", + "mdast-util-gfm-footnote": "^2.0.0", + "mdast-util-gfm-strikethrough": "^2.0.0", + "mdast-util-gfm-table": "^2.0.0", + "mdast-util-gfm-task-list-item": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-gfm-autolink-literal": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/mdast-util-gfm-autolink-literal/-/mdast-util-gfm-autolink-literal-2.0.0.tgz", + "integrity": "sha512-FyzMsduZZHSc3i0Px3PQcBT4WJY/X/RCtEJKuybiC6sjPqLv7h1yqAkmILZtuxMSsUyaLUWNp71+vQH2zqp5cg==", + "dependencies": { + "@types/mdast": "^4.0.0", + "ccount": "^2.0.0", + "devlop": "^1.0.0", + "mdast-util-find-and-replace": "^3.0.0", + "micromark-util-character": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-gfm-autolink-literal/node_modules/micromark-util-character": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.0.1.tgz", + "integrity": "sha512-3wgnrmEAJ4T+mGXAUfMvMAbxU9RDG43XmGce4j6CwPtVxB3vfwXSZ6KhFwDzZ3mZHhmPimMAXg71veiBGzeAZw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/mdast-util-gfm-autolink-literal/node_modules/micromark-util-symbol": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.0.tgz", + "integrity": "sha512-8JZt9ElZ5kyTnO94muPxIGS8oyElRJaiJO8EzV6ZSyGQ1Is8xwl4Q45qU5UOg+bGH4AikWziz0iN4sFLWs8PGw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ] + }, + "node_modules/mdast-util-gfm-footnote": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/mdast-util-gfm-footnote/-/mdast-util-gfm-footnote-2.0.0.tgz", + "integrity": "sha512-5jOT2boTSVkMnQ7LTrd6n/18kqwjmuYqo7JUPe+tRCY6O7dAuTFMtTPauYYrMPpox9hlN0uOx/FL8XvEfG9/mQ==", + "dependencies": { + "@types/mdast": "^4.0.0", + "devlop": "^1.1.0", + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0", + "micromark-util-normalize-identifier": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-gfm-strikethrough": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/mdast-util-gfm-strikethrough/-/mdast-util-gfm-strikethrough-2.0.0.tgz", + "integrity": "sha512-mKKb915TF+OC5ptj5bJ7WFRPdYtuHv0yTRxK2tJvi+BDqbkiG7h7u/9SI89nRAYcmap2xHQL9D+QG/6wSrTtXg==", + "dependencies": { + "@types/mdast": "^4.0.0", + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" } }, - "node_modules/lowercase-keys": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/lowercase-keys/-/lowercase-keys-3.0.0.tgz", - "integrity": "sha512-ozCC6gdQ+glXOQsveKD0YsDy8DSQFjDTz4zyzEHNV5+JP5D62LmfDZ6o1cycFx9ouG940M5dE8C8CTewdj2YWQ==", - "engines": { - "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + "node_modules/mdast-util-gfm-table": { + "version": 
"2.0.0", + "resolved": "https://registry.npmjs.org/mdast-util-gfm-table/-/mdast-util-gfm-table-2.0.0.tgz", + "integrity": "sha512-78UEvebzz/rJIxLvE7ZtDd/vIQ0RHv+3Mh5DR96p7cS7HsBhYIICDBCu8csTNWNO6tBWfqXPWekRuj2FNOGOZg==", + "dependencies": { + "@types/mdast": "^4.0.0", + "devlop": "^1.0.0", + "markdown-table": "^3.0.0", + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0" }, "funding": { - "url": "https://github.com/sponsors/sindresorhus" + "type": "opencollective", + "url": "https://opencollective.com/unified" } }, - "node_modules/lru-cache": { - "version": "5.1.1", - "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-5.1.1.tgz", - "integrity": "sha512-KpNARQA3Iwv+jTA0utUVVbrh+Jlrr1Fv0e56GGzAFOXN7dk/FviaDW8LHmK52DlcH4WP2n6gI8vN1aesBFgo9w==", + "node_modules/mdast-util-gfm-task-list-item": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/mdast-util-gfm-task-list-item/-/mdast-util-gfm-task-list-item-2.0.0.tgz", + "integrity": "sha512-IrtvNvjxC1o06taBAVJznEnkiHxLFTzgonUdy8hzFVeDun0uTjxxrRGVaNFqkU1wJR3RBPEfsxmU6jDWPofrTQ==", "dependencies": { - "yallist": "^3.0.2" + "@types/mdast": "^4.0.0", + "devlop": "^1.0.0", + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" } }, - "node_modules/lunr": { - "version": "2.3.9", - "resolved": "https://registry.npmjs.org/lunr/-/lunr-2.3.9.tgz", - "integrity": "sha512-zTU3DaZaF3Rt9rhN3uBMGQD3dD2/vFQqnvZCDv4dl5iOzq2IZQqTxu90r4E5J+nP70J3ilqVCrbho2eWaeW8Ow==" - }, - "node_modules/lunr-languages": { - "version": "1.14.0", - "resolved": "https://registry.npmjs.org/lunr-languages/-/lunr-languages-1.14.0.tgz", - "integrity": "sha512-hWUAb2KqM3L7J5bcrngszzISY4BxrXn/Xhbb9TTCJYEGqlR1nG67/M14sp09+PTIRklobrn57IAxcdcO/ZFyNA==" - }, - "node_modules/mark.js": { - "version": "8.11.1", - "resolved": "https://registry.npmjs.org/mark.js/-/mark.js-8.11.1.tgz", - "integrity": "sha512-1I+1qpDt4idfgLQG+BNWmrqku+7/2bi5nLf4YwF8y8zXvmfiTBY3PV3ZibfrjBueCByROpuBjLLFCajqkgYoLQ==" - }, - "node_modules/markdown-extensions": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/markdown-extensions/-/markdown-extensions-2.0.0.tgz", - "integrity": "sha512-o5vL7aDWatOTX8LzaS1WMoaoxIiLRQJuIKKe2wAw6IeULDHaqbiqiggmx+pKvZDb1Sj+pE46Sn1T7lCqfFtg1Q==", - "engines": { - "node": ">=16" + "node_modules/mdast-util-mdx": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/mdast-util-mdx/-/mdast-util-mdx-3.0.0.tgz", + "integrity": "sha512-JfbYLAW7XnYTTbUsmpu0kdBUVe+yKVJZBItEjwyYJiDJuZ9w4eeaqks4HQO+R7objWgS2ymV60GYpI14Ug554w==", + "dependencies": { + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-mdx-expression": "^2.0.0", + "mdast-util-mdx-jsx": "^3.0.0", + "mdast-util-mdxjs-esm": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0" }, "funding": { - "url": "https://github.com/sponsors/sindresorhus" + "type": "opencollective", + "url": "https://opencollective.com/unified" } }, - "node_modules/markdown-table": { - "version": "3.0.3", - "resolved": "https://registry.npmjs.org/markdown-table/-/markdown-table-3.0.3.tgz", - "integrity": "sha512-Z1NL3Tb1M9wH4XESsCDEksWoKTdlUafKc4pt0GRwjUyXaCFZ+dc3g2erqB6zm3szA2IUSi7VnPI+o/9jnxh9hw==", + "node_modules/mdast-util-mdx-expression": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/mdast-util-mdx-expression/-/mdast-util-mdx-expression-2.0.0.tgz", + "integrity": "sha512-fGCu8eWdKUKNu5mohVGkhBXCXGnOTLuFqOvGMvdikr+J1w7lDJgxThOKpwRWzzbyXAU2hhSwsmssOY4yTokluw==", 
+ "dependencies": { + "@types/estree-jsx": "^1.0.0", + "@types/hast": "^3.0.0", + "@types/mdast": "^4.0.0", + "devlop": "^1.0.0", + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0" + }, "funding": { - "type": "github", - "url": "https://github.com/sponsors/wooorm" + "type": "opencollective", + "url": "https://opencollective.com/unified" } }, - "node_modules/mdast-util-directive": { + "node_modules/mdast-util-mdx-jsx": { "version": "3.0.0", - "resolved": "https://registry.npmjs.org/mdast-util-directive/-/mdast-util-directive-3.0.0.tgz", - "integrity": "sha512-JUpYOqKI4mM3sZcNxmF/ox04XYFFkNwr0CFlrQIkCwbvH0xzMCqkMqAde9wRd80VAhaUrwFwKm2nxretdT1h7Q==", + "resolved": "https://registry.npmjs.org/mdast-util-mdx-jsx/-/mdast-util-mdx-jsx-3.0.0.tgz", + "integrity": "sha512-XZuPPzQNBPAlaqsTTgRrcJnyFbSOBovSadFgbFu8SnuNgm+6Bdx1K+IWoitsmj6Lq6MNtI+ytOqwN70n//NaBA==", "dependencies": { + "@types/estree-jsx": "^1.0.0", + "@types/hast": "^3.0.0", "@types/mdast": "^4.0.0", "@types/unist": "^3.0.0", - "devlop": "^1.0.0", + "ccount": "^2.0.0", + "devlop": "^1.1.0", "mdast-util-from-markdown": "^2.0.0", "mdast-util-to-markdown": "^2.0.0", "parse-entities": "^4.0.0", "stringify-entities": "^4.0.0", - "unist-util-visit-parents": "^6.0.0" + "unist-util-remove-position": "^5.0.0", + "unist-util-stringify-position": "^4.0.0", + "vfile-message": "^4.0.0" }, "funding": { "type": "opencollective", "url": "https://opencollective.com/unified" } }, - "node_modules/mdast-util-find-and-replace": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/mdast-util-find-and-replace/-/mdast-util-find-and-replace-3.0.1.tgz", - "integrity": "sha512-SG21kZHGC3XRTSUhtofZkBzZTJNM5ecCi0SK2IMKmSXR8vO3peL+kb1O0z7Zl83jKtutG4k5Wv/W7V3/YHvzPA==", + "node_modules/mdast-util-mdxjs-esm": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/mdast-util-mdxjs-esm/-/mdast-util-mdxjs-esm-2.0.1.tgz", + "integrity": "sha512-EcmOpxsZ96CvlP03NghtH1EsLtr0n9Tm4lPUJUBccV9RwUOneqSycg19n5HGzCf+10LozMRSObtVr3ee1WoHtg==", "dependencies": { + "@types/estree-jsx": "^1.0.0", + "@types/hast": "^3.0.0", "@types/mdast": "^4.0.0", - "escape-string-regexp": "^5.0.0", - "unist-util-is": "^6.0.0", - "unist-util-visit-parents": "^6.0.0" + "devlop": "^1.0.0", + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0" }, "funding": { "type": "opencollective", "url": "https://opencollective.com/unified" } }, - "node_modules/mdast-util-find-and-replace/node_modules/escape-string-regexp": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-5.0.0.tgz", - "integrity": "sha512-/veY75JbMK4j1yjvuUxuVsiS/hr/4iHs9FTT6cgTexxdE0Ly/glccBAkloH/DofkjRbZU3bnoj38mOmhkZ0lHw==", - "engines": { - "node": ">=12" + "node_modules/mdast-util-phrasing": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/mdast-util-phrasing/-/mdast-util-phrasing-4.0.0.tgz", + "integrity": "sha512-xadSsJayQIucJ9n053dfQwVu1kuXg7jCTdYsMK8rqzKZh52nLfSH/k0sAxE0u+pj/zKZX+o5wB+ML5mRayOxFA==", + "dependencies": { + "@types/mdast": "^4.0.0", + "unist-util-is": "^6.0.0" }, "funding": { - "url": "https://github.com/sponsors/sindresorhus" + "type": "opencollective", + "url": "https://opencollective.com/unified" } }, - "node_modules/mdast-util-from-markdown": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/mdast-util-from-markdown/-/mdast-util-from-markdown-2.0.0.tgz", - "integrity": 
"sha512-n7MTOr/z+8NAX/wmhhDji8O3bRvPTV/U0oTCaZJkjhPSKTPhS3xufVhKGF8s1pJ7Ox4QgoIU7KHseh09S+9rTA==", + "node_modules/mdast-util-to-hast": { + "version": "13.0.2", + "resolved": "https://registry.npmjs.org/mdast-util-to-hast/-/mdast-util-to-hast-13.0.2.tgz", + "integrity": "sha512-U5I+500EOOw9e3ZrclN3Is3fRpw8c19SMyNZlZ2IS+7vLsNzb2Om11VpIVOR+/0137GhZsFEF6YiKD5+0Hr2Og==", "dependencies": { + "@types/hast": "^3.0.0", "@types/mdast": "^4.0.0", - "@types/unist": "^3.0.0", - "decode-named-character-reference": "^1.0.0", + "@ungap/structured-clone": "^1.0.0", "devlop": "^1.0.0", + "micromark-util-sanitize-uri": "^2.0.0", + "trim-lines": "^3.0.0", + "unist-util-position": "^5.0.0", + "unist-util-visit": "^5.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-to-markdown": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/mdast-util-to-markdown/-/mdast-util-to-markdown-2.1.0.tgz", + "integrity": "sha512-SR2VnIEdVNCJbP6y7kVTJgPLifdr8WEU440fQec7qHoHOUz/oJ2jmNRqdDQ3rbiStOXb2mCDGTuwsK5OPUgYlQ==", + "dependencies": { + "@types/mdast": "^4.0.0", + "@types/unist": "^3.0.0", + "longest-streak": "^3.0.0", + "mdast-util-phrasing": "^4.0.0", "mdast-util-to-string": "^4.0.0", - "micromark": "^4.0.0", - "micromark-util-decode-numeric-character-reference": "^2.0.0", "micromark-util-decode-string": "^2.0.0", - "micromark-util-normalize-identifier": "^2.0.0", - "micromark-util-symbol": "^2.0.0", - "micromark-util-types": "^2.0.0", - "unist-util-stringify-position": "^4.0.0" + "unist-util-visit": "^5.0.0", + "zwitch": "^2.0.0" }, "funding": { "type": "opencollective", "url": "https://opencollective.com/unified" } }, - "node_modules/mdast-util-from-markdown/node_modules/micromark-util-symbol": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.0.tgz", - "integrity": "sha512-8JZt9ElZ5kyTnO94muPxIGS8oyElRJaiJO8EzV6ZSyGQ1Is8xwl4Q45qU5UOg+bGH4AikWziz0iN4sFLWs8PGw==", - "funding": [ - { - "type": "GitHub Sponsors", - "url": "https://github.com/sponsors/unifiedjs" - }, - { - "type": "OpenCollective", - "url": "https://opencollective.com/unified" - } - ] - }, - "node_modules/mdast-util-frontmatter": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/mdast-util-frontmatter/-/mdast-util-frontmatter-2.0.1.tgz", - "integrity": "sha512-LRqI9+wdgC25P0URIJY9vwocIzCcksduHQ9OF2joxQoyTNVduwLAFUzjoopuRJbJAReaKrNQKAZKL3uCMugWJA==", + "node_modules/mdast-util-to-string": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/mdast-util-to-string/-/mdast-util-to-string-4.0.0.tgz", + "integrity": "sha512-0H44vDimn51F0YwvxSJSm0eCDOJTRlmN0R1yBh4HLj9wiV1Dn0QoXGbvFAWj2hSItVTlCmBF1hqKlIyUBVFLPg==", "dependencies": { - "@types/mdast": "^4.0.0", - "devlop": "^1.0.0", - "escape-string-regexp": "^5.0.0", - "mdast-util-from-markdown": "^2.0.0", - "mdast-util-to-markdown": "^2.0.0", - "micromark-extension-frontmatter": "^2.0.0" + "@types/mdast": "^4.0.0" }, "funding": { "type": "opencollective", "url": "https://opencollective.com/unified" } }, - "node_modules/mdast-util-frontmatter/node_modules/escape-string-regexp": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-5.0.0.tgz", - "integrity": "sha512-/veY75JbMK4j1yjvuUxuVsiS/hr/4iHs9FTT6cgTexxdE0Ly/glccBAkloH/DofkjRbZU3bnoj38mOmhkZ0lHw==", + "node_modules/mdn-data": { + "version": "2.0.30", + "resolved": 
"https://registry.npmjs.org/mdn-data/-/mdn-data-2.0.30.tgz", + "integrity": "sha512-GaqWWShW4kv/G9IEucWScBx9G1/vsFZZJUO+tD26M8J8z3Kw5RDQjaoZe03YAClgeS/SWPOcb4nkFBTEi5DUEA==" + }, + "node_modules/media-typer": { + "version": "0.3.0", + "resolved": "https://registry.npmjs.org/media-typer/-/media-typer-0.3.0.tgz", + "integrity": "sha512-dq+qelQ9akHpcOl/gUVRTxVIOkAJ1wR3QAvb4RsVjS8oVoFjDGTc679wJYmUmknUF5HwMLOgb5O+a3KxfWapPQ==", + "license": "MIT", "engines": { - "node": ">=12" + "node": ">= 0.6" + } + }, + "node_modules/memfs": { + "version": "3.5.3", + "resolved": "https://registry.npmjs.org/memfs/-/memfs-3.5.3.tgz", + "integrity": "sha512-UERzLsxzllchadvbPs5aolHh65ISpKpM+ccLbOJ8/vvpBKmAWf+la7dXFy7Mr0ySHbdHrFv5kGFCUHHe6GFEmw==", + "dependencies": { + "fs-monkey": "^1.0.4" }, + "engines": { + "node": ">= 4.0.0" + } + }, + "node_modules/merge-descriptors": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/merge-descriptors/-/merge-descriptors-1.0.3.tgz", + "integrity": "sha512-gaNvAS7TZ897/rVaZ0nMtAyxNyi/pdbjbAwUpFQpN70GqnVfOiXpeUUMKRBmzXaSQ8DdTX4/0ms62r2K+hE6mQ==", + "license": "MIT", "funding": { "url": "https://github.com/sponsors/sindresorhus" } }, - "node_modules/mdast-util-gfm": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/mdast-util-gfm/-/mdast-util-gfm-3.0.0.tgz", - "integrity": "sha512-dgQEX5Amaq+DuUqf26jJqSK9qgixgd6rYDHAv4aTBuA92cTknZlKpPfa86Z/s8Dj8xsAQpFfBmPUHWJBWqS4Bw==", + "node_modules/merge-stream": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/merge-stream/-/merge-stream-2.0.0.tgz", + "integrity": "sha512-abv/qOcuPfk3URPfDzmZU1LKmuw8kT+0nIHvKrKgFrwifol/doWcdA4ZqsWQ8ENrFKkd67Mfpo/LovbIUsbt3w==" + }, + "node_modules/merge2": { + "version": "1.4.1", + "resolved": "https://registry.npmjs.org/merge2/-/merge2-1.4.1.tgz", + "integrity": "sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg==", + "engines": { + "node": ">= 8" + } + }, + "node_modules/mermaid": { + "version": "10.9.3", + "resolved": "https://registry.npmjs.org/mermaid/-/mermaid-10.9.3.tgz", + "integrity": "sha512-V80X1isSEvAewIL3xhmz/rVmc27CVljcsbWxkxlWJWY/1kQa4XOABqpDl2qQLGKzpKm6WbTfUEKImBlUfFYArw==", + "license": "MIT", + "dependencies": { + "@braintree/sanitize-url": "^6.0.1", + "@types/d3-scale": "^4.0.3", + "@types/d3-scale-chromatic": "^3.0.0", + "cytoscape": "^3.28.1", + "cytoscape-cose-bilkent": "^4.1.0", + "d3": "^7.4.0", + "d3-sankey": "^0.12.3", + "dagre-d3-es": "7.0.10", + "dayjs": "^1.11.7", + "dompurify": "^3.0.5 <3.1.7", + "elkjs": "^0.9.0", + "katex": "^0.16.9", + "khroma": "^2.0.0", + "lodash-es": "^4.17.21", + "mdast-util-from-markdown": "^1.3.0", + "non-layered-tidy-tree-layout": "^2.0.2", + "stylis": "^4.1.3", + "ts-dedent": "^2.2.0", + "uuid": "^9.0.0", + "web-worker": "^1.2.0" + } + }, + "node_modules/mermaid/node_modules/@types/mdast": { + "version": "3.0.15", + "resolved": "https://registry.npmjs.org/@types/mdast/-/mdast-3.0.15.tgz", + "integrity": "sha512-LnwD+mUEfxWMa1QpDraczIn6k0Ee3SMicuYSSzS6ZYl2gKS09EClnJYGd8Du6rfc5r/GZEk5o1mRb8TaTj03sQ==", + "license": "MIT", + "dependencies": { + "@types/unist": "^2" + } + }, + "node_modules/mermaid/node_modules/@types/unist": { + "version": "2.0.11", + "resolved": "https://registry.npmjs.org/@types/unist/-/unist-2.0.11.tgz", + "integrity": "sha512-CmBKiL6NNo/OqgmMn95Fk9Whlp2mtvIv+KNpQKN2F4SjvrEesubTRWGYSg+BnWZOnlCaSTU1sMpsBOzgbYhnsA==", + "license": "MIT" + }, + "node_modules/mermaid/node_modules/mdast-util-from-markdown": { + "version": "1.3.1", + "resolved": 
"https://registry.npmjs.org/mdast-util-from-markdown/-/mdast-util-from-markdown-1.3.1.tgz", + "integrity": "sha512-4xTO/M8c82qBcnQc1tgpNtubGUW/Y1tBQ1B0i5CtSoelOLKFYlElIr3bvgREYYO5iRqbMY1YuqZng0GVOI8Qww==", + "license": "MIT", "dependencies": { - "mdast-util-from-markdown": "^2.0.0", - "mdast-util-gfm-autolink-literal": "^2.0.0", - "mdast-util-gfm-footnote": "^2.0.0", - "mdast-util-gfm-strikethrough": "^2.0.0", - "mdast-util-gfm-table": "^2.0.0", - "mdast-util-gfm-task-list-item": "^2.0.0", - "mdast-util-to-markdown": "^2.0.0" + "@types/mdast": "^3.0.0", + "@types/unist": "^2.0.0", + "decode-named-character-reference": "^1.0.0", + "mdast-util-to-string": "^3.1.0", + "micromark": "^3.0.0", + "micromark-util-decode-numeric-character-reference": "^1.0.0", + "micromark-util-decode-string": "^1.0.0", + "micromark-util-normalize-identifier": "^1.0.0", + "micromark-util-symbol": "^1.0.0", + "micromark-util-types": "^1.0.0", + "unist-util-stringify-position": "^3.0.0", + "uvu": "^0.5.0" }, "funding": { "type": "opencollective", "url": "https://opencollective.com/unified" } }, - "node_modules/mdast-util-gfm-autolink-literal": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/mdast-util-gfm-autolink-literal/-/mdast-util-gfm-autolink-literal-2.0.0.tgz", - "integrity": "sha512-FyzMsduZZHSc3i0Px3PQcBT4WJY/X/RCtEJKuybiC6sjPqLv7h1yqAkmILZtuxMSsUyaLUWNp71+vQH2zqp5cg==", + "node_modules/mermaid/node_modules/mdast-util-to-string": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/mdast-util-to-string/-/mdast-util-to-string-3.2.0.tgz", + "integrity": "sha512-V4Zn/ncyN1QNSqSBxTrMOLpjr+IKdHl2v3KVLoWmDPscP4r9GcCi71gjgvUV1SFSKh92AjAG4peFuBl2/YgCJg==", + "license": "MIT", "dependencies": { - "@types/mdast": "^4.0.0", - "ccount": "^2.0.0", - "devlop": "^1.0.0", - "mdast-util-find-and-replace": "^3.0.0", - "micromark-util-character": "^2.0.0" + "@types/mdast": "^3.0.0" }, "funding": { "type": "opencollective", "url": "https://opencollective.com/unified" } }, - "node_modules/mdast-util-gfm-autolink-literal/node_modules/micromark-util-character": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.0.1.tgz", - "integrity": "sha512-3wgnrmEAJ4T+mGXAUfMvMAbxU9RDG43XmGce4j6CwPtVxB3vfwXSZ6KhFwDzZ3mZHhmPimMAXg71veiBGzeAZw==", + "node_modules/mermaid/node_modules/micromark": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/micromark/-/micromark-3.2.0.tgz", + "integrity": "sha512-uD66tJj54JLYq0De10AhWycZWGQNUvDI55xPgk2sQM5kn1JYlhbCMTtEeT27+vAhW2FBQxLlOmS3pmA7/2z4aA==", "funding": [ { "type": "GitHub Sponsors", @@ -8878,15 +9767,31 @@ "url": "https://opencollective.com/unified" } ], + "license": "MIT", "dependencies": { - "micromark-util-symbol": "^2.0.0", - "micromark-util-types": "^2.0.0" + "@types/debug": "^4.0.0", + "debug": "^4.0.0", + "decode-named-character-reference": "^1.0.0", + "micromark-core-commonmark": "^1.0.1", + "micromark-factory-space": "^1.0.0", + "micromark-util-character": "^1.0.0", + "micromark-util-chunked": "^1.0.0", + "micromark-util-combine-extensions": "^1.0.0", + "micromark-util-decode-numeric-character-reference": "^1.0.0", + "micromark-util-encode": "^1.0.0", + "micromark-util-normalize-identifier": "^1.0.0", + "micromark-util-resolve-all": "^1.0.0", + "micromark-util-sanitize-uri": "^1.0.0", + "micromark-util-subtokenize": "^1.0.0", + "micromark-util-symbol": "^1.0.0", + "micromark-util-types": "^1.0.1", + "uvu": "^0.5.0" } }, - 
"node_modules/mdast-util-gfm-autolink-literal/node_modules/micromark-util-symbol": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.0.tgz", - "integrity": "sha512-8JZt9ElZ5kyTnO94muPxIGS8oyElRJaiJO8EzV6ZSyGQ1Is8xwl4Q45qU5UOg+bGH4AikWziz0iN4sFLWs8PGw==", + "node_modules/mermaid/node_modules/micromark-core-commonmark": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/micromark-core-commonmark/-/micromark-core-commonmark-1.1.0.tgz", + "integrity": "sha512-BgHO1aRbolh2hcrzL2d1La37V0Aoz73ymF8rAcKnohLy93titmv62E0gP8Hrx9PKcKrqCZ1BbLGbP3bEhoXYlw==", "funding": [ { "type": "GitHub Sponsors", @@ -8896,246 +9801,368 @@ "type": "OpenCollective", "url": "https://opencollective.com/unified" } - ] + ], + "license": "MIT", + "dependencies": { + "decode-named-character-reference": "^1.0.0", + "micromark-factory-destination": "^1.0.0", + "micromark-factory-label": "^1.0.0", + "micromark-factory-space": "^1.0.0", + "micromark-factory-title": "^1.0.0", + "micromark-factory-whitespace": "^1.0.0", + "micromark-util-character": "^1.0.0", + "micromark-util-chunked": "^1.0.0", + "micromark-util-classify-character": "^1.0.0", + "micromark-util-html-tag-name": "^1.0.0", + "micromark-util-normalize-identifier": "^1.0.0", + "micromark-util-resolve-all": "^1.0.0", + "micromark-util-subtokenize": "^1.0.0", + "micromark-util-symbol": "^1.0.0", + "micromark-util-types": "^1.0.1", + "uvu": "^0.5.0" + } }, - "node_modules/mdast-util-gfm-footnote": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/mdast-util-gfm-footnote/-/mdast-util-gfm-footnote-2.0.0.tgz", - "integrity": "sha512-5jOT2boTSVkMnQ7LTrd6n/18kqwjmuYqo7JUPe+tRCY6O7dAuTFMtTPauYYrMPpox9hlN0uOx/FL8XvEfG9/mQ==", + "node_modules/mermaid/node_modules/micromark-factory-destination": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/micromark-factory-destination/-/micromark-factory-destination-1.1.0.tgz", + "integrity": "sha512-XaNDROBgx9SgSChd69pjiGKbV+nfHGDPVYFs5dOoDd7ZnMAE+Cuu91BCpsY8RT2NP9vo/B8pds2VQNCLiu0zhg==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", "dependencies": { - "@types/mdast": "^4.0.0", - "devlop": "^1.1.0", - "mdast-util-from-markdown": "^2.0.0", - "mdast-util-to-markdown": "^2.0.0", - "micromark-util-normalize-identifier": "^2.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" + "micromark-util-character": "^1.0.0", + "micromark-util-symbol": "^1.0.0", + "micromark-util-types": "^1.0.0" } }, - "node_modules/mdast-util-gfm-strikethrough": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/mdast-util-gfm-strikethrough/-/mdast-util-gfm-strikethrough-2.0.0.tgz", - "integrity": "sha512-mKKb915TF+OC5ptj5bJ7WFRPdYtuHv0yTRxK2tJvi+BDqbkiG7h7u/9SI89nRAYcmap2xHQL9D+QG/6wSrTtXg==", + "node_modules/mermaid/node_modules/micromark-factory-label": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/micromark-factory-label/-/micromark-factory-label-1.1.0.tgz", + "integrity": "sha512-OLtyez4vZo/1NjxGhcpDSbHQ+m0IIGnT8BoPamh+7jVlzLJBH98zzuCoUeMxvM6WsNeh8wx8cKvqLiPHEACn0w==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", "dependencies": { - "@types/mdast": 
"^4.0.0", - "mdast-util-from-markdown": "^2.0.0", - "mdast-util-to-markdown": "^2.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" + "micromark-util-character": "^1.0.0", + "micromark-util-symbol": "^1.0.0", + "micromark-util-types": "^1.0.0", + "uvu": "^0.5.0" } }, - "node_modules/mdast-util-gfm-table": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/mdast-util-gfm-table/-/mdast-util-gfm-table-2.0.0.tgz", - "integrity": "sha512-78UEvebzz/rJIxLvE7ZtDd/vIQ0RHv+3Mh5DR96p7cS7HsBhYIICDBCu8csTNWNO6tBWfqXPWekRuj2FNOGOZg==", + "node_modules/mermaid/node_modules/micromark-factory-title": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/micromark-factory-title/-/micromark-factory-title-1.1.0.tgz", + "integrity": "sha512-J7n9R3vMmgjDOCY8NPw55jiyaQnH5kBdV2/UXCtZIpnHH3P6nHUKaH7XXEYuWwx/xUJcawa8plLBEjMPU24HzQ==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", "dependencies": { - "@types/mdast": "^4.0.0", - "devlop": "^1.0.0", - "markdown-table": "^3.0.0", - "mdast-util-from-markdown": "^2.0.0", - "mdast-util-to-markdown": "^2.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" + "micromark-factory-space": "^1.0.0", + "micromark-util-character": "^1.0.0", + "micromark-util-symbol": "^1.0.0", + "micromark-util-types": "^1.0.0" } }, - "node_modules/mdast-util-gfm-task-list-item": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/mdast-util-gfm-task-list-item/-/mdast-util-gfm-task-list-item-2.0.0.tgz", - "integrity": "sha512-IrtvNvjxC1o06taBAVJznEnkiHxLFTzgonUdy8hzFVeDun0uTjxxrRGVaNFqkU1wJR3RBPEfsxmU6jDWPofrTQ==", + "node_modules/mermaid/node_modules/micromark-factory-whitespace": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/micromark-factory-whitespace/-/micromark-factory-whitespace-1.1.0.tgz", + "integrity": "sha512-v2WlmiymVSp5oMg+1Q0N1Lxmt6pMhIHD457whWM7/GUlEks1hI9xj5w3zbc4uuMKXGisksZk8DzP2UyGbGqNsQ==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", "dependencies": { - "@types/mdast": "^4.0.0", - "devlop": "^1.0.0", - "mdast-util-from-markdown": "^2.0.0", - "mdast-util-to-markdown": "^2.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" + "micromark-factory-space": "^1.0.0", + "micromark-util-character": "^1.0.0", + "micromark-util-symbol": "^1.0.0", + "micromark-util-types": "^1.0.0" } }, - "node_modules/mdast-util-mdx": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/mdast-util-mdx/-/mdast-util-mdx-3.0.0.tgz", - "integrity": "sha512-JfbYLAW7XnYTTbUsmpu0kdBUVe+yKVJZBItEjwyYJiDJuZ9w4eeaqks4HQO+R7objWgS2ymV60GYpI14Ug554w==", + "node_modules/mermaid/node_modules/micromark-util-chunked": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/micromark-util-chunked/-/micromark-util-chunked-1.1.0.tgz", + "integrity": "sha512-Ye01HXpkZPNcV6FiyoW2fGZDUw4Yc7vT0E9Sad83+bEDiCJ1uXu0S3mr8WLpsz3HaG3x2q0HM6CTuPdcZcluFQ==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", "dependencies": { - 
"mdast-util-from-markdown": "^2.0.0", - "mdast-util-mdx-expression": "^2.0.0", - "mdast-util-mdx-jsx": "^3.0.0", - "mdast-util-mdxjs-esm": "^2.0.0", - "mdast-util-to-markdown": "^2.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" + "micromark-util-symbol": "^1.0.0" } }, - "node_modules/mdast-util-mdx-expression": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/mdast-util-mdx-expression/-/mdast-util-mdx-expression-2.0.0.tgz", - "integrity": "sha512-fGCu8eWdKUKNu5mohVGkhBXCXGnOTLuFqOvGMvdikr+J1w7lDJgxThOKpwRWzzbyXAU2hhSwsmssOY4yTokluw==", + "node_modules/mermaid/node_modules/micromark-util-classify-character": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/micromark-util-classify-character/-/micromark-util-classify-character-1.1.0.tgz", + "integrity": "sha512-SL0wLxtKSnklKSUplok1WQFoGhUdWYKggKUiqhX+Swala+BtptGCu5iPRc+xvzJ4PXE/hwM3FNXsfEVgoZsWbw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", "dependencies": { - "@types/estree-jsx": "^1.0.0", - "@types/hast": "^3.0.0", - "@types/mdast": "^4.0.0", - "devlop": "^1.0.0", - "mdast-util-from-markdown": "^2.0.0", - "mdast-util-to-markdown": "^2.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" + "micromark-util-character": "^1.0.0", + "micromark-util-symbol": "^1.0.0", + "micromark-util-types": "^1.0.0" + } + }, + "node_modules/mermaid/node_modules/micromark-util-combine-extensions": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/micromark-util-combine-extensions/-/micromark-util-combine-extensions-1.1.0.tgz", + "integrity": "sha512-Q20sp4mfNf9yEqDL50WwuWZHUrCO4fEyeDCnMGmG5Pr0Cz15Uo7KBs6jq+dq0EgX4DPwwrh9m0X+zPV1ypFvUA==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-chunked": "^1.0.0", + "micromark-util-types": "^1.0.0" + } + }, + "node_modules/mermaid/node_modules/micromark-util-decode-numeric-character-reference": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/micromark-util-decode-numeric-character-reference/-/micromark-util-decode-numeric-character-reference-1.1.0.tgz", + "integrity": "sha512-m9V0ExGv0jB1OT21mrWcuf4QhP46pH1KkfWy9ZEezqHKAxkj4mPCy3nIH1rkbdMlChLHX531eOrymlwyZIf2iw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", + "dependencies": { + "micromark-util-symbol": "^1.0.0" } }, - "node_modules/mdast-util-mdx-jsx": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/mdast-util-mdx-jsx/-/mdast-util-mdx-jsx-3.0.0.tgz", - "integrity": "sha512-XZuPPzQNBPAlaqsTTgRrcJnyFbSOBovSadFgbFu8SnuNgm+6Bdx1K+IWoitsmj6Lq6MNtI+ytOqwN70n//NaBA==", + "node_modules/mermaid/node_modules/micromark-util-decode-string": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/micromark-util-decode-string/-/micromark-util-decode-string-1.1.0.tgz", + "integrity": "sha512-YphLGCK8gM1tG1bd54azwyrQRjCFcmgj2S2GoJDNnh4vYtnL38JS8M4gpxzOPNyHdNEpheyWXCTnnTDY3N+NVQ==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": 
"https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", "dependencies": { - "@types/estree-jsx": "^1.0.0", - "@types/hast": "^3.0.0", - "@types/mdast": "^4.0.0", - "@types/unist": "^3.0.0", - "ccount": "^2.0.0", - "devlop": "^1.1.0", - "mdast-util-from-markdown": "^2.0.0", - "mdast-util-to-markdown": "^2.0.0", - "parse-entities": "^4.0.0", - "stringify-entities": "^4.0.0", - "unist-util-remove-position": "^5.0.0", - "unist-util-stringify-position": "^4.0.0", - "vfile-message": "^4.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" + "decode-named-character-reference": "^1.0.0", + "micromark-util-character": "^1.0.0", + "micromark-util-decode-numeric-character-reference": "^1.0.0", + "micromark-util-symbol": "^1.0.0" } }, - "node_modules/mdast-util-mdxjs-esm": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/mdast-util-mdxjs-esm/-/mdast-util-mdxjs-esm-2.0.1.tgz", - "integrity": "sha512-EcmOpxsZ96CvlP03NghtH1EsLtr0n9Tm4lPUJUBccV9RwUOneqSycg19n5HGzCf+10LozMRSObtVr3ee1WoHtg==", + "node_modules/mermaid/node_modules/micromark-util-encode": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/micromark-util-encode/-/micromark-util-encode-1.1.0.tgz", + "integrity": "sha512-EuEzTWSTAj9PA5GOAs992GzNh2dGQO52UvAbtSOMvXTxv3Criqb6IOzJUBCmEqrrXSblJIJBbFFv6zPxpreiJw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT" + }, + "node_modules/mermaid/node_modules/micromark-util-html-tag-name": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/micromark-util-html-tag-name/-/micromark-util-html-tag-name-1.2.0.tgz", + "integrity": "sha512-VTQzcuQgFUD7yYztuQFKXT49KghjtETQ+Wv/zUjGSGBioZnkA4P1XXZPT1FHeJA6RwRXSF47yvJ1tsJdoxwO+Q==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT" + }, + "node_modules/mermaid/node_modules/micromark-util-normalize-identifier": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/micromark-util-normalize-identifier/-/micromark-util-normalize-identifier-1.1.0.tgz", + "integrity": "sha512-N+w5vhqrBihhjdpM8+5Xsxy71QWqGn7HYNUvch71iV2PM7+E3uWGox1Qp90loa1ephtCxG2ftRV/Conitc6P2Q==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", "dependencies": { - "@types/estree-jsx": "^1.0.0", - "@types/hast": "^3.0.0", - "@types/mdast": "^4.0.0", - "devlop": "^1.0.0", - "mdast-util-from-markdown": "^2.0.0", - "mdast-util-to-markdown": "^2.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" + "micromark-util-symbol": "^1.0.0" } }, - "node_modules/mdast-util-phrasing": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/mdast-util-phrasing/-/mdast-util-phrasing-4.0.0.tgz", - "integrity": "sha512-xadSsJayQIucJ9n053dfQwVu1kuXg7jCTdYsMK8rqzKZh52nLfSH/k0sAxE0u+pj/zKZX+o5wB+ML5mRayOxFA==", + "node_modules/mermaid/node_modules/micromark-util-resolve-all": { + "version": "1.1.0", + "resolved": 
"https://registry.npmjs.org/micromark-util-resolve-all/-/micromark-util-resolve-all-1.1.0.tgz", + "integrity": "sha512-b/G6BTMSg+bX+xVCshPTPyAu2tmA0E4X98NSR7eIbeC6ycCqCeE7wjfDIgzEbkzdEVJXRtOG4FbEm/uGbCRouA==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", "dependencies": { - "@types/mdast": "^4.0.0", - "unist-util-is": "^6.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" + "micromark-util-types": "^1.0.0" } }, - "node_modules/mdast-util-to-hast": { - "version": "13.0.2", - "resolved": "https://registry.npmjs.org/mdast-util-to-hast/-/mdast-util-to-hast-13.0.2.tgz", - "integrity": "sha512-U5I+500EOOw9e3ZrclN3Is3fRpw8c19SMyNZlZ2IS+7vLsNzb2Om11VpIVOR+/0137GhZsFEF6YiKD5+0Hr2Og==", + "node_modules/mermaid/node_modules/micromark-util-sanitize-uri": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/micromark-util-sanitize-uri/-/micromark-util-sanitize-uri-1.2.0.tgz", + "integrity": "sha512-QO4GXv0XZfWey4pYFndLUKEAktKkG5kZTdUNaTAkzbuJxn2tNBOr+QtxR2XpWaMhbImT2dPzyLrPXLlPhph34A==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", "dependencies": { - "@types/hast": "^3.0.0", - "@types/mdast": "^4.0.0", - "@ungap/structured-clone": "^1.0.0", - "devlop": "^1.0.0", - "micromark-util-sanitize-uri": "^2.0.0", - "trim-lines": "^3.0.0", - "unist-util-position": "^5.0.0", - "unist-util-visit": "^5.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" + "micromark-util-character": "^1.0.0", + "micromark-util-encode": "^1.0.0", + "micromark-util-symbol": "^1.0.0" } }, - "node_modules/mdast-util-to-markdown": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/mdast-util-to-markdown/-/mdast-util-to-markdown-2.1.0.tgz", - "integrity": "sha512-SR2VnIEdVNCJbP6y7kVTJgPLifdr8WEU440fQec7qHoHOUz/oJ2jmNRqdDQ3rbiStOXb2mCDGTuwsK5OPUgYlQ==", + "node_modules/mermaid/node_modules/micromark-util-subtokenize": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/micromark-util-subtokenize/-/micromark-util-subtokenize-1.1.0.tgz", + "integrity": "sha512-kUQHyzRoxvZO2PuLzMt2P/dwVsTiivCK8icYTeR+3WgbuPqfHgPPy7nFKbeqRivBvn/3N3GBiNC+JRTMSxEC7A==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT", "dependencies": { - "@types/mdast": "^4.0.0", - "@types/unist": "^3.0.0", - "longest-streak": "^3.0.0", - "mdast-util-phrasing": "^4.0.0", - "mdast-util-to-string": "^4.0.0", - "micromark-util-decode-string": "^2.0.0", - "unist-util-visit": "^5.0.0", - "zwitch": "^2.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" + "micromark-util-chunked": "^1.0.0", + "micromark-util-symbol": "^1.0.0", + "micromark-util-types": "^1.0.0", + "uvu": "^0.5.0" } }, - "node_modules/mdast-util-to-string": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/mdast-util-to-string/-/mdast-util-to-string-4.0.0.tgz", - "integrity": "sha512-0H44vDimn51F0YwvxSJSm0eCDOJTRlmN0R1yBh4HLj9wiV1Dn0QoXGbvFAWj2hSItVTlCmBF1hqKlIyUBVFLPg==", + "node_modules/mermaid/node_modules/micromark-util-types": { + 
"version": "1.1.0", + "resolved": "https://registry.npmjs.org/micromark-util-types/-/micromark-util-types-1.1.0.tgz", + "integrity": "sha512-ukRBgie8TIAcacscVHSiddHjO4k/q3pnedmzMQ4iwDcK0FtFCohKOlFbaOL/mPgfnPsL3C1ZyxJa4sbWrBl3jg==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "license": "MIT" + }, + "node_modules/mermaid/node_modules/unist-util-stringify-position": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/unist-util-stringify-position/-/unist-util-stringify-position-3.0.3.tgz", + "integrity": "sha512-k5GzIBZ/QatR8N5X2y+drfpWG8IDBzdnVj6OInRNWm1oXrzydiaAT2OQiA8DPRRZyAKb9b6I2a6PxYklZD0gKg==", + "license": "MIT", "dependencies": { - "@types/mdast": "^4.0.0" + "@types/unist": "^2.0.0" }, "funding": { "type": "opencollective", "url": "https://opencollective.com/unified" } }, - "node_modules/mdn-data": { - "version": "2.0.30", - "resolved": "https://registry.npmjs.org/mdn-data/-/mdn-data-2.0.30.tgz", - "integrity": "sha512-GaqWWShW4kv/G9IEucWScBx9G1/vsFZZJUO+tD26M8J8z3Kw5RDQjaoZe03YAClgeS/SWPOcb4nkFBTEi5DUEA==" - }, - "node_modules/media-typer": { - "version": "0.3.0", - "resolved": "https://registry.npmjs.org/media-typer/-/media-typer-0.3.0.tgz", - "integrity": "sha512-dq+qelQ9akHpcOl/gUVRTxVIOkAJ1wR3QAvb4RsVjS8oVoFjDGTc679wJYmUmknUF5HwMLOgb5O+a3KxfWapPQ==", - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/memfs": { - "version": "3.5.3", - "resolved": "https://registry.npmjs.org/memfs/-/memfs-3.5.3.tgz", - "integrity": "sha512-UERzLsxzllchadvbPs5aolHh65ISpKpM+ccLbOJ8/vvpBKmAWf+la7dXFy7Mr0ySHbdHrFv5kGFCUHHe6GFEmw==", - "dependencies": { - "fs-monkey": "^1.0.4" - }, - "engines": { - "node": ">= 4.0.0" - } - }, - "node_modules/merge-descriptors": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/merge-descriptors/-/merge-descriptors-1.0.1.tgz", - "integrity": "sha512-cCi6g3/Zr1iqQi6ySbseM1Xvooa98N0w31jzUYrXPX2xqObmFGHJ0tQ5u74H3mVh7wLouTseZyYIq39g8cNp1w==" - }, - "node_modules/merge-stream": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/merge-stream/-/merge-stream-2.0.0.tgz", - "integrity": "sha512-abv/qOcuPfk3URPfDzmZU1LKmuw8kT+0nIHvKrKgFrwifol/doWcdA4ZqsWQ8ENrFKkd67Mfpo/LovbIUsbt3w==" - }, - "node_modules/merge2": { - "version": "1.4.1", - "resolved": "https://registry.npmjs.org/merge2/-/merge2-1.4.1.tgz", - "integrity": "sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg==", - "engines": { - "node": ">= 8" + "node_modules/mermaid/node_modules/uuid": { + "version": "9.0.1", + "resolved": "https://registry.npmjs.org/uuid/-/uuid-9.0.1.tgz", + "integrity": "sha512-b+1eJOlsR9K8HJpow9Ok3fiWOWSIcIzXodvv0rQjVoOVNpWMpxf1wZNpt4y9h10odCNrqnYp1OBzRktckBe3sA==", + "funding": [ + "https://github.com/sponsors/broofa", + "https://github.com/sponsors/ctavan" + ], + "license": "MIT", + "bin": { + "uuid": "dist/bin/uuid" } }, "node_modules/methods": { @@ -10818,11 +11845,12 @@ ] }, "node_modules/micromatch": { - "version": "4.0.5", - "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-4.0.5.tgz", - "integrity": "sha512-DMy+ERcEW2q8Z2Po+WNXuw3c5YaUSFjAO5GsJqfEl7UjvtIuFKO6ZrKvcItdy98dwFI2N1tg3zNIdKaQT+aNdA==", + "version": "4.0.8", + "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-4.0.8.tgz", + "integrity": "sha512-PXwfBhYu0hBCPw8Dn0E+WDYb7af3dSLVWKi3HGv84IdF4TyFoC0ysxFd0Goxw7nSv4T/PzEJQxsYsEiFCKo2BA==", + "license": "MIT", 
"dependencies": { - "braces": "^3.0.2", + "braces": "^3.0.3", "picomatch": "^2.3.1" }, "engines": { @@ -10833,6 +11861,7 @@ "version": "1.6.0", "resolved": "https://registry.npmjs.org/mime/-/mime-1.6.0.tgz", "integrity": "sha512-x0Vn8spI+wuJ1O6S7gnbaQg8Pxh4NNHb7KSINmEWKiPE4RKOplvijn+NkmYmmRgP68mc70j2EbeTFRsrswaQeg==", + "license": "MIT", "bin": { "mime": "cli.js" }, @@ -10879,11 +11908,13 @@ } }, "node_modules/mini-css-extract-plugin": { - "version": "2.7.6", - "resolved": "https://registry.npmjs.org/mini-css-extract-plugin/-/mini-css-extract-plugin-2.7.6.tgz", - "integrity": "sha512-Qk7HcgaPkGG6eD77mLvZS1nmxlao3j+9PkrT9Uc7HAE1id3F41+DdBRYRYkbyfNRGzm8/YWtzhw7nVPmwhqTQw==", + "version": "2.9.2", + "resolved": "https://registry.npmjs.org/mini-css-extract-plugin/-/mini-css-extract-plugin-2.9.2.tgz", + "integrity": "sha512-GJuACcS//jtq4kCtd5ii/M0SZf7OZRH+BxdqXZHaJfb8TJiVl+NgQRPwiYt2EuqeSkNydn/7vP+bcE27C5mb9w==", + "license": "MIT", "dependencies": { - "schema-utils": "^4.0.0" + "schema-utils": "^4.0.0", + "tapable": "^2.2.1" }, "engines": { "node": ">= 12.13.0" @@ -10928,6 +11959,15 @@ "node": ">=16 || 14 >=14.17" } }, + "node_modules/mri": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/mri/-/mri-1.2.0.tgz", + "integrity": "sha512-tzzskb3bG8LvYGFF/mDTpq3jpI6Q9wc3LEmBaghu+DdCssd1FakN7Bc0hVNmEyGq1bq3RgfkCb3cmQLpNPOroA==", + "license": "MIT", + "engines": { + "node": ">=4" + } + }, "node_modules/mrmime": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/mrmime/-/mrmime-2.0.0.tgz", @@ -10937,9 +11977,10 @@ } }, "node_modules/ms": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", - "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==" + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", + "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", + "license": "MIT" }, "node_modules/multicast-dns": { "version": "7.2.5", @@ -10964,15 +12005,16 @@ } }, "node_modules/nanoid": { - "version": "3.3.7", - "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.7.tgz", - "integrity": "sha512-eSRppjcPIatRIMC1U6UngP8XFcz8MQWGQdt1MTBQ7NaAmvXDfvNxbvWV3x2y6CdEUciCSsDHDQZbhYaB8QEo2g==", + "version": "3.3.11", + "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.11.tgz", + "integrity": "sha512-N8SpfPUnUp1bK+PMYW8qSWdl9U+wwNWI4QKxOYDy9JAro3WMX7p2OeVRF9v+347pnakNevPmiHhNmZ2HbFA76w==", "funding": [ { "type": "github", "url": "https://github.com/sponsors/ai" } ], + "license": "MIT", "bin": { "nanoid": "bin/nanoid.cjs" }, @@ -11025,9 +12067,16 @@ } }, "node_modules/node-releases": { - "version": "2.0.14", - "resolved": "https://registry.npmjs.org/node-releases/-/node-releases-2.0.14.tgz", - "integrity": "sha512-y10wOWt8yZpqXmOgRo77WaHEmhYQYGNA6y421PKsKYWEK8aW+cqAphborZDhqfyKrbZEN92CN1X2KbafY2s7Yw==" + "version": "2.0.19", + "resolved": "https://registry.npmjs.org/node-releases/-/node-releases-2.0.19.tgz", + "integrity": "sha512-xxOWJsBKtzAq7DY0J+DTzuz58K8e7sJbdgwkbMWQe8UYB6ekmsQ45q0M/tJDsGaZmbC+l7n57UV8Hl5tHxO9uw==", + "license": "MIT" + }, + "node_modules/non-layered-tidy-tree-layout": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/non-layered-tidy-tree-layout/-/non-layered-tidy-tree-layout-2.0.2.tgz", + "integrity": "sha512-gkXMxRzUH+PB0ax9dUN0yYF0S25BqeAYqhgMaLUFmpXLEk7Fcu8f4emJuOAY0V8kjDICxROIKsTAKsV/v355xw==", + "license": "MIT" }, "node_modules/normalize-path": { "version": 
"3.0.0", @@ -11090,9 +12139,13 @@ } }, "node_modules/object-inspect": { - "version": "1.13.1", - "resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.13.1.tgz", - "integrity": "sha512-5qoj1RUiKOMsCCNLV1CBiPYE10sziTsnmNxkAI/rZhiD63CF7IqdFGC/XzjWjpSgLf0LxXX3bDFIh0E18f6UhQ==", + "version": "1.13.4", + "resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.13.4.tgz", + "integrity": "sha512-W67iLl4J2EXEGTbfeHCffrjDfitvLANg0UlX3wFUUSTx92KXRFegMHUVgSqE+wvhAbi4WqjGg9czysTV2Epbew==", + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, "funding": { "url": "https://github.com/sponsors/ljharb" } @@ -11131,6 +12184,7 @@ "version": "2.4.1", "resolved": "https://registry.npmjs.org/on-finished/-/on-finished-2.4.1.tgz", "integrity": "sha512-oVlzkg3ENAhCk2zdv7IJwd/QUD4z2RxRwpkcGY8psCVcCYZNq4wYnVWALHM+brtuJjePWiYF/ClmuDr8Ch5+kg==", + "license": "MIT", "dependencies": { "ee-first": "1.1.1" }, @@ -11443,9 +12497,10 @@ } }, "node_modules/path-to-regexp": { - "version": "1.8.0", - "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-1.8.0.tgz", - "integrity": "sha512-n43JRhlUKUAlibEJhPeir1ncUID16QnEjNpwzNdO3Lm4ywrBpBZ5oLD0I6br9evr1Y9JTqwRtAh7JLoOzAQdVA==", + "version": "1.9.0", + "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-1.9.0.tgz", + "integrity": "sha512-xIp7/apCFJuUHdDLWe8O1HIkb0kQrOMb/0u6FXQjemHn/ii5LrIzU6bdECnsiTF/GjZkMEKg1xdiZwNqDYlZ6g==", + "license": "MIT", "dependencies": { "isarray": "0.0.1" } @@ -11469,9 +12524,10 @@ } }, "node_modules/picocolors": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.0.0.tgz", - "integrity": "sha512-1fygroTLlHu66zi26VoTDv8yRgm0Fccecssto+MhsZ0D/DGW2sm8E8AjW7NU5VVTRt5GxbeZ5qBuJr+HyLYkjQ==" + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.1.1.tgz", + "integrity": "sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA==", + "license": "ISC" }, "node_modules/picomatch": { "version": "2.3.1", @@ -12286,9 +13342,9 @@ } }, "node_modules/prismjs": { - "version": "1.29.0", - "resolved": "https://registry.npmjs.org/prismjs/-/prismjs-1.29.0.tgz", - "integrity": "sha512-Kx/1w86q/epKcmte75LNrEoT+lX8pBpavuAbvJWRXar7Hz8jrtF+e3vY751p0R8H9HdArwaCTNDDzHg/ScJK1Q==", + "version": "1.30.0", + "resolved": "https://registry.npmjs.org/prismjs/-/prismjs-1.30.0.tgz", + "integrity": "sha512-DEvV2ZF2r2/63V+tK8hQvrR2ZGn10srHbXviTlcv7Kpzw8jWiNTqbVgjO3IY8RxrrOUF8VPMQQFysYYYv0YZxw==", "license": "MIT", "engines": { "node": ">=6" @@ -12355,11 +13411,6 @@ "node": ">= 0.10" } }, - "node_modules/punycode": { - "version": "1.4.1", - "resolved": "https://registry.npmjs.org/punycode/-/punycode-1.4.1.tgz", - "integrity": "sha512-jmYNElW7yvO7TV33CjSmvSiE2yco3bV2czu/OzDKdMNVZQWfxCblURLhf+47syQRBntjfLdd/H0egrzIG+oaFQ==" - }, "node_modules/pupa": { "version": "3.1.0", "resolved": "https://registry.npmjs.org/pupa/-/pupa-3.1.0.tgz", @@ -12375,11 +13426,12 @@ } }, "node_modules/qs": { - "version": "6.11.0", - "resolved": "https://registry.npmjs.org/qs/-/qs-6.11.0.tgz", - "integrity": "sha512-MvjoMCJwEarSbUYk5O+nmoSzSutSsTwF85zcHPQ9OrlFoZOYIjaqBAJIqIXjptyD5vThxGq52Xu/MaJzRkIk4Q==", + "version": "6.13.0", + "resolved": "https://registry.npmjs.org/qs/-/qs-6.13.0.tgz", + "integrity": "sha512-+38qI9SOr8tfZ4QmJNplMUxqjbe7LKvvZgWdExBOmd+egZTtjLB67Gu0HRX3u/XOq7UU2Nx6nsjvS16Z9uwfpg==", + "license": "BSD-3-Clause", "dependencies": { - "side-channel": "^1.0.4" + "side-channel": "^1.0.6" }, "engines": { "node": 
">=0.6" @@ -12443,9 +13495,10 @@ } }, "node_modules/raw-body": { - "version": "2.5.1", - "resolved": "https://registry.npmjs.org/raw-body/-/raw-body-2.5.1.tgz", - "integrity": "sha512-qqJBtEyVgS0ZmPGdCFPWJ3FreoqvG4MVQln/kCgF7Olq95IbOp0/BWyMwbdtn4VTvkM8Y7khCQ2Xgk/tcrCXig==", + "version": "2.5.2", + "resolved": "https://registry.npmjs.org/raw-body/-/raw-body-2.5.2.tgz", + "integrity": "sha512-8zGqypfENjCIqGhgXToC8aB2r7YrBX+AQAfIPs/Mlk+BtPTztOvTS01NRW/3Eh60J+a48lt8qsCzirQ6loCVfA==", + "license": "MIT", "dependencies": { "bytes": "3.1.2", "http-errors": "2.0.0", @@ -12460,6 +13513,7 @@ "version": "3.1.2", "resolved": "https://registry.npmjs.org/bytes/-/bytes-3.1.2.tgz", "integrity": "sha512-/Nf7TyzTx6S3yRJObOAV7956r8cr2+Oj8AC5dt8wSP3BQAoeX58NoHyCU8P8zGkNXStjTSi6fzO6F0pBdcYbEg==", + "license": "MIT", "engines": { "node": ">= 0.8" } @@ -12807,12 +13861,14 @@ "node_modules/regenerate": { "version": "1.4.2", "resolved": "https://registry.npmjs.org/regenerate/-/regenerate-1.4.2.tgz", - "integrity": "sha512-zrceR/XhGYU/d/opr2EKO7aRHUeiBI8qjtfHqADTwZd6Szfy16la6kqD0MIUs5z5hx6AaKa+PixpPrR289+I0A==" + "integrity": "sha512-zrceR/XhGYU/d/opr2EKO7aRHUeiBI8qjtfHqADTwZd6Szfy16la6kqD0MIUs5z5hx6AaKa+PixpPrR289+I0A==", + "license": "MIT" }, "node_modules/regenerate-unicode-properties": { - "version": "10.1.1", - "resolved": "https://registry.npmjs.org/regenerate-unicode-properties/-/regenerate-unicode-properties-10.1.1.tgz", - "integrity": "sha512-X007RyZLsCJVVrjgEFVpLUTZwyOZk3oiL75ZcuYjlIWd6rNJtOjkBwQc5AsRrpbKVkxN6sklw/k/9m2jJYOf8Q==", + "version": "10.2.0", + "resolved": "https://registry.npmjs.org/regenerate-unicode-properties/-/regenerate-unicode-properties-10.2.0.tgz", + "integrity": "sha512-DqHn3DwbmmPVzeKj9woBadqmXxLvQoQIwu7nopMc72ztvxVmVk2SBhSnx67zuye5TP+lJsb/TBQsjLKhnDf3MA==", + "license": "MIT", "dependencies": { "regenerate": "^1.4.2" }, @@ -12820,28 +13876,16 @@ "node": ">=4" } }, - "node_modules/regenerator-runtime": { - "version": "0.14.1", - "resolved": "https://registry.npmjs.org/regenerator-runtime/-/regenerator-runtime-0.14.1.tgz", - "integrity": "sha512-dYnhHh0nJoMfnkZs6GmmhFknAGRrLznOu5nc9ML+EJxGvrx6H7teuevqVqCuPcPK//3eDrrjQhehXVx9cnkGdw==" - }, - "node_modules/regenerator-transform": { - "version": "0.15.2", - "resolved": "https://registry.npmjs.org/regenerator-transform/-/regenerator-transform-0.15.2.tgz", - "integrity": "sha512-hfMp2BoF0qOk3uc5V20ALGDS2ddjQaLrdl7xrGXvAIow7qeWRM2VA2HuCHkUKk9slq3VwEwLNK3DFBqDfPGYtg==", - "dependencies": { - "@babel/runtime": "^7.8.4" - } - }, "node_modules/regexpu-core": { - "version": "5.3.2", - "resolved": "https://registry.npmjs.org/regexpu-core/-/regexpu-core-5.3.2.tgz", - "integrity": "sha512-RAM5FlZz+Lhmo7db9L298p2vHP5ZywrVXmVXpmAD9GuL5MPH6t9ROw1iA/wfHkQ76Qe7AaPF0nGuim96/IrQMQ==", + "version": "6.2.0", + "resolved": "https://registry.npmjs.org/regexpu-core/-/regexpu-core-6.2.0.tgz", + "integrity": "sha512-H66BPQMrv+V16t8xtmq+UC0CBpiTBA60V8ibS1QVReIp8T1z8hwFxqcGzm9K6lgsN7sB5edVH8a+ze6Fqm4weA==", + "license": "MIT", "dependencies": { - "@babel/regjsgen": "^0.8.0", "regenerate": "^1.4.2", - "regenerate-unicode-properties": "^10.1.0", - "regjsparser": "^0.9.1", + "regenerate-unicode-properties": "^10.2.0", + "regjsgen": "^0.8.0", + "regjsparser": "^0.12.0", "unicode-match-property-ecmascript": "^2.0.0", "unicode-match-property-value-ecmascript": "^2.1.0" }, @@ -12874,23 +13918,34 @@ "url": "https://github.com/sponsors/sindresorhus" } }, + "node_modules/regjsgen": { + "version": "0.8.0", + "resolved": 
"https://registry.npmjs.org/regjsgen/-/regjsgen-0.8.0.tgz", + "integrity": "sha512-RvwtGe3d7LvWiDQXeQw8p5asZUmfU1G/l6WbUXeHta7Y2PEIvBTwH6E2EfmYUK8pxcxEdEmaomqyp0vZZ7C+3Q==", + "license": "MIT" + }, "node_modules/regjsparser": { - "version": "0.9.1", - "resolved": "https://registry.npmjs.org/regjsparser/-/regjsparser-0.9.1.tgz", - "integrity": "sha512-dQUtn90WanSNl+7mQKcXAgZxvUe7Z0SqXlgzv0za4LwiUhyzBC58yQO3liFoUgu8GiJVInAhJjkj1N0EtQ5nkQ==", + "version": "0.12.0", + "resolved": "https://registry.npmjs.org/regjsparser/-/regjsparser-0.12.0.tgz", + "integrity": "sha512-cnE+y8bz4NhMjISKbgeVJtqNbtf5QpjZP+Bslo+UqkIt9QPnX9q095eiRRASJG1/tz6dlNr6Z5NsBiWYokp6EQ==", + "license": "BSD-2-Clause", "dependencies": { - "jsesc": "~0.5.0" + "jsesc": "~3.0.2" }, "bin": { "regjsparser": "bin/parser" } }, "node_modules/regjsparser/node_modules/jsesc": { - "version": "0.5.0", - "resolved": "https://registry.npmjs.org/jsesc/-/jsesc-0.5.0.tgz", - "integrity": "sha512-uZz5UnB7u4T9LvwmFqXii7pZSouaRPorGs5who1Ip7VO0wxanFvBL7GkM6dTHlgX+jhBApRetaWpnDabOeTcnA==", + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/jsesc/-/jsesc-3.0.2.tgz", + "integrity": "sha512-xKqzzWXDttJuOcawBt4KnKHHIf5oQ/Cxax+0PWFG+DFDgHNAdi+TXECADI+RYiFUMmx8792xsMbbgXj4CwnP4g==", + "license": "MIT", "bin": { "jsesc": "bin/jsesc" + }, + "engines": { + "node": ">=6" } }, "node_modules/rehype-raw": { @@ -13228,6 +14283,12 @@ "url": "https://github.com/sponsors/isaacs" } }, + "node_modules/robust-predicates": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/robust-predicates/-/robust-predicates-3.0.2.tgz", + "integrity": "sha512-IXgzBWvWQwE6PrDI05OvmXUIruQTcoMDzRsOd5CDvHCVLcLHMTSYvOK5Cm46kWqlV3yAbuSpBZdJ5oP5OUoStg==", + "license": "Unlicense" + }, "node_modules/rtl-detect": { "version": "1.1.2", "resolved": "https://registry.npmjs.org/rtl-detect/-/rtl-detect-1.1.2.tgz", @@ -13273,6 +14334,24 @@ "queue-microtask": "^1.2.2" } }, + "node_modules/rw": { + "version": "1.3.3", + "resolved": "https://registry.npmjs.org/rw/-/rw-1.3.3.tgz", + "integrity": "sha512-PdhdWy89SiZogBLaw42zdeqtRJ//zFd2PgQavcICDUgJT5oW10QCRKbJ6bg4r0/UY2M6BWd5tkxuGFRvCkgfHQ==", + "license": "BSD-3-Clause" + }, + "node_modules/sade": { + "version": "1.8.1", + "resolved": "https://registry.npmjs.org/sade/-/sade-1.8.1.tgz", + "integrity": "sha512-xal3CZX1Xlo/k4ApwCFrHVACi9fBqJ7V+mwhBsuf/1IOKbBy098Fex+Wa/5QMubw09pSZ/u8EY8PWgevJsXp1A==", + "license": "MIT", + "dependencies": { + "mri": "^1.1.0" + }, + "engines": { + "node": ">=6" + } + }, "node_modules/safe-buffer": { "version": "5.2.1", "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz", @@ -13311,9 +14390,10 @@ } }, "node_modules/schema-utils": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-4.2.0.tgz", - "integrity": "sha512-L0jRsrPpjdckP3oPug3/VxNKt2trR8TcabrM6FOAAlvC/9Phcmm+cuAgTlxBqdBR1WJx7Naj9WHw+aOmheSVbw==", + "version": "4.3.2", + "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-4.3.2.tgz", + "integrity": "sha512-Gn/JaSk/Mt9gYubxTtSn/QCV4em9mpAPiR1rqy/Ocu19u/G9J5WWdNoUT4SiV6mFC3y6cxyFcFwdzPM3FgxGAQ==", + "license": "MIT", "dependencies": { "@types/json-schema": "^7.0.9", "ajv": "^8.9.0", @@ -13321,7 +14401,7 @@ "ajv-keywords": "^5.1.0" }, "engines": { - "node": ">= 12.13.0" + "node": ">= 10.13.0" }, "funding": { "type": "opencollective", @@ -13409,9 +14489,10 @@ "integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==" }, "node_modules/send": { - "version": 
"0.18.0", - "resolved": "https://registry.npmjs.org/send/-/send-0.18.0.tgz", - "integrity": "sha512-qqWzuOjSFOuqPjFe4NOsMLafToQQwBSOEpS+FwEt3A2V3vKubTquT3vmLTQpFgMXp8AlFWFuP1qKaJZOtPpVXg==", + "version": "0.19.0", + "resolved": "https://registry.npmjs.org/send/-/send-0.19.0.tgz", + "integrity": "sha512-dW41u5VfLXu8SJh5bwRmyYUbAoSB3c9uQh6L8h/KtsFREPWpbX1lrljJo186Jc4nmci/sGUZ9a0a0J2zgfq2hw==", + "license": "MIT", "dependencies": { "debug": "2.6.9", "depd": "2.0.0", @@ -13435,6 +14516,7 @@ "version": "2.6.9", "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", + "license": "MIT", "dependencies": { "ms": "2.0.0" } @@ -13442,48 +14524,56 @@ "node_modules/send/node_modules/debug/node_modules/ms": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", - "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==" + "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==", + "license": "MIT" }, - "node_modules/send/node_modules/ms": { - "version": "2.1.3", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", - "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==" + "node_modules/send/node_modules/encodeurl": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/encodeurl/-/encodeurl-1.0.2.tgz", + "integrity": "sha512-TPJXq8JqFaVYm2CWmPvnP2Iyo4ZSM7/QKcSmuMLDObfpH5fi7RUGmd/rTDf+rut/saiDiQEeVTNgAmJEdAOx0w==", + "license": "MIT", + "engines": { + "node": ">= 0.8" + } }, "node_modules/send/node_modules/range-parser": { "version": "1.2.1", "resolved": "https://registry.npmjs.org/range-parser/-/range-parser-1.2.1.tgz", "integrity": "sha512-Hrgsx+orqoygnmhFbKaHE6c296J+HTAQXoxEF6gNupROmmGJRoyzfG3ccAveqCBrwr/2yxQ5BVd/GTl5agOwSg==", + "license": "MIT", "engines": { "node": ">= 0.6" } }, "node_modules/serialize-javascript": { - "version": "6.0.1", - "resolved": "https://registry.npmjs.org/serialize-javascript/-/serialize-javascript-6.0.1.tgz", - "integrity": "sha512-owoXEFjWRllis8/M1Q+Cw5k8ZH40e3zhp/ovX+Xr/vi1qj6QesbyXXViFbpNvWvPNAD62SutwEXavefrLJWj7w==", + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/serialize-javascript/-/serialize-javascript-6.0.2.tgz", + "integrity": "sha512-Saa1xPByTTq2gdeFZYLLo+RFE35NHZkAbqZeWNd3BpzppeVisAqpDjcp8dyf6uIvEqJRd46jemmyA4iFIeVk8g==", + "license": "BSD-3-Clause", "dependencies": { "randombytes": "^2.1.0" } }, "node_modules/serve-handler": { - "version": "6.1.5", - "resolved": "https://registry.npmjs.org/serve-handler/-/serve-handler-6.1.5.tgz", - "integrity": "sha512-ijPFle6Hwe8zfmBxJdE+5fta53fdIY0lHISJvuikXB3VYFafRjMRpOffSPvCYsbKyBA7pvy9oYr/BT1O3EArlg==", + "version": "6.1.6", + "resolved": "https://registry.npmjs.org/serve-handler/-/serve-handler-6.1.6.tgz", + "integrity": "sha512-x5RL9Y2p5+Sh3D38Fh9i/iQ5ZK+e4xuXRd/pGbM4D13tgo/MGwbttUk8emytcr1YYzBYs+apnUngBDFYfpjPuQ==", + "license": "MIT", "dependencies": { "bytes": "3.0.0", "content-disposition": "0.5.2", - "fast-url-parser": "1.1.3", "mime-types": "2.1.18", "minimatch": "3.1.2", "path-is-inside": "1.0.2", - "path-to-regexp": "2.2.1", + "path-to-regexp": "3.3.0", "range-parser": "1.2.0" } }, "node_modules/serve-handler/node_modules/path-to-regexp": { - "version": "2.2.1", - "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-2.2.1.tgz", - "integrity": 
"sha512-gu9bD6Ta5bwGrrU8muHzVOBFFREpp2iRkVfhBJahwJ6p6Xw20SjT0MxLnwkjOibQmGSYhiUnf2FLe7k+jcFmGQ==" + "version": "3.3.0", + "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-3.3.0.tgz", + "integrity": "sha512-qyCH421YQPS2WFDxDjftfc1ZR5WKQzVzqsp4n9M2kQhVOo/ByahFoUNJfl58kOcEGfQ//7weFTDhm+ss8Ecxgw==", + "license": "MIT" }, "node_modules/serve-index": { "version": "1.9.1", @@ -13556,14 +14646,15 @@ } }, "node_modules/serve-static": { - "version": "1.15.0", - "resolved": "https://registry.npmjs.org/serve-static/-/serve-static-1.15.0.tgz", - "integrity": "sha512-XGuRDNjXUijsUL0vl6nSD7cwURuzEgglbOaFuZM9g3kwDXOWVTck0jLzjPzGD+TazWbboZYu52/9/XPdUgne9g==", + "version": "1.16.2", + "resolved": "https://registry.npmjs.org/serve-static/-/serve-static-1.16.2.tgz", + "integrity": "sha512-VqpjJZKadQB/PEbEwvFdO43Ax5dFBZ2UECszz8bQ7pi7wt//PWe1P6MN7eCnjsatYtBT6EuiClbjSWP2WrIoTw==", + "license": "MIT", "dependencies": { - "encodeurl": "~1.0.2", + "encodeurl": "~2.0.0", "escape-html": "~1.0.3", "parseurl": "~1.3.3", - "send": "0.18.0" + "send": "0.19.0" }, "engines": { "node": ">= 0.8.0" @@ -13586,7 +14677,8 @@ "node_modules/setprototypeof": { "version": "1.2.0", "resolved": "https://registry.npmjs.org/setprototypeof/-/setprototypeof-1.2.0.tgz", - "integrity": "sha512-E5LDX7Wrp85Kil5bhZv46j8jOeboKq5JMmYM3gVGdGH8xFpPWXUMsNrlODCrkoxMEeNi/XZIwuRvY4XNwYMJpw==" + "integrity": "sha512-E5LDX7Wrp85Kil5bhZv46j8jOeboKq5JMmYM3gVGdGH8xFpPWXUMsNrlODCrkoxMEeNi/XZIwuRvY4XNwYMJpw==", + "license": "ISC" }, "node_modules/shallow-clone": { "version": "3.0.1", @@ -13648,13 +14740,72 @@ } }, "node_modules/side-channel": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/side-channel/-/side-channel-1.0.4.tgz", - "integrity": "sha512-q5XPytqFEIKHkGdiMIrY10mvLRvnQh42/+GoBlFW3b2LXLE2xxJpZFdm94we0BaoV3RwJyGqg5wS7epxTv0Zvw==", + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/side-channel/-/side-channel-1.1.0.tgz", + "integrity": "sha512-ZX99e6tRweoUXqR+VBrslhda51Nh5MTQwou5tnUDgbtyM0dBgmhEDtWGP/xbKn6hqfPRHujUNwz5fy/wbbhnpw==", + "license": "MIT", + "dependencies": { + "es-errors": "^1.3.0", + "object-inspect": "^1.13.3", + "side-channel-list": "^1.0.0", + "side-channel-map": "^1.0.1", + "side-channel-weakmap": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/side-channel-list": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/side-channel-list/-/side-channel-list-1.0.0.tgz", + "integrity": "sha512-FCLHtRD/gnpCiCHEiJLOwdmFP+wzCmDEkc9y7NsYxeF4u7Btsn1ZuwgwJGxImImHicJArLP4R0yX4c2KCrMrTA==", + "license": "MIT", + "dependencies": { + "es-errors": "^1.3.0", + "object-inspect": "^1.13.3" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/side-channel-map": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/side-channel-map/-/side-channel-map-1.0.1.tgz", + "integrity": "sha512-VCjCNfgMsby3tTdo02nbjtM/ewra6jPHmpThenkTYh8pG9ucZ/1P8So4u4FGBek/BjpOVsDCMoLA/iuBKIFXRA==", + "license": "MIT", + "dependencies": { + "call-bound": "^1.0.2", + "es-errors": "^1.3.0", + "get-intrinsic": "^1.2.5", + "object-inspect": "^1.13.3" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/side-channel-weakmap": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/side-channel-weakmap/-/side-channel-weakmap-1.0.2.tgz", + "integrity": 
"sha512-WPS/HvHQTYnHisLo9McqBHOJk2FkHO/tlpvldyrnem4aeQp4hai3gythswg6p01oSoTl58rcpiFAjF2br2Ak2A==", + "license": "MIT", "dependencies": { - "call-bind": "^1.0.0", - "get-intrinsic": "^1.0.2", - "object-inspect": "^1.9.0" + "call-bound": "^1.0.2", + "es-errors": "^1.3.0", + "get-intrinsic": "^1.2.5", + "object-inspect": "^1.13.3", + "side-channel-map": "^1.0.1" + }, + "engines": { + "node": ">= 0.4" }, "funding": { "url": "https://github.com/sponsors/ljharb" @@ -13843,6 +14994,7 @@ "version": "2.0.1", "resolved": "https://registry.npmjs.org/statuses/-/statuses-2.0.1.tgz", "integrity": "sha512-RwNA9Z/7PrK06rYLIzFMlaF+l73iwpzsqRIFgbMLbTcLD6cOao82TaWefPXQvB2fOC4AjuYSEndS7N/mTCbkdQ==", + "license": "MIT", "engines": { "node": ">= 0.8" } @@ -14020,6 +15172,12 @@ "postcss": "^8.4.31" } }, + "node_modules/stylis": { + "version": "4.3.6", + "resolved": "https://registry.npmjs.org/stylis/-/stylis-4.3.6.tgz", + "integrity": "sha512-yQ3rwFWRfwNUY7H5vpU0wfdkNSnvnJinhF9830Swlaxl03zsOjCfmX0ugac+3LtK0lYSgwL/KXc8oYL3mG4YFQ==", + "license": "MIT" + }, "node_modules/sucrase": { "version": "3.35.0", "resolved": "https://registry.npmjs.org/sucrase/-/sucrase-3.35.0.tgz", @@ -14223,12 +15381,13 @@ } }, "node_modules/terser": { - "version": "5.26.0", - "resolved": "https://registry.npmjs.org/terser/-/terser-5.26.0.tgz", - "integrity": "sha512-dytTGoE2oHgbNV9nTzgBEPaqAWvcJNl66VZ0BkJqlvp71IjO8CxdBx/ykCNb47cLnCmCvRZ6ZR0tLkqvZCdVBQ==", + "version": "5.40.0", + "resolved": "https://registry.npmjs.org/terser/-/terser-5.40.0.tgz", + "integrity": "sha512-cfeKl/jjwSR5ar7d0FGmave9hFGJT8obyo0z+CrQOylLDbk7X81nPU6vq9VORa5jU30SkDnT2FXjLbR8HLP+xA==", + "license": "BSD-2-Clause", "dependencies": { "@jridgewell/source-map": "^0.3.3", - "acorn": "^8.8.2", + "acorn": "^8.14.0", "commander": "^2.20.0", "source-map-support": "~0.5.20" }, @@ -14240,15 +15399,16 @@ } }, "node_modules/terser-webpack-plugin": { - "version": "5.3.10", - "resolved": "https://registry.npmjs.org/terser-webpack-plugin/-/terser-webpack-plugin-5.3.10.tgz", - "integrity": "sha512-BKFPWlPDndPs+NGGCr1U59t0XScL5317Y0UReNrHaw9/FwhPENlq6bfgs+4yPfyP51vqC1bQ4rp1EfXW5ZSH9w==", + "version": "5.3.14", + "resolved": "https://registry.npmjs.org/terser-webpack-plugin/-/terser-webpack-plugin-5.3.14.tgz", + "integrity": "sha512-vkZjpUjb6OMS7dhV+tILUW6BhpDR7P2L/aQSAv+Uwk+m8KATX9EccViHTJR2qDtACKPIYndLGCyl3FMo+r2LMw==", + "license": "MIT", "dependencies": { - "@jridgewell/trace-mapping": "^0.3.20", + "@jridgewell/trace-mapping": "^0.3.25", "jest-worker": "^27.4.5", - "schema-utils": "^3.1.1", - "serialize-javascript": "^6.0.1", - "terser": "^5.26.0" + "schema-utils": "^4.3.0", + "serialize-javascript": "^6.0.2", + "terser": "^5.31.1" }, "engines": { "node": ">= 10.13.0" @@ -14272,29 +15432,6 @@ } } }, - "node_modules/terser-webpack-plugin/node_modules/ajv": { - "version": "6.12.6", - "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", - "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==", - "dependencies": { - "fast-deep-equal": "^3.1.1", - "fast-json-stable-stringify": "^2.0.0", - "json-schema-traverse": "^0.4.1", - "uri-js": "^4.2.2" - }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/epoberezkin" - } - }, - "node_modules/terser-webpack-plugin/node_modules/ajv-keywords": { - "version": "3.5.2", - "resolved": "https://registry.npmjs.org/ajv-keywords/-/ajv-keywords-3.5.2.tgz", - "integrity": 
"sha512-5p6WTN0DdTGVQk6VjcEju19IgaHudalcfabD7yhDGeA6bcQnmL+CpveLJq/3hvfwd1aof6L386Ougkx6RfyMIQ==", - "peerDependencies": { - "ajv": "^6.9.1" - } - }, "node_modules/terser-webpack-plugin/node_modules/jest-worker": { "version": "27.5.1", "resolved": "https://registry.npmjs.org/jest-worker/-/jest-worker-27.5.1.tgz", @@ -14308,28 +15445,6 @@ "node": ">= 10.13.0" } }, - "node_modules/terser-webpack-plugin/node_modules/json-schema-traverse": { - "version": "0.4.1", - "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz", - "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==" - }, - "node_modules/terser-webpack-plugin/node_modules/schema-utils": { - "version": "3.3.0", - "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-3.3.0.tgz", - "integrity": "sha512-pN/yOAvcC+5rQ5nERGuwrjLlYvLTbCibnZ1I7B1LaiAz9BRBlE9GMgE/eqV30P7aJQUf7Ddimy/RsbYO/GrVGg==", - "dependencies": { - "@types/json-schema": "^7.0.8", - "ajv": "^6.12.5", - "ajv-keywords": "^3.5.2" - }, - "engines": { - "node": ">= 10.13.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/webpack" - } - }, "node_modules/terser-webpack-plugin/node_modules/supports-color": { "version": "8.1.1", "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-8.1.1.tgz", @@ -14388,18 +15503,11 @@ "resolved": "https://registry.npmjs.org/tiny-warning/-/tiny-warning-1.0.3.tgz", "integrity": "sha512-lBN9zLN/oAf68o3zNXYrdCt1kP8WsiGW8Oo2ka41b2IM5JL/S1CTyX1rW0mb/zSuJun0ZUrDxx4sqvYS2FWzPA==" }, - "node_modules/to-fast-properties": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/to-fast-properties/-/to-fast-properties-2.0.0.tgz", - "integrity": "sha512-/OaKK0xYrs3DmxRYqL/yDc+FxFUVYhDlXMhRmv3z915w2HF1tnN1omB354j8VUGO/hbRzyD6Y3sA7v7GS/ceog==", - "engines": { - "node": ">=4" - } - }, "node_modules/to-regex-range": { "version": "5.0.1", "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz", "integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==", + "license": "MIT", "dependencies": { "is-number": "^7.0.0" }, @@ -14411,6 +15519,7 @@ "version": "1.0.1", "resolved": "https://registry.npmjs.org/toidentifier/-/toidentifier-1.0.1.tgz", "integrity": "sha512-o5sSPKEkg/DIQNmH43V0/uerLrpzVedkUh8tGNvaeXpfpuwjKenlSox/2O/BTlZUtEe+JG7s5YhEz608PlAHRA==", + "license": "MIT", "engines": { "node": ">=0.6" } @@ -14441,6 +15550,15 @@ "url": "https://github.com/sponsors/wooorm" } }, + "node_modules/ts-dedent": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/ts-dedent/-/ts-dedent-2.2.0.tgz", + "integrity": "sha512-q5W7tVM71e2xjHZTlgfTDoPF/SmqKG5hddq9SzR49CH2hayqRKJtQ4mtRlSxKaJlR/+9rEM+mnBHf7I2/BQcpQ==", + "license": "MIT", + "engines": { + "node": ">=6.10" + } + }, "node_modules/ts-interface-checker": { "version": "0.1.13", "resolved": "https://registry.npmjs.org/ts-interface-checker/-/ts-interface-checker-0.1.13.tgz", @@ -14466,6 +15584,7 @@ "version": "1.6.18", "resolved": "https://registry.npmjs.org/type-is/-/type-is-1.6.18.tgz", "integrity": "sha512-TkRKr9sUTxEH8MdfuCSP7VizJyzRNMjj2J2do2Jr3Kym598JVdEksuzPQCnlFPW4ky9Q+iA+ma9BGm06XQBy8g==", + "license": "MIT", "dependencies": { "media-typer": "0.3.0", "mime-types": "~2.1.24" @@ -14478,6 +15597,7 @@ "version": "1.52.0", "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz", "integrity": 
"sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==", + "license": "MIT", "engines": { "node": ">= 0.6" } @@ -14486,6 +15606,7 @@ "version": "2.1.35", "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz", "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==", + "license": "MIT", "dependencies": { "mime-db": "1.52.0" }, @@ -14520,9 +15641,10 @@ "integrity": "sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA==" }, "node_modules/unicode-canonical-property-names-ecmascript": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/unicode-canonical-property-names-ecmascript/-/unicode-canonical-property-names-ecmascript-2.0.0.tgz", - "integrity": "sha512-yY5PpDlfVIU5+y/BSCxAJRBIS1Zc2dDG3Ujq+sR0U+JjUevW2JhocOF+soROYDSaAezOzOKuyyixhD6mBknSmQ==", + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/unicode-canonical-property-names-ecmascript/-/unicode-canonical-property-names-ecmascript-2.0.1.tgz", + "integrity": "sha512-dA8WbNeb2a6oQzAQ55YlT5vQAWGV9WXOsi3SskE3bcCdM0P4SDd+24zS/OCacdRq5BkdsRj9q3Pg6YyQoxIGqg==", + "license": "MIT", "engines": { "node": ">=4" } @@ -14539,6 +15661,7 @@ "version": "2.0.0", "resolved": "https://registry.npmjs.org/unicode-match-property-ecmascript/-/unicode-match-property-ecmascript-2.0.0.tgz", "integrity": "sha512-5kaZCrbp5mmbz5ulBkDkbY0SsPOjKqVS35VpL9ulMPfSl0J0Xsm+9Evphv9CoIZFwre7aJoa94AY6seMKGVN5Q==", + "license": "MIT", "dependencies": { "unicode-canonical-property-names-ecmascript": "^2.0.0", "unicode-property-aliases-ecmascript": "^2.0.0" @@ -14548,9 +15671,10 @@ } }, "node_modules/unicode-match-property-value-ecmascript": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/unicode-match-property-value-ecmascript/-/unicode-match-property-value-ecmascript-2.1.0.tgz", - "integrity": "sha512-qxkjQt6qjg/mYscYMC0XKRn3Rh0wFPlfxB0xkt9CfyTvpX1Ra0+rAmdX2QyAobptSEvuy4RtpPRui6XkV+8wjA==", + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/unicode-match-property-value-ecmascript/-/unicode-match-property-value-ecmascript-2.2.0.tgz", + "integrity": "sha512-4IehN3V/+kkr5YeSSDDQG8QLqO26XpL2XP3GQtqwlT/QYSECAwFztxVHjlbh0+gjJ3XmNLS0zDsbgs9jWKExLg==", + "license": "MIT", "engines": { "node": ">=4" } @@ -14559,6 +15683,7 @@ "version": "2.1.0", "resolved": "https://registry.npmjs.org/unicode-property-aliases-ecmascript/-/unicode-property-aliases-ecmascript-2.1.0.tgz", "integrity": "sha512-6t3foTQI9qne+OZoVQB/8x8rk2k1eVy1gRXhV3oFQ5T6R1dqQ1xtin3XqSlx3+ATBkliTaR/hHyJBm+LVPNM8w==", + "license": "MIT", "engines": { "node": ">=4" } @@ -14695,14 +15820,15 @@ "version": "1.0.0", "resolved": "https://registry.npmjs.org/unpipe/-/unpipe-1.0.0.tgz", "integrity": "sha512-pjy2bYhSsufwWlKwPc+l3cN7+wuJlK6uz0YdJEOlQDbl6jo/YlPi4mb8agUkVC8BF7V8NuzeyPNqRksA3hztKQ==", + "license": "MIT", "engines": { "node": ">= 0.8" } }, "node_modules/update-browserslist-db": { - "version": "1.0.13", - "resolved": "https://registry.npmjs.org/update-browserslist-db/-/update-browserslist-db-1.0.13.tgz", - "integrity": "sha512-xebP81SNcPuNpPP3uzeW1NYXxI3rxyJzF3pD6sH4jE7o/IX+WtSpwnVU+qIsDPyk0d3hmFQ7mjqc6AtV604hbg==", + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/update-browserslist-db/-/update-browserslist-db-1.1.3.tgz", + "integrity": "sha512-UxhIZQ+QInVdunkDAaiazvvT/+fXL5Osr0JZlJulepYu6Jd7qJtDZjlur0emRlT71EN3ScPoE7gvsuIKKNavKw==", "funding": [ { "type": "opencollective", @@ -14717,9 +15843,10 @@ 
"url": "https://github.com/sponsors/ai" } ], + "license": "MIT", "dependencies": { - "escalade": "^3.1.1", - "picocolors": "^1.0.0" + "escalade": "^3.2.0", + "picocolors": "^1.1.1" }, "bin": { "update-browserslist-db": "cli.js" @@ -14938,6 +16065,33 @@ "uuid": "dist/bin/uuid" } }, + "node_modules/uvu": { + "version": "0.5.6", + "resolved": "https://registry.npmjs.org/uvu/-/uvu-0.5.6.tgz", + "integrity": "sha512-+g8ENReyr8YsOc6fv/NVJs2vFdHBnBNdfE49rshrTzDWOlUx4Gq7KOS2GD8eqhy2j+Ejq29+SbKH8yjkAqXqoA==", + "license": "MIT", + "dependencies": { + "dequal": "^2.0.0", + "diff": "^5.0.0", + "kleur": "^4.0.3", + "sade": "^1.7.3" + }, + "bin": { + "uvu": "bin.js" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/uvu/node_modules/kleur": { + "version": "4.1.5", + "resolved": "https://registry.npmjs.org/kleur/-/kleur-4.1.5.tgz", + "integrity": "sha512-o+NO+8WrRiQEE4/7nwRJhN1HWpVmJm511pBHUxPLtp0BUISzlBplORYSmTclCnJvQq2tKu/sgl3xVpkc7ZWuQQ==", + "license": "MIT", + "engines": { + "node": ">=6" + } + }, "node_modules/value-equal": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/value-equal/-/value-equal-1.0.1.tgz", @@ -14992,9 +16146,10 @@ } }, "node_modules/watchpack": { - "version": "2.4.0", - "resolved": "https://registry.npmjs.org/watchpack/-/watchpack-2.4.0.tgz", - "integrity": "sha512-Lcvm7MGST/4fup+ifyKi2hjyIAwcdI4HRgtvTpIUxBRhB+RFtUh8XtDOxUfctVCnhVi+QQj49i91OyvzkJl6cg==", + "version": "2.4.4", + "resolved": "https://registry.npmjs.org/watchpack/-/watchpack-2.4.4.tgz", + "integrity": "sha512-c5EGNOiyxxV5qmTtAB7rbiXxi1ooX1pQKMLX/MIabJjRA0SJBQOjKF+KSVfHkr9U1cADPon0mRiVe/riyaiDUA==", + "license": "MIT", "dependencies": { "glob-to-regexp": "^0.4.1", "graceful-fs": "^4.1.2" @@ -15020,34 +16175,41 @@ "url": "https://github.com/sponsors/wooorm" } }, + "node_modules/web-worker": { + "version": "1.5.0", + "resolved": "https://registry.npmjs.org/web-worker/-/web-worker-1.5.0.tgz", + "integrity": "sha512-RiMReJrTAiA+mBjGONMnjVDP2u3p9R1vkcGz6gDIrOMT3oGuYwX2WRMYI9ipkphSuE5XKEhydbhNEJh4NY9mlw==", + "license": "Apache-2.0" + }, "node_modules/webpack": { - "version": "5.89.0", - "resolved": "https://registry.npmjs.org/webpack/-/webpack-5.89.0.tgz", - "integrity": "sha512-qyfIC10pOr70V+jkmud8tMfajraGCZMBWJtrmuBymQKCrLTRejBI8STDp1MCyZu/QTdZSeacCQYpYNQVOzX5kw==", + "version": "5.99.9", + "resolved": "https://registry.npmjs.org/webpack/-/webpack-5.99.9.tgz", + "integrity": "sha512-brOPwM3JnmOa+7kd3NsmOUOwbDAj8FT9xDsG3IW0MgbN9yZV7Oi/s/+MNQ/EcSMqw7qfoRyXPoeEWT8zLVdVGg==", + "license": "MIT", "dependencies": { - "@types/eslint-scope": "^3.7.3", - "@types/estree": "^1.0.0", - "@webassemblyjs/ast": "^1.11.5", - "@webassemblyjs/wasm-edit": "^1.11.5", - "@webassemblyjs/wasm-parser": "^1.11.5", - "acorn": "^8.7.1", - "acorn-import-assertions": "^1.9.0", - "browserslist": "^4.14.5", + "@types/eslint-scope": "^3.7.7", + "@types/estree": "^1.0.6", + "@types/json-schema": "^7.0.15", + "@webassemblyjs/ast": "^1.14.1", + "@webassemblyjs/wasm-edit": "^1.14.1", + "@webassemblyjs/wasm-parser": "^1.14.1", + "acorn": "^8.14.0", + "browserslist": "^4.24.0", "chrome-trace-event": "^1.0.2", - "enhanced-resolve": "^5.15.0", + "enhanced-resolve": "^5.17.1", "es-module-lexer": "^1.2.1", "eslint-scope": "5.1.1", "events": "^3.2.0", "glob-to-regexp": "^0.4.1", - "graceful-fs": "^4.2.9", + "graceful-fs": "^4.2.11", "json-parse-even-better-errors": "^2.3.1", "loader-runner": "^4.2.0", "mime-types": "^2.1.27", "neo-async": "^2.6.2", - "schema-utils": "^3.2.0", + "schema-utils": "^4.3.2", "tapable": "^2.1.1", - 
"terser-webpack-plugin": "^5.3.7", - "watchpack": "^2.4.0", + "terser-webpack-plugin": "^5.3.11", + "watchpack": "^2.4.1", "webpack-sources": "^3.2.3" }, "bin": { @@ -15067,9 +16229,10 @@ } }, "node_modules/webpack-bundle-analyzer": { - "version": "4.10.1", - "resolved": "https://registry.npmjs.org/webpack-bundle-analyzer/-/webpack-bundle-analyzer-4.10.1.tgz", - "integrity": "sha512-s3P7pgexgT/HTUSYgxJyn28A+99mmLq4HsJepMPzu0R8ImJc52QNqaFYW1Z2z2uIb1/J3eYgaAWVpaC+v/1aAQ==", + "version": "4.10.2", + "resolved": "https://registry.npmjs.org/webpack-bundle-analyzer/-/webpack-bundle-analyzer-4.10.2.tgz", + "integrity": "sha512-vJptkMm9pk5si4Bv922ZbKLV8UTT4zib4FPgXMhgzUny0bfDDkLXAVQs3ly3fS4/TN9ROFtb0NFrm04UXFE/Vw==", + "license": "MIT", "dependencies": { "@discoveryjs/json-ext": "0.5.7", "acorn": "^8.0.4", @@ -15079,7 +16242,6 @@ "escape-string-regexp": "^4.0.0", "gzip-size": "^6.0.0", "html-escaper": "^2.0.2", - "is-plain-object": "^5.0.0", "opener": "^1.5.2", "picocolors": "^1.0.0", "sirv": "^2.0.3", @@ -15101,9 +16263,10 @@ } }, "node_modules/webpack-dev-middleware": { - "version": "5.3.3", - "resolved": "https://registry.npmjs.org/webpack-dev-middleware/-/webpack-dev-middleware-5.3.3.tgz", - "integrity": "sha512-hj5CYrY0bZLB+eTO+x/j67Pkrquiy7kWepMHmUMoPsmcUaeEnQJqFzHJOyxgWlq746/wUuA64p9ta34Kyb01pA==", + "version": "5.3.4", + "resolved": "https://registry.npmjs.org/webpack-dev-middleware/-/webpack-dev-middleware-5.3.4.tgz", + "integrity": "sha512-BVdTqhhs+0IfoeAf7EoH5WE+exCmqGerHfDM0IL096Px60Tq2Mn9MAbnaGUe6HiMa41KMCYF19gyzZmBcq/o4Q==", + "license": "MIT", "dependencies": { "colorette": "^2.0.10", "memfs": "^3.4.3", @@ -15126,6 +16289,7 @@ "version": "1.52.0", "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz", "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==", + "license": "MIT", "engines": { "node": ">= 0.6" } @@ -15134,6 +16298,7 @@ "version": "2.1.35", "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz", "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==", + "license": "MIT", "dependencies": { "mime-db": "1.52.0" }, @@ -15145,14 +16310,16 @@ "version": "1.2.1", "resolved": "https://registry.npmjs.org/range-parser/-/range-parser-1.2.1.tgz", "integrity": "sha512-Hrgsx+orqoygnmhFbKaHE6c296J+HTAQXoxEF6gNupROmmGJRoyzfG3ccAveqCBrwr/2yxQ5BVd/GTl5agOwSg==", + "license": "MIT", "engines": { "node": ">= 0.6" } }, "node_modules/webpack-dev-server": { - "version": "4.15.1", - "resolved": "https://registry.npmjs.org/webpack-dev-server/-/webpack-dev-server-4.15.1.tgz", - "integrity": "sha512-5hbAst3h3C3L8w6W4P96L5vaV0PxSmJhxZvWKYIdgxOQm8pNZ5dEOmmSLBVpP85ReeyRt6AS1QJNyo/oFFPeVA==", + "version": "4.15.2", + "resolved": "https://registry.npmjs.org/webpack-dev-server/-/webpack-dev-server-4.15.2.tgz", + "integrity": "sha512-0XavAZbNJ5sDrCbkpWL8mia0o5WPOd2YGtxrEiZkBK9FjLppIUK2TgxK6qGD2P3hUXTJNNPVibrerKcx5WkR1g==", + "license": "MIT", "dependencies": { "@types/bonjour": "^3.5.9", "@types/connect-history-api-fallback": "^1.3.5", @@ -15182,7 +16349,7 @@ "serve-index": "^1.9.1", "sockjs": "^0.3.24", "spdy": "^4.0.2", - "webpack-dev-middleware": "^5.3.1", + "webpack-dev-middleware": "^5.3.4", "ws": "^8.13.0" }, "bin": { @@ -15208,9 +16375,10 @@ } }, "node_modules/webpack-dev-server/node_modules/ws": { - "version": "8.16.0", - "resolved": "https://registry.npmjs.org/ws/-/ws-8.16.0.tgz", - "integrity": 
"sha512-HS0c//TP7Ina87TfiPUz1rQzMhHrl/SG2guqRcTOIUYD2q8uhUdNHZYJUaQ8aTGPzCh+c6oawMKW35nFl1dxyQ==", + "version": "8.18.2", + "resolved": "https://registry.npmjs.org/ws/-/ws-8.18.2.tgz", + "integrity": "sha512-DMricUmwGZUVr++AEAe2uiVM7UoO9MAVZMDu05UQOaUII0lp+zOzLLU4Xqh/JvTqklB1T4uELaaPBKyjE1r4fQ==", + "license": "MIT", "engines": { "node": ">=10.0.0" }, @@ -15248,34 +16416,6 @@ "node": ">=10.13.0" } }, - "node_modules/webpack/node_modules/ajv": { - "version": "6.12.6", - "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", - "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==", - "dependencies": { - "fast-deep-equal": "^3.1.1", - "fast-json-stable-stringify": "^2.0.0", - "json-schema-traverse": "^0.4.1", - "uri-js": "^4.2.2" - }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/epoberezkin" - } - }, - "node_modules/webpack/node_modules/ajv-keywords": { - "version": "3.5.2", - "resolved": "https://registry.npmjs.org/ajv-keywords/-/ajv-keywords-3.5.2.tgz", - "integrity": "sha512-5p6WTN0DdTGVQk6VjcEju19IgaHudalcfabD7yhDGeA6bcQnmL+CpveLJq/3hvfwd1aof6L386Ougkx6RfyMIQ==", - "peerDependencies": { - "ajv": "^6.9.1" - } - }, - "node_modules/webpack/node_modules/json-schema-traverse": { - "version": "0.4.1", - "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz", - "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==" - }, "node_modules/webpack/node_modules/mime-db": { "version": "1.52.0", "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz", @@ -15295,23 +16435,6 @@ "node": ">= 0.6" } }, - "node_modules/webpack/node_modules/schema-utils": { - "version": "3.3.0", - "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-3.3.0.tgz", - "integrity": "sha512-pN/yOAvcC+5rQ5nERGuwrjLlYvLTbCibnZ1I7B1LaiAz9BRBlE9GMgE/eqV30P7aJQUf7Ddimy/RsbYO/GrVGg==", - "dependencies": { - "@types/json-schema": "^7.0.8", - "ajv": "^6.12.5", - "ajv-keywords": "^3.5.2" - }, - "engines": { - "node": ">= 10.13.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/webpack" - } - }, "node_modules/webpackbar": { "version": "5.0.2", "resolved": "https://registry.npmjs.org/webpackbar/-/webpackbar-5.0.2.tgz", @@ -15487,9 +16610,10 @@ } }, "node_modules/ws": { - "version": "7.5.9", - "resolved": "https://registry.npmjs.org/ws/-/ws-7.5.9.tgz", - "integrity": "sha512-F+P9Jil7UiSKSkppIiD94dN07AwvFixvLIj1Og1Rl9GGMuNipJnV9JzjD6XuqmAeiswGvUmNLjr5cFuXwNS77Q==", + "version": "7.5.10", + "resolved": "https://registry.npmjs.org/ws/-/ws-7.5.10.tgz", + "integrity": "sha512-+dbF1tHwZpXcbOJdVOkzLDxZP1ailvSxM6ZweXTegylPny803bFhA+vqBYw4s31NSAk4S2Qz+AKXK9a4wkdjcQ==", + "license": "MIT", "engines": { "node": ">=8.3.0" }, @@ -15531,7 +16655,8 @@ "node_modules/yallist": { "version": "3.1.1", "resolved": "https://registry.npmjs.org/yallist/-/yallist-3.1.1.tgz", - "integrity": "sha512-a4UGQaWPH59mOXUYnAG2ewncQS4i4F43Tv3JoAM+s2VDAmS9NsK8GpDMLrCHPksFT7h3K6TOoUNn2pb7RoXx4g==" + "integrity": "sha512-a4UGQaWPH59mOXUYnAG2ewncQS4i4F43Tv3JoAM+s2VDAmS9NsK8GpDMLrCHPksFT7h3K6TOoUNn2pb7RoXx4g==", + "license": "ISC" }, "node_modules/yaml": { "version": "1.10.2", diff --git a/docs/package.json b/docs/package.json index d74dafda30e..3d20505dd12 100644 --- a/docs/package.json +++ b/docs/package.json @@ -21,6 +21,7 @@ "@docusaurus/plugin-google-gtag": "^3.4.0", "@docusaurus/plugin-sitemap": "^3.4.0", 
"@docusaurus/preset-classic": "^3.4.0", + "@docusaurus/theme-mermaid": "^3.4.0", "@easyops-cn/docusaurus-search-local": "^0.40.1", "@gracefullight/docusaurus-plugin-microsoft-clarity": "^1.0.0", "@mdx-js/react": "^3.0.0", diff --git a/docs/static/img/ibc-go repo cover 2025.svg b/docs/static/img/ibc-go repo cover 2025.svg new file mode 100644 index 00000000000..f9af7e1092e --- /dev/null +++ b/docs/static/img/ibc-go repo cover 2025.svg @@ -0,0 +1,20 @@ + + + + + + + + + + + + + + + + + + + + diff --git a/docs/versioned_docs/version-v10.1.x/00-intro.md b/docs/versioned_docs/version-v10.4.x/00-intro.md similarity index 100% rename from docs/versioned_docs/version-v10.1.x/00-intro.md rename to docs/versioned_docs/version-v10.4.x/00-intro.md diff --git a/docs/versioned_docs/version-v10.1.x/01-ibc/01-overview.md b/docs/versioned_docs/version-v10.4.x/01-ibc/01-overview.md similarity index 100% rename from docs/versioned_docs/version-v10.1.x/01-ibc/01-overview.md rename to docs/versioned_docs/version-v10.4.x/01-ibc/01-overview.md diff --git a/docs/versioned_docs/version-v10.1.x/01-ibc/02-integration.md b/docs/versioned_docs/version-v10.4.x/01-ibc/02-integration.md similarity index 100% rename from docs/versioned_docs/version-v10.1.x/01-ibc/02-integration.md rename to docs/versioned_docs/version-v10.4.x/01-ibc/02-integration.md diff --git a/docs/versioned_docs/version-v10.1.x/01-ibc/03-apps/00-ibcv2apps.md b/docs/versioned_docs/version-v10.4.x/01-ibc/03-apps/00-ibcv2apps.md similarity index 100% rename from docs/versioned_docs/version-v10.1.x/01-ibc/03-apps/00-ibcv2apps.md rename to docs/versioned_docs/version-v10.4.x/01-ibc/03-apps/00-ibcv2apps.md diff --git a/docs/versioned_docs/version-v10.1.x/01-ibc/03-apps/01-apps.md b/docs/versioned_docs/version-v10.4.x/01-ibc/03-apps/01-apps.md similarity index 100% rename from docs/versioned_docs/version-v10.1.x/01-ibc/03-apps/01-apps.md rename to docs/versioned_docs/version-v10.4.x/01-ibc/03-apps/01-apps.md diff --git a/docs/versioned_docs/version-v10.1.x/01-ibc/03-apps/02-ibcmodule.md b/docs/versioned_docs/version-v10.4.x/01-ibc/03-apps/02-ibcmodule.md similarity index 100% rename from docs/versioned_docs/version-v10.1.x/01-ibc/03-apps/02-ibcmodule.md rename to docs/versioned_docs/version-v10.4.x/01-ibc/03-apps/02-ibcmodule.md diff --git a/docs/versioned_docs/version-v10.1.x/01-ibc/03-apps/03-bindports.md b/docs/versioned_docs/version-v10.4.x/01-ibc/03-apps/03-bindports.md similarity index 100% rename from docs/versioned_docs/version-v10.1.x/01-ibc/03-apps/03-bindports.md rename to docs/versioned_docs/version-v10.4.x/01-ibc/03-apps/03-bindports.md diff --git a/docs/versioned_docs/version-v10.1.x/01-ibc/03-apps/04-keeper.md b/docs/versioned_docs/version-v10.4.x/01-ibc/03-apps/04-keeper.md similarity index 100% rename from docs/versioned_docs/version-v10.1.x/01-ibc/03-apps/04-keeper.md rename to docs/versioned_docs/version-v10.4.x/01-ibc/03-apps/04-keeper.md diff --git a/docs/versioned_docs/version-v10.1.x/01-ibc/03-apps/05-packets_acks.md b/docs/versioned_docs/version-v10.4.x/01-ibc/03-apps/05-packets_acks.md similarity index 100% rename from docs/versioned_docs/version-v10.1.x/01-ibc/03-apps/05-packets_acks.md rename to docs/versioned_docs/version-v10.4.x/01-ibc/03-apps/05-packets_acks.md diff --git a/docs/versioned_docs/version-v10.1.x/01-ibc/03-apps/06-routing.md b/docs/versioned_docs/version-v10.4.x/01-ibc/03-apps/06-routing.md similarity index 100% rename from docs/versioned_docs/version-v10.1.x/01-ibc/03-apps/06-routing.md rename to 
docs/versioned_docs/version-v10.4.x/01-ibc/03-apps/06-routing.md diff --git a/docs/versioned_docs/version-v10.4.x/01-ibc/03-apps/07-address-codec.md b/docs/versioned_docs/version-v10.4.x/01-ibc/03-apps/07-address-codec.md new file mode 100644 index 00000000000..cedaebf7b3b --- /dev/null +++ b/docs/versioned_docs/version-v10.4.x/01-ibc/03-apps/07-address-codec.md @@ -0,0 +1,92 @@ +--- +title: Address Codec +sidebar_label: Address Codec +sidebar_position: 7 +slug: /ibc/apps/address-codec +--- + +# Custom Address Codec + +## Overview + +Starting in ibc-go `v10.4.0`, the IBC transfer module uses the application's configured address codec to parse sender and receiver addresses. This enables chains to accept multiple address formats in IBC packets—for example, both standard Cosmos bech32 addresses (`cosmos1...`) and Ethereum hex addresses (`0x...`). + +## Interface + +The Cosmos SDK defines a simple interface for converting between address representations: + +```go +type Codec interface { + StringToBytes(text string) ([]byte, error) + BytesToString(bz []byte) (string, error) +} +``` + +Applications configure a codec implementation on the `AccountKeeper`. The IBC transfer module retrieves this codec via `accountKeeper.AddressCodec()` and uses it throughout packet processing—validating sender addresses when creating packets and parsing receiver addresses when delivering funds. + +**Chain independence:** Each chain applies its own codec independently. The sending chain validates senders with its codec, the receiving chain validates receivers with its codec. This works seamlessly across chains with different codec configurations without any protocol changes. + +## Implementation + +A typical implementation composes the SDK's standard bech32 codec and extends it to parse hex addresses: + +```go +type EvmCodec struct { + bech32Codec address.Codec +} + +func (c *EvmCodec) StringToBytes(text string) ([]byte, error) { + if strings.HasPrefix(text, "0x") { + // Validate and parse hex address using go-ethereum/common + if !common.IsHexAddress(text) { + return nil, errors.New("invalid hex address") + } + addr := common.HexToAddress(text) + return addr.Bytes(), nil + } + // Default to bech32 parsing + return c.bech32Codec.StringToBytes(text) +} + +func (c *EvmCodec) BytesToString(bz []byte) (string, error) { + // Always return bech32 format + return c.bech32Codec.BytesToString(bz) +} +``` + +This pattern accepts both address formats as input while consistently outputting bech32. This makes the codec a drop-in replacement for the standard codec—existing tooling continues to work unchanged while users gain the ability to specify hex addresses where convenient. + +**Note:** A recommended address codec implementation is available in the [cosmos/evm repository](https://github.com/cosmos/evm/blob/main/encoding/address/address_codec.go). + +### Application Wiring + +After initializing your transfer keeper, configure the codec using the `SetAddressCodec` method: + +```go +app.TransferKeeper.SetAddressCodec(evmaddress.NewEvmCodec(sdk.GetConfig().GetBech32AccountAddrPrefix())) +``` + +For a complete example showing the transfer keeper initialization and address codec configuration, see [evmd app.go](https://github.com/cosmos/evm/blob/720ba9cf908a20a29b7401b19a136caeb8c4092f/evmd/app.go#L483-L494). 
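Before wiring the codec into an application, it can help to see the dual-format parsing on its own. The sketch below is illustrative only and is not the cosmos/evm implementation: the `parseAddress` helper, the hard-coded `cosmos` prefix, and the placeholder hex address are assumptions made for the example. It decodes a hex address, re-encodes the bytes as bech32 (as `BytesToString` above always does), and checks that parsing the bech32 form yields the same bytes.

```go
package main

import (
	"bytes"
	"fmt"
	"strings"

	"github.com/cosmos/cosmos-sdk/types/bech32"
	"github.com/ethereum/go-ethereum/common"
)

// parseAddress mirrors the dual-format StringToBytes logic sketched above:
// "0x"-prefixed input is parsed as a hex address, anything else as bech32.
func parseAddress(text string) ([]byte, error) {
	if strings.HasPrefix(text, "0x") {
		if !common.IsHexAddress(text) {
			return nil, fmt.Errorf("invalid hex address: %s", text)
		}
		return common.HexToAddress(text).Bytes(), nil
	}
	_, bz, err := bech32.DecodeAndConvert(text)
	return bz, err
}

func main() {
	// Placeholder 20-byte address, used only to demonstrate the round trip.
	hexAddr := "0x0000000000000000000000000000000000000001"

	bz, err := parseAddress(hexAddr)
	if err != nil {
		panic(err)
	}

	// Re-encode as bech32, which is what BytesToString always outputs.
	bech32Addr, err := bech32.ConvertAndEncode("cosmos", bz)
	if err != nil {
		panic(err)
	}

	// Parsing the bech32 form recovers the same bytes as the hex form.
	bz2, err := parseAddress(bech32Addr)
	if err != nil {
		panic(err)
	}

	fmt.Println(bech32Addr, bytes.Equal(bz, bz2)) // prints the bech32 form and true
}
```

Because both representations reduce to the same 20-byte account, the rest of the transfer flow is unaffected by which format the user supplied.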
+ +## Usage + +Once configured, the chain accepts IBC transfers with receiver addresses in either format: + +```bash +# Standard bech32 address +gaiad tx ibc-transfer transfer transfer channel-0 \ +cosmos1p9p6h9m8jcn8f7l6h3k2wq9g6yx0l8a9u2n4lr 1000uatom --from sender + +# Ethereum hex address +gaiad tx ibc-transfer transfer transfer channel-0 \ +0x742d35Cc6634C0532925a3b844Bc9e7595f0bEb 1000uatom --from sender +``` + +Both formats resolve to the same on-chain account when derived from the same private key. The codec handles conversion to the internal byte representation transparently. + +## Reference Implementation + +The cosmos/evm repository provides a complete implementation in `utils/address_codec.go` with integration examples in the `evmd` reference chain: + +- [**Implementation PR**](https://github.com/cosmos/evm/pull/665) +- [**Reference Chain "evmd"**](https://github.com/cosmos/evm/tree/main/evmd) diff --git a/docs/versioned_docs/version-v10.1.x/01-ibc/03-apps/_category_.json b/docs/versioned_docs/version-v10.4.x/01-ibc/03-apps/_category_.json similarity index 100% rename from docs/versioned_docs/version-v10.1.x/01-ibc/03-apps/_category_.json rename to docs/versioned_docs/version-v10.4.x/01-ibc/03-apps/_category_.json diff --git a/docs/versioned_docs/version-v10.1.x/01-ibc/03-apps/images/packet_flow.png b/docs/versioned_docs/version-v10.4.x/01-ibc/03-apps/images/packet_flow.png similarity index 100% rename from docs/versioned_docs/version-v10.1.x/01-ibc/03-apps/images/packet_flow.png rename to docs/versioned_docs/version-v10.4.x/01-ibc/03-apps/images/packet_flow.png diff --git a/docs/versioned_docs/version-v10.1.x/01-ibc/03-apps/images/packet_flow_v2.png b/docs/versioned_docs/version-v10.4.x/01-ibc/03-apps/images/packet_flow_v2.png similarity index 100% rename from docs/versioned_docs/version-v10.1.x/01-ibc/03-apps/images/packet_flow_v2.png rename to docs/versioned_docs/version-v10.4.x/01-ibc/03-apps/images/packet_flow_v2.png diff --git a/docs/versioned_docs/version-v10.1.x/01-ibc/04-middleware/01-overview.md b/docs/versioned_docs/version-v10.4.x/01-ibc/04-middleware/01-overview.md similarity index 100% rename from docs/versioned_docs/version-v10.1.x/01-ibc/04-middleware/01-overview.md rename to docs/versioned_docs/version-v10.4.x/01-ibc/04-middleware/01-overview.md diff --git a/docs/versioned_docs/version-v10.1.x/01-ibc/04-middleware/02-develop.md b/docs/versioned_docs/version-v10.4.x/01-ibc/04-middleware/02-develop.md similarity index 100% rename from docs/versioned_docs/version-v10.1.x/01-ibc/04-middleware/02-develop.md rename to docs/versioned_docs/version-v10.4.x/01-ibc/04-middleware/02-develop.md diff --git a/docs/versioned_docs/version-v10.1.x/01-ibc/04-middleware/02-developIBCv2.md b/docs/versioned_docs/version-v10.4.x/01-ibc/04-middleware/02-developIBCv2.md similarity index 95% rename from docs/versioned_docs/version-v10.1.x/01-ibc/04-middleware/02-developIBCv2.md rename to docs/versioned_docs/version-v10.4.x/01-ibc/04-middleware/02-developIBCv2.md index 8d8a4b55275..ef9a6f47747 100644 --- a/docs/versioned_docs/version-v10.1.x/01-ibc/04-middleware/02-developIBCv2.md +++ b/docs/versioned_docs/version-v10.4.x/01-ibc/04-middleware/02-developIBCv2.md @@ -77,11 +77,11 @@ The middleware must have access to the underlying application, and be called bef > Middleware **may** choose not to call the underlying application's callback at all. Though these should generally be limited to error cases. 
-The `IBCModule` interface consists of the packet callbacks where cutom logic is performed. +The `IBCModule` interface consists of the packet callbacks where custom logic is performed. ### Packet callbacks -The packet callbacks are where the middleware performs most of its custom logic. The middleware may read the packet flow data and perform some additional packet handling, or it may modify the incoming data before it reaches the underlying application. This enables a wide degree of usecases, as a simple base application like token-transfer can be transformed for a variety of usecases by combining it with custom middleware, for example acting as a filter for which tokens can be sent and recieved. +The packet callbacks are where the middleware performs most of its custom logic. The middleware may read the packet flow data and perform some additional packet handling, or it may modify the incoming data before it reaches the underlying application. This enables a wide range of use cases, as a simple base application like token-transfer can be transformed for a variety of use cases by combining it with custom middleware, for example acting as a filter for which tokens can be sent and received. #### `OnRecvPacket` @@ -255,6 +255,6 @@ The middleware follows a decorator pattern that wraps an underlying application' The least intrusive middleware is stateless. They simply read the ICS26 callback arguments before calling the underlying app's callback and error if the arguments are not acceptable (e.g. whitelisting packets). Stateful middleware that are used solely for erroring are also very simple to build, an example of this would be a rate-limiting middleware that prevents transfer outflows from getting too high within a certain time frame.
Thus, such middleware requires deployment on both sides of an IBC connection or the packet processing will break. This is the hardest type of middleware to implement, integrate and deploy. Thus, it is not recommended unless absolutely necessary to fulfill the given use case. +Middleware that modifies the payload or acknowledgement such that it is no longer readable by the underlying application is the most complicated middleware. Since it is not readable by the underlying apps, if these middleware write additional state into payloads and acknowledgements that get committed to IBC core provable state, there MUST be an equivalent counterparty middleware that is able to parse and interpret this additional state while also converting the payload and acknowledgment back to a readable form for the underlying application on its side. Thus, such middleware requires deployment on both sides of an IBC connection or the packet processing will break. This is the hardest type of middleware to implement, integrate and deploy. Thus, it is not recommended unless absolutely necessary to fulfill the given use case. diff --git a/docs/versioned_docs/version-v10.1.x/01-ibc/04-middleware/03-integration.md b/docs/versioned_docs/version-v10.4.x/01-ibc/04-middleware/03-integration.md similarity index 100% rename from docs/versioned_docs/version-v10.1.x/01-ibc/04-middleware/03-integration.md rename to docs/versioned_docs/version-v10.4.x/01-ibc/04-middleware/03-integration.md diff --git a/docs/versioned_docs/version-v10.1.x/01-ibc/04-middleware/_category_.json b/docs/versioned_docs/version-v10.4.x/01-ibc/04-middleware/_category_.json similarity index 100% rename from docs/versioned_docs/version-v10.1.x/01-ibc/04-middleware/_category_.json rename to docs/versioned_docs/version-v10.4.x/01-ibc/04-middleware/_category_.json diff --git a/docs/versioned_docs/version-v10.1.x/01-ibc/04-middleware/images/middleware-stack.png b/docs/versioned_docs/version-v10.4.x/01-ibc/04-middleware/images/middleware-stack.png similarity index 100% rename from docs/versioned_docs/version-v10.1.x/01-ibc/04-middleware/images/middleware-stack.png rename to docs/versioned_docs/version-v10.4.x/01-ibc/04-middleware/images/middleware-stack.png diff --git a/docs/versioned_docs/version-v10.1.x/01-ibc/05-upgrades/00-intro.md b/docs/versioned_docs/version-v10.4.x/01-ibc/05-upgrades/00-intro.md similarity index 100% rename from docs/versioned_docs/version-v10.1.x/01-ibc/05-upgrades/00-intro.md rename to docs/versioned_docs/version-v10.4.x/01-ibc/05-upgrades/00-intro.md diff --git a/docs/versioned_docs/version-v10.1.x/01-ibc/05-upgrades/01-quick-guide.md b/docs/versioned_docs/version-v10.4.x/01-ibc/05-upgrades/01-quick-guide.md similarity index 100% rename from docs/versioned_docs/version-v10.1.x/01-ibc/05-upgrades/01-quick-guide.md rename to docs/versioned_docs/version-v10.4.x/01-ibc/05-upgrades/01-quick-guide.md diff --git a/docs/versioned_docs/version-v10.1.x/01-ibc/05-upgrades/02-developer-guide.md b/docs/versioned_docs/version-v10.4.x/01-ibc/05-upgrades/02-developer-guide.md similarity index 100% rename from docs/versioned_docs/version-v10.1.x/01-ibc/05-upgrades/02-developer-guide.md rename to docs/versioned_docs/version-v10.4.x/01-ibc/05-upgrades/02-developer-guide.md diff --git a/docs/versioned_docs/version-v10.1.x/01-ibc/05-upgrades/03-genesis-restart.md b/docs/versioned_docs/version-v10.4.x/01-ibc/05-upgrades/03-genesis-restart.md similarity index 100% rename from docs/versioned_docs/version-v10.1.x/01-ibc/05-upgrades/03-genesis-restart.md 
rename to docs/versioned_docs/version-v10.4.x/01-ibc/05-upgrades/03-genesis-restart.md diff --git a/docs/versioned_docs/version-v10.1.x/01-ibc/05-upgrades/_category_.json b/docs/versioned_docs/version-v10.4.x/01-ibc/05-upgrades/_category_.json similarity index 100% rename from docs/versioned_docs/version-v10.1.x/01-ibc/05-upgrades/_category_.json rename to docs/versioned_docs/version-v10.4.x/01-ibc/05-upgrades/_category_.json diff --git a/docs/versioned_docs/version-v10.1.x/01-ibc/07-relayer.md b/docs/versioned_docs/version-v10.4.x/01-ibc/07-relayer.md similarity index 100% rename from docs/versioned_docs/version-v10.1.x/01-ibc/07-relayer.md rename to docs/versioned_docs/version-v10.4.x/01-ibc/07-relayer.md diff --git a/docs/versioned_docs/version-v10.1.x/01-ibc/08-best-practices.md b/docs/versioned_docs/version-v10.4.x/01-ibc/08-best-practices.md similarity index 100% rename from docs/versioned_docs/version-v10.1.x/01-ibc/08-best-practices.md rename to docs/versioned_docs/version-v10.4.x/01-ibc/08-best-practices.md diff --git a/docs/versioned_docs/version-v10.1.x/01-ibc/09-permissioning.md b/docs/versioned_docs/version-v10.4.x/01-ibc/09-permissioning.md similarity index 100% rename from docs/versioned_docs/version-v10.1.x/01-ibc/09-permissioning.md rename to docs/versioned_docs/version-v10.4.x/01-ibc/09-permissioning.md diff --git a/docs/versioned_docs/version-v10.1.x/01-ibc/_category_.json b/docs/versioned_docs/version-v10.4.x/01-ibc/_category_.json similarity index 100% rename from docs/versioned_docs/version-v10.1.x/01-ibc/_category_.json rename to docs/versioned_docs/version-v10.4.x/01-ibc/_category_.json diff --git a/docs/versioned_docs/version-v10.1.x/02-apps/01-transfer/01-overview.md b/docs/versioned_docs/version-v10.4.x/02-apps/01-transfer/01-overview.md similarity index 100% rename from docs/versioned_docs/version-v10.1.x/02-apps/01-transfer/01-overview.md rename to docs/versioned_docs/version-v10.4.x/02-apps/01-transfer/01-overview.md diff --git a/docs/versioned_docs/version-v10.1.x/02-apps/01-transfer/02-state.md b/docs/versioned_docs/version-v10.4.x/02-apps/01-transfer/02-state.md similarity index 100% rename from docs/versioned_docs/version-v10.1.x/02-apps/01-transfer/02-state.md rename to docs/versioned_docs/version-v10.4.x/02-apps/01-transfer/02-state.md diff --git a/docs/versioned_docs/version-v10.1.x/02-apps/01-transfer/03-state-transitions.md b/docs/versioned_docs/version-v10.4.x/02-apps/01-transfer/03-state-transitions.md similarity index 100% rename from docs/versioned_docs/version-v10.1.x/02-apps/01-transfer/03-state-transitions.md rename to docs/versioned_docs/version-v10.4.x/02-apps/01-transfer/03-state-transitions.md diff --git a/docs/versioned_docs/version-v10.1.x/02-apps/01-transfer/04-messages.md b/docs/versioned_docs/version-v10.4.x/02-apps/01-transfer/04-messages.md similarity index 100% rename from docs/versioned_docs/version-v10.1.x/02-apps/01-transfer/04-messages.md rename to docs/versioned_docs/version-v10.4.x/02-apps/01-transfer/04-messages.md diff --git a/docs/versioned_docs/version-v10.1.x/02-apps/01-transfer/05-events.md b/docs/versioned_docs/version-v10.4.x/02-apps/01-transfer/05-events.md similarity index 100% rename from docs/versioned_docs/version-v10.1.x/02-apps/01-transfer/05-events.md rename to docs/versioned_docs/version-v10.4.x/02-apps/01-transfer/05-events.md diff --git a/docs/versioned_docs/version-v10.1.x/02-apps/01-transfer/06-metrics.md b/docs/versioned_docs/version-v10.4.x/02-apps/01-transfer/06-metrics.md similarity index 100% rename 
from docs/versioned_docs/version-v10.1.x/02-apps/01-transfer/06-metrics.md rename to docs/versioned_docs/version-v10.4.x/02-apps/01-transfer/06-metrics.md diff --git a/docs/versioned_docs/version-v10.1.x/02-apps/01-transfer/07-params.md b/docs/versioned_docs/version-v10.4.x/02-apps/01-transfer/07-params.md similarity index 100% rename from docs/versioned_docs/version-v10.1.x/02-apps/01-transfer/07-params.md rename to docs/versioned_docs/version-v10.4.x/02-apps/01-transfer/07-params.md diff --git a/docs/versioned_docs/version-v10.1.x/02-apps/01-transfer/08-authorizations.md b/docs/versioned_docs/version-v10.4.x/02-apps/01-transfer/08-authorizations.md similarity index 100% rename from docs/versioned_docs/version-v10.1.x/02-apps/01-transfer/08-authorizations.md rename to docs/versioned_docs/version-v10.4.x/02-apps/01-transfer/08-authorizations.md diff --git a/docs/versioned_docs/version-v10.1.x/02-apps/01-transfer/09-client.md b/docs/versioned_docs/version-v10.4.x/02-apps/01-transfer/09-client.md similarity index 100% rename from docs/versioned_docs/version-v10.1.x/02-apps/01-transfer/09-client.md rename to docs/versioned_docs/version-v10.4.x/02-apps/01-transfer/09-client.md diff --git a/docs/versioned_docs/version-v10.1.x/02-apps/01-transfer/10-IBCv2-transfer.md b/docs/versioned_docs/version-v10.4.x/02-apps/01-transfer/10-IBCv2-transfer.md similarity index 100% rename from docs/versioned_docs/version-v10.1.x/02-apps/01-transfer/10-IBCv2-transfer.md rename to docs/versioned_docs/version-v10.4.x/02-apps/01-transfer/10-IBCv2-transfer.md diff --git a/docs/versioned_docs/version-v10.1.x/02-apps/01-transfer/_category_.json b/docs/versioned_docs/version-v10.4.x/02-apps/01-transfer/_category_.json similarity index 100% rename from docs/versioned_docs/version-v10.1.x/02-apps/01-transfer/_category_.json rename to docs/versioned_docs/version-v10.4.x/02-apps/01-transfer/_category_.json diff --git a/docs/versioned_docs/version-v10.1.x/02-apps/01-transfer/images/forwarding-3-chains-dark.png b/docs/versioned_docs/version-v10.4.x/02-apps/01-transfer/images/forwarding-3-chains-dark.png similarity index 100% rename from docs/versioned_docs/version-v10.1.x/02-apps/01-transfer/images/forwarding-3-chains-dark.png rename to docs/versioned_docs/version-v10.4.x/02-apps/01-transfer/images/forwarding-3-chains-dark.png diff --git a/docs/versioned_docs/version-v10.1.x/02-apps/01-transfer/images/forwarding-3-chains-light.png b/docs/versioned_docs/version-v10.4.x/02-apps/01-transfer/images/forwarding-3-chains-light.png similarity index 100% rename from docs/versioned_docs/version-v10.1.x/02-apps/01-transfer/images/forwarding-3-chains-light.png rename to docs/versioned_docs/version-v10.4.x/02-apps/01-transfer/images/forwarding-3-chains-light.png diff --git a/docs/versioned_docs/version-v10.1.x/02-apps/02-interchain-accounts/01-overview.md b/docs/versioned_docs/version-v10.4.x/02-apps/02-interchain-accounts/01-overview.md similarity index 100% rename from docs/versioned_docs/version-v10.1.x/02-apps/02-interchain-accounts/01-overview.md rename to docs/versioned_docs/version-v10.4.x/02-apps/02-interchain-accounts/01-overview.md diff --git a/docs/versioned_docs/version-v10.1.x/02-apps/02-interchain-accounts/02-development.md b/docs/versioned_docs/version-v10.4.x/02-apps/02-interchain-accounts/02-development.md similarity index 100% rename from docs/versioned_docs/version-v10.1.x/02-apps/02-interchain-accounts/02-development.md rename to docs/versioned_docs/version-v10.4.x/02-apps/02-interchain-accounts/02-development.md diff 
--git a/docs/versioned_docs/version-v10.1.x/02-apps/02-interchain-accounts/03-auth-modules.md b/docs/versioned_docs/version-v10.4.x/02-apps/02-interchain-accounts/03-auth-modules.md similarity index 100% rename from docs/versioned_docs/version-v10.1.x/02-apps/02-interchain-accounts/03-auth-modules.md rename to docs/versioned_docs/version-v10.4.x/02-apps/02-interchain-accounts/03-auth-modules.md diff --git a/docs/versioned_docs/version-v10.1.x/02-apps/02-interchain-accounts/04-integration.md b/docs/versioned_docs/version-v10.4.x/02-apps/02-interchain-accounts/04-integration.md similarity index 100% rename from docs/versioned_docs/version-v10.1.x/02-apps/02-interchain-accounts/04-integration.md rename to docs/versioned_docs/version-v10.4.x/02-apps/02-interchain-accounts/04-integration.md diff --git a/docs/versioned_docs/version-v10.1.x/02-apps/02-interchain-accounts/05-messages.md b/docs/versioned_docs/version-v10.4.x/02-apps/02-interchain-accounts/05-messages.md similarity index 100% rename from docs/versioned_docs/version-v10.1.x/02-apps/02-interchain-accounts/05-messages.md rename to docs/versioned_docs/version-v10.4.x/02-apps/02-interchain-accounts/05-messages.md diff --git a/docs/versioned_docs/version-v10.1.x/02-apps/02-interchain-accounts/06-parameters.md b/docs/versioned_docs/version-v10.4.x/02-apps/02-interchain-accounts/06-parameters.md similarity index 100% rename from docs/versioned_docs/version-v10.1.x/02-apps/02-interchain-accounts/06-parameters.md rename to docs/versioned_docs/version-v10.4.x/02-apps/02-interchain-accounts/06-parameters.md diff --git a/docs/versioned_docs/version-v10.1.x/02-apps/02-interchain-accounts/07-tx-encoding.md b/docs/versioned_docs/version-v10.4.x/02-apps/02-interchain-accounts/07-tx-encoding.md similarity index 100% rename from docs/versioned_docs/version-v10.1.x/02-apps/02-interchain-accounts/07-tx-encoding.md rename to docs/versioned_docs/version-v10.4.x/02-apps/02-interchain-accounts/07-tx-encoding.md diff --git a/docs/versioned_docs/version-v10.1.x/02-apps/02-interchain-accounts/08-client.md b/docs/versioned_docs/version-v10.4.x/02-apps/02-interchain-accounts/08-client.md similarity index 100% rename from docs/versioned_docs/version-v10.1.x/02-apps/02-interchain-accounts/08-client.md rename to docs/versioned_docs/version-v10.4.x/02-apps/02-interchain-accounts/08-client.md diff --git a/docs/versioned_docs/version-v10.1.x/02-apps/02-interchain-accounts/09-active-channels.md b/docs/versioned_docs/version-v10.4.x/02-apps/02-interchain-accounts/09-active-channels.md similarity index 100% rename from docs/versioned_docs/version-v10.1.x/02-apps/02-interchain-accounts/09-active-channels.md rename to docs/versioned_docs/version-v10.4.x/02-apps/02-interchain-accounts/09-active-channels.md diff --git a/docs/versioned_docs/version-v10.1.x/02-apps/02-interchain-accounts/10-legacy/01-auth-modules.md b/docs/versioned_docs/version-v10.4.x/02-apps/02-interchain-accounts/10-legacy/01-auth-modules.md similarity index 100% rename from docs/versioned_docs/version-v10.1.x/02-apps/02-interchain-accounts/10-legacy/01-auth-modules.md rename to docs/versioned_docs/version-v10.4.x/02-apps/02-interchain-accounts/10-legacy/01-auth-modules.md diff --git a/docs/versioned_docs/version-v10.1.x/02-apps/02-interchain-accounts/10-legacy/02-integration.md b/docs/versioned_docs/version-v10.4.x/02-apps/02-interchain-accounts/10-legacy/02-integration.md similarity index 100% rename from docs/versioned_docs/version-v10.1.x/02-apps/02-interchain-accounts/10-legacy/02-integration.md 
rename to docs/versioned_docs/version-v10.4.x/02-apps/02-interchain-accounts/10-legacy/02-integration.md diff --git a/docs/versioned_docs/version-v10.1.x/02-apps/02-interchain-accounts/10-legacy/03-keeper-api.md b/docs/versioned_docs/version-v10.4.x/02-apps/02-interchain-accounts/10-legacy/03-keeper-api.md similarity index 100% rename from docs/versioned_docs/version-v10.1.x/02-apps/02-interchain-accounts/10-legacy/03-keeper-api.md rename to docs/versioned_docs/version-v10.4.x/02-apps/02-interchain-accounts/10-legacy/03-keeper-api.md diff --git a/docs/versioned_docs/version-v10.1.x/02-apps/02-interchain-accounts/10-legacy/_category_.json b/docs/versioned_docs/version-v10.4.x/02-apps/02-interchain-accounts/10-legacy/_category_.json similarity index 100% rename from docs/versioned_docs/version-v10.1.x/02-apps/02-interchain-accounts/10-legacy/_category_.json rename to docs/versioned_docs/version-v10.4.x/02-apps/02-interchain-accounts/10-legacy/_category_.json diff --git a/docs/versioned_docs/version-v10.1.x/02-apps/02-interchain-accounts/10-legacy/images/ica-pre-v6.png b/docs/versioned_docs/version-v10.4.x/02-apps/02-interchain-accounts/10-legacy/images/ica-pre-v6.png similarity index 100% rename from docs/versioned_docs/version-v10.1.x/02-apps/02-interchain-accounts/10-legacy/images/ica-pre-v6.png rename to docs/versioned_docs/version-v10.4.x/02-apps/02-interchain-accounts/10-legacy/images/ica-pre-v6.png diff --git a/docs/versioned_docs/version-v10.1.x/02-apps/02-interchain-accounts/_category_.json b/docs/versioned_docs/version-v10.4.x/02-apps/02-interchain-accounts/_category_.json similarity index 100% rename from docs/versioned_docs/version-v10.1.x/02-apps/02-interchain-accounts/_category_.json rename to docs/versioned_docs/version-v10.4.x/02-apps/02-interchain-accounts/_category_.json diff --git a/docs/versioned_docs/version-v10.1.x/02-apps/02-interchain-accounts/images/ica-v6.png b/docs/versioned_docs/version-v10.4.x/02-apps/02-interchain-accounts/images/ica-v6.png similarity index 100% rename from docs/versioned_docs/version-v10.1.x/02-apps/02-interchain-accounts/images/ica-v6.png rename to docs/versioned_docs/version-v10.4.x/02-apps/02-interchain-accounts/images/ica-v6.png diff --git a/docs/versioned_docs/version-v10.1.x/02-apps/_category_.json b/docs/versioned_docs/version-v10.4.x/02-apps/_category_.json similarity index 100% rename from docs/versioned_docs/version-v10.1.x/02-apps/_category_.json rename to docs/versioned_docs/version-v10.4.x/02-apps/_category_.json diff --git a/docs/versioned_docs/version-v10.1.x/03-light-clients/01-developer-guide/01-overview.md b/docs/versioned_docs/version-v10.4.x/03-light-clients/01-developer-guide/01-overview.md similarity index 100% rename from docs/versioned_docs/version-v10.1.x/03-light-clients/01-developer-guide/01-overview.md rename to docs/versioned_docs/version-v10.4.x/03-light-clients/01-developer-guide/01-overview.md diff --git a/docs/versioned_docs/version-v10.1.x/03-light-clients/01-developer-guide/02-light-client-module.md b/docs/versioned_docs/version-v10.4.x/03-light-clients/01-developer-guide/02-light-client-module.md similarity index 100% rename from docs/versioned_docs/version-v10.1.x/03-light-clients/01-developer-guide/02-light-client-module.md rename to docs/versioned_docs/version-v10.4.x/03-light-clients/01-developer-guide/02-light-client-module.md diff --git a/docs/versioned_docs/version-v10.1.x/03-light-clients/01-developer-guide/03-client-state.md 
b/docs/versioned_docs/version-v10.4.x/03-light-clients/01-developer-guide/03-client-state.md similarity index 100% rename from docs/versioned_docs/version-v10.1.x/03-light-clients/01-developer-guide/03-client-state.md rename to docs/versioned_docs/version-v10.4.x/03-light-clients/01-developer-guide/03-client-state.md diff --git a/docs/versioned_docs/version-v10.1.x/03-light-clients/01-developer-guide/04-consensus-state.md b/docs/versioned_docs/version-v10.4.x/03-light-clients/01-developer-guide/04-consensus-state.md similarity index 100% rename from docs/versioned_docs/version-v10.1.x/03-light-clients/01-developer-guide/04-consensus-state.md rename to docs/versioned_docs/version-v10.4.x/03-light-clients/01-developer-guide/04-consensus-state.md diff --git a/docs/versioned_docs/version-v10.1.x/03-light-clients/01-developer-guide/05-updates-and-misbehaviour.md b/docs/versioned_docs/version-v10.4.x/03-light-clients/01-developer-guide/05-updates-and-misbehaviour.md similarity index 100% rename from docs/versioned_docs/version-v10.1.x/03-light-clients/01-developer-guide/05-updates-and-misbehaviour.md rename to docs/versioned_docs/version-v10.4.x/03-light-clients/01-developer-guide/05-updates-and-misbehaviour.md diff --git a/docs/versioned_docs/version-v10.1.x/03-light-clients/01-developer-guide/06-upgrades.md b/docs/versioned_docs/version-v10.4.x/03-light-clients/01-developer-guide/06-upgrades.md similarity index 100% rename from docs/versioned_docs/version-v10.1.x/03-light-clients/01-developer-guide/06-upgrades.md rename to docs/versioned_docs/version-v10.4.x/03-light-clients/01-developer-guide/06-upgrades.md diff --git a/docs/versioned_docs/version-v10.1.x/03-light-clients/01-developer-guide/07-proofs.md b/docs/versioned_docs/version-v10.4.x/03-light-clients/01-developer-guide/07-proofs.md similarity index 100% rename from docs/versioned_docs/version-v10.1.x/03-light-clients/01-developer-guide/07-proofs.md rename to docs/versioned_docs/version-v10.4.x/03-light-clients/01-developer-guide/07-proofs.md diff --git a/docs/versioned_docs/version-v10.1.x/03-light-clients/01-developer-guide/08-proposals.md b/docs/versioned_docs/version-v10.4.x/03-light-clients/01-developer-guide/08-proposals.md similarity index 100% rename from docs/versioned_docs/version-v10.1.x/03-light-clients/01-developer-guide/08-proposals.md rename to docs/versioned_docs/version-v10.4.x/03-light-clients/01-developer-guide/08-proposals.md diff --git a/docs/versioned_docs/version-v10.1.x/03-light-clients/01-developer-guide/09-setup.md b/docs/versioned_docs/version-v10.4.x/03-light-clients/01-developer-guide/09-setup.md similarity index 100% rename from docs/versioned_docs/version-v10.1.x/03-light-clients/01-developer-guide/09-setup.md rename to docs/versioned_docs/version-v10.4.x/03-light-clients/01-developer-guide/09-setup.md diff --git a/docs/versioned_docs/version-v10.1.x/03-light-clients/01-developer-guide/_category_.json b/docs/versioned_docs/version-v10.4.x/03-light-clients/01-developer-guide/_category_.json similarity index 100% rename from docs/versioned_docs/version-v10.1.x/03-light-clients/01-developer-guide/_category_.json rename to docs/versioned_docs/version-v10.4.x/03-light-clients/01-developer-guide/_category_.json diff --git a/docs/versioned_docs/version-v10.1.x/03-light-clients/02-localhost/01-overview.md b/docs/versioned_docs/version-v10.4.x/03-light-clients/02-localhost/01-overview.md similarity index 100% rename from docs/versioned_docs/version-v10.1.x/03-light-clients/02-localhost/01-overview.md rename to 
docs/versioned_docs/version-v10.4.x/03-light-clients/02-localhost/01-overview.md diff --git a/docs/versioned_docs/version-v10.1.x/03-light-clients/02-localhost/02-integration.md b/docs/versioned_docs/version-v10.4.x/03-light-clients/02-localhost/02-integration.md similarity index 100% rename from docs/versioned_docs/version-v10.1.x/03-light-clients/02-localhost/02-integration.md rename to docs/versioned_docs/version-v10.4.x/03-light-clients/02-localhost/02-integration.md diff --git a/docs/versioned_docs/version-v10.1.x/03-light-clients/02-localhost/03-client-state.md b/docs/versioned_docs/version-v10.4.x/03-light-clients/02-localhost/03-client-state.md similarity index 100% rename from docs/versioned_docs/version-v10.1.x/03-light-clients/02-localhost/03-client-state.md rename to docs/versioned_docs/version-v10.4.x/03-light-clients/02-localhost/03-client-state.md diff --git a/docs/versioned_docs/version-v10.1.x/03-light-clients/02-localhost/04-connection.md b/docs/versioned_docs/version-v10.4.x/03-light-clients/02-localhost/04-connection.md similarity index 100% rename from docs/versioned_docs/version-v10.1.x/03-light-clients/02-localhost/04-connection.md rename to docs/versioned_docs/version-v10.4.x/03-light-clients/02-localhost/04-connection.md diff --git a/docs/versioned_docs/version-v10.1.x/03-light-clients/02-localhost/05-state-verification.md b/docs/versioned_docs/version-v10.4.x/03-light-clients/02-localhost/05-state-verification.md similarity index 100% rename from docs/versioned_docs/version-v10.1.x/03-light-clients/02-localhost/05-state-verification.md rename to docs/versioned_docs/version-v10.4.x/03-light-clients/02-localhost/05-state-verification.md diff --git a/docs/versioned_docs/version-v10.1.x/03-light-clients/02-localhost/_category_.json b/docs/versioned_docs/version-v10.4.x/03-light-clients/02-localhost/_category_.json similarity index 100% rename from docs/versioned_docs/version-v10.1.x/03-light-clients/02-localhost/_category_.json rename to docs/versioned_docs/version-v10.4.x/03-light-clients/02-localhost/_category_.json diff --git a/docs/versioned_docs/version-v10.1.x/03-light-clients/03-solomachine/01-solomachine.md b/docs/versioned_docs/version-v10.4.x/03-light-clients/03-solomachine/01-solomachine.md similarity index 100% rename from docs/versioned_docs/version-v10.1.x/03-light-clients/03-solomachine/01-solomachine.md rename to docs/versioned_docs/version-v10.4.x/03-light-clients/03-solomachine/01-solomachine.md diff --git a/docs/versioned_docs/version-v10.1.x/03-light-clients/03-solomachine/02-concepts.md b/docs/versioned_docs/version-v10.4.x/03-light-clients/03-solomachine/02-concepts.md similarity index 100% rename from docs/versioned_docs/version-v10.1.x/03-light-clients/03-solomachine/02-concepts.md rename to docs/versioned_docs/version-v10.4.x/03-light-clients/03-solomachine/02-concepts.md diff --git a/docs/versioned_docs/version-v10.1.x/03-light-clients/03-solomachine/03-state.md b/docs/versioned_docs/version-v10.4.x/03-light-clients/03-solomachine/03-state.md similarity index 100% rename from docs/versioned_docs/version-v10.1.x/03-light-clients/03-solomachine/03-state.md rename to docs/versioned_docs/version-v10.4.x/03-light-clients/03-solomachine/03-state.md diff --git a/docs/versioned_docs/version-v10.1.x/03-light-clients/03-solomachine/04-state_transitions.md b/docs/versioned_docs/version-v10.4.x/03-light-clients/03-solomachine/04-state_transitions.md similarity index 100% rename from 
docs/versioned_docs/version-v10.1.x/03-light-clients/03-solomachine/04-state_transitions.md rename to docs/versioned_docs/version-v10.4.x/03-light-clients/03-solomachine/04-state_transitions.md diff --git a/docs/versioned_docs/version-v10.1.x/03-light-clients/03-solomachine/_category_.json b/docs/versioned_docs/version-v10.4.x/03-light-clients/03-solomachine/_category_.json similarity index 100% rename from docs/versioned_docs/version-v10.1.x/03-light-clients/03-solomachine/_category_.json rename to docs/versioned_docs/version-v10.4.x/03-light-clients/03-solomachine/_category_.json diff --git a/docs/versioned_docs/version-v10.1.x/03-light-clients/04-wasm/01-overview.md b/docs/versioned_docs/version-v10.4.x/03-light-clients/04-wasm/01-overview.md similarity index 100% rename from docs/versioned_docs/version-v10.1.x/03-light-clients/04-wasm/01-overview.md rename to docs/versioned_docs/version-v10.4.x/03-light-clients/04-wasm/01-overview.md diff --git a/docs/versioned_docs/version-v10.1.x/03-light-clients/04-wasm/02-concepts.md b/docs/versioned_docs/version-v10.4.x/03-light-clients/04-wasm/02-concepts.md similarity index 100% rename from docs/versioned_docs/version-v10.1.x/03-light-clients/04-wasm/02-concepts.md rename to docs/versioned_docs/version-v10.4.x/03-light-clients/04-wasm/02-concepts.md diff --git a/docs/versioned_docs/version-v10.1.x/03-light-clients/04-wasm/03-integration.md b/docs/versioned_docs/version-v10.4.x/03-light-clients/04-wasm/03-integration.md similarity index 100% rename from docs/versioned_docs/version-v10.1.x/03-light-clients/04-wasm/03-integration.md rename to docs/versioned_docs/version-v10.4.x/03-light-clients/04-wasm/03-integration.md diff --git a/docs/versioned_docs/version-v10.1.x/03-light-clients/04-wasm/04-messages.md b/docs/versioned_docs/version-v10.4.x/03-light-clients/04-wasm/04-messages.md similarity index 100% rename from docs/versioned_docs/version-v10.1.x/03-light-clients/04-wasm/04-messages.md rename to docs/versioned_docs/version-v10.4.x/03-light-clients/04-wasm/04-messages.md diff --git a/docs/versioned_docs/version-v10.1.x/03-light-clients/04-wasm/05-governance.md b/docs/versioned_docs/version-v10.4.x/03-light-clients/04-wasm/05-governance.md similarity index 100% rename from docs/versioned_docs/version-v10.1.x/03-light-clients/04-wasm/05-governance.md rename to docs/versioned_docs/version-v10.4.x/03-light-clients/04-wasm/05-governance.md diff --git a/docs/versioned_docs/version-v10.1.x/03-light-clients/04-wasm/06-events.md b/docs/versioned_docs/version-v10.4.x/03-light-clients/04-wasm/06-events.md similarity index 100% rename from docs/versioned_docs/version-v10.1.x/03-light-clients/04-wasm/06-events.md rename to docs/versioned_docs/version-v10.4.x/03-light-clients/04-wasm/06-events.md diff --git a/docs/versioned_docs/version-v10.1.x/03-light-clients/04-wasm/07-contracts.md b/docs/versioned_docs/version-v10.4.x/03-light-clients/04-wasm/07-contracts.md similarity index 100% rename from docs/versioned_docs/version-v10.1.x/03-light-clients/04-wasm/07-contracts.md rename to docs/versioned_docs/version-v10.4.x/03-light-clients/04-wasm/07-contracts.md diff --git a/docs/versioned_docs/version-v10.1.x/03-light-clients/04-wasm/08-client.md b/docs/versioned_docs/version-v10.4.x/03-light-clients/04-wasm/08-client.md similarity index 100% rename from docs/versioned_docs/version-v10.1.x/03-light-clients/04-wasm/08-client.md rename to docs/versioned_docs/version-v10.4.x/03-light-clients/04-wasm/08-client.md diff --git 
a/docs/versioned_docs/version-v10.1.x/03-light-clients/04-wasm/09-migrations.md b/docs/versioned_docs/version-v10.4.x/03-light-clients/04-wasm/09-migrations.md similarity index 95% rename from docs/versioned_docs/version-v10.1.x/03-light-clients/04-wasm/09-migrations.md rename to docs/versioned_docs/version-v10.4.x/03-light-clients/04-wasm/09-migrations.md index 35c43bd6721..5a10f32bead 100644 --- a/docs/versioned_docs/version-v10.1.x/03-light-clients/04-wasm/09-migrations.md +++ b/docs/versioned_docs/version-v10.4.x/03-light-clients/04-wasm/09-migrations.md @@ -26,7 +26,7 @@ v0.1.1-0.20231213092633-b306e7a706e1 => v0.1.0+ibc-go-v7.3-wasmvm-v1.5 - The `Initialize`, `Status`, `GetTimestampAtHeight`, `GetLatestHeight`, `VerifyMembership`, `VerifyNonMembership`, `VerifyClientMessage`, `UpdateState` and `UpdateStateOnMisbehaviour` functions in `ClientState` have been removed and all their logic has been moved to functions of the `LightClientModule`. - The `MigrateContract` function has been removed from `ClientState`. -- The `VerifyMembershipMsg` and `VerifyNonMembershipMsg` payloads for `SudoMsg` have been modified. The `Path` field of both structs has been updated from `v1.MerklePath` to `v2.MerklePath`. The new `v2.MerklePath` field contains a `KeyPath` of `[][]byte` as opposed to `[]string`, see [23-commitment](../../05-migrations/13-v8-to-v9.md#23-commitment). This supports proving values stored under keys which contain non-utf8 encoded symbols. As a result, the JSON field `path` containing `key_path` of both messages will marshal elements as a base64 encoded bytestrings. This is a breaking change for 08-wasm client contracts and they should be migrated to correctly support deserialisation of the `v2.MerklePath` field. +- The `VerifyMembershipMsg` and `VerifyNonMembershipMsg` payloads for `SudoMsg` have been modified. The `Path` field of both structs has been updated from `v1.MerklePath` to `v2.MerklePath`. The new `v2.MerklePath` field contains a `KeyPath` of `[][]byte` as opposed to `[]string`. This supports proving values stored under keys which contain non-utf8 encoded symbols. As a result, the JSON field `path` containing `key_path` of both messages will marshal elements as a base64 encoded bytestrings. This is a breaking change for 08-wasm client contracts and they should be migrated to correctly support deserialisation of the `v2.MerklePath` field. - The `ExportMetadataMsg` struct has been removed and is no longer required for contracts to implement. Core IBC will handle exporting all key/value's written to the store by a light client contract. - The `ZeroCustomFields` interface function has been removed from the `ClientState` interface. Core IBC only used this function to set tendermint client states when scheduling an IBC software upgrade. The interface function has been replaced by a type assertion. - The `MaxWasmByteSize` function has been removed in favor of the `MaxWasmSize` constant. 
diff --git a/docs/versioned_docs/version-v10.1.x/03-light-clients/04-wasm/_category_.json b/docs/versioned_docs/version-v10.4.x/03-light-clients/04-wasm/_category_.json similarity index 100% rename from docs/versioned_docs/version-v10.1.x/03-light-clients/04-wasm/_category_.json rename to docs/versioned_docs/version-v10.4.x/03-light-clients/04-wasm/_category_.json diff --git a/docs/versioned_docs/version-v10.1.x/03-light-clients/05-tendermint/01-overview.md b/docs/versioned_docs/version-v10.4.x/03-light-clients/05-tendermint/01-overview.md similarity index 100% rename from docs/versioned_docs/version-v10.1.x/03-light-clients/05-tendermint/01-overview.md rename to docs/versioned_docs/version-v10.4.x/03-light-clients/05-tendermint/01-overview.md diff --git a/docs/versioned_docs/version-v10.1.x/03-light-clients/05-tendermint/_category_.json b/docs/versioned_docs/version-v10.4.x/03-light-clients/05-tendermint/_category_.json similarity index 100% rename from docs/versioned_docs/version-v10.1.x/03-light-clients/05-tendermint/_category_.json rename to docs/versioned_docs/version-v10.4.x/03-light-clients/05-tendermint/_category_.json diff --git a/docs/versioned_docs/version-v10.1.x/03-light-clients/06-proposals.md b/docs/versioned_docs/version-v10.4.x/03-light-clients/06-proposals.md similarity index 100% rename from docs/versioned_docs/version-v10.1.x/03-light-clients/06-proposals.md rename to docs/versioned_docs/version-v10.4.x/03-light-clients/06-proposals.md diff --git a/docs/versioned_docs/version-v10.1.x/03-light-clients/_category_.json b/docs/versioned_docs/version-v10.4.x/03-light-clients/_category_.json similarity index 100% rename from docs/versioned_docs/version-v10.1.x/03-light-clients/_category_.json rename to docs/versioned_docs/version-v10.4.x/03-light-clients/_category_.json diff --git a/docs/versioned_docs/version-v10.1.x/04-middleware/01-callbacks/01-overview.md b/docs/versioned_docs/version-v10.4.x/04-middleware/01-callbacks/01-overview.md similarity index 100% rename from docs/versioned_docs/version-v10.1.x/04-middleware/01-callbacks/01-overview.md rename to docs/versioned_docs/version-v10.4.x/04-middleware/01-callbacks/01-overview.md diff --git a/docs/versioned_docs/version-v10.1.x/04-middleware/01-callbacks/02-integration.md b/docs/versioned_docs/version-v10.4.x/04-middleware/01-callbacks/02-integration.md similarity index 100% rename from docs/versioned_docs/version-v10.1.x/04-middleware/01-callbacks/02-integration.md rename to docs/versioned_docs/version-v10.4.x/04-middleware/01-callbacks/02-integration.md diff --git a/docs/versioned_docs/version-v10.1.x/04-middleware/01-callbacks/03-interfaces.md b/docs/versioned_docs/version-v10.4.x/04-middleware/01-callbacks/03-interfaces.md similarity index 100% rename from docs/versioned_docs/version-v10.1.x/04-middleware/01-callbacks/03-interfaces.md rename to docs/versioned_docs/version-v10.4.x/04-middleware/01-callbacks/03-interfaces.md diff --git a/docs/versioned_docs/version-v10.1.x/04-middleware/01-callbacks/04-events.md b/docs/versioned_docs/version-v10.4.x/04-middleware/01-callbacks/04-events.md similarity index 100% rename from docs/versioned_docs/version-v10.1.x/04-middleware/01-callbacks/04-events.md rename to docs/versioned_docs/version-v10.4.x/04-middleware/01-callbacks/04-events.md diff --git a/docs/versioned_docs/version-v10.1.x/04-middleware/01-callbacks/05-end-users.md b/docs/versioned_docs/version-v10.4.x/04-middleware/01-callbacks/05-end-users.md similarity index 100% rename from 
docs/versioned_docs/version-v10.1.x/04-middleware/01-callbacks/05-end-users.md rename to docs/versioned_docs/version-v10.4.x/04-middleware/01-callbacks/05-end-users.md diff --git a/docs/versioned_docs/version-v10.1.x/04-middleware/01-callbacks/06-gas.md b/docs/versioned_docs/version-v10.4.x/04-middleware/01-callbacks/06-gas.md similarity index 100% rename from docs/versioned_docs/version-v10.1.x/04-middleware/01-callbacks/06-gas.md rename to docs/versioned_docs/version-v10.4.x/04-middleware/01-callbacks/06-gas.md diff --git a/docs/versioned_docs/version-v10.1.x/04-middleware/01-callbacks/07-callbacks-IBCv2.md b/docs/versioned_docs/version-v10.4.x/04-middleware/01-callbacks/07-callbacks-IBCv2.md similarity index 100% rename from docs/versioned_docs/version-v10.1.x/04-middleware/01-callbacks/07-callbacks-IBCv2.md rename to docs/versioned_docs/version-v10.4.x/04-middleware/01-callbacks/07-callbacks-IBCv2.md diff --git a/docs/versioned_docs/version-v10.1.x/04-middleware/01-callbacks/_category_.json b/docs/versioned_docs/version-v10.4.x/04-middleware/01-callbacks/_category_.json similarity index 100% rename from docs/versioned_docs/version-v10.1.x/04-middleware/01-callbacks/_category_.json rename to docs/versioned_docs/version-v10.4.x/04-middleware/01-callbacks/_category_.json diff --git a/docs/versioned_docs/version-v10.1.x/04-middleware/01-callbacks/images/callbackflow.svg b/docs/versioned_docs/version-v10.4.x/04-middleware/01-callbacks/images/callbackflow.svg similarity index 100% rename from docs/versioned_docs/version-v10.1.x/04-middleware/01-callbacks/images/callbackflow.svg rename to docs/versioned_docs/version-v10.4.x/04-middleware/01-callbacks/images/callbackflow.svg diff --git a/docs/versioned_docs/version-v10.1.x/04-middleware/01-callbacks/images/ics4-callbackflow.svg b/docs/versioned_docs/version-v10.4.x/04-middleware/01-callbacks/images/ics4-callbackflow.svg similarity index 100% rename from docs/versioned_docs/version-v10.1.x/04-middleware/01-callbacks/images/ics4-callbackflow.svg rename to docs/versioned_docs/version-v10.4.x/04-middleware/01-callbacks/images/ics4-callbackflow.svg diff --git a/docs/versioned_docs/version-v10.1.x/04-middleware/_category_.json b/docs/versioned_docs/version-v10.4.x/04-middleware/_category_.json similarity index 100% rename from docs/versioned_docs/version-v10.1.x/04-middleware/_category_.json rename to docs/versioned_docs/version-v10.4.x/04-middleware/_category_.json diff --git a/docs/versioned_docs/version-v10.1.x/05-migrations/01-support-denoms-with-slashes.md b/docs/versioned_docs/version-v10.4.x/05-migrations/01-support-denoms-with-slashes.md similarity index 100% rename from docs/versioned_docs/version-v10.1.x/05-migrations/01-support-denoms-with-slashes.md rename to docs/versioned_docs/version-v10.4.x/05-migrations/01-support-denoms-with-slashes.md diff --git a/docs/versioned_docs/version-v10.1.x/05-migrations/02-sdk-to-v1.md b/docs/versioned_docs/version-v10.4.x/05-migrations/02-sdk-to-v1.md similarity index 100% rename from docs/versioned_docs/version-v10.1.x/05-migrations/02-sdk-to-v1.md rename to docs/versioned_docs/version-v10.4.x/05-migrations/02-sdk-to-v1.md diff --git a/docs/versioned_docs/version-v10.1.x/05-migrations/03-v1-to-v2.md b/docs/versioned_docs/version-v10.4.x/05-migrations/03-v1-to-v2.md similarity index 100% rename from docs/versioned_docs/version-v10.1.x/05-migrations/03-v1-to-v2.md rename to docs/versioned_docs/version-v10.4.x/05-migrations/03-v1-to-v2.md diff --git 
a/docs/versioned_docs/version-v10.1.x/05-migrations/04-v2-to-v3.md b/docs/versioned_docs/version-v10.4.x/05-migrations/04-v2-to-v3.md similarity index 100% rename from docs/versioned_docs/version-v10.1.x/05-migrations/04-v2-to-v3.md rename to docs/versioned_docs/version-v10.4.x/05-migrations/04-v2-to-v3.md diff --git a/docs/versioned_docs/version-v10.1.x/05-migrations/05-v3-to-v4.md b/docs/versioned_docs/version-v10.4.x/05-migrations/05-v3-to-v4.md similarity index 100% rename from docs/versioned_docs/version-v10.1.x/05-migrations/05-v3-to-v4.md rename to docs/versioned_docs/version-v10.4.x/05-migrations/05-v3-to-v4.md diff --git a/docs/versioned_docs/version-v10.1.x/05-migrations/06-v4-to-v5.md b/docs/versioned_docs/version-v10.4.x/05-migrations/06-v4-to-v5.md similarity index 100% rename from docs/versioned_docs/version-v10.1.x/05-migrations/06-v4-to-v5.md rename to docs/versioned_docs/version-v10.4.x/05-migrations/06-v4-to-v5.md diff --git a/docs/versioned_docs/version-v10.1.x/05-migrations/07-v5-to-v6.md b/docs/versioned_docs/version-v10.4.x/05-migrations/07-v5-to-v6.md similarity index 100% rename from docs/versioned_docs/version-v10.1.x/05-migrations/07-v5-to-v6.md rename to docs/versioned_docs/version-v10.4.x/05-migrations/07-v5-to-v6.md diff --git a/docs/versioned_docs/version-v10.1.x/05-migrations/08-v6-to-v7.md b/docs/versioned_docs/version-v10.4.x/05-migrations/08-v6-to-v7.md similarity index 100% rename from docs/versioned_docs/version-v10.1.x/05-migrations/08-v6-to-v7.md rename to docs/versioned_docs/version-v10.4.x/05-migrations/08-v6-to-v7.md diff --git a/docs/versioned_docs/version-v10.1.x/05-migrations/09-v7-to-v7_1.md b/docs/versioned_docs/version-v10.4.x/05-migrations/09-v7-to-v7_1.md similarity index 100% rename from docs/versioned_docs/version-v10.1.x/05-migrations/09-v7-to-v7_1.md rename to docs/versioned_docs/version-v10.4.x/05-migrations/09-v7-to-v7_1.md diff --git a/docs/versioned_docs/version-v10.1.x/05-migrations/10-v7_2-to-v7_3.md b/docs/versioned_docs/version-v10.4.x/05-migrations/10-v7_2-to-v7_3.md similarity index 100% rename from docs/versioned_docs/version-v10.1.x/05-migrations/10-v7_2-to-v7_3.md rename to docs/versioned_docs/version-v10.4.x/05-migrations/10-v7_2-to-v7_3.md diff --git a/docs/versioned_docs/version-v10.1.x/05-migrations/11-v7-to-v8.md b/docs/versioned_docs/version-v10.4.x/05-migrations/11-v7-to-v8.md similarity index 100% rename from docs/versioned_docs/version-v10.1.x/05-migrations/11-v7-to-v8.md rename to docs/versioned_docs/version-v10.4.x/05-migrations/11-v7-to-v8.md diff --git a/docs/versioned_docs/version-v10.1.x/05-migrations/12-v8-to-v8_1.md b/docs/versioned_docs/version-v10.4.x/05-migrations/12-v8-to-v8_1.md similarity index 100% rename from docs/versioned_docs/version-v10.1.x/05-migrations/12-v8-to-v8_1.md rename to docs/versioned_docs/version-v10.4.x/05-migrations/12-v8-to-v8_1.md diff --git a/docs/versioned_docs/version-v10.1.x/05-migrations/13-v8_1-to-v10.md b/docs/versioned_docs/version-v10.4.x/05-migrations/13-v8_1-to-v10.md similarity index 100% rename from docs/versioned_docs/version-v10.1.x/05-migrations/13-v8_1-to-v10.md rename to docs/versioned_docs/version-v10.4.x/05-migrations/13-v8_1-to-v10.md diff --git a/docs/versioned_docs/version-v10.1.x/05-migrations/_category_.json b/docs/versioned_docs/version-v10.4.x/05-migrations/_category_.json similarity index 100% rename from docs/versioned_docs/version-v10.1.x/05-migrations/_category_.json rename to docs/versioned_docs/version-v10.4.x/05-migrations/_category_.json diff 
--git a/docs/versioned_docs/version-v10.1.x/05-migrations/images/auth-module-decision-tree.png b/docs/versioned_docs/version-v10.4.x/05-migrations/images/auth-module-decision-tree.png similarity index 100% rename from docs/versioned_docs/version-v10.1.x/05-migrations/images/auth-module-decision-tree.png rename to docs/versioned_docs/version-v10.4.x/05-migrations/images/auth-module-decision-tree.png diff --git a/docs/versioned_docs/version-v10.1.x/05-migrations/migration.template.md b/docs/versioned_docs/version-v10.4.x/05-migrations/migration.template.md similarity index 100% rename from docs/versioned_docs/version-v10.1.x/05-migrations/migration.template.md rename to docs/versioned_docs/version-v10.4.x/05-migrations/migration.template.md diff --git a/docs/versioned_docs/version-v10.1.x/images/ibcoverview-dark.svg b/docs/versioned_docs/version-v10.4.x/images/ibcoverview-dark.svg similarity index 100% rename from docs/versioned_docs/version-v10.1.x/images/ibcoverview-dark.svg rename to docs/versioned_docs/version-v10.4.x/images/ibcoverview-dark.svg diff --git a/docs/versioned_docs/version-v10.1.x/images/ibcoverview-light.svg b/docs/versioned_docs/version-v10.4.x/images/ibcoverview-light.svg similarity index 100% rename from docs/versioned_docs/version-v10.1.x/images/ibcoverview-light.svg rename to docs/versioned_docs/version-v10.4.x/images/ibcoverview-light.svg diff --git a/docs/versioned_sidebars/version-v10.1.x-sidebars.json b/docs/versioned_sidebars/version-v10.4.x-sidebars.json similarity index 100% rename from docs/versioned_sidebars/version-v10.1.x-sidebars.json rename to docs/versioned_sidebars/version-v10.4.x-sidebars.json diff --git a/docs/versions.json b/docs/versions.json index 43ba05d19c4..84942ad26fc 100644 --- a/docs/versions.json +++ b/docs/versions.json @@ -1,5 +1,5 @@ [ - "v10.1.x", + "v10.4.x", "v8.5.x", "v7.8.x", "v6.3.x", diff --git a/e2e/README.md b/e2e/README.md index a085562f15c..28b1ff1d151 100644 --- a/e2e/README.md +++ b/e2e/README.md @@ -89,11 +89,11 @@ selection. make e2e-test test=TestMsgSubmitTx_SuccessfulTransfer ``` -> Note: sometimes it can be useful to make changes to [interchaintest](https://github.com/strangelove-ventures/interchaintest) +> Note: sometimes it can be useful to make changes to [interchaintest](https://github.com/cosmos/interchaintest) > when running tests locally. In order to do this, add the following line to > e2e/go.mod -`replace github.com/strangelove-ventures/interchaintest => ../../interchaintest` +`replace github.com/cosmos/interchaintest => ../../interchaintest` Or point it to any local checkout you have. @@ -122,7 +122,7 @@ This should be set to the path of a valid config file you want to use, setting t ### interchaintest -These E2E tests use the [interchaintest framework](https://github.com/strangelove-ventures/interchaintest). This framework creates chains and relayers in containers and allows for arbitrary commands to be executed in the chain containers, +These E2E tests use the [interchaintest framework](https://github.com/cosmos/interchaintest). This framework creates chains and relayers in containers and allows for arbitrary commands to be executed in the chain containers, as well as allowing us to broadcast arbitrary messages which are signed on behalf of a user created in the test. 
### Test Suites diff --git a/e2e/ci-e2e-config.yaml b/e2e/ci-e2e-config.yaml index d9160583b51..0c04ff3b92b 100644 --- a/e2e/ci-e2e-config.yaml +++ b/e2e/ci-e2e-config.yaml @@ -2,16 +2,16 @@ # Many of these fields can be overridden with environment variables. # All fields that support this have the corresponding environment variable name in a comment beside the field. -# | Environment Variable | Description | Default Value | -# |----------------------|-------------------------------------------|------------------------------| -# | CHAIN_IMAGE | The image that will be used for the chain | ghcr.io/cosmos/ibc-go-simd | -# | CHAIN_A_TAG | The tag used for chain A | N/A (must be set) | -# | CHAIN_B_TAG | The tag used for chain B | N/A (must be set) | -# | CHAIN_C_TAG | The tag used for chain C | N/A (optional; fallback to A)| -# | CHAIN_D_TAG | The tag used for chain D | N/A (optional; fallback to A)| -# | CHAIN_BINARY | The binary used in the container | simd | -# | RELAYER_TAG | The tag used for the relayer | 1.10.4 | -# | RELAYER_ID | The type of relayer to use (rly/hermes) | hermes | +# | Environment Variable | Description | Default Value | +# |----------------------|-------------------------------------------|-------------------------------| +# | CHAIN_IMAGE | The image that will be used for the chain | ghcr.io/cosmos/ibc-go-simd | +# | CHAIN_A_TAG | The tag used for chain A | N/A (must be set) | +# | CHAIN_B_TAG | The tag used for chain B | N/A (must be set) | +# | CHAIN_C_TAG | The tag used for chain C | N/A (optional; fallback to A) | +# | CHAIN_D_TAG | The tag used for chain D | N/A (optional; fallback to A) | +# | CHAIN_BINARY | The binary used in the container | simd | +# | RELAYER_TAG | The tag used for the relayer | 1.13.1 | +# | RELAYER_ID | The type of relayer to use (rly/hermes) | hermes | # see sample.config.yaml for a bare minimum configuration example. # set env E2E_CONFIG_PATH to point to this file to use it. @@ -51,7 +51,7 @@ activeRelayer: hermes # override with RELAYER_ID relayers: - id: hermes image: ghcr.io/informalsystems/hermes - tag: "1.10.4" + tag: "1.13.1" - id: rly image: ghcr.io/cosmos/relayer tag: "latest" @@ -76,6 +76,6 @@ upgrades: - planName: "v8.1" tag: "v8.1.0" - planName: "v10" - tag: "main" # TODO: Update with correct tag as soon as we have a release + tag: "branch-release-v10.4.x" - planName: "ibcwasm-v8" tag: "v8.0.0-e2e-upgrade" diff --git a/e2e/dockerutil/dockerutil.go b/e2e/dockerutil/dockerutil.go index 175abe8aa10..3214c6654bd 100644 --- a/e2e/dockerutil/dockerutil.go +++ b/e2e/dockerutil/dockerutil.go @@ -10,7 +10,7 @@ import ( dockertypes "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/container" "github.com/docker/docker/api/types/filters" - dockerclient "github.com/docker/docker/client" + dockerclient "github.com/moby/moby/client" ) const testLabel = "ibc-test" @@ -22,13 +22,13 @@ func GetTestContainers(ctx context.Context, suiteName string, dc *dockerclient.C testContainers, err := dc.ContainerList(ctx, container.ListOptions{ All: true, Filters: filters.NewArgs( - // see: https://github.com/strangelove-ventures/interchaintest/blob/0bdc194c2aa11aa32479f32b19e1c50304301981/internal/dockerutil/setup.go#L31-L36 + // see: https://github.com/cosmos/interchaintest/blob/0bdc194c2aa11aa32479f32b19e1c50304301981/internal/dockerutil/setup.go#L31-L36 // for the suiteName needed to identify test containers. 
filters.Arg("label", testLabel+"="+suiteName), ), }) if err != nil { - return nil, fmt.Errorf("failed listing containers: %s", err) + return nil, fmt.Errorf("failed listing containers: %w", err) } return testContainers, nil @@ -41,7 +41,7 @@ func GetContainerLogs(ctx context.Context, dc *dockerclient.Client, containerNam ShowStderr: true, }) if err != nil { - return nil, fmt.Errorf("failed reading logs in test cleanup: %s", err) + return nil, fmt.Errorf("failed reading logs in test cleanup: %w", err) } return io.ReadAll(readCloser) } diff --git a/e2e/go.mod b/e2e/go.mod index b019545a9ac..d4da4e34422 100644 --- a/e2e/go.mod +++ b/e2e/go.mod @@ -1,14 +1,15 @@ module github.com/cosmos/ibc-go/e2e -go 1.23.8 +go 1.24.3 + +// TODO: Remove when v11 release of interchaintest is available (that is where this one is coming from) +replace github.com/cosmos/interchain-security/v7 => github.com/cosmos/interchain-security/v7 v7.0.0-20250622154438-73c73cf686e5 replace ( github.com/cosmos/ibc-go/modules/light-clients/08-wasm/v10 => ../modules/light-clients/08-wasm // uncomment to use the local version of ibc-go, you will need to run `go mod tidy` in e2e directory. github.com/cosmos/ibc-go/v10 => ../ - - github.com/strangelove-ventures/interchaintest/v8 => github.com/gjermundgaraba/interchaintest/v8 v8.0.0-20250302163936-9fca2b7de400 - + github.com/gogo/protobuf => github.com/regen-network/protobuf v1.3.3-alpha.regen.1 github.com/syndtr/goleveldb => github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 ) @@ -18,36 +19,38 @@ require ( cosmossdk.io/math v1.5.3 cosmossdk.io/x/upgrade v0.2.0 github.com/cometbft/cometbft v0.38.17 - github.com/cosmos/cosmos-sdk v0.53.0 + github.com/cosmos/cosmos-sdk v0.53.4 github.com/cosmos/gogoproto v1.7.0 - github.com/cosmos/ibc-go/modules/light-clients/08-wasm/v10 v10.2.0 - github.com/cosmos/ibc-go/v10 v10.2.0 - github.com/docker/docker v27.3.1+incompatible + github.com/cosmos/ibc-go/modules/light-clients/08-wasm/v10 v10.3.0 + github.com/cosmos/ibc-go/v10 v10.3.0 + github.com/cosmos/interchaintest/v10 v10.0.0 + github.com/docker/docker v28.0.0+incompatible + github.com/moby/moby v27.5.1+incompatible github.com/pelletier/go-toml v1.9.5 - github.com/strangelove-ventures/interchaintest/v8 v8.2.1-0.20240419152858-c8b741617cd8 - github.com/stretchr/testify v1.10.0 + github.com/stretchr/testify v1.11.1 go.uber.org/zap v1.27.0 - golang.org/x/mod v0.24.0 - google.golang.org/grpc v1.72.0 - google.golang.org/protobuf v1.36.6 + golang.org/x/mod v0.25.0 + google.golang.org/grpc v1.75.0 + google.golang.org/protobuf v1.36.8 gopkg.in/yaml.v2 v2.4.0 ) require ( - cel.dev/expr v0.20.0 // indirect + cel.dev/expr v0.24.0 // indirect cloud.google.com/go v0.116.0 // indirect cloud.google.com/go/auth v0.14.1 // indirect cloud.google.com/go/auth/oauth2adapt v0.2.7 // indirect - cloud.google.com/go/compute/metadata v0.6.0 // indirect + cloud.google.com/go/compute/metadata v0.7.0 // indirect cloud.google.com/go/iam v1.2.2 // indirect cloud.google.com/go/monitoring v1.21.2 // indirect cloud.google.com/go/storage v1.49.0 // indirect - cosmossdk.io/collections v1.2.1 // indirect + cosmossdk.io/collections v1.3.1 // indirect cosmossdk.io/core v0.11.3 // indirect - cosmossdk.io/depinject v1.2.0 // indirect - cosmossdk.io/log v1.6.0 // indirect + cosmossdk.io/depinject v1.2.1 // indirect + cosmossdk.io/log v1.6.1 // indirect cosmossdk.io/schema v1.1.0 // indirect cosmossdk.io/store v1.1.2 // indirect + cosmossdk.io/x/evidence v0.2.0 // indirect cosmossdk.io/x/feegrant v0.2.0 // indirect 
cosmossdk.io/x/tx v0.14.0 // indirect filippo.io/edwards25519 v1.1.0 // indirect @@ -57,24 +60,25 @@ require ( github.com/CosmWasm/wasmvm/v2 v2.2.4 // indirect github.com/DataDog/datadog-go v4.8.3+incompatible // indirect github.com/DataDog/zstd v1.5.7 // indirect - github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.26.0 // indirect + github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.29.0 // indirect github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.48.1 // indirect github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.48.1 // indirect github.com/Microsoft/go-winio v0.6.2 // indirect + github.com/StackExchange/wmi v1.2.1 // indirect github.com/avast/retry-go/v4 v4.5.1 // indirect github.com/aws/aws-sdk-go v1.49.0 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d // indirect github.com/bgentry/speakeasy v0.2.0 // indirect github.com/bits-and-blooms/bitset v1.22.0 // indirect - github.com/bytedance/sonic v1.13.2 // indirect - github.com/bytedance/sonic/loader v0.2.4 // indirect + github.com/btcsuite/btcd v0.22.1 // indirect + github.com/bytedance/sonic v1.14.0 // indirect + github.com/bytedance/sonic/loader v0.3.0 // indirect github.com/cenkalti/backoff/v4 v4.3.0 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/chzyer/readline v1.5.1 // indirect github.com/cloudwego/base64x v0.1.5 // indirect - github.com/cncf/xds/go v0.0.0-20250121191232-2f005788dc42 // indirect - github.com/cockroachdb/apd/v2 v2.0.2 // indirect + github.com/cncf/xds/go v0.0.0-20250501225837-2ac532fd4443 // indirect github.com/cockroachdb/errors v1.12.0 // indirect github.com/cockroachdb/fifo v0.0.0-20240616162244-4768e80dfb9a // indirect github.com/cockroachdb/logtags v0.0.0-20241215232642-bb51bb14a506 // indirect @@ -82,16 +86,21 @@ require ( github.com/cockroachdb/redact v1.1.6 // indirect github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 // indirect github.com/cometbft/cometbft-db v0.14.1 // indirect + github.com/consensys/gnark-crypto v0.18.0 // indirect github.com/cosmos/btcutil v1.0.5 // indirect - github.com/cosmos/cosmos-db v1.1.1 // indirect + github.com/cosmos/cosmos-db v1.1.3 // indirect github.com/cosmos/cosmos-proto v1.0.0-beta.5 // indirect github.com/cosmos/go-bip39 v1.0.0 // indirect github.com/cosmos/gogogateway v1.2.0 // indirect - github.com/cosmos/iavl v1.2.2 // indirect + github.com/cosmos/iavl v1.2.4 // indirect github.com/cosmos/ics23/go v0.11.0 // indirect + github.com/cosmos/interchain-security/v7 v7.0.0-20250408210344-06e0dc6bf6d6 // indirect github.com/cosmos/ledger-cosmos-go v0.14.0 // indirect + github.com/crate-crypto/go-eth-kzg v1.3.0 // indirect + github.com/crate-crypto/go-ipa v0.0.0-20240724233137-53bbb0ceb27a // indirect github.com/danieljoos/wincred v1.2.1 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect + github.com/deckarep/golang-set/v2 v2.6.0 // indirect github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 // indirect github.com/desertbit/timer v1.0.1 // indirect github.com/dgraph-io/badger/v4 v4.2.0 // indirect @@ -104,22 +113,25 @@ require ( github.com/emicklei/dot v1.6.2 // indirect github.com/envoyproxy/go-control-plane/envoy v1.32.4 // indirect github.com/envoyproxy/protoc-gen-validate v1.2.1 // indirect - github.com/ethereum/go-ethereum v1.15.11 // indirect + github.com/ethereum/c-kzg-4844/v2 v2.1.0 // indirect + 
github.com/ethereum/go-ethereum v1.16.3 // indirect + github.com/ethereum/go-verkle v0.2.2 // indirect github.com/fatih/color v1.17.0 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect github.com/fsnotify/fsnotify v1.9.0 // indirect - github.com/getsentry/sentry-go v0.32.0 // indirect - github.com/go-jose/go-jose/v4 v4.0.5 // indirect + github.com/getsentry/sentry-go v0.33.0 // indirect + github.com/go-jose/go-jose/v4 v4.1.1 // indirect github.com/go-kit/kit v0.13.0 // indirect github.com/go-kit/log v0.2.1 // indirect github.com/go-logfmt/logfmt v0.6.0 // indirect - github.com/go-logr/logr v1.4.2 // indirect + github.com/go-logr/logr v1.4.3 // indirect github.com/go-logr/stdr v1.2.2 // indirect + github.com/go-ole/go-ole v1.3.0 // indirect github.com/go-viper/mapstructure/v2 v2.2.1 // indirect github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2 // indirect github.com/gogo/googleapis v1.4.1 // indirect - github.com/gogo/protobuf v1.3.2 // indirect - github.com/golang/glog v1.2.4 // indirect + github.com/gogo/protobuf v1.3.3 // indirect + github.com/golang/glog v1.2.5 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.4 // indirect github.com/golang/snappy v0.0.5-0.20231225225746-43d5d4cd4e0e // indirect @@ -138,6 +150,7 @@ require ( github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 // indirect github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c // indirect + github.com/gtank/merlin v0.1.1 // indirect github.com/hashicorp/go-cleanhttp v0.5.2 // indirect github.com/hashicorp/go-getter v1.7.8 // indirect github.com/hashicorp/go-hclog v1.6.3 // indirect @@ -145,7 +158,7 @@ require ( github.com/hashicorp/go-metrics v0.5.4 // indirect github.com/hashicorp/go-plugin v1.6.3 // indirect github.com/hashicorp/go-safetemp v1.0.0 // indirect - github.com/hashicorp/go-version v1.6.0 // indirect + github.com/hashicorp/go-version v1.7.0 // indirect github.com/hashicorp/golang-lru v1.0.2 // indirect github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect github.com/hashicorp/yamux v0.1.2 // indirect @@ -158,22 +171,24 @@ require ( github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/jmhodges/levigo v1.0.0 // indirect - github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect github.com/klauspost/compress v1.18.0 // indirect github.com/klauspost/cpuid/v2 v2.2.10 // indirect github.com/kr/pretty v0.3.1 // indirect github.com/kr/text v0.2.0 // indirect github.com/lib/pq v1.10.9 // indirect + github.com/libp2p/go-buffer-pool v0.1.0 // indirect github.com/linxGnu/grocksdb v1.9.2 // indirect github.com/manifoldco/promptui v0.9.0 // indirect github.com/mattn/go-colorable v0.1.14 // indirect github.com/mattn/go-isatty v0.0.20 // indirect + github.com/mimoo/StrobeGo v0.0.0-20220103164710-9a04d6ca976b // indirect github.com/minio/highwayhash v1.0.3 // indirect github.com/mitchellh/go-homedir v1.1.0 // indirect github.com/mitchellh/go-testing-interface v1.14.1 // indirect github.com/moby/docker-image-spec v1.3.1 // indirect github.com/mtibben/percent v0.2.1 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect + github.com/ncruces/go-strftime v0.1.9 // indirect github.com/oasisprotocol/curve25519-voi v0.0.0-20230904125328-1f23a7beb09a // indirect github.com/oklog/run v1.1.0 // indirect github.com/opencontainers/go-digest v1.0.0 // indirect 
@@ -195,17 +210,25 @@ require ( github.com/sagikazarmark/locafero v0.7.0 // indirect github.com/sasha-s/go-deadlock v0.3.5 // indirect github.com/shamaton/msgpack/v2 v2.2.0 // indirect + github.com/shirou/gopsutil v3.21.4-0.20210419000835-c7a38de76ee5+incompatible // indirect github.com/sourcegraph/conc v0.3.0 // indirect github.com/spf13/afero v1.12.0 // indirect - github.com/spf13/cast v1.8.0 // indirect - github.com/spf13/cobra v1.9.1 // indirect - github.com/spf13/pflag v1.0.6 // indirect + github.com/spf13/cast v1.9.2 // indirect + github.com/spf13/cobra v1.10.1 // indirect + github.com/spf13/pflag v1.0.9 // indirect github.com/spf13/viper v1.20.1 // indirect github.com/spiffe/go-spiffe/v2 v2.5.0 // indirect github.com/subosito/gotenv v1.6.0 // indirect + github.com/supranational/blst v0.3.14 // indirect github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d // indirect github.com/tendermint/go-amino v0.16.0 // indirect + github.com/tendermint/tendermint v0.38.0-dev // indirect github.com/tidwall/btree v1.7.0 // indirect + github.com/tidwall/gjson v1.18.0 // indirect + github.com/tidwall/match v1.1.1 // indirect + github.com/tidwall/pretty v1.2.0 // indirect + github.com/tklauser/go-sysconf v0.3.12 // indirect + github.com/tklauser/numcpus v0.6.1 // indirect github.com/twitchyliquid64/golang-asm v0.15.1 // indirect github.com/ulikunitz/xz v0.5.11 // indirect github.com/zeebo/errs v1.4.0 // indirect @@ -214,46 +237,44 @@ require ( go.etcd.io/bbolt v1.4.0-alpha.1 // indirect go.opencensus.io v0.24.0 // indirect go.opentelemetry.io/auto/sdk v1.1.0 // indirect - go.opentelemetry.io/contrib/detectors/gcp v1.34.0 // indirect - go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.58.0 // indirect + go.opentelemetry.io/contrib/detectors/gcp v1.36.0 // indirect + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.60.0 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.59.0 // indirect - go.opentelemetry.io/otel v1.34.0 // indirect + go.opentelemetry.io/otel v1.37.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.34.0 // indirect - go.opentelemetry.io/otel/metric v1.34.0 // indirect - go.opentelemetry.io/otel/sdk v1.34.0 // indirect - go.opentelemetry.io/otel/sdk/metric v1.34.0 // indirect - go.opentelemetry.io/otel/trace v1.34.0 // indirect + go.opentelemetry.io/otel/metric v1.37.0 // indirect + go.opentelemetry.io/otel/sdk v1.37.0 // indirect + go.opentelemetry.io/otel/sdk/metric v1.37.0 // indirect + go.opentelemetry.io/otel/trace v1.37.0 // indirect go.opentelemetry.io/proto/otlp v1.5.0 // indirect go.uber.org/mock v0.5.2 // indirect go.uber.org/multierr v1.11.0 // indirect - golang.org/x/arch v0.15.0 // indirect - golang.org/x/crypto v0.37.0 // indirect + go.yaml.in/yaml/v2 v2.4.2 // indirect + golang.org/x/arch v0.17.0 // indirect + golang.org/x/crypto v0.39.0 // indirect golang.org/x/exp v0.0.0-20250305212735-054e65f0b394 // indirect - golang.org/x/net v0.39.0 // indirect - golang.org/x/oauth2 v0.27.0 // indirect - golang.org/x/sync v0.13.0 // indirect - golang.org/x/sys v0.32.0 // indirect - golang.org/x/term v0.31.0 // indirect - golang.org/x/text v0.24.0 // indirect + golang.org/x/net v0.41.0 // indirect + golang.org/x/oauth2 v0.30.0 // indirect + golang.org/x/sync v0.15.0 // indirect + golang.org/x/sys v0.33.0 // indirect + golang.org/x/term v0.32.0 // indirect + golang.org/x/text v0.26.0 // indirect golang.org/x/time v0.10.0 // indirect - golang.org/x/tools v0.31.0 // indirect 
+ golang.org/x/tools v0.33.0 // indirect google.golang.org/api v0.222.0 // indirect google.golang.org/genproto v0.0.0-20241118233622-e639e219e697 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20250414145226-207652e42e2e // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20250422160041-2d3770c4ea7f // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20250707201910-8d1bb00bc6a7 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20250707201910-8d1bb00bc6a7 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect gotest.tools/v3 v3.5.2 // indirect - lukechampine.com/uint128 v1.3.0 // indirect - modernc.org/cc/v3 v3.41.0 // indirect - modernc.org/ccgo/v3 v3.16.15 // indirect - modernc.org/libc v1.37.1 // indirect + modernc.org/gc/v3 v3.0.0-20240107210532-573471604cb6 // indirect + modernc.org/libc v1.55.3 // indirect modernc.org/mathutil v1.6.0 // indirect - modernc.org/memory v1.7.2 // indirect - modernc.org/opt v0.1.3 // indirect - modernc.org/sqlite v1.28.0 // indirect + modernc.org/memory v1.8.0 // indirect + modernc.org/sqlite v1.31.1 // indirect modernc.org/strutil v1.2.0 // indirect modernc.org/token v1.1.0 // indirect - nhooyr.io/websocket v1.8.11 // indirect + nhooyr.io/websocket v1.8.17 // indirect pgregory.net/rapid v1.2.0 // indirect - sigs.k8s.io/yaml v1.4.0 // indirect + sigs.k8s.io/yaml v1.6.0 // indirect ) diff --git a/e2e/go.sum b/e2e/go.sum index 1cd92728c91..929a89e6f52 100644 --- a/e2e/go.sum +++ b/e2e/go.sum @@ -1,5 +1,5 @@ -cel.dev/expr v0.20.0 h1:OunBvVCfvpWlt4dN7zg3FM6TDkzOePe1+foGJ9AXeeI= -cel.dev/expr v0.20.0/go.mod h1:MrpN08Q+lEBs+bGYdLxxHkZoUSsCp0nSKTs0nTymJgw= +cel.dev/expr v0.24.0 h1:56OvJKSH3hDGL0ml5uSxZmz3/3Pq4tJ+fb1unVLAFcY= +cel.dev/expr v0.24.0/go.mod h1:hLPLo1W4QUmuYdA72RBX06QTs6MXw941piREPl3Yfiw= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= @@ -184,8 +184,8 @@ cloud.google.com/go/compute/metadata v0.1.0/go.mod h1:Z1VN+bulIf6bt4P/C37K4DyZYZ cloud.google.com/go/compute/metadata v0.2.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= cloud.google.com/go/compute/metadata v0.2.1/go.mod h1:jgHgmJd2RKBGzXqF5LR2EZMGxBkeanZ9wwa75XHJgOM= cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= -cloud.google.com/go/compute/metadata v0.6.0 h1:A6hENjEsCDtC1k8byVsgwvVcioamEHvZ4j01OwKxG9I= -cloud.google.com/go/compute/metadata v0.6.0/go.mod h1:FjyFAW1MW0C203CEOMDTu3Dk1FlqW3Rga40jzHL4hfg= +cloud.google.com/go/compute/metadata v0.7.0 h1:PBWF+iiAerVNe8UCHxdOt6eHLVc3ydFeOCw78U8ytSU= +cloud.google.com/go/compute/metadata v0.7.0/go.mod h1:j5MvL9PprKL39t166CoB1uVHfQMs4tFQZZcKwksXUjo= cloud.google.com/go/contactcenterinsights v1.3.0/go.mod h1:Eu2oemoePuEFc/xKFPjbTuPSj0fYJcPls9TFlPNnHHY= cloud.google.com/go/contactcenterinsights v1.4.0/go.mod h1:L2YzkGbPsv+vMQMCADxJoT9YiTTnSEd6fEvCeHTYVck= cloud.google.com/go/contactcenterinsights v1.6.0/go.mod h1:IIDlT6CLcDoyv79kDv8iWxMSTZhLxSCofVV5W6YFM/w= @@ -618,16 +618,16 @@ cosmossdk.io/api v0.9.2 h1:9i9ptOBdmoIEVEVWLtYYHjxZonlF/aOVODLFaxpmNtg= cosmossdk.io/api v0.9.2/go.mod h1:CWt31nVohvoPMTlPv+mMNCtC0a7BqRdESjCsstHcTkU= cosmossdk.io/client/v2 v2.0.0-beta.9 h1:xc06zg4G858/pK5plhf8RCfo+KR2mdDKJNrEkfrVAqc= cosmossdk.io/client/v2 v2.0.0-beta.9/go.mod h1:pHf3CCHX5gmbL9rDCVbXhGI2+/DdAVTEZSLpdd5V9Zs= -cosmossdk.io/collections v1.2.1 
h1:mAlNMs5vJwkda4TA+k5q/43p24RVAQ/qyDrjANu3BXE= -cosmossdk.io/collections v1.2.1/go.mod h1:PSsEJ/fqny0VPsHLFT6gXDj/2C1tBOTS9eByK0+PBFU= +cosmossdk.io/collections v1.3.1 h1:09e+DUId2brWsNOQ4nrk+bprVmMUaDH9xvtZkeqIjVw= +cosmossdk.io/collections v1.3.1/go.mod h1:ynvkP0r5ruAjbmedE+vQ07MT6OtJ0ZIDKrtJHK7Q/4c= cosmossdk.io/core v0.11.3 h1:mei+MVDJOwIjIniaKelE3jPDqShCc/F4LkNNHh+4yfo= cosmossdk.io/core v0.11.3/go.mod h1:9rL4RE1uDt5AJ4Tg55sYyHWXA16VmpHgbe0PbJc6N2Y= -cosmossdk.io/depinject v1.2.0 h1:6NW/FSK1IkWTrX7XxUpBmX1QMBozpEI9SsWkKTBc5zw= -cosmossdk.io/depinject v1.2.0/go.mod h1:pvitjtUxZZZTQESKNS9KhGjWVslJZxtO9VooRJYyPjk= +cosmossdk.io/depinject v1.2.1 h1:eD6FxkIjlVaNZT+dXTQuwQTKZrFZ4UrfCq1RKgzyhMw= +cosmossdk.io/depinject v1.2.1/go.mod h1:lqQEycz0H2JXqvOgVwTsjEdMI0plswI7p6KX+MVqFOM= cosmossdk.io/errors v1.0.2 h1:wcYiJz08HThbWxd/L4jObeLaLySopyyuUFB5w4AGpCo= cosmossdk.io/errors v1.0.2/go.mod h1:0rjgiHkftRYPj//3DrD6y8hcm40HcPv/dR4R/4efr0k= -cosmossdk.io/log v1.6.0 h1:SJIOmJ059wi1piyRgNRXKXhlDXGqnB5eQwhcZKv2tOk= -cosmossdk.io/log v1.6.0/go.mod h1:5cXXBvfBkR2/BcXmosdCSLXllvgSjphrrDVdfVRmBGM= +cosmossdk.io/log v1.6.1 h1:YXNwAgbDwMEKwDlCdH8vPcoggma48MgZrTQXCfmMBeI= +cosmossdk.io/log v1.6.1/go.mod h1:gMwsWyyDBjpdG9u2avCFdysXqxq28WJapJvu+vF1y+E= cosmossdk.io/math v1.5.3 h1:WH6tu6Z3AUCeHbeOSHg2mt9rnoiUWVWaQ2t6Gkll96U= cosmossdk.io/math v1.5.3/go.mod h1:uqcZv7vexnhMFJF+6zh9EWdm/+Ylyln34IvPnBauPCQ= cosmossdk.io/schema v1.1.0 h1:mmpuz3dzouCoyjjcMcA/xHBEmMChN+EHh8EHxHRHhzE= @@ -659,6 +659,8 @@ github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03 github.com/BurntSushi/toml v1.4.1-0.20240526193622-a339e1f7089c h1:pxW6RcqyfI9/kWtOwnv/G+AzdKuy2ZrqINhenH4HyNs= github.com/BurntSushi/toml v1.4.1-0.20240526193622-a339e1f7089c/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/ChainSafe/go-schnorrkel v1.0.0 h1:3aDA67lAykLaG1y3AOjs88dMxC88PgUuHRrLeDnvGIM= +github.com/ChainSafe/go-schnorrkel v1.0.0/go.mod h1:dpzHYVxLZcp8pjlV+O+UR8K0Hp/z7vcchBSbMBEhCw4= github.com/CosmWasm/wasmvm/v2 v2.2.4 h1:V3UwXJMA8TNOuQETppDQkaXAevF7gOWLYpKvrThPv7o= github.com/CosmWasm/wasmvm/v2 v2.2.4/go.mod h1:Aj/rB2KMRM8nAdbWxkO23rnQYb5KsoPuH9ZizSi0sVg= github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= @@ -666,8 +668,8 @@ github.com/DataDog/datadog-go v4.8.3+incompatible h1:fNGaYSuObuQb5nzeTQqowRAd9bp github.com/DataDog/datadog-go v4.8.3+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= github.com/DataDog/zstd v1.5.7 h1:ybO8RBeh29qrxIhCA9E8gKY6xfONU9T6G6aP9DTKfLE= github.com/DataDog/zstd v1.5.7/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw= -github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.26.0 h1:f2Qw/Ehhimh5uO1fayV0QIW7DShEQqhtUfhYc+cBPlw= -github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.26.0/go.mod h1:2bIszWvQRlJVmJLiuLhukLImRjKPcYdzzsx6darK02A= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.29.0 h1:UQUsRi8WTzhZntp5313l+CHIAT95ojUI2lpP/ExlZa4= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.29.0/go.mod h1:Cz6ft6Dkn3Et6l2v2a9/RpN7epQ1GtDlO6lj8bEcOvw= github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.48.1 h1:UQ0AhxogsIRZDkElkblfnwjc3IaltCm2HUMvezQaL7s= github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.48.1/go.mod 
h1:jyqM3eLpJ3IbIFDTKVz2rF9T/xWGW0rIriGwnz8l9Tk= github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/cloudmock v0.48.1 h1:oTX4vsorBZo/Zdum6OKPA4o7544hm6smoRv1QjpTwGo= @@ -680,9 +682,15 @@ github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERo github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 h1:TngWCqHvy9oXAN6lEVMRuU21PR1EtLVZJmdB18Gu3Rw= github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5/go.mod h1:lmUJ/7eu/Q8D7ML55dXQrVaamCz2vxCfdQBasLZfHKk= +github.com/OffchainLabs/prysm/v6 v6.0.4 h1:aqWCb2U3LfeahzjORvxXYsL1ebKWT1AUu3Ya3y7LApE= +github.com/OffchainLabs/prysm/v6 v6.0.4/go.mod h1:lMkHT3gWiCOqo4rbuhLTU4FoQ/THni9v6z4w9P6FRyU= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= +github.com/StackExchange/wmi v1.2.1 h1:VIkavFPXSjcnS+O8yTq7NI32k0R5Aj+v39y29VYDOSA= +github.com/StackExchange/wmi v1.2.1/go.mod h1:rcmrprowKIVzvc+NUiLncP2uuArMWLCbu9SBzvHz7e8= +github.com/VictoriaMetrics/fastcache v1.12.2 h1:N0y9ASrJ0F6h0QaC3o6uJb3NIZ9VKLjCM7NQbSmF7WI= +github.com/VictoriaMetrics/fastcache v1.12.2/go.mod h1:AmC+Nzz1+3G2eCPapF6UcsnkThDcMsQicp4xDukwJYI= github.com/VividCortex/gohistogram v1.0.0 h1:6+hBz+qvs0JOrrNhhmR7lFxo5sINxBCGXrdtl/UvroE= github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g= github.com/adlio/schema v1.3.6 h1:k1/zc2jNfeiZBA5aFTRy37jlBIuCkXCm0XmvpzCKI9I= @@ -731,17 +739,23 @@ github.com/bits-and-blooms/bitset v1.22.0 h1:Tquv9S8+SGaS3EhyA+up3FXzmkhxPGjQQCk github.com/bits-and-blooms/bitset v1.22.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8= github.com/boombuler/barcode v1.0.0/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8= github.com/boombuler/barcode v1.0.1/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8= +github.com/btcsuite/btcd v0.22.1 h1:CnwP9LM/M9xuRrGSCGeMVs9iv09uMqwsVX7EeIpgV2c= +github.com/btcsuite/btcd v0.22.1/go.mod h1:wqgTSL29+50LRkmOVknEdmt8ZojIzhuWvgu/iptuN7Y= github.com/btcsuite/btcd/btcec/v2 v2.3.4 h1:3EJjcN70HCu/mwqlUsGK8GcNVyLVxFDlWurTXGPFfiQ= github.com/btcsuite/btcd/btcec/v2 v2.3.4/go.mod h1:zYzJ8etWJQIv1Ogk7OzpWjowwOdXY1W/17j2MW85J04= github.com/btcsuite/btcd/btcutil v1.1.6 h1:zFL2+c3Lb9gEgqKNzowKUPQNb8jV7v5Oaodi/AYFd6c= github.com/btcsuite/btcd/btcutil v1.1.6/go.mod h1:9dFymx8HpuLqBnsPELrImQeTQfKBQqzqGbbV3jK55aE= +github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1 h1:q0rUy8C/TYNBQS1+CGKw68tLOFYSNEs0TFnxxnS9+4U= +github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc= +github.com/btcsuite/btcutil v1.0.3-0.20201208143702-a53e38424cce h1:YtWJF7RHm2pYCvA5t0RPmAaLUhREsKuKd+SLhxFbFeQ= +github.com/btcsuite/btcutil v1.0.3-0.20201208143702-a53e38424cce/go.mod h1:0DVlHczLPewLcPGEIeUEzfOJhqGPQ0mJJRDBtD307+o= github.com/bufbuild/protocompile v0.14.1 h1:iA73zAf/fyljNjQKwYzUHD6AD4R8KMasmwa/FBatYVw= github.com/bufbuild/protocompile v0.14.1/go.mod h1:ppVdAIhbr2H8asPk6k4pY7t9zB1OU5DoEw9xY/FUi1c= -github.com/bytedance/sonic v1.13.2 h1:8/H1FempDZqC4VqjptGo14QQlJx8VdZJegxs6wwfqpQ= -github.com/bytedance/sonic v1.13.2/go.mod h1:o68xyaF9u2gvVBuGHPlUVCy+ZfmNNO5ETf1+KgkJhz4= +github.com/bytedance/sonic v1.14.0 h1:/OfKt8HFw0kh2rj8N0F6C/qPGRESq0BbaNZgcNXXzQQ= +github.com/bytedance/sonic v1.14.0/go.mod 
h1:WoEbx8WTcFJfzCe0hbmyTGrfjt8PzNEBdxlNUO24NhA= github.com/bytedance/sonic/loader v0.1.1/go.mod h1:ncP89zfokxS5LZrJxl5z0UJcsk4M4yY2JpfqGeCtNLU= -github.com/bytedance/sonic/loader v0.2.4 h1:ZWCw4stuXUsn1/+zQDqeE7JKP+QO47tz7QCNan80NzY= -github.com/bytedance/sonic/loader v0.2.4/go.mod h1:N8A3vUdtUebEY2/VQC0MyhYeKUFosQU6FxH2JmUe6VI= +github.com/bytedance/sonic/loader v0.3.0 h1:dskwH8edlzNMctoruo8FPTJDF3vLtDT0sXZwvZJyqeA= +github.com/bytedance/sonic/loader v0.3.0/go.mod h1:N8A3vUdtUebEY2/VQC0MyhYeKUFosQU6FxH2JmUe6VI= github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ= github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4= github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= @@ -751,6 +765,7 @@ github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyY github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw= +github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= @@ -786,13 +801,13 @@ github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWH github.com/cncf/xds/go v0.0.0-20220314180256-7f1daf1720fc/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20230105202645-06c439db220b/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20250121191232-2f005788dc42 h1:Om6kYQYDUk5wWbT0t0q6pvyM49i9XZAv9dDrkDA7gjk= -github.com/cncf/xds/go v0.0.0-20250121191232-2f005788dc42/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= +github.com/cncf/xds/go v0.0.0-20250501225837-2ac532fd4443 h1:aQ3y1lwWyqYPiWZThqv1aFbZMiM9vblcSArJRf2Irls= +github.com/cncf/xds/go v0.0.0-20250501225837-2ac532fd4443/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= github.com/cockroachdb/apd/v2 v2.0.2 h1:weh8u7Cneje73dDh+2tEVLUvyBc89iwepWCD8b8034E= github.com/cockroachdb/apd/v2 v2.0.2/go.mod h1:DDxRlzC2lo3/vSlmSoS7JkqbbrARPuFOGr0B9pvN3Gw= github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= -github.com/cockroachdb/datadriven v1.0.3-0.20230413201302-be42291fc80f h1:otljaYPt5hWxV3MUfO5dFPFiOXg9CyG5/kCfayTqsJ4= -github.com/cockroachdb/datadriven v1.0.3-0.20230413201302-be42291fc80f/go.mod h1:a9RdTaap04u637JoCzcUoIcDmvwSUtcUFtT/C3kJlTU= +github.com/cockroachdb/datadriven v1.0.3-0.20230801171734-e384cf455877 h1:1MLK4YpFtIEo3ZtMA5C795Wtv5VuUnrXX7mQG+aHg6o= +github.com/cockroachdb/datadriven v1.0.3-0.20230801171734-e384cf455877/go.mod h1:a9RdTaap04u637JoCzcUoIcDmvwSUtcUFtT/C3kJlTU= github.com/cockroachdb/errors v1.12.0 h1:d7oCs6vuIMUQRVbi6jWWWEJZahLCfJpnJSVobd1/sUo= github.com/cockroachdb/errors v1.12.0/go.mod h1:SvzfYNNBshAVbZ8wzNc/UPK3w1vf0dKDUP41ucAIf7g= github.com/cockroachdb/fifo v0.0.0-20240616162244-4768e80dfb9a 
h1:f52TdbU4D5nozMAhO9TvTJ2ZMCXtN4VIAmfrrZ0JXQ4= @@ -810,6 +825,8 @@ github.com/cometbft/cometbft v0.38.17 h1:FkrQNbAjiFqXydeAO81FUzriL4Bz0abYxN/eOHr github.com/cometbft/cometbft v0.38.17/go.mod h1:5l0SkgeLRXi6bBfQuevXjKqML1jjfJJlvI1Ulp02/o4= github.com/cometbft/cometbft-db v0.14.1 h1:SxoamPghqICBAIcGpleHbmoPqy+crij/++eZz3DlerQ= github.com/cometbft/cometbft-db v0.14.1/go.mod h1:KHP1YghilyGV/xjD5DP3+2hyigWx0WTp9X+0Gnx0RxQ= +github.com/consensys/gnark-crypto v0.18.0 h1:vIye/FqI50VeAr0B3dx+YjeIvmc3LWz4yEfbWBpTUf0= +github.com/consensys/gnark-crypto v0.18.0/go.mod h1:L3mXGFTe1ZN+RSJ+CLjUt9x7PNdx8ubaYfDROyp2Z8c= github.com/containerd/continuity v0.3.0 h1:nisirsYROK15TAMVukJOUyGJjz4BNQJBVsNvAXZJ/eg= github.com/containerd/continuity v0.3.0/go.mod h1:wJEAIwKOm/pBZuBd0JmeTvnLquTB1Ag8espWhkykbPM= github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I= @@ -820,12 +837,12 @@ github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSV github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/cosmos/btcutil v1.0.5 h1:t+ZFcX77LpKtDBhjucvnOH8C2l2ioGsBNEQ3jef8xFk= github.com/cosmos/btcutil v1.0.5/go.mod h1:IyB7iuqZMJlthe2tkIFL33xPyzbFYP0XVdS8P5lUPis= -github.com/cosmos/cosmos-db v1.1.1 h1:FezFSU37AlBC8S98NlSagL76oqBRWq/prTPvFcEJNCM= -github.com/cosmos/cosmos-db v1.1.1/go.mod h1:AghjcIPqdhSLP/2Z0yha5xPH3nLnskz81pBx3tcVSAw= +github.com/cosmos/cosmos-db v1.1.3 h1:7QNT77+vkefostcKkhrzDK9uoIEryzFrU9eoMeaQOPY= +github.com/cosmos/cosmos-db v1.1.3/go.mod h1:kN+wGsnwUJZYn8Sy5Q2O0vCYA99MJllkKASbs6Unb9U= github.com/cosmos/cosmos-proto v1.0.0-beta.5 h1:eNcayDLpip+zVLRLYafhzLvQlSmyab+RC5W7ZfmxJLA= github.com/cosmos/cosmos-proto v1.0.0-beta.5/go.mod h1:hQGLpiIUloJBMdQMMWb/4wRApmI9hjHH05nefC0Ojec= -github.com/cosmos/cosmos-sdk v0.53.0 h1:ZsB2tnBVudumV059oPuElcr0K1lLOutaI6WJ+osNTbI= -github.com/cosmos/cosmos-sdk v0.53.0/go.mod h1:UPcRyFwOUy2PfSFBWxBceO/HTjZOuBVqY583WyazIGs= +github.com/cosmos/cosmos-sdk v0.53.4 h1:kPF6vY68+/xi1/VebSZGpoxQqA52qkhUzqkrgeBn3Mg= +github.com/cosmos/cosmos-sdk v0.53.4/go.mod h1:7U3+WHZtI44dEOnU46+lDzBb2tFh1QlMvi8Z5JugopI= github.com/cosmos/go-bip39 v1.0.0 h1:pcomnQdrdH22njcAatO0yWojsUnCO3y2tNoV1cb6hHY= github.com/cosmos/go-bip39 v1.0.0/go.mod h1:RNJv0H/pOIVgxw6KS7QeX2a0Uo0aKUlfhZ4xuwvCdJw= github.com/cosmos/gogogateway v1.2.0 h1:Ae/OivNhp8DqBi/sh2A8a1D0y638GpL3tkmLQAiKxTE= @@ -833,14 +850,25 @@ github.com/cosmos/gogogateway v1.2.0/go.mod h1:iQpLkGWxYcnCdz5iAdLcRBSw3h7NXeOkZ github.com/cosmos/gogoproto v1.4.2/go.mod h1:cLxOsn1ljAHSV527CHOtaIP91kK6cCrZETRBrkzItWU= github.com/cosmos/gogoproto v1.7.0 h1:79USr0oyXAbxg3rspGh/m4SWNyoz/GLaAh0QlCe2fro= github.com/cosmos/gogoproto v1.7.0/go.mod h1:yWChEv5IUEYURQasfyBW5ffkMHR/90hiHgbNgrtp4j0= -github.com/cosmos/iavl v1.2.2 h1:qHhKW3I70w+04g5KdsdVSHRbFLgt3yY3qTMd4Xa4rC8= -github.com/cosmos/iavl v1.2.2/go.mod h1:GiM43q0pB+uG53mLxLDzimxM9l/5N9UuSY3/D0huuVw= +github.com/cosmos/iavl v1.2.4 h1:IHUrG8dkyueKEY72y92jajrizbkZKPZbMmG14QzsEkw= +github.com/cosmos/iavl v1.2.4/go.mod h1:GiM43q0pB+uG53mLxLDzimxM9l/5N9UuSY3/D0huuVw= github.com/cosmos/ics23/go v0.11.0 h1:jk5skjT0TqX5e5QJbEnwXIS2yI2vnmLOgpQPeM5RtnU= github.com/cosmos/ics23/go v0.11.0/go.mod h1:A8OjxPE67hHST4Icw94hOxxFEJMBG031xIGF/JHNIY0= +github.com/cosmos/interchain-security/v7 v7.0.0-20250622154438-73c73cf686e5 h1:6LnlaeVk/wHPicQG8NqElL1F1FDVGEl7xF0JXGGhgEs= +github.com/cosmos/interchain-security/v7 v7.0.0-20250622154438-73c73cf686e5/go.mod h1:9EIcx4CzKt/5/2KHtniyzt7Kz8Wgk6fdvyr+AFIUGHc= 
+github.com/cosmos/interchaintest/v10 v10.0.0 h1:DEsXOS10x191Q3EU4RkOnyqahGCTnLaBGEN//C2MvUQ= +github.com/cosmos/interchaintest/v10 v10.0.0/go.mod h1:caS4BRkAg8NkiZ8BsHEzjNBibt2OVdTctW5Ezz+Jqxs= github.com/cosmos/ledger-cosmos-go v0.14.0 h1:WfCHricT3rPbkPSVKRH+L4fQGKYHuGOK9Edpel8TYpE= github.com/cosmos/ledger-cosmos-go v0.14.0/go.mod h1:E07xCWSBl3mTGofZ2QnL4cIUzMbbGVyik84QYKbX3RA= github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/cpuguy83/go-md2man/v2 v2.0.6 h1:XJtiaUW6dEEqVuZiMTn1ldk455QWwEIsMIJlo5vtkx0= github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= +github.com/crate-crypto/go-eth-kzg v1.3.0 h1:05GrhASN9kDAidaFJOda6A4BEvgvuXbazXg/0E3OOdI= +github.com/crate-crypto/go-eth-kzg v1.3.0/go.mod h1:J9/u5sWfznSObptgfa92Jq8rTswn6ahQWEuiLHOjCUI= +github.com/crate-crypto/go-ipa v0.0.0-20240724233137-53bbb0ceb27a h1:W8mUrRp6NOVl3J+MYp5kPMoUZPp7aOYHtaua31lwRHg= +github.com/crate-crypto/go-ipa v0.0.0-20240724233137-53bbb0ceb27a/go.mod h1:sTwzHBvIzm2RfVCGNEBZgRyjwK40bVoun3ZnGOCafNM= +github.com/creachadair/taskgroup v0.3.2 h1:zlfutDS+5XG40AOxcHDSThxKzns8Tnr9jnr6VqkYlkM= +github.com/creachadair/taskgroup v0.3.2/go.mod h1:wieWwecHVzsidg2CsUnFinW1faVN4+kq+TDlRJQ0Wbk= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/danieljoos/wincred v1.2.1 h1:dl9cBrupW8+r5250DYkYxocLeZ1Y4vB1kxgtjxw8GQs= @@ -849,6 +877,10 @@ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dchest/siphash v1.2.3 h1:QXwFc8cFOR2dSa/gE6o/HokBMWtLUaNDVd+22aKHeEA= +github.com/dchest/siphash v1.2.3/go.mod h1:0NvQU092bT0ipiFN++/rXm69QG9tVxLAlQHIXMPAkHc= +github.com/deckarep/golang-set/v2 v2.6.0 h1:XfcQbWM1LlMB8BsJ8N9vW5ehnnPVIw0je80NsVHagjM= +github.com/deckarep/golang-set/v2 v2.6.0/go.mod h1:VAky9rY/yGXJOLEDv3OMci+7wtDpOF4IN+y82NBOac4= github.com/decred/dcrd/crypto/blake256 v1.1.0 h1:zPMNGQCm0g4QTY27fOCorQW7EryeQ/U0x++OzVrdms8= github.com/decred/dcrd/crypto/blake256 v1.1.0/go.mod h1:2OfgNZ5wDpcsFmHmCK5gZTPcCXqlm2ArzUIkw9czNJo= github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 h1:NMZiJj8QnKe1LgsbDayM4UoHwbvwDRwnI3hwNaAHRnc= @@ -856,6 +888,8 @@ github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0/go.mod h1:ZXNYxsqcloTdSy/rNShjY github.com/desertbit/timer v0.0.0-20180107155436-c41aec40b27f/go.mod h1:xH/i4TFMt8koVQZ6WFms69WAsDWr2XsYL3Hkl7jkoLE= github.com/desertbit/timer v1.0.1 h1:yRpYNn5Vaaj6QXecdLMPMJsW81JLiI1eokUft5nBmeo= github.com/desertbit/timer v1.0.1/go.mod h1:htRrYeY5V/t4iu1xCJ5XsQvp4xve8QulXXctAzxqcwE= +github.com/dgraph-io/badger/v2 v2.2007.4 h1:TRWBQg8UrlUhaFdco01nO2uXwzKS7zd+HVdwV/GHc4o= +github.com/dgraph-io/badger/v2 v2.2007.4/go.mod h1:vSw/ax2qojzbN6eXHIx6KPKtCSHJN/Uz0X0VPruTIhk= github.com/dgraph-io/badger/v4 v4.2.0 h1:kJrlajbXXL9DFTNuhhu9yCx7JJa4qpYWxtE8BzuWsEs= github.com/dgraph-io/badger/v4 v4.2.0/go.mod h1:qfCqhPoWDFJRx1gp5QwwyGo8xk1lbHUxvK9nK0OGAak= github.com/dgraph-io/ristretto v0.1.1 h1:6CWw5tJNgpegArSHpNHJKldNeq03FQCwYvfMVWajOK8= @@ -866,8 +900,8 @@ github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13 
h1:fAjc9m62+UWV/WA github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk= github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= -github.com/docker/docker v27.3.1+incompatible h1:KttF0XoteNTicmUtBO0L2tP+J7FGRFTjaEF4k6WdhfI= -github.com/docker/docker v27.3.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v28.0.0+incompatible h1:Olh0KS820sJ7nPsBKChVhk5pzqcwDR15fumfAd/p9hM= +github.com/docker/docker v28.0.0+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= @@ -909,14 +943,20 @@ github.com/envoyproxy/protoc-gen-validate v0.9.1/go.mod h1:OKNgG7TCp5pF4d6XftA0+ github.com/envoyproxy/protoc-gen-validate v0.10.1/go.mod h1:DRjgyB0I43LtJapqN6NiRwroiAU2PaFuvk/vjgh61ss= github.com/envoyproxy/protoc-gen-validate v1.2.1 h1:DEo3O99U8j4hBFwbJfrz9VtgcDfUKS7KJ7spH3d86P8= github.com/envoyproxy/protoc-gen-validate v1.2.1/go.mod h1:d/C80l/jxXLdfEIhX1W2TmLfsJ31lvEjwamM4DxlWXU= -github.com/ethereum/go-ethereum v1.15.11 h1:JK73WKeu0WC0O1eyX+mdQAVHUV+UR1a9VB/domDngBU= -github.com/ethereum/go-ethereum v1.15.11/go.mod h1:mf8YiHIb0GR4x4TipcvBUPxJLw1mFdmxzoDi11sDRoI= +github.com/ethereum/c-kzg-4844/v2 v2.1.0 h1:gQropX9YFBhl3g4HYhwE70zq3IHFRgbbNPw0Shwzf5w= +github.com/ethereum/c-kzg-4844/v2 v2.1.0/go.mod h1:TC48kOKjJKPbN7C++qIgt0TJzZ70QznYR7Ob+WXl57E= +github.com/ethereum/go-ethereum v1.16.3 h1:nDoBSrmsrPbrDIVLTkDQCy1U9KdHN+F2PzvMbDoS42Q= +github.com/ethereum/go-ethereum v1.16.3/go.mod h1:Lrsc6bt9Gm9RyvhfFK53vboCia8kpF9nv+2Ukntnl+8= +github.com/ethereum/go-verkle v0.2.2 h1:I2W0WjnrFUIzzVPwm8ykY+7pL2d4VhlsePn4j7cnFk8= +github.com/ethereum/go-verkle v0.2.2/go.mod h1:M3b90YRnzqKyyzBEWJGqj8Qff4IDeXnzFw0P9bFw3uk= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= github.com/fatih/color v1.17.0 h1:GlRw1BRJxkpqUCBKzKOw098ed57fEsKeNjpTe3cSjK4= github.com/fatih/color v1.17.0/go.mod h1:YZ7TlrGPkiz6ku9fK3TLD/pl3CpsiFyu8N92HLgmosI= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/ferranbt/fastssz v0.1.4 h1:OCDB+dYDEQDvAgtAGnTSidK1Pe2tW3nFV40XyMkTeDY= +github.com/ferranbt/fastssz v0.1.4/go.mod h1:Ea3+oeoRGGLGm5shYAeDgu6PGUlcvQhE2fILyD9+tGg= github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= github.com/fogleman/gg v1.3.0/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw= @@ -929,13 +969,11 @@ github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMo github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k= github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= -github.com/getsentry/sentry-go v0.32.0 h1:YKs+//QmwE3DcYtfKRH8/KyOOF/I6Qnx7qYGNHCGmCY= 
-github.com/getsentry/sentry-go v0.32.0/go.mod h1:CYNcMMz73YigoHljQRG+qPF+eMq8gG72XcGN/p71BAY= +github.com/getsentry/sentry-go v0.33.0 h1:YWyDii0KGVov3xOaamOnF0mjOrqSjBqwv48UEzn7QFg= +github.com/getsentry/sentry-go v0.33.0/go.mod h1:C55omcY9ChRQIUcVcGcs+Zdy4ZpQGvNJ7JYHIoSWOtE= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI= github.com/gin-gonic/gin v1.6.3/go.mod h1:75u5sXoLsGZoRN5Sgbi1eraJ4GU3++wFwWzhwvtwp4M= -github.com/gjermundgaraba/interchaintest/v8 v8.0.0-20250302163936-9fca2b7de400 h1:v8Odwn6C9IGBTZFsP2HIO6WLBm8KDFNYfwEj26Qcjrc= -github.com/gjermundgaraba/interchaintest/v8 v8.0.0-20250302163936-9fca2b7de400/go.mod h1:KeojpULmZVnaOdtOyEy0lWpjnNwVHEY8+Qga9bKeOX8= github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA= github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= github.com/go-fonts/dejavu v0.1.0/go.mod h1:4Wt4I4OU2Nq9asgDCteaAaWZOV24E+0/Pwo0gppep4g= @@ -946,8 +984,8 @@ github.com/go-fonts/stix v0.1.0/go.mod h1:w/c1f0ldAUlJmLBvlbkvVXLAD+tAMqobIIQpmn github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-jose/go-jose/v4 v4.0.5 h1:M6T8+mKZl/+fNNuFHvGIzDz7BTLQPIounk/b9dw3AaE= -github.com/go-jose/go-jose/v4 v4.0.5/go.mod h1:s3P1lRrkT8igV8D9OjyL4WRyHvjB6a4JSllnOrmmBOA= +github.com/go-jose/go-jose/v4 v4.1.1 h1:JYhSgy4mXXzAdF3nUx3ygx347LRXJRrpgyU3adRmkAI= +github.com/go-jose/go-jose/v4 v4.1.1/go.mod h1:BdsZGqgdO3b6tTc6LSE56wcDbMMLuPsw5d4ZD5f94kA= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o= @@ -964,10 +1002,13 @@ github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG github.com/go-logfmt/logfmt v0.6.0 h1:wGYYu3uicYdqXVgoYbvnkrPVXkuLM1p1ifugDMEdRi4= github.com/go-logfmt/logfmt v0.6.0/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= -github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= +github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/go-ole/go-ole v1.2.5/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= +github.com/go-ole/go-ole v1.3.0 h1:Dt6ye7+vXGIKZ7Xtk4s6/xVdGDQynvom7xCFEdWr6uE= +github.com/go-ole/go-ole v1.3.0/go.mod h1:5LS6F96DhAwUc7C+1HLexzMXY1xGRSryjyPPKW6zv78= github.com/go-pdf/fpdf v0.5.0/go.mod h1:HzcnA+A23uwogo0tp9yU+l3V+KXhiESpt1PMayhOh5M= github.com/go-pdf/fpdf v0.6.0/go.mod h1:HzcnA+A23uwogo0tp9yU+l3V+KXhiESpt1PMayhOh5M= github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= @@ -985,22 +1026,20 @@ 
github.com/goccy/go-json v0.9.11/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MG github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2 h1:ZpnhV/YsD2/4cESfV5+Hoeu/iUR3ruzNvZ+yQfO03a0= github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/gofrs/flock v0.12.1 h1:MTLVXXHf8ekldpJk3AKicLij9MdwOWkZ+a/jHHZby9E= +github.com/gofrs/flock v0.12.1/go.mod h1:9zxTsyu5xtJ9DK+1tFZyibEV7y3uwDxPPfbxeeHCoD0= github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= github.com/gogo/googleapis v1.4.1-0.20201022092350-68b0159b7869/go.mod h1:5YRNX2z1oM5gXdAkurHa942MDgEJyk02w4OecKY87+c= github.com/gogo/googleapis v1.4.1 h1:1Yx4Myt7BxzvUr5ldGSbwYiZG6t9wGBZ+8/fX3Wvtq0= github.com/gogo/googleapis v1.4.1/go.mod h1:2lpHqI5OcWCtVElxXnPt+s8oJvMpySlOyM6xDCrzib4= -github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= -github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= -github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= -github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang-jwt/jwt/v4 v4.5.2 h1:YtQM7lnr8iZ+j5q71MGKkNw9Mn7AjHM68uc9g5fXeUI= +github.com/golang-jwt/jwt/v4 v4.5.2/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= github.com/golang/glog v1.1.0/go.mod h1:pfYeQZ3JWZoXTV5sFc986z3HTpwQs9At6P4ImfuP3NQ= -github.com/golang/glog v1.2.4 h1:CNNw5U8lSiiBk7druxtSHHTsRWcxKoac6kZKm2peBBc= -github.com/golang/glog v1.2.4/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= +github.com/golang/glog v1.2.5 h1:DrW6hGnjIhtvhOIiAKT6Psh/Kd/ldepEa81DKeiRJ5I= +github.com/golang/glog v1.2.5/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -1155,9 +1194,15 @@ github.com/grpc-ecosystem/grpc-gateway/v2 v2.25.1 h1:VNqngBF40hVlDloBruUehVYC3Ar github.com/grpc-ecosystem/grpc-gateway/v2 v2.25.1/go.mod h1:RBRO7fro65R6tjKzYgLAFo0t1QEXY1Dp+i/bvpRiqiQ= github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c h1:6rhixN/i8ZofjG1Y75iExal34USq5p+wiN1tpie8IrU= github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c/go.mod h1:NMPJylDgVpX0MLRlPy15sqSwOFv/U1GZ2m21JhFfek0= +github.com/gtank/merlin v0.1.1 h1:eQ90iG7K9pOhtereWsmyRJ6RAwcP4tHTDBHXNg+u5is= +github.com/gtank/merlin v0.1.1/go.mod h1:T86dnYJhcGOh5BjZFCJWTDeTK7XW8uE+E21Cy/bIQ+s= +github.com/gtank/ristretto255 v0.1.2 h1:JEqUCPA1NvLq5DwYtuzigd7ss8fwbYay9fi4/5uMzcc= +github.com/gtank/ristretto255 v0.1.2/go.mod h1:Ph5OpO6c7xKUGROZfWVLiJf9icMDwUeIvY4OmlYW69o= github.com/hashicorp/consul/api 
v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE= github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-bexpr v0.1.10 h1:9kuI5PFotCboP3dkDYFr/wi0gg0QVbSNz5oFRpxn4uE= +github.com/hashicorp/go-bexpr v0.1.10/go.mod h1:oxlubA2vC/gFVfX1A6JGp7ls7uCDlfJn732ehYYg+g0= github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= @@ -1186,8 +1231,9 @@ github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/b github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8= github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= -github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek= github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/go-version v1.7.0 h1:5tqGy27NaOTB8yJKUZELlFAS/LTKJkrmONwQKeRZfjY= +github.com/hashicorp/go-version v1.7.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= @@ -1205,6 +1251,10 @@ github.com/hdevalence/ed25519consensus v0.2.0 h1:37ICyZqdyj0lAZ8P4D1d1id3HqbbG1N github.com/hdevalence/ed25519consensus v0.2.0/go.mod h1:w3BHWjwJbFU29IRHL1Iqkw3sus+7FctEyM4RqDxYNzo= github.com/herumi/bls-eth-go-binary v1.31.0 h1:9eeW3EA4epCb7FIHt2luENpAW69MvKGL5jieHlBiP+w= github.com/herumi/bls-eth-go-binary v1.31.0/go.mod h1:luAnRm3OsMQeokhGzpYmc0ZKwawY7o87PUEP11Z7r7U= +github.com/holiman/billy v0.0.0-20240216141850-2abb0c79d3c4 h1:X4egAf/gcS1zATw6wn4Ej8vjuVGxeHdan+bRb2ebyv4= +github.com/holiman/billy v0.0.0-20240216141850-2abb0c79d3c4/go.mod h1:5GuXa7vkL8u9FkFuWdVvfR5ix8hRB7DbOAaYULamFpc= +github.com/holiman/bloomfilter/v2 v2.0.3 h1:73e0e/V0tCydx14a0SCYS/EWCxgwLZ18CZcZKVu0fao= +github.com/holiman/bloomfilter/v2 v2.0.3/go.mod h1:zpoh+gs7qcpqrHr3dB55AMiJwo0iURXE7ZOP9L9hSkA= github.com/holiman/uint256 v1.3.2 h1:a9EgMPSC1AAaj1SZL5zIQD3WbwTuHrMGOerLjGmM/TA= github.com/holiman/uint256 v1.3.2/go.mod h1:EOMSn4q6Nyt9P6efbI3bueV4e1b3dGlUCXeiRV4ng7E= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= @@ -1213,6 +1263,8 @@ github.com/huandu/go-assert v1.1.5/go.mod h1:yOLvuqZwmcHIC5rIzrBhT7D3Q9c3GFnd0Jr github.com/huandu/skiplist v1.2.1 h1:dTi93MgjwErA/8idWTzIw4Y1kZsMWx35fmI2c8Rij7w= github.com/huandu/skiplist v1.2.1/go.mod h1:7v3iFjLcSAzO4fN5B8dvebvo/qsfumiLiDXMrPiHF9w= github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg= +github.com/huin/goupnp v1.3.0 h1:UvLUlWDNpoUdYzb2TCn+MuTWtcjXKSza2n6CBdQ0xXc= +github.com/huin/goupnp v1.3.0/go.mod h1:gnGPsThkYa7bFi/KWmEysQRf48l2dvR5bxr2OFckNX8= github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho= github.com/iancoleman/strcase v0.3.0 h1:nTXanmYxhfFAMjZL34Ov6gkzEsSJZ5DbhxWjvSASxEI= github.com/iancoleman/strcase v0.3.0/go.mod 
h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho= @@ -1226,6 +1278,8 @@ github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANyt github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= +github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus= +github.com/jackpal/go-nat-pmp v1.0.2/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc= github.com/jhump/protoreflect v1.17.0 h1:qOEr613fac2lOuTgWN4tPAtLL7fUSbuJL5X5XumQh94= github.com/jhump/protoreflect v1.17.0/go.mod h1:h9+vUUL38jiBzck8ck+6G/aeMX8Z4QUY/NiJPwPNi+8= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= @@ -1250,10 +1304,7 @@ github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7V github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= github.com/jung-kurt/gofpdf v1.0.0/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= -github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNUXsshfwJMBgNA0RU6/i7WVaAegv3PtuIHPMs= github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8= -github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= -github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/asmfmt v1.3.2/go.mod h1:AG8TuvYojzulgDAMCnYn50l/5QV3Bs/tp6j0HLHbNSE= @@ -1282,9 +1333,13 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= +github.com/leanovate/gopter v0.2.11 h1:vRjThO1EKPb/1NsDXuDrzldR28RLkBflWYcU9CvzWu4= +github.com/leanovate/gopter v0.2.11/go.mod h1:aK3tzZP/C+p1m3SPRE4SYZFGP7jjkuSI4f7Xvpt0S9c= github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII= github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw= github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/libp2p/go-buffer-pool v0.1.0 h1:oK4mSFcQz7cTQIfqbe4MIj9gLW+mnanjyFtc6cdF0Y8= +github.com/libp2p/go-buffer-pool v0.1.0/go.mod h1:N+vh8gMqimBzdKkSMVuydVDq+UV5QTWy5HSiZacSbPg= github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= github.com/linxGnu/grocksdb v1.9.2 h1:O3mzvO0wuzQ9mtlHbDrShixyVjVbmuqTjFrzlf43wZ8= @@ -1313,11 +1368,14 @@ github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWE github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= 
github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= +github.com/mattn/go-runewidth v0.0.15 h1:UNAjwbU9l54TA3KzvqLGxwWjHmMgBUVhBiTjelZgg3U= +github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= github.com/mattn/go-sqlite3 v1.14.14/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= -github.com/mattn/go-sqlite3 v1.14.16 h1:yOQRA0RpS5PFz/oikGwBEqvAWhWg5ufRz4ETLjwpU1Y= -github.com/mattn/go-sqlite3 v1.14.16/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= +github.com/mimoo/StrobeGo v0.0.0-20181016162300-f8f6d4d2b643/go.mod h1:43+3pMjjKimDBf5Kr4ZFNGbLql1zKkbImw+fZbw3geM= +github.com/mimoo/StrobeGo v0.0.0-20220103164710-9a04d6ca976b h1:QrHweqAtyJ9EwCaGHBu1fghwxIPiopAHV06JlXrMHjk= +github.com/mimoo/StrobeGo v0.0.0-20220103164710-9a04d6ca976b/go.mod h1:xxLb2ip6sSUts3g1irPVHyk/DGslwQsNOo9I7smJfNU= github.com/minio/asm2plan9s v0.0.0-20200509001527-cdd76441f9d8/go.mod h1:mC1jAcsrzbxHt8iiaC+zU4b1ylILSosueou12R++wfY= github.com/minio/c2goasm v0.0.0-20190812172519-36a3d3bbc4f3/go.mod h1:RagcQ7I8IeTMnF8JTXieKnO4Z6JCsikNEzj0DwauVzE= github.com/minio/highwayhash v1.0.3 h1:kbnuUMoHYyVl7szWjSxJnxw11k2U709jqFPPmIUyD6Q= @@ -1337,8 +1395,12 @@ github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:F github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/pointerstructure v1.2.0 h1:O+i9nHnXS3l/9Wu7r4NrEdwA2VFTicjUEN1uBnDo34A= +github.com/mitchellh/pointerstructure v1.2.0/go.mod h1:BRAsLI5zgXmw97Lf6s25bs8ohIXc3tViBH44KcwB2g4= github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= +github.com/moby/moby v27.5.1+incompatible h1:/pN59F/t3U7Q4FPzV88nzqf7Fp0qqCSL2KzhZaiKcKw= +github.com/moby/moby v27.5.1+incompatible/go.mod h1:fDXVQ6+S340veQPv35CzDahGBmHsiclFwfEygB/TWMc= github.com/moby/term v0.0.0-20221205130635-1aeaba878587 h1:HfkjXDfhgVaN5rmueG8cL8KKeFNecRCXFhaJ2qZ5SKA= github.com/moby/term v0.0.0-20221205130635-1aeaba878587/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -1364,6 +1426,8 @@ github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzE github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= +github.com/ncruces/go-strftime v0.1.9 h1:bY0MQC28UADQmHmaF5dgpLmImcShSi2kHU9XLdhx/f4= +github.com/ncruces/go-strftime v0.1.9/go.mod h1:Fwc5htZGVVkseilnfgOVb9mKy6w1naJmn9CehxcKcls= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= github.com/nxadm/tail 
v1.4.11 h1:8feyoE3OzPrcshW5/MJ4sGESc5cqmGkGCWlco4l0bqY= @@ -1375,6 +1439,8 @@ github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQ github.com/oklog/run v1.1.0 h1:GEenZ1cK0+q0+wsJew9qUg/DyD8k3JzYsZAi5gYi2mA= github.com/oklog/run v1.1.0/go.mod h1:sVPdnTZT1zYwAJeCMu2Th4T21pA3FPOQRfWjQlk7DVU= github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= +github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec= +github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= @@ -1423,6 +1489,16 @@ github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi github.com/pierrec/lz4/v4 v4.1.15/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= github.com/pingcap/errors v0.11.4 h1:lFuQV/oaUMGcD2tqt+01ROSmJs75VG1ToEOkZIZ4nE4= github.com/pingcap/errors v0.11.4/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8= +github.com/pion/dtls/v2 v2.2.7 h1:cSUBsETxepsCSFSxC3mc/aDo14qQLMSL+O6IjG28yV8= +github.com/pion/dtls/v2 v2.2.7/go.mod h1:8WiMkebSHFD0T+dIU+UeBaoV7kDhOW5oDCzZ7WZ/F9s= +github.com/pion/logging v0.2.2 h1:M9+AIj/+pxNsDfAT64+MAVgJO0rsyLnoJKCqf//DoeY= +github.com/pion/logging v0.2.2/go.mod h1:k0/tDVsRCX2Mb2ZEmTqNa7CWsQPc+YYCB7Q+5pahoms= +github.com/pion/stun/v2 v2.0.0 h1:A5+wXKLAypxQri59+tmQKVs7+l6mMM+3d+eER9ifRU0= +github.com/pion/stun/v2 v2.0.0/go.mod h1:22qRSh08fSEttYUmJZGlriq9+03jtVmXNODgLccj8GQ= +github.com/pion/transport/v2 v2.2.1 h1:7qYnCBlpgSJNYMbLCKuSY9KbQdBFoETvPNETv0y4N7c= +github.com/pion/transport/v2 v2.2.1/go.mod h1:cXXWavvCnFF6McHTft3DWS9iic2Mftcz1Aq29pGcU5g= +github.com/pion/transport/v3 v3.0.1 h1:gDTlPJwROfSfz6QfSi0ZmeCSkFcnWWiiR9ES0ouANiM= +github.com/pion/transport/v3 v3.0.1/go.mod h1:UY7kiITrlMv7/IKgd5eTUcaahZx5oUN3l9SzK5f5xE0= github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -1479,14 +1555,16 @@ github.com/prysmaticlabs/go-bitfield v0.0.0-20240328144219-a1caa50c3a1e h1:ATgOe github.com/prysmaticlabs/go-bitfield v0.0.0-20240328144219-a1caa50c3a1e/go.mod h1:wmuf/mdK4VMD+jA9ThwcUKjg3a2XWM9cVfFYjDyY4j4= github.com/prysmaticlabs/gohashtree v0.0.4-beta.0.20240624100937-73632381301b h1:VK7thFOnhxAZ/5aolr5Os4beiubuD08WiuiHyRqgwks= github.com/prysmaticlabs/gohashtree v0.0.4-beta.0.20240624100937-73632381301b/go.mod h1:HRuvtXLZ4WkaB1MItToVH2e8ZwKwZPY5/Rcby+CvvLY= -github.com/prysmaticlabs/prysm/v5 v5.3.0 h1:7Lr8ndapBTZg00YE+MgujN6+yvJR6Bdfn28ZDSJ00II= -github.com/prysmaticlabs/prysm/v5 v5.3.0/go.mod h1:r1KhlduqDMIGZ1GhR5pjZ2Ko8Q89noTDYTRoPKwf1+c= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5XpJzTSTfLsJV/mx9Q9g7kxmchpfZyxgzM= github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= +github.com/regen-network/protobuf v1.3.3-alpha.regen.1 h1:OHEc+q5iIAXpqiqFKeLpu5NwTIkVXUs48vFMwzqpqY4= +github.com/regen-network/protobuf 
v1.3.3-alpha.regen.1/go.mod h1:2DjTFR1HhMQhiWC5sZ4OhQ3+NtdbZ6oBDKQwq5Ou+FI= github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE= github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= +github.com/rivo/uniseg v0.4.3 h1:utMvzDsuh3suAEnhH0RdHmoPbU648o6CvXxTx4SBMOw= +github.com/rivo/uniseg v0.4.3/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= @@ -1500,7 +1578,9 @@ github.com/rs/cors v1.11.1/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU github.com/rs/xid v1.6.0/go.mod h1:7XoLgs4eV+QndskICGsho+ADou8ySMSjJKDIan90Nz0= github.com/rs/zerolog v1.34.0 h1:k43nTLIwcTVQAncfCw4KZ2VY6ukYoZaBPNOE8txlOeY= github.com/rs/zerolog v1.34.0/go.mod h1:bJsvje4Z08ROH4Nhs5iH600c3IkWhwp44iRc54W6wYQ= +github.com/russross/blackfriday v1.6.0 h1:KqfZb0pUVN2lYqZUYRddxF4OR8ZMURnJIG5Y3VRLtww= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ruudk/golang-pdf417 v0.0.0-20181029194003-1af4ab5afa58/go.mod h1:6lfFZQK844Gfx8o5WFuvpxWRwnSoipWe/p622j1v06w= github.com/ruudk/golang-pdf417 v0.0.0-20201230142125-a7e3863a1245/go.mod h1:pQAZKsJ8yyVxGRWYNEm9oFB8ieLgKFnamEyDmSA0BRk= @@ -1513,6 +1593,8 @@ github.com/sasha-s/go-deadlock v0.3.5/go.mod h1:bugP6EGbdGYObIlx7pUZtWqlvo8k9H6v github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/shamaton/msgpack/v2 v2.2.0 h1:IP1m01pHwCrMa6ZccP9B3bqxEMKMSmMVAVKk54g3L/Y= github.com/shamaton/msgpack/v2 v2.2.0/go.mod h1:6khjYnkx73f7VQU7wjcFS9DFjs+59naVWJv1TB7qdOI= +github.com/shirou/gopsutil v3.21.4-0.20210419000835-c7a38de76ee5+incompatible h1:Bn1aCHHRnjv4Bl16T8rcaFjYSrGrIZvpiGO6P3Q4GpU= +github.com/shirou/gopsutil v3.21.4-0.20210419000835-c7a38de76ee5+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= @@ -1532,15 +1614,15 @@ github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z github.com/spf13/afero v1.9.2/go.mod h1:iUV7ddyEEZPO5gA3zD4fJt6iStLlL+Lg4m2cihcDf8Y= github.com/spf13/afero v1.12.0 h1:UcOPyRBYczmFn6yvphxkn9ZEOY65cpwGKb5mL36mrqs= github.com/spf13/afero v1.12.0/go.mod h1:ZTlWwG4/ahT8W7T0WQ5uYmjI9duaLQGy3Q2OAl4sk/4= -github.com/spf13/cast v1.8.0 h1:gEN9K4b8Xws4EX0+a0reLmhq8moKn7ntRlQYgjPeCDk= -github.com/spf13/cast v1.8.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= +github.com/spf13/cast v1.9.2 h1:SsGfm7M8QOFtEzumm7UZrZdLLquNdzFYfIbEXntcFbE= +github.com/spf13/cast v1.9.2/go.mod h1:jNfB8QC9IA6ZuY2ZjDp0KtFO2LZZlg4S/7bzP6qqeHo= github.com/spf13/cobra v0.0.3/go.mod 
h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= -github.com/spf13/cobra v1.9.1 h1:CXSaggrXdbHK9CF+8ywj8Amf7PBRmPCOJugH954Nnlo= -github.com/spf13/cobra v1.9.1/go.mod h1:nDyEzZ8ogv936Cinf6g1RU9MRY64Ir93oCnqb9wxYW0= +github.com/spf13/cobra v1.10.1 h1:lJeBwCfmrnXthfAupyUTzJ/J4Nc1RsHC/mSRU2dll/s= +github.com/spf13/cobra v1.10.1/go.mod h1:7SmJGaTHFVBY0jW4NXGluQoLvhqFQM+6XSKD+P4XaB0= github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o= -github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/pflag v1.0.9 h1:9exaQaMOCwffKiiiYk6/BndUBv+iRViNW+4lEMi0PvY= +github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/viper v1.20.1 h1:ZMi+z/lvLyPSCoNtFCpqjy0S4kPbirhpTMwl8BkW9X4= github.com/spf13/viper v1.20.1/go.mod h1:P9Mdzt1zoHIG8m2eZQinpiBjo6kCmZSKBClNNqjJvu4= github.com/spiffe/go-spiffe/v2 v2.5.0 h1:N2I01KCUkv1FAjZXJMwh95KK1ZIQLYbPfhaxw8WS0hE= @@ -1565,20 +1647,36 @@ github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1F github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= -github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= -github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= github.com/supranational/blst v0.3.14 h1:xNMoHRJOTwMn63ip6qoWJ2Ymgvj7E2b9jY2FAwY+qRo= github.com/supranational/blst v0.3.14/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw= github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY= github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc= +github.com/tecbot/gorocksdb v0.0.0-20191217155057-f0fad39f321c h1:g+WoO5jjkqGAzHWCjJB1zZfXPIAaDpzXIEJ0eS6B5Ok= +github.com/tecbot/gorocksdb v0.0.0-20191217155057-f0fad39f321c/go.mod h1:ahpPrc7HpcfEWDQRZEmnXMzHY03mLDYMCxeDzy46i+8= github.com/tendermint/go-amino v0.16.0 h1:GyhmgQKvqF82e2oZeuMSp9JTN0N09emoSZlb2lyGa2E= github.com/tendermint/go-amino v0.16.0/go.mod h1:TQU0M1i/ImAo+tYpZi73AU3V/dKeCoMC9Sphe2ZwGME= +github.com/tendermint/tendermint v0.38.0-dev h1:yX4zsEgTF9PxlLmhx9XAPTGH2E9FSlqSpHcY7sW7Vb8= +github.com/tendermint/tendermint v0.38.0-dev/go.mod h1:EHKmaqObmcGysoRr7krxXoxxhUDyYWbKvvRYJ9tCGWY= +github.com/tendermint/tm-db v0.6.6 h1:EzhaOfR0bdKyATqcd5PNeyeq8r+V4bRPHBfyFdD9kGM= +github.com/tendermint/tm-db v0.6.6/go.mod h1:wP8d49A85B7/erz/r4YbKssKw6ylsO/hKtFk7E1aWZI= github.com/thomaso-mirodin/intmath v0.0.0-20160323211736-5dc6d854e46e h1:cR8/SYRgyQCt5cNCMniB/ZScMkhI9nk8U5C7SbISXjo= github.com/thomaso-mirodin/intmath v0.0.0-20160323211736-5dc6d854e46e/go.mod h1:Tu4lItkATkonrYuvtVjG0/rhy15qrNGNTjPdaphtZ/8= github.com/tidwall/btree v1.7.0 h1:L1fkJH/AuEh5zBnnBbmTwQ5Lt+bRJ5A8EWecslvo9iI= 
github.com/tidwall/btree v1.7.0/go.mod h1:twD9XRA5jj9VUQGELzDO4HPQTNJsoWWfYEL+EUQ2cKY= +github.com/tidwall/gjson v1.18.0 h1:FIDeeyB800efLX89e5a8Y0BNH+LOngJyGrIWxG2FKQY= +github.com/tidwall/gjson v1.18.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= +github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA= +github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM= +github.com/tidwall/pretty v1.2.0 h1:RWIZEg2iJ8/g6fDDYzMpobmaoGh5OLl4AXtGUGPcqCs= +github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= +github.com/tklauser/go-sysconf v0.3.12 h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFAEVmqU= +github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI= +github.com/tklauser/numcpus v0.6.1 h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+Fk= +github.com/tklauser/numcpus v0.6.1/go.mod h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9fESEdAacY= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= github.com/twitchyliquid64/golang-asm v0.15.1 h1:SU5vSMR7hnwNxj24w34ZyCi/FmDZTkS4MhqMhdFk5YI= @@ -1589,8 +1687,13 @@ github.com/ulikunitz/xz v0.5.10/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0o github.com/ulikunitz/xz v0.5.11 h1:kpFauv27b6ynzBNT/Xy+1k+fK4WswhN/6PN5WhFAGw8= github.com/ulikunitz/xz v0.5.11/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= +github.com/urfave/cli v1.22.1 h1:+mkCCcOFKPnCmVYVcURKps1Xe+3zP90gSYGNfRkjoIY= github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= +github.com/urfave/cli/v2 v2.27.5 h1:WoHEJLdsXr6dDWoJgMq/CboDmyY/8HMMH1fTECbih+w= +github.com/urfave/cli/v2 v2.27.5/go.mod h1:3Sevf16NykTbInEnD0yKkjDAeZDS0A6bzhBH5hrMvTQ= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= +github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1 h1:gEOO8jv9F4OT7lGCjxCBTO/36wtF6j2nSip77qHd4x4= +github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1/go.mod h1:Ohn+xnUBiLI6FVj/9LpzZWtj1/D6lUovWYBkxHVV3aM= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= @@ -1623,28 +1726,28 @@ go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= -go.opentelemetry.io/contrib/detectors/gcp v1.34.0 h1:JRxssobiPg23otYU5SbWtQC//snGVIM3Tx6QRzlQBao= -go.opentelemetry.io/contrib/detectors/gcp v1.34.0/go.mod h1:cV4BMFcscUR/ckqLkbfQmF0PRsq8w/lMGzdbCSveBHo= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.58.0 h1:PS8wXpbyaDJQ2VDHHncMe9Vct0Zn1fEjpsjrLxGJoSc= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.58.0/go.mod h1:HDBUsEjOuRC0EzKZ1bSaRGZWUBAzo+MhAcUUORSr4D0= +go.opentelemetry.io/contrib/detectors/gcp v1.36.0 h1:F7q2tNlCaHY9nMKHR6XH9/qkp8FktLnIcy6jJNyOCQw= 
+go.opentelemetry.io/contrib/detectors/gcp v1.36.0/go.mod h1:IbBN8uAIIx734PTonTPxAxnjc2pQTxWNkwfstZ+6H2k= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.60.0 h1:x7wzEgXfnzJcHDwStJT+mxOz4etr2EcexjqhBvmoakw= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.60.0/go.mod h1:rg+RlpR5dKwaS95IyyZqj5Wd4E13lk/msnTS0Xl9lJM= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.59.0 h1:CV7UdSGJt/Ao6Gp4CXckLxVRRsRgDHoI8XjbL3PDl8s= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.59.0/go.mod h1:FRmFuRJfag1IZ2dPkHnEoSFVgTVPUd2qf5Vi69hLb8I= -go.opentelemetry.io/otel v1.34.0 h1:zRLXxLCgL1WyKsPVrgbSdMN4c0FMkDAskSTQP+0hdUY= -go.opentelemetry.io/otel v1.34.0/go.mod h1:OWFPOQ+h4G8xpyjgqo4SxJYdDQ/qmRH+wivy7zzx9oI= +go.opentelemetry.io/otel v1.37.0 h1:9zhNfelUvx0KBfu/gb+ZgeAfAgtWrfHJZcAqFC228wQ= +go.opentelemetry.io/otel v1.37.0/go.mod h1:ehE/umFRLnuLa/vSccNq9oS1ErUlkkK71gMcN34UG8I= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.34.0 h1:OeNbIYk/2C15ckl7glBlOBp5+WlYsOElzTNmiPW/x60= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.34.0/go.mod h1:7Bept48yIeqxP2OZ9/AqIpYS94h2or0aB4FypJTc8ZM= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.34.0 h1:BEj3SPM81McUZHYjRS5pEgNgnmzGJ5tRpU5krWnV8Bs= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.34.0/go.mod h1:9cKLGBDzI/F3NoHLQGm4ZrYdIHsvGt6ej6hUowxY0J4= go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.29.0 h1:WDdP9acbMYjbKIyJUhTvtzj601sVJOqgWdUxSdR/Ysc= go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.29.0/go.mod h1:BLbf7zbNIONBLPwvFnwNHGj4zge8uTCM/UPIVW1Mq2I= -go.opentelemetry.io/otel/metric v1.34.0 h1:+eTR3U0MyfWjRDhmFMxe2SsW64QrZ84AOhvqS7Y+PoQ= -go.opentelemetry.io/otel/metric v1.34.0/go.mod h1:CEDrp0fy2D0MvkXE+dPV7cMi8tWZwX3dmaIhwPOaqHE= -go.opentelemetry.io/otel/sdk v1.34.0 h1:95zS4k/2GOy069d321O8jWgYsW3MzVV+KuSPKp7Wr1A= -go.opentelemetry.io/otel/sdk v1.34.0/go.mod h1:0e/pNiaMAqaykJGKbi+tSjWfNNHMTxoC9qANsCzbyxU= -go.opentelemetry.io/otel/sdk/metric v1.34.0 h1:5CeK9ujjbFVL5c1PhLuStg1wxA7vQv7ce1EK0Gyvahk= -go.opentelemetry.io/otel/sdk/metric v1.34.0/go.mod h1:jQ/r8Ze28zRKoNRdkjCZxfs6YvBTG1+YIqyFVFYec5w= -go.opentelemetry.io/otel/trace v1.34.0 h1:+ouXS2V8Rd4hp4580a8q23bg0azF2nI8cqLYnC8mh/k= -go.opentelemetry.io/otel/trace v1.34.0/go.mod h1:Svm7lSjQD7kG7KJ/MUHPVXSDGz2OX4h0M2jHBhmSfRE= +go.opentelemetry.io/otel/metric v1.37.0 h1:mvwbQS5m0tbmqML4NqK+e3aDiO02vsf/WgbsdpcPoZE= +go.opentelemetry.io/otel/metric v1.37.0/go.mod h1:04wGrZurHYKOc+RKeye86GwKiTb9FKm1WHtO+4EVr2E= +go.opentelemetry.io/otel/sdk v1.37.0 h1:ItB0QUqnjesGRvNcmAcU0LyvkVyGJ2xftD29bWdDvKI= +go.opentelemetry.io/otel/sdk v1.37.0/go.mod h1:VredYzxUvuo2q3WRcDnKDjbdvmO0sCzOvVAiY+yUkAg= +go.opentelemetry.io/otel/sdk/metric v1.37.0 h1:90lI228XrB9jCMuSdA0673aubgRobVZFhbjxHHspCPc= +go.opentelemetry.io/otel/sdk/metric v1.37.0/go.mod h1:cNen4ZWfiD37l5NhS+Keb5RXVWZWpRE+9WyVCpbo5ps= +go.opentelemetry.io/otel/trace v1.37.0 h1:HLdcFNbRQBE2imdSEgm/kwqmQj1Or1l/7bW6mxVK7z4= +go.opentelemetry.io/otel/trace v1.37.0/go.mod h1:TlgrlQ+PtQO5XFerSPUYG0JSgGyryXewPGyayAWSBS0= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.opentelemetry.io/proto/otlp v0.15.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= @@ -1670,8 +1773,12 @@ go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= go.uber.org/zap v1.18.1/go.mod 
h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= -golang.org/x/arch v0.15.0 h1:QtOrQd0bTUnhNVNndMpLHNWrDmYzZ2KDqSrEymqInZw= -golang.org/x/arch v0.15.0/go.mod h1:JmwW7aLIoRUKgaTzhkiEFxvcEiQGyOg9BMonBJUS7EE= +go.yaml.in/yaml/v2 v2.4.2 h1:DzmwEr2rDGHl7lsFgAHxmNz/1NlQ7xLIrlN2h5d1eGI= +go.yaml.in/yaml/v2 v2.4.2/go.mod h1:081UH+NErpNdqlCXm3TtEran0rJZGxAYx9hb/ELlsPU= +go.yaml.in/yaml/v3 v3.0.3 h1:bXOww4E/J3f66rav3pX3m8w6jDE4knZjGOw8b5Y6iNE= +go.yaml.in/yaml/v3 v3.0.3/go.mod h1:tBHosrYAkRZjRAOREWbDnBXUf08JOwYq++0QNwQiWzI= +golang.org/x/arch v0.17.0 h1:4O3dfLzd+lQewptAHqjewQZQDyEdejz3VwgeYwkZneU= +golang.org/x/arch v0.17.0/go.mod h1:bdwinDaKcfZUGpH09BB7ZmOfhalA8lQdzl62l8gGWsk= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= @@ -1689,8 +1796,8 @@ golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliY golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= golang.org/x/crypto v0.32.0/go.mod h1:ZnnJkOaASj8g0AjIduWNlq2NRxL0PlBrbKVyZ6V/Ugc= -golang.org/x/crypto v0.37.0 h1:kJNSjF/Xp7kU0iB2Z+9viTPMW4EqqsrywMXLJOOsXSE= -golang.org/x/crypto v0.37.0/go.mod h1:vg+k43peMZ0pUMhYmVAWysMK35e6ioLh3wB8ZCAfbVc= +golang.org/x/crypto v0.39.0 h1:SHs+kF4LP+f+p14esP5jAoDpHU8Gu/v9lFRK6IT5imM= +golang.org/x/crypto v0.39.0/go.mod h1:L+Xg3Wf6HoL4Bn4238Z6ft6KfEpN0tJGo53AAPC632U= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -1754,8 +1861,8 @@ golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= -golang.org/x/mod v0.24.0 h1:ZfthKaKaT4NrhGVZHO1/WDTwGES4De8KtWO0SIbNJMU= -golang.org/x/mod v0.24.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww= +golang.org/x/mod v0.25.0 h1:n7a+ZbQKQA/Ysbyb0/6IbB1H/X41mKgbhfv7AfG/44w= +golang.org/x/mod v0.25.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -1830,8 +1937,8 @@ golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk= golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= golang.org/x/net v0.34.0/go.mod h1:di0qlW3YNM5oh6GqDGQr92MyTozJPmybPK4Ev/Gm31k= -golang.org/x/net v0.39.0 h1:ZCu7HMWDxpXpaiKdhzIfaltL9Lp31x/3fCP11bc6/fY= 
-golang.org/x/net v0.39.0/go.mod h1:X7NRbYVEA+ewNkCNyJ513WmMdQ3BineSwVtN2zD/d+E= +golang.org/x/net v0.41.0 h1:vBTly1HeNPEn3wtREYfy4GZ/NECgw2Cnl+nK6Nz3uvw= +golang.org/x/net v0.41.0/go.mod h1:B/K4NNqkfmg07DQYrbwvSluqCJOOXwUjeb/5lOisjbA= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -1861,8 +1968,8 @@ golang.org/x/oauth2 v0.4.0/go.mod h1:RznEsdpjGAINPTOF0UH/t+xJ75L18YO3Ho6Pyn+uRec golang.org/x/oauth2 v0.5.0/go.mod h1:9/XBHVqLaWO3/BRHs5jbpYCnOZVjj5V0ndyaAM7KB4I= golang.org/x/oauth2 v0.6.0/go.mod h1:ycmewcwgD4Rpr3eZJLSB4Kyyljb3qDh40vJ8STE5HKw= golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4= -golang.org/x/oauth2 v0.27.0 h1:da9Vo7/tDv5RH/7nZDz1eMGS/q1Vv1N/7FCrBhI9I3M= -golang.org/x/oauth2 v0.27.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8= +golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI= +golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1883,8 +1990,8 @@ golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= -golang.org/x/sync v0.13.0 h1:AauUjRAJ9OSnvULf/ARrrVywoJDy0YS2AwQ98I37610= -golang.org/x/sync v0.13.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= +golang.org/x/sync v0.15.0 h1:KWH3jNZsfyT6xfAfKiz6MRNmd46ByHDYaZ7KSkCtdW8= +golang.org/x/sync v0.15.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1904,6 +2011,7 @@ golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1984,6 +2092,7 @@ golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20221010170243-090e33056c14/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -1991,13 +2100,14 @@ golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.32.0 h1:s77OFDvIQeibCmezSnk/q6iAfkdiQaJi4VzroCFrN20= -golang.org/x/sys v0.32.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw= +golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= golang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2/go.mod h1:TeRTkGYfJXctD9OcfyVLyj2J3IxLnKwHJR8f4D8a3YE= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= @@ -2012,8 +2122,8 @@ golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU= golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY= golang.org/x/term v0.28.0/go.mod h1:Sw/lC2IAUZ92udQNf3WodGtn4k/XoLyZoh8v/8uiwek= -golang.org/x/term v0.31.0 h1:erwDkOK1Msy6offm1mOgvspSkslFnIGsFnxOKoufg3o= -golang.org/x/term v0.31.0/go.mod h1:R4BeIy7D95HzImkxGkTW1UQTtP54tio2RyHz7PwK0aw= +golang.org/x/term v0.32.0 h1:DR4lr0TjUs3epypdhTOkMmuF5CDFJ/8pOnbzMZPQ7bg= +golang.org/x/term v0.32.0/go.mod h1:uZG1FhGx848Sqfsq4/DlJr3xGGsYMu/L5GW4abiaEPQ= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -2034,8 +2144,8 @@ golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= -golang.org/x/text v0.24.0 h1:dd5Bzh4yt5KYA8f9CJHCP4FB4D51c2c6JvN37xJJkJ0= -golang.org/x/text v0.24.0/go.mod h1:L8rBsPeo2pSS+xqN0d5u2ikmjtmoJbDBT1b7nHvFCdU= +golang.org/x/text v0.26.0 
h1:P42AVeLghgTYr4+xUnTRKDMqpar+PtX7KWuNQL21L8M= +golang.org/x/text v0.26.0/go.mod h1:QK15LZJUUQVJxhz7wXgxSy/CJaTFjd0G+YLonydOVQA= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -2045,11 +2155,9 @@ golang.org/x/time v0.1.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.10.0 h1:3usCWA8tQn0L8+hFJQNgzpWbd89begxN66o1Ojdn5L4= golang.org/x/time v0.10.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= -golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= @@ -2118,8 +2226,8 @@ golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s= golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58= golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= -golang.org/x/tools v0.31.0 h1:0EedkvKDbh+qistFTd0Bcwe/YLh4vHwWEkiI0toFIBU= -golang.org/x/tools v0.31.0/go.mod h1:naFTU+Cev749tSJRXJlna0T3WxKvb1kWEx15xA4SdmQ= +golang.org/x/tools v0.33.0 h1:4qz2S3zmRxbGIhDIAgjxvFutSvH5EfnsYrRBj0UI0bc= +golang.org/x/tools v0.33.0/go.mod h1:CIJMaWEY88juyUfo7UbgPqbC8rU2OqfAV1h2Qp0oMYI= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -2132,6 +2240,8 @@ gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJ gonum.org/v1/gonum v0.8.2/go.mod h1:oe/vMfY3deqTw+1EZJhuvEW2iwGF1bW9wwu7XCu0+v0= gonum.org/v1/gonum v0.9.3/go.mod h1:TZumC3NeyVQskjXqmyWt4S3bINhy7B4eYwW69EbyX+0= gonum.org/v1/gonum v0.11.0/go.mod h1:fSG4YDCxxUZQJ7rKsQrj0gMOg00Il0Z96/qMA4bVQhA= +gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk= +gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E= gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc= gonum.org/v1/plot v0.9.0/go.mod h1:3Pcqqmp6RHvJI72kgb8fThyUnav364FOsdDo2aGW5lY= @@ -2226,6 +2336,7 @@ google.golang.org/genproto 
v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfG google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200324203455-a04cca1dde73/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= @@ -2343,10 +2454,10 @@ google.golang.org/genproto v0.0.0-20230331144136-dcfb400f0633/go.mod h1:UUQDJDOl google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1/go.mod h1:nKE/iIaLqn2bQwXBg8f1g2Ylh6r5MN5CmZvuzZCgsCU= google.golang.org/genproto v0.0.0-20241118233622-e639e219e697 h1:ToEetK57OidYuqD4Q5w+vfEnPvPpuTwedCNVohYJfNk= google.golang.org/genproto v0.0.0-20241118233622-e639e219e697/go.mod h1:JJrvXBWRZaFMxBufik1a4RpFw4HhgVtBBWQeQgUj2cc= -google.golang.org/genproto/googleapis/api v0.0.0-20250414145226-207652e42e2e h1:UdXH7Kzbj+Vzastr5nVfccbmFsmYNygVLSPk1pEfDoY= -google.golang.org/genproto/googleapis/api v0.0.0-20250414145226-207652e42e2e/go.mod h1:085qFyf2+XaZlRdCgKNCIZ3afY2p4HHZdoIRpId8F4A= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250422160041-2d3770c4ea7f h1:N/PrbTw4kdkqNRzVfWPrBekzLuarFREcbFOiOLkXon4= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250422160041-2d3770c4ea7f/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A= +google.golang.org/genproto/googleapis/api v0.0.0-20250707201910-8d1bb00bc6a7 h1:FiusG7LWj+4byqhbvmB+Q93B/mOxJLN2DTozDuZm4EU= +google.golang.org/genproto/googleapis/api v0.0.0-20250707201910-8d1bb00bc6a7/go.mod h1:kXqgZtrWaf6qS3jZOCnCH7WYfrvFjkC51bM8fz3RsCA= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250707201910-8d1bb00bc6a7 h1:pFyd6EwwL2TqFf8emdthzeX+gZE1ElRq3iM8pui4KBY= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250707201910-8d1bb00bc6a7/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM= @@ -2394,8 +2505,8 @@ google.golang.org/grpc v1.52.3/go.mod h1:pu6fVzoFb+NBYNAvQL08ic+lvB2IojljRYuun5v google.golang.org/grpc v1.53.0/go.mod h1:OnIrk0ipVdj4N5d9IUoFUx72/VlD7+jUsHwZgwSMQpw= google.golang.org/grpc v1.54.0/go.mod h1:PUSEXI6iWghWaB6lXM4knEgpJNu2qUcKfDtNci3EC2g= google.golang.org/grpc v1.56.3/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s= -google.golang.org/grpc v1.72.0 h1:S7UkcVa60b5AAQTaO6ZKamFp1zMZSU0fGDK2WZLbBnM= -google.golang.org/grpc v1.72.0/go.mod h1:wH5Aktxcg25y1I3w7H69nHfXdOG3UiadoBtjh3izSDM= +google.golang.org/grpc v1.75.0 h1:+TW+dqTd2Biwe6KKfhE5JpiYIBWq865PhKGSXiivqt4= +google.golang.org/grpc v1.75.0/go.mod h1:JtPAzKiq4v1xcAB2hydNlWI2RnF85XXcV0mhKXr2ecQ= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= 
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= @@ -2415,8 +2526,8 @@ google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqw google.golang.org/protobuf v1.29.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= -google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY= -google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY= +google.golang.org/protobuf v1.36.8 h1:xHScyCOEuuwZEc6UtSOvPbAT4zRh0xcNRYekJwfqyMc= +google.golang.org/protobuf v1.36.8/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -2429,6 +2540,8 @@ gopkg.in/cheggaaa/pb.v1 v1.0.27/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qS gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= +gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc= +gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc= gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= @@ -2460,24 +2573,26 @@ honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9 honnef.co/go/tools v0.1.3/go.mod h1:NgwopIslSNH47DimFoV78dnkksY2EFtX0ajyb3K/las= lukechampine.com/uint128 v1.1.1/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk= lukechampine.com/uint128 v1.2.0/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk= -lukechampine.com/uint128 v1.3.0 h1:cDdUVfRwDUDovz610ABgFD17nXD4/uDgVHl2sC3+sbo= -lukechampine.com/uint128 v1.3.0/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk= modernc.org/cc/v3 v3.36.0/go.mod h1:NFUHyPn4ekoC/JHeZFfZurN6ixxawE1BnVonP/oahEI= modernc.org/cc/v3 v3.36.2/go.mod h1:NFUHyPn4ekoC/JHeZFfZurN6ixxawE1BnVonP/oahEI= modernc.org/cc/v3 v3.36.3/go.mod h1:NFUHyPn4ekoC/JHeZFfZurN6ixxawE1BnVonP/oahEI= -modernc.org/cc/v3 v3.41.0 h1:QoR1Sn3YWlmA1T4vLaKZfawdVtSiGx8H+cEojbC7v1Q= -modernc.org/cc/v3 v3.41.0/go.mod h1:Ni4zjJYJ04CDOhG7dn640WGfwBzfE0ecX8TyMB0Fv0Y= +modernc.org/cc/v4 v4.21.4 h1:3Be/Rdo1fpr8GrQ7IVw9OHtplU4gWbb+wNgeoBMmGLQ= +modernc.org/cc/v4 v4.21.4/go.mod h1:HM7VJTZbUCR3rV8EYBi9wxnJ0ZBRiGE5OeGXNA0IsLQ= modernc.org/ccgo/v3 v3.0.0-20220428102840-41399a37e894/go.mod h1:eI31LL8EwEBKPpNpA4bU1/i+sKOwOrQy8D87zWUcRZc= modernc.org/ccgo/v3 v3.0.0-20220430103911-bc99d88307be/go.mod h1:bwdAnOoaIt8Ax9YdWGjxWsdkPcZyRPHqrOvJxaKAKGw= modernc.org/ccgo/v3 v3.16.4/go.mod h1:tGtX0gE9Jn7hdZFeU88slbTh1UtCYKusWOoCJuvkWsQ= modernc.org/ccgo/v3 v3.16.6/go.mod h1:tGtX0gE9Jn7hdZFeU88slbTh1UtCYKusWOoCJuvkWsQ= modernc.org/ccgo/v3 v3.16.8/go.mod h1:zNjwkizS+fIFDrDjIAgBSCLkWbJuHF+ar3QRn+Z9aws= modernc.org/ccgo/v3 
v3.16.9/go.mod h1:zNMzC9A9xeNUepy6KuZBbugn3c0Mc9TeiJO4lgvkJDo= -modernc.org/ccgo/v3 v3.16.15 h1:KbDR3ZAVU+wiLyMESPtbtE/Add4elztFyfsWoNTgxS0= -modernc.org/ccgo/v3 v3.16.15/go.mod h1:yT7B+/E2m43tmMOT51GMoM98/MtHIcQQSleGnddkUNI= -modernc.org/ccorpus v1.11.6 h1:J16RXiiqiCgua6+ZvQot4yUuUy8zxgqbqEEUuGPlISk= +modernc.org/ccgo/v4 v4.19.2 h1:lwQZgvboKD0jBwdaeVCTouxhxAyN6iawF3STraAal8Y= +modernc.org/ccgo/v4 v4.19.2/go.mod h1:ysS3mxiMV38XGRTTcgo0DQTeTmAO4oCmJl1nX9VFI3s= modernc.org/ccorpus v1.11.6/go.mod h1:2gEUTrWqdpH2pXsmTM1ZkjeSrUWDpjMu2T6m29L/ErQ= -modernc.org/httpfs v1.0.6 h1:AAgIpFZRXuYnkjftxTAZwMIiwEqAfk8aVB2/oA6nAeM= +modernc.org/fileutil v1.3.0 h1:gQ5SIzK3H9kdfai/5x41oQiKValumqNTDXMvKo62HvE= +modernc.org/fileutil v1.3.0/go.mod h1:XatxS8fZi3pS8/hKG2GH/ArUogfxjpEKs3Ku3aK4JyQ= +modernc.org/gc/v2 v2.4.1 h1:9cNzOqPyMJBvrUipmynX0ZohMhcxPtMccYgGOJdOiBw= +modernc.org/gc/v2 v2.4.1/go.mod h1:wzN5dK1AzVGoH6XOzc3YZ+ey/jPgYHLuVckd62P0GYU= +modernc.org/gc/v3 v3.0.0-20240107210532-573471604cb6 h1:5D53IMaUuA5InSeMu9eJtlQXS2NxAhyWQvkKEgXZhHI= +modernc.org/gc/v3 v3.0.0-20240107210532-573471604cb6/go.mod h1:Qz0X07sNOR1jWYCrJMEnbW/X55x206Q7Vt4mz6/wHp4= modernc.org/httpfs v1.0.6/go.mod h1:7dosgurJGp0sPaRanU53W4xZYKh14wfzX420oZADeHM= modernc.org/libc v0.0.0-20220428101251-2d5f3daf273b/go.mod h1:p7Mg4+koNjc8jkqwcoFBJx7tXkpj00G77X7A72jXPXA= modernc.org/libc v1.16.0/go.mod h1:N4LD6DBE9cf+Dzf9buBlzVJndKr/iJHG97vGLHYnb5A= @@ -2486,8 +2601,8 @@ modernc.org/libc v1.16.17/go.mod h1:hYIV5VZczAmGZAnG15Vdngn5HSF5cSkbvfz2B7GRuVU= modernc.org/libc v1.16.19/go.mod h1:p7Mg4+koNjc8jkqwcoFBJx7tXkpj00G77X7A72jXPXA= modernc.org/libc v1.17.0/go.mod h1:XsgLldpP4aWlPlsjqKRdHPqCxCjISdHfM/yeWC5GyW0= modernc.org/libc v1.17.1/go.mod h1:FZ23b+8LjxZs7XtFMbSzL/EhPxNbfZbErxEHc7cbD9s= -modernc.org/libc v1.37.1 h1:Wi3qhejztgB3hOYQGMc8NwePETHAWXmlU+GQnBNTrw8= -modernc.org/libc v1.37.1/go.mod h1:YAXkAZ8ktnkCKaN9sw/UDeUVkGYJ/YquGO4FTi5nmHE= +modernc.org/libc v1.55.3 h1:AzcW1mhlPNrRtjS5sS+eW2ISCgSOLLNyFzRh/V3Qj/U= +modernc.org/libc v1.55.3/go.mod h1:qFXepLhz+JjFThQ4kzwzOjA/y/artDeg+pcYnY+Q83w= modernc.org/mathutil v1.2.2/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= modernc.org/mathutil v1.4.1/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= modernc.org/mathutil v1.5.0/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= @@ -2496,30 +2611,28 @@ modernc.org/mathutil v1.6.0/go.mod h1:Ui5Q9q1TR2gFm0AQRqQUaBWFLAhQpCwNcuhBOSedWP modernc.org/memory v1.1.1/go.mod h1:/0wo5ibyrQiaoUoH7f9D8dnglAmILJ5/cxZlRECf+Nw= modernc.org/memory v1.2.0/go.mod h1:/0wo5ibyrQiaoUoH7f9D8dnglAmILJ5/cxZlRECf+Nw= modernc.org/memory v1.2.1/go.mod h1:PkUhL0Mugw21sHPeskwZW4D6VscE/GQJOnIpCnW6pSU= -modernc.org/memory v1.7.2 h1:Klh90S215mmH8c9gO98QxQFsY+W451E8AnzjoE2ee1E= -modernc.org/memory v1.7.2/go.mod h1:NO4NVCQy0N7ln+T9ngWqOQfi7ley4vpwvARR+Hjw95E= +modernc.org/memory v1.8.0 h1:IqGTL6eFMaDZZhEWwcREgeMXYwmW83LYW8cROZYkg+E= +modernc.org/memory v1.8.0/go.mod h1:XPZ936zp5OMKGWPqbD3JShgd/ZoQ7899TUuQqxY+peU= modernc.org/opt v0.1.1/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0= modernc.org/opt v0.1.3 h1:3XOZf2yznlhC+ibLltsDGzABUGVx8J6pnFMS3E4dcq4= modernc.org/opt v0.1.3/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0= +modernc.org/sortutil v1.2.0 h1:jQiD3PfS2REGJNzNCMMaLSp/wdMNieTbKX920Cqdgqc= +modernc.org/sortutil v1.2.0/go.mod h1:TKU2s7kJMf1AE84OoiGppNHJwvB753OYfNl2WRb++Ss= modernc.org/sqlite v1.18.1/go.mod h1:6ho+Gow7oX5V+OiOQ6Tr4xeqbx13UZ6t+Fw9IRUG4d4= -modernc.org/sqlite v1.28.0 h1:Zx+LyDDmXczNnEQdvPuEfcFVA2ZPyaD7UCZDjef3BHQ= -modernc.org/sqlite 
v1.28.0/go.mod h1:Qxpazz0zH8Z1xCFyi5GSL3FzbtZ3fvbjmywNogldEW0= +modernc.org/sqlite v1.31.1 h1:XVU0VyzxrYHlBhIs1DiEgSl0ZtdnPtbLVy8hSkzxGrs= +modernc.org/sqlite v1.31.1/go.mod h1:UqoylwmTb9F+IqXERT8bW9zzOWN8qwAIcLdzeBZs4hA= modernc.org/strutil v1.1.1/go.mod h1:DE+MQQ/hjKBZS2zNInV5hhcipt5rLPWkmpbGeW5mmdw= modernc.org/strutil v1.1.3/go.mod h1:MEHNA7PdEnEwLvspRMtWTNnp2nnyvMfkimT1NKNAGbw= modernc.org/strutil v1.2.0 h1:agBi9dp1I+eOnxXeiZawM8F4LawKv4NzGWSaLfyeNZA= modernc.org/strutil v1.2.0/go.mod h1:/mdcBmfOibveCTBxUl5B5l6W+TTH1FXPLHZE6bTosX0= modernc.org/tcl v1.13.1/go.mod h1:XOLfOwzhkljL4itZkK6T72ckMgvj0BDsnKNdZVUOecw= -modernc.org/tcl v1.15.2 h1:C4ybAYCGJw968e+Me18oW55kD/FexcHbqH2xak1ROSY= -modernc.org/tcl v1.15.2/go.mod h1:3+k/ZaEbKrC8ePv8zJWPtBSW0V7Gg9g8rkmhI1Kfs3c= modernc.org/token v1.0.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM= modernc.org/token v1.1.0 h1:Xl7Ap9dKaEs5kLoOQeQmPWevfnk/DM5qcLcYlA8ys6Y= modernc.org/token v1.1.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM= modernc.org/z v1.5.1/go.mod h1:eWFB510QWW5Th9YGZT81s+LwvaAs3Q2yr4sP0rmLkv8= -modernc.org/z v1.7.3 h1:zDJf6iHjrnB+WRD88stbXokugjyc0/pB91ri1gO6LZY= -modernc.org/z v1.7.3/go.mod h1:Ipv4tsdxZRbQyLq9Q1M6gdbkxYzdlrciF2Hi/lS7nWE= nhooyr.io/websocket v1.8.6/go.mod h1:B70DZP8IakI65RVQ51MsWP/8jndNma26DVA/nFSCgW0= -nhooyr.io/websocket v1.8.11 h1:f/qXNc2/3DpoSZkHt1DQu6rj4zGC8JmkkLkWss0MgN0= -nhooyr.io/websocket v1.8.11/go.mod h1:rN9OFWIUwuxg4fR5tELlYC04bXYowCP9GX47ivo2l+c= +nhooyr.io/websocket v1.8.17 h1:KEVeLJkUywCKVsnLIDlD/5gtayKp8VoCkksHCGGfT9Y= +nhooyr.io/websocket v1.8.17/go.mod h1:rN9OFWIUwuxg4fR5tELlYC04bXYowCP9GX47ivo2l+c= nullprogram.com/x/optparse v1.0.0/go.mod h1:KdyPE+Igbe0jQUrVfMqDMeJQIJZEuyV7pjYmp6pbG50= pgregory.net/rapid v1.2.0 h1:keKAYRcjm+e1F0oAuU5F5+YPAWcyxNNRK2wud503Gnk= pgregory.net/rapid v1.2.0/go.mod h1:PY5XlDGj0+V1FCq0o192FdRhpKHGTRIWBgqjDBTrq04= @@ -2528,6 +2641,6 @@ rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= -sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= -sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= +sigs.k8s.io/yaml v1.6.0 h1:G8fkbMSAFqgEFgh4b1wmtzDnioxFCUgTZhlbj5P9QYs= +sigs.k8s.io/yaml v1.6.0/go.mod h1:796bPqUfzR/0jLAl6XjHl3Ck7MiyVv8dbTdyT3/pMf4= sourcegraph.com/sourcegraph/appdash v0.0.0-20190731080439-ebfcffb1b5c0/go.mod h1:hI742Nqp5OhwiqlzhgfbWU4mW4yO10fP+LoT9WOswdU= diff --git a/e2e/relayer/relayer.go b/e2e/relayer/relayer.go index cb02f0d3a83..fed5a4a5e20 100644 --- a/e2e/relayer/relayer.go +++ b/e2e/relayer/relayer.go @@ -6,12 +6,12 @@ import ( "fmt" "testing" - dockerclient "github.com/docker/docker/client" + "github.com/cosmos/interchaintest/v10" + "github.com/cosmos/interchaintest/v10/ibc" + "github.com/cosmos/interchaintest/v10/relayer" + "github.com/cosmos/interchaintest/v10/relayer/hermes" + dockerclient "github.com/moby/moby/client" "github.com/pelletier/go-toml" - "github.com/strangelove-ventures/interchaintest/v8" - "github.com/strangelove-ventures/interchaintest/v8/ibc" - "github.com/strangelove-ventures/interchaintest/v8/relayer" - "github.com/strangelove-ventures/interchaintest/v8/relayer/hermes" "go.uber.org/zap" ) diff --git a/e2e/sample.config.extended.yaml b/e2e/sample.config.extended.yaml index 8ffe75e705e..288af5e1111 100644 --- 
a/e2e/sample.config.extended.yaml +++ b/e2e/sample.config.extended.yaml @@ -2,14 +2,16 @@ # Many of these fields can be overridden with environment variables. # All fields that support this have the corresponding environment variable name in a comment beside the field. -# | Environment Variable | Description | Default Value | -# |----------------------|-------------------------------------------|------------------------------| -# | CHAIN_IMAGE | The image that will be used for the chain | ghcr.io/cosmos/ibc-go-simd | -# | CHAIN_A_TAG | The tag used for chain A | N/A (must be set) | -# | CHAIN_B_TAG | The tag used for chain B | N/A (must be set) | -# | CHAIN_BINARY | The binary used in the container | simd | -# | RELAYER_TAG | The tag used for the relayer | 1.10.4 | -# | RELAYER_ID | The type of relayer to use (rly/hermes) | hermes | +# | Environment Variable | Description | Default Value | +# |----------------------|-------------------------------------------|-------------------------------| +# | CHAIN_IMAGE | The image that will be used for the chain | ghcr.io/cosmos/ibc-go-simd | +# | CHAIN_A_TAG | The tag used for chain A | N/A (must be set) | +# | CHAIN_B_TAG | The tag used for chain B | N/A (must be set) | +# | CHAIN_C_TAG | The tag used for chain C | Optional (fallback to A) | +# | CHAIN_D_TAG | The tag used for chain D | Optional (fallback to A) | +# | CHAIN_BINARY | The binary used in the container | simd | +# | RELAYER_TAG | The tag used for the relayer | 1.13.1 | +# | RELAYER_ID | The type of relayer to use (rly/hermes) | hermes | # see sample.config.yaml for a bare minimum configuration example. # set env E2E_CONFIG_PATH to point to this file to use it. @@ -23,13 +25,29 @@ chains: tag: main # override with CHAIN_A_TAG binary: simd # override with CHAIN_BINARY - # the entry at index 1 corresponds to CHAIN_B + # the entry at index 1 corresponds to CHAIN_B - chainId: chainB-1 numValidators: 4 numFullNodes: 1 image: ghcr.io/cosmos/ibc-go-simd # override with CHAIN_IMAGE tag: main # override with CHAIN_B_TAG binary: simd # override with CHAIN_BINARY + + # the entry at index 2 corresponds to CHAIN_C + - chainId: chainC-1 + numValidators: 4 + numFullNodes: 1 + image: ghcr.io/cosmos/ibc-go-simd # override with CHAIN_IMAGE + tag: main # override with CHAIN_C_TAG + binary: simd # override with CHAIN_BINARY + + # the entry at index 3 corresponds to CHAIN_D + - chainId: chainD-1 + numValidators: 4 + numFullNodes: 1 + image: ghcr.io/cosmos/ibc-go-simd # override with CHAIN_IMAGE + tag: main # override with CHAIN_D_TAG + binary: simd # override with CHAIN_BINARY # activeRelayer must match the id of a relayer specified in the relayers list below. 
activeRelayer: hermes # override with RELAYER_ID @@ -40,7 +58,7 @@ activeRelayer: hermes # override with RELAYER_ID relayers: - id: hermes image: ghcr.io/informalsystems/hermes - tag: "1.10.4" # override with RELAYER_TAG + tag: "1.13.1" # override with RELAYER_TAG - id: rly image: ghcr.io/cosmos/relayer tag: "latest" # override with RELAYER_TAG @@ -65,6 +83,6 @@ upgrades: - planName: "v8.1" tag: "v8.1.0" - planName: "v10" - tag: "v10.1.0" + tag: "v10.3.0" - planName: "ibcwasm-v8" tag: "v8.0.0-e2e-upgrade" diff --git a/e2e/sample.config.yaml b/e2e/sample.config.yaml index 2715858f832..a6ab2bdae3e 100644 --- a/e2e/sample.config.yaml +++ b/e2e/sample.config.yaml @@ -6,3 +6,7 @@ chains: chainId: chainA-1 - tag: main # override with CHAIN_B_TAG chainId: chainB-1 + - tag: main # override with CHAIN_C_TAG + chainId: chainC-1 + - tag: main # override with CHAIN_D_TAG + chainId: chainD-1 diff --git a/e2e/tests/core/02-client/client_test.go b/e2e/tests/core/02-client/client_test.go index 85073b18234..e42c314ccc6 100644 --- a/e2e/tests/core/02-client/client_test.go +++ b/e2e/tests/core/02-client/client_test.go @@ -11,8 +11,8 @@ import ( "testing" "time" - "github.com/strangelove-ventures/interchaintest/v8/ibc" - test "github.com/strangelove-ventures/interchaintest/v8/testutil" + "github.com/cosmos/interchaintest/v10/ibc" + test "github.com/cosmos/interchaintest/v10/testutil" testifysuite "github.com/stretchr/testify/suite" upgradetypes "cosmossdk.io/x/upgrade/types" @@ -73,7 +73,7 @@ func (s *ClientTestSuite) TestScheduleIBCUpgrade_Succeeds() { ctx := context.TODO() testName := t.Name() - s.CreateDefaultPaths(testName) + s.CreatePaths(ibc.DefaultClientOpts(), s.TransferChannelOptions(), testName) chainA, chainB := s.GetChains() chainAWallet := s.CreateUserOnChainA(ctx, testvalues.StartingTokenAmount) @@ -182,7 +182,8 @@ func (s *ClientTestSuite) TestRecoverClient_Succeeds() { ) testName := t.Name() - relayer := s.CreateDefaultPaths(testName) + s.CreatePaths(ibc.DefaultClientOpts(), s.TransferChannelOptions(), testName) + relayer := s.GetRelayerForTest(testName) t.Run("create substitute client with correct trusting period", func(t *testing.T) { // TODO: update when client identifier created is accessible @@ -268,7 +269,8 @@ func (s *ClientTestSuite) TestClient_Update_Misbehaviour() { ) testName := t.Name() - relayer := s.CreateDefaultPaths(testName) + s.CreatePaths(ibc.DefaultClientOpts(), s.TransferChannelOptions(), testName) + relayer := s.GetRelayerForTest(testName) chainA, chainB := s.GetChains() @@ -374,7 +376,7 @@ func (s *ClientTestSuite) TestAllowedClientsParam() { ctx := context.TODO() testName := t.Name() - s.CreateDefaultPaths(testName) + s.CreatePaths(ibc.DefaultClientOpts(), s.TransferChannelOptions(), testName) chainA, chainB := s.GetChains() chainAVersion := chainA.Config().Images[0].Version diff --git a/e2e/tests/core/03-connection/connection_test.go b/e2e/tests/core/03-connection/connection_test.go index 1faac487253..a9c05811f0b 100644 --- a/e2e/tests/core/03-connection/connection_test.go +++ b/e2e/tests/core/03-connection/connection_test.go @@ -10,8 +10,8 @@ import ( "testing" "time" - "github.com/strangelove-ventures/interchaintest/v8/ibc" - test "github.com/strangelove-ventures/interchaintest/v8/testutil" + "github.com/cosmos/interchaintest/v10/ibc" + test "github.com/cosmos/interchaintest/v10/testutil" testifysuite "github.com/stretchr/testify/suite" govtypes "github.com/cosmos/cosmos-sdk/x/gov/types" @@ -39,10 +39,6 @@ func (s *ConnectionTestSuite) SetupSuite() { 
s.SetupChains(context.TODO(), 2, nil) } -func (s *ConnectionTestSuite) CreateConnectionTestPath(testName string) (ibc.Relayer, ibc.ChannelOutput) { - return s.CreatePaths(ibc.DefaultClientOpts(), s.TransferChannelOptions(), testName), s.GetChainAChannelForTest(testName) -} - // QueryMaxExpectedTimePerBlockParam queries the on-chain max expected time per block param for 03-connection func (s *ConnectionTestSuite) QueryMaxExpectedTimePerBlockParam(ctx context.Context, chain ibc.Chain) uint64 { if testvalues.SelfParamsFeatureReleases.IsSupported(chain.Config().Images[0].Version) { @@ -71,11 +67,14 @@ func (s *ConnectionTestSuite) TestMaxExpectedTimePerBlockParam() { t := s.T() ctx := context.TODO() testName := t.Name() - relayer, channelA := s.CreateConnectionTestPath(testName) chainA, chainB := s.GetChains() chainAVersion := chainA.Config().Images[0].Version + s.CreatePaths(ibc.DefaultClientOpts(), s.TransferChannelOptions(), testName) + relayer := s.GetRelayerForTest(testName) + channelA := s.GetChannelBetweenChains(testName, chainA, chainB) + chainBDenom := chainB.Config().Denom chainAIBCToken := testsuite.GetIBCToken(chainBDenom, channelA.PortID, channelA.ChannelID) diff --git a/e2e/tests/interchain_accounts/base_test.go b/e2e/tests/interchain_accounts/base_test.go index d7761b87a27..2c141a9f146 100644 --- a/e2e/tests/interchain_accounts/base_test.go +++ b/e2e/tests/interchain_accounts/base_test.go @@ -8,9 +8,9 @@ import ( "time" "github.com/cosmos/gogoproto/proto" - "github.com/strangelove-ventures/interchaintest/v8" - "github.com/strangelove-ventures/interchaintest/v8/ibc" - test "github.com/strangelove-ventures/interchaintest/v8/testutil" + "github.com/cosmos/interchaintest/v10" + "github.com/cosmos/interchaintest/v10/ibc" + test "github.com/cosmos/interchaintest/v10/testutil" testifysuite "github.com/stretchr/testify/suite" sdkmath "cosmossdk.io/math" @@ -70,7 +70,8 @@ func (s *InterchainAccountsTestSuite) testMsgSendTxSuccessfulTransfer(order chan ctx := context.TODO() testName := t.Name() - relayer := s.CreateDefaultPaths(testName) + s.CreatePaths(ibc.DefaultClientOpts(), s.TransferChannelOptions(), testName) + relayer := s.GetRelayerForTest(testName) chainA, chainB := s.GetChains() @@ -102,7 +103,7 @@ func (s *InterchainAccountsTestSuite) testMsgSendTxSuccessfulTransfer(order chan channels, err := relayer.GetChannels(ctx, s.GetRelayerExecReporter(), chainA.Config().ChainID) s.Require().NoError(err) - s.Require().Equal(len(channels), 2) + s.Require().Len(channels, 2) icaChannel := channels[0] s.Require().Contains(orderMapping[order], icaChannel.Ordering) @@ -169,7 +170,8 @@ func (s *InterchainAccountsTestSuite) TestMsgSendTx_FailedTransfer_InsufficientF ctx := context.TODO() testName := t.Name() - relayer := s.CreateDefaultPaths(testName) + s.CreatePaths(ibc.DefaultClientOpts(), s.TransferChannelOptions(), testName) + relayer := s.GetRelayerForTest(testName) chainA, chainB := s.GetChains() @@ -201,7 +203,7 @@ func (s *InterchainAccountsTestSuite) TestMsgSendTx_FailedTransfer_InsufficientF channels, err := relayer.GetChannels(ctx, s.GetRelayerExecReporter(), chainA.Config().ChainID) s.Require().NoError(err) - s.Require().Equal(len(channels), 2) + s.Require().Len(channels, 2) }) t.Run("fail to execute bank transfer over ICA", func(t *testing.T) { @@ -259,7 +261,8 @@ func (s *InterchainAccountsTestSuite) TestMsgSendTx_SuccessfulTransfer_AfterReop ctx := context.TODO() testName := t.Name() - relayer := s.CreateDefaultPaths(testName) + s.CreatePaths(ibc.DefaultClientOpts(), 
s.TransferChannelOptions(), testName) + relayer := s.GetRelayerForTest(testName) chainA, chainB := s.GetChains() @@ -449,7 +452,8 @@ func (s *InterchainAccountsTestSuite) testMsgSendTxSuccessfulGovProposal(order c ctx := context.TODO() testName := t.Name() - relayer := s.CreateDefaultPaths(testName) + s.CreatePaths(ibc.DefaultClientOpts(), s.TransferChannelOptions(), testName) + relayer := s.GetRelayerForTest(testName) chainA, chainB := s.GetChains() @@ -480,7 +484,7 @@ func (s *InterchainAccountsTestSuite) testMsgSendTxSuccessfulGovProposal(order c channels, err := relayer.GetChannels(ctx, s.GetRelayerExecReporter(), chainA.Config().ChainID) s.Require().NoError(err) - s.Require().Equal(len(channels), 2) + s.Require().Len(channels, 2) icaChannel := channels[0] s.Require().Contains(orderMapping[order], icaChannel.Ordering) diff --git a/e2e/tests/interchain_accounts/gov_test.go b/e2e/tests/interchain_accounts/gov_test.go index a4c20434b86..1be336b2c38 100644 --- a/e2e/tests/interchain_accounts/gov_test.go +++ b/e2e/tests/interchain_accounts/gov_test.go @@ -8,9 +8,9 @@ import ( "time" "github.com/cosmos/gogoproto/proto" - "github.com/strangelove-ventures/interchaintest/v8" - "github.com/strangelove-ventures/interchaintest/v8/ibc" - test "github.com/strangelove-ventures/interchaintest/v8/testutil" + "github.com/cosmos/interchaintest/v10" + "github.com/cosmos/interchaintest/v10/ibc" + test "github.com/cosmos/interchaintest/v10/testutil" testifysuite "github.com/stretchr/testify/suite" sdkmath "cosmossdk.io/math" @@ -47,7 +47,8 @@ func (s *InterchainAccountsGovTestSuite) TestInterchainAccountsGovIntegration() ctx := context.TODO() testName := t.Name() - relayer := s.CreateDefaultPaths(testName) + s.CreatePaths(ibc.DefaultClientOpts(), s.TransferChannelOptions(), testName) + relayer := s.GetRelayerForTest(testName) chainA, chainB := s.GetChains() controllerAccount := s.CreateUserOnChainA(ctx, testvalues.StartingTokenAmount) @@ -76,11 +77,11 @@ func (s *InterchainAccountsGovTestSuite) TestInterchainAccountsGovIntegration() var err error interchainAccAddr, err = query.InterchainAccount(ctx, chainA, govModuleAddress.String(), ibctesting.FirstConnectionID) s.Require().NoError(err) - s.Require().NotZero(len(interchainAccAddr)) + s.Require().NotEmpty(interchainAccAddr) channels, err := relayer.GetChannels(ctx, s.GetRelayerExecReporter(), chainA.Config().ChainID) s.Require().NoError(err) - s.Require().Equal(len(channels), 2) + s.Require().Len(channels, 2) }) t.Run("interchain account executes a bank transfer on behalf of the corresponding owner account", func(t *testing.T) { diff --git a/e2e/tests/interchain_accounts/groups_test.go b/e2e/tests/interchain_accounts/groups_test.go deleted file mode 100644 index cc1b5207fae..00000000000 --- a/e2e/tests/interchain_accounts/groups_test.go +++ /dev/null @@ -1,221 +0,0 @@ -//go:build !test_e2e - -package interchainaccounts - -import ( - "context" - "testing" - "time" - - "github.com/cosmos/gogoproto/proto" - interchaintest "github.com/strangelove-ventures/interchaintest/v8" - "github.com/strangelove-ventures/interchaintest/v8/ibc" - test "github.com/strangelove-ventures/interchaintest/v8/testutil" - testifysuite "github.com/stretchr/testify/suite" - - sdkmath "cosmossdk.io/math" - - sdk "github.com/cosmos/cosmos-sdk/types" - banktypes "github.com/cosmos/cosmos-sdk/x/bank/types" - grouptypes "github.com/cosmos/cosmos-sdk/x/group" - - "github.com/cosmos/ibc-go/e2e/testsuite" - "github.com/cosmos/ibc-go/e2e/testsuite/query" - 
"github.com/cosmos/ibc-go/e2e/testvalues" - controllertypes "github.com/cosmos/ibc-go/v10/modules/apps/27-interchain-accounts/controller/types" - icatypes "github.com/cosmos/ibc-go/v10/modules/apps/27-interchain-accounts/types" - channeltypes "github.com/cosmos/ibc-go/v10/modules/core/04-channel/types" - ibctesting "github.com/cosmos/ibc-go/v10/testing" -) - -const ( - // DefaultGroupMemberWeight is the members voting weight. - // A group members weight is used in the sum of `YES` votes required to meet a decision policy threshold. - DefaultGroupMemberWeight = "1" - - // DefaultGroupThreshold is the minimum weighted sum of `YES` votes that must be met or - // exceeded for a proposal to succeed. - DefaultGroupThreshold = "1" - - // DefaultMetadata defines a reusable metadata string for testing purposes - DefaultMetadata = "custom metadata" - - // DefaultMinExecutionPeriod is the minimum duration after the proposal submission - // where members can start sending MsgExec. This means that the window for - // sending a MsgExec transaction is: - // `[ submission + min_execution_period ; submission + voting_period + max_execution_period]` - // where max_execution_period is an app-specific config, defined in the keeper. - // If not set, min_execution_period will default to 0. - DefaultMinExecutionPeriod = time.Duration(0) - - // DefaultVotingPeriod is the duration from submission of a proposal to the end of voting period - // Within this times votes can be submitted with MsgVote. - DefaultVotingPeriod = time.Minute - - // InitialGroupID is the first group ID generated by x/group - InitialGroupID = 1 - - // InitialProposalID is the first group proposal ID generated by x/group - InitialProposalID = 1 -) - -// compatibility:from_version: v7.10.0 -func TestInterchainAccountsGroupsTestSuite(t *testing.T) { - testifysuite.Run(t, new(InterchainAccountsGroupsTestSuite)) -} - -type InterchainAccountsGroupsTestSuite struct { - testsuite.E2ETestSuite -} - -// SetupSuite sets up chains for the current test suite -func (s *InterchainAccountsGroupsTestSuite) SetupSuite() { - s.SetupChains(context.TODO(), 2, nil) -} - -func (s *InterchainAccountsGroupsTestSuite) QueryGroupPolicyAddress(ctx context.Context, chain ibc.Chain) string { - res, err := query.GRPCQuery[grouptypes.QueryGroupPoliciesByGroupResponse](ctx, chain, &grouptypes.QueryGroupPoliciesByGroupRequest{ - GroupId: InitialGroupID, // always use the initial group id - }) - s.Require().NoError(err) - - return res.GroupPolicies[0].Address -} - -func (s *InterchainAccountsGroupsTestSuite) TestInterchainAccountsGroupsIntegration() { - t := s.T() - ctx := context.TODO() - - var ( - groupPolicyAddr string - interchainAccAddr string - err error - ) - - testName := t.Name() - relayer := s.CreateDefaultPaths(testName) - - chainA, chainB := s.GetChains() - - chainAWallet := s.CreateUserOnChainA(ctx, testvalues.StartingTokenAmount) - chainAAddress := chainAWallet.FormattedAddress() - - chainBWallet := s.CreateUserOnChainB(ctx, testvalues.StartingTokenAmount) - chainBAddress := chainBWallet.FormattedAddress() - - t.Run("create group with new threshold decision policy", func(t *testing.T) { - members := []grouptypes.MemberRequest{ - { - Address: chainAAddress, - Weight: DefaultGroupMemberWeight, - }, - } - - decisionPolicy := grouptypes.NewThresholdDecisionPolicy(DefaultGroupThreshold, DefaultVotingPeriod, DefaultMinExecutionPeriod) - msgCreateGroupWithPolicy, err := grouptypes.NewMsgCreateGroupWithPolicy(chainAAddress, members, DefaultMetadata, DefaultMetadata, 
true, decisionPolicy) - s.Require().NoError(err) - - txResp := s.BroadcastMessages(ctx, chainA, chainAWallet, msgCreateGroupWithPolicy) - s.AssertTxSuccess(txResp) - }) - - t.Run("submit proposal for MsgRegisterInterchainAccount", func(t *testing.T) { - groupPolicyAddr = s.QueryGroupPolicyAddress(ctx, chainA) - msgRegisterAccount := controllertypes.NewMsgRegisterInterchainAccount(ibctesting.FirstConnectionID, groupPolicyAddr, icatypes.NewDefaultMetadataString(ibctesting.FirstConnectionID, ibctesting.FirstConnectionID), channeltypes.ORDERED) - - msgSubmitProposal, err := grouptypes.NewMsgSubmitProposal(groupPolicyAddr, []string{chainAAddress}, []sdk.Msg{msgRegisterAccount}, DefaultMetadata, grouptypes.Exec_EXEC_UNSPECIFIED, "e2e groups proposal: for MsgRegisterInterchainAccount", "e2e groups proposal: for MsgRegisterInterchainAccount") - s.Require().NoError(err) - - txResp := s.BroadcastMessages(ctx, chainA, chainAWallet, msgSubmitProposal) - s.AssertTxSuccess(txResp) - }) - - t.Run("vote and exec proposal", func(t *testing.T) { - msgVote := &grouptypes.MsgVote{ - ProposalId: InitialProposalID, - Voter: chainAAddress, - Option: grouptypes.VOTE_OPTION_YES, - Exec: grouptypes.Exec_EXEC_TRY, - } - - txResp := s.BroadcastMessages(ctx, chainA, chainAWallet, msgVote) - s.AssertTxSuccess(txResp) - }) - - t.Run("start relayer", func(t *testing.T) { - s.StartRelayer(relayer, testName) - }) - - t.Run("verify interchain account registration success", func(t *testing.T) { - interchainAccAddr, err = query.InterchainAccount(ctx, chainA, groupPolicyAddr, ibctesting.FirstConnectionID) - s.Require().NotEmpty(interchainAccAddr) - s.Require().NoError(err) - - channels, err := relayer.GetChannels(ctx, s.GetRelayerExecReporter(), chainA.Config().ChainID) - s.Require().NoError(err) - s.Require().Equal(len(channels), 2) // 1 transfer (created by default), 1 interchain-accounts - }) - - t.Run("fund interchain account wallet", func(t *testing.T) { - err := chainB.SendFunds(ctx, interchaintest.FaucetAccountKeyName, ibc.WalletAmount{ - Address: interchainAccAddr, - Amount: sdkmath.NewInt(testvalues.StartingTokenAmount), - Denom: chainB.Config().Denom, - }) - s.Require().NoError(err) - }) - - t.Run("submit proposal for MsgSendTx", func(t *testing.T) { - msgBankSend := &banktypes.MsgSend{ - FromAddress: interchainAccAddr, - ToAddress: chainBAddress, - Amount: sdk.NewCoins(testvalues.DefaultTransferAmount(chainB.Config().Denom)), - } - - cdc := testsuite.Codec() - - bz, err := icatypes.SerializeCosmosTx(cdc, []proto.Message{msgBankSend}, icatypes.EncodingProtobuf) - s.Require().NoError(err) - - packetData := icatypes.InterchainAccountPacketData{ - Type: icatypes.EXECUTE_TX, - Data: bz, - Memo: "e2e", - } - - msgSubmitTx := controllertypes.NewMsgSendTx(groupPolicyAddr, ibctesting.FirstConnectionID, uint64(time.Hour.Nanoseconds()), packetData) - msgSubmitProposal, err := grouptypes.NewMsgSubmitProposal(groupPolicyAddr, []string{chainAAddress}, []sdk.Msg{msgSubmitTx}, DefaultMetadata, grouptypes.Exec_EXEC_UNSPECIFIED, "e2e groups proposal: for MsgRegisterInterchainAccount", "e2e groups proposal: for MsgRegisterInterchainAccount") - s.Require().NoError(err) - - txResp := s.BroadcastMessages(ctx, chainA, chainAWallet, msgSubmitProposal) - s.AssertTxSuccess(txResp) - }) - - t.Run("vote and exec proposal", func(t *testing.T) { - msgVote := &grouptypes.MsgVote{ - ProposalId: InitialProposalID + 1, - Voter: chainAAddress, - Option: grouptypes.VOTE_OPTION_YES, - Exec: grouptypes.Exec_EXEC_TRY, - } - - txResp := 
s.BroadcastMessages(ctx, chainA, chainAWallet, msgVote) - s.AssertTxSuccess(txResp) - }) - - t.Run("verify tokens transferred", func(t *testing.T) { - s.Require().NoError(test.WaitForBlocks(ctx, 10, chainA, chainB), "failed to wait for blocks") - balance, err := query.Balance(ctx, chainB, chainBAddress, chainB.Config().Denom) - - s.Require().NoError(err) - - expected := testvalues.IBCTransferAmount + testvalues.StartingTokenAmount - s.Require().Equal(expected, balance.Int64()) - - balance, err = query.Balance(ctx, chainB, interchainAccAddr, chainB.Config().Denom) - s.Require().NoError(err) - - expected = testvalues.StartingTokenAmount - testvalues.IBCTransferAmount - s.Require().Equal(expected, balance.Int64()) - }) -} diff --git a/e2e/tests/interchain_accounts/localhost_test.go b/e2e/tests/interchain_accounts/localhost_test.go index 2991ebac284..5d7d4664ada 100644 --- a/e2e/tests/interchain_accounts/localhost_test.go +++ b/e2e/tests/interchain_accounts/localhost_test.go @@ -8,9 +8,9 @@ import ( "time" "github.com/cosmos/gogoproto/proto" - "github.com/strangelove-ventures/interchaintest/v8" - "github.com/strangelove-ventures/interchaintest/v8/ibc" - test "github.com/strangelove-ventures/interchaintest/v8/testutil" + "github.com/cosmos/interchaintest/v10" + "github.com/cosmos/interchaintest/v10/ibc" + test "github.com/cosmos/interchaintest/v10/testutil" testifysuite "github.com/stretchr/testify/suite" sdkmath "cosmossdk.io/math" @@ -50,7 +50,7 @@ func (s *LocalhostInterchainAccountsTestSuite) TestInterchainAccounts_Localhost( ctx := context.TODO() testName := t.Name() - s.CreateDefaultPaths(testName) + s.CreatePaths(ibc.DefaultClientOpts(), s.TransferChannelOptions(), testName) chains := s.GetAllChains() chainA := chains[0] @@ -207,7 +207,7 @@ func (s *LocalhostInterchainAccountsTestSuite) TestInterchainAccounts_ReopenChan ctx := context.TODO() testName := t.Name() - s.CreateDefaultPaths(testName) + s.CreatePaths(ibc.DefaultClientOpts(), s.TransferChannelOptions(), testName) chains := s.GetAllChains() chainA := chains[0] diff --git a/e2e/tests/interchain_accounts/params_test.go b/e2e/tests/interchain_accounts/params_test.go index ce2b81c3a0e..a4b1a7cefdd 100644 --- a/e2e/tests/interchain_accounts/params_test.go +++ b/e2e/tests/interchain_accounts/params_test.go @@ -8,9 +8,9 @@ import ( "time" "github.com/cosmos/gogoproto/proto" - "github.com/strangelove-ventures/interchaintest/v8" - "github.com/strangelove-ventures/interchaintest/v8/ibc" - test "github.com/strangelove-ventures/interchaintest/v8/testutil" + "github.com/cosmos/interchaintest/v10" + "github.com/cosmos/interchaintest/v10/ibc" + test "github.com/cosmos/interchaintest/v10/testutil" testifysuite "github.com/stretchr/testify/suite" sdkmath "cosmossdk.io/math" @@ -67,7 +67,7 @@ func (s *InterchainAccountsParamsTestSuite) TestControllerEnabledParam() { ctx := context.TODO() testName := t.Name() - s.CreateDefaultPaths(testName) + s.CreatePaths(ibc.DefaultClientOpts(), s.TransferChannelOptions(), testName) chainA, _ := s.GetChains() chainAVersion := chainA.Config().Images[0].Version @@ -123,7 +123,8 @@ func (s *InterchainAccountsParamsTestSuite) TestHostEnabledParam() { ctx := context.TODO() testName := t.Name() - relayer := s.CreateDefaultPaths(testName) + s.CreatePaths(ibc.DefaultClientOpts(), s.TransferChannelOptions(), testName) + relayer := s.GetRelayerForTest(testName) chainA, chainB := s.GetChains() chainBVersion := chainB.Config().Images[0].Version @@ -165,7 +166,7 @@ func (s *InterchainAccountsParamsTestSuite) 
TestHostEnabledParam() { channels, err := relayer.GetChannels(ctx, s.GetRelayerExecReporter(), chainA.Config().ChainID) s.Require().NoError(err) - s.Require().Equal(len(channels), 2) + s.Require().Len(channels, 2) }) t.Run("stop relayer", func(t *testing.T) { diff --git a/e2e/tests/interchain_accounts/query_test.go b/e2e/tests/interchain_accounts/query_test.go index 4a6e58f1dc8..90103369c39 100644 --- a/e2e/tests/interchain_accounts/query_test.go +++ b/e2e/tests/interchain_accounts/query_test.go @@ -10,7 +10,8 @@ import ( "time" "github.com/cosmos/gogoproto/proto" - "github.com/strangelove-ventures/interchaintest/v8/testutil" + "github.com/cosmos/interchaintest/v10/ibc" + "github.com/cosmos/interchaintest/v10/testutil" testifysuite "github.com/stretchr/testify/suite" sdk "github.com/cosmos/cosmos-sdk/types" @@ -46,7 +47,8 @@ func (s *InterchainAccountsQueryTestSuite) TestInterchainAccountsQuery() { ctx := context.TODO() testName := t.Name() - relayer := s.CreateDefaultPaths(testName) + s.CreatePaths(ibc.DefaultClientOpts(), s.TransferChannelOptions(), testName) + relayer := s.GetRelayerForTest(testName) chainA, chainB := s.GetChains() diff --git a/e2e/tests/packet_forward_middleware/forward_timeout_test.go b/e2e/tests/packet_forward_middleware/forward_timeout_test.go new file mode 100644 index 00000000000..b23326a88e2 --- /dev/null +++ b/e2e/tests/packet_forward_middleware/forward_timeout_test.go @@ -0,0 +1,223 @@ +//go:build !test_e2e + +package pfm + +import ( + "context" + "testing" + "time" + + "github.com/cosmos/interchaintest/v10/chain/cosmos" + "github.com/cosmos/interchaintest/v10/ibc" + "github.com/cosmos/interchaintest/v10/testutil" + testifysuite "github.com/stretchr/testify/suite" + + "cosmossdk.io/math" + + "github.com/cosmos/ibc-go/e2e/testsuite" + "github.com/cosmos/ibc-go/e2e/testvalues" + pfmtypes "github.com/cosmos/ibc-go/v10/modules/apps/packet-forward-middleware/types" + transfertypes "github.com/cosmos/ibc-go/v10/modules/apps/transfer/types" + chantypes "github.com/cosmos/ibc-go/v10/modules/core/04-channel/types" +) + +type PFMTimeoutTestSuite struct { + testsuite.E2ETestSuite +} + +func TestForwardTransferTimeoutSuite(t *testing.T) { + testifysuite.Run(t, new(PFMTimeoutTestSuite)) +} + +func (s *PFMTimeoutTestSuite) SetupSuite() { + s.SetupChains(context.TODO(), 3, nil) +} + +func (s *PFMTimeoutTestSuite) TestTimeoutOnForward() { + t := s.T() + ctx := context.TODO() + testName := t.Name() + + chains := s.GetAllChains() + chainA, chainB, chainC := chains[0], chains[1], chains[2] + + userA := s.CreateUserOnChainA(ctx, testvalues.StartingTokenAmount) + userB := s.CreateUserOnChainB(ctx, testvalues.StartingTokenAmount) + userC := s.CreateUserOnChainC(ctx, testvalues.StartingTokenAmount) + + s.CreatePaths(ibc.DefaultClientOpts(), s.TransferChannelOptions(), t.Name()) + relayer := s.GetRelayerForTest(t.Name()) + + chanAB := s.GetChannelBetweenChains(testName, chainA, chainB) + chanBC := s.GetChannelBetweenChains(testName, chainB, chainC) + + escrowAddrAB := transfertypes.GetEscrowAddress(chanAB.PortID, chanAB.ChannelID) + escrowAddrBC := transfertypes.GetEscrowAddress(chanBC.PortID, chanBC.ChannelID) + + denomA := chainA.Config().Denom + ibcTokenB := testsuite.GetIBCToken(denomA, chanAB.Counterparty.PortID, chanAB.Counterparty.ChannelID) + ibcTokenC := testsuite.GetIBCToken(ibcTokenB.Path(), chanBC.Counterparty.PortID, chanBC.Counterparty.ChannelID) + + zeroBal := math.NewInt(0) + + // Send packet from a -> b -> c that should timeout between b -> c + retries := uint8(0) + + 
bToCMetadata := pfmtypes.PacketMetadata{ + Forward: pfmtypes.ForwardMetadata{ + Receiver: userC.FormattedAddress(), + Channel: chanBC.ChannelID, + Port: chanBC.PortID, + Retries: &retries, + Timeout: time.Second * 10, // Short timeout + }, + } + + memo, err := bToCMetadata.ToMemo() + s.Require().NoError(err) + + opts := ibc.TransferOptions{ + Memo: memo, + } + + transferAmount := math.NewInt(100_000) + walletAmount := ibc.WalletAmount{ + Address: userB.FormattedAddress(), + Denom: chainA.Config().Denom, + Amount: transferAmount, + } + + bHeightBeforeTransfer, err := chainB.Height(ctx) + s.Require().NoError(err) + + transferTx, err := chainA.SendIBCTransfer(ctx, chanAB.ChannelID, userA.KeyName(), walletAmount, opts) + s.Require().NoError(err) + + s.Require().NoError(testutil.WaitForBlocks(ctx, 5, chainA, chainB)) + err = relayer.Flush(ctx, s.GetRelayerExecReporter(), s.GetPathByChains(chainA, chainB), chanAB.ChannelID) + s.Require().NoError(err) + + // Check that the packet was received on chainB + _, err = cosmos.PollForMessage[*chantypes.MsgRecvPacket](ctx, chainB.(*cosmos.CosmosChain), cosmos.DefaultEncoding().InterfaceRegistry, bHeightBeforeTransfer, bHeightBeforeTransfer+20, nil) + s.Require().NoError(err) + + time.Sleep(time.Second * 12) // Wait for timeout + + // Verify that the users funds are still in escrow on chainA and chainB before we relay the timeout between chainB and chainC + userABalance, err := chainA.GetBalance(ctx, userA.FormattedAddress(), chainA.Config().Denom) + s.Require().NoError(err) + userBBalance, err := chainB.GetBalance(ctx, userB.FormattedAddress(), ibcTokenB.IBCDenom()) + s.Require().NoError(err) + + s.Require().Equal(testvalues.StartingTokenAmount-transferAmount.Int64(), userABalance.Int64()) + s.Require().Equal(zeroBal, userBBalance) + + escrowBalanceAB, err := chainA.GetBalance(ctx, escrowAddrAB.String(), chainA.Config().Denom) + s.Require().NoError(err) + escrowBalanceBC, err := chainB.GetBalance(ctx, escrowAddrBC.String(), ibcTokenB.IBCDenom()) + s.Require().NoError(err) + + s.Require().Equal(transferAmount, escrowBalanceAB) + s.Require().Equal(transferAmount, escrowBalanceBC) + + // Relay the packet from chainB to chainC, which should timeout + err = relayer.Flush(ctx, s.GetRelayerExecReporter(), s.GetPathByChains(chainB, chainC), chanBC.ChannelID) + s.Require().NoError(err) + + bHeightAfterTimeout, err := chainB.Height(ctx) + s.Require().NoError(err) + aHeightAfterTimeout, err := chainA.Height(ctx) + s.Require().NoError(err) + + // Make sure there is a MsgTimeout on chainB + _, err = cosmos.PollForMessage[*chantypes.MsgTimeout](ctx, chainB.(*cosmos.CosmosChain), chainB.Config().EncodingConfig.InterfaceRegistry, bHeightBeforeTransfer, bHeightAfterTimeout+30, nil) + s.Require().NoError(err) + + // Relay the ack from chainB to chainA + err = relayer.Flush(ctx, s.GetRelayerExecReporter(), s.GetPathByChains(chainB, chainA), chanAB.Counterparty.ChannelID) + s.Require().NoError(err) + + // Make sure there is an acknowledgment on chainA + _, err = testutil.PollForAck(ctx, chainA, aHeightAfterTimeout, aHeightAfterTimeout+30, transferTx.Packet) + s.Require().NoError(err) + + // Verify that the users funds have been returned to userA on chainA, and that all escrow balances are zero + userABalance, err = chainA.GetBalance(ctx, userA.FormattedAddress(), chainA.Config().Denom) + s.Require().NoError(err) + + userBBalance, err = chainB.GetBalance(ctx, userB.FormattedAddress(), ibcTokenB.IBCDenom()) + s.Require().NoError(err) + + userCBalance, err := 
chainC.GetBalance(ctx, userC.FormattedAddress(), ibcTokenC.IBCDenom()) + s.Require().NoError(err) + + s.Require().Equal(testvalues.StartingTokenAmount, userABalance.Int64()) + s.Require().Equal(zeroBal, userCBalance) + s.Require().Equal(zeroBal, userBBalance) + + escrowBalanceAB, err = chainA.GetBalance(ctx, escrowAddrAB.String(), chainA.Config().Denom) + s.Require().NoError(err) + + escrowBalanceBC, err = chainB.GetBalance(ctx, escrowAddrBC.String(), ibcTokenB.IBCDenom()) + s.Require().NoError(err) + + s.Require().Equal(zeroBal, escrowBalanceAB) + s.Require().Equal(zeroBal, escrowBalanceBC) + + // Send IBC transfer from ChainA -> ChainB -> ChainC that should succeed + err = relayer.StartRelayer(ctx, s.GetRelayerExecReporter()) + s.Require().NoError(err) + + bToCMetadata = pfmtypes.PacketMetadata{ + Forward: pfmtypes.ForwardMetadata{ + Receiver: userC.FormattedAddress(), + Channel: chanBC.ChannelID, + Port: chanBC.PortID, + }, + } + + memo, err = bToCMetadata.ToMemo() + s.Require().NoError(err) + + opts = ibc.TransferOptions{ + Memo: memo, + } + + aHeightBeforeTransfer, err := chainA.Height(ctx) + s.Require().NoError(err) + + transferTx, err = chainA.SendIBCTransfer(ctx, chanAB.ChannelID, userA.KeyName(), walletAmount, opts) + s.Require().NoError(err) + + s.FlushPackets(ctx, relayer, []ibc.Chain{chainA, chainB, chainC}) + + // Verify that the ack has come all the way back to chainA (only happens after the entire packet lifecycle is complete) + _, err = testutil.PollForAck(ctx, chainA, aHeightBeforeTransfer, aHeightAfterTimeout+30, transferTx.Packet) + s.Require().NoError(err) + + err = testutil.WaitForBlocks(ctx, 10, chainA) + s.Require().NoError(err) + + // Verify that the users funds have been forwarded to userC on chainC, and that the escrow balances are correct + userABalance, err = chainA.GetBalance(ctx, userA.FormattedAddress(), chainA.Config().Denom) + s.Require().NoError(err) + + userBBalance, err = chainB.GetBalance(ctx, userB.FormattedAddress(), ibcTokenB.IBCDenom()) + s.Require().NoError(err) + + userCBalance, err = chainC.GetBalance(ctx, userC.FormattedAddress(), ibcTokenC.IBCDenom()) + s.Require().NoError(err) + + s.Require().Equal(testvalues.StartingTokenAmount-transferAmount.Int64(), userABalance.Int64()) + s.Require().Equal(zeroBal, userBBalance) + s.Require().Equal(transferAmount, userCBalance) + + escrowBalanceAB, err = chainA.GetBalance(ctx, escrowAddrAB.String(), chainA.Config().Denom) + s.Require().NoError(err) + + escrowBalanceBC, err = chainB.GetBalance(ctx, escrowAddrBC.String(), ibcTokenB.IBCDenom()) + s.Require().NoError(err) + + s.Require().Equal(transferAmount, escrowBalanceAB) + s.Require().Equal(transferAmount, escrowBalanceBC) +} diff --git a/e2e/tests/packet_forward_middleware/packet_forward_test.go b/e2e/tests/packet_forward_middleware/packet_forward_test.go new file mode 100644 index 00000000000..fad904af1ae --- /dev/null +++ b/e2e/tests/packet_forward_middleware/packet_forward_test.go @@ -0,0 +1,299 @@ +//go:build !test_e2e + +package pfm + +import ( + "context" + "testing" + + "github.com/cosmos/interchaintest/v10/ibc" + testifysuite "github.com/stretchr/testify/suite" + + "github.com/cosmos/ibc-go/e2e/testsuite" + "github.com/cosmos/ibc-go/e2e/testsuite/query" + "github.com/cosmos/ibc-go/e2e/testvalues" + pfmtypes "github.com/cosmos/ibc-go/v10/modules/apps/packet-forward-middleware/types" + transfertypes "github.com/cosmos/ibc-go/v10/modules/apps/transfer/types" +) + +type PFMTestSuite struct { + testsuite.E2ETestSuite +} + +func 
TestForwardTransferSuite(t *testing.T) { + testifysuite.Run(t, new(PFMTestSuite)) +} + +func (s *PFMTestSuite) SetupSuite() { + s.SetupChains(context.TODO(), 4, nil) +} + +func (s *PFMTestSuite) TestForwardPacket() { + t := s.T() + ctx := context.TODO() + testName := t.Name() + + chains := s.GetAllChains() + chainA, chainB, chainC, chainD := chains[0], chains[1], chains[2], chains[3] + + userA := s.CreateUserOnChainA(ctx, testvalues.StartingTokenAmount) + userB := s.CreateUserOnChainB(ctx, testvalues.StartingTokenAmount) + userC := s.CreateUserOnChainC(ctx, testvalues.StartingTokenAmount) + userD := s.CreateUserOnChainD(ctx, testvalues.StartingTokenAmount) + + s.CreatePaths(ibc.DefaultClientOpts(), s.TransferChannelOptions(), t.Name()) + relayer := s.GetRelayerForTest(t.Name()) + s.StartRelayer(relayer, testName) + + chanAB := s.GetChannelBetweenChains(testName, chainA, chainB) + chanBC := s.GetChannelBetweenChains(testName, chainB, chainC) + chanCD := s.GetChannelBetweenChains(testName, chainC, chainD) + + ab, err := query.Channel(ctx, chainA, transfertypes.PortID, chanAB.ChannelID) + s.Require().NoError(err) + s.Require().NotNil(ab) + + bc, err := query.Channel(ctx, chainB, transfertypes.PortID, chanBC.ChannelID) + s.Require().NoError(err) + s.Require().NotNil(bc) + + cd, err := query.Channel(ctx, chainC, transfertypes.PortID, chanCD.ChannelID) + s.Require().NoError(err) + s.Require().NotNil(cd) + + escrowAddrAB := transfertypes.GetEscrowAddress(chanAB.PortID, chanAB.ChannelID) + escrowAddrBC := transfertypes.GetEscrowAddress(chanBC.PortID, chanBC.ChannelID) + escrowAddrCD := transfertypes.GetEscrowAddress(chanCD.PortID, chanCD.ChannelID) + + denomA := chainA.Config().Denom + ibcTokenB := testsuite.GetIBCToken(denomA, chanAB.Counterparty.PortID, chanAB.Counterparty.ChannelID) + ibcTokenC := testsuite.GetIBCToken(ibcTokenB.Path(), chanBC.Counterparty.PortID, chanBC.Counterparty.ChannelID) + ibcTokenD := testsuite.GetIBCToken(ibcTokenC.Path(), chanCD.Counterparty.PortID, chanCD.Counterparty.ChannelID) + + t.Run("Multihop forward [A -> B -> C -> D]", func(_ *testing.T) { + // Send packet from Chain A->Chain B->Chain C->Chain D + // From A -> B will be handled by transfer msg. + // From B -> C will be handled by firstHopMetadata. + // From C -> D will be handled by secondHopMetadata. 
+ secondHopMetadata := pfmtypes.PacketMetadata{ + Forward: pfmtypes.ForwardMetadata{ + Receiver: userD.FormattedAddress(), + Channel: chanCD.ChannelID, + Port: chanCD.PortID, + }, + } + + firstHopMetadata := pfmtypes.PacketMetadata{ + Forward: pfmtypes.ForwardMetadata{ + Receiver: userC.FormattedAddress(), + Channel: chanBC.ChannelID, + Port: chanBC.PortID, + Next: &secondHopMetadata, + }, + } + + memo, err := firstHopMetadata.ToMemo() + s.Require().NoError(err) + + txResp := s.Transfer(ctx, chainA, userA, chanAB.PortID, chanAB.ChannelID, testvalues.DefaultTransferAmount(denomA), userA.FormattedAddress(), userB.FormattedAddress(), s.GetTimeoutHeight(ctx, chainA), 0, memo) + s.AssertTxSuccess(txResp) + + s.FlushPackets(ctx, relayer, []ibc.Chain{chainA, chainB, chainC, chainD}) + + actualBalance, err := s.GetChainANativeBalance(ctx, userA) + s.Require().NoError(err) + expected := testvalues.StartingTokenAmount - testvalues.IBCTransferAmount + s.Require().Equal(expected, actualBalance) + + escrowBalAB, err := query.Balance(ctx, chainA, escrowAddrAB.String(), denomA) + s.Require().NoError(err) + s.Require().Equal(testvalues.IBCTransferAmount, escrowBalAB.Int64()) + + versionB := chainB.Config().Images[0].Version + if testvalues.TokenMetadataFeatureReleases.IsSupported(versionB) { + s.AssertHumanReadableDenom(ctx, chainB, denomA, chanAB) + } + + escrowBalBC, err := query.Balance(ctx, chainB, escrowAddrBC.String(), ibcTokenB.IBCDenom()) + s.Require().NoError(err) + s.Require().Equal(testvalues.IBCTransferAmount, escrowBalBC.Int64()) + + escrowBalCD, err := query.Balance(ctx, chainC, escrowAddrCD.String(), ibcTokenC.IBCDenom()) + s.Require().NoError(err) + s.Require().Equal(testvalues.IBCTransferAmount, escrowBalCD.Int64()) + + balanceD, err := query.Balance(ctx, chainD, userD.FormattedAddress(), ibcTokenD.IBCDenom()) + s.Require().NoError(err) + s.Require().Equal(testvalues.IBCTransferAmount, balanceD.Int64()) + }) + + t.Run("Packet forwarded [D -> C -> B -> A]", func(_ *testing.T) { + secondHopMetadata := pfmtypes.PacketMetadata{ + Forward: pfmtypes.ForwardMetadata{ + Receiver: userA.FormattedAddress(), + Channel: chanAB.Counterparty.ChannelID, + Port: chanAB.Counterparty.PortID, + }, + } + + firstHopMetadata := pfmtypes.PacketMetadata{ + Forward: pfmtypes.ForwardMetadata{ + Receiver: userB.FormattedAddress(), + Channel: chanBC.Counterparty.ChannelID, + Port: chanBC.Counterparty.PortID, + Next: &secondHopMetadata, + }, + } + + memo, err := firstHopMetadata.ToMemo() + s.Require().NoError(err) + + txResp := s.Transfer(ctx, chainD, userD, chanCD.Counterparty.PortID, chanCD.Counterparty.ChannelID, testvalues.DefaultTransferAmount(ibcTokenD.IBCDenom()), userD.FormattedAddress(), userC.FormattedAddress(), s.GetTimeoutHeight(ctx, chainD), 0, memo) + s.AssertTxSuccess(txResp) + + // Flush the packet all the way back to Chain A and then the acknowledgement back to Chain D + s.FlushPackets(ctx, relayer, []ibc.Chain{chainA, chainB, chainC, chainD}) + + // All escrow accounts have been cleared + escrowBalAB, err := query.Balance(ctx, chainA, escrowAddrAB.String(), denomA) + s.Require().NoError(err) + s.Require().Zero(escrowBalAB.Int64()) + + escrowBalBC, err := query.Balance(ctx, chainB, escrowAddrBC.String(), ibcTokenB.IBCDenom()) + s.Require().NoError(err) + s.Require().Zero(escrowBalBC.Int64()) + + escrowBalCD, err := query.Balance(ctx, chainC, escrowAddrCD.String(), ibcTokenC.IBCDenom()) + s.Require().NoError(err) + s.Require().Zero(escrowBalCD.Int64()) + + userDBalance, err := query.Balance(ctx, 
chainD, userD.FormattedAddress(), ibcTokenD.IBCDenom()) + s.Require().NoError(err) + s.Require().Zero(userDBalance.Int64()) + + // User A has his asset back + balance, err := s.GetChainANativeBalance(ctx, userA) + s.Require().NoError(err) + s.Require().Equal(testvalues.StartingTokenAmount, balance) + }) + + t.Run("Error while forwarding: Refund ok [A -> B -> C ->X D]", func(_ *testing.T) { + secondHopMetadata := pfmtypes.PacketMetadata{ + Forward: pfmtypes.ForwardMetadata{ + Receiver: "GurbageAddress", + Channel: chanCD.ChannelID, + Port: chanCD.PortID, + }, + } + + firstHopMetadata := pfmtypes.PacketMetadata{ + Forward: pfmtypes.ForwardMetadata{ + Receiver: userC.FormattedAddress(), + Channel: chanBC.ChannelID, + Port: chanBC.PortID, + Next: &secondHopMetadata, + }, + } + + memo, err := firstHopMetadata.ToMemo() + s.Require().NoError(err) + + txResp := s.Transfer(ctx, chainA, userA, chanAB.PortID, chanAB.ChannelID, testvalues.DefaultTransferAmount(ibcTokenD.IBCDenom()), userA.FormattedAddress(), userB.FormattedAddress(), s.GetTimeoutHeight(ctx, chainA), 0, memo) + s.AssertTxFailure(txResp, transfertypes.ErrDenomNotFound) + + // Flush the packet all the way back to Chain D and then the acknowledgement back to Chain A + s.FlushPackets(ctx, relayer, []ibc.Chain{chainA, chainB, chainC, chainD}) + + // C -> D should not happen. + // Refunded UserA on chain A. + escrowBalAB, err := query.Balance(ctx, chainA, escrowAddrAB.String(), denomA) + s.Require().NoError(err) + s.Require().Zero(escrowBalAB.Int64()) + + escrowBalBC, err := query.Balance(ctx, chainB, escrowAddrBC.String(), ibcTokenB.IBCDenom()) + s.Require().NoError(err) + s.Require().Zero(escrowBalBC.Int64()) + + escrowBalCD, err := query.Balance(ctx, chainC, escrowAddrCD.String(), ibcTokenC.IBCDenom()) + s.Require().NoError(err) + s.Require().Zero(escrowBalCD.Int64()) + + userDBalance, err := query.Balance(ctx, chainD, userD.FormattedAddress(), ibcTokenD.IBCDenom()) + s.Require().NoError(err) + s.Require().Zero(userDBalance.Int64()) + + // User A has his asset back + balance, err := s.GetChainANativeBalance(ctx, userA) + s.Require().NoError(err) + s.Require().Equal(testvalues.StartingTokenAmount, balance) + + // send normal IBC transfer from B->A to get funds in IBC denom, then do multihop A->B(native)->C->D + // this lets us test the burn from escrow account on chain C and the escrow to escrow transfer on chain B. + + denomB := chainB.Config().Denom + ibcTokenA := testsuite.GetIBCToken(denomB, chanAB.Counterparty.PortID, chanAB.Counterparty.ChannelID) + escrowAddrCD = transfertypes.GetEscrowAddress(chanAB.Counterparty.PortID, chanAB.Counterparty.ChannelID) + + txResp = s.Transfer(ctx, chainB, userB, chanAB.Counterparty.PortID, chanAB.Counterparty.ChannelID, testvalues.DefaultTransferAmount(denomB), userB.FormattedAddress(), userA.FormattedAddress(), s.GetTimeoutHeight(ctx, chainB), 0, "") + s.AssertTxSuccess(txResp) + + s.FlushPackets(ctx, relayer, []ibc.Chain{chainB, chainA}) + + escrowBalBC, err = query.Balance(ctx, chainB, escrowAddrCD.String(), denomB) + s.Require().NoError(err) + s.Require().Equal(testvalues.IBCTransferAmount, escrowBalBC.Int64()) + + balanceA, err := query.Balance(ctx, chainA, userA.FormattedAddress(), ibcTokenA.IBCDenom()) + s.Require().NoError(err) + s.Require().Equal(testvalues.IBCTransferAmount, balanceA.Int64()) + + // Proof that unwinding happens. 
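+ // Because the ibcTokenA voucher is sent back over the channel it originally arrived on, chain B should release the escrowed native denomB instead of minting a new voucher; the escrow and userB balance checks below verify exactly that.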
+ txResp = s.Transfer(ctx, chainA, userA, chanAB.PortID, chanAB.ChannelID, testvalues.DefaultTransferAmount(ibcTokenA.IBCDenom()), userA.FormattedAddress(), userB.FormattedAddress(), s.GetTimeoutHeight(ctx, chainA), 0, "") + s.AssertTxSuccess(txResp) + + s.FlushPackets(ctx, relayer, []ibc.Chain{chainA, chainB}) + + // Escrow account is cleared on chain B + escrowBalBC, err = query.Balance(ctx, chainB, escrowAddrCD.String(), denomB) + s.Require().NoError(err) + s.Require().Zero(escrowBalBC.Int64()) + + // ChainB user now has the same amount he started with + balanceB, err := s.GetChainBNativeBalance(ctx, userB) + s.Require().NoError(err) + s.Require().Equal(testvalues.StartingTokenAmount, balanceB) + }) + + // A -> B -> A Nothing changes + t.Run("A -> B -> A", func(_ *testing.T) { + balanceAInt, err := s.GetChainANativeBalance(ctx, userA) + s.Require().NoError(err) + balanceBInt, err := s.GetChainBNativeBalance(ctx, userB) + s.Require().NoError(err) + + firstHopMetadata := pfmtypes.PacketMetadata{ + Forward: pfmtypes.ForwardMetadata{ + Receiver: userA.FormattedAddress(), + Channel: chanAB.Counterparty.ChannelID, + Port: chanAB.Counterparty.PortID, + }, + } + + memo, err := firstHopMetadata.ToMemo() + s.Require().NoError(err) + + txResp := s.Transfer(ctx, chainA, userA, chanAB.PortID, chanAB.ChannelID, testvalues.DefaultTransferAmount(denomA), userA.FormattedAddress(), userB.FormattedAddress(), s.GetTimeoutHeight(ctx, chainA), 0, memo) + s.AssertTxSuccess(txResp) + + s.FlushPackets(ctx, relayer, []ibc.Chain{chainA, chainB}) + s.FlushPackets(ctx, relayer, []ibc.Chain{chainB, chainA}) + + balanceAIntAfter, err := s.GetChainANativeBalance(ctx, userA) + s.Require().NoError(err) + balanceBIntAfter, err := s.GetChainBNativeBalance(ctx, userB) + s.Require().NoError(err) + + s.Require().Equal(balanceAInt, balanceAIntAfter) + s.Require().Equal(balanceBInt, balanceBIntAfter) + }) +} diff --git a/e2e/tests/packet_forward_middleware/pfm_upgrade_test.go b/e2e/tests/packet_forward_middleware/pfm_upgrade_test.go new file mode 100644 index 00000000000..79b012a7066 --- /dev/null +++ b/e2e/tests/packet_forward_middleware/pfm_upgrade_test.go @@ -0,0 +1,178 @@ +//go:build !test_e2e + +package pfm + +import ( + "context" + "strings" + "testing" + "time" + + "github.com/cosmos/interchaintest/v10/chain/cosmos" + "github.com/cosmos/interchaintest/v10/ibc" + test "github.com/cosmos/interchaintest/v10/testutil" + + "github.com/cosmos/ibc-go/e2e/testsuite" + "github.com/cosmos/ibc-go/e2e/testsuite/query" + "github.com/cosmos/ibc-go/e2e/testvalues" + pfmtypes "github.com/cosmos/ibc-go/v10/modules/apps/packet-forward-middleware/types" + transfertypes "github.com/cosmos/ibc-go/v10/modules/apps/transfer/types" + chantypes "github.com/cosmos/ibc-go/v10/modules/core/04-channel/types" + ibctesting "github.com/cosmos/ibc-go/v10/testing" +) + +// TODO: Move to `e2e/tests/upgrades` in #8360 +type PFMUpgradeTestSuite struct { + testsuite.E2ETestSuite +} + +func TestPFMUpgradeTestSuite(t *testing.T) { + // TODO: Enable as we clean up these tests #8360 + t.Skip("Skipping as relayer is not relaying failed packets") + testCfg := testsuite.LoadConfig() + if testCfg.UpgradePlanName == "" { + t.Fatalf("%s must be set when running an upgrade test", testsuite.ChainUpgradePlanEnv) + } + + // testifysuite.Run(t, new(PFMUpgradeTestSuite)) +} + +func updateGenesisChainB(option *testsuite.ChainOptions) { + option.ChainSpecs[1].ModifyGenesis = cosmos.ModifyGenesis([]cosmos.GenesisKV{ + { + Key: "app_state.gov.params.voting_period", + Value: 
"15s", + }, + { + Key: "app_state.gov.params.max_deposit_period", + Value: "10s", + }, + { + Key: "app_state.gov.params.min_deposit.0.denom", + Value: "ustake", + }, + }) +} + +func (s *PFMUpgradeTestSuite) SetupSuite() { + s.SetupChains(context.TODO(), 4, nil, updateGenesisChainB) +} + +func (s *PFMUpgradeTestSuite) TestV8ToV10ChainUpgrade_PacketForward() { + t := s.T() + ctx := context.TODO() + testName := t.Name() + + chains := s.GetAllChains() + chainA, chainB, chainC, chainD := chains[0], chains[1], chains[2], chains[3] + + userA := s.CreateUserOnChainA(ctx, testvalues.StartingTokenAmount) + userB := s.CreateUserOnChainB(ctx, testvalues.StartingTokenAmount) + userC := s.CreateUserOnChainC(ctx, testvalues.StartingTokenAmount) + + s.CreatePaths(ibc.DefaultClientOpts(), s.TransferChannelOptions(), t.Name()) + relayer := s.GetRelayerForTest(t.Name()) + s.StartRelayer(relayer, testName) + + chanAB := s.GetChannelBetweenChains(testName, chainA, chainB) + chanBC := s.GetChannelBetweenChains(testName, chainB, chainC) + chanCD := s.GetChannelBetweenChains(testName, chainC, chainD) + + ab, err := query.Channel(ctx, chainA, transfertypes.PortID, chanAB.ChannelID) + s.Require().NoError(err) + s.Require().NotNil(ab) + + bc, err := query.Channel(ctx, chainB, transfertypes.PortID, chanBC.ChannelID) + s.Require().NoError(err) + s.Require().NotNil(bc) + + cd, err := query.Channel(ctx, chainC, transfertypes.PortID, chanCD.ChannelID) + s.Require().NoError(err) + s.Require().NotNil(cd) + + escrowAddrA := transfertypes.GetEscrowAddress(chanAB.PortID, chanAB.ChannelID) + + denomB := chainB.Config().Denom + ibcTokenA := testsuite.GetIBCToken(denomB, chanAB.Counterparty.PortID, chanAB.Counterparty.ChannelID) + + s.Require().NoError(test.WaitForBlocks(ctx, 1, chainA, chainB), "failed to wait for blocks") + + t.Run("Send from B -> A", func(_ *testing.T) { + aHeight, err := chainA.Height(ctx) + s.Require().NoError(err) + + txResp := s.Transfer(ctx, chainB, userB, chanAB.Counterparty.PortID, chanAB.Counterparty.ChannelID, testvalues.DefaultTransferAmount(denomB), userB.FormattedAddress(), userA.FormattedAddress(), s.GetTimeoutHeight(ctx, chainB), 0, "") + s.AssertTxSuccess(txResp) + + bBal, err := s.GetChainBNativeBalance(ctx, userB) + s.Require().NoError(err) + expected := testvalues.StartingTokenAmount - testvalues.IBCTransferAmount + s.Require().Equal(expected, bBal) + + _, err = cosmos.PollForMessage[*chantypes.MsgRecvPacket](ctx, chainA.(*cosmos.CosmosChain), cosmos.DefaultEncoding().InterfaceRegistry, aHeight, aHeight+40, nil) + s.Require().NoError(err) + + escrowBalB, err := query.Balance(ctx, chainB, escrowAddrA.String(), denomB) + s.Require().NoError(err) + s.Require().Equal(testvalues.IBCTransferAmount, escrowBalB.Int64()) + + escrowBalA, err := query.Balance(ctx, chainA, userA.FormattedAddress(), ibcTokenA.IBCDenom()) + s.Require().NoError(err) + s.Require().Equal(testvalues.IBCTransferAmount, escrowBalA.Int64()) + }) + + // Send the IBC denom that chain A received from the previous step + t.Run("Send from A -> B -> C ->X D", func(_ *testing.T) { + secondHopMetadata := pfmtypes.PacketMetadata{ + Forward: pfmtypes.ForwardMetadata{ + Receiver: "cosmos1wgz9ntx6e5vu4npeabcde88d7kfsymag62p6y2", + Channel: chanCD.ChannelID, + Port: chanCD.PortID, + }, + } + + firstHopMetadata := pfmtypes.PacketMetadata{ + Forward: pfmtypes.ForwardMetadata{ + Receiver: userC.FormattedAddress(), + Channel: chanBC.ChannelID, + Port: chanBC.PortID, + Next: &secondHopMetadata, + }, + } + + memo, err := firstHopMetadata.ToMemo() 
+ s.Require().NoError(err) + + bHeight, err := chainB.Height(ctx) + s.Require().NoError(err) + + ibcDenomOnA := ibcTokenA.IBCDenom() + txResp := s.Transfer(ctx, chainA, userA, chanAB.PortID, chanAB.ChannelID, testvalues.DefaultTransferAmount(ibcDenomOnA), userA.FormattedAddress(), userB.FormattedAddress(), s.GetTimeoutHeight(ctx, chainA), 0, memo) + s.AssertTxSuccess(txResp) + + packet, err := ibctesting.ParseV1PacketFromEvents(txResp.Events) + s.Require().NoError(err) + s.Require().NotNil(packet) + + _, err = cosmos.PollForMessage[*chantypes.MsgRecvPacket](ctx, chainB.(*cosmos.CosmosChain), cosmos.DefaultEncoding().InterfaceRegistry, bHeight, bHeight+40, nil) + s.Require().NoError(err) + + actualBalance, err := query.Balance(ctx, chainA, userA.FormattedAddress(), ibcDenomOnA) + s.Require().NoError(err) + s.Require().Zero(actualBalance) + + escrowBalA, err := query.Balance(ctx, chainA, escrowAddrA.String(), ibcDenomOnA) + s.Require().NoError(err) + s.Require().Equal(testvalues.IBCTransferAmount, escrowBalA.Int64()) + + // Assert the packet was relayed + s.Require().Eventually(func() bool { + _, err := query.GRPCQuery[chantypes.QueryPacketCommitmentResponse](ctx, chainA, &chantypes.QueryPacketCommitmentRequest{ + PortId: chanAB.PortID, + ChannelId: chanAB.ChannelID, + Sequence: packet.Sequence, + }) + return err != nil && strings.Contains(err.Error(), "packet commitment hash not found") + }, time.Second*70, time.Second) + }) +} diff --git a/e2e/tests/rate_limiting/rate_limiting_test.go b/e2e/tests/rate_limiting/rate_limiting_test.go new file mode 100644 index 00000000000..dc84a918f2c --- /dev/null +++ b/e2e/tests/rate_limiting/rate_limiting_test.go @@ -0,0 +1,259 @@ +//go:build !test_e2e + +package ratelimiting + +import ( + "context" + "testing" + + interchaintest "github.com/cosmos/interchaintest/v10" + "github.com/cosmos/interchaintest/v10/ibc" + "github.com/cosmos/interchaintest/v10/testutil" + testifysuite "github.com/stretchr/testify/suite" + + sdkmath "cosmossdk.io/math" + + sdk "github.com/cosmos/cosmos-sdk/types" + govtypes "github.com/cosmos/cosmos-sdk/x/gov/types" + + "github.com/cosmos/ibc-go/e2e/testsuite" + "github.com/cosmos/ibc-go/e2e/testsuite/query" + "github.com/cosmos/ibc-go/e2e/testvalues" + ratelimitingtypes "github.com/cosmos/ibc-go/v10/modules/apps/rate-limiting/types" + transfertypes "github.com/cosmos/ibc-go/v10/modules/apps/transfer/types" + ibctesting "github.com/cosmos/ibc-go/v10/testing" +) + +type RateLimTestSuite struct { + testsuite.E2ETestSuite +} + +func TestRateLimitSuite(t *testing.T) { + testifysuite.Run(t, new(RateLimTestSuite)) +} + +func (s *RateLimTestSuite) SetupSuite() { + s.SetupChains(context.TODO(), 2, nil, func(options *testsuite.ChainOptions) { + options.RelayerCount = 1 + }) +} + +func (s *RateLimTestSuite) TestRateLimit() { + t := s.T() + ctx := context.TODO() + testName := t.Name() + + chainA, chainB := s.GetChains() + + userA := s.CreateUserOnChainA(ctx, testvalues.StartingTokenAmount) + userB := s.CreateUserOnChainB(ctx, testvalues.StartingTokenAmount) + + authority, err := query.ModuleAccountAddress(ctx, govtypes.ModuleName, chainA) + s.Require().NoError(err) + s.Require().NotNil(authority) + + s.CreatePaths(ibc.DefaultClientOpts(), s.TransferChannelOptions(), testName) + relayer := s.GetRelayerForTest(testName) + s.StartRelayer(relayer, testName) + + chanAB := s.GetChannelBetweenChains(testName, chainA, chainB) + + escrowAddrA := transfertypes.GetEscrowAddress(chanAB.PortID, chanAB.ChannelID) + denomA := chainA.Config().Denom + + ibcTokenB :=
testsuite.GetIBCToken(denomA, chanAB.PortID, chanAB.ChannelID) + + t.Run("No rate limit set: transfer succeeds", func(_ *testing.T) { + userABalBefore, err := s.GetChainANativeBalance(ctx, userA) + s.Require().NoError(err) + userBBalBefore, err := query.Balance(ctx, chainB, userB.FormattedAddress(), ibcTokenB.IBCDenom()) + s.Require().NoError(err) + s.Require().Zero(userBBalBefore.Int64()) + + txResp := s.Transfer(ctx, chainA, userA, chanAB.PortID, chanAB.ChannelID, testvalues.DefaultTransferAmount(denomA), userA.FormattedAddress(), userB.FormattedAddress(), s.GetTimeoutHeight(ctx, chainA), 0, "") + s.AssertTxSuccess(txResp) + + packet, err := ibctesting.ParseV1PacketFromEvents(txResp.Events) + s.Require().NoError(err) + s.Require().NotNil(packet) + + s.Require().NoError(testutil.WaitForBlocks(ctx, 5, chainA, chainB), "failed to wait for blocks") + s.AssertPacketRelayed(ctx, chainA, chanAB.PortID, chanAB.ChannelID, packet.Sequence) + + userABalAfter, err := s.GetChainANativeBalance(ctx, userA) + s.Require().NoError(err) + + // Balance moved from userA to userB + s.Require().Equal(userABalBefore-testvalues.IBCTransferAmount, userABalAfter) + escrowBalA, err := query.Balance(ctx, chainA, escrowAddrA.String(), denomA) + s.Require().NoError(err) + s.Require().Equal(testvalues.IBCTransferAmount, escrowBalA.Int64()) + + userBBalAfter, err := query.Balance(ctx, chainB, userB.FormattedAddress(), ibcTokenB.IBCDenom()) + s.Require().NoError(err) + s.Require().Equal(testvalues.IBCTransferAmount, userBBalAfter.Int64()) + }) + + t.Run("Add outgoing rate limit on ChainA", func(_ *testing.T) { + resp, err := query.GRPCQuery[ratelimitingtypes.QueryAllRateLimitsResponse](ctx, chainA, &ratelimitingtypes.QueryAllRateLimitsRequest{}) + s.Require().NoError(err) + s.Require().Nil(resp.RateLimits) + + sendPercentage := int64(10) + recvPercentage := int64(0) + s.addRateLimit(ctx, chainA, userA, denomA, chanAB.ChannelID, authority.String(), sendPercentage, recvPercentage, 1) + + resp, err = query.GRPCQuery[ratelimitingtypes.QueryAllRateLimitsResponse](ctx, chainA, &ratelimitingtypes.QueryAllRateLimitsRequest{}) + s.Require().NoError(err) + s.Require().Len(resp.RateLimits, 1) + + rateLimit := resp.RateLimits[0] + s.Require().Equal(int64(0), rateLimit.Flow.Outflow.Int64()) + s.Require().Equal(int64(0), rateLimit.Flow.Inflow.Int64()) + s.Require().Equal(rateLimit.Quota.MaxPercentSend.Int64(), sendPercentage) + s.Require().Equal(rateLimit.Quota.MaxPercentRecv.Int64(), recvPercentage) + s.Require().Equal(uint64(1), rateLimit.Quota.DurationHours) + }) + + t.Run("Transfer updates the rate limit flow", func(_ *testing.T) { + userABalBefore, err := s.GetChainANativeBalance(ctx, userA) + s.Require().NoError(err) + + txResp := s.Transfer(ctx, chainA, userA, chanAB.PortID, chanAB.ChannelID, testvalues.DefaultTransferAmount(denomA), userA.FormattedAddress(), userB.FormattedAddress(), s.GetTimeoutHeight(ctx, chainA), 0, "") + s.AssertTxSuccess(txResp) + + packet, err := ibctesting.ParseV1PacketFromEvents(txResp.Events) + s.Require().NoError(err) + s.Require().NotNil(packet) + + s.Require().NoError(testutil.WaitForBlocks(ctx, 5, chainA, chainB), "failed to wait for blocks") + s.AssertPacketRelayed(ctx, chainA, chanAB.PortID, chanAB.ChannelID, packet.Sequence) + + userABalAfter, err := s.GetChainANativeBalance(ctx, userA) + s.Require().NoError(err) + + // Balance moved from userA to userB + s.Require().Equal(userABalBefore-testvalues.IBCTransferAmount, userABalAfter) + userBBalAfter, err := query.Balance(ctx, chainB,
userB.FormattedAddress(), ibcTokenB.IBCDenom()) + s.Require().NoError(err) + s.Require().Equal(2*testvalues.IBCTransferAmount, userBBalAfter.Int64()) + + // Check the flow has been updated. + rateLimit := s.rateLimit(ctx, chainA, denomA, chanAB.ChannelID) + s.Require().NotNil(rateLimit) + s.Require().Equal(testvalues.IBCTransferAmount, rateLimit.Flow.Outflow.Int64()) + }) + + t.Run("Fill and exceed quota", func(_ *testing.T) { + rateLim := s.rateLimit(ctx, chainA, denomA, chanAB.ChannelID) + sendPercentage := rateLim.Quota.MaxPercentSend.Int64() + + // Create an account that can almost exhaust the outflow limit. + richKidAmt := rateLim.Flow.ChannelValue.MulRaw(sendPercentage).QuoRaw(100).Sub(rateLim.Flow.Outflow) + richKid := interchaintest.GetAndFundTestUsers(t, ctx, "richkid", richKidAmt, chainA)[0] + s.Require().NoError(testutil.WaitForBlocks(ctx, 4, chainA)) + + sendCoin := sdk.NewCoin(denomA, richKidAmt) + + // Fill the quota + txResp := s.Transfer(ctx, chainA, richKid, chanAB.PortID, chanAB.ChannelID, sendCoin, richKid.FormattedAddress(), userB.FormattedAddress(), s.GetTimeoutHeight(ctx, chainA), 0, "") + s.AssertTxSuccess(txResp) + + // Sending even 10 units of denomA fails because the quota has been exceeded + sendCoin = sdk.NewInt64Coin(denomA, 10) + txResp = s.Transfer(ctx, chainA, userA, chanAB.PortID, chanAB.ChannelID, sendCoin, userA.FormattedAddress(), userB.FormattedAddress(), s.GetTimeoutHeight(ctx, chainA), 0, "") + s.AssertTxFailure(txResp, ratelimitingtypes.ErrQuotaExceeded) + }) + + t.Run("Reset rate limit: transfer succeeds", func(_ *testing.T) { + rateLimit := s.rateLimit(ctx, chainA, denomA, chanAB.ChannelID) + sendPercentage := rateLimit.Quota.MaxPercentSend.Int64() + recvPercentage := rateLimit.Quota.MaxPercentRecv.Int64() + + s.resetRateLimit(ctx, chainA, userA, denomA, chanAB.ChannelID, authority.String()) + + rateLimit = s.rateLimit(ctx, chainA, denomA, chanAB.ChannelID) + // Resetting only clears the flow.
It does not change the quota + s.Require().Zero(rateLimit.Flow.Outflow.Int64()) + s.Require().Equal(rateLimit.Quota.MaxPercentSend.Int64(), sendPercentage) + s.Require().Equal(rateLimit.Quota.MaxPercentRecv.Int64(), recvPercentage) + + txResp := s.Transfer(ctx, chainA, userA, chanAB.PortID, chanAB.ChannelID, testvalues.DefaultTransferAmount(denomA), userA.FormattedAddress(), userB.FormattedAddress(), s.GetTimeoutHeight(ctx, chainA), 0, "") + s.AssertTxSuccess(txResp) + }) + + t.Run("Set outflow quota to 0: transfer fails", func(_ *testing.T) { + sendPercentage := int64(0) + recvPercentage := int64(1) + s.updateRateLimit(ctx, chainA, userA, denomA, chanAB.ChannelID, authority.String(), sendPercentage, recvPercentage) + + rateLimit := s.rateLimit(ctx, chainA, denomA, chanAB.ChannelID) + s.Require().Equal(rateLimit.Quota.MaxPercentSend.Int64(), sendPercentage) + s.Require().Equal(rateLimit.Quota.MaxPercentRecv.Int64(), recvPercentage) + + txResp := s.Transfer(ctx, chainA, userA, chanAB.PortID, chanAB.ChannelID, testvalues.DefaultTransferAmount(denomA), userA.FormattedAddress(), userB.FormattedAddress(), s.GetTimeoutHeight(ctx, chainA), 0, "") + s.AssertTxFailure(txResp, ratelimitingtypes.ErrQuotaExceeded) + }) + + t.Run("Remove rate limit -> transfer succeeds again", func(_ *testing.T) { + s.removeRateLimit(ctx, chainA, userA, denomA, chanAB.ChannelID, authority.String()) + + rateLimit := s.rateLimit(ctx, chainA, denomA, chanAB.ChannelID) + s.Require().Nil(rateLimit) + + // Transfer works again + txResp := s.Transfer(ctx, chainA, userA, chanAB.PortID, chanAB.ChannelID, testvalues.DefaultTransferAmount(denomA), userA.FormattedAddress(), userB.FormattedAddress(), s.GetTimeoutHeight(ctx, chainA), 0, "") + s.AssertTxSuccess(txResp) + }) +} + +func (s *RateLimTestSuite) rateLimit(ctx context.Context, chain ibc.Chain, denom, chanID string) *ratelimitingtypes.RateLimit { + respRateLim, err := query.GRPCQuery[ratelimitingtypes.QueryRateLimitResponse](ctx, chain, &ratelimitingtypes.QueryRateLimitRequest{ + Denom: denom, + ChannelOrClientId: chanID, + }) + s.Require().NoError(err) + return respRateLim.RateLimit +} + +func (s *RateLimTestSuite) addRateLimit(ctx context.Context, chain ibc.Chain, user ibc.Wallet, denom, chanID, authority string, sendPercent, recvPercent, duration int64) { + msg := &ratelimitingtypes.MsgAddRateLimit{ + Signer: authority, + Denom: denom, + ChannelOrClientId: chanID, + MaxPercentSend: sdkmath.NewInt(sendPercent), + MaxPercentRecv: sdkmath.NewInt(recvPercent), + DurationHours: uint64(duration), + } + s.ExecuteAndPassGovV1Proposal(ctx, msg, chain, user) +} + +func (s *RateLimTestSuite) resetRateLimit(ctx context.Context, chain ibc.Chain, user ibc.Wallet, denom, chanID, authority string) { + msg := &ratelimitingtypes.MsgResetRateLimit{ + Signer: authority, + Denom: denom, + ChannelOrClientId: chanID, + } + s.ExecuteAndPassGovV1Proposal(ctx, msg, chain, user) +} + +func (s *RateLimTestSuite) updateRateLimit(ctx context.Context, chain ibc.Chain, user ibc.Wallet, denom, chanID, authority string, sendPercent, recvPercent int64) { + msg := &ratelimitingtypes.MsgUpdateRateLimit{ + Signer: authority, + Denom: denom, + ChannelOrClientId: chanID, + MaxPercentSend: sdkmath.NewInt(sendPercent), + MaxPercentRecv: sdkmath.NewInt(recvPercent), + DurationHours: 1, + } + s.ExecuteAndPassGovV1Proposal(ctx, msg, chain, user) +} + +func (s *RateLimTestSuite) removeRateLimit(ctx context.Context, chain ibc.Chain, user ibc.Wallet, denom, chanID, authority string) { + msg := 
&ratelimitingtypes.MsgRemoveRateLimit{ + Signer: authority, + Denom: denom, + ChannelOrClientId: chanID, + } + s.ExecuteAndPassGovV1Proposal(ctx, msg, chain, user) +} diff --git a/e2e/tests/transfer/authz_test.go b/e2e/tests/transfer/authz_test.go index 4f64f4e5e7e..6f3ce1fd77b 100644 --- a/e2e/tests/transfer/authz_test.go +++ b/e2e/tests/transfer/authz_test.go @@ -6,8 +6,8 @@ import ( "context" "testing" - "github.com/strangelove-ventures/interchaintest/v8/ibc" - test "github.com/strangelove-ventures/interchaintest/v8/testutil" + "github.com/cosmos/interchaintest/v10/ibc" + test "github.com/cosmos/interchaintest/v10/testutil" testifysuite "github.com/stretchr/testify/suite" sdkmath "cosmossdk.io/math" @@ -38,10 +38,6 @@ func (s *AuthzTransferTestSuite) SetupSuite() { s.SetupChains(context.TODO(), 2, nil) } -func (s *AuthzTransferTestSuite) CreateAuthzTestPath(testName string) (ibc.Relayer, ibc.ChannelOutput) { - return s.CreatePaths(ibc.DefaultClientOpts(), s.TransferChannelOptions(), testName), s.GetChainAChannelForTest(testName) -} - // QueryGranterGrants returns all GrantAuthorizations for the given granterAddress. func (*AuthzTransferTestSuite) QueryGranterGrants(ctx context.Context, chain ibc.Chain, granterAddress string) ([]*authz.GrantAuthorization, error) { res, err := query.GRPCQuery[authz.QueryGranterGrantsResponse](ctx, chain, &authz.QueryGranterGrantsRequest{ @@ -60,11 +56,14 @@ func (s *AuthzTransferTestSuite) TestAuthz_MsgTransfer_Succeeds() { testName := t.Name() t.Parallel() - relayer, channelA := s.CreateAuthzTestPath(testName) chainA, chainB := s.GetChains() chainADenom := chainA.Config().Denom + s.CreatePaths(ibc.DefaultClientOpts(), s.TransferChannelOptions(), testName) + relayer := s.GetRelayerForTest(testName) + channelA := s.GetChannelBetweenChains(testName, chainA, chainB) + granterWallet := s.CreateUserOnChainA(ctx, testvalues.StartingTokenAmount) granterAddress := granterWallet.FormattedAddress() @@ -105,7 +104,7 @@ func (s *AuthzTransferTestSuite) TestAuthz_MsgTransfer_Succeeds() { }, } - resp := s.BroadcastMessages(context.TODO(), chainA, granterWallet, msgGrant) + resp := s.BroadcastMessages(t.Context(), chainA, granterWallet, msgGrant) s.AssertTxSuccess(resp) } @@ -150,7 +149,7 @@ func (s *AuthzTransferTestSuite) TestAuthz_MsgTransfer_Succeeds() { Msgs: []*codectypes.Any{protoAny}, } - resp := s.BroadcastMessages(context.TODO(), chainA, granteeWallet, msgExec) + resp := s.BroadcastMessages(t.Context(), chainA, granteeWallet, msgExec) s.AssertTxSuccess(resp) }) @@ -182,10 +181,10 @@ func (s *AuthzTransferTestSuite) TestAuthz_MsgTransfer_Succeeds() { msgRevoke := authz.MsgRevoke{ Granter: granterAddress, Grantee: granteeAddress, - MsgTypeUrl: transfertypes.TransferAuthorization{}.MsgTypeURL(), + MsgTypeUrl: (*transfertypes.TransferAuthorization)(nil).MsgTypeURL(), } - resp := s.BroadcastMessages(context.TODO(), chainA, granterWallet, &msgRevoke) + resp := s.BroadcastMessages(t.Context(), chainA, granterWallet, &msgRevoke) s.AssertTxSuccess(resp) }) @@ -210,7 +209,7 @@ func (s *AuthzTransferTestSuite) TestAuthz_MsgTransfer_Succeeds() { Msgs: []*codectypes.Any{protoAny}, } - resp := s.BroadcastMessages(context.TODO(), chainA, granteeWallet, msgExec) + resp := s.BroadcastMessages(t.Context(), chainA, granteeWallet, msgExec) s.AssertTxFailure(resp, authz.ErrNoAuthorizationFound) }) } @@ -221,9 +220,13 @@ func (s *AuthzTransferTestSuite) TestAuthz_InvalidTransferAuthorizations() { testName := t.Name() t.Parallel() - relayer, channelA := 
s.CreateAuthzTestPath(testName) chainA, chainB := s.GetChains() + + s.CreatePaths(ibc.DefaultClientOpts(), s.TransferChannelOptions(), testName) + relayer := s.GetRelayerForTest(testName) + channelA := s.GetChannelBetweenChains(testName, chainA, chainB) + chainADenom := chainA.Config().Denom chainAVersion := chainA.Config().Images[0].Version @@ -267,7 +270,7 @@ func (s *AuthzTransferTestSuite) TestAuthz_InvalidTransferAuthorizations() { }, } - resp := s.BroadcastMessages(context.TODO(), chainA, granterWallet, msgGrant) + resp := s.BroadcastMessages(t.Context(), chainA, granterWallet, msgGrant) s.AssertTxSuccess(resp) }) @@ -295,7 +298,7 @@ func (s *AuthzTransferTestSuite) TestAuthz_InvalidTransferAuthorizations() { Msgs: []*codectypes.Any{protoAny}, } - resp := s.BroadcastMessages(context.TODO(), chainA, granteeWallet, msgExec) + resp := s.BroadcastMessages(t.Context(), chainA, granteeWallet, msgExec) if testvalues.IbcErrorsFeatureReleases.IsSupported(chainAVersion) { s.AssertTxFailure(resp, ibcerrors.ErrInsufficientFunds) } else { @@ -355,7 +358,7 @@ func (s *AuthzTransferTestSuite) TestAuthz_InvalidTransferAuthorizations() { Msgs: []*codectypes.Any{protoAny}, } - resp := s.BroadcastMessages(context.TODO(), chainA, granteeWallet, msgExec) + resp := s.BroadcastMessages(t.Context(), chainA, granteeWallet, msgExec) if testvalues.IbcErrorsFeatureReleases.IsSupported(chainAVersion) { s.AssertTxFailure(resp, ibcerrors.ErrInvalidAddress) } else { diff --git a/e2e/tests/transfer/base_test.go b/e2e/tests/transfer/base_test.go index b8990d5d153..abd9864cd80 100644 --- a/e2e/tests/transfer/base_test.go +++ b/e2e/tests/transfer/base_test.go @@ -7,8 +7,8 @@ import ( "testing" "time" - "github.com/strangelove-ventures/interchaintest/v8/ibc" - test "github.com/strangelove-ventures/interchaintest/v8/testutil" + "github.com/cosmos/interchaintest/v10/ibc" + test "github.com/cosmos/interchaintest/v10/testutil" testifysuite "github.com/stretchr/testify/suite" sdkmath "cosmossdk.io/math" @@ -48,14 +48,6 @@ func (s *transferTester) QueryTransferParams(ctx context.Context, chain ibc.Chai return *res.Params } -// CreateTransferPath sets up a path between chainA and chainB with a transfer channel and returns the relayer wired -// up to watch the channel and port IDs created. -func (s *transferTester) CreateTransferPath(testName string) (ibc.Relayer, ibc.ChannelOutput) { - relayer, channel := s.CreatePaths(ibc.DefaultClientOpts(), s.TransferChannelOptions(), testName), s.GetChainAChannelForTest(testName) - s.T().Logf("test %s running on portID %s channelID %s", testName, channel.PortID, channel.ChannelID) - return relayer, channel -} - // TestMsgTransfer_Succeeds_Nonincentivized will test sending successful IBC transfers from chainA to chainB. // The transfer will occur over a basic transfer channel (non incentivized) and both native and non-native tokens // will be sent forwards and backwards in the IBC transfer timeline (both chains will act as source and receiver chains). @@ -70,10 +62,12 @@ func (s *TransferTestSuite) TestMsgTransfer_Succeeds_Nonincentivized() { // deterministic. 
t.Parallel() - relayer, channelA := s.CreateTransferPath(testName) - chainA, chainB := s.GetChains() + s.CreatePaths(ibc.DefaultClientOpts(), s.TransferChannelOptions(), testName) + relayer := s.GetRelayerForTest(testName) + channelA := s.GetChannelBetweenChains(testName, chainA, chainB) + chainBVersion := chainB.Config().Images[0].Version chainADenom := chainA.Config().Denom @@ -178,10 +172,13 @@ func (s *TransferTestSuite) TestMsgTransfer_Fails_InvalidAddress() { testName := t.Name() t.Parallel() - relayer, channelA := s.CreateTransferPath(testName) chainA, chainB := s.GetChains() + s.CreatePaths(ibc.DefaultClientOpts(), s.TransferChannelOptions(), testName) + relayer := s.GetRelayerForTest(testName) + channelA := s.GetChannelBetweenChains(testName, chainA, chainB) + chainADenom := chainA.Config().Denom chainAWallet := s.CreateUserOnChainA(ctx, testvalues.StartingTokenAmount) @@ -225,9 +222,12 @@ func (s *TransferTestSuite) TestMsgTransfer_Timeout_Nonincentivized() { testName := t.Name() t.Parallel() - relayer, channelA := s.CreateTransferPath(testName) - chainA, _ := s.GetChains() + chainA, chainB := s.GetChains() + + s.CreatePaths(ibc.DefaultClientOpts(), s.TransferChannelOptions(), testName) + relayer := s.GetRelayerForTest(testName) + channelA := s.GetChannelBetweenChains(testName, chainA, chainB) chainAWallet := s.CreateUserOnChainA(ctx, testvalues.StartingTokenAmount) chainBWallet := s.CreateUserOnChainB(ctx, testvalues.StartingTokenAmount) @@ -283,10 +283,13 @@ func (s *TransferTestSuite) TestMsgTransfer_WithMemo() { testName := t.Name() t.Parallel() - relayer, channelA := s.CreateTransferPath(testName) chainA, chainB := s.GetChains() + s.CreatePaths(ibc.DefaultClientOpts(), s.TransferChannelOptions(), testName) + relayer := s.GetRelayerForTest(testName) + channelA := s.GetChannelBetweenChains(testName, chainA, chainB) + chainADenom := chainA.Config().Denom chainAWallet := s.CreateUserOnChainA(ctx, testvalues.StartingTokenAmount) @@ -334,10 +337,13 @@ func (s *TransferTestSuite) TestMsgTransfer_EntireBalance() { testName := t.Name() t.Parallel() - relayer, channelA := s.CreateTransferPath(testName) chainA, chainB := s.GetChains() + s.CreatePaths(ibc.DefaultClientOpts(), s.TransferChannelOptions(), testName) + relayer := s.GetRelayerForTest(testName) + channelA := s.GetChannelBetweenChains(testName, chainA, chainB) + chainADenom := chainA.Config().Denom chainAWallet := s.CreateUserOnChainA(ctx, testvalues.StartingTokenAmount) diff --git a/e2e/tests/transfer/localhost_test.go b/e2e/tests/transfer/localhost_test.go index 64cabde2e3b..b4194e92c11 100644 --- a/e2e/tests/transfer/localhost_test.go +++ b/e2e/tests/transfer/localhost_test.go @@ -6,7 +6,7 @@ import ( "context" "testing" - test "github.com/strangelove-ventures/interchaintest/v8/testutil" + test "github.com/cosmos/interchaintest/v10/testutil" testifysuite "github.com/stretchr/testify/suite" "github.com/cosmos/ibc-go/e2e/testsuite" diff --git a/e2e/tests/transfer/send_enabled_test.go b/e2e/tests/transfer/send_enabled_test.go index fe2b7e1810b..bbdaea68941 100644 --- a/e2e/tests/transfer/send_enabled_test.go +++ b/e2e/tests/transfer/send_enabled_test.go @@ -6,7 +6,8 @@ import ( "context" "testing" - test "github.com/strangelove-ventures/interchaintest/v8/testutil" + "github.com/cosmos/interchaintest/v10/ibc" + test "github.com/cosmos/interchaintest/v10/testutil" testifysuite "github.com/stretchr/testify/suite" govtypes "github.com/cosmos/cosmos-sdk/x/gov/types" @@ -41,11 +42,11 @@ func (s *TransferTestSuiteSendEnabled) 
TestSendEnabledParam() { testName := t.Name() // Note: explicitly not using t.Parallel() in this test as it makes chain wide changes - s.CreateTransferPath(testName) + s.CreatePaths(ibc.DefaultClientOpts(), s.TransferChannelOptions(), testName) chainA, chainB := s.GetChains() - channelA := s.GetChainAChannelForTest(testName) + channelA := s.GetChannelBetweenChains(testName, chainA, chainB) chainAVersion := chainA.Config().Images[0].Version chainADenom := chainA.Config().Denom diff --git a/e2e/tests/transfer/send_receive_test.go b/e2e/tests/transfer/send_receive_test.go index b900dcb9f40..e1dbc928136 100644 --- a/e2e/tests/transfer/send_receive_test.go +++ b/e2e/tests/transfer/send_receive_test.go @@ -6,7 +6,8 @@ import ( "context" "testing" - test "github.com/strangelove-ventures/interchaintest/v8/testutil" + "github.com/cosmos/interchaintest/v10/ibc" + test "github.com/cosmos/interchaintest/v10/testutil" testifysuite "github.com/stretchr/testify/suite" govtypes "github.com/cosmos/cosmos-sdk/x/gov/types" @@ -40,13 +41,13 @@ func (s *TransferTestSuiteSendReceive) TestReceiveEnabledParam() { ctx := context.TODO() testName := t.Name() + s.CreatePaths(ibc.DefaultClientOpts(), s.TransferChannelOptions(), testName) // Note: explicitly not using t.Parallel() in this test as it makes chain wide changes - s.CreateTransferPath(testName) chainA, chainB := s.GetChains() relayer := s.GetRelayerForTest(testName) - channelA := s.GetChainAChannelForTest(testName) + channelA := s.GetChannelBetweenChains(testName, chainA, chainB) chainAVersion := chainA.Config().Images[0].Version diff --git a/e2e/tests/upgrades/genesis_test.go b/e2e/tests/upgrades/genesis_test.go index 1299eb0158e..487a45f0f3c 100644 --- a/e2e/tests/upgrades/genesis_test.go +++ b/e2e/tests/upgrades/genesis_test.go @@ -8,12 +8,11 @@ import ( "time" "github.com/cosmos/gogoproto/proto" - "github.com/strangelove-ventures/interchaintest/v8" - "github.com/strangelove-ventures/interchaintest/v8/chain/cosmos" - "github.com/strangelove-ventures/interchaintest/v8/ibc" - test "github.com/strangelove-ventures/interchaintest/v8/testutil" + "github.com/cosmos/interchaintest/v10" + "github.com/cosmos/interchaintest/v10/chain/cosmos" + "github.com/cosmos/interchaintest/v10/ibc" + test "github.com/cosmos/interchaintest/v10/testutil" "github.com/stretchr/testify/suite" - "go.uber.org/zap" sdkmath "cosmossdk.io/math" @@ -62,20 +61,17 @@ func (s *GenesisTestSuite) SetupSuite() { func (s *GenesisTestSuite) TestIBCGenesis() { t := s.T() - haltHeight := int64(100) - chainA, chainB := s.GetChains() ctx := context.Background() testName := t.Name() - relayer := s.CreateDefaultPaths(testName) - channelA := s.GetChainAChannelForTest(testName) + s.CreatePaths(ibc.DefaultClientOpts(), s.TransferChannelOptions(), testName) + relayer := s.GetRelayerForTest(testName) + channelA := s.GetChannelBetweenChains(testName, chainA, chainB) - var ( - chainADenom = chainA.Config().Denom - chainBIBCToken = testsuite.GetIBCToken(chainADenom, channelA.Counterparty.PortID, channelA.Counterparty.ChannelID) // IBC token sent to chainB - ) + chainADenom := chainA.Config().Denom + chainBIBCToken := testsuite.GetIBCToken(chainADenom, channelA.Counterparty.PortID, channelA.Counterparty.ChannelID) // IBC token sent to chainB chainAWallet := s.CreateUserOnChainA(ctx, testvalues.StartingTokenAmount) chainAAddress := chainAWallet.FormattedAddress() @@ -83,7 +79,7 @@ func (s *GenesisTestSuite) TestIBCGenesis() { chainBWallet := s.CreateUserOnChainB(ctx, testvalues.StartingTokenAmount) 
chainBAddress := chainBWallet.FormattedAddress() - s.Require().NoError(test.WaitForBlocks(ctx, 1, chainA, chainB), "failed to wait for blocks") + s.Require().NoError(test.WaitForBlocks(ctx, 5, chainA, chainB), "failed to wait for blocks") t.Run("ics20: native IBC token transfer from chainA to chainB, sender is source of tokens", func(t *testing.T) { transferTxResp := s.Transfer(ctx, chainA, chainAWallet, channelA.PortID, channelA.ChannelID, testvalues.DefaultTransferAmount(chainADenom), chainAAddress, chainBAddress, s.GetTimeoutHeight(ctx, chainB), 0, "") @@ -137,20 +133,20 @@ func (s *GenesisTestSuite) TestIBCGenesis() { ConnectionId: ibctesting.FirstConnectionID, }) s.Require().NoError(err) - s.Require().NotZero(len(res.Address)) + s.Require().NotEmpty(res.Address) hostAccount = res.Address s.Require().NotEmpty(hostAccount) channels, err := relayer.GetChannels(ctx, s.GetRelayerExecReporter(), chainA.Config().ChainID) s.Require().NoError(err) - s.Require().Equal(len(channels), 2) + s.Require().Len(channels, 2) }) s.Require().NoError(test.WaitForBlocks(ctx, 10, chainA, chainB), "failed to wait for blocks") t.Run("Halt chain and export genesis", func(t *testing.T) { - s.HaltChainAndExportGenesis(ctx, chainA.(*cosmos.CosmosChain), haltHeight) + s.HaltChainAndExportGenesis(ctx, chainA.(*cosmos.CosmosChain)) }) t.Run("ics20: native IBC token transfer from chainA to chainB, sender is source of tokens", func(t *testing.T) { @@ -213,39 +209,25 @@ func (s *GenesisTestSuite) TestIBCGenesis() { s.Require().NoError(test.WaitForBlocks(ctx, 5, chainA, chainB), "failed to wait for blocks") } -func (s *GenesisTestSuite) HaltChainAndExportGenesis(ctx context.Context, chain *cosmos.CosmosChain, haltHeight int64) { +func (s *GenesisTestSuite) HaltChainAndExportGenesis(ctx context.Context, chain *cosmos.CosmosChain) { timeoutCtx, timeoutCtxCancel := context.WithTimeout(ctx, time.Minute*2) defer timeoutCtxCancel() - err := test.WaitForBlocks(timeoutCtx, int(haltHeight), chain) - s.Require().Error(err, "chain did not halt at halt height") + beforeHaltHeight, err := chain.Height(timeoutCtx) + s.Require().NoError(err, "error fetching height before halt") + + err = test.WaitForBlocks(timeoutCtx, 1, chain) + s.Require().NoError(err, "failed to wait for blocks") err = chain.StopAllNodes(ctx) s.Require().NoError(err, "error stopping node(s)") - state, err := chain.ExportState(ctx, haltHeight) + state, err := chain.ExportState(ctx, beforeHaltHeight) s.Require().NoError(err) - appTomlOverrides := make(test.Toml) - - appTomlOverrides["halt-height"] = 0 - for _, node := range chain.Nodes() { err := node.OverwriteGenesisFile(ctx, []byte(state)) s.Require().NoError(err) - } - - for _, node := range chain.Nodes() { - err := test.ModifyTomlConfigFile( - ctx, - zap.NewExample(), - node.DockerClient, - node.TestName, - node.VolumeName, - "config/app.toml", - appTomlOverrides, - ) - s.Require().NoError(err) _, _, err = node.ExecBin(ctx, "comet", "unsafe-reset-all") s.Require().NoError(err) @@ -263,5 +245,5 @@ func (s *GenesisTestSuite) HaltChainAndExportGenesis(ctx context.Context, chain height, err := chain.Height(ctx) s.Require().NoError(err, "error fetching height after halt") - s.Require().Greater(height, haltHeight, "height did not increment after halt") + s.Require().Greater(height, beforeHaltHeight+1, "height did not increment after halt") } diff --git a/e2e/tests/upgrades/upgrade_test.go b/e2e/tests/upgrades/upgrade_test.go index f30976dc959..233300e478e 100644 --- a/e2e/tests/upgrades/upgrade_test.go +++ 
b/e2e/tests/upgrades/upgrade_test.go @@ -9,10 +9,10 @@ import ( "time" "github.com/cosmos/gogoproto/proto" - interchaintest "github.com/strangelove-ventures/interchaintest/v8" - "github.com/strangelove-ventures/interchaintest/v8/chain/cosmos" - "github.com/strangelove-ventures/interchaintest/v8/ibc" - test "github.com/strangelove-ventures/interchaintest/v8/testutil" + interchaintest "github.com/cosmos/interchaintest/v10" + "github.com/cosmos/interchaintest/v10/chain/cosmos" + "github.com/cosmos/interchaintest/v10/ibc" + test "github.com/cosmos/interchaintest/v10/testutil" testifysuite "github.com/stretchr/testify/suite" sdkmath "cosmossdk.io/math" @@ -60,10 +60,6 @@ func (s *UpgradeTestSuite) SetupSuite() { s.SetupChains(context.TODO(), 2, nil) } -func (s *UpgradeTestSuite) CreateUpgradeTestPath(testName string) (ibc.Relayer, ibc.ChannelOutput) { - return s.CreatePaths(ibc.DefaultClientOpts(), s.TransferChannelOptions(), testName), s.GetChainAChannelForTest(testName) -} - // UpgradeChain upgrades a chain to a specific version using the planName provided. // The software upgrade proposal is broadcast by the provided wallet. func (s *UpgradeTestSuite) UpgradeChain(ctx context.Context, chain *cosmos.CosmosChain, wallet ibc.Wallet, planName, currentVersion, upgradeVersion string) { @@ -137,17 +133,18 @@ func (s *UpgradeTestSuite) TestIBCChainUpgrade() { ctx := context.Background() testName := t.Name() - relayer, channelA := s.CreateUpgradeTestPath(testName) chainA, chainB := s.GetChains() - var ( - chainADenom = chainA.Config().Denom - chainBIBCToken = testsuite.GetIBCToken(chainADenom, channelA.Counterparty.PortID, channelA.Counterparty.ChannelID) // IBC token sent to chainB + s.CreatePaths(ibc.DefaultClientOpts(), s.TransferChannelOptions(), testName) + relayer := s.GetRelayerForTest(testName) + channelA := s.GetChannelBetweenChains(testName, chainA, chainB) - chainBDenom = chainB.Config().Denom - chainAIBCToken = testsuite.GetIBCToken(chainBDenom, channelA.PortID, channelA.ChannelID) // IBC token sent to chainA - ) + chainADenom := chainA.Config().Denom + chainBIBCToken := testsuite.GetIBCToken(chainADenom, channelA.Counterparty.PortID, channelA.Counterparty.ChannelID) // IBC token sent to chainB + + chainBDenom := chainB.Config().Denom + chainAIBCToken := testsuite.GetIBCToken(chainBDenom, channelA.PortID, channelA.ChannelID) // IBC token sent to chainA // create separate user specifically for the upgrade proposal to more easily verify starting // and end balances of the chainA users. @@ -244,7 +241,7 @@ func (s *UpgradeTestSuite) TestChainUpgrade() { ctx := context.Background() testName := t.Name() - s.CreateUpgradeTestPath(testName) + s.CreatePaths(ibc.DefaultClientOpts(), s.TransferChannelOptions(), testName) // TODO(chatton): this test is still creating a relayer and a channel, but it is not using them. 
chain := s.GetAllChains()[0] @@ -308,14 +305,14 @@ func (s *UpgradeTestSuite) TestV6ToV7ChainUpgrade() { ctx := context.Background() testName := t.Name() - relayer, channelA := s.CreateUpgradeTestPath(testName) - chainA, chainB := s.GetChains() - var ( - chainADenom = chainA.Config().Denom - chainBIBCToken = testsuite.GetIBCToken(chainADenom, channelA.Counterparty.PortID, channelA.Counterparty.ChannelID) // IBC token sent to chainB - ) + s.CreatePaths(ibc.DefaultClientOpts(), s.TransferChannelOptions(), testName) + relayer := s.GetRelayerForTest(testName) + channelA := s.GetChannelBetweenChains(testName, chainA, chainB) + + chainADenom := chainA.Config().Denom + chainBIBCToken := testsuite.GetIBCToken(chainADenom, channelA.Counterparty.PortID, channelA.Counterparty.ChannelID) // IBC token sent to chainB chainAWallet := s.CreateUserOnChainA(ctx, testvalues.StartingTokenAmount) chainAAddress := chainAWallet.FormattedAddress() @@ -465,10 +462,12 @@ func (s *UpgradeTestSuite) TestV7ToV7_1ChainUpgrade() { ctx := context.Background() testName := t.Name() - relayer, channelA := s.CreateUpgradeTestPath(testName) - chainA, chainB := s.GetChains() + s.CreatePaths(ibc.DefaultClientOpts(), s.TransferChannelOptions(), testName) + relayer := s.GetRelayerForTest(testName) + channelA := s.GetChannelBetweenChains(testName, chainA, chainB) + chainADenom := chainA.Config().Denom chainAWallet := s.CreateUserOnChainA(ctx, testvalues.StartingTokenAmount) @@ -563,10 +562,12 @@ func (s *UpgradeTestSuite) TestV7ToV8ChainUpgrade() { ctx := context.Background() testName := t.Name() - relayer, channelA := s.CreateUpgradeTestPath(testName) - chainA, chainB := s.GetChains() + s.CreatePaths(ibc.DefaultClientOpts(), s.TransferChannelOptions(), testName) + relayer := s.GetRelayerForTest(testName) + channelA := s.GetChannelBetweenChains(testName, chainA, chainB) + chainADenom := chainA.Config().Denom chainAWallet := s.CreateUserOnChainA(ctx, testvalues.StartingTokenAmount) @@ -658,11 +659,12 @@ func (s *UpgradeTestSuite) TestV8ToV8_1ChainUpgrade() { ctx := context.Background() testName := t.Name() - relayer := s.CreatePaths(ibc.DefaultClientOpts(), s.TransferChannelOptions(), testName) - - channelA := s.GetChainAChannelForTest(testName) + s.CreatePaths(ibc.DefaultClientOpts(), s.TransferChannelOptions(), testName) + relayer := s.GetRelayerForTest(testName) chainA, chainB := s.GetChains() + + channelA := s.GetChannelBetweenChains(testName, chainA, chainB) chainADenom := chainA.Config().Denom chainAWallet := s.CreateUserOnChainA(ctx, testvalues.StartingTokenAmount) @@ -733,9 +735,12 @@ func (s *UpgradeTestSuite) TestV8ToV10ChainUpgrade() { testName := t.Name() - relayer, channelA := s.CreateUpgradeTestPath(testName) - chainA, chainB := s.GetChains() + + s.CreatePaths(ibc.DefaultClientOpts(), s.TransferChannelOptions(), testName) + relayer := s.GetRelayerForTest(testName) + channelA := s.GetChannelBetweenChains(testName, chainA, chainB) + chainADenom := chainA.Config().Denom chainBDenom := chainB.Config().Denom diff --git a/e2e/testsuite/codec.go b/e2e/testsuite/codec.go index 7c2679919e5..58e2d0fa018 100644 --- a/e2e/testsuite/codec.go +++ b/e2e/testsuite/codec.go @@ -20,12 +20,13 @@ import ( banktypes "github.com/cosmos/cosmos-sdk/x/bank/types" govv1 "github.com/cosmos/cosmos-sdk/x/gov/types/v1" govv1beta1 "github.com/cosmos/cosmos-sdk/x/gov/types/v1beta1" - grouptypes "github.com/cosmos/cosmos-sdk/x/group" proposaltypes "github.com/cosmos/cosmos-sdk/x/params/types/proposal" wasmtypes 
"github.com/cosmos/ibc-go/modules/light-clients/08-wasm/v10/types" icacontrollertypes "github.com/cosmos/ibc-go/v10/modules/apps/27-interchain-accounts/controller/types" icahosttypes "github.com/cosmos/ibc-go/v10/modules/apps/27-interchain-accounts/host/types" + packetforwardtypes "github.com/cosmos/ibc-go/v10/modules/apps/packet-forward-middleware/types" + ratelimitingtypes "github.com/cosmos/ibc-go/v10/modules/apps/rate-limiting/types" transfertypes "github.com/cosmos/ibc-go/v10/modules/apps/transfer/types" v7migrations "github.com/cosmos/ibc-go/v10/modules/core/02-client/migrations/v7" clienttypes "github.com/cosmos/ibc-go/v10/modules/core/02-client/types" @@ -71,6 +72,8 @@ func codecAndEncodingConfig() (*codec.ProtoCodec, testutil.TestEncodingConfig) { ibctmtypes.RegisterInterfaces(cfg.InterfaceRegistry) wasmtypes.RegisterInterfaces(cfg.InterfaceRegistry) channeltypesv2.RegisterInterfaces(cfg.InterfaceRegistry) + packetforwardtypes.RegisterInterfaces(cfg.InterfaceRegistry) + ratelimitingtypes.RegisterInterfaces(cfg.InterfaceRegistry) // all other types upgradetypes.RegisterInterfaces(cfg.InterfaceRegistry) @@ -79,7 +82,6 @@ func codecAndEncodingConfig() (*codec.ProtoCodec, testutil.TestEncodingConfig) { govv1.RegisterInterfaces(cfg.InterfaceRegistry) authtypes.RegisterInterfaces(cfg.InterfaceRegistry) cryptocodec.RegisterInterfaces(cfg.InterfaceRegistry) - grouptypes.RegisterInterfaces(cfg.InterfaceRegistry) proposaltypes.RegisterInterfaces(cfg.InterfaceRegistry) authz.RegisterInterfaces(cfg.InterfaceRegistry) txtypes.RegisterInterfaces(cfg.InterfaceRegistry) @@ -89,7 +91,7 @@ func codecAndEncodingConfig() (*codec.ProtoCodec, testutil.TestEncodingConfig) { } // UnmarshalMsgResponses attempts to unmarshal the tx msg responses into the provided message types. -func UnmarshalMsgResponses(txResp sdk.TxResponse, msgs ...codec.ProtoMarshaler) error { +func UnmarshalMsgResponses(txResp sdk.TxResponse, msgs ...proto.Message) error { cdc := Codec() bz, err := hex.DecodeString(txResp.Data) if err != nil { diff --git a/e2e/testsuite/diagnostics/diagnostics.go b/e2e/testsuite/diagnostics/diagnostics.go index 958e3a8f428..c9c50a94afb 100644 --- a/e2e/testsuite/diagnostics/diagnostics.go +++ b/e2e/testsuite/diagnostics/diagnostics.go @@ -10,7 +10,7 @@ import ( "testing" dockertypes "github.com/docker/docker/api/types" - dockerclient "github.com/docker/docker/client" + dockerclient "github.com/moby/moby/client" "github.com/cosmos/ibc-go/e2e/dockerutil" "github.com/cosmos/ibc-go/e2e/internal/directories" @@ -29,14 +29,14 @@ func Collect(t *testing.T, dc *dockerclient.Client, debugModeEnabled bool, suite if !debugModeEnabled { // when we are not forcing log collection, we only upload upon test failing. 
if !t.Failed() { - t.Logf("test passed, not uploading logs") + t.Log("test passed, not uploading logs") return } } t.Logf("writing logs for test: %s", t.Name()) - ctx := context.TODO() + ctx := t.Context() e2eDir, err := directories.E2E() if err != nil { t.Logf("failed finding log directory: %s", err) @@ -95,7 +95,7 @@ func Collect(t *testing.T, dc *dockerclient.Client, debugModeEnabled bool, suite if err := fetchAndWriteDockerInspectOutput(ctx, dc, container.ID, localFilePath); err != nil { continue } - t.Logf("successfully wrote docker inspect output") + t.Log("successfully wrote docker inspect output") } } diff --git a/e2e/testsuite/query/grpc_query.go b/e2e/testsuite/query/grpc_query.go index e5b5f74d4d1..4a584c85041 100644 --- a/e2e/testsuite/query/grpc_query.go +++ b/e2e/testsuite/query/grpc_query.go @@ -6,7 +6,7 @@ import ( "strings" "github.com/cosmos/gogoproto/proto" - "github.com/strangelove-ventures/interchaintest/v8/ibc" + "github.com/cosmos/interchaintest/v10/ibc" "google.golang.org/grpc" "google.golang.org/grpc/credentials/insecure" pb "google.golang.org/protobuf/proto" diff --git a/e2e/testsuite/query/queries.go b/e2e/testsuite/query/queries.go index 9b247d97035..0d859d491e0 100644 --- a/e2e/testsuite/query/queries.go +++ b/e2e/testsuite/query/queries.go @@ -5,7 +5,7 @@ import ( "errors" "sort" - "github.com/strangelove-ventures/interchaintest/v8/ibc" + "github.com/cosmos/interchaintest/v10/ibc" "cosmossdk.io/math" diff --git a/e2e/testsuite/sanitize/messages.go b/e2e/testsuite/sanitize/messages.go index 349f340797f..91c773b5178 100644 --- a/e2e/testsuite/sanitize/messages.go +++ b/e2e/testsuite/sanitize/messages.go @@ -3,7 +3,6 @@ package sanitize import ( sdk "github.com/cosmos/cosmos-sdk/types" govtypesv1 "github.com/cosmos/cosmos-sdk/x/gov/types/v1" - grouptypes "github.com/cosmos/cosmos-sdk/x/group" "github.com/cosmos/ibc-go/e2e/semverutil" icacontrollertypes "github.com/cosmos/ibc-go/v10/modules/apps/27-interchain-accounts/controller/types" @@ -11,10 +10,6 @@ import ( ) var ( - // groupsv1ProposalTitleAndSummary represents the releases that support the new title and summary fields. - groupsv1ProposalTitleAndSummary = semverutil.FeatureReleases{ - MajorVersion: "v7", - } // govv1ProposalTitleAndSummary represents the releases that support the new title and summary fields. govv1ProposalTitleAndSummary = semverutil.FeatureReleases{ MajorVersion: "v7", @@ -58,21 +53,6 @@ func removeUnknownFields(tag string, msg sdk.Msg) sdk.Msg { panic(err) } return msg - case *grouptypes.MsgSubmitProposal: - if !groupsv1ProposalTitleAndSummary.IsSupported(tag) { - msg.Title = "" - msg.Summary = "" - } - // sanitize messages contained in the x/group proposal - msgs, err := msg.GetMsgs() - if err != nil { - panic(err) - } - sanitizedMsgs := Messages(tag, msgs...) 
- if err := msg.SetMsgs(sanitizedMsgs); err != nil { - panic(err) - } - return msg case *icacontrollertypes.MsgRegisterInterchainAccount: if !icaUnorderedChannelFeatureReleases.IsSupported(tag) { msg.Ordering = channeltypes.NONE diff --git a/e2e/testsuite/testconfig.go b/e2e/testsuite/testconfig.go index a9f1121b292..fa026f27286 100644 --- a/e2e/testsuite/testconfig.go +++ b/e2e/testsuite/testconfig.go @@ -10,9 +10,9 @@ import ( "strings" "time" - "github.com/strangelove-ventures/interchaintest/v8" - "github.com/strangelove-ventures/interchaintest/v8/ibc" - interchaintestutil "github.com/strangelove-ventures/interchaintest/v8/testutil" + "github.com/cosmos/interchaintest/v10" + "github.com/cosmos/interchaintest/v10/ibc" + interchaintestutil "github.com/cosmos/interchaintest/v10/testutil" "gopkg.in/yaml.v2" "github.com/cosmos/cosmos-sdk/codec" @@ -71,7 +71,7 @@ const ( defaultRlyTag = "latest" // defaultHermesTag is the tag that will be used if no relayer tag is specified for hermes. - defaultHermesTag = "1.10.4" + defaultHermesTag = "1.13.1" // defaultChainTag is the tag that will be used for the chains if none is specified. defaultChainTag = "main" // defaultConfigFileName is the default filename for the config file that can be used to configure @@ -702,7 +702,7 @@ type ChainOptions struct { // ChainOptionConfiguration enables arbitrary configuration of ChainOptions. type ChainOptionConfiguration func(options *ChainOptions) -// DefaultChainOptions returns the default configuration for the chains. +// DefaultChainOptions returns the default configuration for required number of chains. // These options can be configured by passing configuration functions to E2ETestSuite.GetChains. func DefaultChainOptions(chainCount int) (ChainOptions, error) { tc := LoadConfig() @@ -730,7 +730,7 @@ func DefaultChainOptions(chainCount int) (ChainOptions, error) { // if running a single test, only one relayer is needed. numRelayers := 1 if IsRunSuite() { - // arbitrary number that will not be required if https://github.com/strangelove-ventures/interchaintest/issues/1153 is resolved. + // arbitrary number that will not be required if https://github.com/cosmos/interchaintest/issues/1153 is resolved. // It can be overridden in individual test suites in SetupSuite if required. 
numRelayers = 10 } @@ -757,7 +757,7 @@ func newDefaultSimappConfig(cc ChainConfig, name, chainID, denom string, cometCf { Repository: cc.Image, Version: cc.Tag, - UidGid: "1000:1000", + UIDGID: "1000:1000", }, }, Bin: cc.Binary, @@ -883,7 +883,7 @@ func defaultGovv1Beta1ModifyGenesis(version string) func(ibc.ChainConfig, []byte govModuleBytes, err := json.Marshal(appStateMap[govtypes.ModuleName]) if err != nil { - return nil, fmt.Errorf("failed to extract gov genesis bytes: %s", err) + return nil, fmt.Errorf("failed to extract gov genesis bytes: %w", err) } govModuleGenesisBytes, err := modifyGovv1Beta1AppState(chainConfig, govModuleBytes) @@ -900,7 +900,7 @@ func defaultGovv1Beta1ModifyGenesis(version string) func(ibc.ChainConfig, []byte if !testvalues.AllowAllClientsWildcardFeatureReleases.IsSupported(version) { ibcModuleBytes, err := json.Marshal(appStateMap[ibcexported.ModuleName]) if err != nil { - return nil, fmt.Errorf("failed to extract ibc genesis bytes: %s", err) + return nil, fmt.Errorf("failed to extract ibc genesis bytes: %w", err) } ibcGenesisBytes, err := modifyClientGenesisAppState(ibcModuleBytes) @@ -919,7 +919,7 @@ func defaultGovv1Beta1ModifyGenesis(version string) func(ibc.ChainConfig, []byte if !testvalues.ChannelParamsFeatureReleases.IsSupported(version) { ibcModuleBytes, err := json.Marshal(appStateMap[ibcexported.ModuleName]) if err != nil { - return nil, fmt.Errorf("failed to extract ibc genesis bytes: %s", err) + return nil, fmt.Errorf("failed to extract ibc genesis bytes: %w", err) } ibcGenesisBytes, err := modifyChannelGenesisAppState(ibcModuleBytes) @@ -933,13 +933,12 @@ func defaultGovv1Beta1ModifyGenesis(version string) func(ibc.ChainConfig, []byte return nil, fmt.Errorf("failed to unmarshal gov genesis bytes into map: %w", err) } appStateMap[ibcexported.ModuleName] = ibcModuleGenesisMap - } if !testvalues.ChannelsV2FeatureReleases.IsSupported(version) { ibcModuleBytes, err := json.Marshal(appStateMap[ibcexported.ModuleName]) if err != nil { - return nil, fmt.Errorf("failed to extract ibc genesis bytes: %s", err) + return nil, fmt.Errorf("failed to extract ibc genesis bytes: %w", err) } ibcGenesisBytes, err := modifyChannelV2GenesisAppState(ibcModuleBytes) @@ -958,7 +957,7 @@ func defaultGovv1Beta1ModifyGenesis(version string) func(ibc.ChainConfig, []byte if !testvalues.ClientV2FeatureReleases.IsSupported(version) { ibcModuleBytes, err := json.Marshal(appStateMap[ibcexported.ModuleName]) if err != nil { - return nil, fmt.Errorf("failed to extract ibc genesis bytes: %s", err) + return nil, fmt.Errorf("failed to extract ibc genesis bytes: %w", err) } ibcGenesisBytes, err := modifyClientV2GenesisAppState(ibcModuleBytes) diff --git a/e2e/testsuite/testsuite.go b/e2e/testsuite/testsuite.go index eb520cc8c5c..55d435b7397 100644 --- a/e2e/testsuite/testsuite.go +++ b/e2e/testsuite/testsuite.go @@ -2,6 +2,7 @@ package testsuite import ( "context" + "encoding/json" "errors" "fmt" "os" @@ -10,12 +11,13 @@ import ( "strings" "sync" - dockerclient "github.com/docker/docker/client" - interchaintest "github.com/strangelove-ventures/interchaintest/v8" - "github.com/strangelove-ventures/interchaintest/v8/chain/cosmos" - "github.com/strangelove-ventures/interchaintest/v8/ibc" - "github.com/strangelove-ventures/interchaintest/v8/testreporter" - test "github.com/strangelove-ventures/interchaintest/v8/testutil" + "github.com/cosmos/interchaintest/v10" + "github.com/cosmos/interchaintest/v10/chain/cosmos" + "github.com/cosmos/interchaintest/v10/ibc" + 
"github.com/cosmos/interchaintest/v10/relayer/hermes" + "github.com/cosmos/interchaintest/v10/testreporter" + test "github.com/cosmos/interchaintest/v10/testutil" + dockerclient "github.com/moby/moby/client" testifysuite "github.com/stretchr/testify/suite" "go.uber.org/zap" @@ -62,9 +64,10 @@ type E2ETestSuite struct { pathNameIndex int64 // testSuiteName is the name of the test suite, used to store chains under the test suite name. - testSuiteName string - testPaths map[string][]string - channels map[string]map[ibc.Chain][]ibc.ChannelOutput + testSuiteName string + testPathsByTestName map[string][]string + testPathsByChains map[ibc.Chain]map[ibc.Chain]string + channelByChains map[string]map[ibc.Chain]map[ibc.Chain]ibc.ChannelOutput // channelLock ensures concurrent tests are not creating and accessing channels as the same time. channelLock sync.Mutex @@ -82,8 +85,9 @@ type E2ETestSuite struct { func (s *E2ETestSuite) initState() { s.initDockerClient() s.proposalIDs = map[string]uint64{} - s.testPaths = make(map[string][]string) - s.channels = make(map[string]map[ibc.Chain][]ibc.ChannelOutput) + s.testPathsByTestName = make(map[string][]string) + s.testPathsByChains = make(map[ibc.Chain]map[ibc.Chain]string) + s.channelByChains = make(map[string]map[ibc.Chain]map[ibc.Chain]ibc.ChannelOutput) s.relayerPool = []ibc.Relayer{} s.testRelayerMap = make(map[string]ibc.Relayer) s.relayerWallets = make(relayer.Map) @@ -128,14 +132,14 @@ func (s *E2ETestSuite) configureGenesisDebugExport() { } // This env variables are set by the interchain test code: - // https://github.com/strangelove-ventures/interchaintest/blob/7aa0fd6487f76238ab44231fdaebc34627bc5990/chain/cosmos/cosmos_chain.go#L1007-L1008 + // https://github.com/cosmos/interchaintest/blob/7aa0fd6487f76238ab44231fdaebc34627bc5990/chain/cosmos/cosmos_chain.go#L1007-L1008 t.Setenv("EXPORT_GENESIS_FILE_PATH", exportPath) chainName := tc.GetGenesisChainName() chainIdx, err := tc.GetChainIndex(chainName) s.Require().NoError(err) - // Interchaintest adds a suffix (https://github.com/strangelove-ventures/interchaintest/blob/a3f4c7bcccf1925ffa6dc793a298f15497919a38/chainspec.go#L125) + // Interchaintest adds a suffix (https://github.com/cosmos/interchaintest/blob/a3f4c7bcccf1925ffa6dc793a298f15497919a38/chainspec.go#L125) // to the chain name, so we need to do the same. genesisChainName := fmt.Sprintf("%s-%d", chainName, chainIdx+1) t.Setenv("EXPORT_GENESIS_CHAIN", genesisChainName) @@ -143,7 +147,7 @@ func (s *E2ETestSuite) configureGenesisDebugExport() { // initializeRelayerPool pre-loads the relayer pool with n relayers. // this is a workaround due to the restriction on relayer creation during the test -// ref: https://github.com/strangelove-ventures/interchaintest/issues/1153 +// ref: https://github.com/cosmos/interchaintest/issues/1153 // if the above issue is resolved, it should be possible to lazily create relayers in each test. func (s *E2ETestSuite) initializeRelayerPool(n int) []ibc.Relayer { var relayers []ibc.Relayer @@ -197,21 +201,11 @@ func (s *E2ETestSuite) SetupChains(ctx context.Context, chainCount int, channelO } } -// CreateDefaultPaths creates a path between the chains using the default client and channel options. -// this should be called as the setup function in most tests if no additional options are required. 
-func (s *E2ETestSuite) CreateDefaultPaths(testName string) ibc.Relayer { - return s.CreatePaths(ibc.DefaultClientOpts(), DefaultChannelOpts(s.GetAllChains()), testName) -} - // CreatePaths creates paths between the chains using the provided client and channel options. // The paths are created such that ChainA is connected to ChainB, ChainB is connected to ChainC etc. -func (s *E2ETestSuite) CreatePaths(clientOpts ibc.CreateClientOptions, channelOpts ibc.CreateChannelOptions, testName string) ibc.Relayer { +func (s *E2ETestSuite) CreatePaths(clientOpts ibc.CreateClientOptions, channelOpts ibc.CreateChannelOptions, testName string) { s.T().Logf("Setting up path for: %s", testName) - if s.channels[testName] == nil { - s.channels[testName] = make(map[ibc.Chain][]ibc.ChannelOutput) - } - r := s.GetRelayerForTest(testName) ctx := context.TODO() @@ -220,8 +214,6 @@ func (s *E2ETestSuite) CreatePaths(clientOpts ibc.CreateClientOptions, channelOp chainA, chainB := allChains[i], allChains[i+1] s.CreatePath(ctx, r, chainA, chainB, clientOpts, channelOpts, testName) } - - return r } // CreatePath creates a path between chainA and chainB using the provided client and channel options. @@ -233,9 +225,9 @@ func (s *E2ETestSuite) CreatePath( clientOpts ibc.CreateClientOptions, channelOpts ibc.CreateChannelOptions, testName string, -) (chainAChannel ibc.ChannelOutput, chainBChannel ibc.ChannelOutput) { +) (ibc.ChannelOutput, ibc.ChannelOutput) { pathName := s.generatePathName() - s.testPaths[testName] = append(s.testPaths[testName], pathName) + s.testPathsByTestName[testName] = append(s.testPathsByTestName[testName], pathName) s.T().Logf("establishing path between %s and %s on path %s", chainA.Config().ChainID, chainB.Config().ChainID, pathName) @@ -253,17 +245,23 @@ func (s *E2ETestSuite) CreatePath( err = test.WaitForBlocks(ctx, 1, chainA, chainB) s.Require().NoError(err) - s.createChannelWithLock(ctx, r, pathName, testName, channelOpts, chainA, chainB) + channelA, channelB := s.createChannelWithLock(ctx, r, pathName, testName, channelOpts, chainA, chainB) - aChannels := s.channels[testName][chainA] - bChannels := s.channels[testName][chainB] + if s.testPathsByChains[chainA] == nil { + s.testPathsByChains[chainA] = make(map[ibc.Chain]string) + } + s.testPathsByChains[chainA][chainB] = pathName + if s.testPathsByChains[chainB] == nil { + s.testPathsByChains[chainB] = make(map[ibc.Chain]string) + } + s.testPathsByChains[chainB][chainA] = pathName - return aChannels[len(aChannels)-1], bChannels[len(bChannels)-1] + return channelA, channelB } // createChannelWithLock creates a channel between the two provided chains for the given test name. This applies a lock // to ensure that the channels that are created are correctly mapped to the test that created them. -func (s *E2ETestSuite) createChannelWithLock(ctx context.Context, r ibc.Relayer, pathName, testName string, channelOpts ibc.CreateChannelOptions, chainA, chainB ibc.Chain) { +func (s *E2ETestSuite) createChannelWithLock(ctx context.Context, r ibc.Relayer, pathName, testName string, channelOpts ibc.CreateChannelOptions, chainA, chainB ibc.Chain) (ibc.ChannelOutput, ibc.ChannelOutput) { // NOTE: we need to lock the creation of channels and applying of packet filters, as if we don't, the result // of `r.GetChannels` may return channels created by other relayers in different tests. 
s.channelLock.Lock() @@ -274,21 +272,69 @@ func (s *E2ETestSuite) createChannelWithLock(ctx context.Context, r ibc.Relayer, err = test.WaitForBlocks(ctx, 1, chainA, chainB) s.Require().NoError(err) - for _, c := range []ibc.Chain{chainA, chainB} { - channels, err := r.GetChannels(ctx, s.GetRelayerExecReporter(), c.Config().ChainID) - s.Require().NoError(err) + aChannels := s.fetchChannelsBetweenChains(ctx, r, chainA, chainB) + latestAChannel := getLatestChannel(aChannels) + s.mapChannel(testName, chainA, chainB, latestAChannel) - if _, ok := s.channels[testName][c]; !ok { - s.channels[testName][c] = []ibc.ChannelOutput{} + bChannels := s.fetchChannelsBetweenChains(ctx, r, chainB, chainA) + latestBChannel := getLatestChannel(bChannels) + s.mapChannel(testName, chainB, chainA, latestBChannel) + + err = relayer.ApplyPacketFilter(ctx, s.T(), r, chainB.Config().ChainID, []ibc.ChannelOutput{latestAChannel, latestBChannel}) + s.Require().NoError(err) + + return latestAChannel, latestBChannel +} + +func (s *E2ETestSuite) fetchChannelsBetweenChains(ctx context.Context, r ibc.Relayer, chainA ibc.Chain, chainB ibc.Chain) []ibc.ChannelOutput { + hermesQueryChannels := []string{"hermes", "--json", "query", "channels", "--chain", chainA.Config().ChainID, "--counterparty-chain", chainB.Config().ChainID, "--show-counterparty", "--verbose"} + hermesResp := r.Exec(ctx, s.GetRelayerExecReporter(), hermesQueryChannels, nil) + s.Require().NoError(hermesResp.Err, "failed to query channels between %s and %s", chainA.Config().ChainID, chainB.Config().ChainID) + + // This code is taken from interchaintest's own Hermes query code, but since we need it here, it is copied - for now. + // extractJsonResult: + stdoutLines := strings.Split(string(hermesResp.Stdout), "\n") + var jsonOutput string + for _, line := range stdoutLines { + if strings.Contains(line, "result") { + jsonOutput = line + break } + } + jsonBz := []byte(jsonOutput) + var result hermes.ChannelOutputResult + err := json.Unmarshal(jsonBz, &result) + s.Require().NoError(err, "failed to unmarshal hermes channel output result: %s", err) + + var ibcChannelOutput []ibc.ChannelOutput + for _, r := range result.Result { + ibcChannelOutput = append(ibcChannelOutput, ibc.ChannelOutput{ + State: r.ChannelEnd.State, + Ordering: r.ChannelEnd.Ordering, + Counterparty: ibc.ChannelCounterparty{ + PortID: r.ChannelEnd.Remote.PortID, + ChannelID: r.ChannelEnd.Remote.ChannelID, + }, + ConnectionHops: r.ChannelEnd.ConnectionHops, + Version: r.ChannelEnd.Version, + PortID: r.CounterPartyChannelEnd.Remote.PortID, + ChannelID: r.CounterPartyChannelEnd.Remote.ChannelID, + }) + } - // keep track of channels associated with a given chain for access within the tests. - // only the most recent channel is relevant.
- s.channels[testName][c] = append(s.channels[testName][c], getLatestChannel(channels)) + return ibcChannelOutput +} - err = relayer.ApplyPacketFilter(ctx, s.T(), r, c.Config().ChainID, s.channels[testName][c]) - s.Require().NoError(err, "failed to watch port and channel on chain: %s", c.Config().ChainID) +func (s *E2ETestSuite) mapChannel(testName string, fromChain ibc.Chain, toChain ibc.Chain, channel ibc.ChannelOutput) { + if _, ok := s.channelByChains[testName]; !ok { + s.channelByChains[testName] = make(map[ibc.Chain]map[ibc.Chain]ibc.ChannelOutput) } + + if _, ok := s.channelByChains[testName][fromChain]; !ok { + s.channelByChains[testName][fromChain] = make(map[ibc.Chain]ibc.ChannelOutput) + } + + s.channelByChains[testName][fromChain][toChain] = channel } // getLatestChannel returns the latest channel from the list of channels. @@ -300,18 +346,12 @@ func getLatestChannel(channels []ibc.ChannelOutput) ibc.ChannelOutput { }) } -// GetChainAChannelForTest returns the ibc.ChannelOutput for the current test. -// this defaults to the first entry in the list, and will be what is needed in the case of -// a single channel test. -func (s *E2ETestSuite) GetChainAChannelForTest(testName string) ibc.ChannelOutput { - return s.GetChannelsForTest(s.GetAllChains()[0], testName)[0] -} +// GetChannelBetweenChains returns the channel between the two provided chains for the specified test. +func (s *E2ETestSuite) GetChannelBetweenChains(testname string, chainA ibc.Chain, chainB ibc.Chain) ibc.ChannelOutput { + channel, ok := s.channelByChains[testname][chainA][chainB] + s.Require().True(ok, "channel not found between chains %s and %s for test %s", chainA.Config().ChainID, chainB.Config().ChainID, testname) -// GetChannelsForTest returns all channels for the specified test. -func (s *E2ETestSuite) GetChannelsForTest(chain ibc.Chain, testName string) []ibc.ChannelOutput { - channels, ok := s.channels[testName][chain] - s.Require().True(ok, "channel not found for test %s", testName) - return channels + return channel } // GetRelayerForTest returns the relayer for the current test from the available pool of relayers.
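// Editor's note: the following is an illustrative usage sketch, not part of the patch.
// It shows how a test suite could consume the per-chain-pair accessors introduced in this
// change (GetChannelBetweenChains, GetPathByChains) in place of the removed
// GetChainAChannelForTest/GetChannelsForTest helpers. The package name, suite type, test
// body and the ibc-go e2e import path are assumptions for illustration only; chain setup
// (e.g. SetupChains in SetupSuite) is assumed to have happened already, and
// DefaultClientOpts/DefaultChannelOpts are used as in the removed CreateDefaultPaths helper.
package example_test

import (
	"testing"

	"github.com/cosmos/interchaintest/v10/ibc"
	testifysuite "github.com/stretchr/testify/suite"

	"github.com/cosmos/ibc-go/e2e/testsuite" // assumed import path of the testsuite package above
)

type ExampleTestSuite struct {
	testsuite.E2ETestSuite
}

func TestExampleTestSuite(t *testing.T) {
	testifysuite.Run(t, new(ExampleTestSuite))
}

func (s *ExampleTestSuite) TestChannelLookup() {
	testName := s.T().Name()

	// create clients, connections and channels between each pair of consecutive chains for this test
	s.CreatePaths(ibc.DefaultClientOpts(), testsuite.DefaultChannelOpts(s.GetAllChains()), testName)

	chains := s.GetAllChains()
	chainA, chainB := chains[0], chains[1]

	// channels and relayer paths are now looked up by the pair of chains they connect,
	// rather than by a per-chain slice of channel outputs
	channelAB := s.GetChannelBetweenChains(testName, chainA, chainB)
	pathName := s.GetPathByChains(chainA, chainB)

	s.T().Logf("test %s uses channel %s on path %s", testName, channelAB.ChannelID, pathName)
}
// End of editor's illustrative sketch.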
@@ -363,6 +403,37 @@ func (s *E2ETestSuite) GetRelayerUsers(ctx context.Context, testName string) (ib return chainARelayerUser, chainBRelayerUser } +func (s *E2ETestSuite) FlushPackets(ctx context.Context, ibcrelayer ibc.Relayer, orderedChains []ibc.Chain) { + for i := range len(orderedChains) - 1 { + chainA := orderedChains[i] + chainB := orderedChains[i+1] + + s.T().Logf("Flushing packets between %s and %s", chainA.Config().ChainID, chainB.Config().ChainID) + + pathName := s.GetPathByChains(chainA, chainB) + channel := s.GetChannelBetweenChains(s.T().Name(), chainA, chainB) + err := ibcrelayer.Flush(ctx, s.GetRelayerExecReporter(), pathName, channel.ChannelID) + s.Require().NoError(err) + + s.Require().NoError(test.WaitForBlocks(ctx, 1, chainA, chainB)) + } + + // Then we flush back the acknowledgements + for i := len(orderedChains) - 1; i > 0; i-- { + chainA := orderedChains[i] + chainB := orderedChains[i-1] + + s.T().Logf("Flushing acknowledgements between %s and %s", chainA.Config().ChainID, chainB.Config().ChainID) + + pathName := s.GetPathByChains(chainA, chainB) + channel := s.GetChannelBetweenChains(s.T().Name(), chainA, chainB) + err := ibcrelayer.Flush(ctx, s.GetRelayerExecReporter(), pathName, channel.ChannelID) + s.Require().NoError(err) + + s.Require().NoError(test.WaitForBlocks(ctx, 1, chainA, chainB)) + } +} + // ChainOptionModifier is a function which accepts 2 chains as inputs, and returns a channel creation modifier function // in order to conditionally modify the channel options based on the chains being used. type ChainOptionModifier func(chainA, chainB ibc.Chain) func(options *ibc.CreateChannelOptions) @@ -415,11 +486,17 @@ func (s *E2ETestSuite) generatePathName() string { } func (s *E2ETestSuite) GetPaths(testName string) []string { - paths, ok := s.testPaths[testName] + paths, ok := s.testPathsByTestName[testName] s.Require().True(ok, "paths not found for test %s", testName) return paths } +func (s *E2ETestSuite) GetPathByChains(chainA ibc.Chain, chainB ibc.Chain) string { + pathName, ok := s.testPathsByChains[chainA][chainB] + s.Require().True(ok, "path not found for chains %s and %s", chainA.Config().ChainID, chainB.Config().ChainID) + return pathName +} + // GetPathName returns the name of a path at a specific index. This can be used in tests // when the path name is required. func GetPathName(idx int64) string { @@ -504,10 +581,10 @@ func (s *E2ETestSuite) RecoverRelayerWallets(ctx context.Context, ibcrelayer ibc rlyBName := fmt.Sprintf("%s-%s", ChainBRelayerName, testName) if err := chainA.RecoverKey(ctx, rlyAName, chainARelayerWallet.Mnemonic()); err != nil { - return nil, nil, fmt.Errorf("could not recover relayer wallet on chain A: %s", err) + return nil, nil, fmt.Errorf("could not recover relayer wallet on chain A: %w", err) } if err := chainB.RecoverKey(ctx, rlyBName, chainBRelayerWallet.Mnemonic()); err != nil { - return nil, nil, fmt.Errorf("could not recover relayer wallet on chain B: %s", err) + return nil, nil, fmt.Errorf("could not recover relayer wallet on chain B: %w", err) } return chainARelayerWallet, chainBRelayerWallet, nil } @@ -553,6 +630,11 @@ func (s *E2ETestSuite) CreateUserOnChainC(ctx context.Context, amount int64) ibc return s.createWalletOnChainIndex(ctx, amount, 2) } +// CreateUserOnChainD creates a user with the given amount of funds on chain D.
+func (s *E2ETestSuite) CreateUserOnChainD(ctx context.Context, amount int64) ibc.Wallet { + return s.createWalletOnChainIndex(ctx, amount, 3) +} + // createWalletOnChainIndex creates a wallet with the given amount of funds on the chain of the given index. func (s *E2ETestSuite) createWalletOnChainIndex(ctx context.Context, amount, chainIndex int64) ibc.Wallet { chain := s.GetAllChains()[chainIndex] @@ -576,15 +658,11 @@ func (s *E2ETestSuite) GetChainBNativeBalance(ctx context.Context, user ibc.Wall // GetChainBalanceForDenom returns the balance for a given denom given a chain. func GetChainBalanceForDenom(ctx context.Context, chain ibc.Chain, denom string, user ibc.Wallet) (int64, error) { - balanceResp, err := query.GRPCQuery[banktypes.QueryBalanceResponse](ctx, chain, &banktypes.QueryBalanceRequest{ - Address: user.FormattedAddress(), - Denom: denom, - }) + resp, err := query.Balance(ctx, chain, user.FormattedAddress(), denom) if err != nil { return 0, err } - - return balanceResp.Balance.Amount.Int64(), nil + return resp.Int64(), nil } // AssertPacketRelayed asserts that the packet commitment does not exist on the sending chain. diff --git a/e2e/testsuite/tx.go b/e2e/testsuite/tx.go index ebe598b5ff2..e6d83112385 100644 --- a/e2e/testsuite/tx.go +++ b/e2e/testsuite/tx.go @@ -5,13 +5,12 @@ import ( "errors" "fmt" "slices" - "strconv" "strings" "time" - "github.com/strangelove-ventures/interchaintest/v8/chain/cosmos" - "github.com/strangelove-ventures/interchaintest/v8/ibc" - test "github.com/strangelove-ventures/interchaintest/v8/testutil" + "github.com/cosmos/interchaintest/v10/chain/cosmos" + "github.com/cosmos/interchaintest/v10/ibc" + test "github.com/cosmos/interchaintest/v10/testutil" errorsmod "cosmossdk.io/errors" sdkmath "cosmossdk.io/math" @@ -184,7 +183,7 @@ func (s *E2ETestSuite) ExecuteGovV1Proposal(ctx context.Context, msg sdk.Msg, ch resp := s.BroadcastMessages(ctx, cosmosChain, user, msgSubmitProposal) s.AssertTxSuccess(resp) - s.Require().NoError(cosmosChain.VoteOnProposalAllValidators(ctx, strconv.Itoa(int(proposalID)), cosmos.ProposalVoteYes)) + s.Require().NoError(cosmosChain.VoteOnProposalAllValidators(ctx, proposalID, cosmos.ProposalVoteYes)) s.T().Logf("validators voted %s on proposal with ID: %d", cosmos.ProposalVoteYes, proposalID) return s.waitForGovV1ProposalToPass(ctx, cosmosChain, proposalID) @@ -242,7 +241,7 @@ func (s *E2ETestSuite) ExecuteAndPassGovV1Beta1Proposal(ctx context.Context, cha proposal := proposalResp.Proposal s.Require().Equal(govtypesv1beta1.StatusVotingPeriod, proposal.Status) - err = cosmosChain.VoteOnProposalAllValidators(ctx, fmt.Sprintf("%d", proposalID), cosmos.ProposalVoteYes) + err = cosmosChain.VoteOnProposalAllValidators(ctx, proposalID, cosmos.ProposalVoteYes) s.Require().NoError(err) // ensure voting period has not passed before validators finished voting diff --git a/e2e/testvalues/values.go b/e2e/testvalues/values.go index 0a06763f287..f2e95ff297a 100644 --- a/e2e/testvalues/values.go +++ b/e2e/testvalues/values.go @@ -4,7 +4,7 @@ import ( "fmt" "time" - "github.com/strangelove-ventures/interchaintest/v8/ibc" + "github.com/cosmos/interchaintest/v10/ibc" sdkmath "cosmossdk.io/math" diff --git a/go.mod b/go.mod index 575ce4c6cfe..d658602a2b1 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module github.com/cosmos/ibc-go/v10 -go 1.23.8 +go 1.24.3 replace github.com/syndtr/goleveldb => github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 @@ -8,48 +8,48 @@ require ( cosmossdk.io/api v0.9.2 cosmossdk.io/core v0.11.3 
cosmossdk.io/errors v1.0.2 - cosmossdk.io/log v1.6.0 + cosmossdk.io/log v1.6.1 cosmossdk.io/math v1.5.3 cosmossdk.io/store v1.1.2 cosmossdk.io/x/tx v0.14.0 cosmossdk.io/x/upgrade v0.2.0 github.com/cometbft/cometbft v0.38.17 - github.com/cosmos/cosmos-db v1.1.1 + github.com/cosmos/cosmos-db v1.1.3 github.com/cosmos/cosmos-proto v1.0.0-beta.5 - github.com/cosmos/cosmos-sdk v0.53.0 + github.com/cosmos/cosmos-sdk v0.53.4 github.com/cosmos/gogoproto v1.7.0 github.com/cosmos/ics23/go v0.11.0 - github.com/ethereum/go-ethereum v1.15.11 + github.com/ethereum/go-ethereum v1.16.3 github.com/golang/protobuf v1.5.4 github.com/grpc-ecosystem/grpc-gateway v1.16.0 github.com/hashicorp/go-metrics v0.5.4 - github.com/spf13/cast v1.8.0 - github.com/spf13/cobra v1.9.1 - github.com/stretchr/testify v1.10.0 - google.golang.org/genproto/googleapis/api v0.0.0-20250414145226-207652e42e2e - google.golang.org/grpc v1.72.0 - google.golang.org/protobuf v1.36.6 + github.com/spf13/cast v1.9.2 + github.com/spf13/cobra v1.10.1 + github.com/stretchr/testify v1.11.1 + google.golang.org/genproto/googleapis/api v0.0.0-20250707201910-8d1bb00bc6a7 + google.golang.org/grpc v1.75.0 + google.golang.org/protobuf v1.36.8 gopkg.in/yaml.v2 v2.4.0 ) require ( - cel.dev/expr v0.20.0 // indirect + cel.dev/expr v0.24.0 // indirect cloud.google.com/go v0.116.0 // indirect cloud.google.com/go/auth v0.14.1 // indirect cloud.google.com/go/auth/oauth2adapt v0.2.7 // indirect - cloud.google.com/go/compute/metadata v0.6.0 // indirect + cloud.google.com/go/compute/metadata v0.7.0 // indirect cloud.google.com/go/iam v1.2.2 // indirect cloud.google.com/go/monitoring v1.21.2 // indirect cloud.google.com/go/storage v1.49.0 // indirect - cosmossdk.io/collections v1.2.1 // indirect - cosmossdk.io/depinject v1.2.0 // indirect + cosmossdk.io/collections v1.3.1 // indirect + cosmossdk.io/depinject v1.2.1 // indirect cosmossdk.io/schema v1.1.0 // indirect filippo.io/edwards25519 v1.1.0 // indirect github.com/99designs/go-keychain v0.0.0-20191008050251-8e49817e8af4 // indirect github.com/99designs/keyring v1.2.2 // indirect github.com/DataDog/datadog-go v4.8.3+incompatible // indirect github.com/DataDog/zstd v1.5.7 // indirect - github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.26.0 // indirect + github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.29.0 // indirect github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.48.1 // indirect github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.48.1 // indirect github.com/Microsoft/go-winio v0.6.2 // indirect @@ -59,14 +59,13 @@ require ( github.com/bgentry/speakeasy v0.2.0 // indirect github.com/bits-and-blooms/bitset v1.22.0 // indirect github.com/btcsuite/btcd/btcec/v2 v2.3.4 // indirect - github.com/bytedance/sonic v1.13.2 // indirect - github.com/bytedance/sonic/loader v0.2.4 // indirect + github.com/bytedance/sonic v1.14.0 // indirect + github.com/bytedance/sonic/loader v0.3.0 // indirect github.com/cenkalti/backoff/v4 v4.3.0 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/chzyer/readline v1.5.1 // indirect github.com/cloudwego/base64x v0.1.5 // indirect - github.com/cncf/xds/go v0.0.0-20250121191232-2f005788dc42 // indirect - github.com/cockroachdb/apd/v2 v2.0.2 // indirect + github.com/cncf/xds/go v0.0.0-20250501225837-2ac532fd4443 // indirect github.com/cockroachdb/errors v1.12.0 // indirect github.com/cockroachdb/fifo v0.0.0-20240616162244-4768e80dfb9a // indirect 
github.com/cockroachdb/logtags v0.0.0-20241215232642-bb51bb14a506 // indirect @@ -94,18 +93,18 @@ require ( github.com/fatih/color v1.17.0 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect github.com/fsnotify/fsnotify v1.9.0 // indirect - github.com/getsentry/sentry-go v0.32.0 // indirect - github.com/go-jose/go-jose/v4 v4.0.5 // indirect + github.com/getsentry/sentry-go v0.33.0 // indirect + github.com/go-jose/go-jose/v4 v4.1.1 // indirect github.com/go-kit/kit v0.13.0 // indirect github.com/go-kit/log v0.2.1 // indirect github.com/go-logfmt/logfmt v0.6.0 // indirect - github.com/go-logr/logr v1.4.2 // indirect + github.com/go-logr/logr v1.4.3 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-viper/mapstructure/v2 v2.2.1 // indirect github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2 // indirect github.com/gogo/googleapis v1.4.1 // indirect github.com/gogo/protobuf v1.3.2 // indirect - github.com/golang/glog v1.2.4 // indirect + github.com/golang/glog v1.2.5 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/snappy v0.0.5-0.20231225225746-43d5d4cd4e0e // indirect github.com/google/btree v1.1.3 // indirect @@ -176,7 +175,7 @@ require ( github.com/sasha-s/go-deadlock v0.3.5 // indirect github.com/sourcegraph/conc v0.3.0 // indirect github.com/spf13/afero v1.12.0 // indirect - github.com/spf13/pflag v1.0.6 // indirect + github.com/spf13/pflag v1.0.9 // indirect github.com/spf13/viper v1.20.1 // indirect github.com/spiffe/go-spiffe/v2 v2.5.0 // indirect github.com/subosito/gotenv v1.6.0 // indirect @@ -191,32 +190,33 @@ require ( go.etcd.io/bbolt v1.4.0-alpha.1 // indirect go.opencensus.io v0.24.0 // indirect go.opentelemetry.io/auto/sdk v1.1.0 // indirect - go.opentelemetry.io/contrib/detectors/gcp v1.34.0 // indirect - go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.58.0 // indirect + go.opentelemetry.io/contrib/detectors/gcp v1.36.0 // indirect + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.60.0 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.59.0 // indirect - go.opentelemetry.io/otel v1.34.0 // indirect - go.opentelemetry.io/otel/metric v1.34.0 // indirect - go.opentelemetry.io/otel/sdk v1.34.0 // indirect - go.opentelemetry.io/otel/sdk/metric v1.34.0 // indirect - go.opentelemetry.io/otel/trace v1.34.0 // indirect + go.opentelemetry.io/otel v1.37.0 // indirect + go.opentelemetry.io/otel/metric v1.37.0 // indirect + go.opentelemetry.io/otel/sdk v1.37.0 // indirect + go.opentelemetry.io/otel/sdk/metric v1.37.0 // indirect + go.opentelemetry.io/otel/trace v1.37.0 // indirect go.uber.org/mock v0.5.2 // indirect go.uber.org/multierr v1.11.0 // indirect - golang.org/x/arch v0.15.0 // indirect - golang.org/x/crypto v0.37.0 // indirect + go.yaml.in/yaml/v2 v2.4.2 // indirect + golang.org/x/arch v0.17.0 // indirect + golang.org/x/crypto v0.39.0 // indirect golang.org/x/exp v0.0.0-20250305212735-054e65f0b394 // indirect - golang.org/x/net v0.39.0 // indirect - golang.org/x/oauth2 v0.27.0 // indirect - golang.org/x/sync v0.13.0 // indirect - golang.org/x/sys v0.32.0 // indirect - golang.org/x/term v0.31.0 // indirect - golang.org/x/text v0.24.0 // indirect + golang.org/x/net v0.41.0 // indirect + golang.org/x/oauth2 v0.30.0 // indirect + golang.org/x/sync v0.15.0 // indirect + golang.org/x/sys v0.33.0 // indirect + golang.org/x/term v0.32.0 // indirect + golang.org/x/text v0.26.0 // indirect golang.org/x/time v0.10.0 
// indirect google.golang.org/api v0.222.0 // indirect google.golang.org/genproto v0.0.0-20241118233622-e639e219e697 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20250422160041-2d3770c4ea7f // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20250707201910-8d1bb00bc6a7 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect gotest.tools/v3 v3.5.2 // indirect nhooyr.io/websocket v1.8.11 // indirect pgregory.net/rapid v1.2.0 // indirect - sigs.k8s.io/yaml v1.4.0 // indirect + sigs.k8s.io/yaml v1.6.0 // indirect ) diff --git a/go.sum b/go.sum index 5e8574614d0..8e8e574b8a8 100644 --- a/go.sum +++ b/go.sum @@ -1,5 +1,5 @@ -cel.dev/expr v0.20.0 h1:OunBvVCfvpWlt4dN7zg3FM6TDkzOePe1+foGJ9AXeeI= -cel.dev/expr v0.20.0/go.mod h1:MrpN08Q+lEBs+bGYdLxxHkZoUSsCp0nSKTs0nTymJgw= +cel.dev/expr v0.24.0 h1:56OvJKSH3hDGL0ml5uSxZmz3/3Pq4tJ+fb1unVLAFcY= +cel.dev/expr v0.24.0/go.mod h1:hLPLo1W4QUmuYdA72RBX06QTs6MXw941piREPl3Yfiw= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= @@ -184,8 +184,8 @@ cloud.google.com/go/compute/metadata v0.1.0/go.mod h1:Z1VN+bulIf6bt4P/C37K4DyZYZ cloud.google.com/go/compute/metadata v0.2.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= cloud.google.com/go/compute/metadata v0.2.1/go.mod h1:jgHgmJd2RKBGzXqF5LR2EZMGxBkeanZ9wwa75XHJgOM= cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= -cloud.google.com/go/compute/metadata v0.6.0 h1:A6hENjEsCDtC1k8byVsgwvVcioamEHvZ4j01OwKxG9I= -cloud.google.com/go/compute/metadata v0.6.0/go.mod h1:FjyFAW1MW0C203CEOMDTu3Dk1FlqW3Rga40jzHL4hfg= +cloud.google.com/go/compute/metadata v0.7.0 h1:PBWF+iiAerVNe8UCHxdOt6eHLVc3ydFeOCw78U8ytSU= +cloud.google.com/go/compute/metadata v0.7.0/go.mod h1:j5MvL9PprKL39t166CoB1uVHfQMs4tFQZZcKwksXUjo= cloud.google.com/go/contactcenterinsights v1.3.0/go.mod h1:Eu2oemoePuEFc/xKFPjbTuPSj0fYJcPls9TFlPNnHHY= cloud.google.com/go/contactcenterinsights v1.4.0/go.mod h1:L2YzkGbPsv+vMQMCADxJoT9YiTTnSEd6fEvCeHTYVck= cloud.google.com/go/contactcenterinsights v1.6.0/go.mod h1:IIDlT6CLcDoyv79kDv8iWxMSTZhLxSCofVV5W6YFM/w= @@ -616,16 +616,16 @@ cloud.google.com/go/workflows v1.9.0/go.mod h1:ZGkj1aFIOd9c8Gerkjjq7OW7I5+l6cSvT cloud.google.com/go/workflows v1.10.0/go.mod h1:fZ8LmRmZQWacon9UCX1r/g/DfAXx5VcPALq2CxzdePw= cosmossdk.io/api v0.9.2 h1:9i9ptOBdmoIEVEVWLtYYHjxZonlF/aOVODLFaxpmNtg= cosmossdk.io/api v0.9.2/go.mod h1:CWt31nVohvoPMTlPv+mMNCtC0a7BqRdESjCsstHcTkU= -cosmossdk.io/collections v1.2.1 h1:mAlNMs5vJwkda4TA+k5q/43p24RVAQ/qyDrjANu3BXE= -cosmossdk.io/collections v1.2.1/go.mod h1:PSsEJ/fqny0VPsHLFT6gXDj/2C1tBOTS9eByK0+PBFU= +cosmossdk.io/collections v1.3.1 h1:09e+DUId2brWsNOQ4nrk+bprVmMUaDH9xvtZkeqIjVw= +cosmossdk.io/collections v1.3.1/go.mod h1:ynvkP0r5ruAjbmedE+vQ07MT6OtJ0ZIDKrtJHK7Q/4c= cosmossdk.io/core v0.11.3 h1:mei+MVDJOwIjIniaKelE3jPDqShCc/F4LkNNHh+4yfo= cosmossdk.io/core v0.11.3/go.mod h1:9rL4RE1uDt5AJ4Tg55sYyHWXA16VmpHgbe0PbJc6N2Y= -cosmossdk.io/depinject v1.2.0 h1:6NW/FSK1IkWTrX7XxUpBmX1QMBozpEI9SsWkKTBc5zw= -cosmossdk.io/depinject v1.2.0/go.mod h1:pvitjtUxZZZTQESKNS9KhGjWVslJZxtO9VooRJYyPjk= +cosmossdk.io/depinject v1.2.1 h1:eD6FxkIjlVaNZT+dXTQuwQTKZrFZ4UrfCq1RKgzyhMw= +cosmossdk.io/depinject v1.2.1/go.mod h1:lqQEycz0H2JXqvOgVwTsjEdMI0plswI7p6KX+MVqFOM= cosmossdk.io/errors v1.0.2 h1:wcYiJz08HThbWxd/L4jObeLaLySopyyuUFB5w4AGpCo= cosmossdk.io/errors 
v1.0.2/go.mod h1:0rjgiHkftRYPj//3DrD6y8hcm40HcPv/dR4R/4efr0k= -cosmossdk.io/log v1.6.0 h1:SJIOmJ059wi1piyRgNRXKXhlDXGqnB5eQwhcZKv2tOk= -cosmossdk.io/log v1.6.0/go.mod h1:5cXXBvfBkR2/BcXmosdCSLXllvgSjphrrDVdfVRmBGM= +cosmossdk.io/log v1.6.1 h1:YXNwAgbDwMEKwDlCdH8vPcoggma48MgZrTQXCfmMBeI= +cosmossdk.io/log v1.6.1/go.mod h1:gMwsWyyDBjpdG9u2avCFdysXqxq28WJapJvu+vF1y+E= cosmossdk.io/math v1.5.3 h1:WH6tu6Z3AUCeHbeOSHg2mt9rnoiUWVWaQ2t6Gkll96U= cosmossdk.io/math v1.5.3/go.mod h1:uqcZv7vexnhMFJF+6zh9EWdm/+Ylyln34IvPnBauPCQ= cosmossdk.io/schema v1.1.0 h1:mmpuz3dzouCoyjjcMcA/xHBEmMChN+EHh8EHxHRHhzE= @@ -654,8 +654,8 @@ github.com/DataDog/datadog-go v4.8.3+incompatible h1:fNGaYSuObuQb5nzeTQqowRAd9bp github.com/DataDog/datadog-go v4.8.3+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= github.com/DataDog/zstd v1.5.7 h1:ybO8RBeh29qrxIhCA9E8gKY6xfONU9T6G6aP9DTKfLE= github.com/DataDog/zstd v1.5.7/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw= -github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.26.0 h1:f2Qw/Ehhimh5uO1fayV0QIW7DShEQqhtUfhYc+cBPlw= -github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.26.0/go.mod h1:2bIszWvQRlJVmJLiuLhukLImRjKPcYdzzsx6darK02A= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.29.0 h1:UQUsRi8WTzhZntp5313l+CHIAT95ojUI2lpP/ExlZa4= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.29.0/go.mod h1:Cz6ft6Dkn3Et6l2v2a9/RpN7epQ1GtDlO6lj8bEcOvw= github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.48.1 h1:UQ0AhxogsIRZDkElkblfnwjc3IaltCm2HUMvezQaL7s= github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.48.1/go.mod h1:jyqM3eLpJ3IbIFDTKVz2rF9T/xWGW0rIriGwnz8l9Tk= github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/cloudmock v0.48.1 h1:oTX4vsorBZo/Zdum6OKPA4o7544hm6smoRv1QjpTwGo= @@ -723,11 +723,11 @@ github.com/btcsuite/btcd/btcutil v1.1.6 h1:zFL2+c3Lb9gEgqKNzowKUPQNb8jV7v5Oaodi/ github.com/btcsuite/btcd/btcutil v1.1.6/go.mod h1:9dFymx8HpuLqBnsPELrImQeTQfKBQqzqGbbV3jK55aE= github.com/bufbuild/protocompile v0.14.1 h1:iA73zAf/fyljNjQKwYzUHD6AD4R8KMasmwa/FBatYVw= github.com/bufbuild/protocompile v0.14.1/go.mod h1:ppVdAIhbr2H8asPk6k4pY7t9zB1OU5DoEw9xY/FUi1c= -github.com/bytedance/sonic v1.13.2 h1:8/H1FempDZqC4VqjptGo14QQlJx8VdZJegxs6wwfqpQ= -github.com/bytedance/sonic v1.13.2/go.mod h1:o68xyaF9u2gvVBuGHPlUVCy+ZfmNNO5ETf1+KgkJhz4= +github.com/bytedance/sonic v1.14.0 h1:/OfKt8HFw0kh2rj8N0F6C/qPGRESq0BbaNZgcNXXzQQ= +github.com/bytedance/sonic v1.14.0/go.mod h1:WoEbx8WTcFJfzCe0hbmyTGrfjt8PzNEBdxlNUO24NhA= github.com/bytedance/sonic/loader v0.1.1/go.mod h1:ncP89zfokxS5LZrJxl5z0UJcsk4M4yY2JpfqGeCtNLU= -github.com/bytedance/sonic/loader v0.2.4 h1:ZWCw4stuXUsn1/+zQDqeE7JKP+QO47tz7QCNan80NzY= -github.com/bytedance/sonic/loader v0.2.4/go.mod h1:N8A3vUdtUebEY2/VQC0MyhYeKUFosQU6FxH2JmUe6VI= +github.com/bytedance/sonic/loader v0.3.0 h1:dskwH8edlzNMctoruo8FPTJDF3vLtDT0sXZwvZJyqeA= +github.com/bytedance/sonic/loader v0.3.0/go.mod h1:N8A3vUdtUebEY2/VQC0MyhYeKUFosQU6FxH2JmUe6VI= github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ= github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4= github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= @@ -772,8 +772,8 @@ github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWH github.com/cncf/xds/go 
v0.0.0-20220314180256-7f1daf1720fc/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20230105202645-06c439db220b/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20250121191232-2f005788dc42 h1:Om6kYQYDUk5wWbT0t0q6pvyM49i9XZAv9dDrkDA7gjk= -github.com/cncf/xds/go v0.0.0-20250121191232-2f005788dc42/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= +github.com/cncf/xds/go v0.0.0-20250501225837-2ac532fd4443 h1:aQ3y1lwWyqYPiWZThqv1aFbZMiM9vblcSArJRf2Irls= +github.com/cncf/xds/go v0.0.0-20250501225837-2ac532fd4443/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= github.com/cockroachdb/apd/v2 v2.0.2 h1:weh8u7Cneje73dDh+2tEVLUvyBc89iwepWCD8b8034E= github.com/cockroachdb/apd/v2 v2.0.2/go.mod h1:DDxRlzC2lo3/vSlmSoS7JkqbbrARPuFOGr0B9pvN3Gw= github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= @@ -804,12 +804,12 @@ github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSV github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/cosmos/btcutil v1.0.5 h1:t+ZFcX77LpKtDBhjucvnOH8C2l2ioGsBNEQ3jef8xFk= github.com/cosmos/btcutil v1.0.5/go.mod h1:IyB7iuqZMJlthe2tkIFL33xPyzbFYP0XVdS8P5lUPis= -github.com/cosmos/cosmos-db v1.1.1 h1:FezFSU37AlBC8S98NlSagL76oqBRWq/prTPvFcEJNCM= -github.com/cosmos/cosmos-db v1.1.1/go.mod h1:AghjcIPqdhSLP/2Z0yha5xPH3nLnskz81pBx3tcVSAw= +github.com/cosmos/cosmos-db v1.1.3 h1:7QNT77+vkefostcKkhrzDK9uoIEryzFrU9eoMeaQOPY= +github.com/cosmos/cosmos-db v1.1.3/go.mod h1:kN+wGsnwUJZYn8Sy5Q2O0vCYA99MJllkKASbs6Unb9U= github.com/cosmos/cosmos-proto v1.0.0-beta.5 h1:eNcayDLpip+zVLRLYafhzLvQlSmyab+RC5W7ZfmxJLA= github.com/cosmos/cosmos-proto v1.0.0-beta.5/go.mod h1:hQGLpiIUloJBMdQMMWb/4wRApmI9hjHH05nefC0Ojec= -github.com/cosmos/cosmos-sdk v0.53.0 h1:ZsB2tnBVudumV059oPuElcr0K1lLOutaI6WJ+osNTbI= -github.com/cosmos/cosmos-sdk v0.53.0/go.mod h1:UPcRyFwOUy2PfSFBWxBceO/HTjZOuBVqY583WyazIGs= +github.com/cosmos/cosmos-sdk v0.53.4 h1:kPF6vY68+/xi1/VebSZGpoxQqA52qkhUzqkrgeBn3Mg= +github.com/cosmos/cosmos-sdk v0.53.4/go.mod h1:7U3+WHZtI44dEOnU46+lDzBb2tFh1QlMvi8Z5JugopI= github.com/cosmos/go-bip39 v1.0.0 h1:pcomnQdrdH22njcAatO0yWojsUnCO3y2tNoV1cb6hHY= github.com/cosmos/go-bip39 v1.0.0/go.mod h1:RNJv0H/pOIVgxw6KS7QeX2a0Uo0aKUlfhZ4xuwvCdJw= github.com/cosmos/gogogateway v1.2.0 h1:Ae/OivNhp8DqBi/sh2A8a1D0y638GpL3tkmLQAiKxTE= @@ -889,8 +889,8 @@ github.com/envoyproxy/protoc-gen-validate v0.9.1/go.mod h1:OKNgG7TCp5pF4d6XftA0+ github.com/envoyproxy/protoc-gen-validate v0.10.1/go.mod h1:DRjgyB0I43LtJapqN6NiRwroiAU2PaFuvk/vjgh61ss= github.com/envoyproxy/protoc-gen-validate v1.2.1 h1:DEo3O99U8j4hBFwbJfrz9VtgcDfUKS7KJ7spH3d86P8= github.com/envoyproxy/protoc-gen-validate v1.2.1/go.mod h1:d/C80l/jxXLdfEIhX1W2TmLfsJ31lvEjwamM4DxlWXU= -github.com/ethereum/go-ethereum v1.15.11 h1:JK73WKeu0WC0O1eyX+mdQAVHUV+UR1a9VB/domDngBU= -github.com/ethereum/go-ethereum v1.15.11/go.mod h1:mf8YiHIb0GR4x4TipcvBUPxJLw1mFdmxzoDi11sDRoI= +github.com/ethereum/go-ethereum v1.16.3 h1:nDoBSrmsrPbrDIVLTkDQCy1U9KdHN+F2PzvMbDoS42Q= +github.com/ethereum/go-ethereum v1.16.3/go.mod h1:Lrsc6bt9Gm9RyvhfFK53vboCia8kpF9nv+2Ukntnl+8= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= 
github.com/fatih/color v1.17.0 h1:GlRw1BRJxkpqUCBKzKOw098ed57fEsKeNjpTe3cSjK4= @@ -909,8 +909,8 @@ github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMo github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k= github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= -github.com/getsentry/sentry-go v0.32.0 h1:YKs+//QmwE3DcYtfKRH8/KyOOF/I6Qnx7qYGNHCGmCY= -github.com/getsentry/sentry-go v0.32.0/go.mod h1:CYNcMMz73YigoHljQRG+qPF+eMq8gG72XcGN/p71BAY= +github.com/getsentry/sentry-go v0.33.0 h1:YWyDii0KGVov3xOaamOnF0mjOrqSjBqwv48UEzn7QFg= +github.com/getsentry/sentry-go v0.33.0/go.mod h1:C55omcY9ChRQIUcVcGcs+Zdy4ZpQGvNJ7JYHIoSWOtE= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI= github.com/gin-gonic/gin v1.6.3/go.mod h1:75u5sXoLsGZoRN5Sgbi1eraJ4GU3++wFwWzhwvtwp4M= @@ -924,8 +924,8 @@ github.com/go-fonts/stix v0.1.0/go.mod h1:w/c1f0ldAUlJmLBvlbkvVXLAD+tAMqobIIQpmn github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-jose/go-jose/v4 v4.0.5 h1:M6T8+mKZl/+fNNuFHvGIzDz7BTLQPIounk/b9dw3AaE= -github.com/go-jose/go-jose/v4 v4.0.5/go.mod h1:s3P1lRrkT8igV8D9OjyL4WRyHvjB6a4JSllnOrmmBOA= +github.com/go-jose/go-jose/v4 v4.1.1 h1:JYhSgy4mXXzAdF3nUx3ygx347LRXJRrpgyU3adRmkAI= +github.com/go-jose/go-jose/v4 v4.1.1/go.mod h1:BdsZGqgdO3b6tTc6LSE56wcDbMMLuPsw5d4ZD5f94kA= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o= @@ -942,8 +942,8 @@ github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG github.com/go-logfmt/logfmt v0.6.0 h1:wGYYu3uicYdqXVgoYbvnkrPVXkuLM1p1ifugDMEdRi4= github.com/go-logfmt/logfmt v0.6.0/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= -github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= +github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-pdf/fpdf v0.5.0/go.mod h1:HzcnA+A23uwogo0tp9yU+l3V+KXhiESpt1PMayhOh5M= @@ -978,8 +978,8 @@ github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGw github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= github.com/golang/glog v1.1.0/go.mod h1:pfYeQZ3JWZoXTV5sFc986z3HTpwQs9At6P4ImfuP3NQ= -github.com/golang/glog v1.2.4 h1:CNNw5U8lSiiBk7druxtSHHTsRWcxKoac6kZKm2peBBc= 
-github.com/golang/glog v1.2.4/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= +github.com/golang/glog v1.2.5 h1:DrW6hGnjIhtvhOIiAKT6Psh/Kd/ldepEa81DKeiRJ5I= +github.com/golang/glog v1.2.5/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -1475,15 +1475,15 @@ github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z github.com/spf13/afero v1.9.2/go.mod h1:iUV7ddyEEZPO5gA3zD4fJt6iStLlL+Lg4m2cihcDf8Y= github.com/spf13/afero v1.12.0 h1:UcOPyRBYczmFn6yvphxkn9ZEOY65cpwGKb5mL36mrqs= github.com/spf13/afero v1.12.0/go.mod h1:ZTlWwG4/ahT8W7T0WQ5uYmjI9duaLQGy3Q2OAl4sk/4= -github.com/spf13/cast v1.8.0 h1:gEN9K4b8Xws4EX0+a0reLmhq8moKn7ntRlQYgjPeCDk= -github.com/spf13/cast v1.8.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= +github.com/spf13/cast v1.9.2 h1:SsGfm7M8QOFtEzumm7UZrZdLLquNdzFYfIbEXntcFbE= +github.com/spf13/cast v1.9.2/go.mod h1:jNfB8QC9IA6ZuY2ZjDp0KtFO2LZZlg4S/7bzP6qqeHo= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= -github.com/spf13/cobra v1.9.1 h1:CXSaggrXdbHK9CF+8ywj8Amf7PBRmPCOJugH954Nnlo= -github.com/spf13/cobra v1.9.1/go.mod h1:nDyEzZ8ogv936Cinf6g1RU9MRY64Ir93oCnqb9wxYW0= +github.com/spf13/cobra v1.10.1 h1:lJeBwCfmrnXthfAupyUTzJ/J4Nc1RsHC/mSRU2dll/s= +github.com/spf13/cobra v1.10.1/go.mod h1:7SmJGaTHFVBY0jW4NXGluQoLvhqFQM+6XSKD+P4XaB0= github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o= -github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/pflag v1.0.9 h1:9exaQaMOCwffKiiiYk6/BndUBv+iRViNW+4lEMi0PvY= +github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/viper v1.20.1 h1:ZMi+z/lvLyPSCoNtFCpqjy0S4kPbirhpTMwl8BkW9X4= github.com/spf13/viper v1.20.1/go.mod h1:P9Mdzt1zoHIG8m2eZQinpiBjo6kCmZSKBClNNqjJvu4= github.com/spiffe/go-spiffe/v2 v2.5.0 h1:N2I01KCUkv1FAjZXJMwh95KK1ZIQLYbPfhaxw8WS0hE= @@ -1508,8 +1508,8 @@ github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1F github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= -github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= -github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY= @@ -1562,24 +1562,24 @@ go.opencensus.io v0.24.0 
h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= -go.opentelemetry.io/contrib/detectors/gcp v1.34.0 h1:JRxssobiPg23otYU5SbWtQC//snGVIM3Tx6QRzlQBao= -go.opentelemetry.io/contrib/detectors/gcp v1.34.0/go.mod h1:cV4BMFcscUR/ckqLkbfQmF0PRsq8w/lMGzdbCSveBHo= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.58.0 h1:PS8wXpbyaDJQ2VDHHncMe9Vct0Zn1fEjpsjrLxGJoSc= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.58.0/go.mod h1:HDBUsEjOuRC0EzKZ1bSaRGZWUBAzo+MhAcUUORSr4D0= +go.opentelemetry.io/contrib/detectors/gcp v1.36.0 h1:F7q2tNlCaHY9nMKHR6XH9/qkp8FktLnIcy6jJNyOCQw= +go.opentelemetry.io/contrib/detectors/gcp v1.36.0/go.mod h1:IbBN8uAIIx734PTonTPxAxnjc2pQTxWNkwfstZ+6H2k= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.60.0 h1:x7wzEgXfnzJcHDwStJT+mxOz4etr2EcexjqhBvmoakw= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.60.0/go.mod h1:rg+RlpR5dKwaS95IyyZqj5Wd4E13lk/msnTS0Xl9lJM= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.59.0 h1:CV7UdSGJt/Ao6Gp4CXckLxVRRsRgDHoI8XjbL3PDl8s= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.59.0/go.mod h1:FRmFuRJfag1IZ2dPkHnEoSFVgTVPUd2qf5Vi69hLb8I= -go.opentelemetry.io/otel v1.34.0 h1:zRLXxLCgL1WyKsPVrgbSdMN4c0FMkDAskSTQP+0hdUY= -go.opentelemetry.io/otel v1.34.0/go.mod h1:OWFPOQ+h4G8xpyjgqo4SxJYdDQ/qmRH+wivy7zzx9oI= +go.opentelemetry.io/otel v1.37.0 h1:9zhNfelUvx0KBfu/gb+ZgeAfAgtWrfHJZcAqFC228wQ= +go.opentelemetry.io/otel v1.37.0/go.mod h1:ehE/umFRLnuLa/vSccNq9oS1ErUlkkK71gMcN34UG8I= go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.29.0 h1:WDdP9acbMYjbKIyJUhTvtzj601sVJOqgWdUxSdR/Ysc= go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.29.0/go.mod h1:BLbf7zbNIONBLPwvFnwNHGj4zge8uTCM/UPIVW1Mq2I= -go.opentelemetry.io/otel/metric v1.34.0 h1:+eTR3U0MyfWjRDhmFMxe2SsW64QrZ84AOhvqS7Y+PoQ= -go.opentelemetry.io/otel/metric v1.34.0/go.mod h1:CEDrp0fy2D0MvkXE+dPV7cMi8tWZwX3dmaIhwPOaqHE= -go.opentelemetry.io/otel/sdk v1.34.0 h1:95zS4k/2GOy069d321O8jWgYsW3MzVV+KuSPKp7Wr1A= -go.opentelemetry.io/otel/sdk v1.34.0/go.mod h1:0e/pNiaMAqaykJGKbi+tSjWfNNHMTxoC9qANsCzbyxU= -go.opentelemetry.io/otel/sdk/metric v1.34.0 h1:5CeK9ujjbFVL5c1PhLuStg1wxA7vQv7ce1EK0Gyvahk= -go.opentelemetry.io/otel/sdk/metric v1.34.0/go.mod h1:jQ/r8Ze28zRKoNRdkjCZxfs6YvBTG1+YIqyFVFYec5w= -go.opentelemetry.io/otel/trace v1.34.0 h1:+ouXS2V8Rd4hp4580a8q23bg0azF2nI8cqLYnC8mh/k= -go.opentelemetry.io/otel/trace v1.34.0/go.mod h1:Svm7lSjQD7kG7KJ/MUHPVXSDGz2OX4h0M2jHBhmSfRE= +go.opentelemetry.io/otel/metric v1.37.0 h1:mvwbQS5m0tbmqML4NqK+e3aDiO02vsf/WgbsdpcPoZE= +go.opentelemetry.io/otel/metric v1.37.0/go.mod h1:04wGrZurHYKOc+RKeye86GwKiTb9FKm1WHtO+4EVr2E= +go.opentelemetry.io/otel/sdk v1.37.0 h1:ItB0QUqnjesGRvNcmAcU0LyvkVyGJ2xftD29bWdDvKI= +go.opentelemetry.io/otel/sdk v1.37.0/go.mod h1:VredYzxUvuo2q3WRcDnKDjbdvmO0sCzOvVAiY+yUkAg= +go.opentelemetry.io/otel/sdk/metric v1.37.0 h1:90lI228XrB9jCMuSdA0673aubgRobVZFhbjxHHspCPc= +go.opentelemetry.io/otel/sdk/metric v1.37.0/go.mod h1:cNen4ZWfiD37l5NhS+Keb5RXVWZWpRE+9WyVCpbo5ps= +go.opentelemetry.io/otel/trace v1.37.0 h1:HLdcFNbRQBE2imdSEgm/kwqmQj1Or1l/7bW6mxVK7z4= +go.opentelemetry.io/otel/trace v1.37.0/go.mod h1:TlgrlQ+PtQO5XFerSPUYG0JSgGyryXewPGyayAWSBS0= 
go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.opentelemetry.io/proto/otlp v0.15.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= @@ -1601,8 +1601,12 @@ go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9E go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= go.uber.org/zap v1.18.1/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= -golang.org/x/arch v0.15.0 h1:QtOrQd0bTUnhNVNndMpLHNWrDmYzZ2KDqSrEymqInZw= -golang.org/x/arch v0.15.0/go.mod h1:JmwW7aLIoRUKgaTzhkiEFxvcEiQGyOg9BMonBJUS7EE= +go.yaml.in/yaml/v2 v2.4.2 h1:DzmwEr2rDGHl7lsFgAHxmNz/1NlQ7xLIrlN2h5d1eGI= +go.yaml.in/yaml/v2 v2.4.2/go.mod h1:081UH+NErpNdqlCXm3TtEran0rJZGxAYx9hb/ELlsPU= +go.yaml.in/yaml/v3 v3.0.3 h1:bXOww4E/J3f66rav3pX3m8w6jDE4knZjGOw8b5Y6iNE= +go.yaml.in/yaml/v3 v3.0.3/go.mod h1:tBHosrYAkRZjRAOREWbDnBXUf08JOwYq++0QNwQiWzI= +golang.org/x/arch v0.17.0 h1:4O3dfLzd+lQewptAHqjewQZQDyEdejz3VwgeYwkZneU= +golang.org/x/arch v0.17.0/go.mod h1:bdwinDaKcfZUGpH09BB7ZmOfhalA8lQdzl62l8gGWsk= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= @@ -1620,8 +1624,8 @@ golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliY golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= golang.org/x/crypto v0.32.0/go.mod h1:ZnnJkOaASj8g0AjIduWNlq2NRxL0PlBrbKVyZ6V/Ugc= -golang.org/x/crypto v0.37.0 h1:kJNSjF/Xp7kU0iB2Z+9viTPMW4EqqsrywMXLJOOsXSE= -golang.org/x/crypto v0.37.0/go.mod h1:vg+k43peMZ0pUMhYmVAWysMK35e6ioLh3wB8ZCAfbVc= +golang.org/x/crypto v0.39.0 h1:SHs+kF4LP+f+p14esP5jAoDpHU8Gu/v9lFRK6IT5imM= +golang.org/x/crypto v0.39.0/go.mod h1:L+Xg3Wf6HoL4Bn4238Z6ft6KfEpN0tJGo53AAPC632U= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -1759,8 +1763,8 @@ golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk= golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= golang.org/x/net v0.34.0/go.mod h1:di0qlW3YNM5oh6GqDGQr92MyTozJPmybPK4Ev/Gm31k= -golang.org/x/net v0.39.0 h1:ZCu7HMWDxpXpaiKdhzIfaltL9Lp31x/3fCP11bc6/fY= -golang.org/x/net v0.39.0/go.mod h1:X7NRbYVEA+ewNkCNyJ513WmMdQ3BineSwVtN2zD/d+E= +golang.org/x/net v0.41.0 h1:vBTly1HeNPEn3wtREYfy4GZ/NECgw2Cnl+nK6Nz3uvw= +golang.org/x/net v0.41.0/go.mod h1:B/K4NNqkfmg07DQYrbwvSluqCJOOXwUjeb/5lOisjbA= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod 
h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -1790,8 +1794,8 @@ golang.org/x/oauth2 v0.4.0/go.mod h1:RznEsdpjGAINPTOF0UH/t+xJ75L18YO3Ho6Pyn+uRec golang.org/x/oauth2 v0.5.0/go.mod h1:9/XBHVqLaWO3/BRHs5jbpYCnOZVjj5V0ndyaAM7KB4I= golang.org/x/oauth2 v0.6.0/go.mod h1:ycmewcwgD4Rpr3eZJLSB4Kyyljb3qDh40vJ8STE5HKw= golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4= -golang.org/x/oauth2 v0.27.0 h1:da9Vo7/tDv5RH/7nZDz1eMGS/q1Vv1N/7FCrBhI9I3M= -golang.org/x/oauth2 v0.27.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8= +golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI= +golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1812,8 +1816,8 @@ golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= -golang.org/x/sync v0.13.0 h1:AauUjRAJ9OSnvULf/ARrrVywoJDy0YS2AwQ98I37610= -golang.org/x/sync v0.13.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= +golang.org/x/sync v0.15.0 h1:KWH3jNZsfyT6xfAfKiz6MRNmd46ByHDYaZ7KSkCtdW8= +golang.org/x/sync v0.15.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1926,8 +1930,8 @@ golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.32.0 h1:s77OFDvIQeibCmezSnk/q6iAfkdiQaJi4VzroCFrN20= -golang.org/x/sys v0.32.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw= +golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= golang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2/go.mod h1:TeRTkGYfJXctD9OcfyVLyj2J3IxLnKwHJR8f4D8a3YE= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= @@ -1942,8 +1946,8 @@ golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU= golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY= golang.org/x/term v0.28.0/go.mod h1:Sw/lC2IAUZ92udQNf3WodGtn4k/XoLyZoh8v/8uiwek= -golang.org/x/term v0.31.0 h1:erwDkOK1Msy6offm1mOgvspSkslFnIGsFnxOKoufg3o= -golang.org/x/term v0.31.0/go.mod h1:R4BeIy7D95HzImkxGkTW1UQTtP54tio2RyHz7PwK0aw= +golang.org/x/term v0.32.0 
h1:DR4lr0TjUs3epypdhTOkMmuF5CDFJ/8pOnbzMZPQ7bg= +golang.org/x/term v0.32.0/go.mod h1:uZG1FhGx848Sqfsq4/DlJr3xGGsYMu/L5GW4abiaEPQ= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1964,8 +1968,8 @@ golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= -golang.org/x/text v0.24.0 h1:dd5Bzh4yt5KYA8f9CJHCP4FB4D51c2c6JvN37xJJkJ0= -golang.org/x/text v0.24.0/go.mod h1:L8rBsPeo2pSS+xqN0d5u2ikmjtmoJbDBT1b7nHvFCdU= +golang.org/x/text v0.26.0 h1:P42AVeLghgTYr4+xUnTRKDMqpar+PtX7KWuNQL21L8M= +golang.org/x/text v0.26.0/go.mod h1:QK15LZJUUQVJxhz7wXgxSy/CJaTFjd0G+YLonydOVQA= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -2061,6 +2065,8 @@ gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJ gonum.org/v1/gonum v0.8.2/go.mod h1:oe/vMfY3deqTw+1EZJhuvEW2iwGF1bW9wwu7XCu0+v0= gonum.org/v1/gonum v0.9.3/go.mod h1:TZumC3NeyVQskjXqmyWt4S3bINhy7B4eYwW69EbyX+0= gonum.org/v1/gonum v0.11.0/go.mod h1:fSG4YDCxxUZQJ7rKsQrj0gMOg00Il0Z96/qMA4bVQhA= +gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk= +gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E= gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc= gonum.org/v1/plot v0.9.0/go.mod h1:3Pcqqmp6RHvJI72kgb8fThyUnav364FOsdDo2aGW5lY= @@ -2272,10 +2278,10 @@ google.golang.org/genproto v0.0.0-20230331144136-dcfb400f0633/go.mod h1:UUQDJDOl google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1/go.mod h1:nKE/iIaLqn2bQwXBg8f1g2Ylh6r5MN5CmZvuzZCgsCU= google.golang.org/genproto v0.0.0-20241118233622-e639e219e697 h1:ToEetK57OidYuqD4Q5w+vfEnPvPpuTwedCNVohYJfNk= google.golang.org/genproto v0.0.0-20241118233622-e639e219e697/go.mod h1:JJrvXBWRZaFMxBufik1a4RpFw4HhgVtBBWQeQgUj2cc= -google.golang.org/genproto/googleapis/api v0.0.0-20250414145226-207652e42e2e h1:UdXH7Kzbj+Vzastr5nVfccbmFsmYNygVLSPk1pEfDoY= -google.golang.org/genproto/googleapis/api v0.0.0-20250414145226-207652e42e2e/go.mod h1:085qFyf2+XaZlRdCgKNCIZ3afY2p4HHZdoIRpId8F4A= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250422160041-2d3770c4ea7f h1:N/PrbTw4kdkqNRzVfWPrBekzLuarFREcbFOiOLkXon4= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250422160041-2d3770c4ea7f/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A= +google.golang.org/genproto/googleapis/api v0.0.0-20250707201910-8d1bb00bc6a7 h1:FiusG7LWj+4byqhbvmB+Q93B/mOxJLN2DTozDuZm4EU= +google.golang.org/genproto/googleapis/api v0.0.0-20250707201910-8d1bb00bc6a7/go.mod h1:kXqgZtrWaf6qS3jZOCnCH7WYfrvFjkC51bM8fz3RsCA= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250707201910-8d1bb00bc6a7 
h1:pFyd6EwwL2TqFf8emdthzeX+gZE1ElRq3iM8pui4KBY= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250707201910-8d1bb00bc6a7/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM= @@ -2323,8 +2329,8 @@ google.golang.org/grpc v1.52.3/go.mod h1:pu6fVzoFb+NBYNAvQL08ic+lvB2IojljRYuun5v google.golang.org/grpc v1.53.0/go.mod h1:OnIrk0ipVdj4N5d9IUoFUx72/VlD7+jUsHwZgwSMQpw= google.golang.org/grpc v1.54.0/go.mod h1:PUSEXI6iWghWaB6lXM4knEgpJNu2qUcKfDtNci3EC2g= google.golang.org/grpc v1.56.3/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s= -google.golang.org/grpc v1.72.0 h1:S7UkcVa60b5AAQTaO6ZKamFp1zMZSU0fGDK2WZLbBnM= -google.golang.org/grpc v1.72.0/go.mod h1:wH5Aktxcg25y1I3w7H69nHfXdOG3UiadoBtjh3izSDM= +google.golang.org/grpc v1.75.0 h1:+TW+dqTd2Biwe6KKfhE5JpiYIBWq865PhKGSXiivqt4= +google.golang.org/grpc v1.75.0/go.mod h1:JtPAzKiq4v1xcAB2hydNlWI2RnF85XXcV0mhKXr2ecQ= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= @@ -2344,8 +2350,8 @@ google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqw google.golang.org/protobuf v1.29.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= -google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY= -google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY= +google.golang.org/protobuf v1.36.8 h1:xHScyCOEuuwZEc6UtSOvPbAT4zRh0xcNRYekJwfqyMc= +google.golang.org/protobuf v1.36.8/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -2432,6 +2438,6 @@ rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= -sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= -sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= +sigs.k8s.io/yaml v1.6.0 h1:G8fkbMSAFqgEFgh4b1wmtzDnioxFCUgTZhlbj5P9QYs= +sigs.k8s.io/yaml v1.6.0/go.mod h1:796bPqUfzR/0jLAl6XjHl3Ck7MiyVv8dbTdyT3/pMf4= sourcegraph.com/sourcegraph/appdash v0.0.0-20190731080439-ebfcffb1b5c0/go.mod h1:hI742Nqp5OhwiqlzhgfbWU4mW4yO10fP+LoT9WOswdU= diff --git a/go.work.example b/go.work.example index c9297b1ace8..0d4d9d1493d 100644 --- a/go.work.example +++ b/go.work.example @@ -1,11 +1,8 @@ -go 1.22.0 - -toolchain go1.22.3 +go 1.24.3 use ( ./ - ./modules/apps/callbacks + ./simapp ./modules/light-clients/08-wasm ./e2e - ./simapp 
) diff --git a/modules/apps/27-interchain-accounts/controller/ibc_middleware.go b/modules/apps/27-interchain-accounts/controller/ibc_middleware.go index 99a7ae2d80f..6a13ca932c5 100644 --- a/modules/apps/27-interchain-accounts/controller/ibc_middleware.go +++ b/modules/apps/27-interchain-accounts/controller/ibc_middleware.go @@ -26,34 +26,53 @@ var ( // ICA controller keeper and the underlying application. type IBCMiddleware struct { app porttypes.IBCModule - keeper keeper.Keeper + keeper *keeper.Keeper } // NewIBCMiddleware creates a new IBCMiddleware given the associated keeper. // The underlying application is set to nil and authentication is assumed to // be performed by a Cosmos SDK module that sends messages to controller message server. -func NewIBCMiddleware(k keeper.Keeper) IBCMiddleware { - return IBCMiddleware{ +func NewIBCMiddleware(k *keeper.Keeper) *IBCMiddleware { + return &IBCMiddleware{ app: nil, keeper: k, } } // NewIBCMiddlewareWithAuth creates a new IBCMiddleware given the associated keeper and underlying application -func NewIBCMiddlewareWithAuth(app porttypes.IBCModule, k keeper.Keeper) IBCMiddleware { - return IBCMiddleware{ +func NewIBCMiddlewareWithAuth(app porttypes.IBCModule, k *keeper.Keeper) *IBCMiddleware { + return &IBCMiddleware{ app: app, keeper: k, } } +// SetUnderlyingApplication sets the underlying application for the middleware. +func (im *IBCMiddleware) SetUnderlyingApplication(app porttypes.IBCModule) { + if app == nil { + panic(errors.New("underlying application cannot be nil")) + } + if im.app != nil { + panic(errors.New("underlying application already set")) + } + im.app = app +} + +// SetICS4Wrapper sets the ICS4Wrapper for the middleware. +func (im *IBCMiddleware) SetICS4Wrapper(ics4Wrapper porttypes.ICS4Wrapper) { + if ics4Wrapper == nil { + panic(errors.New("ICS4Wrapper cannot be nil")) + } + im.keeper.WithICS4Wrapper(ics4Wrapper) +} + // OnChanOpenInit implements the IBCMiddleware interface // // Interchain Accounts is implemented to act as middleware for connected authentication modules on // the controller side. The connected modules may not change the controller side portID or // version. They will be allowed to perform custom logic without changing // the parameters stored within a channel struct. -func (im IBCMiddleware) OnChanOpenInit( +func (im *IBCMiddleware) OnChanOpenInit( ctx sdk.Context, order channeltypes.Order, connectionHops []string, @@ -84,7 +103,7 @@ func (im IBCMiddleware) OnChanOpenInit( } // OnChanOpenTry implements the IBCMiddleware interface -func (IBCMiddleware) OnChanOpenTry( +func (*IBCMiddleware) OnChanOpenTry( ctx sdk.Context, order channeltypes.Order, connectionHops []string, @@ -102,7 +121,7 @@ func (IBCMiddleware) OnChanOpenTry( // the controller side. The connected modules may not change the portID or // version. They will be allowed to perform custom logic without changing // the parameters stored within a channel struct. 
-func (im IBCMiddleware) OnChanOpenAck( +func (im *IBCMiddleware) OnChanOpenAck( ctx sdk.Context, portID, channelID string, @@ -131,7 +150,7 @@ func (im IBCMiddleware) OnChanOpenAck( } // OnChanOpenConfirm implements the IBCMiddleware interface -func (IBCMiddleware) OnChanOpenConfirm( +func (*IBCMiddleware) OnChanOpenConfirm( ctx sdk.Context, portID, channelID string, @@ -140,7 +159,7 @@ func (IBCMiddleware) OnChanOpenConfirm( } // OnChanCloseInit implements the IBCMiddleware interface -func (IBCMiddleware) OnChanCloseInit( +func (*IBCMiddleware) OnChanCloseInit( ctx sdk.Context, portID, channelID string, @@ -150,7 +169,7 @@ func (IBCMiddleware) OnChanCloseInit( } // OnChanCloseConfirm implements the IBCMiddleware interface -func (im IBCMiddleware) OnChanCloseConfirm( +func (im *IBCMiddleware) OnChanCloseConfirm( ctx sdk.Context, portID, channelID string, @@ -172,7 +191,7 @@ func (im IBCMiddleware) OnChanCloseConfirm( } // OnRecvPacket implements the IBCMiddleware interface -func (IBCMiddleware) OnRecvPacket( +func (*IBCMiddleware) OnRecvPacket( ctx sdk.Context, _ string, packet channeltypes.Packet, @@ -185,7 +204,7 @@ func (IBCMiddleware) OnRecvPacket( } // OnAcknowledgementPacket implements the IBCMiddleware interface -func (im IBCMiddleware) OnAcknowledgementPacket( +func (im *IBCMiddleware) OnAcknowledgementPacket( ctx sdk.Context, channelVersion string, packet channeltypes.Packet, @@ -210,7 +229,7 @@ func (im IBCMiddleware) OnAcknowledgementPacket( } // OnTimeoutPacket implements the IBCMiddleware interface -func (im IBCMiddleware) OnTimeoutPacket( +func (im *IBCMiddleware) OnTimeoutPacket( ctx sdk.Context, channelVersion string, packet channeltypes.Packet, @@ -237,7 +256,7 @@ func (im IBCMiddleware) OnTimeoutPacket( } // SendPacket implements the ICS4 Wrapper interface -func (IBCMiddleware) SendPacket( +func (*IBCMiddleware) SendPacket( ctx sdk.Context, sourcePort string, sourceChannel string, @@ -249,7 +268,7 @@ func (IBCMiddleware) SendPacket( } // WriteAcknowledgement implements the ICS4 Wrapper interface -func (IBCMiddleware) WriteAcknowledgement( +func (*IBCMiddleware) WriteAcknowledgement( ctx sdk.Context, packet ibcexported.PacketI, ack ibcexported.Acknowledgement, @@ -258,14 +277,14 @@ func (IBCMiddleware) WriteAcknowledgement( } // GetAppVersion returns the interchain accounts metadata. -func (im IBCMiddleware) GetAppVersion(ctx sdk.Context, portID, channelID string) (string, bool) { +func (im *IBCMiddleware) GetAppVersion(ctx sdk.Context, portID, channelID string) (string, bool) { return im.keeper.GetAppVersion(ctx, portID, channelID) } // UnmarshalPacketData attempts to unmarshal the provided packet data bytes // into an InterchainAccountPacketData. This function implements the optional // PacketDataUnmarshaler interface required for ADR 008 support. 
-func (im IBCMiddleware) UnmarshalPacketData(ctx sdk.Context, portID string, channelID string, bz []byte) (any, string, error) { +func (im *IBCMiddleware) UnmarshalPacketData(ctx sdk.Context, portID string, channelID string, bz []byte) (any, string, error) { var data icatypes.InterchainAccountPacketData err := data.UnmarshalJSON(bz) if err != nil { diff --git a/modules/apps/27-interchain-accounts/controller/ibc_middleware_test.go b/modules/apps/27-interchain-accounts/controller/ibc_middleware_test.go index 64acba29059..1725ca21b71 100644 --- a/modules/apps/27-interchain-accounts/controller/ibc_middleware_test.go +++ b/modules/apps/27-interchain-accounts/controller/ibc_middleware_test.go @@ -19,6 +19,7 @@ import ( host "github.com/cosmos/ibc-go/v10/modules/core/24-host" ibcerrors "github.com/cosmos/ibc-go/v10/modules/core/errors" ibctesting "github.com/cosmos/ibc-go/v10/testing" + ibcmock "github.com/cosmos/ibc-go/v10/testing/mock" ) const invalidVersion = "invalid|version" @@ -49,11 +50,11 @@ func TestICATestSuite(t *testing.T) { testifysuite.Run(t, new(InterchainAccountsTestSuite)) } -func (suite *InterchainAccountsTestSuite) SetupTest() { - suite.coordinator = ibctesting.NewCoordinator(suite.T(), 3) - suite.chainA = suite.coordinator.GetChain(ibctesting.GetChainID(1)) - suite.chainB = suite.coordinator.GetChain(ibctesting.GetChainID(2)) - suite.chainC = suite.coordinator.GetChain(ibctesting.GetChainID(3)) +func (s *InterchainAccountsTestSuite) SetupTest() { + s.coordinator = ibctesting.NewCoordinator(s.T(), 3) + s.chainA = s.coordinator.GetChain(ibctesting.GetChainID(1)) + s.chainB = s.coordinator.GetChain(ibctesting.GetChainID(2)) + s.chainC = s.coordinator.GetChain(ibctesting.GetChainID(3)) } func NewICAPath(chainA, chainB *ibctesting.TestChain, ordering channeltypes.Order) *ibctesting.Path { @@ -108,7 +109,89 @@ func SetupICAPath(path *ibctesting.Path, owner string) error { return path.EndpointB.ChanOpenConfirm() } -func (suite *InterchainAccountsTestSuite) TestOnChanOpenInit() { +func (s *InterchainAccountsTestSuite) TestSetUnderlyingApplication() { + var ( + app porttypes.IBCModule + mw porttypes.Middleware + ) + testCases := []struct { + name string + malleate func() + expPanic bool + }{ + { + "success", func() {}, false, + }, + { + "nil underlying app", func() { + app = nil + }, true, + }, + { + "app already set", func() { + mw.SetUnderlyingApplication(&ibcmock.IBCModule{}) + }, true, + }, + } + + for _, tc := range testCases { + s.Run(tc.name, func() { + s.SetupTest() // reset + + app = &ibcmock.IBCModule{} + mw = controller.NewIBCMiddleware(s.chainA.GetSimApp().ICAControllerKeeper) + + tc.malleate() // malleate mutates test data + + if tc.expPanic { + s.Require().Panics(func() { + mw.SetUnderlyingApplication(app) + }) + } else { + s.Require().NotPanics(func() { + mw.SetUnderlyingApplication(app) + }) + } + }) + } +} + +func (s *InterchainAccountsTestSuite) TestSetICS4Wrapper() { + var wrapper porttypes.ICS4Wrapper + mw := controller.NewIBCMiddleware(s.chainA.GetSimApp().ICAControllerKeeper) + testCases := []struct { + name string + malleate func() + expPanic bool + }{ + { + "success", func() {}, false, + }, + { + "nil ICS4Wrapper", func() { + wrapper = nil + }, true, + }, + } + for _, tc := range testCases { + s.Run(tc.name, func() { + s.SetupTest() // reset + wrapper = s.chainA.GetSimApp().GetIBCKeeper().ChannelKeeper + tc.malleate() // malleate mutates test data + if tc.expPanic { + s.Require().Panics(func() { + mw.SetICS4Wrapper(wrapper) + }) + } else { + 
s.Require().NotPanics(func() { + mw.SetICS4Wrapper(wrapper) + }) + } + }) + } +} + +func (s *InterchainAccountsTestSuite) TestOnChanOpenInit() { var ( channel *channeltypes.Channel isNilApp bool @@ -127,7 +210,7 @@ func (suite *InterchainAccountsTestSuite) TestOnChanOpenInit() { "ICA auth module modification of channel version is ignored", func() { // NOTE: explicitly modify the channel version via the auth module callback, // ensuring the expected JSON encoded metadata is not modified upon return - suite.chainA.GetSimApp().ICAAuthModule.IBCApp.OnChanOpenInit = func(ctx sdk.Context, order channeltypes.Order, connectionHops []string, + s.chainA.GetSimApp().ICAAuthModule.IBCApp.OnChanOpenInit = func(ctx sdk.Context, order channeltypes.Order, connectionHops []string, portID, channelID string, counterparty channeltypes.Counterparty, version string, ) (string, error) { @@ -137,12 +220,12 @@ func (suite *InterchainAccountsTestSuite) TestOnChanOpenInit() { }, { "controller submodule disabled", func() { - suite.chainA.GetSimApp().ICAControllerKeeper.SetParams(suite.chainA.GetContext(), types.NewParams(false)) + s.chainA.GetSimApp().ICAControllerKeeper.SetParams(s.chainA.GetContext(), types.NewParams(false)) }, types.ErrControllerSubModuleDisabled, }, { "ICA auth module callback fails", func() { - suite.chainA.GetSimApp().ICAAuthModule.IBCApp.OnChanOpenInit = func(ctx sdk.Context, order channeltypes.Order, connectionHops []string, + s.chainA.GetSimApp().ICAAuthModule.IBCApp.OnChanOpenInit = func(ctx sdk.Context, order channeltypes.Order, connectionHops []string, portID, channelID string, counterparty channeltypes.Counterparty, version string, ) (string, error) { @@ -157,9 +240,9 @@ func (suite *InterchainAccountsTestSuite) TestOnChanOpenInit() { }, { "middleware disabled", func() { - suite.chainA.GetSimApp().ICAControllerKeeper.DeleteMiddlewareEnabled(suite.chainA.GetContext(), path.EndpointA.ChannelConfig.PortID, path.EndpointA.ConnectionID) + s.chainA.GetSimApp().ICAControllerKeeper.DeleteMiddlewareEnabled(s.chainA.GetContext(), path.EndpointA.ChannelConfig.PortID, path.EndpointA.ConnectionID) - suite.chainA.GetSimApp().ICAAuthModule.IBCApp.OnChanOpenInit = func(ctx sdk.Context, order channeltypes.Order, connectionHops []string, + s.chainA.GetSimApp().ICAAuthModule.IBCApp.OnChanOpenInit = func(ctx sdk.Context, order channeltypes.Order, connectionHops []string, portID, channelID string, counterparty channeltypes.Counterparty, version string, ) (string, error) { @@ -171,21 +254,21 @@ func (suite *InterchainAccountsTestSuite) TestOnChanOpenInit() { for _, ordering := range []channeltypes.Order{channeltypes.UNORDERED, channeltypes.ORDERED} { for _, tc := range testCases { - suite.Run(tc.name, func() { - suite.SetupTest() // reset + s.Run(tc.name, func() { + s.SetupTest() // reset isNilApp = false - path = NewICAPath(suite.chainA, suite.chainB, ordering) + path = NewICAPath(s.chainA, s.chainB, ordering) path.SetupConnections() // mock init interchain account portID, err := icatypes.NewControllerPortID(TestOwnerAddress) - suite.Require().NoError(err) + s.Require().NoError(err) path.EndpointA.ChannelConfig.PortID = portID path.EndpointA.ChannelID = ibctesting.FirstChannelID - suite.chainA.GetSimApp().ICAControllerKeeper.SetMiddlewareEnabled(suite.chainA.GetContext(), path.EndpointA.ChannelConfig.PortID, path.EndpointA.ConnectionID) + s.chainA.GetSimApp().ICAControllerKeeper.SetMiddlewareEnabled(s.chainA.GetContext(), path.EndpointA.ChannelConfig.PortID, path.EndpointA.ConnectionID) // default values 
counterparty := channeltypes.NewCounterparty(path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID) @@ -200,24 +283,24 @@ func (suite *InterchainAccountsTestSuite) TestOnChanOpenInit() { tc.malleate() // malleate mutates test data // ensure channel on chainA is set in state - suite.chainA.GetSimApp().IBCKeeper.ChannelKeeper.SetChannel(suite.chainA.GetContext(), path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, *channel) + s.chainA.GetSimApp().IBCKeeper.ChannelKeeper.SetChannel(s.chainA.GetContext(), path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, *channel) - cbs, ok := suite.chainA.App.GetIBCKeeper().PortKeeper.Route(path.EndpointA.ChannelConfig.PortID) - suite.Require().True(ok) + cbs, ok := s.chainA.App.GetIBCKeeper().PortKeeper.Route(path.EndpointA.ChannelConfig.PortID) + s.Require().True(ok) if isNilApp { - cbs = controller.NewIBCMiddleware(suite.chainA.GetSimApp().ICAControllerKeeper) + cbs = controller.NewIBCMiddleware(s.chainA.GetSimApp().ICAControllerKeeper) } - version, err := cbs.OnChanOpenInit(suite.chainA.GetContext(), channel.Ordering, channel.ConnectionHops, + version, err := cbs.OnChanOpenInit(s.chainA.GetContext(), channel.Ordering, channel.ConnectionHops, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, channel.Counterparty, channel.Version, ) if tc.expErr == nil { - suite.Require().Equal(TestVersion, version) - suite.Require().NoError(err) + s.Require().Equal(TestVersion, version) + s.Require().NoError(err) } else { - suite.Require().ErrorContains(err, tc.expErr.Error()) + s.Require().ErrorContains(err, tc.expErr.Error()) } }) } @@ -230,52 +313,49 @@ func (suite *InterchainAccountsTestSuite) TestOnChanOpenInit() { // Sending a MsgChanOpenTry will never reach the application callback due to // core IBC checks not passing, so a call to the application callback is also // done directly. 
-func (suite *InterchainAccountsTestSuite) TestChanOpenTry() { +func (s *InterchainAccountsTestSuite) TestChanOpenTry() { for _, ordering := range []channeltypes.Order{channeltypes.UNORDERED, channeltypes.ORDERED} { - suite.SetupTest() // reset - path := NewICAPath(suite.chainA, suite.chainB, ordering) + s.SetupTest() // reset + path := NewICAPath(s.chainA, s.chainB, ordering) path.SetupConnections() err := RegisterInterchainAccount(path.EndpointA, TestOwnerAddress) - suite.Require().NoError(err) + s.Require().NoError(err) // chainB also creates a controller port err = RegisterInterchainAccount(path.EndpointB, TestOwnerAddress) - suite.Require().NoError(err) + s.Require().NoError(err) err = path.EndpointA.UpdateClient() - suite.Require().NoError(err) + s.Require().NoError(err) channelKey := host.ChannelKey(path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID) initProof, proofHeight := path.EndpointB.Chain.QueryProof(channelKey) // use chainA (controller) for ChanOpenTry msg := channeltypes.NewMsgChannelOpenTry(path.EndpointA.ChannelConfig.PortID, TestVersion, ordering, []string{path.EndpointA.ConnectionID}, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, TestVersion, initProof, proofHeight, icatypes.ModuleName) - handler := suite.chainA.GetSimApp().MsgServiceRouter().Handler(msg) - _, err = handler(suite.chainA.GetContext(), msg) + handler := s.chainA.GetSimApp().MsgServiceRouter().Handler(msg) + _, err = handler(s.chainA.GetContext(), msg) - suite.Require().Error(err) + s.Require().Error(err) - cbs, ok := suite.chainA.App.GetIBCKeeper().PortKeeper.Route(path.EndpointB.ChannelConfig.PortID) - suite.Require().True(ok) + cbs, ok := s.chainA.App.GetIBCKeeper().PortKeeper.Route(path.EndpointB.ChannelConfig.PortID) + s.Require().True(ok) counterparty := channeltypes.NewCounterparty(path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID) version, err := cbs.OnChanOpenTry( - suite.chainA.GetContext(), path.EndpointA.ChannelConfig.Order, []string{path.EndpointA.ConnectionID}, + s.chainA.GetContext(), path.EndpointA.ChannelConfig.Order, []string{path.EndpointA.ConnectionID}, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, counterparty, path.EndpointB.ChannelConfig.Version, ) - suite.Require().Error(err) - suite.Require().Equal("", version) + s.Require().Error(err) + s.Require().Empty(version) } } -func (suite *InterchainAccountsTestSuite) TestOnChanOpenAck() { - var ( - path *ibctesting.Path - isNilApp bool - ) +func (s *InterchainAccountsTestSuite) TestOnChanOpenAck() { + var path *ibctesting.Path testCases := []struct { name string @@ -287,7 +367,7 @@ func (suite *InterchainAccountsTestSuite) TestOnChanOpenAck() { }, { "controller submodule disabled", func() { - suite.chainA.GetSimApp().ICAControllerKeeper.SetParams(suite.chainA.GetContext(), types.NewParams(false)) + s.chainA.GetSimApp().ICAControllerKeeper.SetParams(s.chainA.GetContext(), types.NewParams(false)) }, types.ErrControllerSubModuleDisabled, }, { @@ -297,23 +377,18 @@ func (suite *InterchainAccountsTestSuite) TestOnChanOpenAck() { }, { "ICA auth module callback fails", func() { - suite.chainA.GetSimApp().ICAAuthModule.IBCApp.OnChanOpenAck = func( + s.chainA.GetSimApp().ICAAuthModule.IBCApp.OnChanOpenAck = func( ctx sdk.Context, portID, channelID string, counterpartyChannelID string, counterpartyVersion string, ) error { return errors.New("mock ica auth fails") } }, errors.New("mock ica auth fails"), }, - { - "nil underlying app", func() { - isNilApp = true - }, nil, - }, { "middleware 
disabled", func() { - suite.chainA.GetSimApp().ICAControllerKeeper.DeleteMiddlewareEnabled(suite.chainA.GetContext(), path.EndpointA.ChannelConfig.PortID, path.EndpointA.ConnectionID) + s.chainA.GetSimApp().ICAControllerKeeper.DeleteMiddlewareEnabled(s.chainA.GetContext(), path.EndpointA.ChannelConfig.PortID, path.EndpointA.ConnectionID) - suite.chainA.GetSimApp().ICAAuthModule.IBCApp.OnChanOpenAck = func( + s.chainA.GetSimApp().ICAAuthModule.IBCApp.OnChanOpenAck = func( ctx sdk.Context, portID, channelID string, counterpartyChannelID string, counterpartyVersion string, ) error { return errors.New("error should be unreachable") @@ -324,34 +399,29 @@ func (suite *InterchainAccountsTestSuite) TestOnChanOpenAck() { for _, ordering := range []channeltypes.Order{channeltypes.UNORDERED, channeltypes.ORDERED} { for _, tc := range testCases { - suite.Run(tc.name, func() { - suite.SetupTest() // reset - isNilApp = false + s.Run(tc.name, func() { + s.SetupTest() // reset - path = NewICAPath(suite.chainA, suite.chainB, ordering) + path = NewICAPath(s.chainA, s.chainB, ordering) path.SetupConnections() err := RegisterInterchainAccount(path.EndpointA, TestOwnerAddress) - suite.Require().NoError(err) + s.Require().NoError(err) err = path.EndpointB.ChanOpenTry() - suite.Require().NoError(err) + s.Require().NoError(err) tc.malleate() // malleate mutates test data - cbs, ok := suite.chainA.App.GetIBCKeeper().PortKeeper.Route(path.EndpointA.ChannelConfig.PortID) - suite.Require().True(ok) - - err = cbs.OnChanOpenAck(suite.chainA.GetContext(), path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelID, path.EndpointB.ChannelConfig.Version) + cbs, ok := s.chainA.App.GetIBCKeeper().PortKeeper.Route(path.EndpointA.ChannelConfig.PortID) + s.Require().True(ok) - if isNilApp { - cbs = controller.NewIBCMiddleware(suite.chainA.GetSimApp().ICAControllerKeeper) - } + err = cbs.OnChanOpenAck(s.chainA.GetContext(), path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelID, path.EndpointB.ChannelConfig.Version) if tc.expErr == nil { - suite.Require().NoError(err) + s.Require().NoError(err) } else { - suite.Require().ErrorContains(err, tc.expErr.Error()) + s.Require().ErrorContains(err, tc.expErr.Error()) } }) } @@ -363,27 +433,27 @@ func (suite *InterchainAccountsTestSuite) TestOnChanOpenAck() { // Sending a MsgChanOpenConfirm will never reach the application callback due to // core IBC checks not passing, so a call to the application callback is also // done directly. 
-func (suite *InterchainAccountsTestSuite) TestChanOpenConfirm() { +func (s *InterchainAccountsTestSuite) TestChanOpenConfirm() { for _, ordering := range []channeltypes.Order{channeltypes.UNORDERED, channeltypes.ORDERED} { - suite.SetupTest() // reset - path := NewICAPath(suite.chainA, suite.chainB, ordering) + s.SetupTest() // reset + path := NewICAPath(s.chainA, s.chainB, ordering) path.SetupConnections() err := RegisterInterchainAccount(path.EndpointA, TestOwnerAddress) - suite.Require().NoError(err) + s.Require().NoError(err) err = path.EndpointB.ChanOpenTry() - suite.Require().NoError(err) + s.Require().NoError(err) // chainB maliciously sets channel to OPEN channel := channeltypes.NewChannel(channeltypes.OPEN, ordering, channeltypes.NewCounterparty(path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID), []string{path.EndpointB.ConnectionID}, TestVersion) - suite.chainB.GetSimApp().GetIBCKeeper().ChannelKeeper.SetChannel(suite.chainB.GetContext(), path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, channel) + s.chainB.GetSimApp().GetIBCKeeper().ChannelKeeper.SetChannel(s.chainB.GetContext(), path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, channel) // commit state changes so proof can be created - suite.chainB.NextBlock() + s.chainB.NextBlock() err = path.EndpointA.UpdateClient() - suite.Require().NoError(err) + s.Require().NoError(err) // query proof from ChainB channelKey := host.ChannelKey(path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID) @@ -391,44 +461,44 @@ func (suite *InterchainAccountsTestSuite) TestChanOpenConfirm() { // use chainA (controller) for ChanOpenConfirm msg := channeltypes.NewMsgChannelOpenConfirm(path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, ackProof, proofHeight, icatypes.ModuleName) - handler := suite.chainA.GetSimApp().MsgServiceRouter().Handler(msg) - _, err = handler(suite.chainA.GetContext(), msg) + handler := s.chainA.GetSimApp().MsgServiceRouter().Handler(msg) + _, err = handler(s.chainA.GetContext(), msg) - suite.Require().Error(err) + s.Require().Error(err) - cbs, ok := suite.chainA.App.GetIBCKeeper().PortKeeper.Route(path.EndpointA.ChannelConfig.PortID) - suite.Require().True(ok) + cbs, ok := s.chainA.App.GetIBCKeeper().PortKeeper.Route(path.EndpointA.ChannelConfig.PortID) + s.Require().True(ok) err = cbs.OnChanOpenConfirm( - suite.chainA.GetContext(), path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, + s.chainA.GetContext(), path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, ) - suite.Require().Error(err) + s.Require().Error(err) } } // OnChanCloseInit on controller (chainA) -func (suite *InterchainAccountsTestSuite) TestOnChanCloseInit() { +func (s *InterchainAccountsTestSuite) TestOnChanCloseInit() { for _, ordering := range []channeltypes.Order{channeltypes.UNORDERED, channeltypes.ORDERED} { - suite.SetupTest() // reset + s.SetupTest() // reset - path := NewICAPath(suite.chainA, suite.chainB, ordering) + path := NewICAPath(s.chainA, s.chainB, ordering) path.SetupConnections() err := SetupICAPath(path, TestOwnerAddress) - suite.Require().NoError(err) + s.Require().NoError(err) - cbs, ok := suite.chainA.App.GetIBCKeeper().PortKeeper.Route(path.EndpointA.ChannelConfig.PortID) - suite.Require().True(ok) + cbs, ok := s.chainA.App.GetIBCKeeper().PortKeeper.Route(path.EndpointA.ChannelConfig.PortID) + s.Require().True(ok) err = cbs.OnChanCloseInit( - suite.chainA.GetContext(), path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, + s.chainA.GetContext(), 
path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, ) - suite.Require().Error(err) + s.Require().Error(err) } } -func (suite *InterchainAccountsTestSuite) TestOnChanCloseConfirm() { +func (s *InterchainAccountsTestSuite) TestOnChanCloseConfirm() { var ( path *ibctesting.Path isNilApp bool @@ -451,39 +521,39 @@ func (suite *InterchainAccountsTestSuite) TestOnChanCloseConfirm() { for _, ordering := range []channeltypes.Order{channeltypes.UNORDERED, channeltypes.ORDERED} { for _, tc := range testCases { - suite.Run(tc.name, func() { - suite.SetupTest() // reset + s.Run(tc.name, func() { + s.SetupTest() // reset isNilApp = false - path = NewICAPath(suite.chainA, suite.chainB, ordering) + path = NewICAPath(s.chainA, s.chainB, ordering) path.SetupConnections() err := SetupICAPath(path, TestOwnerAddress) - suite.Require().NoError(err) + s.Require().NoError(err) tc.malleate() // malleate mutates test data - cbs, ok := suite.chainA.App.GetIBCKeeper().PortKeeper.Route(path.EndpointA.ChannelConfig.PortID) - suite.Require().True(ok) + cbs, ok := s.chainA.App.GetIBCKeeper().PortKeeper.Route(path.EndpointA.ChannelConfig.PortID) + s.Require().True(ok) if isNilApp { - cbs = controller.NewIBCMiddleware(suite.chainA.GetSimApp().ICAControllerKeeper) + cbs = controller.NewIBCMiddleware(s.chainA.GetSimApp().ICAControllerKeeper) } err = cbs.OnChanCloseConfirm( - suite.chainA.GetContext(), path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID) + s.chainA.GetContext(), path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID) if tc.expErr == nil { - suite.Require().NoError(err) + s.Require().NoError(err) } else { - suite.Require().ErrorIs(err, tc.expErr) + s.Require().ErrorIs(err, tc.expErr) } }) } } } -func (suite *InterchainAccountsTestSuite) TestOnRecvPacket() { +func (s *InterchainAccountsTestSuite) TestOnRecvPacket() { testCases := []struct { name string malleate func() @@ -496,23 +566,23 @@ func (suite *InterchainAccountsTestSuite) TestOnRecvPacket() { for _, ordering := range []channeltypes.Order{channeltypes.UNORDERED, channeltypes.ORDERED} { for _, tc := range testCases { - suite.Run(tc.name, func() { - suite.SetupTest() // reset + s.Run(tc.name, func() { + s.SetupTest() // reset - path := NewICAPath(suite.chainA, suite.chainB, ordering) + path := NewICAPath(s.chainA, s.chainB, ordering) path.SetupConnections() err := SetupICAPath(path, TestOwnerAddress) - suite.Require().NoError(err) + s.Require().NoError(err) tc.malleate() // malleate mutates test data - cbs, ok := suite.chainA.App.GetIBCKeeper().PortKeeper.Route(path.EndpointA.ChannelConfig.PortID) - suite.Require().True(ok) + cbs, ok := s.chainA.App.GetIBCKeeper().PortKeeper.Route(path.EndpointA.ChannelConfig.PortID) + s.Require().True(ok) packet := channeltypes.NewPacket( []byte("empty packet data"), - suite.chainB.SenderAccount.GetSequence(), + s.chainB.SenderAccount.GetSequence(), path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, path.EndpointA.ChannelConfig.PortID, @@ -521,9 +591,9 @@ func (suite *InterchainAccountsTestSuite) TestOnRecvPacket() { 0, ) - ctx := suite.chainA.GetContext() + ctx := s.chainA.GetContext() ack := cbs.OnRecvPacket(ctx, path.EndpointA.GetChannel().Version, packet, nil) - suite.Require().Equal(tc.expSuccess, ack.Success()) + s.Require().Equal(tc.expSuccess, ack.Success()) expectedEvents := sdk.Events{ sdk.NewEvent( @@ -536,13 +606,13 @@ func (suite *InterchainAccountsTestSuite) TestOnRecvPacket() { }.ToABCIEvents() expectedEvents = sdk.MarkEventsToIndex(expectedEvents, 
map[string]struct{}{}) - ibctesting.AssertEvents(&suite.Suite, expectedEvents, ctx.EventManager().Events().ToABCIEvents()) + ibctesting.AssertEvents(&s.Suite, expectedEvents, ctx.EventManager().Events().ToABCIEvents()) }) } } } -func (suite *InterchainAccountsTestSuite) TestOnAcknowledgementPacket() { +func (s *InterchainAccountsTestSuite) TestOnAcknowledgementPacket() { var ( path *ibctesting.Path isNilApp bool @@ -560,12 +630,12 @@ func (suite *InterchainAccountsTestSuite) TestOnAcknowledgementPacket() { }, { "controller submodule disabled", func() { - suite.chainA.GetSimApp().ICAControllerKeeper.SetParams(suite.chainA.GetContext(), types.NewParams(false)) + s.chainA.GetSimApp().ICAControllerKeeper.SetParams(s.chainA.GetContext(), types.NewParams(false)) }, types.ErrControllerSubModuleDisabled, }, { "ICA auth module callback fails", func() { - suite.chainA.GetSimApp().ICAAuthModule.IBCApp.OnAcknowledgementPacket = func( + s.chainA.GetSimApp().ICAAuthModule.IBCApp.OnAcknowledgementPacket = func( ctx sdk.Context, channelVersion string, packet channeltypes.Packet, acknowledgement []byte, relayer sdk.AccAddress, ) error { return errors.New("mock ica auth fails") @@ -579,9 +649,9 @@ func (suite *InterchainAccountsTestSuite) TestOnAcknowledgementPacket() { }, { "middleware disabled", func() { - suite.chainA.GetSimApp().ICAControllerKeeper.DeleteMiddlewareEnabled(suite.chainA.GetContext(), path.EndpointA.ChannelConfig.PortID, path.EndpointA.ConnectionID) + s.chainA.GetSimApp().ICAControllerKeeper.DeleteMiddlewareEnabled(s.chainA.GetContext(), path.EndpointA.ChannelConfig.PortID, path.EndpointA.ConnectionID) - suite.chainA.GetSimApp().ICAAuthModule.IBCApp.OnAcknowledgementPacket = func( + s.chainA.GetSimApp().ICAAuthModule.IBCApp.OnAcknowledgementPacket = func( ctx sdk.Context, channelVersion string, packet channeltypes.Packet, acknowledgement []byte, relayer sdk.AccAddress, ) error { return errors.New("error should be unreachable") @@ -592,19 +662,19 @@ func (suite *InterchainAccountsTestSuite) TestOnAcknowledgementPacket() { for _, ordering := range []channeltypes.Order{channeltypes.UNORDERED, channeltypes.ORDERED} { for _, tc := range testCases { - suite.Run(tc.msg, func() { - suite.SetupTest() // reset + s.Run(tc.msg, func() { + s.SetupTest() // reset isNilApp = false - path = NewICAPath(suite.chainA, suite.chainB, ordering) + path = NewICAPath(s.chainA, s.chainB, ordering) path.SetupConnections() err := SetupICAPath(path, TestOwnerAddress) - suite.Require().NoError(err) + s.Require().NoError(err) packet := channeltypes.NewPacket( []byte("empty packet data"), - suite.chainA.SenderAccount.GetSequence(), + s.chainA.SenderAccount.GetSequence(), path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, @@ -615,26 +685,26 @@ func (suite *InterchainAccountsTestSuite) TestOnAcknowledgementPacket() { tc.malleate() // malleate mutates test data - cbs, ok := suite.chainA.App.GetIBCKeeper().PortKeeper.Route(path.EndpointA.ChannelConfig.PortID) - suite.Require().True(ok) + cbs, ok := s.chainA.App.GetIBCKeeper().PortKeeper.Route(path.EndpointA.ChannelConfig.PortID) + s.Require().True(ok) if isNilApp { - cbs = controller.NewIBCMiddleware(suite.chainA.GetSimApp().ICAControllerKeeper) + cbs = controller.NewIBCMiddleware(s.chainA.GetSimApp().ICAControllerKeeper) } - err = cbs.OnAcknowledgementPacket(suite.chainA.GetContext(), path.EndpointA.GetChannel().Version, packet, []byte("ack"), nil) + err = cbs.OnAcknowledgementPacket(s.chainA.GetContext(), 
path.EndpointA.GetChannel().Version, packet, []byte("ack"), nil) if tc.expErr == nil { - suite.Require().NoError(err) + s.Require().NoError(err) } else { - suite.Require().ErrorContains(err, tc.expErr.Error()) + s.Require().ErrorContains(err, tc.expErr.Error()) } }) } } } -func (suite *InterchainAccountsTestSuite) TestOnTimeoutPacket() { +func (s *InterchainAccountsTestSuite) TestOnTimeoutPacket() { var ( path *ibctesting.Path isNilApp bool @@ -652,12 +722,12 @@ func (suite *InterchainAccountsTestSuite) TestOnTimeoutPacket() { }, { "controller submodule disabled", func() { - suite.chainA.GetSimApp().ICAControllerKeeper.SetParams(suite.chainA.GetContext(), types.NewParams(false)) + s.chainA.GetSimApp().ICAControllerKeeper.SetParams(s.chainA.GetContext(), types.NewParams(false)) }, types.ErrControllerSubModuleDisabled, }, { "ICA auth module callback fails", func() { - suite.chainA.GetSimApp().ICAAuthModule.IBCApp.OnTimeoutPacket = func( + s.chainA.GetSimApp().ICAAuthModule.IBCApp.OnTimeoutPacket = func( ctx sdk.Context, channelVersion string, packet channeltypes.Packet, relayer sdk.AccAddress, ) error { return errors.New("mock ica auth fails") @@ -671,9 +741,9 @@ func (suite *InterchainAccountsTestSuite) TestOnTimeoutPacket() { }, { "middleware disabled", func() { - suite.chainA.GetSimApp().ICAControllerKeeper.DeleteMiddlewareEnabled(suite.chainA.GetContext(), path.EndpointA.ChannelConfig.PortID, path.EndpointA.ConnectionID) + s.chainA.GetSimApp().ICAControllerKeeper.DeleteMiddlewareEnabled(s.chainA.GetContext(), path.EndpointA.ChannelConfig.PortID, path.EndpointA.ConnectionID) - suite.chainA.GetSimApp().ICAAuthModule.IBCApp.OnTimeoutPacket = func( + s.chainA.GetSimApp().ICAAuthModule.IBCApp.OnTimeoutPacket = func( ctx sdk.Context, channelVersion string, packet channeltypes.Packet, relayer sdk.AccAddress, ) error { return errors.New("error should be unreachable") @@ -684,19 +754,19 @@ func (suite *InterchainAccountsTestSuite) TestOnTimeoutPacket() { for _, ordering := range []channeltypes.Order{channeltypes.UNORDERED, channeltypes.ORDERED} { for _, tc := range testCases { - suite.Run(tc.msg, func() { - suite.SetupTest() // reset + s.Run(tc.msg, func() { + s.SetupTest() // reset isNilApp = false - path = NewICAPath(suite.chainA, suite.chainB, ordering) + path = NewICAPath(s.chainA, s.chainB, ordering) path.SetupConnections() err := SetupICAPath(path, TestOwnerAddress) - suite.Require().NoError(err) + s.Require().NoError(err) packet := channeltypes.NewPacket( []byte("empty packet data"), - suite.chainA.SenderAccount.GetSequence(), + s.chainA.SenderAccount.GetSequence(), path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, @@ -707,26 +777,26 @@ func (suite *InterchainAccountsTestSuite) TestOnTimeoutPacket() { tc.malleate() // malleate mutates test data - cbs, ok := suite.chainA.App.GetIBCKeeper().PortKeeper.Route(path.EndpointA.ChannelConfig.PortID) - suite.Require().True(ok) + cbs, ok := s.chainA.App.GetIBCKeeper().PortKeeper.Route(path.EndpointA.ChannelConfig.PortID) + s.Require().True(ok) if isNilApp { - cbs = controller.NewIBCMiddleware(suite.chainA.GetSimApp().ICAControllerKeeper) + cbs = controller.NewIBCMiddleware(s.chainA.GetSimApp().ICAControllerKeeper) } - err = cbs.OnTimeoutPacket(suite.chainA.GetContext(), path.EndpointA.GetChannel().Version, packet, nil) + err = cbs.OnTimeoutPacket(s.chainA.GetContext(), path.EndpointA.GetChannel().Version, packet, nil) if tc.expErr == nil { - suite.Require().NoError(err) + s.Require().NoError(err) } 
else { - suite.Require().ErrorContains(err, tc.expErr.Error()) + s.Require().ErrorContains(err, tc.expErr.Error()) } }) } } } -func (suite *InterchainAccountsTestSuite) TestSingleHostMultipleControllers() { +func (s *InterchainAccountsTestSuite) TestSingleHostMultipleControllers() { var ( pathAToB *ibctesting.Path pathCToB *ibctesting.Path @@ -744,20 +814,20 @@ for _, ordering := range []channeltypes.Order{channeltypes.UNORDERED, channeltypes.ORDERED} { for _, tc := range testCases { - suite.Run(tc.msg, func() { + s.Run(tc.msg, func() { // reset - suite.SetupTest() + s.SetupTest() TestVersion = icatypes.NewDefaultMetadataString(ibctesting.FirstConnectionID, ibctesting.FirstConnectionID) // Setup a new path from A(controller) -> B(host) - pathAToB = NewICAPath(suite.chainA, suite.chainB, ordering) + pathAToB = NewICAPath(s.chainA, s.chainB, ordering) pathAToB.SetupConnections() err := SetupICAPath(pathAToB, TestOwnerAddress) - suite.Require().NoError(err) + s.Require().NoError(err) // Setup a new path from C(controller) -> B(host) - pathCToB = NewICAPath(suite.chainC, suite.chainB, ordering) + pathCToB = NewICAPath(s.chainC, s.chainB, ordering) pathCToB.SetupConnections() // NOTE: Here the version metadata is overridden to include the next host connection sequence (i.e. chainB's connection to chainC) @@ -771,103 +841,103 @@ })) err = SetupICAPath(pathCToB, TestOwnerAddress) - suite.Require().NoError(err) + s.Require().NoError(err) tc.malleate() // malleate mutates test data - accAddressChainA, found := suite.chainB.GetSimApp().ICAHostKeeper.GetInterchainAccountAddress(suite.chainB.GetContext(), pathAToB.EndpointB.ConnectionID, pathAToB.EndpointA.ChannelConfig.PortID) - suite.Require().True(found) + accAddressChainA, found := s.chainB.GetSimApp().ICAHostKeeper.GetInterchainAccountAddress(s.chainB.GetContext(), pathAToB.EndpointB.ConnectionID, pathAToB.EndpointA.ChannelConfig.PortID) + s.Require().True(found) - accAddressChainC, found := suite.chainB.GetSimApp().ICAHostKeeper.GetInterchainAccountAddress(suite.chainB.GetContext(), pathCToB.EndpointB.ConnectionID, pathCToB.EndpointA.ChannelConfig.PortID) - suite.Require().True(found) + accAddressChainC, found := s.chainB.GetSimApp().ICAHostKeeper.GetInterchainAccountAddress(s.chainB.GetContext(), pathCToB.EndpointB.ConnectionID, pathCToB.EndpointA.ChannelConfig.PortID) + s.Require().True(found) - suite.Require().NotEqual(accAddressChainA, accAddressChainC) + s.Require().NotEqual(accAddressChainA, accAddressChainC) - chainAChannelID, found := suite.chainB.GetSimApp().ICAHostKeeper.GetActiveChannelID(suite.chainB.GetContext(), pathAToB.EndpointB.ConnectionID, pathAToB.EndpointA.ChannelConfig.PortID) - suite.Require().True(found) + chainAChannelID, found := s.chainB.GetSimApp().ICAHostKeeper.GetActiveChannelID(s.chainB.GetContext(), pathAToB.EndpointB.ConnectionID, pathAToB.EndpointA.ChannelConfig.PortID) + s.Require().True(found) - chainCChannelID, found := suite.chainB.GetSimApp().ICAHostKeeper.GetActiveChannelID(suite.chainB.GetContext(), pathCToB.EndpointB.ConnectionID, pathCToB.EndpointA.ChannelConfig.PortID) - suite.Require().True(found) + chainCChannelID, found := s.chainB.GetSimApp().ICAHostKeeper.GetActiveChannelID(s.chainB.GetContext(), pathCToB.EndpointB.ConnectionID, pathCToB.EndpointA.ChannelConfig.PortID) + s.Require().True(found) - suite.Require().NotEqual(chainAChannelID,
chainCChannelID) + s.Require().NotEqual(chainAChannelID, chainCChannelID) }) } } } -func (suite *InterchainAccountsTestSuite) TestGetAppVersion() { +func (s *InterchainAccountsTestSuite) TestGetAppVersion() { for _, ordering := range []channeltypes.Order{channeltypes.UNORDERED, channeltypes.ORDERED} { - suite.SetupTest() // reset + s.SetupTest() // reset - path := NewICAPath(suite.chainA, suite.chainB, ordering) + path := NewICAPath(s.chainA, s.chainB, ordering) path.SetupConnections() err := SetupICAPath(path, TestOwnerAddress) - suite.Require().NoError(err) + s.Require().NoError(err) - cbs, ok := suite.chainA.App.GetIBCKeeper().PortKeeper.Route(path.EndpointA.ChannelConfig.PortID) - suite.Require().True(ok) + cbs, ok := s.chainA.App.GetIBCKeeper().PortKeeper.Route(path.EndpointA.ChannelConfig.PortID) + s.Require().True(ok) controllerStack, ok := cbs.(porttypes.ICS4Wrapper) - suite.Require().True(ok) + s.Require().True(ok) - appVersion, found := controllerStack.GetAppVersion(suite.chainA.GetContext(), path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID) - suite.Require().True(found) - suite.Require().Equal(path.EndpointA.ChannelConfig.Version, appVersion) + appVersion, found := controllerStack.GetAppVersion(s.chainA.GetContext(), path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID) + s.Require().True(found) + s.Require().Equal(path.EndpointA.ChannelConfig.Version, appVersion) } } -func (suite *InterchainAccountsTestSuite) TestInFlightHandshakeRespectsGoAPICaller() { +func (s *InterchainAccountsTestSuite) TestInFlightHandshakeRespectsGoAPICaller() { for _, ordering := range []channeltypes.Order{channeltypes.UNORDERED, channeltypes.ORDERED} { - suite.SetupTest() // reset + s.SetupTest() // reset - path := NewICAPath(suite.chainA, suite.chainB, ordering) + path := NewICAPath(s.chainA, s.chainB, ordering) path.SetupConnections() // initiate a channel handshake such that channel.State == INIT - err := RegisterInterchainAccount(path.EndpointA, suite.chainA.SenderAccount.GetAddress().String()) - suite.Require().NoError(err) + err := RegisterInterchainAccount(path.EndpointA, s.chainA.SenderAccount.GetAddress().String()) + s.Require().NoError(err) // attempt to start a second handshake via the controller msg server - msgServer := controllerkeeper.NewMsgServerImpl(&suite.chainA.GetSimApp().ICAControllerKeeper) - msgRegisterInterchainAccount := types.NewMsgRegisterInterchainAccount(path.EndpointA.ConnectionID, suite.chainA.SenderAccount.GetAddress().String(), TestVersion, ordering) + msgServer := controllerkeeper.NewMsgServerImpl(s.chainA.GetSimApp().ICAControllerKeeper) + msgRegisterInterchainAccount := types.NewMsgRegisterInterchainAccount(path.EndpointA.ConnectionID, s.chainA.SenderAccount.GetAddress().String(), TestVersion, ordering) - res, err := msgServer.RegisterInterchainAccount(suite.chainA.GetContext(), msgRegisterInterchainAccount) - suite.Require().Error(err) - suite.Require().Nil(res) + res, err := msgServer.RegisterInterchainAccount(s.chainA.GetContext(), msgRegisterInterchainAccount) + s.Require().Error(err) + s.Require().Nil(res) } } -func (suite *InterchainAccountsTestSuite) TestInFlightHandshakeRespectsMsgServerCaller() { +func (s *InterchainAccountsTestSuite) TestInFlightHandshakeRespectsMsgServerCaller() { for _, ordering := range []channeltypes.Order{channeltypes.UNORDERED, channeltypes.ORDERED} { - suite.SetupTest() // reset + s.SetupTest() // reset - path := NewICAPath(suite.chainA, suite.chainB, ordering) + path := NewICAPath(s.chainA, s.chainB, ordering) 
path.SetupConnections() // initiate a channel handshake such that channel.State == INIT - msgServer := controllerkeeper.NewMsgServerImpl(&suite.chainA.GetSimApp().ICAControllerKeeper) - msgRegisterInterchainAccount := types.NewMsgRegisterInterchainAccount(path.EndpointA.ConnectionID, suite.chainA.SenderAccount.GetAddress().String(), TestVersion, ordering) + msgServer := controllerkeeper.NewMsgServerImpl(s.chainA.GetSimApp().ICAControllerKeeper) + msgRegisterInterchainAccount := types.NewMsgRegisterInterchainAccount(path.EndpointA.ConnectionID, s.chainA.SenderAccount.GetAddress().String(), TestVersion, ordering) - res, err := msgServer.RegisterInterchainAccount(suite.chainA.GetContext(), msgRegisterInterchainAccount) - suite.Require().NotNil(res) - suite.Require().NoError(err) + res, err := msgServer.RegisterInterchainAccount(s.chainA.GetContext(), msgRegisterInterchainAccount) + s.Require().NotNil(res) + s.Require().NoError(err) // attempt to start a second handshake via the legacy Go API - err = RegisterInterchainAccount(path.EndpointA, suite.chainA.SenderAccount.GetAddress().String()) - suite.Require().Error(err) + err = RegisterInterchainAccount(path.EndpointA, s.chainA.SenderAccount.GetAddress().String()) + s.Require().Error(err) } } -func (suite *InterchainAccountsTestSuite) TestClosedChannelReopensWithMsgServer() { +func (s *InterchainAccountsTestSuite) TestClosedChannelReopensWithMsgServer() { for _, ordering := range []channeltypes.Order{channeltypes.UNORDERED, channeltypes.ORDERED} { - suite.SetupTest() // reset + s.SetupTest() // reset - path := NewICAPath(suite.chainA, suite.chainB, ordering) + path := NewICAPath(s.chainA, s.chainB, ordering) path.SetupConnections() - err := SetupICAPath(path, suite.chainA.SenderAccount.GetAddress().String()) - suite.Require().NoError(err) + err := SetupICAPath(path, s.chainA.SenderAccount.GetAddress().String()) + s.Require().NoError(err) // set the channel state to closed path.EndpointA.UpdateChannel(func(channel *channeltypes.Channel) { channel.State = channeltypes.CLOSED }) @@ -878,15 +948,15 @@ func (suite *InterchainAccountsTestSuite) TestClosedChannelReopensWithMsgServer( path.EndpointB.ChannelID = "" // fetch the next channel sequence before reinitiating the channel handshake - channelSeq := suite.chainA.GetSimApp().GetIBCKeeper().ChannelKeeper.GetNextChannelSequence(suite.chainA.GetContext()) + channelSeq := s.chainA.GetSimApp().GetIBCKeeper().ChannelKeeper.GetNextChannelSequence(s.chainA.GetContext()) // route a new MsgRegisterInterchainAccount in order to reopen the - msgServer := controllerkeeper.NewMsgServerImpl(&suite.chainA.GetSimApp().ICAControllerKeeper) - msgRegisterInterchainAccount := types.NewMsgRegisterInterchainAccount(path.EndpointA.ConnectionID, suite.chainA.SenderAccount.GetAddress().String(), path.EndpointA.ChannelConfig.Version, ordering) + msgServer := controllerkeeper.NewMsgServerImpl(s.chainA.GetSimApp().ICAControllerKeeper) + msgRegisterInterchainAccount := types.NewMsgRegisterInterchainAccount(path.EndpointA.ConnectionID, s.chainA.SenderAccount.GetAddress().String(), path.EndpointA.ChannelConfig.Version, ordering) - res, err := msgServer.RegisterInterchainAccount(suite.chainA.GetContext(), msgRegisterInterchainAccount) - suite.Require().NoError(err) - suite.Require().Equal(channeltypes.FormatChannelIdentifier(channelSeq), res.ChannelId) + res, err := msgServer.RegisterInterchainAccount(s.chainA.GetContext(), msgRegisterInterchainAccount) + s.Require().NoError(err) + 
s.Require().Equal(channeltypes.FormatChannelIdentifier(channelSeq), res.ChannelId) // assign the channel sequence to endpointA before generating proofs and initiating the TRY step path.EndpointA.ChannelID = channeltypes.FormatChannelIdentifier(channelSeq) @@ -894,24 +964,24 @@ func (suite *InterchainAccountsTestSuite) TestClosedChannelReopensWithMsgServer( path.EndpointA.Chain.NextBlock() err = path.EndpointB.ChanOpenTry() - suite.Require().NoError(err) + s.Require().NoError(err) err = path.EndpointA.ChanOpenAck() - suite.Require().NoError(err) + s.Require().NoError(err) err = path.EndpointB.ChanOpenConfirm() - suite.Require().NoError(err) + s.Require().NoError(err) } } -func (suite *InterchainAccountsTestSuite) TestPacketDataUnmarshalerInterface() { +func (s *InterchainAccountsTestSuite) TestPacketDataUnmarshalerInterface() { for _, ordering := range []channeltypes.Order{channeltypes.UNORDERED, channeltypes.ORDERED} { - suite.SetupTest() // reset + s.SetupTest() // reset - path := NewICAPath(suite.chainA, suite.chainB, ordering) + path := NewICAPath(s.chainA, s.chainB, ordering) path.SetupConnections() err := SetupICAPath(path, TestOwnerAddress) - suite.Require().NoError(err) + s.Require().NoError(err) expPacketData := icatypes.InterchainAccountPacketData{ Type: icatypes.EXECUTE_TX, @@ -919,18 +989,18 @@ func (suite *InterchainAccountsTestSuite) TestPacketDataUnmarshalerInterface() { Memo: "", } - controllerMiddleware := controller.NewIBCMiddleware(suite.chainA.GetSimApp().ICAControllerKeeper) - packetData, version, err := controllerMiddleware.UnmarshalPacketData(suite.chainA.GetContext(), path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, expPacketData.GetBytes()) - suite.Require().NoError(err) - suite.Require().Equal(version, path.EndpointA.ChannelConfig.Version) - suite.Require().Equal(expPacketData, packetData) + controllerMiddleware := controller.NewIBCMiddleware(s.chainA.GetSimApp().ICAControllerKeeper) + packetData, version, err := controllerMiddleware.UnmarshalPacketData(s.chainA.GetContext(), path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, expPacketData.GetBytes()) + s.Require().NoError(err) + s.Require().Equal(version, path.EndpointA.ChannelConfig.Version) + s.Require().Equal(expPacketData, packetData) // test invalid packet data invalidPacketData := []byte("invalid packet data") // Context, port identifier and channel identifier are not used for controller. - packetData, version, err = controllerMiddleware.UnmarshalPacketData(suite.chainA.GetContext(), path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, invalidPacketData) - suite.Require().Error(err) - suite.Require().Empty(version) - suite.Require().Nil(packetData) + packetData, version, err = controllerMiddleware.UnmarshalPacketData(s.chainA.GetContext(), path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, invalidPacketData) + s.Require().Error(err) + s.Require().Empty(version) + s.Require().Nil(packetData) } } diff --git a/modules/apps/27-interchain-accounts/controller/keeper/account.go b/modules/apps/27-interchain-accounts/controller/keeper/account.go index 8e0d334dfdc..70bf1e064da 100644 --- a/modules/apps/27-interchain-accounts/controller/keeper/account.go +++ b/modules/apps/27-interchain-accounts/controller/keeper/account.go @@ -28,7 +28,7 @@ import ( // Prior to v6.x.x of ibc-go, the controller module was only functional as middleware, with authentication performed // by the underlying application. For a full summary of the changes in v6.x.x, please see ADR009. 
// This API will be removed in later releases. -func (k Keeper) RegisterInterchainAccount(ctx sdk.Context, connectionID, owner, version string, +func (k *Keeper) RegisterInterchainAccount(ctx sdk.Context, connectionID, owner, version string, ordering channeltypes.Order, ) error { portID, err := icatypes.NewControllerPortID(owner) @@ -57,7 +57,7 @@ func (k Keeper) RegisterInterchainAccount(ctx sdk.Context, connectionID, owner, // registerInterchainAccount registers an interchain account, returning the channel id of the MsgChannelOpenInitResponse // and an error if one occurred. -func (k Keeper) registerInterchainAccount(ctx sdk.Context, connectionID, portID, version string, +func (k *Keeper) registerInterchainAccount(ctx sdk.Context, connectionID, portID, version string, ordering channeltypes.Order, ) (string, error) { // if there is an active channel for this portID / connectionID return an error diff --git a/modules/apps/27-interchain-accounts/controller/keeper/account_test.go b/modules/apps/27-interchain-accounts/controller/keeper/account_test.go index fe0c8ae7834..742064afe48 100644 --- a/modules/apps/27-interchain-accounts/controller/keeper/account_test.go +++ b/modules/apps/27-interchain-accounts/controller/keeper/account_test.go @@ -6,7 +6,7 @@ import ( ibctesting "github.com/cosmos/ibc-go/v10/testing" ) -func (suite *KeeperTestSuite) TestRegisterInterchainAccount() { +func (s *KeeperTestSuite) TestRegisterInterchainAccount() { for _, ordering := range []channeltypes.Order{channeltypes.UNORDERED, channeltypes.ORDERED} { var ( owner string @@ -33,12 +33,12 @@ func (suite *KeeperTestSuite) TestRegisterInterchainAccount() { "MsgChanOpenInit fails - channel is already active & in state OPEN", func() { portID, err := icatypes.NewControllerPortID(TestOwnerAddress) - suite.Require().NoError(err) + s.Require().NoError(err) - channelID := channeltypes.FormatChannelIdentifier(suite.chainA.GetSimApp().IBCKeeper.ChannelKeeper.GetNextChannelSequence(suite.chainA.GetContext())) + channelID := channeltypes.FormatChannelIdentifier(s.chainA.GetSimApp().IBCKeeper.ChannelKeeper.GetNextChannelSequence(s.chainA.GetContext())) path.EndpointA.ChannelID = channelID - suite.chainA.GetSimApp().ICAControllerKeeper.SetActiveChannelID(suite.chainA.GetContext(), ibctesting.FirstConnectionID, portID, path.EndpointA.ChannelID) + s.chainA.GetSimApp().ICAControllerKeeper.SetActiveChannelID(s.chainA.GetContext(), ibctesting.FirstConnectionID, portID, path.EndpointA.ChannelID) counterparty := channeltypes.NewCounterparty(path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID) channel := channeltypes.Channel{ @@ -48,45 +48,45 @@ func (suite *KeeperTestSuite) TestRegisterInterchainAccount() { ConnectionHops: []string{path.EndpointA.ConnectionID}, Version: TestVersion, } - suite.chainA.GetSimApp().IBCKeeper.ChannelKeeper.SetChannel(suite.chainA.GetContext(), portID, path.EndpointA.ChannelID, channel) + s.chainA.GetSimApp().IBCKeeper.ChannelKeeper.SetChannel(s.chainA.GetContext(), portID, path.EndpointA.ChannelID, channel) }, icatypes.ErrActiveChannelAlreadySet, }, } for _, tc := range testCases { - suite.Run(tc.name, func() { - suite.SetupTest() + s.Run(tc.name, func() { + s.SetupTest() owner = TestOwnerAddress // must be explicitly changed - path = NewICAPath(suite.chainA, suite.chainB, ordering) + path = NewICAPath(s.chainA, s.chainB, ordering) path.SetupConnections() tc.malleate() // malleate mutates test data - err = 
suite.chainA.GetSimApp().ICAControllerKeeper.RegisterInterchainAccount(suite.chainA.GetContext(), path.EndpointA.ConnectionID, owner, TestVersion, ordering) + err = s.chainA.GetSimApp().ICAControllerKeeper.RegisterInterchainAccount(s.chainA.GetContext(), path.EndpointA.ConnectionID, owner, TestVersion, ordering) if tc.expErr == nil { - suite.Require().NoError(err) + s.Require().NoError(err) } else { - suite.Require().ErrorIs(err, tc.expErr) + s.Require().ErrorIs(err, tc.expErr) } }) } } } -func (suite *KeeperTestSuite) TestRegisterSameOwnerMultipleConnections() { +func (s *KeeperTestSuite) TestRegisterSameOwnerMultipleConnections() { for _, ordering := range []channeltypes.Order{channeltypes.UNORDERED, channeltypes.ORDERED} { - suite.SetupTest() + s.SetupTest() owner := TestOwnerAddress - pathAToB := NewICAPath(suite.chainA, suite.chainB, ordering) + pathAToB := NewICAPath(s.chainA, s.chainB, ordering) pathAToB.SetupConnections() - pathAToC := NewICAPath(suite.chainA, suite.chainC, ordering) + pathAToC := NewICAPath(s.chainA, s.chainC, ordering) pathAToC.SetupConnections() // build ICS27 metadata with connection identifiers for path A->B @@ -98,8 +98,8 @@ func (suite *KeeperTestSuite) TestRegisterSameOwnerMultipleConnections() { TxType: icatypes.TxTypeSDKMultiMsg, } - err := suite.chainA.GetSimApp().ICAControllerKeeper.RegisterInterchainAccount(suite.chainA.GetContext(), pathAToB.EndpointA.ConnectionID, owner, string(icatypes.ModuleCdc.MustMarshalJSON(metadata)), ordering) - suite.Require().NoError(err) + err := s.chainA.GetSimApp().ICAControllerKeeper.RegisterInterchainAccount(s.chainA.GetContext(), pathAToB.EndpointA.ConnectionID, owner, string(icatypes.ModuleCdc.MustMarshalJSON(metadata)), ordering) + s.Require().NoError(err) // build ICS27 metadata with connection identifiers for path A->C metadata = &icatypes.Metadata{ @@ -110,7 +110,7 @@ func (suite *KeeperTestSuite) TestRegisterSameOwnerMultipleConnections() { TxType: icatypes.TxTypeSDKMultiMsg, } - err = suite.chainA.GetSimApp().ICAControllerKeeper.RegisterInterchainAccount(suite.chainA.GetContext(), pathAToC.EndpointA.ConnectionID, owner, string(icatypes.ModuleCdc.MustMarshalJSON(metadata)), ordering) - suite.Require().NoError(err) + err = s.chainA.GetSimApp().ICAControllerKeeper.RegisterInterchainAccount(s.chainA.GetContext(), pathAToC.EndpointA.ConnectionID, owner, string(icatypes.ModuleCdc.MustMarshalJSON(metadata)), ordering) + s.Require().NoError(err) } } diff --git a/modules/apps/27-interchain-accounts/controller/keeper/export_test.go b/modules/apps/27-interchain-accounts/controller/keeper/export_test.go index 19e345bfd1a..984479e4663 100644 --- a/modules/apps/27-interchain-accounts/controller/keeper/export_test.go +++ b/modules/apps/27-interchain-accounts/controller/keeper/export_test.go @@ -11,6 +11,6 @@ import ( ) // GetAppMetadata is a wrapper around getAppMetadata to allow the function to be directly called in tests. 
-func (k Keeper) GetAppMetadata(ctx sdk.Context, portID, channelID string) (icatypes.Metadata, error) { +func (k *Keeper) GetAppMetadata(ctx sdk.Context, portID, channelID string) (icatypes.Metadata, error) { return k.getAppMetadata(ctx, portID, channelID) } diff --git a/modules/apps/27-interchain-accounts/controller/keeper/genesis_test.go b/modules/apps/27-interchain-accounts/controller/keeper/genesis_test.go index c75745020eb..6707c81fafe 100644 --- a/modules/apps/27-interchain-accounts/controller/keeper/genesis_test.go +++ b/modules/apps/27-interchain-accounts/controller/keeper/genesis_test.go @@ -9,7 +9,7 @@ import ( ibctesting "github.com/cosmos/ibc-go/v10/testing" ) -func (suite *KeeperTestSuite) TestInitGenesis() { +func (s *KeeperTestSuite) TestInitGenesis() { ports := []string{"port1", "port2", "port3"} testCases := []struct { @@ -21,7 +21,7 @@ func (suite *KeeperTestSuite) TestInitGenesis() { }, } - interchainAccAddr := icatypes.GenerateAddress(suite.chainB.GetContext(), ibctesting.FirstConnectionID, TestPortID) + interchainAccAddr := icatypes.GenerateAddress(s.chainB.GetContext(), ibctesting.FirstConnectionID, TestPortID) genesisState := genesistypes.ControllerGenesisState{ ActiveChannels: []genesistypes.ActiveChannel{ { @@ -47,64 +47,64 @@ func (suite *KeeperTestSuite) TestInitGenesis() { Ports: ports, } for _, tc := range testCases { - suite.Run(tc.name, func() { - suite.SetupTest() + s.Run(tc.name, func() { + s.SetupTest() tc.malleate() - keeper.InitGenesis(suite.chainA.GetContext(), suite.chainA.GetSimApp().ICAControllerKeeper, genesisState) + keeper.InitGenesis(s.chainA.GetContext(), *s.chainA.GetSimApp().ICAControllerKeeper, genesisState) - channelID, found := suite.chainA.GetSimApp().ICAControllerKeeper.GetActiveChannelID(suite.chainA.GetContext(), ibctesting.FirstConnectionID, TestPortID) - suite.Require().True(found) - suite.Require().Equal(ibctesting.FirstChannelID, channelID) + channelID, found := s.chainA.GetSimApp().ICAControllerKeeper.GetActiveChannelID(s.chainA.GetContext(), ibctesting.FirstConnectionID, TestPortID) + s.Require().True(found) + s.Require().Equal(ibctesting.FirstChannelID, channelID) - isMiddlewareEnabled := suite.chainA.GetSimApp().ICAControllerKeeper.IsMiddlewareEnabled(suite.chainA.GetContext(), TestPortID, ibctesting.FirstConnectionID) - suite.Require().True(isMiddlewareEnabled) + isMiddlewareEnabled := s.chainA.GetSimApp().ICAControllerKeeper.IsMiddlewareEnabled(s.chainA.GetContext(), TestPortID, ibctesting.FirstConnectionID) + s.Require().True(isMiddlewareEnabled) - isMiddlewareDisabled := suite.chainA.GetSimApp().ICAControllerKeeper.IsMiddlewareDisabled(suite.chainA.GetContext(), "test-port-1", "connection-1") - suite.Require().True(isMiddlewareDisabled) + isMiddlewareDisabled := s.chainA.GetSimApp().ICAControllerKeeper.IsMiddlewareDisabled(s.chainA.GetContext(), "test-port-1", "connection-1") + s.Require().True(isMiddlewareDisabled) - accountAdrr, found := suite.chainA.GetSimApp().ICAControllerKeeper.GetInterchainAccountAddress(suite.chainA.GetContext(), ibctesting.FirstConnectionID, TestPortID) - suite.Require().True(found) - suite.Require().Equal(interchainAccAddr.String(), accountAdrr) + accountAdrr, found := s.chainA.GetSimApp().ICAControllerKeeper.GetInterchainAccountAddress(s.chainA.GetContext(), ibctesting.FirstConnectionID, TestPortID) + s.Require().True(found) + s.Require().Equal(interchainAccAddr.String(), accountAdrr) expParams := types.NewParams(false) - params := 
suite.chainA.GetSimApp().ICAControllerKeeper.GetParams(suite.chainA.GetContext()) - suite.Require().Equal(expParams, params) + params := s.chainA.GetSimApp().ICAControllerKeeper.GetParams(s.chainA.GetContext()) + s.Require().Equal(expParams, params) for _, port := range ports { - store := suite.chainA.GetContext().KVStore(suite.chainA.GetSimApp().GetKey(types.StoreKey)) - suite.Require().True(store.Has(icatypes.KeyPort(port))) + store := s.chainA.GetContext().KVStore(s.chainA.GetSimApp().GetKey(types.StoreKey)) + s.Require().True(store.Has(icatypes.KeyPort(port))) } }) } } -func (suite *KeeperTestSuite) TestExportGenesis() { +func (s *KeeperTestSuite) TestExportGenesis() { for _, ordering := range []channeltypes.Order{channeltypes.UNORDERED, channeltypes.ORDERED} { - suite.SetupTest() + s.SetupTest() - path := NewICAPath(suite.chainA, suite.chainB, ordering) + path := NewICAPath(s.chainA, s.chainB, ordering) path.SetupConnections() err := SetupICAPath(path, TestOwnerAddress) - suite.Require().NoError(err) + s.Require().NoError(err) - interchainAccAddr, exists := suite.chainB.GetSimApp().ICAHostKeeper.GetInterchainAccountAddress(suite.chainB.GetContext(), path.EndpointB.ConnectionID, path.EndpointA.ChannelConfig.PortID) - suite.Require().True(exists) + interchainAccAddr, exists := s.chainB.GetSimApp().ICAHostKeeper.GetInterchainAccountAddress(s.chainB.GetContext(), path.EndpointB.ConnectionID, path.EndpointA.ChannelConfig.PortID) + s.Require().True(exists) - genesisState := keeper.ExportGenesis(suite.chainA.GetContext(), suite.chainA.GetSimApp().ICAControllerKeeper) + genesisState := keeper.ExportGenesis(s.chainA.GetContext(), *s.chainA.GetSimApp().ICAControllerKeeper) - suite.Require().Equal(path.EndpointA.ChannelID, genesisState.ActiveChannels[0].ChannelId) - suite.Require().Equal(path.EndpointA.ChannelConfig.PortID, genesisState.ActiveChannels[0].PortId) - suite.Require().True(genesisState.ActiveChannels[0].IsMiddlewareEnabled) + s.Require().Equal(path.EndpointA.ChannelID, genesisState.ActiveChannels[0].ChannelId) + s.Require().Equal(path.EndpointA.ChannelConfig.PortID, genesisState.ActiveChannels[0].PortId) + s.Require().True(genesisState.ActiveChannels[0].IsMiddlewareEnabled) - suite.Require().Equal(interchainAccAddr, genesisState.InterchainAccounts[0].AccountAddress) - suite.Require().Equal(path.EndpointA.ChannelConfig.PortID, genesisState.InterchainAccounts[0].PortId) + s.Require().Equal(interchainAccAddr, genesisState.InterchainAccounts[0].AccountAddress) + s.Require().Equal(path.EndpointA.ChannelConfig.PortID, genesisState.InterchainAccounts[0].PortId) - suite.Require().Equal([]string{TestPortID}, genesisState.GetPorts()) + s.Require().Equal([]string{TestPortID}, genesisState.GetPorts()) expParams := types.DefaultParams() - suite.Require().Equal(expParams, genesisState.GetParams()) + s.Require().Equal(expParams, genesisState.GetParams()) } } diff --git a/modules/apps/27-interchain-accounts/controller/keeper/grpc_query.go b/modules/apps/27-interchain-accounts/controller/keeper/grpc_query.go index 0148ac2f1e9..368ca0a36c8 100644 --- a/modules/apps/27-interchain-accounts/controller/keeper/grpc_query.go +++ b/modules/apps/27-interchain-accounts/controller/keeper/grpc_query.go @@ -15,7 +15,7 @@ import ( var _ types.QueryServer = (*Keeper)(nil) // InterchainAccount implements the Query/InterchainAccount gRPC method -func (k Keeper) InterchainAccount(goCtx context.Context, req *types.QueryInterchainAccountRequest) (*types.QueryInterchainAccountResponse, error) { +func (k *Keeper) 
InterchainAccount(goCtx context.Context, req *types.QueryInterchainAccountRequest) (*types.QueryInterchainAccountResponse, error) { if req == nil { return nil, status.Error(codes.InvalidArgument, "empty request") } @@ -38,7 +38,7 @@ func (k Keeper) InterchainAccount(goCtx context.Context, req *types.QueryInterch } // Params implements the Query/Params gRPC method -func (k Keeper) Params(goCtx context.Context, _ *types.QueryParamsRequest) (*types.QueryParamsResponse, error) { +func (k *Keeper) Params(goCtx context.Context, _ *types.QueryParamsRequest) (*types.QueryParamsResponse, error) { ctx := sdk.UnwrapSDKContext(goCtx) params := k.GetParams(ctx) diff --git a/modules/apps/27-interchain-accounts/controller/keeper/grpc_query_test.go b/modules/apps/27-interchain-accounts/controller/keeper/grpc_query_test.go index 7b56ad7beb1..4051cb5eac0 100644 --- a/modules/apps/27-interchain-accounts/controller/keeper/grpc_query_test.go +++ b/modules/apps/27-interchain-accounts/controller/keeper/grpc_query_test.go @@ -6,7 +6,7 @@ import ( ibctesting "github.com/cosmos/ibc-go/v10/testing" ) -func (suite *KeeperTestSuite) TestQueryInterchainAccount() { +func (s *KeeperTestSuite) TestQueryInterchainAccount() { var req *types.QueryInterchainAccountRequest testCases := []struct { @@ -44,14 +44,14 @@ func (suite *KeeperTestSuite) TestQueryInterchainAccount() { for _, ordering := range []channeltypes.Order{channeltypes.UNORDERED, channeltypes.ORDERED} { for _, tc := range testCases { - suite.Run(tc.name, func() { - suite.SetupTest() + s.Run(tc.name, func() { + s.SetupTest() - path := NewICAPath(suite.chainA, suite.chainB, ordering) + path := NewICAPath(s.chainA, s.chainB, ordering) path.SetupConnections() err := SetupICAPath(path, ibctesting.TestAccAddress) - suite.Require().NoError(err) + s.Require().NoError(err) req = &types.QueryInterchainAccountRequest{ ConnectionId: ibctesting.FirstConnectionID, @@ -60,25 +60,25 @@ func (suite *KeeperTestSuite) TestQueryInterchainAccount() { tc.malleate() - res, err := suite.chainA.GetSimApp().ICAControllerKeeper.InterchainAccount(suite.chainA.GetContext(), req) + res, err := s.chainA.GetSimApp().ICAControllerKeeper.InterchainAccount(s.chainA.GetContext(), req) if tc.errMsg == "" { - expAddress, exists := suite.chainB.GetSimApp().ICAHostKeeper.GetInterchainAccountAddress(suite.chainB.GetContext(), path.EndpointB.ConnectionID, path.EndpointA.ChannelConfig.PortID) - suite.Require().True(exists) + expAddress, exists := s.chainB.GetSimApp().ICAHostKeeper.GetInterchainAccountAddress(s.chainB.GetContext(), path.EndpointB.ConnectionID, path.EndpointA.ChannelConfig.PortID) + s.Require().True(exists) - suite.Require().NoError(err) - suite.Require().Equal(expAddress, res.Address) + s.Require().NoError(err) + s.Require().Equal(expAddress, res.Address) } else { - suite.Require().ErrorContains(err, tc.errMsg) + s.Require().ErrorContains(err, tc.errMsg) } }) } } } -func (suite *KeeperTestSuite) TestQueryParams() { - ctx := suite.chainA.GetContext() +func (s *KeeperTestSuite) TestQueryParams() { + ctx := s.chainA.GetContext() expParams := types.DefaultParams() - res, _ := suite.chainA.GetSimApp().ICAControllerKeeper.Params(ctx, &types.QueryParamsRequest{}) - suite.Require().Equal(&expParams, res.Params) + res, _ := s.chainA.GetSimApp().ICAControllerKeeper.Params(ctx, &types.QueryParamsRequest{}) + s.Require().Equal(&expParams, res.Params) } diff --git a/modules/apps/27-interchain-accounts/controller/keeper/handshake.go b/modules/apps/27-interchain-accounts/controller/keeper/handshake.go 
index 5b557b63b4e..2221b5ca78c 100644 --- a/modules/apps/27-interchain-accounts/controller/keeper/handshake.go +++ b/modules/apps/27-interchain-accounts/controller/keeper/handshake.go @@ -17,7 +17,7 @@ import ( // The counterparty port identifier must be the host chain representation as defined in the types package, // the channel version must be equal to the version in the types package, // there must not be an active channel for the specified port identifier. -func (k Keeper) OnChanOpenInit( +func (k *Keeper) OnChanOpenInit( ctx sdk.Context, order channeltypes.Order, connectionHops []string, @@ -86,7 +86,7 @@ func (k Keeper) OnChanOpenInit( // OnChanOpenAck sets the active channel for the interchain account/owner pair // and stores the associated interchain account address in state keyed by its corresponding port identifier -func (k Keeper) OnChanOpenAck( +func (k *Keeper) OnChanOpenAck( ctx sdk.Context, portID, channelID string, @@ -128,7 +128,7 @@ func (k Keeper) OnChanOpenAck( } // OnChanCloseConfirm removes the active channel stored in state -func (Keeper) OnChanCloseConfirm( +func (*Keeper) OnChanCloseConfirm( ctx sdk.Context, portID, channelID string, @@ -150,7 +150,7 @@ func (Keeper) OnChanCloseConfirm( // - connectionHops (and subsequently host/controller connectionIDs) // - interchain account address // - ICS27 protocol version -func (k Keeper) OnChanUpgradeInit(ctx sdk.Context, portID, channelID string, proposedOrder channeltypes.Order, proposedConnectionHops []string, proposedversion string) (string, error) { +func (k *Keeper) OnChanUpgradeInit(ctx sdk.Context, portID, channelID string, proposedOrder channeltypes.Order, proposedConnectionHops []string, proposedversion string) (string, error) { // verify connection hops has not changed connectionID, err := k.GetConnectionID(ctx, portID, channelID) if err != nil { @@ -213,7 +213,7 @@ func (k Keeper) OnChanUpgradeInit(ctx sdk.Context, portID, channelID string, pro // - host connectionID // - interchain account address // - ICS27 protocol version -func (k Keeper) OnChanUpgradeAck(ctx sdk.Context, portID, channelID, counterpartyVersion string) error { +func (k *Keeper) OnChanUpgradeAck(ctx sdk.Context, portID, channelID, counterpartyVersion string) error { if strings.TrimSpace(counterpartyVersion) == "" { return errorsmod.Wrap(channeltypes.ErrInvalidChannelVersion, "counterparty version cannot be empty") } diff --git a/modules/apps/27-interchain-accounts/controller/keeper/handshake_test.go b/modules/apps/27-interchain-accounts/controller/keeper/handshake_test.go index 6616a074f87..959bb8ccba2 100644 --- a/modules/apps/27-interchain-accounts/controller/keeper/handshake_test.go +++ b/modules/apps/27-interchain-accounts/controller/keeper/handshake_test.go @@ -8,7 +8,7 @@ import ( ibctesting "github.com/cosmos/ibc-go/v10/testing" ) -func (suite *KeeperTestSuite) TestOnChanOpenInit() { +func (s *KeeperTestSuite) TestOnChanOpenInit() { for _, ordering := range []channeltypes.Order{channeltypes.UNORDERED, channeltypes.ORDERED} { var ( channel *channeltypes.Channel @@ -30,7 +30,7 @@ func (suite *KeeperTestSuite) TestOnChanOpenInit() { { "success: previous active channel closed", func() { - suite.chainA.GetSimApp().ICAControllerKeeper.SetActiveChannelID(suite.chainA.GetContext(), ibctesting.FirstConnectionID, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID) + s.chainA.GetSimApp().ICAControllerKeeper.SetActiveChannelID(s.chainA.GetContext(), ibctesting.FirstConnectionID, path.EndpointA.ChannelConfig.PortID, 
path.EndpointA.ChannelID) counterparty := channeltypes.NewCounterparty(path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID) channel := channeltypes.Channel{ @@ -57,7 +57,7 @@ func (suite *KeeperTestSuite) TestOnChanOpenInit() { "success: channel reopening", func() { err := SetupICAPath(path, TestOwnerAddress) - suite.Require().NoError(err) + s.Require().NoError(err) path.EndpointA.UpdateChannel(func(channel *channeltypes.Channel) { channel.State = channeltypes.CLOSED }) path.EndpointB.UpdateChannel(func(channel *channeltypes.Channel) { channel.State = channeltypes.CLOSED }) @@ -75,7 +75,7 @@ func (suite *KeeperTestSuite) TestOnChanOpenInit() { differentOrdering = channeltypes.ORDERED } - suite.chainA.GetSimApp().ICAControllerKeeper.SetActiveChannelID(suite.chainA.GetContext(), ibctesting.FirstConnectionID, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID) + s.chainA.GetSimApp().ICAControllerKeeper.SetActiveChannelID(s.chainA.GetContext(), ibctesting.FirstConnectionID, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID) counterparty := channeltypes.NewCounterparty(path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID) channel := channeltypes.Channel{ @@ -94,13 +94,13 @@ func (suite *KeeperTestSuite) TestOnChanOpenInit() { "invalid metadata - previous metadata is different", func() { // set active channel to closed - suite.chainA.GetSimApp().ICAControllerKeeper.SetActiveChannelID(suite.chainA.GetContext(), ibctesting.FirstConnectionID, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID) + s.chainA.GetSimApp().ICAControllerKeeper.SetActiveChannelID(s.chainA.GetContext(), ibctesting.FirstConnectionID, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID) // attempt to downgrade version by reinitializing channel with version 1, but setting channel to version 2 metadata.Version = "ics27-2" versionBytes, err := icatypes.ModuleCdc.MarshalJSON(&metadata) - suite.Require().NoError(err) + s.Require().NoError(err) counterparty := channeltypes.NewCounterparty(path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID) closedChannel := channeltypes.Channel{ @@ -143,7 +143,7 @@ func (suite *KeeperTestSuite) TestOnChanOpenInit() { metadata.Encoding = "invalid-encoding-format" versionBytes, err := icatypes.ModuleCdc.MarshalJSON(&metadata) - suite.Require().NoError(err) + s.Require().NoError(err) channel.Version = string(versionBytes) path.EndpointA.SetChannel(*channel) @@ -156,7 +156,7 @@ func (suite *KeeperTestSuite) TestOnChanOpenInit() { metadata.TxType = "invalid-tx-types" versionBytes, err := icatypes.ModuleCdc.MarshalJSON(&metadata) - suite.Require().NoError(err) + s.Require().NoError(err) channel.Version = string(versionBytes) path.EndpointA.SetChannel(*channel) @@ -185,7 +185,7 @@ func (suite *KeeperTestSuite) TestOnChanOpenInit() { metadata.ControllerConnectionId = ibctesting.InvalidID versionBytes, err := icatypes.ModuleCdc.MarshalJSON(&metadata) - suite.Require().NoError(err) + s.Require().NoError(err) channel.Version = string(versionBytes) path.EndpointA.SetChannel(*channel) @@ -198,7 +198,7 @@ func (suite *KeeperTestSuite) TestOnChanOpenInit() { metadata.HostConnectionId = ibctesting.InvalidID versionBytes, err := icatypes.ModuleCdc.MarshalJSON(&metadata) - suite.Require().NoError(err) + s.Require().NoError(err) channel.Version = string(versionBytes) path.EndpointA.SetChannel(*channel) @@ -211,7 +211,7 @@ func (suite *KeeperTestSuite) TestOnChanOpenInit() { metadata.Version = "invalid-version" versionBytes, err := 
icatypes.ModuleCdc.MarshalJSON(&metadata) - suite.Require().NoError(err) + s.Require().NoError(err) channel.Version = string(versionBytes) path.EndpointA.SetChannel(*channel) @@ -221,7 +221,7 @@ func (suite *KeeperTestSuite) TestOnChanOpenInit() { { "channel is already active (OPEN state)", func() { - suite.chainA.GetSimApp().ICAControllerKeeper.SetActiveChannelID(suite.chainA.GetContext(), ibctesting.FirstConnectionID, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID) + s.chainA.GetSimApp().ICAControllerKeeper.SetActiveChannelID(s.chainA.GetContext(), ibctesting.FirstConnectionID, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID) counterparty := channeltypes.NewCounterparty(path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID) channel := channeltypes.Channel{ @@ -231,29 +231,29 @@ func (suite *KeeperTestSuite) TestOnChanOpenInit() { ConnectionHops: []string{path.EndpointA.ConnectionID}, Version: TestVersion, } - suite.chainA.GetSimApp().IBCKeeper.ChannelKeeper.SetChannel(suite.chainA.GetContext(), path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, channel) + s.chainA.GetSimApp().IBCKeeper.ChannelKeeper.SetChannel(s.chainA.GetContext(), path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, channel) }, icatypes.ErrActiveChannelAlreadySet, }, } for _, tc := range testCases { - suite.Run(tc.name, func() { - suite.SetupTest() // reset + s.Run(tc.name, func() { + s.SetupTest() // reset - path = NewICAPath(suite.chainA, suite.chainB, ordering) + path = NewICAPath(s.chainA, s.chainB, ordering) path.SetupConnections() // mock init interchain account portID, err := icatypes.NewControllerPortID(TestOwnerAddress) - suite.Require().NoError(err) + s.Require().NoError(err) path.EndpointA.ChannelConfig.PortID = portID // default values metadata = icatypes.NewMetadata(icatypes.Version, path.EndpointA.ConnectionID, path.EndpointB.ConnectionID, "", icatypes.EncodingProtobuf, icatypes.TxTypeSDKMultiMsg) versionBytes, err := icatypes.ModuleCdc.MarshalJSON(&metadata) - suite.Require().NoError(err) + s.Require().NoError(err) expectedVersion = string(versionBytes) @@ -266,28 +266,28 @@ func (suite *KeeperTestSuite) TestOnChanOpenInit() { Version: string(versionBytes), } - channelID := channeltypes.FormatChannelIdentifier(suite.chainA.GetSimApp().IBCKeeper.ChannelKeeper.GetNextChannelSequence(suite.chainA.GetContext())) + channelID := channeltypes.FormatChannelIdentifier(s.chainA.GetSimApp().IBCKeeper.ChannelKeeper.GetNextChannelSequence(s.chainA.GetContext())) path.EndpointA.ChannelID = channelID tc.malleate() // malleate mutates test data - version, err := suite.chainA.GetSimApp().ICAControllerKeeper.OnChanOpenInit(suite.chainA.GetContext(), channel.Ordering, channel.ConnectionHops, + version, err := s.chainA.GetSimApp().ICAControllerKeeper.OnChanOpenInit(s.chainA.GetContext(), channel.Ordering, channel.ConnectionHops, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, channel.Counterparty, channel.Version, ) if tc.expError == nil { - suite.Require().NoError(err) - suite.Require().Equal(expectedVersion, version) + s.Require().NoError(err) + s.Require().Equal(expectedVersion, version) } else { - suite.Require().Error(err) - suite.Require().ErrorIs(err, tc.expError) + s.Require().Error(err) + s.Require().ErrorIs(err, tc.expError) } }) } } } -func (suite *KeeperTestSuite) TestOnChanOpenAck() { +func (s *KeeperTestSuite) TestOnChanOpenAck() { var ( path *ibctesting.Path metadata icatypes.Metadata @@ -328,7 +328,7 @@ func (suite *KeeperTestSuite) 
TestOnChanOpenAck() { metadata.Encoding = "invalid-encoding-format" versionBytes, err := icatypes.ModuleCdc.MarshalJSON(&metadata) - suite.Require().NoError(err) + s.Require().NoError(err) path.EndpointA.Counterparty.ChannelConfig.Version = string(versionBytes) }, @@ -340,7 +340,7 @@ func (suite *KeeperTestSuite) TestOnChanOpenAck() { metadata.TxType = "invalid-tx-types" versionBytes, err := icatypes.ModuleCdc.MarshalJSON(&metadata) - suite.Require().NoError(err) + s.Require().NoError(err) path.EndpointA.Counterparty.ChannelConfig.Version = string(versionBytes) }, @@ -352,7 +352,7 @@ func (suite *KeeperTestSuite) TestOnChanOpenAck() { metadata.Address = "invalid-account-address" versionBytes, err := icatypes.ModuleCdc.MarshalJSON(&metadata) - suite.Require().NoError(err) + s.Require().NoError(err) path.EndpointA.Counterparty.ChannelConfig.Version = string(versionBytes) }, @@ -364,7 +364,7 @@ func (suite *KeeperTestSuite) TestOnChanOpenAck() { metadata.Address = "" versionBytes, err := icatypes.ModuleCdc.MarshalJSON(&metadata) - suite.Require().NoError(err) + s.Require().NoError(err) path.EndpointA.Counterparty.ChannelConfig.Version = string(versionBytes) }, @@ -376,7 +376,7 @@ func (suite *KeeperTestSuite) TestOnChanOpenAck() { metadata.Version = "invalid-version" versionBytes, err := icatypes.ModuleCdc.MarshalJSON(&metadata) - suite.Require().NoError(err) + s.Require().NoError(err) path.EndpointA.Counterparty.ChannelConfig.Version = string(versionBytes) }, @@ -387,64 +387,64 @@ func (suite *KeeperTestSuite) TestOnChanOpenAck() { func() { // create a new channel and set it in state ch := channeltypes.NewChannel(channeltypes.OPEN, channeltypes.ORDERED, channeltypes.NewCounterparty(path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID), []string{path.EndpointB.ConnectionID}, ibctesting.DefaultChannelVersion) - suite.chainA.GetSimApp().GetIBCKeeper().ChannelKeeper.SetChannel(suite.chainA.GetContext(), path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, ch) + s.chainA.GetSimApp().GetIBCKeeper().ChannelKeeper.SetChannel(s.chainA.GetContext(), path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, ch) // set the active channelID in state - suite.chainA.GetSimApp().ICAControllerKeeper.SetActiveChannelID(suite.chainA.GetContext(), ibctesting.FirstConnectionID, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID) + s.chainA.GetSimApp().ICAControllerKeeper.SetActiveChannelID(s.chainA.GetContext(), ibctesting.FirstConnectionID, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID) }, icatypes.ErrActiveChannelAlreadySet, }, } for _, ordering := range []channeltypes.Order{channeltypes.UNORDERED, channeltypes.ORDERED} { for _, tc := range testCases { - suite.Run(tc.name, func() { - suite.SetupTest() // reset + s.Run(tc.name, func() { + s.SetupTest() // reset - path = NewICAPath(suite.chainA, suite.chainB, ordering) + path = NewICAPath(s.chainA, s.chainB, ordering) path.SetupConnections() err := RegisterInterchainAccount(path.EndpointA, TestOwnerAddress) - suite.Require().NoError(err) + s.Require().NoError(err) err = path.EndpointB.ChanOpenTry() - suite.Require().NoError(err) + s.Require().NoError(err) - interchainAccAddr, exists := suite.chainB.GetSimApp().ICAHostKeeper.GetInterchainAccountAddress(suite.chainB.GetContext(), path.EndpointB.ConnectionID, path.EndpointA.ChannelConfig.PortID) - suite.Require().True(exists) + interchainAccAddr, exists := s.chainB.GetSimApp().ICAHostKeeper.GetInterchainAccountAddress(s.chainB.GetContext(), 
path.EndpointB.ConnectionID, path.EndpointA.ChannelConfig.PortID) + s.Require().True(exists) metadata = icatypes.NewMetadata(icatypes.Version, ibctesting.FirstConnectionID, ibctesting.FirstConnectionID, interchainAccAddr, icatypes.EncodingProtobuf, icatypes.TxTypeSDKMultiMsg) versionBytes, err := icatypes.ModuleCdc.MarshalJSON(&metadata) - suite.Require().NoError(err) + s.Require().NoError(err) path.EndpointB.ChannelConfig.Version = string(versionBytes) tc.malleate() // malleate mutates test data - err = suite.chainA.GetSimApp().ICAControllerKeeper.OnChanOpenAck(suite.chainA.GetContext(), + err = s.chainA.GetSimApp().ICAControllerKeeper.OnChanOpenAck(s.chainA.GetContext(), path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointA.Counterparty.ChannelConfig.Version, ) if tc.expErr == nil { - suite.Require().NoError(err) + s.Require().NoError(err) - activeChannelID, found := suite.chainA.GetSimApp().ICAControllerKeeper.GetActiveChannelID(suite.chainA.GetContext(), ibctesting.FirstConnectionID, path.EndpointA.ChannelConfig.PortID) - suite.Require().True(found) + activeChannelID, found := s.chainA.GetSimApp().ICAControllerKeeper.GetActiveChannelID(s.chainA.GetContext(), ibctesting.FirstConnectionID, path.EndpointA.ChannelConfig.PortID) + s.Require().True(found) - suite.Require().Equal(path.EndpointA.ChannelID, activeChannelID) + s.Require().Equal(path.EndpointA.ChannelID, activeChannelID) - interchainAccAddress, found := suite.chainA.GetSimApp().ICAControllerKeeper.GetInterchainAccountAddress(suite.chainA.GetContext(), ibctesting.FirstConnectionID, path.EndpointA.ChannelConfig.PortID) - suite.Require().True(found) + interchainAccAddress, found := s.chainA.GetSimApp().ICAControllerKeeper.GetInterchainAccountAddress(s.chainA.GetContext(), ibctesting.FirstConnectionID, path.EndpointA.ChannelConfig.PortID) + s.Require().True(found) - suite.Require().Equal(metadata.Address, interchainAccAddress) + s.Require().Equal(metadata.Address, interchainAccAddress) } else { - suite.Require().ErrorIs(err, tc.expErr) + s.Require().ErrorIs(err, tc.expErr) } }) } } } -func (suite *KeeperTestSuite) TestOnChanCloseConfirm() { +func (s *KeeperTestSuite) TestOnChanCloseConfirm() { var path *ibctesting.Path testCases := []struct { @@ -459,28 +459,28 @@ func (suite *KeeperTestSuite) TestOnChanCloseConfirm() { for _, ordering := range []channeltypes.Order{channeltypes.UNORDERED, channeltypes.ORDERED} { for _, tc := range testCases { - suite.Run(tc.name, func() { - suite.SetupTest() // reset + s.Run(tc.name, func() { + s.SetupTest() // reset - path = NewICAPath(suite.chainA, suite.chainB, ordering) + path = NewICAPath(s.chainA, s.chainB, ordering) path.SetupConnections() err := SetupICAPath(path, TestOwnerAddress) - suite.Require().NoError(err) + s.Require().NoError(err) tc.malleate() // malleate mutates test data - err = suite.chainB.GetSimApp().ICAControllerKeeper.OnChanCloseConfirm(suite.chainB.GetContext(), + err = s.chainB.GetSimApp().ICAControllerKeeper.OnChanCloseConfirm(s.chainB.GetContext(), path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID) - activeChannelID, found := suite.chainB.GetSimApp().ICAControllerKeeper.GetActiveChannelID(suite.chainB.GetContext(), ibctesting.FirstConnectionID, path.EndpointB.ChannelConfig.PortID) + activeChannelID, found := s.chainB.GetSimApp().ICAControllerKeeper.GetActiveChannelID(s.chainB.GetContext(), ibctesting.FirstConnectionID, path.EndpointB.ChannelConfig.PortID) if tc.expErr == nil { - suite.Require().NoError(err) - suite.Require().False(found) 
- suite.Require().Empty(activeChannelID) + s.Require().NoError(err) + s.Require().False(found) + s.Require().Empty(activeChannelID) } else { - suite.Require().Error(err) + s.Require().Error(err) } }) } diff --git a/modules/apps/27-interchain-accounts/controller/keeper/keeper.go b/modules/apps/27-interchain-accounts/controller/keeper/keeper.go index 2eba8e80a46..3482ea6a554 100644 --- a/modules/apps/27-interchain-accounts/controller/keeper/keeper.go +++ b/modules/apps/27-interchain-accounts/controller/keeper/keeper.go @@ -26,11 +26,10 @@ import ( // Keeper defines the IBC interchain accounts controller keeper type Keeper struct { - storeService corestore.KVStoreService - cdc codec.Codec - legacySubspace icatypes.ParamSubspace - ics4Wrapper porttypes.ICS4Wrapper - channelKeeper icatypes.ChannelKeeper + storeService corestore.KVStoreService + cdc codec.Codec + ics4Wrapper porttypes.ICS4Wrapper + channelKeeper icatypes.ChannelKeeper msgRouter icatypes.MessageRouter @@ -41,22 +40,23 @@ type Keeper struct { // NewKeeper creates a new interchain accounts controller Keeper instance func NewKeeper( - cdc codec.Codec, storeService corestore.KVStoreService, legacySubspace icatypes.ParamSubspace, - ics4Wrapper porttypes.ICS4Wrapper, channelKeeper icatypes.ChannelKeeper, + cdc codec.Codec, storeService corestore.KVStoreService, + channelKeeper icatypes.ChannelKeeper, msgRouter icatypes.MessageRouter, authority string, -) Keeper { +) *Keeper { if strings.TrimSpace(authority) == "" { panic(errors.New("authority must be non-empty")) } - return Keeper{ - storeService: storeService, - cdc: cdc, - legacySubspace: legacySubspace, - ics4Wrapper: ics4Wrapper, - channelKeeper: channelKeeper, - msgRouter: msgRouter, - authority: authority, + return &Keeper{ + storeService: storeService, + cdc: cdc, + // Defaults to using the channel keeper as the ICS4Wrapper + // This can be overridden later with WithICS4Wrapper (e.g. by the middleware stack wiring) + ics4Wrapper: channelKeeper, + channelKeeper: channelKeeper, + msgRouter: msgRouter, + authority: authority, } } @@ -68,17 +68,17 @@ func (k *Keeper) WithICS4Wrapper(wrapper porttypes.ICS4Wrapper) { } // GetICS4Wrapper returns the ICS4Wrapper. -func (k Keeper) GetICS4Wrapper() porttypes.ICS4Wrapper { +func (k *Keeper) GetICS4Wrapper() porttypes.ICS4Wrapper { return k.ics4Wrapper } // Logger returns the application logger, scoped to the associated module -func (Keeper) Logger(ctx sdk.Context) log.Logger { +func (*Keeper) Logger(ctx sdk.Context) log.Logger { return ctx.Logger().With("module", fmt.Sprintf("x/%s-%s", exported.ModuleName, icatypes.ModuleName)) } // GetConnectionID returns the connection id for the given port and channelIDs. -func (k Keeper) GetConnectionID(ctx sdk.Context, portID, channelID string) (string, error) { +func (k *Keeper) GetConnectionID(ctx sdk.Context, portID, channelID string) (string, error) { channel, found := k.channelKeeper.GetChannel(ctx, portID, channelID) if !found { return "", errorsmod.Wrapf(channeltypes.ErrChannelNotFound, "port ID (%s) channel ID (%s)", portID, channelID) @@ -87,7 +87,7 @@ func (k Keeper) GetConnectionID(ctx sdk.Context, portID, channelID string) (stri } // GetAllPorts returns all ports to which the interchain accounts controller module is bound. 
Used in ExportGenesis -func (k Keeper) GetAllPorts(ctx sdk.Context) []string { +func (k *Keeper) GetAllPorts(ctx sdk.Context) []string { store := runtime.KVStoreAdapter(k.storeService.OpenKVStore(ctx)) iterator := storetypes.KVStorePrefixIterator(store, []byte(icatypes.PortKeyPrefix)) defer sdk.LogDeferred(k.Logger(ctx), func() error { return iterator.Close() }) @@ -103,7 +103,7 @@ func (k Keeper) GetAllPorts(ctx sdk.Context) []string { } // setPort sets the provided portID in state -func (k Keeper) setPort(ctx sdk.Context, portID string) { +func (k *Keeper) setPort(ctx sdk.Context, portID string) { store := k.storeService.OpenKVStore(ctx) if err := store.Set(icatypes.KeyPort(portID), []byte{0x01}); err != nil { panic(err) @@ -111,12 +111,12 @@ func (k Keeper) setPort(ctx sdk.Context, portID string) { } // GetAppVersion calls the ICS4Wrapper GetAppVersion function. -func (k Keeper) GetAppVersion(ctx sdk.Context, portID, channelID string) (string, bool) { +func (k *Keeper) GetAppVersion(ctx sdk.Context, portID, channelID string) (string, bool) { return k.ics4Wrapper.GetAppVersion(ctx, portID, channelID) } // GetActiveChannelID retrieves the active channelID from the store, keyed by the provided connectionID and portID -func (k Keeper) GetActiveChannelID(ctx sdk.Context, connectionID, portID string) (string, bool) { +func (k *Keeper) GetActiveChannelID(ctx sdk.Context, connectionID, portID string) (string, bool) { store := k.storeService.OpenKVStore(ctx) key := icatypes.KeyActiveChannel(portID, connectionID) @@ -132,7 +132,7 @@ func (k Keeper) GetActiveChannelID(ctx sdk.Context, connectionID, portID string) } // GetOpenActiveChannel retrieves the active channelID from the store, keyed by the provided connectionID and portID & checks if the channel in question is in state OPEN -func (k Keeper) GetOpenActiveChannel(ctx sdk.Context, connectionID, portID string) (string, bool) { +func (k *Keeper) GetOpenActiveChannel(ctx sdk.Context, connectionID, portID string) (string, bool) { channelID, found := k.GetActiveChannelID(ctx, connectionID, portID) if !found { return "", false @@ -148,7 +148,7 @@ func (k Keeper) GetOpenActiveChannel(ctx sdk.Context, connectionID, portID strin } // IsActiveChannelClosed retrieves the active channel from the store and returns true if the channel state is CLOSED, otherwise false -func (k Keeper) IsActiveChannelClosed(ctx sdk.Context, connectionID, portID string) bool { +func (k *Keeper) IsActiveChannelClosed(ctx sdk.Context, connectionID, portID string) bool { channelID, found := k.GetActiveChannelID(ctx, connectionID, portID) if !found { return false @@ -159,7 +159,7 @@ func (k Keeper) IsActiveChannelClosed(ctx sdk.Context, connectionID, portID stri } // GetAllActiveChannels returns a list of all active interchain accounts controller channels and their associated connection and port identifiers -func (k Keeper) GetAllActiveChannels(ctx sdk.Context) []genesistypes.ActiveChannel { +func (k *Keeper) GetAllActiveChannels(ctx sdk.Context) []genesistypes.ActiveChannel { store := runtime.KVStoreAdapter(k.storeService.OpenKVStore(ctx)) iterator := storetypes.KVStorePrefixIterator(store, []byte(icatypes.ActiveChannelKeyPrefix)) defer sdk.LogDeferred(k.Logger(ctx), func() error { return iterator.Close() }) @@ -186,7 +186,7 @@ func (k Keeper) GetAllActiveChannels(ctx sdk.Context) []genesistypes.ActiveChann } // SetActiveChannelID stores the active channelID, keyed by the provided connectionID and portID -func (k Keeper) SetActiveChannelID(ctx sdk.Context, connectionID, portID, 
channelID string) { +func (k *Keeper) SetActiveChannelID(ctx sdk.Context, connectionID, portID, channelID string) { store := k.storeService.OpenKVStore(ctx) if err := store.Set(icatypes.KeyActiveChannel(portID, connectionID), []byte(channelID)); err != nil { panic(err) @@ -194,13 +194,13 @@ func (k Keeper) SetActiveChannelID(ctx sdk.Context, connectionID, portID, channe } // IsActiveChannel returns true if there exists an active channel for the provided connectionID and portID, otherwise false -func (k Keeper) IsActiveChannel(ctx sdk.Context, connectionID, portID string) bool { +func (k *Keeper) IsActiveChannel(ctx sdk.Context, connectionID, portID string) bool { _, ok := k.GetActiveChannelID(ctx, connectionID, portID) return ok } // GetInterchainAccountAddress retrieves the InterchainAccount address from the store associated with the provided connectionID and portID -func (k Keeper) GetInterchainAccountAddress(ctx sdk.Context, connectionID, portID string) (string, bool) { +func (k *Keeper) GetInterchainAccountAddress(ctx sdk.Context, connectionID, portID string) (string, bool) { store := k.storeService.OpenKVStore(ctx) key := icatypes.KeyOwnerAccount(portID, connectionID) @@ -216,7 +216,7 @@ func (k Keeper) GetInterchainAccountAddress(ctx sdk.Context, connectionID, portI } // GetAllInterchainAccounts returns a list of all registered interchain account addresses and their associated connection and controller port identifiers -func (k Keeper) GetAllInterchainAccounts(ctx sdk.Context) []genesistypes.RegisteredInterchainAccount { +func (k *Keeper) GetAllInterchainAccounts(ctx sdk.Context) []genesistypes.RegisteredInterchainAccount { store := runtime.KVStoreAdapter(k.storeService.OpenKVStore(ctx)) iterator := storetypes.KVStorePrefixIterator(store, []byte(icatypes.OwnerKeyPrefix)) @@ -237,7 +237,7 @@ func (k Keeper) GetAllInterchainAccounts(ctx sdk.Context) []genesistypes.Registe } // SetInterchainAccountAddress stores the InterchainAccount address, keyed by the associated connectionID and portID -func (k Keeper) SetInterchainAccountAddress(ctx sdk.Context, connectionID, portID, address string) { +func (k *Keeper) SetInterchainAccountAddress(ctx sdk.Context, connectionID, portID, address string) { store := k.storeService.OpenKVStore(ctx) if err := store.Set(icatypes.KeyOwnerAccount(portID, connectionID), []byte(address)); err != nil { panic(err) @@ -245,7 +245,7 @@ func (k Keeper) SetInterchainAccountAddress(ctx sdk.Context, connectionID, portI } // IsMiddlewareEnabled returns true if the underlying application callbacks are enabled for given port and connection identifier pair, otherwise false -func (k Keeper) IsMiddlewareEnabled(ctx sdk.Context, portID, connectionID string) bool { +func (k *Keeper) IsMiddlewareEnabled(ctx sdk.Context, portID, connectionID string) bool { store := k.storeService.OpenKVStore(ctx) bz, err := store.Get(icatypes.KeyIsMiddlewareEnabled(portID, connectionID)) if err != nil { @@ -255,7 +255,7 @@ func (k Keeper) IsMiddlewareEnabled(ctx sdk.Context, portID, connectionID string } // IsMiddlewareDisabled returns true if the underlying application callbacks are disabled for the given port and connection identifier pair, otherwise false -func (k Keeper) IsMiddlewareDisabled(ctx sdk.Context, portID, connectionID string) bool { +func (k *Keeper) IsMiddlewareDisabled(ctx sdk.Context, portID, connectionID string) bool { store := k.storeService.OpenKVStore(ctx) bz, err := store.Get(icatypes.KeyIsMiddlewareEnabled(portID, connectionID)) if err != nil { @@ -265,7 +265,7 @@ func 
(k Keeper) IsMiddlewareDisabled(ctx sdk.Context, portID, connectionID strin } // SetMiddlewareEnabled stores a flag to indicate that the underlying application callbacks should be enabled for the given port and connection identifier pair -func (k Keeper) SetMiddlewareEnabled(ctx sdk.Context, portID, connectionID string) { +func (k *Keeper) SetMiddlewareEnabled(ctx sdk.Context, portID, connectionID string) { store := k.storeService.OpenKVStore(ctx) if err := store.Set(icatypes.KeyIsMiddlewareEnabled(portID, connectionID), icatypes.MiddlewareEnabled); err != nil { panic(err) @@ -273,7 +273,7 @@ func (k Keeper) SetMiddlewareEnabled(ctx sdk.Context, portID, connectionID strin } // SetMiddlewareDisabled stores a flag to indicate that the underlying application callbacks should be disabled for the given port and connection identifier pair -func (k Keeper) SetMiddlewareDisabled(ctx sdk.Context, portID, connectionID string) { +func (k *Keeper) SetMiddlewareDisabled(ctx sdk.Context, portID, connectionID string) { store := k.storeService.OpenKVStore(ctx) if err := store.Set(icatypes.KeyIsMiddlewareEnabled(portID, connectionID), icatypes.MiddlewareDisabled); err != nil { panic(err) @@ -281,7 +281,7 @@ func (k Keeper) SetMiddlewareDisabled(ctx sdk.Context, portID, connectionID stri } // DeleteMiddlewareEnabled deletes the middleware enabled flag stored in state -func (k Keeper) DeleteMiddlewareEnabled(ctx sdk.Context, portID, connectionID string) { +func (k *Keeper) DeleteMiddlewareEnabled(ctx sdk.Context, portID, connectionID string) { store := k.storeService.OpenKVStore(ctx) if err := store.Delete(icatypes.KeyIsMiddlewareEnabled(portID, connectionID)); err != nil { panic(err) @@ -289,12 +289,12 @@ func (k Keeper) DeleteMiddlewareEnabled(ctx sdk.Context, portID, connectionID st } // GetAuthority returns the ica/controller submodule's authority. -func (k Keeper) GetAuthority() string { +func (k *Keeper) GetAuthority() string { return k.authority } // getAppMetadata retrieves the interchain accounts channel metadata from the store associated with the provided portID and channelID -func (k Keeper) getAppMetadata(ctx sdk.Context, portID, channelID string) (icatypes.Metadata, error) { +func (k *Keeper) getAppMetadata(ctx sdk.Context, portID, channelID string) (icatypes.Metadata, error) { appVersion, found := k.GetAppVersion(ctx, portID, channelID) if !found { return icatypes.Metadata{}, errorsmod.Wrapf(ibcerrors.ErrNotFound, "app version not found for port %s and channel %s", portID, channelID) @@ -304,7 +304,7 @@ func (k Keeper) getAppMetadata(ctx sdk.Context, portID, channelID string) (icaty } // GetParams returns the current ica/controller submodule parameters. -func (k Keeper) GetParams(ctx sdk.Context) types.Params { +func (k *Keeper) GetParams(ctx sdk.Context) types.Params { store := k.storeService.OpenKVStore(ctx) bz, err := store.Get([]byte(types.ParamsKey)) if err != nil { @@ -320,7 +320,7 @@ func (k Keeper) GetParams(ctx sdk.Context) types.Params { } // SetParams sets the ica/controller submodule parameters. 
-func (k Keeper) SetParams(ctx sdk.Context, params types.Params) { +func (k *Keeper) SetParams(ctx sdk.Context, params types.Params) { store := k.storeService.OpenKVStore(ctx) bz := k.cdc.MustMarshal(&params) if err := store.Set([]byte(types.ParamsKey), bz); err != nil { diff --git a/modules/apps/27-interchain-accounts/controller/keeper/keeper_test.go b/modules/apps/27-interchain-accounts/controller/keeper/keeper_test.go index 1bea2db2640..079d8ed02e9 100644 --- a/modules/apps/27-interchain-accounts/controller/keeper/keeper_test.go +++ b/modules/apps/27-interchain-accounts/controller/keeper/keeper_test.go @@ -46,11 +46,11 @@ type KeeperTestSuite struct { chainC *ibctesting.TestChain } -func (suite *KeeperTestSuite) SetupTest() { - suite.coordinator = ibctesting.NewCoordinator(suite.T(), 3) - suite.chainA = suite.coordinator.GetChain(ibctesting.GetChainID(1)) - suite.chainB = suite.coordinator.GetChain(ibctesting.GetChainID(2)) - suite.chainC = suite.coordinator.GetChain(ibctesting.GetChainID(3)) +func (s *KeeperTestSuite) SetupTest() { + s.coordinator = ibctesting.NewCoordinator(s.T(), 3) + s.chainA = s.coordinator.GetChain(ibctesting.GetChainID(1)) + s.chainB = s.coordinator.GetChain(ibctesting.GetChainID(2)) + s.chainC = s.coordinator.GetChain(ibctesting.GetChainID(3)) } func NewICAPath(chainA, chainB *ibctesting.TestChain, ordering channeltypes.Order) *ibctesting.Path { @@ -109,7 +109,7 @@ func TestKeeperTestSuite(t *testing.T) { testifysuite.Run(t, new(KeeperTestSuite)) } -func (suite *KeeperTestSuite) TestNewKeeper() { +func (s *KeeperTestSuite) TestNewKeeper() { testCases := []struct { name string instantiateFn func() @@ -117,39 +117,34 @@ func (suite *KeeperTestSuite) TestNewKeeper() { }{ {"success", func() { keeper.NewKeeper( - suite.chainA.GetSimApp().AppCodec(), - runtime.NewKVStoreService(suite.chainA.GetSimApp().GetKey(types.StoreKey)), - suite.chainA.GetSimApp().GetSubspace(types.SubModuleName), - suite.chainA.GetSimApp().IBCKeeper.ChannelKeeper, - suite.chainA.GetSimApp().IBCKeeper.ChannelKeeper, - suite.chainA.GetSimApp().MsgServiceRouter(), - suite.chainA.GetSimApp().ICAControllerKeeper.GetAuthority(), + s.chainA.GetSimApp().AppCodec(), + runtime.NewKVStoreService(s.chainA.GetSimApp().GetKey(types.StoreKey)), + s.chainA.GetSimApp().IBCKeeper.ChannelKeeper, + s.chainA.GetSimApp().MsgServiceRouter(), + s.chainA.GetSimApp().ICAControllerKeeper.GetAuthority(), ) }, ""}, {"failure: empty authority", func() { keeper.NewKeeper( - suite.chainA.GetSimApp().AppCodec(), - runtime.NewKVStoreService(suite.chainA.GetSimApp().GetKey(types.StoreKey)), - suite.chainA.GetSimApp().GetSubspace(types.SubModuleName), - suite.chainA.GetSimApp().IBCKeeper.ChannelKeeper, - suite.chainA.GetSimApp().IBCKeeper.ChannelKeeper, - suite.chainA.GetSimApp().MsgServiceRouter(), + s.chainA.GetSimApp().AppCodec(), + runtime.NewKVStoreService(s.chainA.GetSimApp().GetKey(types.StoreKey)), + s.chainA.GetSimApp().IBCKeeper.ChannelKeeper, + s.chainA.GetSimApp().MsgServiceRouter(), "", // authority ) }, "authority must be non-empty"}, } for _, tc := range testCases { + s.SetupTest() - suite.SetupTest() - - suite.Run(tc.name, func() { + s.Run(tc.name, func() { if tc.errMsg == "" { - suite.Require().NotPanics( + s.Require().NotPanics( tc.instantiateFn, ) } else { - suite.Require().PanicsWithError( + s.Require().PanicsWithError( tc.errMsg, tc.instantiateFn, ) @@ -158,62 +153,62 @@ func (suite *KeeperTestSuite) TestNewKeeper() { } } -func (suite *KeeperTestSuite) TestGetAllPorts() { +func (s *KeeperTestSuite) 
TestGetAllPorts() { for _, ordering := range []channeltypes.Order{channeltypes.UNORDERED, channeltypes.ORDERED} { - suite.SetupTest() + s.SetupTest() - path := NewICAPath(suite.chainA, suite.chainB, ordering) + path := NewICAPath(s.chainA, s.chainB, ordering) path.SetupConnections() err := SetupICAPath(path, TestOwnerAddress) - suite.Require().NoError(err) + s.Require().NoError(err) expectedPorts := []string{TestPortID} - ports := suite.chainA.GetSimApp().ICAControllerKeeper.GetAllPorts(suite.chainA.GetContext()) - suite.Require().Len(ports, len(expectedPorts)) - suite.Require().Equal(expectedPorts, ports) + ports := s.chainA.GetSimApp().ICAControllerKeeper.GetAllPorts(s.chainA.GetContext()) + s.Require().Len(ports, len(expectedPorts)) + s.Require().Equal(expectedPorts, ports) } } -func (suite *KeeperTestSuite) TestGetInterchainAccountAddress() { +func (s *KeeperTestSuite) TestGetInterchainAccountAddress() { for _, ordering := range []channeltypes.Order{channeltypes.UNORDERED, channeltypes.ORDERED} { - suite.SetupTest() + s.SetupTest() - path := NewICAPath(suite.chainA, suite.chainB, ordering) + path := NewICAPath(s.chainA, s.chainB, ordering) path.SetupConnections() err := SetupICAPath(path, TestOwnerAddress) - suite.Require().NoError(err) + s.Require().NoError(err) counterpartyPortID := path.EndpointA.ChannelConfig.PortID - retrievedAddr, found := suite.chainA.GetSimApp().ICAControllerKeeper.GetInterchainAccountAddress(suite.chainA.GetContext(), ibctesting.FirstConnectionID, counterpartyPortID) - suite.Require().True(found) - suite.Require().NotEmpty(retrievedAddr) + retrievedAddr, found := s.chainA.GetSimApp().ICAControllerKeeper.GetInterchainAccountAddress(s.chainA.GetContext(), ibctesting.FirstConnectionID, counterpartyPortID) + s.Require().True(found) + s.Require().NotEmpty(retrievedAddr) - retrievedAddr, found = suite.chainA.GetSimApp().ICAControllerKeeper.GetInterchainAccountAddress(suite.chainA.GetContext(), "invalid conn", "invalid port") - suite.Require().False(found) - suite.Require().Empty(retrievedAddr) + retrievedAddr, found = s.chainA.GetSimApp().ICAControllerKeeper.GetInterchainAccountAddress(s.chainA.GetContext(), "invalid conn", "invalid port") + s.Require().False(found) + s.Require().Empty(retrievedAddr) } } -func (suite *KeeperTestSuite) TestGetAllActiveChannels() { +func (s *KeeperTestSuite) TestGetAllActiveChannels() { for _, ordering := range []channeltypes.Order{channeltypes.UNORDERED, channeltypes.ORDERED} { var ( expectedChannelID = "test-channel" expectedPortID = "test-port" ) - suite.SetupTest() + s.SetupTest() - path := NewICAPath(suite.chainA, suite.chainB, ordering) + path := NewICAPath(s.chainA, s.chainB, ordering) path.SetupConnections() err := SetupICAPath(path, TestOwnerAddress) - suite.Require().NoError(err) + s.Require().NoError(err) - suite.chainA.GetSimApp().ICAControllerKeeper.SetActiveChannelID(suite.chainA.GetContext(), ibctesting.FirstConnectionID, expectedPortID, expectedChannelID) + s.chainA.GetSimApp().ICAControllerKeeper.SetActiveChannelID(s.chainA.GetContext(), ibctesting.FirstConnectionID, expectedPortID, expectedChannelID) expectedChannels := []genesistypes.ActiveChannel{ { @@ -230,31 +225,31 @@ func (suite *KeeperTestSuite) TestGetAllActiveChannels() { }, } - activeChannels := suite.chainA.GetSimApp().ICAControllerKeeper.GetAllActiveChannels(suite.chainA.GetContext()) - suite.Require().Len(activeChannels, len(expectedChannels)) - suite.Require().Equal(expectedChannels, activeChannels) + activeChannels := 
s.chainA.GetSimApp().ICAControllerKeeper.GetAllActiveChannels(s.chainA.GetContext()) + s.Require().Len(activeChannels, len(expectedChannels)) + s.Require().Equal(expectedChannels, activeChannels) } } -func (suite *KeeperTestSuite) TestGetAllInterchainAccounts() { +func (s *KeeperTestSuite) TestGetAllInterchainAccounts() { for _, ordering := range []channeltypes.Order{channeltypes.UNORDERED, channeltypes.ORDERED} { var ( expectedAccAddr = "test-acc-addr" expectedPortID = "test-port" ) - suite.SetupTest() + s.SetupTest() - path := NewICAPath(suite.chainA, suite.chainB, ordering) + path := NewICAPath(s.chainA, s.chainB, ordering) path.SetupConnections() err := SetupICAPath(path, TestOwnerAddress) - suite.Require().NoError(err) + s.Require().NoError(err) - interchainAccAddr, exists := suite.chainB.GetSimApp().ICAHostKeeper.GetInterchainAccountAddress(suite.chainB.GetContext(), path.EndpointB.ConnectionID, path.EndpointA.ChannelConfig.PortID) - suite.Require().True(exists) + interchainAccAddr, exists := s.chainB.GetSimApp().ICAHostKeeper.GetInterchainAccountAddress(s.chainB.GetContext(), path.EndpointB.ConnectionID, path.EndpointA.ChannelConfig.PortID) + s.Require().True(exists) - suite.chainA.GetSimApp().ICAControllerKeeper.SetInterchainAccountAddress(suite.chainA.GetContext(), ibctesting.FirstConnectionID, expectedPortID, expectedAccAddr) + s.chainA.GetSimApp().ICAControllerKeeper.SetInterchainAccountAddress(s.chainA.GetContext(), ibctesting.FirstConnectionID, expectedPortID, expectedAccAddr) expectedAccounts := []genesistypes.RegisteredInterchainAccount{ { @@ -269,43 +264,43 @@ func (suite *KeeperTestSuite) TestGetAllInterchainAccounts() { }, } - interchainAccounts := suite.chainA.GetSimApp().ICAControllerKeeper.GetAllInterchainAccounts(suite.chainA.GetContext()) - suite.Require().Len(interchainAccounts, len(expectedAccounts)) - suite.Require().Equal(expectedAccounts, interchainAccounts) + interchainAccounts := s.chainA.GetSimApp().ICAControllerKeeper.GetAllInterchainAccounts(s.chainA.GetContext()) + s.Require().Len(interchainAccounts, len(expectedAccounts)) + s.Require().Equal(expectedAccounts, interchainAccounts) } } -func (suite *KeeperTestSuite) TestIsActiveChannel() { +func (s *KeeperTestSuite) TestIsActiveChannel() { for _, ordering := range []channeltypes.Order{channeltypes.UNORDERED, channeltypes.ORDERED} { - suite.SetupTest() + s.SetupTest() - path := NewICAPath(suite.chainA, suite.chainB, ordering) + path := NewICAPath(s.chainA, s.chainB, ordering) owner := TestOwnerAddress path.SetupConnections() err := SetupICAPath(path, owner) - suite.Require().NoError(err) + s.Require().NoError(err) portID := path.EndpointA.ChannelConfig.PortID - isActive := suite.chainA.GetSimApp().ICAControllerKeeper.IsActiveChannel(suite.chainA.GetContext(), ibctesting.FirstConnectionID, portID) - suite.Require().Equal(isActive, true) + isActive := s.chainA.GetSimApp().ICAControllerKeeper.IsActiveChannel(s.chainA.GetContext(), ibctesting.FirstConnectionID, portID) + s.Require().True(isActive) } } -func (suite *KeeperTestSuite) TestSetInterchainAccountAddress() { +func (s *KeeperTestSuite) TestSetInterchainAccountAddress() { var ( expectedAccAddr = "test-acc-addr" expectedPortID = "test-port" ) - suite.chainA.GetSimApp().ICAControllerKeeper.SetInterchainAccountAddress(suite.chainA.GetContext(), ibctesting.FirstConnectionID, expectedPortID, expectedAccAddr) + s.chainA.GetSimApp().ICAControllerKeeper.SetInterchainAccountAddress(s.chainA.GetContext(), ibctesting.FirstConnectionID, expectedPortID, 
expectedAccAddr) - retrievedAddr, found := suite.chainA.GetSimApp().ICAControllerKeeper.GetInterchainAccountAddress(suite.chainA.GetContext(), ibctesting.FirstConnectionID, expectedPortID) - suite.Require().True(found) - suite.Require().Equal(expectedAccAddr, retrievedAddr) + retrievedAddr, found := s.chainA.GetSimApp().ICAControllerKeeper.GetInterchainAccountAddress(s.chainA.GetContext(), ibctesting.FirstConnectionID, expectedPortID) + s.Require().True(found) + s.Require().Equal(expectedAccAddr, retrievedAddr) } -func (suite *KeeperTestSuite) TestSetAndGetParams() { +func (s *KeeperTestSuite) TestSetAndGetParams() { testCases := []struct { name string input types.Params @@ -315,50 +310,50 @@ func (suite *KeeperTestSuite) TestSetAndGetParams() { } for _, tc := range testCases { - suite.Run(tc.name, func() { - suite.SetupTest() // reset - ctx := suite.chainA.GetContext() + s.Run(tc.name, func() { + s.SetupTest() // reset + ctx := s.chainA.GetContext() - suite.chainA.GetSimApp().ICAControllerKeeper.SetParams(ctx, tc.input) + s.chainA.GetSimApp().ICAControllerKeeper.SetParams(ctx, tc.input) expected := tc.input - p := suite.chainA.GetSimApp().ICAControllerKeeper.GetParams(ctx) - suite.Require().Equal(expected, p) + p := s.chainA.GetSimApp().ICAControllerKeeper.GetParams(ctx) + s.Require().Equal(expected, p) }) } } -func (suite *KeeperTestSuite) TestUnsetParams() { - suite.SetupTest() +func (s *KeeperTestSuite) TestUnsetParams() { + s.SetupTest() - ctx := suite.chainA.GetContext() - store := suite.chainA.GetContext().KVStore(suite.chainA.GetSimApp().GetKey(types.SubModuleName)) + ctx := s.chainA.GetContext() + store := s.chainA.GetContext().KVStore(s.chainA.GetSimApp().GetKey(types.SubModuleName)) store.Delete([]byte(types.ParamsKey)) - suite.Require().Panics(func() { - suite.chainA.GetSimApp().ICAControllerKeeper.GetParams(ctx) + s.Require().Panics(func() { + s.chainA.GetSimApp().ICAControllerKeeper.GetParams(ctx) }) } -func (suite *KeeperTestSuite) TestGetAuthority() { - suite.SetupTest() +func (s *KeeperTestSuite) TestGetAuthority() { + s.SetupTest() - authority := suite.chainA.GetSimApp().ICAControllerKeeper.GetAuthority() + authority := s.chainA.GetSimApp().ICAControllerKeeper.GetAuthority() expectedAuth := authtypes.NewModuleAddress(govtypes.ModuleName).String() - suite.Require().Equal(expectedAuth, authority) + s.Require().Equal(expectedAuth, authority) } -func (suite *KeeperTestSuite) TestWithICS4Wrapper() { - suite.SetupTest() +func (s *KeeperTestSuite) TestWithICS4Wrapper() { + s.SetupTest() // test if the ics4 wrapper is the channel keeper initially - ics4Wrapper := suite.chainA.GetSimApp().ICAControllerKeeper.GetICS4Wrapper() + ics4Wrapper := s.chainA.GetSimApp().ICAControllerKeeper.GetICS4Wrapper() _, isChannelKeeper := ics4Wrapper.(*channelkeeper.Keeper) - suite.Require().True(isChannelKeeper) - suite.Require().IsType((*channelkeeper.Keeper)(nil), ics4Wrapper) + s.Require().True(isChannelKeeper) + s.Require().IsType((*channelkeeper.Keeper)(nil), ics4Wrapper) // set the ics4 wrapper to the channel keeper - suite.chainA.GetSimApp().ICAControllerKeeper.WithICS4Wrapper(nil) - ics4Wrapper = suite.chainA.GetSimApp().ICAControllerKeeper.GetICS4Wrapper() - suite.Require().Nil(ics4Wrapper) + s.chainA.GetSimApp().ICAControllerKeeper.WithICS4Wrapper(nil) + ics4Wrapper = s.chainA.GetSimApp().ICAControllerKeeper.GetICS4Wrapper() + s.Require().Nil(ics4Wrapper) } diff --git a/modules/apps/27-interchain-accounts/controller/keeper/migrations.go 
b/modules/apps/27-interchain-accounts/controller/keeper/migrations.go deleted file mode 100644 index 0f8520ea7e0..00000000000 --- a/modules/apps/27-interchain-accounts/controller/keeper/migrations.go +++ /dev/null @@ -1,32 +0,0 @@ -package keeper - -import ( - sdk "github.com/cosmos/cosmos-sdk/types" - - controllertypes "github.com/cosmos/ibc-go/v10/modules/apps/27-interchain-accounts/controller/types" -) - -// Migrator is a struct for handling in-place store migrations. -type Migrator struct { - keeper *Keeper -} - -// NewMigrator returns Migrator instance for the state migration. -func NewMigrator(k *Keeper) Migrator { - return Migrator{ - keeper: k, - } -} - -// MigrateParams migrates the controller submodule's parameters from the x/params to self store. -func (m Migrator) MigrateParams(ctx sdk.Context) error { - if m.keeper != nil { - params := controllertypes.DefaultParams() - if m.keeper.legacySubspace != nil { - m.keeper.legacySubspace.GetParamSetIfExists(ctx, &params) - } - m.keeper.SetParams(ctx, params) - m.keeper.Logger(ctx).Info("successfully migrated ica/controller submodule to self-manage params") - } - return nil -} diff --git a/modules/apps/27-interchain-accounts/controller/keeper/migrations_test.go b/modules/apps/27-interchain-accounts/controller/keeper/migrations_test.go deleted file mode 100644 index a75752f3492..00000000000 --- a/modules/apps/27-interchain-accounts/controller/keeper/migrations_test.go +++ /dev/null @@ -1,58 +0,0 @@ -package keeper_test - -import ( - "fmt" - - "github.com/cosmos/cosmos-sdk/runtime" - - icacontrollerkeeper "github.com/cosmos/ibc-go/v10/modules/apps/27-interchain-accounts/controller/keeper" - icacontrollertypes "github.com/cosmos/ibc-go/v10/modules/apps/27-interchain-accounts/controller/types" -) - -func (suite *KeeperTestSuite) TestMigratorMigrateParams() { - testCases := []struct { - msg string - malleate func() - expectedParams icacontrollertypes.Params - }{ - { - "success: default params", - func() { - params := icacontrollertypes.DefaultParams() - subspace := suite.chainA.GetSimApp().GetSubspace(icacontrollertypes.SubModuleName) // get subspace - subspace.SetParamSet(suite.chainA.GetContext(), &params) // set params - }, - icacontrollertypes.DefaultParams(), - }, - { - "success: no legacy params pre-migration", - func() { - suite.chainA.GetSimApp().ICAControllerKeeper = icacontrollerkeeper.NewKeeper( - suite.chainA.Codec, - runtime.NewKVStoreService(suite.chainA.GetSimApp().GetKey(icacontrollertypes.StoreKey)), - nil, // assign a nil legacy param subspace - suite.chainA.GetSimApp().IBCKeeper.ChannelKeeper, - suite.chainA.GetSimApp().IBCKeeper.ChannelKeeper, - suite.chainA.GetSimApp().MsgServiceRouter(), - suite.chainA.GetSimApp().ICAControllerKeeper.GetAuthority(), - ) - }, - icacontrollertypes.DefaultParams(), - }, - } - - for _, tc := range testCases { - suite.Run(fmt.Sprintf("case %s", tc.msg), func() { - suite.SetupTest() // reset - - tc.malleate() // explicitly set params - - migrator := icacontrollerkeeper.NewMigrator(&suite.chainA.GetSimApp().ICAControllerKeeper) - err := migrator.MigrateParams(suite.chainA.GetContext()) - suite.Require().NoError(err) - - params := suite.chainA.GetSimApp().ICAControllerKeeper.GetParams(suite.chainA.GetContext()) - suite.Require().Equal(tc.expectedParams, params) - }) - } -} diff --git a/modules/apps/27-interchain-accounts/controller/keeper/msg_server.go b/modules/apps/27-interchain-accounts/controller/keeper/msg_server.go index d89347bc50d..6119258c8f4 100644 --- 
a/modules/apps/27-interchain-accounts/controller/keeper/msg_server.go +++ b/modules/apps/27-interchain-accounts/controller/keeper/msg_server.go @@ -81,7 +81,7 @@ func (s msgServer) SendTx(goCtx context.Context, msg *types.MsgSendTx) (*types.M } // UpdateParams defines an rpc handler method for MsgUpdateParams. Updates the ica/controller submodule's parameters. -func (k Keeper) UpdateParams(goCtx context.Context, msg *types.MsgUpdateParams) (*types.MsgUpdateParamsResponse, error) { +func (k *Keeper) UpdateParams(goCtx context.Context, msg *types.MsgUpdateParams) (*types.MsgUpdateParamsResponse, error) { if k.GetAuthority() != msg.Signer { return nil, errorsmod.Wrapf(ibcerrors.ErrUnauthorized, "expected %s, got %s", k.GetAuthority(), msg.Signer) } diff --git a/modules/apps/27-interchain-accounts/controller/keeper/msg_server_test.go b/modules/apps/27-interchain-accounts/controller/keeper/msg_server_test.go index f729b08f173..f2dc87ad387 100644 --- a/modules/apps/27-interchain-accounts/controller/keeper/msg_server_test.go +++ b/modules/apps/27-interchain-accounts/controller/keeper/msg_server_test.go @@ -17,7 +17,7 @@ import ( ibctesting "github.com/cosmos/ibc-go/v10/testing" ) -func (suite *KeeperTestSuite) TestRegisterInterchainAccount_MsgServer() { +func (s *KeeperTestSuite) TestRegisterInterchainAccount_MsgServer() { var ( msg *types.MsgRegisterInterchainAccount expectedOrderding channeltypes.Order @@ -67,46 +67,46 @@ func (suite *KeeperTestSuite) TestRegisterInterchainAccount_MsgServer() { for _, ordering := range []channeltypes.Order{channeltypes.UNORDERED, channeltypes.ORDERED} { for _, tc := range testCases { - suite.Run(tc.name, func() { + s.Run(tc.name, func() { expectedOrderding = ordering - suite.SetupTest() + s.SetupTest() - path := NewICAPath(suite.chainA, suite.chainB, ordering) + path := NewICAPath(s.chainA, s.chainB, ordering) path.SetupConnections() msg = types.NewMsgRegisterInterchainAccount(ibctesting.FirstConnectionID, ibctesting.TestAccAddress, "", ordering) tc.malleate() - ctx := suite.chainA.GetContext() - msgServer := keeper.NewMsgServerImpl(&suite.chainA.GetSimApp().ICAControllerKeeper) + ctx := s.chainA.GetContext() + msgServer := keeper.NewMsgServerImpl(s.chainA.GetSimApp().ICAControllerKeeper) res, err := msgServer.RegisterInterchainAccount(ctx, msg) if tc.expErr == nil { - suite.Require().NoError(err) - suite.Require().NotNil(res) - suite.Require().Equal(expectedChannelID, res.ChannelId) + s.Require().NoError(err) + s.Require().NotNil(res) + s.Require().Equal(expectedChannelID, res.ChannelId) events := ctx.EventManager().Events() - suite.Require().Len(events, 2) - suite.Require().Equal(events[0].Type, channeltypes.EventTypeChannelOpenInit) - suite.Require().Equal(events[1].Type, sdk.EventTypeMessage) + s.Require().Len(events, 2) + s.Require().Equal(events[0].Type, channeltypes.EventTypeChannelOpenInit) + s.Require().Equal(events[1].Type, sdk.EventTypeMessage) path.EndpointA.ChannelConfig.PortID = res.PortId path.EndpointA.ChannelID = res.ChannelId channel := path.EndpointA.GetChannel() - suite.Require().Equal(expectedOrderding, channel.Ordering) + s.Require().Equal(expectedOrderding, channel.Ordering) } else { - suite.Require().ErrorIs(err, tc.expErr) - suite.Require().Nil(res) + s.Require().ErrorIs(err, tc.expErr) + s.Require().Nil(res) } }) } } } -func (suite *KeeperTestSuite) TestSubmitTx() { +func (s *KeeperTestSuite) TestSubmitTx() { var ( path *ibctesting.Path msg *types.MsgSendTx @@ -145,32 +145,32 @@ func (suite *KeeperTestSuite) TestSubmitTx() { for _, 
ordering := range []channeltypes.Order{channeltypes.UNORDERED, channeltypes.ORDERED} { for _, tc := range testCases { - suite.Run(tc.name, func() { - suite.SetupTest() + s.Run(tc.name, func() { + s.SetupTest() owner := TestOwnerAddress - path = NewICAPath(suite.chainA, suite.chainB, ordering) + path = NewICAPath(s.chainA, s.chainB, ordering) path.SetupConnections() err := SetupICAPath(path, owner) - suite.Require().NoError(err) + s.Require().NoError(err) portID, err := icatypes.NewControllerPortID(TestOwnerAddress) - suite.Require().NoError(err) + s.Require().NoError(err) // get the address of the interchain account stored in state during handshake step - interchainAccountAddr, found := suite.chainA.GetSimApp().ICAControllerKeeper.GetInterchainAccountAddress(suite.chainA.GetContext(), path.EndpointA.ConnectionID, portID) - suite.Require().True(found) + interchainAccountAddr, found := s.chainA.GetSimApp().ICAControllerKeeper.GetInterchainAccountAddress(s.chainA.GetContext(), path.EndpointA.ConnectionID, portID) + s.Require().True(found) // create bank transfer message that will execute on the host chain icaMsg := &banktypes.MsgSend{ FromAddress: interchainAccountAddr, - ToAddress: suite.chainB.SenderAccount.GetAddress().String(), + ToAddress: s.chainB.SenderAccount.GetAddress().String(), Amount: sdk.NewCoins(ibctesting.TestCoin), } - data, err := icatypes.SerializeCosmosTx(suite.chainA.GetSimApp().AppCodec(), []proto.Message{icaMsg}, icatypes.EncodingProtobuf) - suite.Require().NoError(err) + data, err := icatypes.SerializeCosmosTx(s.chainA.GetSimApp().AppCodec(), []proto.Message{icaMsg}, icatypes.EncodingProtobuf) + s.Require().NoError(err) packetData := icatypes.InterchainAccountPacketData{ Type: icatypes.EXECUTE_TX, @@ -178,23 +178,23 @@ func (suite *KeeperTestSuite) TestSubmitTx() { Memo: "memo", } - timeoutTimestamp := uint64(suite.chainA.GetContext().BlockTime().Add(time.Minute).UnixNano()) + timeoutTimestamp := uint64(s.chainA.GetContext().BlockTime().Add(time.Minute).UnixNano()) connectionID := path.EndpointA.ConnectionID msg = types.NewMsgSendTx(owner, connectionID, timeoutTimestamp, packetData) tc.malleate() // malleate mutates test data - ctx := suite.chainA.GetContext() - msgServer := keeper.NewMsgServerImpl(&suite.chainA.GetSimApp().ICAControllerKeeper) + ctx := s.chainA.GetContext() + msgServer := keeper.NewMsgServerImpl(s.chainA.GetSimApp().ICAControllerKeeper) res, err := msgServer.SendTx(ctx, msg) if tc.expErr == nil { - suite.Require().NoError(err) - suite.Require().NotNil(res) + s.Require().NoError(err) + s.Require().NotNil(res) } else { - suite.Require().ErrorIs(err, tc.expErr) - suite.Require().Nil(res) + s.Require().ErrorIs(err, tc.expErr) + s.Require().Nil(res) } }) } @@ -202,8 +202,8 @@ func (suite *KeeperTestSuite) TestSubmitTx() { } // TestUpdateParams tests UpdateParams rpc handler -func (suite *KeeperTestSuite) TestUpdateParams() { - signer := suite.chainA.GetSimApp().TransferKeeper.GetAuthority() +func (s *KeeperTestSuite) TestUpdateParams() { + signer := s.chainA.GetSimApp().TransferKeeper.GetAuthority() testCases := []struct { name string msg *types.MsgUpdateParams @@ -237,15 +237,15 @@ func (suite *KeeperTestSuite) TestUpdateParams() { } for _, tc := range testCases { - suite.Run(tc.name, func() { - suite.SetupTest() - _, err := suite.chainA.GetSimApp().ICAControllerKeeper.UpdateParams(suite.chainA.GetContext(), tc.msg) + s.Run(tc.name, func() { + s.SetupTest() + _, err := s.chainA.GetSimApp().ICAControllerKeeper.UpdateParams(s.chainA.GetContext(), tc.msg) if 
tc.expErr == nil { - suite.Require().NoError(err) - p := suite.chainA.GetSimApp().ICAControllerKeeper.GetParams(suite.chainA.GetContext()) - suite.Require().Equal(tc.msg.Params, p) + s.Require().NoError(err) + p := s.chainA.GetSimApp().ICAControllerKeeper.GetParams(s.chainA.GetContext()) + s.Require().Equal(tc.msg.Params, p) } else { - suite.Require().ErrorIs(err, tc.expErr) + s.Require().ErrorIs(err, tc.expErr) } }) } diff --git a/modules/apps/27-interchain-accounts/controller/keeper/relay.go b/modules/apps/27-interchain-accounts/controller/keeper/relay.go index 1aad843691b..c27d2b0d48c 100644 --- a/modules/apps/27-interchain-accounts/controller/keeper/relay.go +++ b/modules/apps/27-interchain-accounts/controller/keeper/relay.go @@ -20,13 +20,13 @@ import ( // Prior to v6.x.x of ibc-go, the controller module was only functional as middleware, with authentication performed // by the underlying application. For a full summary of the changes in v6.x.x, please see ADR009. // This API will be removed in later releases. -func (k Keeper) SendTx(ctx sdk.Context, connectionID, portID string, +func (k *Keeper) SendTx(ctx sdk.Context, connectionID, portID string, icaPacketData icatypes.InterchainAccountPacketData, timeoutTimestamp uint64, ) (uint64, error) { return k.sendTx(ctx, connectionID, portID, icaPacketData, timeoutTimestamp) } -func (k Keeper) sendTx(ctx sdk.Context, connectionID, portID string, +func (k *Keeper) sendTx(ctx sdk.Context, connectionID, portID string, icaPacketData icatypes.InterchainAccountPacketData, timeoutTimestamp uint64, ) (uint64, error) { if !k.GetParams(ctx).ControllerEnabled { @@ -55,6 +55,6 @@ func (k Keeper) sendTx(ctx sdk.Context, connectionID, portID string, // OnTimeoutPacket removes the active channel associated with the provided packet, the underlying channel end is closed // due to the semantics of ORDERED channels -func (Keeper) OnTimeoutPacket(ctx sdk.Context, packet channeltypes.Packet) error { +func (*Keeper) OnTimeoutPacket(ctx sdk.Context, packet channeltypes.Packet) error { return nil } diff --git a/modules/apps/27-interchain-accounts/controller/keeper/relay_test.go b/modules/apps/27-interchain-accounts/controller/keeper/relay_test.go index 496b149d414..563e35eca5b 100644 --- a/modules/apps/27-interchain-accounts/controller/keeper/relay_test.go +++ b/modules/apps/27-interchain-accounts/controller/keeper/relay_test.go @@ -13,7 +13,7 @@ import ( ibctesting "github.com/cosmos/ibc-go/v10/testing" ) -func (suite *KeeperTestSuite) TestSendTx() { +func (s *KeeperTestSuite) TestSendTx() { var ( path *ibctesting.Path packetData icatypes.InterchainAccountPacketData @@ -28,17 +28,17 @@ func (suite *KeeperTestSuite) TestSendTx() { { "success", func() { - interchainAccountAddr, found := suite.chainA.GetSimApp().ICAControllerKeeper.GetInterchainAccountAddress(suite.chainA.GetContext(), ibctesting.FirstConnectionID, path.EndpointA.ChannelConfig.PortID) - suite.Require().True(found) + interchainAccountAddr, found := s.chainA.GetSimApp().ICAControllerKeeper.GetInterchainAccountAddress(s.chainA.GetContext(), ibctesting.FirstConnectionID, path.EndpointA.ChannelConfig.PortID) + s.Require().True(found) msg := &banktypes.MsgSend{ FromAddress: interchainAccountAddr, - ToAddress: suite.chainB.SenderAccount.GetAddress().String(), + ToAddress: s.chainB.SenderAccount.GetAddress().String(), Amount: sdk.NewCoins(ibctesting.TestCoin), } - data, err := icatypes.SerializeCosmosTx(suite.chainB.GetSimApp().AppCodec(), []proto.Message{msg}, icatypes.EncodingProtobuf) - 
suite.Require().NoError(err) + data, err := icatypes.SerializeCosmosTx(s.chainB.GetSimApp().AppCodec(), []proto.Message{msg}, icatypes.EncodingProtobuf) + s.Require().NoError(err) packetData = icatypes.InterchainAccountPacketData{ Type: icatypes.EXECUTE_TX, @@ -50,24 +50,24 @@ func (suite *KeeperTestSuite) TestSendTx() { { "success with multiple sdk.Msg", func() { - interchainAccountAddr, found := suite.chainA.GetSimApp().ICAControllerKeeper.GetInterchainAccountAddress(suite.chainA.GetContext(), ibctesting.FirstConnectionID, path.EndpointA.ChannelConfig.PortID) - suite.Require().True(found) + interchainAccountAddr, found := s.chainA.GetSimApp().ICAControllerKeeper.GetInterchainAccountAddress(s.chainA.GetContext(), ibctesting.FirstConnectionID, path.EndpointA.ChannelConfig.PortID) + s.Require().True(found) msgsBankSend := []proto.Message{ &banktypes.MsgSend{ FromAddress: interchainAccountAddr, - ToAddress: suite.chainB.SenderAccount.GetAddress().String(), + ToAddress: s.chainB.SenderAccount.GetAddress().String(), Amount: sdk.NewCoins(ibctesting.TestCoin), }, &banktypes.MsgSend{ FromAddress: interchainAccountAddr, - ToAddress: suite.chainB.SenderAccount.GetAddress().String(), + ToAddress: s.chainB.SenderAccount.GetAddress().String(), Amount: sdk.NewCoins(ibctesting.TestCoin), }, } - data, err := icatypes.SerializeCosmosTx(suite.chainB.GetSimApp().AppCodec(), msgsBankSend, icatypes.EncodingProtobuf) - suite.Require().NoError(err) + data, err := icatypes.SerializeCosmosTx(s.chainB.GetSimApp().AppCodec(), msgsBankSend, icatypes.EncodingProtobuf) + s.Require().NoError(err) packetData = icatypes.InterchainAccountPacketData{ Type: icatypes.EXECUTE_TX, @@ -110,31 +110,31 @@ func (suite *KeeperTestSuite) TestSendTx() { { "controller submodule disabled", func() { - suite.chainA.GetSimApp().ICAControllerKeeper.SetParams(suite.chainA.GetContext(), types.NewParams(false)) + s.chainA.GetSimApp().ICAControllerKeeper.SetParams(s.chainA.GetContext(), types.NewParams(false)) }, types.ErrControllerSubModuleDisabled, }, { "timeout timestamp is not in the future", func() { - interchainAccountAddr, found := suite.chainA.GetSimApp().ICAControllerKeeper.GetInterchainAccountAddress(suite.chainA.GetContext(), ibctesting.FirstConnectionID, path.EndpointA.ChannelConfig.PortID) - suite.Require().True(found) + interchainAccountAddr, found := s.chainA.GetSimApp().ICAControllerKeeper.GetInterchainAccountAddress(s.chainA.GetContext(), ibctesting.FirstConnectionID, path.EndpointA.ChannelConfig.PortID) + s.Require().True(found) msg := &banktypes.MsgSend{ FromAddress: interchainAccountAddr, - ToAddress: suite.chainB.SenderAccount.GetAddress().String(), + ToAddress: s.chainB.SenderAccount.GetAddress().String(), Amount: sdk.NewCoins(ibctesting.TestCoin), } - data, err := icatypes.SerializeCosmosTx(suite.chainB.GetSimApp().AppCodec(), []proto.Message{msg}, icatypes.EncodingProtobuf) - suite.Require().NoError(err) + data, err := icatypes.SerializeCosmosTx(s.chainB.GetSimApp().AppCodec(), []proto.Message{msg}, icatypes.EncodingProtobuf) + s.Require().NoError(err) packetData = icatypes.InterchainAccountPacketData{ Type: icatypes.EXECUTE_TX, Data: data, } - timeoutTimestamp = uint64(suite.chainA.GetContext().BlockTime().UnixNano()) + timeoutTimestamp = uint64(s.chainA.GetContext().BlockTime().UnixNano()) }, icatypes.ErrInvalidTimeoutTimestamp, }, @@ -142,32 +142,32 @@ func (suite *KeeperTestSuite) TestSendTx() { for _, ordering := range []channeltypes.Order{channeltypes.UNORDERED, channeltypes.ORDERED} { for _, tc := range testCases 
{ - suite.Run(tc.msg, func() { - suite.SetupTest() // reset + s.Run(tc.msg, func() { + s.SetupTest() // reset timeoutTimestamp = ^uint64(0) // default - path = NewICAPath(suite.chainA, suite.chainB, ordering) + path = NewICAPath(s.chainA, s.chainB, ordering) path.SetupConnections() err := SetupICAPath(path, TestOwnerAddress) - suite.Require().NoError(err) + s.Require().NoError(err) tc.malleate() // malleate mutates test data // nolint: staticcheck // SA1019: ibctesting.FirstConnectionID is deprecated: use path.EndpointA.ConnectionID instead. (staticcheck) - _, err = suite.chainA.GetSimApp().ICAControllerKeeper.SendTx(suite.chainA.GetContext(), ibctesting.FirstConnectionID, path.EndpointA.ChannelConfig.PortID, packetData, timeoutTimestamp) + _, err = s.chainA.GetSimApp().ICAControllerKeeper.SendTx(s.chainA.GetContext(), ibctesting.FirstConnectionID, path.EndpointA.ChannelConfig.PortID, packetData, timeoutTimestamp) if tc.expErr == nil { - suite.Require().NoError(err) + s.Require().NoError(err) } else { - suite.Require().ErrorIs(err, tc.expErr) + s.Require().ErrorIs(err, tc.expErr) } }) } } } -func (suite *KeeperTestSuite) TestOnTimeoutPacket() { +func (s *KeeperTestSuite) TestOnTimeoutPacket() { var path *ibctesting.Path testCases := []struct { @@ -184,14 +184,14 @@ func (suite *KeeperTestSuite) TestOnTimeoutPacket() { for _, ordering := range []channeltypes.Order{channeltypes.UNORDERED, channeltypes.ORDERED} { for _, tc := range testCases { - suite.Run(tc.msg, func() { - suite.SetupTest() // reset + s.Run(tc.msg, func() { + s.SetupTest() // reset - path = NewICAPath(suite.chainA, suite.chainB, ordering) + path = NewICAPath(s.chainA, s.chainB, ordering) path.SetupConnections() err := SetupICAPath(path, TestOwnerAddress) - suite.Require().NoError(err) + s.Require().NoError(err) tc.malleate() // malleate mutates test data @@ -206,12 +206,12 @@ func (suite *KeeperTestSuite) TestOnTimeoutPacket() { 0, ) - err = suite.chainA.GetSimApp().ICAControllerKeeper.OnTimeoutPacket(suite.chainA.GetContext(), packet) + err = s.chainA.GetSimApp().ICAControllerKeeper.OnTimeoutPacket(s.chainA.GetContext(), packet) if tc.expErr == nil { - suite.Require().NoError(err) + s.Require().NoError(err) } else { - suite.Require().Error(err) + s.Require().Error(err) } }) } diff --git a/modules/apps/27-interchain-accounts/controller/types/keys.go b/modules/apps/27-interchain-accounts/controller/types/keys.go index 9be0eb62813..4d11722dfd1 100644 --- a/modules/apps/27-interchain-accounts/controller/types/keys.go +++ b/modules/apps/27-interchain-accounts/controller/types/keys.go @@ -10,3 +10,5 @@ const ( // ParamsKey is the store key for the interchain accounts controller parameters ParamsKey = "params" ) + +var KeyControllerEnabled = []byte("ControllerEnabled") diff --git a/modules/apps/27-interchain-accounts/controller/types/msgs_test.go b/modules/apps/27-interchain-accounts/controller/types/msgs_test.go index 49fd232347c..8757bb32710 100644 --- a/modules/apps/27-interchain-accounts/controller/types/msgs_test.go +++ b/modules/apps/27-interchain-accounts/controller/types/msgs_test.go @@ -250,7 +250,6 @@ func TestMsgUpdateParamsGetSigners(t *testing.T) { } for _, tc := range testCases { - msg := types.MsgUpdateParams{ Signer: tc.address.String(), Params: types.DefaultParams(), @@ -264,6 +263,5 @@ func TestMsgUpdateParamsGetSigners(t *testing.T) { } else { require.ErrorContains(t, err, tc.expErr.Error()) } - } } diff --git a/modules/apps/27-interchain-accounts/controller/types/params_legacy.go 
b/modules/apps/27-interchain-accounts/controller/types/params_legacy.go deleted file mode 100644 index c6c3ba61d50..00000000000 --- a/modules/apps/27-interchain-accounts/controller/types/params_legacy.go +++ /dev/null @@ -1,37 +0,0 @@ -/* -NOTE: Usage of x/params to manage parameters is deprecated in favor of x/gov -controlled execution of MsgUpdateParams messages. These types remains solely -for migration purposes and will be removed in a future release. -[#3621](https://github.com/cosmos/ibc-go/issues/3621) -*/ -package types - -import ( - "fmt" - - paramtypes "github.com/cosmos/cosmos-sdk/x/params/types" -) - -// KeyControllerEnabled is the store key for ControllerEnabled Params -var KeyControllerEnabled = []byte("ControllerEnabled") - -// ParamKeyTable type declaration for parameters -func ParamKeyTable() paramtypes.KeyTable { - return paramtypes.NewKeyTable().RegisterParamSet(&Params{}) -} - -// ParamSetPairs implements params.ParamSet -func (p *Params) ParamSetPairs() paramtypes.ParamSetPairs { - return paramtypes.ParamSetPairs{ - paramtypes.NewParamSetPair(KeyControllerEnabled, &p.ControllerEnabled, validateEnabledTypeLegacy), - } -} - -func validateEnabledTypeLegacy(i any) error { - _, ok := i.(bool) - if !ok { - return fmt.Errorf("invalid parameter type: %T", i) - } - - return nil -} diff --git a/modules/apps/27-interchain-accounts/controller/types/query.pb.go b/modules/apps/27-interchain-accounts/controller/types/query.pb.go index fdbdf3ebc11..56ea62cf9fa 100644 --- a/modules/apps/27-interchain-accounts/controller/types/query.pb.go +++ b/modules/apps/27-interchain-accounts/controller/types/query.pb.go @@ -355,6 +355,7 @@ func _Query_Params_Handler(srv interface{}, ctx context.Context, dec func(interf return interceptor(ctx, in, info, handler) } +var Query_serviceDesc = _Query_serviceDesc var _Query_serviceDesc = grpc.ServiceDesc{ ServiceName: "ibc.applications.interchain_accounts.controller.v1.Query", HandlerType: (*QueryServer)(nil), diff --git a/modules/apps/27-interchain-accounts/controller/types/tx.pb.go b/modules/apps/27-interchain-accounts/controller/types/tx.pb.go index 7fcb1e4a69d..ca7348fa398 100644 --- a/modules/apps/27-interchain-accounts/controller/types/tx.pb.go +++ b/modules/apps/27-interchain-accounts/controller/types/tx.pb.go @@ -468,6 +468,7 @@ func _Msg_UpdateParams_Handler(srv interface{}, ctx context.Context, dec func(in return interceptor(ctx, in, info, handler) } +var Msg_serviceDesc = _Msg_serviceDesc var _Msg_serviceDesc = grpc.ServiceDesc{ ServiceName: "ibc.applications.interchain_accounts.controller.v1.Msg", HandlerType: (*MsgServer)(nil), diff --git a/modules/apps/27-interchain-accounts/genesis/types/genesis_test.go b/modules/apps/27-interchain-accounts/genesis/types/genesis_test.go index 4e1296647b4..b85f193bb93 100644 --- a/modules/apps/27-interchain-accounts/genesis/types/genesis_test.go +++ b/modules/apps/27-interchain-accounts/genesis/types/genesis_test.go @@ -29,7 +29,7 @@ func TestGenesisTypesTestSuite(t *testing.T) { testifysuite.Run(t, new(GenesisTypesTestSuite)) } -func (suite *GenesisTypesTestSuite) TestValidateGenesisState() { +func (s *GenesisTypesTestSuite) TestValidateGenesisState() { var genesisState genesistypes.GenesisState testCases := []struct { @@ -66,7 +66,7 @@ func (suite *GenesisTypesTestSuite) TestValidateGenesisState() { } for _, tc := range testCases { - suite.Run(tc.name, func() { + s.Run(tc.name, func() { genesisState = *genesistypes.DefaultGenesis() tc.malleate() // malleate mutates test data @@ -74,16 +74,16 @@ func (suite 
*GenesisTypesTestSuite) TestValidateGenesisState() { err := genesisState.Validate() if tc.expErr == nil { - suite.Require().NoError(err, tc.name) + s.Require().NoError(err, tc.name) } else { - suite.Require().Error(err, tc.name) - suite.Require().ErrorIs(err, tc.expErr) + s.Require().Error(err, tc.name) + s.Require().ErrorIs(err, tc.expErr) } }) } } -func (suite *GenesisTypesTestSuite) TestValidateControllerGenesisState() { +func (s *GenesisTypesTestSuite) TestValidateControllerGenesisState() { var genesisState genesistypes.ControllerGenesisState testCases := []struct { @@ -190,7 +190,7 @@ func (suite *GenesisTypesTestSuite) TestValidateControllerGenesisState() { } for _, tc := range testCases { - suite.Run(tc.name, func() { + s.Run(tc.name, func() { genesisState = genesistypes.DefaultControllerGenesis() tc.malleate() // malleate mutates test data @@ -198,16 +198,16 @@ func (suite *GenesisTypesTestSuite) TestValidateControllerGenesisState() { err := genesisState.Validate() if tc.expErr == nil { - suite.Require().NoError(err, tc.name) + s.Require().NoError(err, tc.name) } else { - suite.Require().Error(err, tc.name) - suite.Require().ErrorIs(err, tc.expErr) + s.Require().Error(err, tc.name) + s.Require().ErrorIs(err, tc.expErr) } }) } } -func (suite *GenesisTypesTestSuite) TestValidateHostGenesisState() { +func (s *GenesisTypesTestSuite) TestValidateHostGenesisState() { var genesisState genesistypes.HostGenesisState testCases := []struct { @@ -314,7 +314,7 @@ func (suite *GenesisTypesTestSuite) TestValidateHostGenesisState() { } for _, tc := range testCases { - suite.Run(tc.name, func() { + s.Run(tc.name, func() { genesisState = genesistypes.DefaultHostGenesis() tc.malleate() // malleate mutates test data @@ -322,10 +322,10 @@ func (suite *GenesisTypesTestSuite) TestValidateHostGenesisState() { err := genesisState.Validate() if tc.expErr == nil { - suite.Require().NoError(err, tc.name) + s.Require().NoError(err, tc.name) } else { - suite.Require().Error(err, tc.name) - suite.Require().ErrorIs(err, tc.expErr) + s.Require().Error(err, tc.name) + s.Require().ErrorIs(err, tc.expErr) } }) } diff --git a/modules/apps/27-interchain-accounts/host/client/cli/tx.go b/modules/apps/27-interchain-accounts/host/client/cli/tx.go index c537ec82373..b2e3c8238fb 100644 --- a/modules/apps/27-interchain-accounts/host/client/cli/tx.go +++ b/modules/apps/27-interchain-accounts/host/client/cli/tx.go @@ -86,7 +86,7 @@ otherwise the encoding flag can be used in combination with either "proto3" or " return fmt.Errorf("unsupported encoding type: %s", encoding) } - packetDataBytes, err := generatePacketData(cdc, []byte(args[0]), memo, encoding) + packetDataBytes, err := GeneratePacketData(cdc, []byte(args[0]), memo, encoding) if err != nil { return err } @@ -102,9 +102,9 @@ otherwise the encoding flag can be used in combination with either "proto3" or " return cmd } -// generatePacketData takes in message bytes and a memo and serializes the message into an +// GeneratePacketData takes in message bytes and a memo and serializes the message into an // instance of InterchainAccountPacketData which is returned as bytes. 
-func generatePacketData(cdc *codec.ProtoCodec, msgBytes []byte, memo string, encoding string) ([]byte, error) { +func GeneratePacketData(cdc *codec.ProtoCodec, msgBytes []byte, memo string, encoding string) ([]byte, error) { protoMessages, err := convertBytesIntoProtoMessages(cdc, msgBytes) if err != nil { return nil, err diff --git a/modules/apps/27-interchain-accounts/host/client/cli/tx_test.go b/modules/apps/27-interchain-accounts/host/client/cli/tx_test.go index 5ddd88fbce5..396190dede5 100644 --- a/modules/apps/27-interchain-accounts/host/client/cli/tx_test.go +++ b/modules/apps/27-interchain-accounts/host/client/cli/tx_test.go @@ -1,4 +1,4 @@ -package cli +package cli_test import ( "fmt" @@ -12,6 +12,7 @@ import ( banktypes "github.com/cosmos/cosmos-sdk/x/bank/types" stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types" + "github.com/cosmos/ibc-go/v10/modules/apps/27-interchain-accounts/host/client/cli" icatypes "github.com/cosmos/ibc-go/v10/modules/apps/27-interchain-accounts/types" ) @@ -104,7 +105,6 @@ func TestGeneratePacketData(t *testing.T) { encodings := []string{icatypes.EncodingProtobuf, icatypes.EncodingProto3JSON} for _, encoding := range encodings { for _, tc := range tests { - ir := codectypes.NewInterfaceRegistry() if tc.registerInterfaceFn != nil { tc.registerInterfaceFn(ir) @@ -113,7 +113,7 @@ func TestGeneratePacketData(t *testing.T) { cdc := codec.NewProtoCodec(ir) t.Run(fmt.Sprintf("%s with %s encoding", tc.name, encoding), func(t *testing.T) { - bz, err := generatePacketData(cdc, []byte(tc.message), tc.memo, encoding) + bz, err := cli.GeneratePacketData(cdc, []byte(tc.message), tc.memo, encoding) if tc.expectedPass { require.NoError(t, err) diff --git a/modules/apps/27-interchain-accounts/host/ibc_module.go b/modules/apps/27-interchain-accounts/host/ibc_module.go index 484fc1b451e..7d88dcd5c8f 100644 --- a/modules/apps/27-interchain-accounts/host/ibc_module.go +++ b/modules/apps/27-interchain-accounts/host/ibc_module.go @@ -23,18 +23,18 @@ var ( // IBCModule implements the ICS26 interface for interchain accounts host chains type IBCModule struct { - keeper keeper.Keeper + keeper *keeper.Keeper } // NewIBCModule creates a new IBCModule given the associated keeper -func NewIBCModule(k keeper.Keeper) IBCModule { - return IBCModule{ +func NewIBCModule(k *keeper.Keeper) *IBCModule { + return &IBCModule{ keeper: k, } } // OnChanOpenInit implements the IBCModule interface -func (IBCModule) OnChanOpenInit( +func (*IBCModule) OnChanOpenInit( _ sdk.Context, _ channeltypes.Order, _ []string, @@ -47,7 +47,7 @@ func (IBCModule) OnChanOpenInit( } // OnChanOpenTry implements the IBCModule interface -func (im IBCModule) OnChanOpenTry( +func (im *IBCModule) OnChanOpenTry( ctx sdk.Context, order channeltypes.Order, connectionHops []string, @@ -64,7 +64,7 @@ func (im IBCModule) OnChanOpenTry( } // OnChanOpenAck implements the IBCModule interface -func (IBCModule) OnChanOpenAck( +func (*IBCModule) OnChanOpenAck( _ sdk.Context, _, _ string, @@ -75,7 +75,7 @@ func (IBCModule) OnChanOpenAck( } // OnChanOpenConfirm implements the IBCModule interface -func (im IBCModule) OnChanOpenConfirm( +func (im *IBCModule) OnChanOpenConfirm( ctx sdk.Context, portID, channelID string, @@ -88,7 +88,7 @@ func (im IBCModule) OnChanOpenConfirm( } // OnChanCloseInit implements the IBCModule interface -func (IBCModule) OnChanCloseInit( +func (*IBCModule) OnChanCloseInit( _ sdk.Context, _ string, _ string, @@ -98,7 +98,7 @@ func (IBCModule) OnChanCloseInit( } // OnChanCloseConfirm implements the 
IBCModule interface -func (im IBCModule) OnChanCloseConfirm( +func (im *IBCModule) OnChanCloseConfirm( ctx sdk.Context, portID, channelID string, @@ -107,7 +107,7 @@ func (im IBCModule) OnChanCloseConfirm( } // OnRecvPacket implements the IBCModule interface -func (im IBCModule) OnRecvPacket( +func (im *IBCModule) OnRecvPacket( ctx sdk.Context, _ string, packet channeltypes.Packet, @@ -136,7 +136,7 @@ func (im IBCModule) OnRecvPacket( } // OnAcknowledgementPacket implements the IBCModule interface -func (IBCModule) OnAcknowledgementPacket( +func (*IBCModule) OnAcknowledgementPacket( _ sdk.Context, _ string, _ channeltypes.Packet, @@ -147,7 +147,7 @@ func (IBCModule) OnAcknowledgementPacket( } // OnTimeoutPacket implements the IBCModule interface -func (IBCModule) OnTimeoutPacket( +func (*IBCModule) OnTimeoutPacket( _ sdk.Context, _ string, _ channeltypes.Packet, @@ -159,7 +159,7 @@ func (IBCModule) OnTimeoutPacket( // UnmarshalPacketData attempts to unmarshal the provided packet data bytes // into an InterchainAccountPacketData. This function implements the optional // PacketDataUnmarshaler interface required for ADR 008 support. -func (im IBCModule) UnmarshalPacketData(ctx sdk.Context, portID string, channelID string, bz []byte) (any, string, error) { +func (im *IBCModule) UnmarshalPacketData(ctx sdk.Context, portID string, channelID string, bz []byte) (any, string, error) { var data icatypes.InterchainAccountPacketData err := data.UnmarshalJSON(bz) if err != nil { @@ -173,3 +173,11 @@ func (im IBCModule) UnmarshalPacketData(ctx sdk.Context, portID string, channelI return data, version, nil } + +// SetICS4Wrapper sets the ICS4Wrapper for the IBCModule. +func (im *IBCModule) SetICS4Wrapper(wrapper porttypes.ICS4Wrapper) { + if wrapper == nil { + panic("ICS4Wrapper cannot be nil") + } + im.keeper.WithICS4Wrapper(wrapper) +} diff --git a/modules/apps/27-interchain-accounts/host/ibc_module_test.go b/modules/apps/27-interchain-accounts/host/ibc_module_test.go index 88ea85dd032..fddc39749bb 100644 --- a/modules/apps/27-interchain-accounts/host/ibc_module_test.go +++ b/modules/apps/27-interchain-accounts/host/ibc_module_test.go @@ -55,10 +55,10 @@ func TestICATestSuite(t *testing.T) { testifysuite.Run(t, new(InterchainAccountsTestSuite)) } -func (suite *InterchainAccountsTestSuite) SetupTest() { - suite.coordinator = ibctesting.NewCoordinator(suite.T(), 2) - suite.chainA = suite.coordinator.GetChain(ibctesting.GetChainID(1)) - suite.chainB = suite.coordinator.GetChain(ibctesting.GetChainID(2)) +func (s *InterchainAccountsTestSuite) SetupTest() { + s.coordinator = ibctesting.NewCoordinator(s.T(), 2) + s.chainA = s.coordinator.GetChain(ibctesting.GetChainID(1)) + s.chainB = s.coordinator.GetChain(ibctesting.GetChainID(2)) } func NewICAPath(chainA, chainB *ibctesting.TestChain, ordering channeltypes.Order) *ibctesting.Path { @@ -112,24 +112,44 @@ func SetupICAPath(path *ibctesting.Path, owner string) error { return path.EndpointB.ChanOpenConfirm() } +func (s *InterchainAccountsTestSuite) TestSetICS4Wrapper() { + s.SetupTest() // reset + + module := icahost.NewIBCModule(s.chainB.GetSimApp().ICAHostKeeper) + + s.Require().Panics(func() { + module.SetICS4Wrapper(nil) + }, "ICS4Wrapper should not be nil") + + // set ICS4Wrapper + s.Require().NotPanics(func() { + module.SetICS4Wrapper(s.chainB.GetSimApp().IBCKeeper.ChannelKeeper) + }) + + // verify ICS4Wrapper is set + ics4Wrapper := s.chainB.GetSimApp().ICAHostKeeper.GetICS4Wrapper() + s.Require().NotNil(ics4Wrapper) + 
s.Require().Equal(s.chainB.GetSimApp().IBCKeeper.ChannelKeeper, ics4Wrapper) +} + // Test initiating a ChanOpenInit using the host chain instead of the controller chain // ChainA is the controller chain. ChainB is the host chain -func (suite *InterchainAccountsTestSuite) TestChanOpenInit() { +func (s *InterchainAccountsTestSuite) TestChanOpenInit() { for _, ordering := range []channeltypes.Order{channeltypes.UNORDERED, channeltypes.ORDERED} { - suite.SetupTest() // reset - path := NewICAPath(suite.chainA, suite.chainB, ordering) + s.SetupTest() // reset + path := NewICAPath(s.chainA, s.chainB, ordering) path.SetupConnections() // use chainB (host) for ChanOpenInit msg := channeltypes.NewMsgChannelOpenInit(path.EndpointB.ChannelConfig.PortID, icatypes.Version, ordering, []string{path.EndpointB.ConnectionID}, path.EndpointA.ChannelConfig.PortID, icatypes.ModuleName) - handler := suite.chainB.GetSimApp().MsgServiceRouter().Handler(msg) - _, err := handler(suite.chainB.GetContext(), msg) + handler := s.chainB.GetSimApp().MsgServiceRouter().Handler(msg) + _, err := handler(s.chainB.GetContext(), msg) - suite.Require().Error(err) + s.Require().Error(err) } } -func (suite *InterchainAccountsTestSuite) TestOnChanOpenTry() { +func (s *InterchainAccountsTestSuite) TestOnChanOpenTry() { var ( path *ibctesting.Path channel *channeltypes.Channel @@ -145,19 +165,19 @@ func (suite *InterchainAccountsTestSuite) TestOnChanOpenTry() { }, { "account address generation is block dependent", func() { - icaHostAccount := icatypes.GenerateAddress(suite.chainB.GetContext(), path.EndpointB.ConnectionID, path.EndpointA.ChannelConfig.PortID) - err := suite.chainB.GetSimApp().BankKeeper.SendCoins(suite.chainB.GetContext(), suite.chainB.SenderAccount.GetAddress(), icaHostAccount, sdk.Coins{sdk.NewCoin("stake", sdkmath.NewInt(1))}) - suite.Require().NoError(err) - suite.Require().True(suite.chainB.GetSimApp().AccountKeeper.HasAccount(suite.chainB.GetContext(), icaHostAccount)) + icaHostAccount := icatypes.GenerateAddress(s.chainB.GetContext(), path.EndpointB.ConnectionID, path.EndpointA.ChannelConfig.PortID) + err := s.chainB.GetSimApp().BankKeeper.SendCoins(s.chainB.GetContext(), s.chainB.SenderAccount.GetAddress(), icaHostAccount, sdk.Coins{sdk.NewCoin("stake", sdkmath.NewInt(1))}) + s.Require().NoError(err) + s.Require().True(s.chainB.GetSimApp().AccountKeeper.HasAccount(s.chainB.GetContext(), icaHostAccount)) // ensure account registration is simulated in a separate block - suite.chainB.NextBlock() + s.chainB.NextBlock() }, nil, }, { "success: ICA auth module callback returns error", func() { // mock module callback should not be called on host side - suite.chainB.GetSimApp().ICAAuthModule.IBCApp.OnChanOpenTry = func(ctx sdk.Context, order channeltypes.Order, connectionHops []string, + s.chainB.GetSimApp().ICAAuthModule.IBCApp.OnChanOpenTry = func(ctx sdk.Context, order channeltypes.Order, connectionHops []string, portID, channelID string, counterparty channeltypes.Counterparty, counterpartyVersion string, ) (string, error) { @@ -167,21 +187,21 @@ func (suite *InterchainAccountsTestSuite) TestOnChanOpenTry() { }, { "host submodule disabled", func() { - suite.chainB.GetSimApp().ICAHostKeeper.SetParams(suite.chainB.GetContext(), types.NewParams(false, []string{})) + s.chainB.GetSimApp().ICAHostKeeper.SetParams(s.chainB.GetContext(), types.NewParams(false, []string{})) }, types.ErrHostSubModuleDisabled, }, } for _, ordering := range []channeltypes.Order{channeltypes.UNORDERED, channeltypes.ORDERED} { for _, tc := range 
testCases { - suite.Run(tc.name, func() { - suite.SetupTest() // reset + s.Run(tc.name, func() { + s.SetupTest() // reset - path = NewICAPath(suite.chainA, suite.chainB, ordering) + path = NewICAPath(s.chainA, s.chainB, ordering) path.SetupConnections() err := RegisterInterchainAccount(path.EndpointA, TestOwnerAddress) - suite.Require().NoError(err) + s.Require().NoError(err) path.EndpointB.ChannelID = ibctesting.FirstChannelID // default values @@ -197,24 +217,24 @@ func (suite *InterchainAccountsTestSuite) TestOnChanOpenTry() { tc.malleate() // ensure channel on chainB is set in state - suite.chainB.GetSimApp().IBCKeeper.ChannelKeeper.SetChannel(suite.chainB.GetContext(), path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, *channel) + s.chainB.GetSimApp().IBCKeeper.ChannelKeeper.SetChannel(s.chainB.GetContext(), path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, *channel) - cbs, ok := suite.chainB.App.GetIBCKeeper().PortKeeper.Route(path.EndpointB.ChannelConfig.PortID) - suite.Require().True(ok) + cbs, ok := s.chainB.App.GetIBCKeeper().PortKeeper.Route(path.EndpointB.ChannelConfig.PortID) + s.Require().True(ok) - version, err := cbs.OnChanOpenTry(suite.chainB.GetContext(), channel.Ordering, channel.ConnectionHops, + version, err := cbs.OnChanOpenTry(s.chainB.GetContext(), channel.Ordering, channel.ConnectionHops, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, channel.Counterparty, path.EndpointA.ChannelConfig.Version, ) if tc.expErr == nil { - suite.Require().NoError(err) + s.Require().NoError(err) - addr, exists := suite.chainB.GetSimApp().ICAHostKeeper.GetInterchainAccountAddress(suite.chainB.GetContext(), path.EndpointB.ConnectionID, counterparty.PortId) - suite.Require().True(exists) - suite.Require().NotNil(addr) + addr, exists := s.chainB.GetSimApp().ICAHostKeeper.GetInterchainAccountAddress(s.chainB.GetContext(), path.EndpointB.ConnectionID, counterparty.PortId) + s.Require().True(exists) + s.Require().NotNil(addr) } else { - suite.Require().ErrorIs(err, tc.expErr) - suite.Require().Equal("", version) + s.Require().ErrorIs(err, tc.expErr) + s.Require().Empty(version) } }) } @@ -223,27 +243,27 @@ func (suite *InterchainAccountsTestSuite) TestOnChanOpenTry() { // Test initiating a ChanOpenAck using the host chain instead of the controller chain // ChainA is the controller chain. 
ChainB is the host chain -func (suite *InterchainAccountsTestSuite) TestChanOpenAck() { +func (s *InterchainAccountsTestSuite) TestChanOpenAck() { for _, ordering := range []channeltypes.Order{channeltypes.UNORDERED, channeltypes.ORDERED} { - suite.SetupTest() // reset - path := NewICAPath(suite.chainA, suite.chainB, ordering) + s.SetupTest() // reset + path := NewICAPath(s.chainA, s.chainB, ordering) path.SetupConnections() err := RegisterInterchainAccount(path.EndpointA, TestOwnerAddress) - suite.Require().NoError(err) + s.Require().NoError(err) err = path.EndpointB.ChanOpenTry() - suite.Require().NoError(err) + s.Require().NoError(err) // chainA maliciously sets channel to TRYOPEN channel := channeltypes.NewChannel(channeltypes.TRYOPEN, channeltypes.ORDERED, channeltypes.NewCounterparty(path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID), []string{path.EndpointA.ConnectionID}, TestVersion) - suite.chainA.GetSimApp().GetIBCKeeper().ChannelKeeper.SetChannel(suite.chainA.GetContext(), path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, channel) + s.chainA.GetSimApp().GetIBCKeeper().ChannelKeeper.SetChannel(s.chainA.GetContext(), path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, channel) // commit state changes so proof can be created - suite.chainA.NextBlock() + s.chainA.NextBlock() err = path.EndpointB.UpdateClient() - suite.Require().NoError(err) + s.Require().NoError(err) // query proof from ChainA channelKey := host.ChannelKey(path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID) @@ -251,14 +271,14 @@ func (suite *InterchainAccountsTestSuite) TestChanOpenAck() { // use chainB (host) for ChanOpenAck msg := channeltypes.NewMsgChannelOpenAck(path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, path.EndpointA.ChannelID, TestVersion, tryProof, proofHeight, icatypes.ModuleName) - handler := suite.chainB.GetSimApp().MsgServiceRouter().Handler(msg) - _, err = handler(suite.chainB.GetContext(), msg) + handler := s.chainB.GetSimApp().MsgServiceRouter().Handler(msg) + _, err = handler(s.chainB.GetContext(), msg) - suite.Require().Error(err) + s.Require().Error(err) } } -func (suite *InterchainAccountsTestSuite) TestOnChanOpenConfirm() { +func (s *InterchainAccountsTestSuite) TestOnChanOpenConfirm() { testCases := []struct { name string malleate func() @@ -270,7 +290,7 @@ func (suite *InterchainAccountsTestSuite) TestOnChanOpenConfirm() { { "success: ICA auth module callback returns error", func() { // mock module callback should not be called on host side - suite.chainB.GetSimApp().ICAAuthModule.IBCApp.OnChanOpenConfirm = func( + s.chainB.GetSimApp().ICAAuthModule.IBCApp.OnChanOpenConfirm = func( ctx sdk.Context, portID, channelID string, ) error { return errors.New("mock ica auth fails") @@ -279,38 +299,38 @@ func (suite *InterchainAccountsTestSuite) TestOnChanOpenConfirm() { }, { "host submodule disabled", func() { - suite.chainB.GetSimApp().ICAHostKeeper.SetParams(suite.chainB.GetContext(), types.NewParams(false, []string{})) + s.chainB.GetSimApp().ICAHostKeeper.SetParams(s.chainB.GetContext(), types.NewParams(false, []string{})) }, types.ErrHostSubModuleDisabled, }, } for _, ordering := range []channeltypes.Order{channeltypes.UNORDERED, channeltypes.ORDERED} { for _, tc := range testCases { - suite.Run(tc.name, func() { - suite.SetupTest() - path := NewICAPath(suite.chainA, suite.chainB, ordering) + s.Run(tc.name, func() { + s.SetupTest() + path := NewICAPath(s.chainA, s.chainB, ordering) path.SetupConnections() err := 
RegisterInterchainAccount(path.EndpointA, TestOwnerAddress) - suite.Require().NoError(err) + s.Require().NoError(err) err = path.EndpointB.ChanOpenTry() - suite.Require().NoError(err) + s.Require().NoError(err) err = path.EndpointA.ChanOpenAck() - suite.Require().NoError(err) + s.Require().NoError(err) tc.malleate() - cbs, ok := suite.chainB.App.GetIBCKeeper().PortKeeper.Route(path.EndpointB.ChannelConfig.PortID) - suite.Require().True(ok) + cbs, ok := s.chainB.App.GetIBCKeeper().PortKeeper.Route(path.EndpointB.ChannelConfig.PortID) + s.Require().True(ok) - err = cbs.OnChanOpenConfirm(suite.chainB.GetContext(), path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID) + err = cbs.OnChanOpenConfirm(s.chainB.GetContext(), path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID) if tc.expErr == nil { - suite.Require().NoError(err) + s.Require().NoError(err) } else { - suite.Require().ErrorIs(err, tc.expErr) + s.Require().ErrorIs(err, tc.expErr) } }) } @@ -318,28 +338,28 @@ func (suite *InterchainAccountsTestSuite) TestOnChanOpenConfirm() { } // OnChanCloseInit on host (chainB) -func (suite *InterchainAccountsTestSuite) TestOnChanCloseInit() { +func (s *InterchainAccountsTestSuite) TestOnChanCloseInit() { for _, ordering := range []channeltypes.Order{channeltypes.UNORDERED, channeltypes.ORDERED} { - suite.SetupTest() // reset + s.SetupTest() // reset - path := NewICAPath(suite.chainA, suite.chainB, ordering) + path := NewICAPath(s.chainA, s.chainB, ordering) path.SetupConnections() err := SetupICAPath(path, TestOwnerAddress) - suite.Require().NoError(err) + s.Require().NoError(err) - cbs, ok := suite.chainB.App.GetIBCKeeper().PortKeeper.Route(path.EndpointB.ChannelConfig.PortID) - suite.Require().True(ok) + cbs, ok := s.chainB.App.GetIBCKeeper().PortKeeper.Route(path.EndpointB.ChannelConfig.PortID) + s.Require().True(ok) err = cbs.OnChanCloseInit( - suite.chainB.GetContext(), path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, + s.chainB.GetContext(), path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, ) - suite.Require().Error(err) + s.Require().Error(err) } } -func (suite *InterchainAccountsTestSuite) TestOnChanCloseConfirm() { +func (s *InterchainAccountsTestSuite) TestOnChanCloseConfirm() { var path *ibctesting.Path testCases := []struct { @@ -354,34 +374,34 @@ func (suite *InterchainAccountsTestSuite) TestOnChanCloseConfirm() { for _, ordering := range []channeltypes.Order{channeltypes.UNORDERED, channeltypes.ORDERED} { for _, tc := range testCases { - suite.Run(tc.name, func() { - suite.SetupTest() // reset + s.Run(tc.name, func() { + s.SetupTest() // reset - path = NewICAPath(suite.chainA, suite.chainB, ordering) + path = NewICAPath(s.chainA, s.chainB, ordering) path.SetupConnections() err := SetupICAPath(path, TestOwnerAddress) - suite.Require().NoError(err) + s.Require().NoError(err) tc.malleate() // malleate mutates test data - cbs, ok := suite.chainB.App.GetIBCKeeper().PortKeeper.Route(path.EndpointB.ChannelConfig.PortID) - suite.Require().True(ok) + cbs, ok := s.chainB.App.GetIBCKeeper().PortKeeper.Route(path.EndpointB.ChannelConfig.PortID) + s.Require().True(ok) err = cbs.OnChanCloseConfirm( - suite.chainB.GetContext(), path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID) + s.chainB.GetContext(), path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID) if tc.expErr == nil { - suite.Require().NoError(err) + s.Require().NoError(err) } else { - suite.Require().ErrorIs(err, tc.expErr) + s.Require().ErrorIs(err, tc.expErr) } }) } } } 
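For context on the host-side wiring changes above (NewIBCModule now takes and returns a pointer, and the new SetICS4Wrapper hook delegates to keeper.WithICS4Wrapper and panics on nil), the following is a minimal sketch of how an application might construct the module and rebind its ICS4 wrapper, for example when stacking middleware. The helper name newICAHostModule and the app package are illustrative and not part of this diff; only NewIBCModule and SetICS4Wrapper come from the changes shown here.

package app

import (
	icahost "github.com/cosmos/ibc-go/v10/modules/apps/27-interchain-accounts/host"
	icahostkeeper "github.com/cosmos/ibc-go/v10/modules/apps/27-interchain-accounts/host/keeper"
	porttypes "github.com/cosmos/ibc-go/v10/modules/core/05-port/types"
)

// newICAHostModule builds the ICA host IBCModule from a pointer keeper and
// swaps its ICS4 wrapper for the provided one (e.g. an outer middleware
// instead of the channel keeper the keeper was constructed with).
func newICAHostModule(k *icahostkeeper.Keeper, wrapper porttypes.ICS4Wrapper) *icahost.IBCModule {
	m := icahost.NewIBCModule(k)
	// SetICS4Wrapper panics on nil, so only call it with a concrete wrapper.
	m.SetICS4Wrapper(wrapper)
	return m
}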
-func (suite *InterchainAccountsTestSuite) TestOnRecvPacket() { +func (s *InterchainAccountsTestSuite) TestOnRecvPacket() { var packetData []byte testCases := []struct { name string @@ -394,13 +414,13 @@ func (suite *InterchainAccountsTestSuite) TestOnRecvPacket() { }, { "host submodule disabled", func() { - suite.chainB.GetSimApp().ICAHostKeeper.SetParams(suite.chainB.GetContext(), types.NewParams(false, []string{})) + s.chainB.GetSimApp().ICAHostKeeper.SetParams(s.chainB.GetContext(), types.NewParams(false, []string{})) }, false, types.ErrHostSubModuleDisabled.Error(), }, { "success with ICA auth module callback failure", func() { - suite.chainB.GetSimApp().ICAAuthModule.IBCApp.OnRecvPacket = func( + s.chainB.GetSimApp().ICAAuthModule.IBCApp.OnRecvPacket = func( ctx sdk.Context, channelVersion string, packet channeltypes.Packet, relayer sdk.AccAddress, ) exported.Acknowledgement { return channeltypes.NewErrorAcknowledgement(errors.New("failed OnRecvPacket mock callback")) @@ -418,30 +438,30 @@ func (suite *InterchainAccountsTestSuite) TestOnRecvPacket() { for _, ordering := range []channeltypes.Order{channeltypes.UNORDERED, channeltypes.ORDERED} { for _, tc := range testCases { - suite.Run(tc.name, func() { - suite.SetupTest() // reset + s.Run(tc.name, func() { + s.SetupTest() // reset - path := NewICAPath(suite.chainA, suite.chainB, ordering) + path := NewICAPath(s.chainA, s.chainB, ordering) path.SetupConnections() err := SetupICAPath(path, TestOwnerAddress) - suite.Require().NoError(err) + s.Require().NoError(err) // send 100stake to interchain account wallet amount, _ := sdk.ParseCoinsNormalized("100stake") - interchainAccountAddr, _ := suite.chainB.GetSimApp().ICAHostKeeper.GetInterchainAccountAddress(suite.chainB.GetContext(), ibctesting.FirstConnectionID, path.EndpointA.ChannelConfig.PortID) - bankMsg := &banktypes.MsgSend{FromAddress: suite.chainB.SenderAccount.GetAddress().String(), ToAddress: interchainAccountAddr, Amount: amount} + interchainAccountAddr, _ := s.chainB.GetSimApp().ICAHostKeeper.GetInterchainAccountAddress(s.chainB.GetContext(), ibctesting.FirstConnectionID, path.EndpointA.ChannelConfig.PortID) + bankMsg := &banktypes.MsgSend{FromAddress: s.chainB.SenderAccount.GetAddress().String(), ToAddress: interchainAccountAddr, Amount: amount} - _, err = suite.chainB.SendMsgs(bankMsg) - suite.Require().NoError(err) + _, err = s.chainB.SendMsgs(bankMsg) + s.Require().NoError(err) // build packet data msg := &banktypes.MsgSend{ FromAddress: interchainAccountAddr, - ToAddress: suite.chainB.SenderAccount.GetAddress().String(), + ToAddress: s.chainB.SenderAccount.GetAddress().String(), Amount: amount, } - data, err := icatypes.SerializeCosmosTx(suite.chainA.GetSimApp().AppCodec(), []proto.Message{msg}, icatypes.EncodingProtobuf) - suite.Require().NoError(err) + data, err := icatypes.SerializeCosmosTx(s.chainA.GetSimApp().AppCodec(), []proto.Message{msg}, icatypes.EncodingProtobuf) + s.Require().NoError(err) icaPacketData := icatypes.InterchainAccountPacketData{ Type: icatypes.EXECUTE_TX, @@ -451,17 +471,17 @@ func (suite *InterchainAccountsTestSuite) TestOnRecvPacket() { // build expected ack protoAny, err := codectypes.NewAnyWithValue(&banktypes.MsgSendResponse{}) - suite.Require().NoError(err) + s.Require().NoError(err) expectedTxResponse, err := proto.Marshal(&sdk.TxMsgData{ MsgResponses: []*codectypes.Any{protoAny}, }) - suite.Require().NoError(err) + s.Require().NoError(err) expectedAck := channeltypes.NewResultAcknowledgement(expectedTxResponse) params := 
types.NewParams(true, []string{sdk.MsgTypeURL(msg)}) - suite.chainB.GetSimApp().ICAHostKeeper.SetParams(suite.chainB.GetContext(), params) + s.chainB.GetSimApp().ICAHostKeeper.SetParams(s.chainB.GetContext(), params) // malleate packetData for test cases tc.malleate() @@ -471,10 +491,10 @@ func (suite *InterchainAccountsTestSuite) TestOnRecvPacket() { tc.malleate() - cbs, ok := suite.chainB.App.GetIBCKeeper().PortKeeper.Route(path.EndpointB.ChannelConfig.PortID) - suite.Require().True(ok) + cbs, ok := s.chainB.App.GetIBCKeeper().PortKeeper.Route(path.EndpointB.ChannelConfig.PortID) + s.Require().True(ok) - ctx := suite.chainB.GetContext() + ctx := s.chainB.GetContext() ack := cbs.OnRecvPacket(ctx, path.EndpointB.GetChannel().Version, packet, nil) expectedAttributes := []sdk.Attribute{ @@ -484,8 +504,8 @@ func (suite *InterchainAccountsTestSuite) TestOnRecvPacket() { } if tc.expAckSuccess { - suite.Require().True(ack.Success()) - suite.Require().Equal(expectedAck, ack) + s.Require().True(ack.Success()) + s.Require().Equal(expectedAck, ack) expectedEvents := sdk.Events{ sdk.NewEvent( @@ -495,10 +515,9 @@ func (suite *InterchainAccountsTestSuite) TestOnRecvPacket() { }.ToABCIEvents() expectedEvents = sdk.MarkEventsToIndex(expectedEvents, map[string]struct{}{}) - ibctesting.AssertEvents(&suite.Suite, expectedEvents, ctx.EventManager().Events().ToABCIEvents()) - + ibctesting.AssertEvents(&s.Suite, expectedEvents, ctx.EventManager().Events().ToABCIEvents()) } else { - suite.Require().False(ack.Success()) + s.Require().False(ack.Success()) expectedAttributes = append(expectedAttributes, sdk.NewAttribute(icatypes.AttributeKeyAckError, tc.eventErrorMsg)) expectedEvents := sdk.Events{ @@ -509,14 +528,14 @@ func (suite *InterchainAccountsTestSuite) TestOnRecvPacket() { }.ToABCIEvents() expectedEvents = sdk.MarkEventsToIndex(expectedEvents, map[string]struct{}{}) - ibctesting.AssertEvents(&suite.Suite, expectedEvents, ctx.EventManager().Events().ToABCIEvents()) + ibctesting.AssertEvents(&s.Suite, expectedEvents, ctx.EventManager().Events().ToABCIEvents()) } }) } } } -func (suite *InterchainAccountsTestSuite) TestOnAcknowledgementPacket() { +func (s *InterchainAccountsTestSuite) TestOnAcknowledgementPacket() { testCases := []struct { name string malleate func() @@ -529,23 +548,23 @@ func (suite *InterchainAccountsTestSuite) TestOnAcknowledgementPacket() { for _, ordering := range []channeltypes.Order{channeltypes.UNORDERED, channeltypes.ORDERED} { for _, tc := range testCases { - suite.Run(tc.name, func() { - suite.SetupTest() // reset + s.Run(tc.name, func() { + s.SetupTest() // reset - path := NewICAPath(suite.chainA, suite.chainB, ordering) + path := NewICAPath(s.chainA, s.chainB, ordering) path.SetupConnections() err := SetupICAPath(path, TestOwnerAddress) - suite.Require().NoError(err) + s.Require().NoError(err) tc.malleate() // malleate mutates test data - cbs, ok := suite.chainB.App.GetIBCKeeper().PortKeeper.Route(path.EndpointB.ChannelConfig.PortID) - suite.Require().True(ok) + cbs, ok := s.chainB.App.GetIBCKeeper().PortKeeper.Route(path.EndpointB.ChannelConfig.PortID) + s.Require().True(ok) packet := channeltypes.NewPacket( []byte("empty packet data"), - suite.chainA.SenderAccount.GetSequence(), + s.chainA.SenderAccount.GetSequence(), path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, path.EndpointA.ChannelConfig.PortID, @@ -554,19 +573,19 @@ func (suite *InterchainAccountsTestSuite) TestOnAcknowledgementPacket() { 0, ) - err = cbs.OnAcknowledgementPacket(suite.chainB.GetContext(), 
path.EndpointB.GetChannel().Version, packet, []byte("ackBytes"), nil) + err = cbs.OnAcknowledgementPacket(s.chainB.GetContext(), path.EndpointB.GetChannel().Version, packet, []byte("ackBytes"), nil) if tc.expErr == nil { - suite.Require().NoError(err) + s.Require().NoError(err) } else { - suite.Require().ErrorIs(err, tc.expErr) + s.Require().ErrorIs(err, tc.expErr) } }) } } } -func (suite *InterchainAccountsTestSuite) TestOnTimeoutPacket() { +func (s *InterchainAccountsTestSuite) TestOnTimeoutPacket() { testCases := []struct { name string malleate func() @@ -579,23 +598,23 @@ func (suite *InterchainAccountsTestSuite) TestOnTimeoutPacket() { for _, ordering := range []channeltypes.Order{channeltypes.UNORDERED, channeltypes.ORDERED} { for _, tc := range testCases { - suite.Run(tc.name, func() { - suite.SetupTest() // reset + s.Run(tc.name, func() { + s.SetupTest() // reset - path := NewICAPath(suite.chainA, suite.chainB, ordering) + path := NewICAPath(s.chainA, s.chainB, ordering) path.SetupConnections() err := SetupICAPath(path, TestOwnerAddress) - suite.Require().NoError(err) + s.Require().NoError(err) tc.malleate() // malleate mutates test data - cbs, ok := suite.chainA.App.GetIBCKeeper().PortKeeper.Route(path.EndpointB.ChannelConfig.PortID) - suite.Require().True(ok) + cbs, ok := s.chainA.App.GetIBCKeeper().PortKeeper.Route(path.EndpointB.ChannelConfig.PortID) + s.Require().True(ok) packet := channeltypes.NewPacket( []byte("empty packet data"), - suite.chainA.SenderAccount.GetSequence(), + s.chainA.SenderAccount.GetSequence(), path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, path.EndpointA.ChannelConfig.PortID, @@ -604,45 +623,45 @@ func (suite *InterchainAccountsTestSuite) TestOnTimeoutPacket() { 0, ) - err = cbs.OnTimeoutPacket(suite.chainA.GetContext(), path.EndpointA.GetChannel().Version, packet, nil) + err = cbs.OnTimeoutPacket(s.chainA.GetContext(), path.EndpointA.GetChannel().Version, packet, nil) if tc.expErr == nil { - suite.Require().NoError(err) + s.Require().NoError(err) } else { - suite.Require().ErrorIs(err, tc.expErr) + s.Require().ErrorIs(err, tc.expErr) } }) } } } -func (suite *InterchainAccountsTestSuite) fundICAWallet(ctx sdk.Context, portID string, amount sdk.Coins) { - interchainAccountAddr, found := suite.chainB.GetSimApp().ICAHostKeeper.GetInterchainAccountAddress(ctx, ibctesting.FirstConnectionID, portID) - suite.Require().True(found) +func (s *InterchainAccountsTestSuite) fundICAWallet(ctx sdk.Context, portID string, amount sdk.Coins) { + interchainAccountAddr, found := s.chainB.GetSimApp().ICAHostKeeper.GetInterchainAccountAddress(ctx, ibctesting.FirstConnectionID, portID) + s.Require().True(found) msgBankSend := &banktypes.MsgSend{ - FromAddress: suite.chainB.SenderAccount.GetAddress().String(), + FromAddress: s.chainB.SenderAccount.GetAddress().String(), ToAddress: interchainAccountAddr, Amount: amount, } - res, err := suite.chainB.SendMsgs(msgBankSend) - suite.Require().NotEmpty(res) - suite.Require().NoError(err) + res, err := s.chainB.SendMsgs(msgBankSend) + s.Require().NotEmpty(res) + s.Require().NoError(err) } // TestControlAccountAfterChannelClose tests that a controller chain can control a registered interchain account after the currently active channel for that interchain account has been closed. // A new channel will be opened for the controller portID. The interchain account address should remain unchanged. 
-func (suite *InterchainAccountsTestSuite) TestControlAccountAfterChannelClose() { +func (s *InterchainAccountsTestSuite) TestControlAccountAfterChannelClose() { for _, ordering := range []channeltypes.Order{channeltypes.UNORDERED, channeltypes.ORDERED} { - suite.SetupTest() // reset + s.SetupTest() // reset - path := NewICAPath(suite.chainA, suite.chainB, ordering) + path := NewICAPath(s.chainA, s.chainB, ordering) path.SetupConnections() err := SetupICAPath(path, TestOwnerAddress) - suite.Require().NoError(err) + s.Require().NoError(err) // two sends will be performed, one after initial creation of the account and one after channel closure and reopening var ( @@ -653,18 +672,18 @@ func (suite *InterchainAccountsTestSuite) TestControlAccountAfterChannelClose() ) // check that the account is working as expected - suite.fundICAWallet(suite.chainB.GetContext(), path.EndpointA.ChannelConfig.PortID, startingBal) - interchainAccountAddr, found := suite.chainB.GetSimApp().ICAHostKeeper.GetInterchainAccountAddress(suite.chainB.GetContext(), path.EndpointB.ConnectionID, path.EndpointA.ChannelConfig.PortID) - suite.Require().True(found) + s.fundICAWallet(s.chainB.GetContext(), path.EndpointA.ChannelConfig.PortID, startingBal) + interchainAccountAddr, found := s.chainB.GetSimApp().ICAHostKeeper.GetInterchainAccountAddress(s.chainB.GetContext(), path.EndpointB.ConnectionID, path.EndpointA.ChannelConfig.PortID) + s.Require().True(found) msg := &banktypes.MsgSend{ FromAddress: interchainAccountAddr, - ToAddress: suite.chainB.SenderAccount.GetAddress().String(), + ToAddress: s.chainB.SenderAccount.GetAddress().String(), Amount: tokenAmt, } - data, err := icatypes.SerializeCosmosTx(suite.chainA.GetSimApp().AppCodec(), []proto.Message{msg}, icatypes.EncodingProtobuf) - suite.Require().NoError(err) + data, err := icatypes.SerializeCosmosTx(s.chainA.GetSimApp().AppCodec(), []proto.Message{msg}, icatypes.EncodingProtobuf) + s.Require().NoError(err) icaPacketData := icatypes.InterchainAccountPacketData{ Type: icatypes.EXECUTE_TX, @@ -672,24 +691,24 @@ func (suite *InterchainAccountsTestSuite) TestControlAccountAfterChannelClose() } params := types.NewParams(true, []string{sdk.MsgTypeURL(msg)}) - suite.chainB.GetSimApp().ICAHostKeeper.SetParams(suite.chainB.GetContext(), params) + s.chainB.GetSimApp().ICAHostKeeper.SetParams(s.chainB.GetContext(), params) // nolint: staticcheck // SA1019: ibctesting.FirstConnectionID is deprecated: use path.EndpointA.ConnectionID instead. 
(staticcheck) - _, err = suite.chainA.GetSimApp().ICAControllerKeeper.SendTx(suite.chainA.GetContext(), ibctesting.FirstConnectionID, path.EndpointA.ChannelConfig.PortID, icaPacketData, ^uint64(0)) - suite.Require().NoError(err) + _, err = s.chainA.GetSimApp().ICAControllerKeeper.SendTx(s.chainA.GetContext(), ibctesting.FirstConnectionID, path.EndpointA.ChannelConfig.PortID, icaPacketData, ^uint64(0)) + s.Require().NoError(err) err = path.EndpointB.UpdateClient() - suite.Require().NoError(err) + s.Require().NoError(err) // relay the packet packetRelay := channeltypes.NewPacket(icaPacketData.GetBytes(), 1, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, clienttypes.ZeroHeight(), ^uint64(0)) err = path.RelayPacket(packetRelay) - suite.Require().NoError(err) // relay committed + s.Require().NoError(err) // relay committed // check that the ica balance is updated icaAddr, err := sdk.AccAddressFromBech32(interchainAccountAddr) - suite.Require().NoError(err) + s.Require().NoError(err) - suite.assertBalance(icaAddr, expBalAfterFirstSend) + s.assertBalance(icaAddr, expBalAfterFirstSend) // close the channel path.EndpointA.UpdateChannel(func(channel *channeltypes.Channel) { channel.State = channeltypes.CLOSED }) @@ -701,35 +720,35 @@ func (suite *InterchainAccountsTestSuite) TestControlAccountAfterChannelClose() path.CreateChannels() // nolint: staticcheck // SA1019: ibctesting.FirstConnectionID is deprecated: use path.EndpointA.ConnectionID instead. (staticcheck) - _, err = suite.chainA.GetSimApp().ICAControllerKeeper.SendTx(suite.chainA.GetContext(), ibctesting.FirstConnectionID, path.EndpointA.ChannelConfig.PortID, icaPacketData, ^uint64(0)) - suite.Require().NoError(err) + _, err = s.chainA.GetSimApp().ICAControllerKeeper.SendTx(s.chainA.GetContext(), ibctesting.FirstConnectionID, path.EndpointA.ChannelConfig.PortID, icaPacketData, ^uint64(0)) + s.Require().NoError(err) err = path.EndpointB.UpdateClient() - suite.Require().NoError(err) + s.Require().NoError(err) // relay the packet packetRelay = channeltypes.NewPacket(icaPacketData.GetBytes(), 1, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, clienttypes.ZeroHeight(), ^uint64(0)) err = path.RelayPacket(packetRelay) - suite.Require().NoError(err) // relay committed + s.Require().NoError(err) // relay committed - suite.assertBalance(icaAddr, expBalAfterSecondSend) + s.assertBalance(icaAddr, expBalAfterSecondSend) } } // assertBalance asserts that the provided address has exactly the expected balance. // CONTRACT: the expected balance must only contain one coin denom. 
-func (suite *InterchainAccountsTestSuite) assertBalance(addr sdk.AccAddress, expBalance sdk.Coins) { - balance := suite.chainB.GetSimApp().BankKeeper.GetBalance(suite.chainB.GetContext(), addr, sdk.DefaultBondDenom) - suite.Require().Equal(expBalance[0], balance) +func (s *InterchainAccountsTestSuite) assertBalance(addr sdk.AccAddress, expBalance sdk.Coins) { + balance := s.chainB.GetSimApp().BankKeeper.GetBalance(s.chainB.GetContext(), addr, sdk.DefaultBondDenom) + s.Require().Equal(expBalance[0], balance) } -func (suite *InterchainAccountsTestSuite) TestPacketDataUnmarshalerInterface() { +func (s *InterchainAccountsTestSuite) TestPacketDataUnmarshalerInterface() { for _, ordering := range []channeltypes.Order{channeltypes.UNORDERED, channeltypes.ORDERED} { - suite.SetupTest() // reset + s.SetupTest() // reset - path := NewICAPath(suite.chainA, suite.chainB, ordering) + path := NewICAPath(s.chainA, s.chainB, ordering) path.SetupConnections() err := SetupICAPath(path, TestOwnerAddress) - suite.Require().NoError(err) + s.Require().NoError(err) expPacketData := icatypes.InterchainAccountPacketData{ Type: icatypes.EXECUTE_TX, @@ -738,18 +757,18 @@ func (suite *InterchainAccountsTestSuite) TestPacketDataUnmarshalerInterface() { } // Context, port identifier and channel identifier are unused for host. - icaHostModule := icahost.NewIBCModule(suite.chainA.GetSimApp().ICAHostKeeper) - packetData, version, err := icaHostModule.UnmarshalPacketData(suite.chainA.GetContext(), path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, expPacketData.GetBytes()) - suite.Require().NoError(err) - suite.Require().Equal(version, path.EndpointA.ChannelConfig.Version) - suite.Require().Equal(expPacketData, packetData) + icaHostModule := icahost.NewIBCModule(s.chainA.GetSimApp().ICAHostKeeper) + packetData, version, err := icaHostModule.UnmarshalPacketData(s.chainA.GetContext(), path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, expPacketData.GetBytes()) + s.Require().NoError(err) + s.Require().Equal(version, path.EndpointA.ChannelConfig.Version) + s.Require().Equal(expPacketData, packetData) // test invalid packet data invalidPacketData := []byte("invalid packet data") // Context, port identifier and channel identifier are unused for host. - packetData, version, err = icaHostModule.UnmarshalPacketData(suite.chainA.GetContext(), path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, invalidPacketData) - suite.Require().Error(err) - suite.Require().Empty(version) - suite.Require().Nil(packetData) + packetData, version, err = icaHostModule.UnmarshalPacketData(s.chainA.GetContext(), path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, invalidPacketData) + s.Require().Error(err) + s.Require().Empty(version) + s.Require().Nil(packetData) } } diff --git a/modules/apps/27-interchain-accounts/host/keeper/account.go b/modules/apps/27-interchain-accounts/host/keeper/account.go index b485a6cff45..5511e1241c7 100644 --- a/modules/apps/27-interchain-accounts/host/keeper/account.go +++ b/modules/apps/27-interchain-accounts/host/keeper/account.go @@ -12,7 +12,7 @@ import ( // createInterchainAccount creates a new interchain account. An address is generated using the host connectionID, the controller portID, // and block dependent information. An error is returned if an account already exists for the generated account. // An interchain account type is set in the account keeper and the interchain account address mapping is updated. 
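Editor's aside on the comment above, before the receiver change to createInterchainAccount just below: the "block dependent information" enters through the sdk.Context, so the derived address is deterministic for a given block, host connection ID and controller port ID. The tests in this diff reproduce the expected address the same way; a minimal sketch of the call, using the identifiers those tests use (s.chainB, ibctesting.FirstConnectionID and TestPortID belong to the test suite and are shown here purely for illustration):

	// Sketch only: recompute the deterministic ICA address the host derives
	// during channel opening; the context supplies the block-dependent data.
	expectedAddr := icatypes.GenerateAddress(
		s.chainB.GetContext(),        // block-dependent information
		ibctesting.FirstConnectionID, // host connection ID
		TestPortID,                   // controller port ID
	)
	_ = expectedAddr.String() // bech32 string stored in the (connection, port) -> address mapping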
-func (k Keeper) createInterchainAccount(ctx sdk.Context, connectionID, controllerPortID string) (sdk.AccAddress, +func (k *Keeper) createInterchainAccount(ctx sdk.Context, connectionID, controllerPortID string) (sdk.AccAddress, error, ) { accAddress := icatypes.GenerateAddress(ctx, connectionID, controllerPortID) diff --git a/modules/apps/27-interchain-accounts/host/keeper/export_test.go b/modules/apps/27-interchain-accounts/host/keeper/export_test.go index 9e9aa7343c3..192692e1a52 100644 --- a/modules/apps/27-interchain-accounts/host/keeper/export_test.go +++ b/modules/apps/27-interchain-accounts/host/keeper/export_test.go @@ -11,7 +11,7 @@ import ( ) // GetAppMetadata is a wrapper around getAppMetadata to allow the function to be directly called in tests. -func (k Keeper) GetAppMetadata(ctx sdk.Context, portID, channelID string) (icatypes.Metadata, error) { +func (k *Keeper) GetAppMetadata(ctx sdk.Context, portID, channelID string) (icatypes.Metadata, error) { return k.getAppMetadata(ctx, portID, channelID) } diff --git a/modules/apps/27-interchain-accounts/host/keeper/genesis.go b/modules/apps/27-interchain-accounts/host/keeper/genesis.go index 94a1fe283c4..d47e8fb87d9 100644 --- a/modules/apps/27-interchain-accounts/host/keeper/genesis.go +++ b/modules/apps/27-interchain-accounts/host/keeper/genesis.go @@ -22,7 +22,7 @@ func InitGenesis(ctx sdk.Context, keeper Keeper, state genesistypes.HostGenesisS } if err := state.Params.Validate(); err != nil { - panic(fmt.Errorf("could not set ica host params at genesis: %v", err)) + panic(fmt.Errorf("could not set ica host params at genesis: %w", err)) } keeper.SetParams(ctx, state.Params) } diff --git a/modules/apps/27-interchain-accounts/host/keeper/genesis_test.go b/modules/apps/27-interchain-accounts/host/keeper/genesis_test.go index 91ac2c89c9f..41448fee2cb 100644 --- a/modules/apps/27-interchain-accounts/host/keeper/genesis_test.go +++ b/modules/apps/27-interchain-accounts/host/keeper/genesis_test.go @@ -9,8 +9,8 @@ import ( ibctesting "github.com/cosmos/ibc-go/v10/testing" ) -func (suite *KeeperTestSuite) TestInitGenesis() { - interchainAccAddr := icatypes.GenerateAddress(suite.chainB.GetContext(), ibctesting.FirstConnectionID, TestPortID) +func (s *KeeperTestSuite) TestInitGenesis() { + interchainAccAddr := icatypes.GenerateAddress(s.chainB.GetContext(), ibctesting.FirstConnectionID, TestPortID) genesisState := genesistypes.HostGenesisState{ ActiveChannels: []genesistypes.ActiveChannel{ { @@ -29,25 +29,25 @@ func (suite *KeeperTestSuite) TestInitGenesis() { Port: icatypes.HostPortID, } - keeper.InitGenesis(suite.chainA.GetContext(), suite.chainA.GetSimApp().ICAHostKeeper, genesisState) + keeper.InitGenesis(s.chainA.GetContext(), *s.chainA.GetSimApp().ICAHostKeeper, genesisState) - channelID, found := suite.chainA.GetSimApp().ICAHostKeeper.GetActiveChannelID(suite.chainA.GetContext(), ibctesting.FirstConnectionID, TestPortID) - suite.Require().True(found) - suite.Require().Equal(ibctesting.FirstChannelID, channelID) + channelID, found := s.chainA.GetSimApp().ICAHostKeeper.GetActiveChannelID(s.chainA.GetContext(), ibctesting.FirstConnectionID, TestPortID) + s.Require().True(found) + s.Require().Equal(ibctesting.FirstChannelID, channelID) - accountAdrr, found := suite.chainA.GetSimApp().ICAHostKeeper.GetInterchainAccountAddress(suite.chainA.GetContext(), ibctesting.FirstConnectionID, TestPortID) - suite.Require().True(found) - suite.Require().Equal(interchainAccAddr.String(), accountAdrr) + accountAdrr, found := 
s.chainA.GetSimApp().ICAHostKeeper.GetInterchainAccountAddress(s.chainA.GetContext(), ibctesting.FirstConnectionID, TestPortID) + s.Require().True(found) + s.Require().Equal(interchainAccAddr.String(), accountAdrr) expParams := genesisState.GetParams() - params := suite.chainA.GetSimApp().ICAHostKeeper.GetParams(suite.chainA.GetContext()) - suite.Require().Equal(expParams, params) + params := s.chainA.GetSimApp().ICAHostKeeper.GetParams(s.chainA.GetContext()) + s.Require().Equal(expParams, params) - store := suite.chainA.GetContext().KVStore(suite.chainA.GetSimApp().GetKey(types.StoreKey)) - suite.Require().True(store.Has(icatypes.KeyPort(icatypes.HostPortID))) + store := s.chainA.GetContext().KVStore(s.chainA.GetSimApp().GetKey(types.StoreKey)) + s.Require().True(store.Has(icatypes.KeyPort(icatypes.HostPortID))) } -func (suite *KeeperTestSuite) TestGenesisParams() { +func (s *KeeperTestSuite) TestGenesisParams() { testCases := []struct { name string input types.Params @@ -61,9 +61,9 @@ func (suite *KeeperTestSuite) TestGenesisParams() { } for _, tc := range testCases { - suite.Run(tc.name, func() { - suite.SetupTest() // reset - interchainAccAddr := icatypes.GenerateAddress(suite.chainB.GetContext(), ibctesting.FirstConnectionID, TestPortID) + s.Run(tc.name, func() { + s.SetupTest() // reset + interchainAccAddr := icatypes.GenerateAddress(s.chainB.GetContext(), ibctesting.FirstConnectionID, TestPortID) genesisState := genesistypes.HostGenesisState{ ActiveChannels: []genesistypes.ActiveChannel{ { @@ -83,52 +83,52 @@ func (suite *KeeperTestSuite) TestGenesisParams() { Params: tc.input, } if tc.expPanicMsg == "" { - keeper.InitGenesis(suite.chainA.GetContext(), suite.chainA.GetSimApp().ICAHostKeeper, genesisState) + keeper.InitGenesis(s.chainA.GetContext(), *s.chainA.GetSimApp().ICAHostKeeper, genesisState) - channelID, found := suite.chainA.GetSimApp().ICAHostKeeper.GetActiveChannelID(suite.chainA.GetContext(), ibctesting.FirstConnectionID, TestPortID) - suite.Require().True(found) - suite.Require().Equal(ibctesting.FirstChannelID, channelID) + channelID, found := s.chainA.GetSimApp().ICAHostKeeper.GetActiveChannelID(s.chainA.GetContext(), ibctesting.FirstConnectionID, TestPortID) + s.Require().True(found) + s.Require().Equal(ibctesting.FirstChannelID, channelID) - accountAdrr, found := suite.chainA.GetSimApp().ICAHostKeeper.GetInterchainAccountAddress(suite.chainA.GetContext(), ibctesting.FirstConnectionID, TestPortID) - suite.Require().True(found) - suite.Require().Equal(interchainAccAddr.String(), accountAdrr) + accountAdrr, found := s.chainA.GetSimApp().ICAHostKeeper.GetInterchainAccountAddress(s.chainA.GetContext(), ibctesting.FirstConnectionID, TestPortID) + s.Require().True(found) + s.Require().Equal(interchainAccAddr.String(), accountAdrr) expParams := tc.input - params := suite.chainA.GetSimApp().ICAHostKeeper.GetParams(suite.chainA.GetContext()) - suite.Require().Equal(expParams, params) + params := s.chainA.GetSimApp().ICAHostKeeper.GetParams(s.chainA.GetContext()) + s.Require().Equal(expParams, params) } else { - suite.PanicsWithError(tc.expPanicMsg, func() { - keeper.InitGenesis(suite.chainA.GetContext(), suite.chainA.GetSimApp().ICAHostKeeper, genesisState) + s.PanicsWithError(tc.expPanicMsg, func() { + keeper.InitGenesis(s.chainA.GetContext(), *s.chainA.GetSimApp().ICAHostKeeper, genesisState) }) } }) } } -func (suite *KeeperTestSuite) TestExportGenesis() { +func (s *KeeperTestSuite) TestExportGenesis() { for _, ordering := range []channeltypes.Order{channeltypes.UNORDERED, 
channeltypes.ORDERED} { - suite.SetupTest() + s.SetupTest() - path := NewICAPath(suite.chainA, suite.chainB, icatypes.EncodingProtobuf, ordering) + path := NewICAPath(s.chainA, s.chainB, icatypes.EncodingProtobuf, ordering) path.SetupConnections() err := SetupICAPath(path, TestOwnerAddress) - suite.Require().NoError(err) + s.Require().NoError(err) - interchainAccAddr, exists := suite.chainB.GetSimApp().ICAHostKeeper.GetInterchainAccountAddress(suite.chainB.GetContext(), path.EndpointB.ConnectionID, path.EndpointA.ChannelConfig.PortID) - suite.Require().True(exists) + interchainAccAddr, exists := s.chainB.GetSimApp().ICAHostKeeper.GetInterchainAccountAddress(s.chainB.GetContext(), path.EndpointB.ConnectionID, path.EndpointA.ChannelConfig.PortID) + s.Require().True(exists) - genesisState := keeper.ExportGenesis(suite.chainB.GetContext(), suite.chainB.GetSimApp().ICAHostKeeper) + genesisState := keeper.ExportGenesis(s.chainB.GetContext(), *s.chainB.GetSimApp().ICAHostKeeper) - suite.Require().Equal(path.EndpointB.ChannelID, genesisState.ActiveChannels[0].ChannelId) - suite.Require().Equal(path.EndpointA.ChannelConfig.PortID, genesisState.ActiveChannels[0].PortId) + s.Require().Equal(path.EndpointB.ChannelID, genesisState.ActiveChannels[0].ChannelId) + s.Require().Equal(path.EndpointA.ChannelConfig.PortID, genesisState.ActiveChannels[0].PortId) - suite.Require().Equal(interchainAccAddr, genesisState.InterchainAccounts[0].AccountAddress) - suite.Require().Equal(path.EndpointA.ChannelConfig.PortID, genesisState.InterchainAccounts[0].PortId) + s.Require().Equal(interchainAccAddr, genesisState.InterchainAccounts[0].AccountAddress) + s.Require().Equal(path.EndpointA.ChannelConfig.PortID, genesisState.InterchainAccounts[0].PortId) - suite.Require().Equal(icatypes.HostPortID, genesisState.GetPort()) + s.Require().Equal(icatypes.HostPortID, genesisState.GetPort()) expParams := types.DefaultParams() - suite.Require().Equal(expParams, genesisState.GetParams()) + s.Require().Equal(expParams, genesisState.GetParams()) } } diff --git a/modules/apps/27-interchain-accounts/host/keeper/grpc_query.go b/modules/apps/27-interchain-accounts/host/keeper/grpc_query.go index ddabeda2b2e..711fd8b5822 100644 --- a/modules/apps/27-interchain-accounts/host/keeper/grpc_query.go +++ b/modules/apps/27-interchain-accounts/host/keeper/grpc_query.go @@ -11,7 +11,7 @@ import ( var _ types.QueryServer = (*Keeper)(nil) // Params implements the Query/Params gRPC method -func (k Keeper) Params(goCtx context.Context, _ *types.QueryParamsRequest) (*types.QueryParamsResponse, error) { +func (k *Keeper) Params(goCtx context.Context, _ *types.QueryParamsRequest) (*types.QueryParamsResponse, error) { ctx := sdk.UnwrapSDKContext(goCtx) params := k.GetParams(ctx) diff --git a/modules/apps/27-interchain-accounts/host/keeper/grpc_query_test.go b/modules/apps/27-interchain-accounts/host/keeper/grpc_query_test.go index 354de22487c..c6f3f66a1b3 100644 --- a/modules/apps/27-interchain-accounts/host/keeper/grpc_query_test.go +++ b/modules/apps/27-interchain-accounts/host/keeper/grpc_query_test.go @@ -4,9 +4,9 @@ import ( "github.com/cosmos/ibc-go/v10/modules/apps/27-interchain-accounts/host/types" ) -func (suite *KeeperTestSuite) TestQueryParams() { - ctx := suite.chainA.GetContext() +func (s *KeeperTestSuite) TestQueryParams() { + ctx := s.chainA.GetContext() expParams := types.DefaultParams() - res, _ := suite.chainA.GetSimApp().ICAHostKeeper.Params(ctx, &types.QueryParamsRequest{}) - suite.Require().Equal(&expParams, res.Params) + res, _ := 
s.chainA.GetSimApp().ICAHostKeeper.Params(ctx, &types.QueryParamsRequest{}) + s.Require().Equal(&expParams, res.Params) } diff --git a/modules/apps/27-interchain-accounts/host/keeper/handshake.go b/modules/apps/27-interchain-accounts/host/keeper/handshake.go index 9b0fef255be..d268474e0b4 100644 --- a/modules/apps/27-interchain-accounts/host/keeper/handshake.go +++ b/modules/apps/27-interchain-accounts/host/keeper/handshake.go @@ -15,7 +15,7 @@ import ( // and registers a new interchain account (if it doesn't exist). // The version returned will include the registered interchain // account address. -func (k Keeper) OnChanOpenTry( +func (k *Keeper) OnChanOpenTry( ctx sdk.Context, order channeltypes.Order, connectionHops []string, @@ -73,7 +73,6 @@ func (k Keeper) OnChanOpenTry( if _, ok := k.accountKeeper.GetAccount(ctx, accAddress).(*icatypes.InterchainAccount); !ok { return "", errorsmod.Wrapf(icatypes.ErrInvalidAccountReopening, "existing account address %s, does not have interchain account type", accAddress) } - } else { accAddress, err = k.createInterchainAccount(ctx, metadata.HostConnectionId, counterparty.PortId) if err != nil { @@ -92,7 +91,7 @@ func (k Keeper) OnChanOpenTry( } // OnChanOpenConfirm completes the handshake process by setting the active channel in state on the host chain -func (k Keeper) OnChanOpenConfirm( +func (k *Keeper) OnChanOpenConfirm( ctx sdk.Context, portID, channelID string, @@ -112,7 +111,7 @@ func (k Keeper) OnChanOpenConfirm( } // OnChanCloseConfirm removes the active channel stored in state -func (Keeper) OnChanCloseConfirm( +func (*Keeper) OnChanCloseConfirm( ctx sdk.Context, portID, channelID string, diff --git a/modules/apps/27-interchain-accounts/host/keeper/handshake_test.go b/modules/apps/27-interchain-accounts/host/keeper/handshake_test.go index 618f2a70e24..14f46bc431a 100644 --- a/modules/apps/27-interchain-accounts/host/keeper/handshake_test.go +++ b/modules/apps/27-interchain-accounts/host/keeper/handshake_test.go @@ -13,29 +13,29 @@ import ( ) // open and close channel is a helper function for TestOnChanOpenTry for reopening accounts -func (suite *KeeperTestSuite) openAndCloseChannel(path *ibctesting.Path) { +func (s *KeeperTestSuite) openAndCloseChannel(path *ibctesting.Path) { err := path.EndpointB.ChanOpenTry() - suite.Require().NoError(err) + s.Require().NoError(err) err = path.EndpointA.ChanOpenAck() - suite.Require().NoError(err) + s.Require().NoError(err) err = path.EndpointB.ChanOpenConfirm() - suite.Require().NoError(err) + s.Require().NoError(err) path.EndpointA.UpdateChannel(func(channel *channeltypes.Channel) { channel.State = channeltypes.CLOSED }) path.EndpointB.UpdateChannel(func(channel *channeltypes.Channel) { channel.State = channeltypes.CLOSED }) path.EndpointA.ChannelID = "" err = RegisterInterchainAccount(path.EndpointA, TestOwnerAddress) - suite.Require().NoError(err) + s.Require().NoError(err) // bump channel sequence as these test mock core IBC behaviour on ChanOpenTry channelSequence := path.EndpointB.Chain.App.GetIBCKeeper().ChannelKeeper.GetNextChannelSequence(path.EndpointB.Chain.GetContext()) path.EndpointB.ChannelID = channeltypes.FormatChannelIdentifier(channelSequence) } -func (suite *KeeperTestSuite) TestOnChanOpenTry() { +func (s *KeeperTestSuite) TestOnChanOpenTry() { for _, ordering := range []channeltypes.Order{channeltypes.UNORDERED, channeltypes.ORDERED} { var ( channel *channeltypes.Channel @@ -60,7 +60,7 @@ func (suite *KeeperTestSuite) TestOnChanOpenTry() { // undo setup path.EndpointB.ChannelID = "" 
- suite.openAndCloseChannel(path) + s.openAndCloseChannel(path) }, nil, }, @@ -71,15 +71,15 @@ func (suite *KeeperTestSuite) TestOnChanOpenTry() { // undo setup path.EndpointB.ChannelID = "" - suite.openAndCloseChannel(path) + s.openAndCloseChannel(path) // delete interchain account address - store := suite.chainB.GetContext().KVStore(suite.chainB.GetSimApp().GetKey(hosttypes.SubModuleName)) + store := s.chainB.GetContext().KVStore(s.chainB.GetSimApp().GetKey(hosttypes.SubModuleName)) store.Delete(icatypes.KeyOwnerAccount(path.EndpointA.ChannelConfig.PortID, path.EndpointB.ConnectionID)) // assert interchain account address mapping was deleted - _, found := suite.chainB.GetSimApp().ICAHostKeeper.GetInterchainAccountAddress(suite.chainB.GetContext(), path.EndpointB.ConnectionID, path.EndpointA.ChannelConfig.PortID) - suite.Require().False(found) + _, found := s.chainB.GetSimApp().ICAHostKeeper.GetInterchainAccountAddress(s.chainB.GetContext(), path.EndpointB.ConnectionID, path.EndpointA.ChannelConfig.PortID) + s.Require().False(found) }, nil, }, @@ -89,7 +89,7 @@ func (suite *KeeperTestSuite) TestOnChanOpenTry() { metadata.HostConnectionId = "" versionBytes, err := icatypes.ModuleCdc.MarshalJSON(&metadata) - suite.Require().NoError(err) + s.Require().NoError(err) path.EndpointA.ChannelConfig.Version = string(versionBytes) }, @@ -99,14 +99,14 @@ func (suite *KeeperTestSuite) TestOnChanOpenTry() { "success - previous metadata is different", func() { // set the active channelID in state - suite.chainB.GetSimApp().ICAHostKeeper.SetActiveChannelID(suite.chainB.GetContext(), path.EndpointB.ConnectionID, path.EndpointA.ChannelConfig.PortID, path.EndpointB.ChannelID) + s.chainB.GetSimApp().ICAHostKeeper.SetActiveChannelID(s.chainB.GetContext(), path.EndpointB.ConnectionID, path.EndpointA.ChannelConfig.PortID, path.EndpointB.ChannelID) // set the previous encoding to be proto3json. // the new encoding is set to be protobuf in the test below. 
metadata.Encoding = icatypes.EncodingProto3JSON versionBytes, err := icatypes.ModuleCdc.MarshalJSON(&metadata) - suite.Require().NoError(err) + s.Require().NoError(err) channel.State = channeltypes.CLOSED channel.Version = string(versionBytes) @@ -129,14 +129,14 @@ func (suite *KeeperTestSuite) TestOnChanOpenTry() { // undo setup path.EndpointB.ChannelID = "" - suite.openAndCloseChannel(path) + s.openAndCloseChannel(path) // delete existing account - addr, found := suite.chainB.GetSimApp().ICAHostKeeper.GetInterchainAccountAddress(suite.chainB.GetContext(), path.EndpointB.ConnectionID, path.EndpointA.ChannelConfig.PortID) - suite.Require().True(found) + addr, found := s.chainB.GetSimApp().ICAHostKeeper.GetInterchainAccountAddress(s.chainB.GetContext(), path.EndpointB.ConnectionID, path.EndpointA.ChannelConfig.PortID) + s.Require().True(found) - acc := suite.chainB.GetSimApp().AccountKeeper.GetAccount(suite.chainB.GetContext(), sdk.MustAccAddressFromBech32(addr)) - suite.chainB.GetSimApp().AccountKeeper.RemoveAccount(suite.chainB.GetContext(), acc) + acc := s.chainB.GetSimApp().AccountKeeper.GetAccount(s.chainB.GetContext(), sdk.MustAccAddressFromBech32(addr)) + s.chainB.GetSimApp().AccountKeeper.RemoveAccount(s.chainB.GetContext(), acc) }, icatypes.ErrInvalidAccountReopening, }, @@ -147,29 +147,29 @@ func (suite *KeeperTestSuite) TestOnChanOpenTry() { // undo setup path.EndpointB.ChannelID = "" - suite.openAndCloseChannel(path) + s.openAndCloseChannel(path) - addr, found := suite.chainB.GetSimApp().ICAHostKeeper.GetInterchainAccountAddress(suite.chainB.GetContext(), path.EndpointB.ConnectionID, path.EndpointA.ChannelConfig.PortID) - suite.Require().True(found) + addr, found := s.chainB.GetSimApp().ICAHostKeeper.GetInterchainAccountAddress(s.chainB.GetContext(), path.EndpointB.ConnectionID, path.EndpointA.ChannelConfig.PortID) + s.Require().True(found) accAddress := sdk.MustAccAddressFromBech32(addr) - acc := suite.chainB.GetSimApp().AccountKeeper.GetAccount(suite.chainB.GetContext(), accAddress) + acc := s.chainB.GetSimApp().AccountKeeper.GetAccount(s.chainB.GetContext(), accAddress) icaAcc, ok := acc.(*icatypes.InterchainAccount) - suite.Require().True(ok) + s.Require().True(ok) // overwrite existing account with only base account type, not interchain account type - suite.chainB.GetSimApp().AccountKeeper.SetAccount(suite.chainB.GetContext(), icaAcc.BaseAccount) + s.chainB.GetSimApp().AccountKeeper.SetAccount(s.chainB.GetContext(), icaAcc.BaseAccount) }, icatypes.ErrInvalidAccountReopening, }, { "account already exists", func() { - interchainAccAddr := icatypes.GenerateAddress(suite.chainB.GetContext(), path.EndpointB.ConnectionID, path.EndpointA.ChannelConfig.PortID) - err := suite.chainB.GetSimApp().BankKeeper.SendCoins(suite.chainB.GetContext(), suite.chainB.SenderAccount.GetAddress(), interchainAccAddr, sdk.Coins{sdk.NewCoin("stake", sdkmath.NewInt(1))}) - suite.Require().NoError(err) - suite.Require().True(suite.chainB.GetSimApp().AccountKeeper.HasAccount(suite.chainB.GetContext(), interchainAccAddr)) + interchainAccAddr := icatypes.GenerateAddress(s.chainB.GetContext(), path.EndpointB.ConnectionID, path.EndpointA.ChannelConfig.PortID) + err := s.chainB.GetSimApp().BankKeeper.SendCoins(s.chainB.GetContext(), s.chainB.SenderAccount.GetAddress(), interchainAccAddr, sdk.Coins{sdk.NewCoin("stake", sdkmath.NewInt(1))}) + s.Require().NoError(err) + s.Require().True(s.chainB.GetSimApp().AccountKeeper.HasAccount(s.chainB.GetContext(), interchainAccAddr)) }, icatypes.ErrAccountAlreadyExist, },
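For readers following the version-handling cases above: the channel version negotiated for an ICS-27 channel is simply the JSON-encoded icatypes.Metadata, which is why each malleate function tweaks a single metadata field and re-marshals it. A minimal sketch of how the default version string is assembled, mirroring the TestOnChanOpenTry setup further below (s, path and the ibctesting/icatypes identifiers are those already used by these tests):

	// Sketch only: build the proposed channel version from ICS-27 metadata,
	// as the default-values setup in this test does.
	metadata := icatypes.NewMetadata(
		icatypes.Version,             // ICS-27 version identifier
		ibctesting.FirstConnectionID, // controller connection ID
		ibctesting.FirstConnectionID, // host connection ID
		"",                           // account address, filled in by the host on ChanOpenTry
		icatypes.EncodingProtobuf,
		icatypes.TxTypeSDKMultiMsg,
	)
	versionBytes, err := icatypes.ModuleCdc.MarshalJSON(&metadata)
	s.Require().NoError(err)
	path.EndpointA.ChannelConfig.Version = string(versionBytes)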
@@ -194,7 +194,7 @@ func (suite *KeeperTestSuite) TestOnChanOpenTry() { metadata.Encoding = "invalid-encoding-format" versionBytes, err := icatypes.ModuleCdc.MarshalJSON(&metadata) - suite.Require().NoError(err) + s.Require().NoError(err) path.EndpointA.ChannelConfig.Version = string(versionBytes) }, @@ -206,7 +206,7 @@ func (suite *KeeperTestSuite) TestOnChanOpenTry() { metadata.TxType = "invalid-tx-types" versionBytes, err := icatypes.ModuleCdc.MarshalJSON(&metadata) - suite.Require().NoError(err) + s.Require().NoError(err) path.EndpointA.ChannelConfig.Version = string(versionBytes) }, @@ -218,7 +218,7 @@ func (suite *KeeperTestSuite) TestOnChanOpenTry() { metadata.ControllerConnectionId = ibctesting.InvalidID versionBytes, err := icatypes.ModuleCdc.MarshalJSON(&metadata) - suite.Require().NoError(err) + s.Require().NoError(err) path.EndpointA.ChannelConfig.Version = string(versionBytes) }, @@ -230,7 +230,7 @@ func (suite *KeeperTestSuite) TestOnChanOpenTry() { metadata.Version = "invalid-version" versionBytes, err := icatypes.ModuleCdc.MarshalJSON(&metadata) - suite.Require().NoError(err) + s.Require().NoError(err) path.EndpointA.ChannelConfig.Version = string(versionBytes) }, @@ -241,24 +241,24 @@ func (suite *KeeperTestSuite) TestOnChanOpenTry() { func() { // create a new channel and set it in state ch := channeltypes.NewChannel(channeltypes.OPEN, ordering, channeltypes.NewCounterparty(path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID), []string{path.EndpointA.ConnectionID}, ibctesting.DefaultChannelVersion) - suite.chainB.GetSimApp().GetIBCKeeper().ChannelKeeper.SetChannel(suite.chainB.GetContext(), path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, ch) + s.chainB.GetSimApp().GetIBCKeeper().ChannelKeeper.SetChannel(s.chainB.GetContext(), path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, ch) // set the active channelID in state - suite.chainB.GetSimApp().ICAHostKeeper.SetActiveChannelID(suite.chainB.GetContext(), ibctesting.FirstConnectionID, path.EndpointA.ChannelConfig.PortID, path.EndpointB.ChannelID) + s.chainB.GetSimApp().ICAHostKeeper.SetActiveChannelID(s.chainB.GetContext(), ibctesting.FirstConnectionID, path.EndpointA.ChannelConfig.PortID, path.EndpointB.ChannelID) }, icatypes.ErrActiveChannelAlreadySet, }, } for _, tc := range testCases { - suite.Run(tc.name, func() { - suite.SetupTest() // reset + s.Run(tc.name, func() { + s.SetupTest() // reset - path = NewICAPath(suite.chainA, suite.chainB, icatypes.EncodingProtobuf, ordering) + path = NewICAPath(s.chainA, s.chainB, icatypes.EncodingProtobuf, ordering) path.SetupConnections() err := RegisterInterchainAccount(path.EndpointA, TestOwnerAddress) - suite.Require().NoError(err) + s.Require().NoError(err) // set the channel id on host channelSequence := path.EndpointB.Chain.App.GetIBCKeeper().ChannelKeeper.GetNextChannelSequence(path.EndpointB.Chain.GetContext()) @@ -267,7 +267,7 @@ func (suite *KeeperTestSuite) TestOnChanOpenTry() { // default values metadata = icatypes.NewMetadata(icatypes.Version, ibctesting.FirstConnectionID, ibctesting.FirstConnectionID, "", icatypes.EncodingProtobuf, icatypes.TxTypeSDKMultiMsg) versionBytes, err := icatypes.ModuleCdc.MarshalJSON(&metadata) - suite.Require().NoError(err) + s.Require().NoError(err) expectedMetadata := metadata @@ -282,38 +282,38 @@ func (suite *KeeperTestSuite) TestOnChanOpenTry() { tc.malleate() // malleate mutates test data - version, err := suite.chainB.GetSimApp().ICAHostKeeper.OnChanOpenTry(suite.chainB.GetContext(), channel.Ordering, 
channel.ConnectionHops, + version, err := s.chainB.GetSimApp().ICAHostKeeper.OnChanOpenTry(s.chainB.GetContext(), channel.Ordering, channel.ConnectionHops, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, channel.Counterparty, path.EndpointA.ChannelConfig.Version, ) if tc.expErr == nil { - suite.Require().NoError(err) + s.Require().NoError(err) - storedAddr, found := suite.chainB.GetSimApp().ICAHostKeeper.GetInterchainAccountAddress(suite.chainB.GetContext(), path.EndpointB.ConnectionID, path.EndpointA.ChannelConfig.PortID) - suite.Require().True(found) + storedAddr, found := s.chainB.GetSimApp().ICAHostKeeper.GetInterchainAccountAddress(s.chainB.GetContext(), path.EndpointB.ConnectionID, path.EndpointA.ChannelConfig.PortID) + s.Require().True(found) interchainAccAddr, err := sdk.AccAddressFromBech32(storedAddr) - suite.Require().NoError(err) + s.Require().NoError(err) // Check if account is created - interchainAccount := suite.chainB.GetSimApp().AccountKeeper.GetAccount(suite.chainB.GetContext(), interchainAccAddr) - suite.Require().Equal(interchainAccount.GetAddress().String(), storedAddr) + interchainAccount := s.chainB.GetSimApp().AccountKeeper.GetAccount(s.chainB.GetContext(), interchainAccAddr) + s.Require().Equal(interchainAccount.GetAddress().String(), storedAddr) expectedMetadata.Address = storedAddr expectedVersionBytes, err := icatypes.ModuleCdc.MarshalJSON(&expectedMetadata) - suite.Require().NoError(err) + s.Require().NoError(err) - suite.Require().Equal(string(expectedVersionBytes), version) + s.Require().Equal(string(expectedVersionBytes), version) } else { - suite.Require().ErrorIs(err, tc.expErr) - suite.Require().Equal("", version) + s.Require().ErrorIs(err, tc.expErr) + s.Require().Equal("", version) } }) } } } -func (suite *KeeperTestSuite) TestOnChanOpenConfirm() { +func (s *KeeperTestSuite) TestOnChanOpenConfirm() { var path *ibctesting.Path testCases := []struct { @@ -336,37 +336,37 @@ func (suite *KeeperTestSuite) TestOnChanOpenConfirm() { for _, ordering := range []channeltypes.Order{channeltypes.UNORDERED, channeltypes.ORDERED} { for _, tc := range testCases { - suite.Run(tc.name, func() { - suite.SetupTest() // reset + s.Run(tc.name, func() { + s.SetupTest() // reset - path = NewICAPath(suite.chainA, suite.chainB, icatypes.EncodingProtobuf, ordering) + path = NewICAPath(s.chainA, s.chainB, icatypes.EncodingProtobuf, ordering) path.SetupConnections() err := RegisterInterchainAccount(path.EndpointA, TestOwnerAddress) - suite.Require().NoError(err) + s.Require().NoError(err) err = path.EndpointB.ChanOpenTry() - suite.Require().NoError(err) + s.Require().NoError(err) err = path.EndpointA.ChanOpenAck() - suite.Require().NoError(err) + s.Require().NoError(err) tc.malleate() // malleate mutates test data - err = suite.chainB.GetSimApp().ICAHostKeeper.OnChanOpenConfirm(suite.chainB.GetContext(), + err = s.chainB.GetSimApp().ICAHostKeeper.OnChanOpenConfirm(s.chainB.GetContext(), path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID) if tc.expErr == nil { - suite.Require().NoError(err) + s.Require().NoError(err) } else { - suite.Require().ErrorIs(err, tc.expErr) + s.Require().ErrorIs(err, tc.expErr) } }) } } } -func (suite *KeeperTestSuite) TestOnChanCloseConfirm() { +func (s *KeeperTestSuite) TestOnChanCloseConfirm() { var path *ibctesting.Path testCases := []struct { @@ -381,24 +381,24 @@ func (suite *KeeperTestSuite) TestOnChanCloseConfirm() { for _, ordering := range []channeltypes.Order{channeltypes.UNORDERED, channeltypes.ORDERED} { for _, tc := 
range testCases { - suite.Run(tc.name, func() { - suite.SetupTest() // reset + s.Run(tc.name, func() { + s.SetupTest() // reset - path = NewICAPath(suite.chainA, suite.chainB, icatypes.EncodingProtobuf, ordering) + path = NewICAPath(s.chainA, s.chainB, icatypes.EncodingProtobuf, ordering) path.SetupConnections() err := SetupICAPath(path, TestOwnerAddress) - suite.Require().NoError(err) + s.Require().NoError(err) tc.malleate() // malleate mutates test data - err = suite.chainB.GetSimApp().ICAHostKeeper.OnChanCloseConfirm(suite.chainB.GetContext(), + err = s.chainB.GetSimApp().ICAHostKeeper.OnChanCloseConfirm(s.chainB.GetContext(), path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID) if tc.expErr == nil { - suite.Require().NoError(err) + s.Require().NoError(err) } else { - suite.Require().ErrorIs(err, tc.expErr) + s.Require().ErrorIs(err, tc.expErr) } }) } diff --git a/modules/apps/27-interchain-accounts/host/keeper/keeper.go b/modules/apps/27-interchain-accounts/host/keeper/keeper.go index 466206933cb..8f4df2f03f0 100644 --- a/modules/apps/27-interchain-accounts/host/keeper/keeper.go +++ b/modules/apps/27-interchain-accounts/host/keeper/keeper.go @@ -31,9 +31,8 @@ import ( // Keeper defines the IBC interchain accounts host keeper type Keeper struct { - storeService corestore.KVStoreService - cdc codec.Codec - legacySubspace icatypes.ParamSubspace + storeService corestore.KVStoreService + cdc codec.Codec ics4Wrapper porttypes.ICS4Wrapper channelKeeper icatypes.ChannelKeeper @@ -52,10 +51,10 @@ type Keeper struct { // NewKeeper creates a new interchain accounts host Keeper instance func NewKeeper( - cdc codec.Codec, storeService corestore.KVStoreService, legacySubspace icatypes.ParamSubspace, - ics4Wrapper porttypes.ICS4Wrapper, channelKeeper icatypes.ChannelKeeper, + cdc codec.Codec, storeService corestore.KVStoreService, + channelKeeper icatypes.ChannelKeeper, accountKeeper icatypes.AccountKeeper, msgRouter icatypes.MessageRouter, queryRouter icatypes.QueryRouter, authority string, -) Keeper { +) *Keeper { // ensure ibc interchain accounts module account is set if addr := accountKeeper.GetModuleAddress(icatypes.ModuleName); addr == nil { panic(errors.New("the Interchain Accounts module account has not been set")) @@ -65,17 +64,18 @@ func NewKeeper( panic(errors.New("authority must be non-empty")) } - return Keeper{ - storeService: storeService, - cdc: cdc, - legacySubspace: legacySubspace, - ics4Wrapper: ics4Wrapper, - channelKeeper: channelKeeper, - accountKeeper: accountKeeper, - msgRouter: msgRouter, - queryRouter: queryRouter, - mqsAllowList: newModuleQuerySafeAllowList(), - authority: authority, + return &Keeper{ + storeService: storeService, + cdc: cdc, + // Defaults to using the channel keeper as the ICS4Wrapper + // This can be overridden later with WithICS4Wrapper (e.g. by the middleware stack wiring) + ics4Wrapper: channelKeeper, + channelKeeper: channelKeeper, + accountKeeper: accountKeeper, + msgRouter: msgRouter, + queryRouter: queryRouter, + mqsAllowList: newModuleQuerySafeAllowList(), + authority: authority, } } @@ -87,17 +87,17 @@ func (k *Keeper) WithICS4Wrapper(wrapper porttypes.ICS4Wrapper) { } // GetICS4Wrapper returns the ICS4Wrapper. 
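To summarise the constructor change above: NewKeeper no longer takes a legacy param subspace or an explicit ICS4Wrapper, returns a *Keeper, and defaults the ICS4Wrapper to the channel keeper; middleware wiring can still override it via WithICS4Wrapper. A hedged wiring sketch, modelled on the NewKeeper call in keeper_test.go below (the app/keys field names and middlewareStack are illustrative, not taken from this diff):

	// Sketch only: new-style host keeper construction for an app.
	app.ICAHostKeeper = icahostkeeper.NewKeeper(
		appCodec,
		runtime.NewKVStoreService(keys[icahosttypes.StoreKey]),
		app.IBCKeeper.ChannelKeeper, // also becomes the default ICS4Wrapper
		app.AccountKeeper,
		app.MsgServiceRouter(),
		app.GRPCQueryRouter(),
		authtypes.NewModuleAddress(govtypes.ModuleName).String(),
	)

	// If the host module sits under middleware, swap the wrapper in afterwards;
	// middlewareStack is a hypothetical porttypes.ICS4Wrapper.
	app.ICAHostKeeper.WithICS4Wrapper(middlewareStack)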
-func (k Keeper) GetICS4Wrapper() porttypes.ICS4Wrapper { +func (k *Keeper) GetICS4Wrapper() porttypes.ICS4Wrapper { return k.ics4Wrapper } // Logger returns the application logger, scoped to the associated module -func (Keeper) Logger(ctx sdk.Context) log.Logger { +func (*Keeper) Logger(ctx sdk.Context) log.Logger { return ctx.Logger().With("module", fmt.Sprintf("x/%s-%s", exported.ModuleName, icatypes.ModuleName)) } // setPort sets the provided portID in state. -func (k Keeper) setPort(ctx sdk.Context, portID string) { +func (k *Keeper) setPort(ctx sdk.Context, portID string) { store := k.storeService.OpenKVStore(ctx) if err := store.Set(icatypes.KeyPort(portID), []byte{0x01}); err != nil { panic(err) @@ -105,12 +105,12 @@ func (k Keeper) setPort(ctx sdk.Context, portID string) { } // GetAppVersion calls the ICS4Wrapper GetAppVersion function. -func (k Keeper) GetAppVersion(ctx sdk.Context, portID, channelID string) (string, bool) { +func (k *Keeper) GetAppVersion(ctx sdk.Context, portID, channelID string) (string, bool) { return k.ics4Wrapper.GetAppVersion(ctx, portID, channelID) } // getAppMetadata retrieves the interchain accounts channel metadata from the store associated with the provided portID and channelID -func (k Keeper) getAppMetadata(ctx sdk.Context, portID, channelID string) (icatypes.Metadata, error) { +func (k *Keeper) getAppMetadata(ctx sdk.Context, portID, channelID string) (icatypes.Metadata, error) { appVersion, found := k.GetAppVersion(ctx, portID, channelID) if !found { return icatypes.Metadata{}, errorsmod.Wrapf(ibcerrors.ErrNotFound, "app version not found for port %s and channel %s", portID, channelID) @@ -120,7 +120,7 @@ func (k Keeper) getAppMetadata(ctx sdk.Context, portID, channelID string) (icaty } // GetActiveChannelID retrieves the active channelID from the store keyed by the provided connectionID and portID -func (k Keeper) GetActiveChannelID(ctx sdk.Context, connectionID, portID string) (string, bool) { +func (k *Keeper) GetActiveChannelID(ctx sdk.Context, connectionID, portID string) (string, bool) { store := k.storeService.OpenKVStore(ctx) key := icatypes.KeyActiveChannel(portID, connectionID) @@ -136,7 +136,7 @@ func (k Keeper) GetActiveChannelID(ctx sdk.Context, connectionID, portID string) } // GetOpenActiveChannel retrieves the active channelID from the store, keyed by the provided connectionID and portID & checks if the channel in question is in state OPEN -func (k Keeper) GetOpenActiveChannel(ctx sdk.Context, connectionID, portID string) (string, bool) { +func (k *Keeper) GetOpenActiveChannel(ctx sdk.Context, connectionID, portID string) (string, bool) { channelID, found := k.GetActiveChannelID(ctx, connectionID, portID) if !found { return "", false @@ -152,7 +152,7 @@ func (k Keeper) GetOpenActiveChannel(ctx sdk.Context, connectionID, portID strin } // GetAllActiveChannels returns a list of all active interchain accounts host channels and their associated connection and port identifiers -func (k Keeper) GetAllActiveChannels(ctx sdk.Context) []genesistypes.ActiveChannel { +func (k *Keeper) GetAllActiveChannels(ctx sdk.Context) []genesistypes.ActiveChannel { store := runtime.KVStoreAdapter(k.storeService.OpenKVStore(ctx)) iterator := storetypes.KVStorePrefixIterator(store, []byte(icatypes.ActiveChannelKeyPrefix)) defer sdk.LogDeferred(k.Logger(ctx), func() error { return iterator.Close() }) @@ -174,7 +174,7 @@ func (k Keeper) GetAllActiveChannels(ctx sdk.Context) []genesistypes.ActiveChann } // SetActiveChannelID stores the active channelID, keyed 
by the provided connectionID and portID -func (k Keeper) SetActiveChannelID(ctx sdk.Context, connectionID, portID, channelID string) { +func (k *Keeper) SetActiveChannelID(ctx sdk.Context, connectionID, portID, channelID string) { store := k.storeService.OpenKVStore(ctx) if err := store.Set(icatypes.KeyActiveChannel(portID, connectionID), []byte(channelID)); err != nil { panic(err) @@ -182,13 +182,13 @@ func (k Keeper) SetActiveChannelID(ctx sdk.Context, connectionID, portID, channe } // IsActiveChannel returns true if there exists an active channel for the provided connectionID and portID, otherwise false -func (k Keeper) IsActiveChannel(ctx sdk.Context, connectionID, portID string) bool { +func (k *Keeper) IsActiveChannel(ctx sdk.Context, connectionID, portID string) bool { _, ok := k.GetActiveChannelID(ctx, connectionID, portID) return ok } // GetInterchainAccountAddress retrieves the InterchainAccount address from the store associated with the provided connectionID and portID -func (k Keeper) GetInterchainAccountAddress(ctx sdk.Context, connectionID, portID string) (string, bool) { +func (k *Keeper) GetInterchainAccountAddress(ctx sdk.Context, connectionID, portID string) (string, bool) { store := k.storeService.OpenKVStore(ctx) key := icatypes.KeyOwnerAccount(portID, connectionID) @@ -204,7 +204,7 @@ func (k Keeper) GetInterchainAccountAddress(ctx sdk.Context, connectionID, portI } // GetAllInterchainAccounts returns a list of all registered interchain account addresses and their associated connection and controller port identifiers -func (k Keeper) GetAllInterchainAccounts(ctx sdk.Context) []genesistypes.RegisteredInterchainAccount { +func (k *Keeper) GetAllInterchainAccounts(ctx sdk.Context) []genesistypes.RegisteredInterchainAccount { store := runtime.KVStoreAdapter(k.storeService.OpenKVStore(ctx)) iterator := storetypes.KVStorePrefixIterator(store, []byte(icatypes.OwnerKeyPrefix)) @@ -225,7 +225,7 @@ func (k Keeper) GetAllInterchainAccounts(ctx sdk.Context) []genesistypes.Registe } // SetInterchainAccountAddress stores the InterchainAccount address, keyed by the associated connectionID and portID -func (k Keeper) SetInterchainAccountAddress(ctx sdk.Context, connectionID, portID, address string) { +func (k *Keeper) SetInterchainAccountAddress(ctx sdk.Context, connectionID, portID, address string) { store := k.storeService.OpenKVStore(ctx) if err := store.Set(icatypes.KeyOwnerAccount(portID, connectionID), []byte(address)); err != nil { panic(err) @@ -233,12 +233,12 @@ func (k Keeper) SetInterchainAccountAddress(ctx sdk.Context, connectionID, portI } // GetAuthority returns the 27-interchain-accounts host submodule's authority. -func (k Keeper) GetAuthority() string { +func (k *Keeper) GetAuthority() string { return k.authority } // GetParams returns the total set of the host submodule parameters. -func (k Keeper) GetParams(ctx sdk.Context) types.Params { +func (k *Keeper) GetParams(ctx sdk.Context) types.Params { store := k.storeService.OpenKVStore(ctx) bz, err := store.Get([]byte(types.ParamsKey)) if err != nil { @@ -254,7 +254,7 @@ func (k Keeper) GetParams(ctx sdk.Context) types.Params { } // SetParams sets the total set of the host submodule parameters. 
-func (k Keeper) SetParams(ctx sdk.Context, params types.Params) { +func (k *Keeper) SetParams(ctx sdk.Context, params types.Params) { store := k.storeService.OpenKVStore(ctx) bz := k.cdc.MustMarshal(¶ms) if err := store.Set([]byte(types.ParamsKey), bz); err != nil { diff --git a/modules/apps/27-interchain-accounts/host/keeper/keeper_test.go b/modules/apps/27-interchain-accounts/host/keeper/keeper_test.go index e1442f5dede..df670a38ac9 100644 --- a/modules/apps/27-interchain-accounts/host/keeper/keeper_test.go +++ b/modules/apps/27-interchain-accounts/host/keeper/keeper_test.go @@ -56,11 +56,11 @@ type KeeperTestSuite struct { chainC *ibctesting.TestChain } -func (suite *KeeperTestSuite) SetupTest() { - suite.coordinator = ibctesting.NewCoordinator(suite.T(), 3) - suite.chainA = suite.coordinator.GetChain(ibctesting.GetChainID(1)) - suite.chainB = suite.coordinator.GetChain(ibctesting.GetChainID(2)) - suite.chainC = suite.coordinator.GetChain(ibctesting.GetChainID(3)) +func (s *KeeperTestSuite) SetupTest() { + s.coordinator = ibctesting.NewCoordinator(s.T(), 3) + s.chainA = s.coordinator.GetChain(ibctesting.GetChainID(1)) + s.chainB = s.coordinator.GetChain(ibctesting.GetChainID(2)) + s.chainC = s.coordinator.GetChain(ibctesting.GetChainID(3)) } func NewICAPath(chainA, chainB *ibctesting.TestChain, encoding string, ordering channeltypes.Order) *ibctesting.Path { @@ -132,7 +132,7 @@ func TestKeeperTestSuite(t *testing.T) { testifysuite.Run(t, new(KeeperTestSuite)) } -func (suite *KeeperTestSuite) TestNewKeeper() { +func (s *KeeperTestSuite) TestNewKeeper() { testCases := []struct { name string instantiateFn func() @@ -140,56 +140,49 @@ func (suite *KeeperTestSuite) TestNewKeeper() { }{ {"success", func() { keeper.NewKeeper( - suite.chainA.GetSimApp().AppCodec(), - runtime.NewKVStoreService(suite.chainA.GetSimApp().GetKey(types.StoreKey)), - suite.chainA.GetSimApp().GetSubspace(types.SubModuleName), - suite.chainA.GetSimApp().IBCKeeper.ChannelKeeper, - suite.chainA.GetSimApp().IBCKeeper.ChannelKeeper, - suite.chainA.GetSimApp().AccountKeeper, - suite.chainA.GetSimApp().MsgServiceRouter(), - suite.chainA.GetSimApp().GRPCQueryRouter(), - suite.chainA.GetSimApp().ICAHostKeeper.GetAuthority(), + s.chainA.GetSimApp().AppCodec(), + runtime.NewKVStoreService(s.chainA.GetSimApp().GetKey(types.StoreKey)), + s.chainA.GetSimApp().IBCKeeper.ChannelKeeper, + s.chainA.GetSimApp().AccountKeeper, + s.chainA.GetSimApp().MsgServiceRouter(), + s.chainA.GetSimApp().GRPCQueryRouter(), + s.chainA.GetSimApp().ICAHostKeeper.GetAuthority(), ) }, ""}, {"failure: interchain accounts module account does not exist", func() { keeper.NewKeeper( - suite.chainA.GetSimApp().AppCodec(), - runtime.NewKVStoreService(suite.chainA.GetSimApp().GetKey(types.StoreKey)), - suite.chainA.GetSimApp().GetSubspace(types.SubModuleName), - suite.chainA.GetSimApp().IBCKeeper.ChannelKeeper, - suite.chainA.GetSimApp().IBCKeeper.ChannelKeeper, + s.chainA.GetSimApp().AppCodec(), + runtime.NewKVStoreService(s.chainA.GetSimApp().GetKey(types.StoreKey)), + s.chainA.GetSimApp().IBCKeeper.ChannelKeeper, authkeeper.AccountKeeper{}, // empty account keeper - suite.chainA.GetSimApp().MsgServiceRouter(), - suite.chainA.GetSimApp().GRPCQueryRouter(), - suite.chainA.GetSimApp().ICAHostKeeper.GetAuthority(), + s.chainA.GetSimApp().MsgServiceRouter(), + s.chainA.GetSimApp().GRPCQueryRouter(), + s.chainA.GetSimApp().ICAHostKeeper.GetAuthority(), ) }, "the Interchain Accounts module account has not been set"}, {"failure: empty mock staking keeper", func() { 
keeper.NewKeeper( - suite.chainA.GetSimApp().AppCodec(), - runtime.NewKVStoreService(suite.chainA.GetSimApp().GetKey(types.StoreKey)), - suite.chainA.GetSimApp().GetSubspace(types.SubModuleName), - suite.chainA.GetSimApp().IBCKeeper.ChannelKeeper, - suite.chainA.GetSimApp().IBCKeeper.ChannelKeeper, - suite.chainA.GetSimApp().AccountKeeper, - suite.chainA.GetSimApp().MsgServiceRouter(), - suite.chainA.GetSimApp().GRPCQueryRouter(), + s.chainA.GetSimApp().AppCodec(), + runtime.NewKVStoreService(s.chainA.GetSimApp().GetKey(types.StoreKey)), + s.chainA.GetSimApp().IBCKeeper.ChannelKeeper, + s.chainA.GetSimApp().AccountKeeper, + s.chainA.GetSimApp().MsgServiceRouter(), + s.chainA.GetSimApp().GRPCQueryRouter(), "", // authority ) }, "authority must be non-empty"}, } for _, tc := range testCases { + s.SetupTest() - suite.SetupTest() - - suite.Run(tc.name, func() { + s.Run(tc.name, func() { if tc.panicMsg == "" { - suite.Require().NotPanics( + s.Require().NotPanics( tc.instantiateFn, ) } else { - suite.Require().PanicsWithError( + s.Require().PanicsWithError( tc.panicMsg, tc.instantiateFn, ) @@ -198,67 +191,67 @@ func (suite *KeeperTestSuite) TestNewKeeper() { } } -func (suite *KeeperTestSuite) TestNewModuleQuerySafeAllowList() { +func (s *KeeperTestSuite) TestNewModuleQuerySafeAllowList() { // Currently, all queries in bank, staking, auth, and circuit are marked safe // Notably, the gov and distribution modules are not marked safe var allowList []string - suite.Require().NotPanics(func() { + s.Require().NotPanics(func() { allowList = keeper.NewModuleQuerySafeAllowList() }) - suite.Require().NotEmpty(allowList) - suite.Require().Contains(allowList, "/cosmos.bank.v1beta1.Query/Balance") - suite.Require().Contains(allowList, "/cosmos.bank.v1beta1.Query/AllBalances") - suite.Require().Contains(allowList, "/cosmos.staking.v1beta1.Query/Validator") - suite.Require().Contains(allowList, "/cosmos.staking.v1beta1.Query/Validators") - suite.Require().Contains(allowList, "/cosmos.auth.v1beta1.Query/Accounts") - suite.Require().Contains(allowList, "/cosmos.auth.v1beta1.Query/ModuleAccountByName") - suite.Require().Contains(allowList, "/ibc.core.client.v1.Query/VerifyMembership") - suite.Require().NotContains(allowList, "/cosmos.gov.v1beta1.Query/Proposals") - suite.Require().NotContains(allowList, "/cosmos.gov.v1.Query/Proposals") - suite.Require().NotContains(allowList, "/cosmos.distribution.v1beta1.Query/Params") - suite.Require().NotContains(allowList, "/cosmos.distribution.v1beta1.Query/DelegationRewards") + s.Require().NotEmpty(allowList) + s.Require().Contains(allowList, "/cosmos.bank.v1beta1.Query/Balance") + s.Require().Contains(allowList, "/cosmos.bank.v1beta1.Query/AllBalances") + s.Require().Contains(allowList, "/cosmos.staking.v1beta1.Query/Validator") + s.Require().Contains(allowList, "/cosmos.staking.v1beta1.Query/Validators") + s.Require().Contains(allowList, "/cosmos.auth.v1beta1.Query/Accounts") + s.Require().Contains(allowList, "/cosmos.auth.v1beta1.Query/ModuleAccountByName") + s.Require().Contains(allowList, "/ibc.core.client.v1.Query/VerifyMembership") + s.Require().NotContains(allowList, "/cosmos.gov.v1beta1.Query/Proposals") + s.Require().NotContains(allowList, "/cosmos.gov.v1.Query/Proposals") + s.Require().NotContains(allowList, "/cosmos.distribution.v1beta1.Query/Params") + s.Require().NotContains(allowList, "/cosmos.distribution.v1beta1.Query/DelegationRewards") } -func (suite *KeeperTestSuite) TestGetInterchainAccountAddress() { +func (s *KeeperTestSuite) 
TestGetInterchainAccountAddress() { for _, ordering := range []channeltypes.Order{channeltypes.UNORDERED, channeltypes.ORDERED} { - suite.SetupTest() + s.SetupTest() - path := NewICAPath(suite.chainA, suite.chainB, icatypes.EncodingProtobuf, ordering) + path := NewICAPath(s.chainA, s.chainB, icatypes.EncodingProtobuf, ordering) path.SetupConnections() err := SetupICAPath(path, TestOwnerAddress) - suite.Require().NoError(err) + s.Require().NoError(err) counterpartyPortID := path.EndpointA.ChannelConfig.PortID - retrievedAddr, found := suite.chainB.GetSimApp().ICAHostKeeper.GetInterchainAccountAddress(suite.chainB.GetContext(), ibctesting.FirstConnectionID, counterpartyPortID) - suite.Require().True(found) - suite.Require().NotEmpty(retrievedAddr) + retrievedAddr, found := s.chainB.GetSimApp().ICAHostKeeper.GetInterchainAccountAddress(s.chainB.GetContext(), ibctesting.FirstConnectionID, counterpartyPortID) + s.Require().True(found) + s.Require().NotEmpty(retrievedAddr) - retrievedAddr, found = suite.chainB.GetSimApp().ICAHostKeeper.GetInterchainAccountAddress(suite.chainB.GetContext(), ibctesting.FirstConnectionID, "invalid port") - suite.Require().False(found) - suite.Require().Empty(retrievedAddr) + retrievedAddr, found = s.chainB.GetSimApp().ICAHostKeeper.GetInterchainAccountAddress(s.chainB.GetContext(), ibctesting.FirstConnectionID, "invalid port") + s.Require().False(found) + s.Require().Empty(retrievedAddr) } } -func (suite *KeeperTestSuite) TestGetAllActiveChannels() { +func (s *KeeperTestSuite) TestGetAllActiveChannels() { for _, ordering := range []channeltypes.Order{channeltypes.UNORDERED, channeltypes.ORDERED} { var ( expectedChannelID = "test-channel" expectedPortID = "test-port" ) - suite.SetupTest() + s.SetupTest() - path := NewICAPath(suite.chainA, suite.chainB, icatypes.EncodingProtobuf, ordering) + path := NewICAPath(s.chainA, s.chainB, icatypes.EncodingProtobuf, ordering) path.SetupConnections() err := SetupICAPath(path, TestOwnerAddress) - suite.Require().NoError(err) + s.Require().NoError(err) - suite.chainB.GetSimApp().ICAHostKeeper.SetActiveChannelID(suite.chainB.GetContext(), ibctesting.FirstConnectionID, expectedPortID, expectedChannelID) + s.chainB.GetSimApp().ICAHostKeeper.SetActiveChannelID(s.chainB.GetContext(), ibctesting.FirstConnectionID, expectedPortID, expectedChannelID) expectedChannels := []genesistypes.ActiveChannel{ { @@ -273,31 +266,31 @@ func (suite *KeeperTestSuite) TestGetAllActiveChannels() { }, } - activeChannels := suite.chainB.GetSimApp().ICAHostKeeper.GetAllActiveChannels(suite.chainB.GetContext()) - suite.Require().Len(activeChannels, len(expectedChannels)) - suite.Require().Equal(expectedChannels, activeChannels) + activeChannels := s.chainB.GetSimApp().ICAHostKeeper.GetAllActiveChannels(s.chainB.GetContext()) + s.Require().Len(activeChannels, len(expectedChannels)) + s.Require().Equal(expectedChannels, activeChannels) } } -func (suite *KeeperTestSuite) TestGetAllInterchainAccounts() { +func (s *KeeperTestSuite) TestGetAllInterchainAccounts() { for _, ordering := range []channeltypes.Order{channeltypes.UNORDERED, channeltypes.ORDERED} { var ( expectedAccAddr = "test-acc-addr" expectedPortID = "test-port" ) - suite.SetupTest() + s.SetupTest() - path := NewICAPath(suite.chainA, suite.chainB, icatypes.EncodingProtobuf, ordering) + path := NewICAPath(s.chainA, s.chainB, icatypes.EncodingProtobuf, ordering) path.SetupConnections() err := SetupICAPath(path, TestOwnerAddress) - suite.Require().NoError(err) + s.Require().NoError(err) - 
interchainAccAddr, exists := suite.chainB.GetSimApp().ICAHostKeeper.GetInterchainAccountAddress(suite.chainB.GetContext(), path.EndpointB.ConnectionID, path.EndpointA.ChannelConfig.PortID) - suite.Require().True(exists) + interchainAccAddr, exists := s.chainB.GetSimApp().ICAHostKeeper.GetInterchainAccountAddress(s.chainB.GetContext(), path.EndpointB.ConnectionID, path.EndpointA.ChannelConfig.PortID) + s.Require().True(exists) - suite.chainB.GetSimApp().ICAHostKeeper.SetInterchainAccountAddress(suite.chainB.GetContext(), ibctesting.FirstConnectionID, expectedPortID, expectedAccAddr) + s.chainB.GetSimApp().ICAHostKeeper.SetInterchainAccountAddress(s.chainB.GetContext(), ibctesting.FirstConnectionID, expectedPortID, expectedAccAddr) expectedAccounts := []genesistypes.RegisteredInterchainAccount{ { @@ -312,56 +305,56 @@ func (suite *KeeperTestSuite) TestGetAllInterchainAccounts() { }, } - interchainAccounts := suite.chainB.GetSimApp().ICAHostKeeper.GetAllInterchainAccounts(suite.chainB.GetContext()) - suite.Require().Len(interchainAccounts, len(expectedAccounts)) - suite.Require().Equal(expectedAccounts, interchainAccounts) + interchainAccounts := s.chainB.GetSimApp().ICAHostKeeper.GetAllInterchainAccounts(s.chainB.GetContext()) + s.Require().Len(interchainAccounts, len(expectedAccounts)) + s.Require().Equal(expectedAccounts, interchainAccounts) } } -func (suite *KeeperTestSuite) TestIsActiveChannel() { +func (s *KeeperTestSuite) TestIsActiveChannel() { for _, ordering := range []channeltypes.Order{channeltypes.UNORDERED, channeltypes.ORDERED} { - suite.SetupTest() + s.SetupTest() - path := NewICAPath(suite.chainA, suite.chainB, icatypes.EncodingProtobuf, ordering) + path := NewICAPath(s.chainA, s.chainB, icatypes.EncodingProtobuf, ordering) path.SetupConnections() err := SetupICAPath(path, TestOwnerAddress) - suite.Require().NoError(err) + s.Require().NoError(err) - isActive := suite.chainB.GetSimApp().ICAHostKeeper.IsActiveChannel(suite.chainB.GetContext(), ibctesting.FirstConnectionID, path.EndpointA.ChannelConfig.PortID) - suite.Require().True(isActive) + isActive := s.chainB.GetSimApp().ICAHostKeeper.IsActiveChannel(s.chainB.GetContext(), ibctesting.FirstConnectionID, path.EndpointA.ChannelConfig.PortID) + s.Require().True(isActive) } } -func (suite *KeeperTestSuite) TestSetInterchainAccountAddress() { +func (s *KeeperTestSuite) TestSetInterchainAccountAddress() { var ( expectedAccAddr = "test-acc-addr" expectedPortID = "test-port" ) - suite.chainB.GetSimApp().ICAHostKeeper.SetInterchainAccountAddress(suite.chainB.GetContext(), ibctesting.FirstConnectionID, expectedPortID, expectedAccAddr) + s.chainB.GetSimApp().ICAHostKeeper.SetInterchainAccountAddress(s.chainB.GetContext(), ibctesting.FirstConnectionID, expectedPortID, expectedAccAddr) - retrievedAddr, found := suite.chainB.GetSimApp().ICAHostKeeper.GetInterchainAccountAddress(suite.chainB.GetContext(), ibctesting.FirstConnectionID, expectedPortID) - suite.Require().True(found) - suite.Require().Equal(expectedAccAddr, retrievedAddr) + retrievedAddr, found := s.chainB.GetSimApp().ICAHostKeeper.GetInterchainAccountAddress(s.chainB.GetContext(), ibctesting.FirstConnectionID, expectedPortID) + s.Require().True(found) + s.Require().Equal(expectedAccAddr, retrievedAddr) } -func (suite *KeeperTestSuite) TestMetadataNotFound() { +func (s *KeeperTestSuite) TestMetadataNotFound() { var ( invalidPortID = "invalid-port" invalidChannelID = "invalid-channel" ) - _, err := 
suite.chainB.GetSimApp().ICAHostKeeper.GetAppMetadata(suite.chainB.GetContext(), invalidPortID, invalidChannelID) - suite.Require().ErrorIs(err, ibcerrors.ErrNotFound) - suite.Require().Contains(err.Error(), fmt.Sprintf("app version not found for port %s and channel %s", invalidPortID, invalidChannelID)) + _, err := s.chainB.GetSimApp().ICAHostKeeper.GetAppMetadata(s.chainB.GetContext(), invalidPortID, invalidChannelID) + s.Require().ErrorIs(err, ibcerrors.ErrNotFound) + s.Require().Contains(err.Error(), fmt.Sprintf("app version not found for port %s and channel %s", invalidPortID, invalidChannelID)) } -func (suite *KeeperTestSuite) TestParams() { +func (s *KeeperTestSuite) TestParams() { expParams := types.DefaultParams() - params := suite.chainA.GetSimApp().ICAHostKeeper.GetParams(suite.chainA.GetContext()) - suite.Require().Equal(expParams, params) + params := s.chainA.GetSimApp().ICAHostKeeper.GetParams(s.chainA.GetContext()) + s.Require().Equal(expParams, params) testCases := []struct { name string @@ -376,46 +369,46 @@ func (suite *KeeperTestSuite) TestParams() { } for _, tc := range testCases { - suite.Run(tc.name, func() { - suite.SetupTest() // reset - ctx := suite.chainA.GetContext() + s.Run(tc.name, func() { + s.SetupTest() // reset + ctx := s.chainA.GetContext() err := tc.input.Validate() - suite.chainA.GetSimApp().ICAHostKeeper.SetParams(ctx, tc.input) + s.chainA.GetSimApp().ICAHostKeeper.SetParams(ctx, tc.input) if tc.errMsg == "" { - suite.Require().NoError(err) + s.Require().NoError(err) expected := tc.input - p := suite.chainA.GetSimApp().ICAHostKeeper.GetParams(ctx) - suite.Require().Equal(expected, p) + p := s.chainA.GetSimApp().ICAHostKeeper.GetParams(ctx) + s.Require().Equal(expected, p) } else { - suite.Require().ErrorContains(err, tc.errMsg) + s.Require().ErrorContains(err, tc.errMsg) } }) } } -func (suite *KeeperTestSuite) TestUnsetParams() { - suite.SetupTest() - ctx := suite.chainA.GetContext() - store := suite.chainA.GetContext().KVStore(suite.chainA.GetSimApp().GetKey(types.SubModuleName)) +func (s *KeeperTestSuite) TestUnsetParams() { + s.SetupTest() + ctx := s.chainA.GetContext() + store := s.chainA.GetContext().KVStore(s.chainA.GetSimApp().GetKey(types.SubModuleName)) store.Delete([]byte(types.ParamsKey)) - suite.Require().Panics(func() { - suite.chainA.GetSimApp().ICAHostKeeper.GetParams(ctx) + s.Require().Panics(func() { + s.chainA.GetSimApp().ICAHostKeeper.GetParams(ctx) }) } -func (suite *KeeperTestSuite) TestWithICS4Wrapper() { - suite.SetupTest() +func (s *KeeperTestSuite) TestWithICS4Wrapper() { + s.SetupTest() // test if the ics4 wrapper is the channel keeper initially - ics4Wrapper := suite.chainA.GetSimApp().ICAHostKeeper.GetICS4Wrapper() + ics4Wrapper := s.chainA.GetSimApp().ICAHostKeeper.GetICS4Wrapper() _, isChannelKeeper := ics4Wrapper.(*channelkeeper.Keeper) - suite.Require().True(isChannelKeeper) - suite.Require().IsType((*channelkeeper.Keeper)(nil), ics4Wrapper) + s.Require().True(isChannelKeeper) + s.Require().IsType((*channelkeeper.Keeper)(nil), ics4Wrapper) // set the ics4 wrapper to the channel keeper - suite.chainA.GetSimApp().ICAHostKeeper.WithICS4Wrapper(nil) - ics4Wrapper = suite.chainA.GetSimApp().ICAHostKeeper.GetICS4Wrapper() - suite.Require().Nil(ics4Wrapper) + s.chainA.GetSimApp().ICAHostKeeper.WithICS4Wrapper(nil) + ics4Wrapper = s.chainA.GetSimApp().ICAHostKeeper.GetICS4Wrapper() + s.Require().Nil(ics4Wrapper) } diff --git a/modules/apps/27-interchain-accounts/host/keeper/migrations.go 
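Note: the keeper_test.go hunks above mechanically shorten the testify suite receiver from suite to s. A minimal, self-contained sketch of that convention, with illustrative suite and test names that are not taken from ibc-go:

package keeper_test

import (
	"testing"

	testifysuite "github.com/stretchr/testify/suite"
)

// KeeperTestSuite embeds the testify suite; the one-letter receiver keeps
// assertion-heavy test bodies compact, e.g. s.Require().NoError(err)
// instead of suite.Require().NoError(err).
type KeeperTestSuite struct {
	testifysuite.Suite
}

// SetupTest runs before every test method on the suite.
func (s *KeeperTestSuite) SetupTest() {}

// TestExample demonstrates the shortened receiver style.
func (s *KeeperTestSuite) TestExample() {
	s.Require().True(true)
}

func TestKeeperTestSuite(t *testing.T) {
	testifysuite.Run(t, new(KeeperTestSuite))
}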
b/modules/apps/27-interchain-accounts/host/keeper/migrations.go deleted file mode 100644 index 9798119ce00..00000000000 --- a/modules/apps/27-interchain-accounts/host/keeper/migrations.go +++ /dev/null @@ -1,35 +0,0 @@ -package keeper - -import ( - sdk "github.com/cosmos/cosmos-sdk/types" - - "github.com/cosmos/ibc-go/v10/modules/apps/27-interchain-accounts/host/types" -) - -// Migrator is a struct for handling in-place state migrations. -type Migrator struct { - keeper *Keeper -} - -// NewMigrator returns Migrator instance for the state migration. -func NewMigrator(k *Keeper) Migrator { - return Migrator{ - keeper: k, - } -} - -// MigrateParams migrates the host submodule's parameters from the x/params to self store. -func (m Migrator) MigrateParams(ctx sdk.Context) error { - if m.keeper != nil { - params := types.DefaultParams() - if m.keeper.legacySubspace != nil { - m.keeper.legacySubspace.GetParamSetIfExists(ctx, ¶ms) - } - if err := params.Validate(); err != nil { - return err - } - m.keeper.SetParams(ctx, params) - m.keeper.Logger(ctx).Info("successfully migrated ica/host submodule to self-manage params") - } - return nil -} diff --git a/modules/apps/27-interchain-accounts/host/keeper/migrations_test.go b/modules/apps/27-interchain-accounts/host/keeper/migrations_test.go deleted file mode 100644 index 22810abfcbd..00000000000 --- a/modules/apps/27-interchain-accounts/host/keeper/migrations_test.go +++ /dev/null @@ -1,62 +0,0 @@ -package keeper_test - -import ( - "fmt" - - "github.com/cosmos/cosmos-sdk/runtime" - authtypes "github.com/cosmos/cosmos-sdk/x/auth/types" - govtypes "github.com/cosmos/cosmos-sdk/x/gov/types" - - icahostkeeper "github.com/cosmos/ibc-go/v10/modules/apps/27-interchain-accounts/host/keeper" - icahosttypes "github.com/cosmos/ibc-go/v10/modules/apps/27-interchain-accounts/host/types" -) - -func (suite *KeeperTestSuite) TestMigratorMigrateParams() { - testCases := []struct { - msg string - malleate func() - expectedParams icahosttypes.Params - }{ - { - "success: default params", - func() { - params := icahosttypes.DefaultParams() - subspace := suite.chainA.GetSimApp().GetSubspace(icahosttypes.SubModuleName) // get subspace - subspace.SetParamSet(suite.chainA.GetContext(), ¶ms) // set params - }, - icahosttypes.DefaultParams(), - }, - { - "success: no legacy params pre-migration", - func() { - suite.chainA.GetSimApp().ICAHostKeeper = icahostkeeper.NewKeeper( - suite.chainA.Codec, - runtime.NewKVStoreService(suite.chainA.GetSimApp().GetKey(icahosttypes.StoreKey)), - nil, // assign a nil legacy param subspace - suite.chainA.GetSimApp().IBCKeeper.ChannelKeeper, - suite.chainA.GetSimApp().IBCKeeper.ChannelKeeper, - suite.chainA.GetSimApp().AccountKeeper, - suite.chainA.GetSimApp().MsgServiceRouter(), - suite.chainA.GetSimApp().GRPCQueryRouter(), - authtypes.NewModuleAddress(govtypes.ModuleName).String(), - ) - }, - icahosttypes.DefaultParams(), - }, - } - - for _, tc := range testCases { - suite.Run(fmt.Sprintf("case %s", tc.msg), func() { - suite.SetupTest() // reset - - tc.malleate() // explicitly set params - - migrator := icahostkeeper.NewMigrator(&suite.chainA.GetSimApp().ICAHostKeeper) - err := migrator.MigrateParams(suite.chainA.GetContext()) - suite.Require().NoError(err) - - params := suite.chainA.GetSimApp().ICAHostKeeper.GetParams(suite.chainA.GetContext()) - suite.Require().Equal(tc.expectedParams, params) - }) - } -} diff --git a/modules/apps/27-interchain-accounts/host/keeper/msg_server_test.go 
b/modules/apps/27-interchain-accounts/host/keeper/msg_server_test.go index 5fa7cfaf476..903d835a365 100644 --- a/modules/apps/27-interchain-accounts/host/keeper/msg_server_test.go +++ b/modules/apps/27-interchain-accounts/host/keeper/msg_server_test.go @@ -11,7 +11,7 @@ import ( ibcerrors "github.com/cosmos/ibc-go/v10/modules/core/errors" ) -func (suite *KeeperTestSuite) TestModuleQuerySafe() { +func (s *KeeperTestSuite) TestModuleQuerySafe() { var ( msg *types.MsgModuleQuerySafe expResponses [][]byte @@ -24,21 +24,21 @@ func (suite *KeeperTestSuite) TestModuleQuerySafe() { { "success", func() { - balanceQueryBz, err := banktypes.NewQueryBalanceRequest(suite.chainA.SenderAccount.GetAddress(), sdk.DefaultBondDenom).Marshal() - suite.Require().NoError(err) + balanceQueryBz, err := banktypes.NewQueryBalanceRequest(s.chainA.SenderAccount.GetAddress(), sdk.DefaultBondDenom).Marshal() + s.Require().NoError(err) queryReq := types.QueryRequest{ Path: "/cosmos.bank.v1beta1.Query/Balance", Data: balanceQueryBz, } - msg = types.NewMsgModuleQuerySafe(suite.chainA.GetSimApp().ICAHostKeeper.GetAuthority(), []types.QueryRequest{queryReq}) + msg = types.NewMsgModuleQuerySafe(s.chainA.GetSimApp().ICAHostKeeper.GetAuthority(), []types.QueryRequest{queryReq}) - balance := suite.chainA.GetSimApp().BankKeeper.GetBalance(suite.chainA.GetContext(), suite.chainA.SenderAccount.GetAddress(), sdk.DefaultBondDenom) + balance := s.chainA.GetSimApp().BankKeeper.GetBalance(s.chainA.GetContext(), s.chainA.SenderAccount.GetAddress(), sdk.DefaultBondDenom) expResp := banktypes.QueryBalanceResponse{Balance: &balance} expRespBz, err := expResp.Marshal() - suite.Require().NoError(err) + s.Require().NoError(err) expResponses = [][]byte{expRespBz} }, @@ -47,8 +47,8 @@ func (suite *KeeperTestSuite) TestModuleQuerySafe() { { "success: multiple queries", func() { - balanceQueryBz, err := banktypes.NewQueryBalanceRequest(suite.chainA.SenderAccount.GetAddress(), sdk.DefaultBondDenom).Marshal() - suite.Require().NoError(err) + balanceQueryBz, err := banktypes.NewQueryBalanceRequest(s.chainA.SenderAccount.GetAddress(), sdk.DefaultBondDenom).Marshal() + s.Require().NoError(err) queryReq := types.QueryRequest{ Path: "/cosmos.bank.v1beta1.Query/Balance", @@ -57,26 +57,26 @@ func (suite *KeeperTestSuite) TestModuleQuerySafe() { paramsQuery := stakingtypes.QueryParamsRequest{} paramsQueryBz, err := paramsQuery.Marshal() - suite.Require().NoError(err) + s.Require().NoError(err) paramsQueryReq := types.QueryRequest{ Path: "/cosmos.staking.v1beta1.Query/Params", Data: paramsQueryBz, } - msg = types.NewMsgModuleQuerySafe(suite.chainA.GetSimApp().ICAHostKeeper.GetAuthority(), []types.QueryRequest{queryReq, paramsQueryReq}) + msg = types.NewMsgModuleQuerySafe(s.chainA.GetSimApp().ICAHostKeeper.GetAuthority(), []types.QueryRequest{queryReq, paramsQueryReq}) - balance := suite.chainA.GetSimApp().BankKeeper.GetBalance(suite.chainA.GetContext(), suite.chainA.SenderAccount.GetAddress(), sdk.DefaultBondDenom) + balance := s.chainA.GetSimApp().BankKeeper.GetBalance(s.chainA.GetContext(), s.chainA.SenderAccount.GetAddress(), sdk.DefaultBondDenom) expResp := banktypes.QueryBalanceResponse{Balance: &balance} expRespBz, err := expResp.Marshal() - suite.Require().NoError(err) + s.Require().NoError(err) - params, err := suite.chainA.GetSimApp().StakingKeeper.GetParams(suite.chainA.GetContext()) - suite.Require().NoError(err) + params, err := s.chainA.GetSimApp().StakingKeeper.GetParams(s.chainA.GetContext()) + s.Require().NoError(err) expParamsResp := 
stakingtypes.QueryParamsResponse{Params: params} expParamsRespBz, err := expParamsResp.Marshal() - suite.Require().NoError(err) + s.Require().NoError(err) expResponses = [][]byte{expRespBz, expParamsRespBz} }, @@ -85,8 +85,8 @@ func (suite *KeeperTestSuite) TestModuleQuerySafe() { { "failure: not module query safe", func() { - balanceQueryBz, err := banktypes.NewQueryBalanceRequest(suite.chainA.SenderAccount.GetAddress(), sdk.DefaultBondDenom).Marshal() - suite.Require().NoError(err) + balanceQueryBz, err := banktypes.NewQueryBalanceRequest(s.chainA.SenderAccount.GetAddress(), sdk.DefaultBondDenom).Marshal() + s.Require().NoError(err) queryReq := types.QueryRequest{ Path: "/cosmos.bank.v1beta1.Query/Balance", @@ -95,37 +95,37 @@ func (suite *KeeperTestSuite) TestModuleQuerySafe() { paramsQuery := transfertypes.QueryParamsRequest{} paramsQueryBz, err := paramsQuery.Marshal() - suite.Require().NoError(err) + s.Require().NoError(err) paramsQueryReq := types.QueryRequest{ Path: "/ibc.applications.transfer.v1.Query/Params", Data: paramsQueryBz, } - msg = types.NewMsgModuleQuerySafe(suite.chainA.GetSimApp().ICAHostKeeper.GetAuthority(), []types.QueryRequest{queryReq, paramsQueryReq}) + msg = types.NewMsgModuleQuerySafe(s.chainA.GetSimApp().ICAHostKeeper.GetAuthority(), []types.QueryRequest{queryReq, paramsQueryReq}) }, ibcerrors.ErrInvalidRequest, }, { "failure: invalid query path", func() { - balanceQueryBz, err := banktypes.NewQueryBalanceRequest(suite.chainA.SenderAccount.GetAddress(), sdk.DefaultBondDenom).Marshal() - suite.Require().NoError(err) + balanceQueryBz, err := banktypes.NewQueryBalanceRequest(s.chainA.SenderAccount.GetAddress(), sdk.DefaultBondDenom).Marshal() + s.Require().NoError(err) queryReq := types.QueryRequest{ Path: "/cosmos.invalid.Query/Invalid", Data: balanceQueryBz, } - msg = types.NewMsgModuleQuerySafe(suite.chainA.GetSimApp().ICAHostKeeper.GetAuthority(), []types.QueryRequest{queryReq}) + msg = types.NewMsgModuleQuerySafe(s.chainA.GetSimApp().ICAHostKeeper.GetAuthority(), []types.QueryRequest{queryReq}) }, ibcerrors.ErrInvalidRequest, }, } for _, tc := range testCases { - suite.Run(tc.name, func() { - suite.SetupTest() + s.Run(tc.name, func() { + s.SetupTest() // reset msg = nil @@ -133,24 +133,24 @@ func (suite *KeeperTestSuite) TestModuleQuerySafe() { tc.malleate() - ctx := suite.chainA.GetContext() - msgServer := keeper.NewMsgServerImpl(&suite.chainA.GetSimApp().ICAHostKeeper) + ctx := s.chainA.GetContext() + msgServer := keeper.NewMsgServerImpl(s.chainA.GetSimApp().ICAHostKeeper) res, err := msgServer.ModuleQuerySafe(ctx, msg) if tc.expErr == nil { - suite.Require().NoError(err) - suite.Require().NotNil(res) + s.Require().NoError(err) + s.Require().NotNil(res) - suite.Require().ElementsMatch(expResponses, res.Responses) + s.Require().ElementsMatch(expResponses, res.Responses) } else { - suite.Require().ErrorIs(err, tc.expErr) - suite.Require().Nil(res) + s.Require().ErrorIs(err, tc.expErr) + s.Require().Nil(res) } }) } } -func (suite *KeeperTestSuite) TestUpdateParams() { +func (s *KeeperTestSuite) TestUpdateParams() { testCases := []struct { name string msg *types.MsgUpdateParams @@ -158,7 +158,7 @@ func (suite *KeeperTestSuite) TestUpdateParams() { }{ { "success", - types.NewMsgUpdateParams(suite.chainA.GetSimApp().ICAHostKeeper.GetAuthority(), types.DefaultParams()), + types.NewMsgUpdateParams(s.chainA.GetSimApp().ICAHostKeeper.GetAuthority(), types.DefaultParams()), nil, }, { @@ -169,19 +169,19 @@ func (suite *KeeperTestSuite) TestUpdateParams() { } for _, tc 
:= range testCases { - suite.Run(tc.name, func() { - suite.SetupTest() + s.Run(tc.name, func() { + s.SetupTest() - ctx := suite.chainA.GetContext() - msgServer := keeper.NewMsgServerImpl(&suite.chainA.GetSimApp().ICAHostKeeper) + ctx := s.chainA.GetContext() + msgServer := keeper.NewMsgServerImpl(s.chainA.GetSimApp().ICAHostKeeper) res, err := msgServer.UpdateParams(ctx, tc.msg) if tc.expErr == nil { - suite.Require().NoError(err) - suite.Require().NotNil(res) + s.Require().NoError(err) + s.Require().NotNil(res) } else { - suite.Require().ErrorIs(err, tc.expErr) - suite.Require().Nil(res) + s.Require().ErrorIs(err, tc.expErr) + s.Require().Nil(res) } }) } diff --git a/modules/apps/27-interchain-accounts/host/keeper/relay.go b/modules/apps/27-interchain-accounts/host/keeper/relay.go index bca17ffc342..619fd381ae1 100644 --- a/modules/apps/27-interchain-accounts/host/keeper/relay.go +++ b/modules/apps/27-interchain-accounts/host/keeper/relay.go @@ -16,7 +16,7 @@ import ( // OnRecvPacket handles a given interchain accounts packet on a destination host chain. // If the transaction is successfully executed, the transaction response bytes will be returned. -func (k Keeper) OnRecvPacket(ctx sdk.Context, packet channeltypes.Packet) ([]byte, error) { +func (k *Keeper) OnRecvPacket(ctx sdk.Context, packet channeltypes.Packet) ([]byte, error) { var data icatypes.InterchainAccountPacketData err := data.UnmarshalJSON(packet.GetData()) if err != nil { @@ -50,7 +50,7 @@ func (k Keeper) OnRecvPacket(ctx sdk.Context, packet channeltypes.Packet) ([]byt // If authentication succeeds, it does basic validation of the messages before attempting to deliver each message // into state. The state changes will only be committed if all messages in the transaction succeed. Thus the // execution of the transaction is atomic, all state changes are reverted if a single message fails. -func (k Keeper) executeTx(ctx sdk.Context, sourcePort, destPort, destChannel string, msgs []sdk.Msg) ([]byte, error) { +func (k *Keeper) executeTx(ctx sdk.Context, sourcePort, destPort, destChannel string, msgs []sdk.Msg) ([]byte, error) { channel, found := k.channelKeeper.GetChannel(ctx, destPort, destChannel) if !found { return nil, channeltypes.ErrChannelNotFound @@ -94,7 +94,7 @@ func (k Keeper) executeTx(ctx sdk.Context, sourcePort, destPort, destChannel str // authenticateTx ensures the provided msgs contain the correct interchain account signer address retrieved // from state using the provided controller port identifier -func (k Keeper) authenticateTx(ctx sdk.Context, msgs []sdk.Msg, connectionID, portID string) error { +func (k *Keeper) authenticateTx(ctx sdk.Context, msgs []sdk.Msg, connectionID, portID string) error { interchainAccountAddr, found := k.GetInterchainAccountAddress(ctx, connectionID, portID) if !found { return errorsmod.Wrapf(icatypes.ErrInterchainAccountNotFound, "failed to retrieve interchain account on port %s", portID) @@ -128,7 +128,7 @@ func (k Keeper) authenticateTx(ctx sdk.Context, msgs []sdk.Msg, connectionID, po // Attempts to get the message handler from the router and if found will then execute the message. // If the message execution is successful, the proto marshaled message response will be returned. 
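Note: the relay.go hunks here switch OnRecvPacket, executeTx, authenticateTx and executeMsg from value receivers (k Keeper) to pointer receivers (k *Keeper). A simplified sketch of why that matters once the keeper carries mutable wiring such as an ICS4 wrapper; the types below are toy stand-ins, not the ibc-go definitions:

package main

import "fmt"

// ICS4Wrapper is a toy stand-in for the interface the keeper routes through.
type ICS4Wrapper interface{ Name() string }

type channelKeeper struct{}

func (channelKeeper) Name() string { return "channel keeper" }

// Keeper is a simplified stand-in; the real ICA host keeper holds far more state.
type Keeper struct {
	ics4Wrapper ICS4Wrapper
}

// WithICS4Wrapper mutates the keeper, so it must use a pointer receiver.
func (k *Keeper) WithICS4Wrapper(w ICS4Wrapper) { k.ics4Wrapper = w }

// OnRecvPacket also uses a pointer receiver: a value receiver would operate
// on a copy of the keeper and could miss wiring applied after construction.
func (k *Keeper) OnRecvPacket() string {
	if k.ics4Wrapper == nil {
		return "no ICS4 wrapper configured"
	}
	return "routing via " + k.ics4Wrapper.Name()
}

func main() {
	k := &Keeper{}
	fmt.Println(k.OnRecvPacket()) // no ICS4 wrapper configured
	k.WithICS4Wrapper(channelKeeper{})
	fmt.Println(k.OnRecvPacket()) // routing via channel keeper
}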
-func (k Keeper) executeMsg(ctx sdk.Context, msg sdk.Msg) (*codectypes.Any, error) { +func (k *Keeper) executeMsg(ctx sdk.Context, msg sdk.Msg) (*codectypes.Any, error) { handler := k.msgRouter.Handler(msg) if handler == nil { return nil, icatypes.ErrInvalidRoute diff --git a/modules/apps/27-interchain-accounts/host/keeper/relay_test.go b/modules/apps/27-interchain-accounts/host/keeper/relay_test.go index f7b495c8252..3f29224d2b8 100644 --- a/modules/apps/27-interchain-accounts/host/keeper/relay_test.go +++ b/modules/apps/27-interchain-accounts/host/keeper/relay_test.go @@ -25,7 +25,7 @@ import ( ibctesting "github.com/cosmos/ibc-go/v10/testing" ) -func (suite *KeeperTestSuite) TestOnRecvPacket() { +func (s *KeeperTestSuite) TestOnRecvPacket() { testedOrderings := []channeltypes.Order{channeltypes.UNORDERED, channeltypes.ORDERED} testedEncodings := []string{icatypes.EncodingProtobuf, icatypes.EncodingProto3JSON} @@ -42,16 +42,16 @@ func (suite *KeeperTestSuite) TestOnRecvPacket() { { "interchain account successfully executes an arbitrary message type using the * (allow all message types) param", func(encoding string) { - interchainAccountAddr, found := suite.chainB.GetSimApp().ICAHostKeeper.GetInterchainAccountAddress(suite.chainB.GetContext(), ibctesting.FirstConnectionID, path.EndpointA.ChannelConfig.PortID) - suite.Require().True(found) + interchainAccountAddr, found := s.chainB.GetSimApp().ICAHostKeeper.GetInterchainAccountAddress(s.chainB.GetContext(), ibctesting.FirstConnectionID, path.EndpointA.ChannelConfig.PortID) + s.Require().True(found) proposal, err := govtypesv1.NewProposal([]sdk.Msg{getTestProposalMessage()}, govtypesv1.DefaultStartingProposalID, time.Now(), time.Now().Add(time.Hour), "test proposal", "title", "Description", sdk.AccAddress(interchainAccountAddr), false) - suite.Require().NoError(err) + s.Require().NoError(err) - err = suite.chainB.GetSimApp().GovKeeper.SetProposal(suite.chainB.GetContext(), proposal) - suite.Require().NoError(err) - err = suite.chainB.GetSimApp().GovKeeper.ActivateVotingPeriod(suite.chainB.GetContext(), proposal) - suite.Require().NoError(err) + err = s.chainB.GetSimApp().GovKeeper.SetProposal(s.chainB.GetContext(), proposal) + s.Require().NoError(err) + err = s.chainB.GetSimApp().GovKeeper.ActivateVotingPeriod(s.chainB.GetContext(), proposal) + s.Require().NoError(err) msg := &govtypesv1.MsgVote{ ProposalId: govtypesv1.DefaultStartingProposalID, @@ -59,8 +59,8 @@ func (suite *KeeperTestSuite) TestOnRecvPacket() { Option: govtypesv1.OptionYes, } - data, err := icatypes.SerializeCosmosTx(suite.chainA.GetSimApp().AppCodec(), []proto.Message{msg}, encoding) - suite.Require().NoError(err) + data, err := icatypes.SerializeCosmosTx(s.chainA.GetSimApp().AppCodec(), []proto.Message{msg}, encoding) + s.Require().NoError(err) icaPacketData := icatypes.InterchainAccountPacketData{ Type: icatypes.EXECUTE_TX, @@ -70,24 +70,24 @@ func (suite *KeeperTestSuite) TestOnRecvPacket() { packetData = icaPacketData.GetBytes() params := types.NewParams(true, []string{"*"}) - suite.chainB.GetSimApp().ICAHostKeeper.SetParams(suite.chainB.GetContext(), params) + s.chainB.GetSimApp().ICAHostKeeper.SetParams(s.chainB.GetContext(), params) }, nil, }, { "interchain account successfully executes banktypes.MsgSend", func(encoding string) { - interchainAccountAddr, found := suite.chainB.GetSimApp().ICAHostKeeper.GetInterchainAccountAddress(suite.chainB.GetContext(), ibctesting.FirstConnectionID, path.EndpointA.ChannelConfig.PortID) - suite.Require().True(found) + 
interchainAccountAddr, found := s.chainB.GetSimApp().ICAHostKeeper.GetInterchainAccountAddress(s.chainB.GetContext(), ibctesting.FirstConnectionID, path.EndpointA.ChannelConfig.PortID) + s.Require().True(found) msg := &banktypes.MsgSend{ FromAddress: interchainAccountAddr, - ToAddress: suite.chainB.SenderAccount.GetAddress().String(), + ToAddress: s.chainB.SenderAccount.GetAddress().String(), Amount: sdk.NewCoins(ibctesting.TestCoin), } - data, err := icatypes.SerializeCosmosTx(suite.chainA.GetSimApp().AppCodec(), []proto.Message{msg}, encoding) - suite.Require().NoError(err) + data, err := icatypes.SerializeCosmosTx(s.chainA.GetSimApp().AppCodec(), []proto.Message{msg}, encoding) + s.Require().NoError(err) icaPacketData := icatypes.InterchainAccountPacketData{ Type: icatypes.EXECUTE_TX, @@ -97,25 +97,25 @@ func (suite *KeeperTestSuite) TestOnRecvPacket() { packetData = icaPacketData.GetBytes() params := types.NewParams(true, []string{sdk.MsgTypeURL(msg)}) - suite.chainB.GetSimApp().ICAHostKeeper.SetParams(suite.chainB.GetContext(), params) + s.chainB.GetSimApp().ICAHostKeeper.SetParams(s.chainB.GetContext(), params) }, nil, }, { "interchain account successfully executes stakingtypes.MsgDelegate", func(encoding string) { - interchainAccountAddr, found := suite.chainB.GetSimApp().ICAHostKeeper.GetInterchainAccountAddress(suite.chainB.GetContext(), ibctesting.FirstConnectionID, path.EndpointA.ChannelConfig.PortID) - suite.Require().True(found) + interchainAccountAddr, found := s.chainB.GetSimApp().ICAHostKeeper.GetInterchainAccountAddress(s.chainB.GetContext(), ibctesting.FirstConnectionID, path.EndpointA.ChannelConfig.PortID) + s.Require().True(found) - validatorAddr := (sdk.ValAddress)(suite.chainB.Vals.Validators[0].Address) + validatorAddr := (sdk.ValAddress)(s.chainB.Vals.Validators[0].Address) msg := &stakingtypes.MsgDelegate{ DelegatorAddress: interchainAccountAddr, ValidatorAddress: validatorAddr.String(), Amount: sdk.NewCoin(sdk.DefaultBondDenom, sdkmath.NewInt(5000)), } - data, err := icatypes.SerializeCosmosTx(suite.chainA.GetSimApp().AppCodec(), []proto.Message{msg}, encoding) - suite.Require().NoError(err) + data, err := icatypes.SerializeCosmosTx(s.chainA.GetSimApp().AppCodec(), []proto.Message{msg}, encoding) + s.Require().NoError(err) icaPacketData := icatypes.InterchainAccountPacketData{ Type: icatypes.EXECUTE_TX, @@ -125,17 +125,17 @@ func (suite *KeeperTestSuite) TestOnRecvPacket() { packetData = icaPacketData.GetBytes() params := types.NewParams(true, []string{sdk.MsgTypeURL(msg)}) - suite.chainB.GetSimApp().ICAHostKeeper.SetParams(suite.chainB.GetContext(), params) + s.chainB.GetSimApp().ICAHostKeeper.SetParams(s.chainB.GetContext(), params) }, nil, }, { "interchain account successfully executes stakingtypes.MsgDelegate and stakingtypes.MsgUndelegate sequentially", func(encoding string) { - interchainAccountAddr, found := suite.chainB.GetSimApp().ICAHostKeeper.GetInterchainAccountAddress(suite.chainB.GetContext(), ibctesting.FirstConnectionID, path.EndpointA.ChannelConfig.PortID) - suite.Require().True(found) + interchainAccountAddr, found := s.chainB.GetSimApp().ICAHostKeeper.GetInterchainAccountAddress(s.chainB.GetContext(), ibctesting.FirstConnectionID, path.EndpointA.ChannelConfig.PortID) + s.Require().True(found) - validatorAddr := (sdk.ValAddress)(suite.chainB.Vals.Validators[0].Address) + validatorAddr := (sdk.ValAddress)(s.chainB.Vals.Validators[0].Address) msgDelegate := &stakingtypes.MsgDelegate{ DelegatorAddress: interchainAccountAddr, ValidatorAddress: 
validatorAddr.String(), @@ -148,8 +148,8 @@ func (suite *KeeperTestSuite) TestOnRecvPacket() { Amount: sdk.NewCoin(sdk.DefaultBondDenom, sdkmath.NewInt(5000)), } - data, err := icatypes.SerializeCosmosTx(suite.chainA.GetSimApp().AppCodec(), []proto.Message{msgDelegate, msgUndelegate}, encoding) - suite.Require().NoError(err) + data, err := icatypes.SerializeCosmosTx(s.chainA.GetSimApp().AppCodec(), []proto.Message{msgDelegate, msgUndelegate}, encoding) + s.Require().NoError(err) icaPacketData := icatypes.InterchainAccountPacketData{ Type: icatypes.EXECUTE_TX, @@ -159,21 +159,21 @@ func (suite *KeeperTestSuite) TestOnRecvPacket() { packetData = icaPacketData.GetBytes() params := types.NewParams(true, []string{sdk.MsgTypeURL(msgDelegate), sdk.MsgTypeURL(msgUndelegate)}) - suite.chainB.GetSimApp().ICAHostKeeper.SetParams(suite.chainB.GetContext(), params) + s.chainB.GetSimApp().ICAHostKeeper.SetParams(s.chainB.GetContext(), params) }, nil, }, { "interchain account successfully executes govtypesv1.MsgSubmitProposal", func(encoding string) { - interchainAccountAddr, found := suite.chainB.GetSimApp().ICAHostKeeper.GetInterchainAccountAddress(suite.chainB.GetContext(), ibctesting.FirstConnectionID, path.EndpointA.ChannelConfig.PortID) - suite.Require().True(found) + interchainAccountAddr, found := s.chainB.GetSimApp().ICAHostKeeper.GetInterchainAccountAddress(s.chainB.GetContext(), ibctesting.FirstConnectionID, path.EndpointA.ChannelConfig.PortID) + s.Require().True(found) msg, err := govtypesv1.NewMsgSubmitProposal([]sdk.Msg{}, sdk.NewCoins(sdk.NewCoin(sdk.DefaultBondDenom, sdkmath.NewInt(100000))), interchainAccountAddr, "metadata", "title", "summary", false) - suite.Require().NoError(err) + s.Require().NoError(err) - data, err := icatypes.SerializeCosmosTx(suite.chainA.GetSimApp().AppCodec(), []proto.Message{msg}, encoding) - suite.Require().NoError(err) + data, err := icatypes.SerializeCosmosTx(s.chainA.GetSimApp().AppCodec(), []proto.Message{msg}, encoding) + s.Require().NoError(err) icaPacketData := icatypes.InterchainAccountPacketData{ Type: icatypes.EXECUTE_TX, @@ -183,23 +183,23 @@ func (suite *KeeperTestSuite) TestOnRecvPacket() { packetData = icaPacketData.GetBytes() params := types.NewParams(true, []string{sdk.MsgTypeURL(msg)}) - suite.chainB.GetSimApp().ICAHostKeeper.SetParams(suite.chainB.GetContext(), params) + s.chainB.GetSimApp().ICAHostKeeper.SetParams(s.chainB.GetContext(), params) }, nil, }, { "interchain account successfully executes govtypesv1.MsgVote", func(encoding string) { - interchainAccountAddr, found := suite.chainB.GetSimApp().ICAHostKeeper.GetInterchainAccountAddress(suite.chainB.GetContext(), ibctesting.FirstConnectionID, path.EndpointA.ChannelConfig.PortID) - suite.Require().True(found) + interchainAccountAddr, found := s.chainB.GetSimApp().ICAHostKeeper.GetInterchainAccountAddress(s.chainB.GetContext(), ibctesting.FirstConnectionID, path.EndpointA.ChannelConfig.PortID) + s.Require().True(found) proposal, err := govtypesv1.NewProposal([]sdk.Msg{getTestProposalMessage()}, govtypesv1.DefaultStartingProposalID, time.Now(), time.Now().Add(time.Hour), "test proposal", "title", "Description", sdk.AccAddress(interchainAccountAddr), false) - suite.Require().NoError(err) + s.Require().NoError(err) - err = suite.chainB.GetSimApp().GovKeeper.SetProposal(suite.chainB.GetContext(), proposal) - suite.Require().NoError(err) - err = suite.chainB.GetSimApp().GovKeeper.ActivateVotingPeriod(suite.chainB.GetContext(), proposal) - suite.Require().NoError(err) + err = 
s.chainB.GetSimApp().GovKeeper.SetProposal(s.chainB.GetContext(), proposal) + s.Require().NoError(err) + err = s.chainB.GetSimApp().GovKeeper.ActivateVotingPeriod(s.chainB.GetContext(), proposal) + s.Require().NoError(err) msg := &govtypesv1.MsgVote{ ProposalId: govtypesv1.DefaultStartingProposalID, @@ -207,8 +207,8 @@ func (suite *KeeperTestSuite) TestOnRecvPacket() { Option: govtypesv1.OptionYes, } - data, err := icatypes.SerializeCosmosTx(suite.chainA.GetSimApp().AppCodec(), []proto.Message{msg}, encoding) - suite.Require().NoError(err) + data, err := icatypes.SerializeCosmosTx(s.chainA.GetSimApp().AppCodec(), []proto.Message{msg}, encoding) + s.Require().NoError(err) icaPacketData := icatypes.InterchainAccountPacketData{ Type: icatypes.EXECUTE_TX, @@ -218,23 +218,23 @@ func (suite *KeeperTestSuite) TestOnRecvPacket() { packetData = icaPacketData.GetBytes() params := types.NewParams(true, []string{sdk.MsgTypeURL(msg)}) - suite.chainB.GetSimApp().ICAHostKeeper.SetParams(suite.chainB.GetContext(), params) + s.chainB.GetSimApp().ICAHostKeeper.SetParams(s.chainB.GetContext(), params) }, nil, }, { "interchain account successfully executes disttypes.MsgFundCommunityPool", func(encoding string) { - interchainAccountAddr, found := suite.chainB.GetSimApp().ICAHostKeeper.GetInterchainAccountAddress(suite.chainB.GetContext(), ibctesting.FirstConnectionID, path.EndpointA.ChannelConfig.PortID) - suite.Require().True(found) + interchainAccountAddr, found := s.chainB.GetSimApp().ICAHostKeeper.GetInterchainAccountAddress(s.chainB.GetContext(), ibctesting.FirstConnectionID, path.EndpointA.ChannelConfig.PortID) + s.Require().True(found) msg := &disttypes.MsgFundCommunityPool{ Amount: sdk.NewCoins(sdk.NewCoin(sdk.DefaultBondDenom, sdkmath.NewInt(5000))), Depositor: interchainAccountAddr, } - data, err := icatypes.SerializeCosmosTx(suite.chainA.GetSimApp().AppCodec(), []proto.Message{msg}, encoding) - suite.Require().NoError(err) + data, err := icatypes.SerializeCosmosTx(s.chainA.GetSimApp().AppCodec(), []proto.Message{msg}, encoding) + s.Require().NoError(err) icaPacketData := icatypes.InterchainAccountPacketData{ Type: icatypes.EXECUTE_TX, @@ -244,19 +244,19 @@ func (suite *KeeperTestSuite) TestOnRecvPacket() { packetData = icaPacketData.GetBytes() params := types.NewParams(true, []string{sdk.MsgTypeURL(msg)}) - suite.chainB.GetSimApp().ICAHostKeeper.SetParams(suite.chainB.GetContext(), params) + s.chainB.GetSimApp().ICAHostKeeper.SetParams(s.chainB.GetContext(), params) }, nil, }, { "interchain account successfully executes icahosttypes.MsgModuleQuerySafe", func(encoding string) { - interchainAccountAddr, found := suite.chainB.GetSimApp().ICAHostKeeper.GetInterchainAccountAddress(suite.chainB.GetContext(), ibctesting.FirstConnectionID, path.EndpointA.ChannelConfig.PortID) - suite.Require().True(found) + interchainAccountAddr, found := s.chainB.GetSimApp().ICAHostKeeper.GetInterchainAccountAddress(s.chainB.GetContext(), ibctesting.FirstConnectionID, path.EndpointA.ChannelConfig.PortID) + s.Require().True(found) - balanceQuery := banktypes.NewQueryBalanceRequest(suite.chainB.SenderAccount.GetAddress(), sdk.DefaultBondDenom) + balanceQuery := banktypes.NewQueryBalanceRequest(s.chainB.SenderAccount.GetAddress(), sdk.DefaultBondDenom) queryBz, err := balanceQuery.Marshal() - suite.Require().NoError(err) + s.Require().NoError(err) msg := types.NewMsgModuleQuerySafe(interchainAccountAddr, []types.QueryRequest{ { @@ -265,8 +265,8 @@ func (suite *KeeperTestSuite) TestOnRecvPacket() { }, }) - data, err := 
icatypes.SerializeCosmosTx(suite.chainA.GetSimApp().AppCodec(), []proto.Message{msg}, encoding) - suite.Require().NoError(err) + data, err := icatypes.SerializeCosmosTx(s.chainA.GetSimApp().AppCodec(), []proto.Message{msg}, encoding) + s.Require().NoError(err) icaPacketData := icatypes.InterchainAccountPacketData{ Type: icatypes.EXECUTE_TX, @@ -276,23 +276,23 @@ func (suite *KeeperTestSuite) TestOnRecvPacket() { packetData = icaPacketData.GetBytes() params := types.NewParams(true, []string{sdk.MsgTypeURL(msg)}) - suite.chainB.GetSimApp().ICAHostKeeper.SetParams(suite.chainB.GetContext(), params) + s.chainB.GetSimApp().ICAHostKeeper.SetParams(s.chainB.GetContext(), params) }, nil, }, { "interchain account successfully executes disttypes.MsgSetWithdrawAddress", func(encoding string) { - interchainAccountAddr, found := suite.chainB.GetSimApp().ICAHostKeeper.GetInterchainAccountAddress(suite.chainB.GetContext(), ibctesting.FirstConnectionID, path.EndpointA.ChannelConfig.PortID) - suite.Require().True(found) + interchainAccountAddr, found := s.chainB.GetSimApp().ICAHostKeeper.GetInterchainAccountAddress(s.chainB.GetContext(), ibctesting.FirstConnectionID, path.EndpointA.ChannelConfig.PortID) + s.Require().True(found) msg := &disttypes.MsgSetWithdrawAddress{ DelegatorAddress: interchainAccountAddr, - WithdrawAddress: suite.chainB.SenderAccount.GetAddress().String(), + WithdrawAddress: s.chainB.SenderAccount.GetAddress().String(), } - data, err := icatypes.SerializeCosmosTx(suite.chainA.GetSimApp().AppCodec(), []proto.Message{msg}, encoding) - suite.Require().NoError(err) + data, err := icatypes.SerializeCosmosTx(s.chainA.GetSimApp().AppCodec(), []proto.Message{msg}, encoding) + s.Require().NoError(err) icaPacketData := icatypes.InterchainAccountPacketData{ Type: icatypes.EXECUTE_TX, @@ -302,33 +302,33 @@ func (suite *KeeperTestSuite) TestOnRecvPacket() { packetData = icaPacketData.GetBytes() params := types.NewParams(true, []string{sdk.MsgTypeURL(msg)}) - suite.chainB.GetSimApp().ICAHostKeeper.SetParams(suite.chainB.GetContext(), params) + s.chainB.GetSimApp().ICAHostKeeper.SetParams(s.chainB.GetContext(), params) }, nil, }, { "interchain account successfully executes transfertypes.MsgTransfer", func(encoding string) { - transferPath := ibctesting.NewTransferPath(suite.chainB, suite.chainC) + transferPath := ibctesting.NewTransferPath(s.chainB, s.chainC) transferPath.Setup() - interchainAccountAddr, found := suite.chainB.GetSimApp().ICAHostKeeper.GetInterchainAccountAddress(suite.chainB.GetContext(), ibctesting.FirstConnectionID, path.EndpointA.ChannelConfig.PortID) - suite.Require().True(found) + interchainAccountAddr, found := s.chainB.GetSimApp().ICAHostKeeper.GetInterchainAccountAddress(s.chainB.GetContext(), ibctesting.FirstConnectionID, path.EndpointA.ChannelConfig.PortID) + s.Require().True(found) msg := transfertypes.NewMsgTransfer( transferPath.EndpointA.ChannelConfig.PortID, transferPath.EndpointA.ChannelID, ibctesting.TestCoin, interchainAccountAddr, - suite.chainA.SenderAccount.GetAddress().String(), - suite.chainB.GetTimeoutHeight(), + s.chainA.SenderAccount.GetAddress().String(), + s.chainB.GetTimeoutHeight(), 0, "", ) - data, err := icatypes.SerializeCosmosTx(suite.chainA.GetSimApp().AppCodec(), []proto.Message{msg}, encoding) - suite.Require().NoError(err) + data, err := icatypes.SerializeCosmosTx(s.chainA.GetSimApp().AppCodec(), []proto.Message{msg}, encoding) + s.Require().NoError(err) icaPacketData := icatypes.InterchainAccountPacketData{ Type: icatypes.EXECUTE_TX, @@ -338,18 
+338,18 @@ func (suite *KeeperTestSuite) TestOnRecvPacket() { packetData = icaPacketData.GetBytes() params := types.NewParams(true, []string{sdk.MsgTypeURL(msg)}) - suite.chainB.GetSimApp().ICAHostKeeper.SetParams(suite.chainB.GetContext(), params) + s.chainB.GetSimApp().ICAHostKeeper.SetParams(s.chainB.GetContext(), params) }, nil, }, { "Msg fails its ValidateBasic: MsgTransfer has an empty receiver", func(encoding string) { - transferPath := ibctesting.NewTransferPath(suite.chainB, suite.chainC) + transferPath := ibctesting.NewTransferPath(s.chainB, s.chainC) transferPath.Setup() - interchainAccountAddr, found := suite.chainB.GetSimApp().ICAHostKeeper.GetInterchainAccountAddress(suite.chainB.GetContext(), ibctesting.FirstConnectionID, path.EndpointA.ChannelConfig.PortID) - suite.Require().True(found) + interchainAccountAddr, found := s.chainB.GetSimApp().ICAHostKeeper.GetInterchainAccountAddress(s.chainB.GetContext(), ibctesting.FirstConnectionID, path.EndpointA.ChannelConfig.PortID) + s.Require().True(found) msg := transfertypes.NewMsgTransfer( transferPath.EndpointA.ChannelConfig.PortID, @@ -357,13 +357,13 @@ func (suite *KeeperTestSuite) TestOnRecvPacket() { ibctesting.TestCoin, interchainAccountAddr, "", - suite.chainB.GetTimeoutHeight(), + s.chainB.GetTimeoutHeight(), 0, "", ) - data, err := icatypes.SerializeCosmosTx(suite.chainA.GetSimApp().AppCodec(), []proto.Message{msg}, encoding) - suite.Require().NoError(err) + data, err := icatypes.SerializeCosmosTx(s.chainA.GetSimApp().AppCodec(), []proto.Message{msg}, encoding) + s.Require().NoError(err) icaPacketData := icatypes.InterchainAccountPacketData{ Type: icatypes.EXECUTE_TX, @@ -373,7 +373,7 @@ func (suite *KeeperTestSuite) TestOnRecvPacket() { packetData = icaPacketData.GetBytes() params := types.NewParams(true, []string{sdk.MsgTypeURL(msg)}) - suite.chainB.GetSimApp().ICAHostKeeper.SetParams(suite.chainB.GetContext(), params) + s.chainB.GetSimApp().ICAHostKeeper.SetParams(s.chainB.GetContext(), params) }, ibcerrors.ErrInvalidAddress, }, @@ -382,8 +382,8 @@ func (suite *KeeperTestSuite) TestOnRecvPacket() { func(encoding string) { msg := &banktypes.MsgSendResponse{} - data, err := icatypes.SerializeCosmosTx(suite.chainA.GetSimApp().AppCodec(), []proto.Message{msg}, encoding) - suite.Require().NoError(err) + data, err := icatypes.SerializeCosmosTx(s.chainA.GetSimApp().AppCodec(), []proto.Message{msg}, encoding) + s.Require().NoError(err) icaPacketData := icatypes.InterchainAccountPacketData{ Type: icatypes.EXECUTE_TX, @@ -393,7 +393,7 @@ func (suite *KeeperTestSuite) TestOnRecvPacket() { packetData = icaPacketData.GetBytes() params := types.NewParams(true, []string{"/" + proto.MessageName(msg)}) - suite.chainB.GetSimApp().ICAHostKeeper.SetParams(suite.chainB.GetContext(), params) + s.chainB.GetSimApp().ICAHostKeeper.SetParams(s.chainB.GetContext(), params) }, ibcerrors.ErrInvalidType, }, @@ -421,8 +421,8 @@ func (suite *KeeperTestSuite) TestOnRecvPacket() { { "invalid packet type - UNSPECIFIED", func(encoding string) { - data, err := icatypes.SerializeCosmosTx(suite.chainA.GetSimApp().AppCodec(), []proto.Message{&banktypes.MsgSend{}}, encoding) - suite.Require().NoError(err) + data, err := icatypes.SerializeCosmosTx(s.chainA.GetSimApp().AppCodec(), []proto.Message{&banktypes.MsgSend{}}, encoding) + s.Require().NoError(err) icaPacketData := icatypes.InterchainAccountPacketData{ Type: icatypes.UNSPECIFIED, @@ -438,8 +438,8 @@ func (suite *KeeperTestSuite) TestOnRecvPacket() { func(encoding string) { 
path.EndpointA.ChannelConfig.PortID = "invalid-port-id" //nolint:goconst - data, err := icatypes.SerializeCosmosTx(suite.chainA.GetSimApp().AppCodec(), []proto.Message{&banktypes.MsgSend{}}, encoding) - suite.Require().NoError(err) + data, err := icatypes.SerializeCosmosTx(s.chainA.GetSimApp().AppCodec(), []proto.Message{&banktypes.MsgSend{}}, encoding) + s.Require().NoError(err) icaPacketData := icatypes.InterchainAccountPacketData{ Type: icatypes.EXECUTE_TX, @@ -454,13 +454,13 @@ func (suite *KeeperTestSuite) TestOnRecvPacket() { "unauthorised: message type not allowed", // NOTE: do not update params to explicitly force the error func(encoding string) { msg := &banktypes.MsgSend{ - FromAddress: suite.chainB.SenderAccount.GetAddress().String(), - ToAddress: suite.chainB.SenderAccount.GetAddress().String(), + FromAddress: s.chainB.SenderAccount.GetAddress().String(), + ToAddress: s.chainB.SenderAccount.GetAddress().String(), Amount: sdk.NewCoins(ibctesting.TestCoin), } - data, err := icatypes.SerializeCosmosTx(suite.chainA.GetSimApp().AppCodec(), []proto.Message{msg}, encoding) - suite.Require().NoError(err) + data, err := icatypes.SerializeCosmosTx(s.chainA.GetSimApp().AppCodec(), []proto.Message{msg}, encoding) + s.Require().NoError(err) icaPacketData := icatypes.InterchainAccountPacketData{ Type: icatypes.EXECUTE_TX, @@ -475,13 +475,13 @@ func (suite *KeeperTestSuite) TestOnRecvPacket() { "unauthorised: signer address is not the interchain account associated with the controller portID", func(encoding string) { msg := &banktypes.MsgSend{ - FromAddress: suite.chainB.SenderAccount.GetAddress().String(), // unexpected signer - ToAddress: suite.chainB.SenderAccount.GetAddress().String(), + FromAddress: s.chainB.SenderAccount.GetAddress().String(), // unexpected signer + ToAddress: s.chainB.SenderAccount.GetAddress().String(), Amount: sdk.NewCoins(ibctesting.TestCoin), } - data, err := icatypes.SerializeCosmosTx(suite.chainA.GetSimApp().AppCodec(), []proto.Message{msg}, encoding) - suite.Require().NoError(err) + data, err := icatypes.SerializeCosmosTx(s.chainA.GetSimApp().AppCodec(), []proto.Message{msg}, encoding) + s.Require().NoError(err) icaPacketData := icatypes.InterchainAccountPacketData{ Type: icatypes.EXECUTE_TX, @@ -491,7 +491,7 @@ func (suite *KeeperTestSuite) TestOnRecvPacket() { packetData = icaPacketData.GetBytes() params := types.NewParams(true, []string{sdk.MsgTypeURL(msg)}) - suite.chainB.GetSimApp().ICAHostKeeper.SetParams(suite.chainB.GetContext(), params) + s.chainB.GetSimApp().ICAHostKeeper.SetParams(s.chainB.GetContext(), params) }, ibcerrors.ErrUnauthorized, }, @@ -500,52 +500,52 @@ func (suite *KeeperTestSuite) TestOnRecvPacket() { for _, ordering := range testedOrderings { for _, encoding := range testedEncodings { for _, tc := range testCases { - suite.Run(tc.msg, func() { - suite.SetupTest() // reset + s.Run(tc.msg, func() { + s.SetupTest() // reset - path = NewICAPath(suite.chainA, suite.chainB, encoding, ordering) + path = NewICAPath(s.chainA, s.chainB, encoding, ordering) path.SetupConnections() err := SetupICAPath(path, TestOwnerAddress) - suite.Require().NoError(err) + s.Require().NoError(err) portID, err := icatypes.NewControllerPortID(TestOwnerAddress) - suite.Require().NoError(err) + s.Require().NoError(err) // Get the address of the interchain account stored in state during handshake step - storedAddr, found := suite.chainB.GetSimApp().ICAHostKeeper.GetInterchainAccountAddress(suite.chainB.GetContext(), ibctesting.FirstConnectionID, portID) - 
suite.Require().True(found) + storedAddr, found := s.chainB.GetSimApp().ICAHostKeeper.GetInterchainAccountAddress(s.chainB.GetContext(), ibctesting.FirstConnectionID, portID) + s.Require().True(found) icaAddr, err := sdk.AccAddressFromBech32(storedAddr) - suite.Require().NoError(err) + s.Require().NoError(err) // Check if account is created - interchainAccount := suite.chainB.GetSimApp().AccountKeeper.GetAccount(suite.chainB.GetContext(), icaAddr) - suite.Require().Equal(interchainAccount.GetAddress().String(), storedAddr) + interchainAccount := s.chainB.GetSimApp().AccountKeeper.GetAccount(s.chainB.GetContext(), icaAddr) + s.Require().Equal(interchainAccount.GetAddress().String(), storedAddr) - suite.fundICAWallet(suite.chainB.GetContext(), path.EndpointA.ChannelConfig.PortID, sdk.NewCoins(sdk.NewCoin(sdk.DefaultBondDenom, sdkmath.NewInt(1000000)))) + s.fundICAWallet(s.chainB.GetContext(), path.EndpointA.ChannelConfig.PortID, sdk.NewCoins(sdk.NewCoin(sdk.DefaultBondDenom, sdkmath.NewInt(1000000)))) tc.malleate(encoding) // malleate mutates test data packet := channeltypes.NewPacket( packetData, - suite.chainA.SenderAccount.GetSequence(), + s.chainA.SenderAccount.GetSequence(), path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, - suite.chainB.GetTimeoutHeight(), + s.chainB.GetTimeoutHeight(), 0, ) - txResponse, err := suite.chainB.GetSimApp().ICAHostKeeper.OnRecvPacket(suite.chainB.GetContext(), packet) + txResponse, err := s.chainB.GetSimApp().ICAHostKeeper.OnRecvPacket(s.chainB.GetContext(), packet) if tc.expErr == nil { - suite.Require().NoError(err) - suite.Require().NotNil(txResponse) + s.Require().NoError(err) + s.Require().NotNil(txResponse) } else { - suite.Require().ErrorIs(err, tc.expErr) - suite.Require().Nil(txResponse) + s.Require().ErrorIs(err, tc.expErr) + s.Require().Nil(txResponse) } }) } @@ -553,7 +553,7 @@ func (suite *KeeperTestSuite) TestOnRecvPacket() { } } -func (suite *KeeperTestSuite) TestJSONOnRecvPacket() { +func (s *KeeperTestSuite) TestJSONOnRecvPacket() { var ( path *ibctesting.Path packetData []byte @@ -568,13 +568,13 @@ func (suite *KeeperTestSuite) TestJSONOnRecvPacket() { { "interchain account successfully executes an arbitrary message type using the * (allow all message types) param", func(icaAddress string) { - proposal, err := govtypesv1.NewProposal([]sdk.Msg{getTestProposalMessage()}, govtypesv1.DefaultStartingProposalID, suite.chainA.GetContext().BlockTime(), suite.chainA.GetContext().BlockTime(), "test proposal", "title", "Description", sdk.AccAddress(interchainAccountAddr), false) - suite.Require().NoError(err) + proposal, err := govtypesv1.NewProposal([]sdk.Msg{getTestProposalMessage()}, govtypesv1.DefaultStartingProposalID, s.chainA.GetContext().BlockTime(), s.chainA.GetContext().BlockTime(), "test proposal", "title", "Description", sdk.AccAddress(interchainAccountAddr), false) + s.Require().NoError(err) - err = suite.chainB.GetSimApp().GovKeeper.SetProposal(suite.chainB.GetContext(), proposal) - suite.Require().NoError(err) - err = suite.chainB.GetSimApp().GovKeeper.ActivateVotingPeriod(suite.chainB.GetContext(), proposal) - suite.Require().NoError(err) + err = s.chainB.GetSimApp().GovKeeper.SetProposal(s.chainB.GetContext(), proposal) + s.Require().NoError(err) + err = s.chainB.GetSimApp().GovKeeper.ActivateVotingPeriod(s.chainB.GetContext(), proposal) + s.Require().NoError(err) msgBytes := []byte(`{ "messages": [ @@ -596,7 +596,7 @@ func (suite *KeeperTestSuite) 
TestJSONOnRecvPacket() { }`) params := types.NewParams(true, []string{"*"}) - suite.chainB.GetSimApp().ICAHostKeeper.SetParams(suite.chainB.GetContext(), params) + s.chainB.GetSimApp().ICAHostKeeper.SetParams(s.chainB.GetContext(), params) }, nil, }, @@ -621,7 +621,7 @@ func (suite *KeeperTestSuite) TestJSONOnRecvPacket() { }`) params := types.NewParams(true, []string{sdk.MsgTypeURL((*banktypes.MsgSend)(nil))}) - suite.chainB.GetSimApp().ICAHostKeeper.SetParams(suite.chainB.GetContext(), params) + s.chainB.GetSimApp().ICAHostKeeper.SetParams(s.chainB.GetContext(), params) }, nil, }, @@ -650,20 +650,20 @@ func (suite *KeeperTestSuite) TestJSONOnRecvPacket() { }`) params := types.NewParams(true, []string{sdk.MsgTypeURL((*govtypesv1.MsgSubmitProposal)(nil))}) - suite.chainB.GetSimApp().ICAHostKeeper.SetParams(suite.chainB.GetContext(), params) + s.chainB.GetSimApp().ICAHostKeeper.SetParams(s.chainB.GetContext(), params) }, nil, }, { "interchain account successfully executes govtypesv1.MsgVote", func(icaAddress string) { - proposal, err := govtypesv1.NewProposal([]sdk.Msg{getTestProposalMessage()}, govtypesv1.DefaultStartingProposalID, suite.chainA.GetContext().BlockTime(), suite.chainA.GetContext().BlockTime(), "test proposal", "title", "Description", sdk.AccAddress(interchainAccountAddr), false) - suite.Require().NoError(err) + proposal, err := govtypesv1.NewProposal([]sdk.Msg{getTestProposalMessage()}, govtypesv1.DefaultStartingProposalID, s.chainA.GetContext().BlockTime(), s.chainA.GetContext().BlockTime(), "test proposal", "title", "Description", sdk.AccAddress(interchainAccountAddr), false) + s.Require().NoError(err) - err = suite.chainB.GetSimApp().GovKeeper.SetProposal(suite.chainB.GetContext(), proposal) - suite.Require().NoError(err) - err = suite.chainB.GetSimApp().GovKeeper.ActivateVotingPeriod(suite.chainB.GetContext(), proposal) - suite.Require().NoError(err) + err = s.chainB.GetSimApp().GovKeeper.SetProposal(s.chainB.GetContext(), proposal) + s.Require().NoError(err) + err = s.chainB.GetSimApp().GovKeeper.ActivateVotingPeriod(s.chainB.GetContext(), proposal) + s.Require().NoError(err) msgBytes := []byte(`{ "messages": [ @@ -683,7 +683,7 @@ func (suite *KeeperTestSuite) TestJSONOnRecvPacket() { }`) params := types.NewParams(true, []string{sdk.MsgTypeURL((*govtypesv1.MsgVote)(nil))}) - suite.chainB.GetSimApp().ICAHostKeeper.SetParams(suite.chainB.GetContext(), params) + s.chainB.GetSimApp().ICAHostKeeper.SetParams(s.chainB.GetContext(), params) }, nil, }, @@ -724,14 +724,14 @@ func (suite *KeeperTestSuite) TestJSONOnRecvPacket() { }`) params := types.NewParams(true, []string{sdk.MsgTypeURL((*govtypesv1.MsgSubmitProposal)(nil)), sdk.MsgTypeURL((*govtypesv1.MsgDeposit)(nil)), sdk.MsgTypeURL((*govtypesv1.MsgVote)(nil))}) - suite.chainB.GetSimApp().ICAHostKeeper.SetParams(suite.chainB.GetContext(), params) + s.chainB.GetSimApp().ICAHostKeeper.SetParams(s.chainB.GetContext(), params) }, nil, }, { "interchain account successfully executes transfertypes.MsgTransfer", func(icaAddress string) { - transferPath := ibctesting.NewTransferPath(suite.chainB, suite.chainC) + transferPath := ibctesting.NewTransferPath(s.chainB, s.chainC) transferPath.Setup() @@ -758,7 +758,7 @@ func (suite *KeeperTestSuite) TestJSONOnRecvPacket() { }`) params := types.NewParams(true, []string{sdk.MsgTypeURL((*transfertypes.MsgTransfer)(nil))}) - suite.chainB.GetSimApp().ICAHostKeeper.SetParams(suite.chainB.GetContext(), params) + s.chainB.GetSimApp().ICAHostKeeper.SetParams(s.chainB.GetContext(), params) }, nil, 
}, @@ -774,7 +774,7 @@ func (suite *KeeperTestSuite) TestJSONOnRecvPacket() { }`) params := types.NewParams(true, []string{"*"}) - suite.chainB.GetSimApp().ICAHostKeeper.SetParams(suite.chainB.GetContext(), params) + s.chainB.GetSimApp().ICAHostKeeper.SetParams(s.chainB.GetContext(), params) }, ibcerrors.ErrInvalidType, }, @@ -799,7 +799,7 @@ func (suite *KeeperTestSuite) TestJSONOnRecvPacket() { }`) params := types.NewParams(true, []string{sdk.MsgTypeURL((*transfertypes.MsgTransfer)(nil))}) - suite.chainB.GetSimApp().ICAHostKeeper.SetParams(suite.chainB.GetContext(), params) + s.chainB.GetSimApp().ICAHostKeeper.SetParams(s.chainB.GetContext(), params) }, ibcerrors.ErrUnauthorized, }, @@ -810,7 +810,7 @@ func (suite *KeeperTestSuite) TestJSONOnRecvPacket() { "messages": [ { "@type": "/cosmos.bank.v1beta1.MsgSend", - "from_address": "` + suite.chainB.SenderAccount.GetAddress().String() + `", // unexpected signer + "from_address": "` + s.chainB.SenderAccount.GetAddress().String() + `", // unexpected signer "to_address": "cosmos17dtl0mjt3t77kpuhg2edqzjpszulwhgzuj9ljs", "amount": [{ "denom": "stake", "amount": "100" }] } @@ -824,7 +824,7 @@ func (suite *KeeperTestSuite) TestJSONOnRecvPacket() { }`) params := types.NewParams(true, []string{sdk.MsgTypeURL((*banktypes.MsgSend)(nil))}) - suite.chainB.GetSimApp().ICAHostKeeper.SetParams(suite.chainB.GetContext(), params) + s.chainB.GetSimApp().ICAHostKeeper.SetParams(s.chainB.GetContext(), params) }, ibcerrors.ErrInvalidType, }, @@ -832,64 +832,64 @@ func (suite *KeeperTestSuite) TestJSONOnRecvPacket() { for _, ordering := range []channeltypes.Order{channeltypes.UNORDERED, channeltypes.ORDERED} { for _, tc := range testCases { - suite.Run(tc.msg, func() { - suite.SetupTest() // reset + s.Run(tc.msg, func() { + s.SetupTest() // reset - path = NewICAPath(suite.chainA, suite.chainB, icatypes.EncodingProto3JSON, ordering) + path = NewICAPath(s.chainA, s.chainB, icatypes.EncodingProto3JSON, ordering) path.SetupConnections() err := SetupICAPath(path, TestOwnerAddress) - suite.Require().NoError(err) + s.Require().NoError(err) portID, err := icatypes.NewControllerPortID(TestOwnerAddress) - suite.Require().NoError(err) + s.Require().NoError(err) // Get the address of the interchain account stored in state during handshake step - icaAddress, found := suite.chainB.GetSimApp().ICAHostKeeper.GetInterchainAccountAddress(suite.chainB.GetContext(), ibctesting.FirstConnectionID, portID) - suite.Require().True(found) + icaAddress, found := s.chainB.GetSimApp().ICAHostKeeper.GetInterchainAccountAddress(s.chainB.GetContext(), ibctesting.FirstConnectionID, portID) + s.Require().True(found) - suite.fundICAWallet(suite.chainB.GetContext(), path.EndpointA.ChannelConfig.PortID, sdk.NewCoins(sdk.NewCoin(sdk.DefaultBondDenom, sdkmath.NewInt(100000000)))) + s.fundICAWallet(s.chainB.GetContext(), path.EndpointA.ChannelConfig.PortID, sdk.NewCoins(sdk.NewCoin(sdk.DefaultBondDenom, sdkmath.NewInt(100000000)))) tc.malleate(icaAddress) // malleate mutates test data packet := channeltypes.NewPacket( packetData, - suite.chainA.SenderAccount.GetSequence(), + s.chainA.SenderAccount.GetSequence(), path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, - suite.chainB.GetTimeoutHeight(), + s.chainB.GetTimeoutHeight(), 0, ) - txResponse, err := suite.chainB.GetSimApp().ICAHostKeeper.OnRecvPacket(suite.chainB.GetContext(), packet) + txResponse, err := s.chainB.GetSimApp().ICAHostKeeper.OnRecvPacket(s.chainB.GetContext(), 
packet) if tc.expErr == nil { - suite.Require().NoError(err) - suite.Require().NotNil(txResponse) + s.Require().NoError(err) + s.Require().NotNil(txResponse) } else { - suite.Require().ErrorIs(err, tc.expErr) - suite.Require().Nil(txResponse) + s.Require().ErrorIs(err, tc.expErr) + s.Require().Nil(txResponse) } }) } } } -func (suite *KeeperTestSuite) fundICAWallet(ctx sdk.Context, portID string, amount sdk.Coins) { - interchainAccountAddr, found := suite.chainB.GetSimApp().ICAHostKeeper.GetInterchainAccountAddress(ctx, ibctesting.FirstConnectionID, portID) - suite.Require().True(found) +func (s *KeeperTestSuite) fundICAWallet(ctx sdk.Context, portID string, amount sdk.Coins) { + interchainAccountAddr, found := s.chainB.GetSimApp().ICAHostKeeper.GetInterchainAccountAddress(ctx, ibctesting.FirstConnectionID, portID) + s.Require().True(found) msgBankSend := &banktypes.MsgSend{ - FromAddress: suite.chainB.SenderAccount.GetAddress().String(), + FromAddress: s.chainB.SenderAccount.GetAddress().String(), ToAddress: interchainAccountAddr, Amount: amount, } - res, err := suite.chainB.SendMsgs(msgBankSend) - suite.Require().NotEmpty(res) - suite.Require().NoError(err) + res, err := s.chainB.SendMsgs(msgBankSend) + s.Require().NotEmpty(res) + s.Require().NoError(err) } func getTestProposalMessage() sdk.Msg { diff --git a/modules/apps/27-interchain-accounts/host/types/keys.go b/modules/apps/27-interchain-accounts/host/types/keys.go index 16759f555d4..4b8dd574ac1 100644 --- a/modules/apps/27-interchain-accounts/host/types/keys.go +++ b/modules/apps/27-interchain-accounts/host/types/keys.go @@ -20,6 +20,13 @@ const ( AllowAllHostMsgs = "*" ) +var ( + // KeyHostEnabled is the store key for HostEnabled Params + KeyHostEnabled = []byte("HostEnabled") + // KeyAllowMessages is the store key for the AllowMessages Params + KeyAllowMessages = []byte("AllowMessages") +) + // ContainsMsgType returns true if the sdk.Msg TypeURL is present in allowMsgs, otherwise false func ContainsMsgType(allowMsgs []string, msg sdk.Msg) bool { // check that wildcard * option for allowing all message types is the only string in the array, if so, return true diff --git a/modules/apps/27-interchain-accounts/host/types/msgs_test.go b/modules/apps/27-interchain-accounts/host/types/msgs_test.go index 2cc03e92f58..9ccee626105 100644 --- a/modules/apps/27-interchain-accounts/host/types/msgs_test.go +++ b/modules/apps/27-interchain-accounts/host/types/msgs_test.go @@ -40,7 +40,6 @@ func TestMsgUpdateParamsValidateBasic(t *testing.T) { } for _, tc := range testCases { - err := tc.msg.ValidateBasic() if tc.expErr == nil { require.NoError(t, err) @@ -61,7 +60,6 @@ func TestMsgUpdateParamsGetSigners(t *testing.T) { } for _, tc := range testCases { - msg := types.NewMsgUpdateParams(tc.address.String(), types.DefaultParams()) encodingCfg := moduletestutil.MakeTestEncodingConfig(ica.AppModuleBasic{}) signers, _, err := encodingCfg.Codec.GetMsgV1Signers(msg) diff --git a/modules/apps/27-interchain-accounts/host/types/params_legacy.go b/modules/apps/27-interchain-accounts/host/types/params_legacy.go deleted file mode 100644 index ecf879d2ff0..00000000000 --- a/modules/apps/27-interchain-accounts/host/types/params_legacy.go +++ /dev/null @@ -1,59 +0,0 @@ -/* -NOTE: Usage of x/params to manage parameters is deprecated in favor of x/gov -controlled execution of MsgUpdateParams messages. These types remains solely -for migration purposes and will be removed in a future release. 
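Note: with the x/params key table removed, keys.go takes over the KeyHostEnabled and KeyAllowMessages byte keys from the deleted params_legacy.go (the remainder of that file follows below), and the submodule stores its whole Params object under a single ParamsKey, as exercised by TestUnsetParams above. A self-contained sketch of that single-key pattern, with JSON and an in-memory map standing in for the protobuf codec and the KVStore:

package main

import (
	"encoding/json"
	"fmt"
)

// Params mirrors the shape of the ICA host params; the real type is
// protobuf-generated and marshaled with the app codec.
type Params struct {
	HostEnabled   bool     `json:"host_enabled"`
	AllowMessages []string `json:"allow_messages"`
}

const paramsKey = "params"

// setParams writes the whole Params object under one key, instead of the
// per-field KeyHostEnabled/KeyAllowMessages pairs managed by x/params.
func setParams(store map[string][]byte, p Params) {
	bz, err := json.Marshal(p)
	if err != nil {
		panic(err)
	}
	store[paramsKey] = bz
}

// getParams panics when the key is missing, matching the behaviour that
// TestUnsetParams asserts after deleting types.ParamsKey from the store.
func getParams(store map[string][]byte) Params {
	bz, ok := store[paramsKey]
	if !ok {
		panic("ica/host params are not set in store")
	}
	var p Params
	if err := json.Unmarshal(bz, &p); err != nil {
		panic(err)
	}
	return p
}

func main() {
	store := map[string][]byte{}
	setParams(store, Params{HostEnabled: true, AllowMessages: []string{"*"}})
	fmt.Printf("%+v\n", getParams(store))
}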
-[#3621](https://github.com/cosmos/ibc-go/issues/3621) -*/ - -package types - -import ( - "fmt" - "strings" - - paramtypes "github.com/cosmos/cosmos-sdk/x/params/types" -) - -var ( - // KeyHostEnabled is the store key for HostEnabled Params - KeyHostEnabled = []byte("HostEnabled") - // KeyAllowMessages is the store key for the AllowMessages Params - KeyAllowMessages = []byte("AllowMessages") -) - -// ParamKeyTable type declaration for parameters -func ParamKeyTable() paramtypes.KeyTable { - return paramtypes.NewKeyTable().RegisterParamSet(&Params{}) -} - -// ParamSetPairs implements params.ParamSet -func (p *Params) ParamSetPairs() paramtypes.ParamSetPairs { - return paramtypes.ParamSetPairs{ - paramtypes.NewParamSetPair(KeyHostEnabled, &p.HostEnabled, validateEnabledType), - paramtypes.NewParamSetPair(KeyAllowMessages, &p.AllowMessages, validateAllowlistLegacy), - } -} - -func validateEnabledType(i any) error { - _, ok := i.(bool) - if !ok { - return fmt.Errorf("invalid parameter type: %T", i) - } - - return nil -} - -func validateAllowlistLegacy(i any) error { - allowMsgs, ok := i.([]string) - if !ok { - return fmt.Errorf("invalid parameter type: %T", i) - } - - for _, typeURL := range allowMsgs { - if strings.TrimSpace(typeURL) == "" { - return fmt.Errorf("parameter must not contain empty strings: %s", allowMsgs) - } - } - - return nil -} diff --git a/modules/apps/27-interchain-accounts/host/types/query.pb.go b/modules/apps/27-interchain-accounts/host/types/query.pb.go index 85880abf009..ee722bdd76f 100644 --- a/modules/apps/27-interchain-accounts/host/types/query.pb.go +++ b/modules/apps/27-interchain-accounts/host/types/query.pb.go @@ -213,6 +213,7 @@ func _Query_Params_Handler(srv interface{}, ctx context.Context, dec func(interf return interceptor(ctx, in, info, handler) } +var Query_serviceDesc = _Query_serviceDesc var _Query_serviceDesc = grpc.ServiceDesc{ ServiceName: "ibc.applications.interchain_accounts.host.v1.Query", HandlerType: (*QueryServer)(nil), diff --git a/modules/apps/27-interchain-accounts/host/types/tx.pb.go b/modules/apps/27-interchain-accounts/host/types/tx.pb.go index 7670951900c..976d064edbd 100644 --- a/modules/apps/27-interchain-accounts/host/types/tx.pb.go +++ b/modules/apps/27-interchain-accounts/host/types/tx.pb.go @@ -352,6 +352,7 @@ func _Msg_ModuleQuerySafe_Handler(srv interface{}, ctx context.Context, dec func return interceptor(ctx, in, info, handler) } +var Msg_serviceDesc = _Msg_serviceDesc var _Msg_serviceDesc = grpc.ServiceDesc{ ServiceName: "ibc.applications.interchain_accounts.host.v1.Msg", HandlerType: (*MsgServer)(nil), diff --git a/modules/apps/27-interchain-accounts/module.go b/modules/apps/27-interchain-accounts/module.go index 6b31334d135..6e5cf6f9b7c 100644 --- a/modules/apps/27-interchain-accounts/module.go +++ b/modules/apps/27-interchain-accounts/module.go @@ -132,17 +132,6 @@ func (am AppModule) RegisterServices(cfg module.Configurator) { hosttypes.RegisterMsgServer(cfg.MsgServer(), hostkeeper.NewMsgServerImpl(am.hostKeeper)) hosttypes.RegisterQueryServer(cfg.QueryServer(), am.hostKeeper) } - - controllerMigrator := controllerkeeper.NewMigrator(am.controllerKeeper) - hostMigrator := hostkeeper.NewMigrator(am.hostKeeper) - if err := cfg.RegisterMigration(types.ModuleName, 2, func(ctx sdk.Context) error { - if err := hostMigrator.MigrateParams(ctx); err != nil { - return err - } - return controllerMigrator.MigrateParams(ctx) - }); err != nil { - panic(fmt.Errorf("failed to migrate interchainaccounts app from version 2 to 3 (self-managed 
params migration): %v", err)) - } } // InitGenesis performs genesis initialization for the interchain accounts module. diff --git a/modules/apps/27-interchain-accounts/module_test.go b/modules/apps/27-interchain-accounts/module_test.go index 190a608b7ce..fdbf4e30b53 100644 --- a/modules/apps/27-interchain-accounts/module_test.go +++ b/modules/apps/27-interchain-accounts/module_test.go @@ -18,6 +18,6 @@ func TestICATestSuite(t *testing.T) { testifysuite.Run(t, new(InterchainAccountsTestSuite)) } -func (suite *InterchainAccountsTestSuite) SetupTest() { - suite.coordinator = ibctesting.NewCoordinator(suite.T(), 2) +func (s *InterchainAccountsTestSuite) SetupTest() { + s.coordinator = ibctesting.NewCoordinator(s.T(), 2) } diff --git a/modules/apps/27-interchain-accounts/simulation/proposals_test.go b/modules/apps/27-interchain-accounts/simulation/proposals_test.go index 183287ca932..c81ac74f6a0 100644 --- a/modules/apps/27-interchain-accounts/simulation/proposals_test.go +++ b/modules/apps/27-interchain-accounts/simulation/proposals_test.go @@ -79,7 +79,7 @@ func TestProposalMsgs(t *testing.T) { t.Run(tc.name, func(t *testing.T) { // execute ProposalMsgs function weightedProposalMsgs := simulation.ProposalMsgs(tc.controller, tc.host) - require.Equal(t, len(tc.expMsgs), len(weightedProposalMsgs)) + require.Len(t, weightedProposalMsgs, len(tc.expMsgs)) for idx, weightedMsg := range weightedProposalMsgs { // tests weighted interface: diff --git a/modules/apps/27-interchain-accounts/types/account_test.go b/modules/apps/27-interchain-accounts/types/account_test.go index 8b87ea774fc..91cffa1524c 100644 --- a/modules/apps/27-interchain-accounts/types/account_test.go +++ b/modules/apps/27-interchain-accounts/types/account_test.go @@ -32,26 +32,26 @@ type TypesTestSuite struct { chainB *ibctesting.TestChain } -func (suite *TypesTestSuite) SetupTest() { - suite.coordinator = ibctesting.NewCoordinator(suite.T(), 2) +func (s *TypesTestSuite) SetupTest() { + s.coordinator = ibctesting.NewCoordinator(s.T(), 2) - suite.chainA = suite.coordinator.GetChain(ibctesting.GetChainID(1)) - suite.chainB = suite.coordinator.GetChain(ibctesting.GetChainID(2)) + s.chainA = s.coordinator.GetChain(ibctesting.GetChainID(1)) + s.chainB = s.coordinator.GetChain(ibctesting.GetChainID(2)) } func TestTypesTestSuite(t *testing.T) { testifysuite.Run(t, new(TypesTestSuite)) } -func (suite *TypesTestSuite) TestGenerateAddress() { - addr := types.GenerateAddress(suite.chainA.GetContext(), "test-connection-id", "test-port-id") +func (s *TypesTestSuite) TestGenerateAddress() { + addr := types.GenerateAddress(s.chainA.GetContext(), "test-connection-id", "test-port-id") accAddr, err := sdk.AccAddressFromBech32(addr.String()) - suite.Require().NoError(err, "TestGenerateAddress failed") - suite.Require().NotEmpty(accAddr) + s.Require().NoError(err, "TestGenerateAddress failed") + s.Require().NotEmpty(accAddr) } -func (suite *TypesTestSuite) TestValidateAccountAddress() { +func (s *TypesTestSuite) TestValidateAccountAddress() { testCases := []struct { name string address string @@ -85,19 +85,19 @@ func (suite *TypesTestSuite) TestValidateAccountAddress() { } for _, tc := range testCases { - suite.Run(tc.name, func() { + s.Run(tc.name, func() { err := types.ValidateAccountAddress(tc.address) if tc.expError == nil { - suite.Require().NoError(err, tc.name) + s.Require().NoError(err, tc.name) } else { - suite.Require().ErrorIs(err, tc.expError, tc.name) + s.Require().ErrorIs(err, tc.expError, tc.name) } }) } } -func (suite *TypesTestSuite) 
TestInterchainAccount() { +func (s *TypesTestSuite) TestInterchainAccount() { pubkey := secp256k1.GenPrivKey().PubKey() addr := sdk.AccAddress(pubkey.Address()) baseAcc := authtypes.NewBaseAccountWithAddress(addr) @@ -105,12 +105,12 @@ func (suite *TypesTestSuite) TestInterchainAccount() { // should fail when trying to set the public key or sequence of an interchain account err := interchainAcc.SetPubKey(pubkey) - suite.Require().Error(err) + s.Require().Error(err) err = interchainAcc.SetSequence(1) - suite.Require().Error(err) + s.Require().Error(err) } -func (suite *TypesTestSuite) TestGenesisAccountValidate() { +func (s *TypesTestSuite) TestGenesisAccountValidate() { pubkey := secp256k1.GenPrivKey().PubKey() addr := sdk.AccAddress(pubkey.Address()) baseAcc := authtypes.NewBaseAccountWithAddress(addr) @@ -135,44 +135,43 @@ func (suite *TypesTestSuite) TestGenesisAccountValidate() { } for _, tc := range testCases { - err := tc.acc.Validate() if tc.expErr == nil { - suite.Require().NoError(err) + s.Require().NoError(err) } else { - suite.Require().Error(err) - suite.Require().ErrorIs(err, tc.expErr) + s.Require().Error(err) + s.Require().ErrorIs(err, tc.expErr) } } } -func (suite *TypesTestSuite) TestInterchainAccountMarshalYAML() { - addr := suite.chainA.SenderAccount.GetAddress() +func (s *TypesTestSuite) TestInterchainAccountMarshalYAML() { + addr := s.chainA.SenderAccount.GetAddress() baseAcc := authtypes.NewBaseAccountWithAddress(addr) - interchainAcc := types.NewInterchainAccount(baseAcc, suite.chainB.SenderAccount.GetAddress().String()) + interchainAcc := types.NewInterchainAccount(baseAcc, s.chainB.SenderAccount.GetAddress().String()) bz, err := interchainAcc.MarshalYAML() - suite.Require().NoError(err) + s.Require().NoError(err) - expected := fmt.Sprintf("address: %s\npublic_key: \"\"\naccount_number: 0\nsequence: 0\naccount_owner: %s\n", suite.chainA.SenderAccount.GetAddress(), suite.chainB.SenderAccount.GetAddress()) - suite.Require().Equal(expected, string(bz)) + expected := fmt.Sprintf("address: %s\npublic_key: \"\"\naccount_number: 0\nsequence: 0\naccount_owner: %s\n", s.chainA.SenderAccount.GetAddress(), s.chainB.SenderAccount.GetAddress()) + s.Require().Equal(expected, string(bz)) } -func (suite *TypesTestSuite) TestInterchainAccountJSON() { - addr := suite.chainA.SenderAccount.GetAddress() +func (s *TypesTestSuite) TestInterchainAccountJSON() { + addr := s.chainA.SenderAccount.GetAddress() ba := authtypes.NewBaseAccountWithAddress(addr) - interchainAcc := types.NewInterchainAccount(ba, suite.chainB.SenderAccount.GetAddress().String()) + interchainAcc := types.NewInterchainAccount(ba, s.chainB.SenderAccount.GetAddress().String()) bz, err := json.Marshal(interchainAcc) - suite.Require().NoError(err) + s.Require().NoError(err) bz1, err := interchainAcc.MarshalJSON() - suite.Require().NoError(err) - suite.Require().Equal(string(bz), string(bz1)) + s.Require().NoError(err) + s.Require().Equal(string(bz), string(bz1)) var a types.InterchainAccount - suite.Require().NoError(json.Unmarshal(bz, &a)) - suite.Require().Equal(a.String(), interchainAcc.String()) + s.Require().NoError(json.Unmarshal(bz, &a)) + s.Require().Equal(a.String(), interchainAcc.String()) } diff --git a/modules/apps/27-interchain-accounts/types/codec_test.go b/modules/apps/27-interchain-accounts/types/codec_test.go index d89fb757256..a89edca00fe 100644 --- a/modules/apps/27-interchain-accounts/types/codec_test.go +++ b/modules/apps/27-interchain-accounts/types/codec_test.go @@ -43,7 +43,7 @@ func (mockSdkMsg) 
ValidateBasic() error { // expPass set to false means that: // - the test case is expected to fail on deserialization for protobuf encoding. // - the test case is expected to fail on serialization for proto3 json encoding. -func (suite *TypesTestSuite) TestSerializeAndDeserializeCosmosTx() { +func (s *TypesTestSuite) TestSerializeAndDeserializeCosmosTx() { testedEncodings := []string{types.EncodingProtobuf, types.EncodingProto3JSON} // each test case will have a corresponding expected errors in case of failures: expSerializeErrorStrings := make([]string, len(testedEncodings)) @@ -112,7 +112,7 @@ func (suite *TypesTestSuite) TestSerializeAndDeserializeCosmosTx() { Description: "tokens for all!", } content, err := codectypes.NewAnyWithValue(testProposal) - suite.Require().NoError(err) + s.Require().NoError(err) msgs = []proto.Message{ &govtypes.MsgSubmitProposal{ @@ -133,21 +133,21 @@ func (suite *TypesTestSuite) TestSerializeAndDeserializeCosmosTx() { Amount: sdk.NewCoins(sdk.NewCoin("bananas", sdkmath.NewInt(100))), } sendAny, err := codectypes.NewAnyWithValue(sendMsg) - suite.Require().NoError(err) + s.Require().NoError(err) testProposal := &govtypes.TextProposal{ Title: "IBC Gov Proposal", Description: "tokens for all!", } content, err := codectypes.NewAnyWithValue(testProposal) - suite.Require().NoError(err) + s.Require().NoError(err) legacyPropMsg := &govtypes.MsgSubmitProposal{ Content: content, InitialDeposit: sdk.NewCoins(sdk.NewCoin(sdk.DefaultBondDenom, sdkmath.NewInt(5000))), Proposer: TestOwnerAddress, } legacyPropAny, err := codectypes.NewAnyWithValue(legacyPropMsg) - suite.Require().NoError(err) + s.Require().NoError(err) delegateMsg := &stakingtypes.MsgDelegate{ DelegatorAddress: TestOwnerAddress, @@ -155,7 +155,7 @@ func (suite *TypesTestSuite) TestSerializeAndDeserializeCosmosTx() { Amount: sdk.NewCoin(sdk.DefaultBondDenom, sdkmath.NewInt(5000)), } delegateAny, err := codectypes.NewAnyWithValue(delegateMsg) - suite.Require().NoError(err) + s.Require().NoError(err) messages := []*codectypes.Any{sendAny, legacyPropAny, delegateAny} @@ -210,7 +210,7 @@ func (suite *TypesTestSuite) TestSerializeAndDeserializeCosmosTx() { func() { mockMsg := &mockSdkMsg{} mockAny, err := codectypes.NewAnyWithValue(mockMsg) - suite.Require().NoError(err) + s.Require().NoError(err) msgs = []proto.Message{ &govtypes.MsgSubmitProposal{ @@ -230,7 +230,7 @@ func (suite *TypesTestSuite) TestSerializeAndDeserializeCosmosTx() { func() { mockMsg := &mockSdkMsg{} mockAny, err := codectypes.NewAnyWithValue(mockMsg) - suite.Require().NoError(err) + s.Require().NoError(err) messages := []*codectypes.Any{mockAny, mockAny, mockAny} @@ -254,30 +254,30 @@ func (suite *TypesTestSuite) TestSerializeAndDeserializeCosmosTx() { for i, encoding := range testedEncodings { for _, tc := range testCases { - suite.Run(tc.name, func() { + s.Run(tc.name, func() { tc.malleate() expPass := tc.expErr == nil - bz, err := types.SerializeCosmosTx(suite.chainA.Codec, msgs, encoding) + bz, err := types.SerializeCosmosTx(s.chainA.Codec, msgs, encoding) if encoding == types.EncodingProto3JSON && !expPass { - suite.Require().Error(err, tc.name) - suite.Require().Contains(err.Error(), expSerializeErrorStrings[1], tc.name) + s.Require().Error(err, tc.name) + s.Require().Contains(err.Error(), expSerializeErrorStrings[1], tc.name) } else { - suite.Require().NoError(err, tc.name) + s.Require().NoError(err, tc.name) } - deserializedMsgs, err := types.DeserializeCosmosTx(suite.chainA.Codec, bz, encoding) + deserializedMsgs, err := 
types.DeserializeCosmosTx(s.chainA.Codec, bz, encoding) if expPass { - suite.Require().NoError(err, tc.name) + s.Require().NoError(err, tc.name) } else { - suite.Require().Error(err, tc.name) - suite.Require().Contains(err.Error(), expDeserializeErrorStrings[i], tc.name) - suite.Require().ErrorIs(err, tc.expErr) + s.Require().Error(err, tc.name) + s.Require().Contains(err.Error(), expDeserializeErrorStrings[i], tc.name) + s.Require().ErrorIs(err, tc.expErr) } if expPass { for i, msg := range msgs { - // We're using proto.CompactTextString() for comparison instead of suite.Require().Equal() or proto.Equal() + // We're using proto.CompactTextString() for comparison instead of s.Require().Equal() or proto.Equal() // for two main reasons: // // 1. When deserializing from JSON, the `Any` type has private fields and cached values @@ -287,32 +287,32 @@ func (suite *TypesTestSuite) TestSerializeAndDeserializeCosmosTx() { // // Using proto.CompactTextString() mitigates these issues by focusing on serialized string representation, // rather than internal details of the types. - suite.Require().Equal(proto.CompactTextString(msg), proto.CompactTextString(deserializedMsgs[i])) + s.Require().Equal(proto.CompactTextString(msg), proto.CompactTextString(deserializedMsgs[i])) } } }) } // test serializing non sdk.Msg type - bz, err := types.SerializeCosmosTx(suite.chainA.Codec, []proto.Message{&banktypes.MsgSendResponse{}}, encoding) - suite.Require().NoError(err) - suite.Require().NotEmpty(bz) + bz, err := types.SerializeCosmosTx(s.chainA.Codec, []proto.Message{&banktypes.MsgSendResponse{}}, encoding) + s.Require().NoError(err) + s.Require().NotEmpty(bz) // test deserializing unknown bytes - msgs, err := types.DeserializeCosmosTx(suite.chainA.Codec, bz, encoding) - suite.Require().Error(err) // unregistered type - suite.Require().Contains(err.Error(), expDeserializeErrorStrings[i]) - suite.Require().Empty(msgs) + msgs, err := types.DeserializeCosmosTx(s.chainA.Codec, bz, encoding) + s.Require().Error(err) // unregistered type + s.Require().Contains(err.Error(), expDeserializeErrorStrings[i]) + s.Require().Empty(msgs) // test deserializing unknown bytes - msgs, err = types.DeserializeCosmosTx(suite.chainA.Codec, []byte("invalid"), encoding) - suite.Require().Error(err) - suite.Require().Contains(err.Error(), expDeserializeErrorStrings[i]) - suite.Require().Empty(msgs) + msgs, err = types.DeserializeCosmosTx(s.chainA.Codec, []byte("invalid"), encoding) + s.Require().Error(err) + s.Require().Contains(err.Error(), expDeserializeErrorStrings[i]) + s.Require().Empty(msgs) } } -func (suite *TypesTestSuite) TestJSONDeserializeCosmosTx() { +func (s *TypesTestSuite) TestJSONDeserializeCosmosTx() { testCases := []struct { name string jsonBytes []byte @@ -431,21 +431,21 @@ func (suite *TypesTestSuite) TestJSONDeserializeCosmosTx() { } for _, tc := range testCases { - suite.Run(tc.name, func() { - msgs, errDeserialize := types.DeserializeCosmosTx(suite.chainA.Codec, tc.jsonBytes, types.EncodingProto3JSON) + s.Run(tc.name, func() { + msgs, errDeserialize := types.DeserializeCosmosTx(s.chainA.Codec, tc.jsonBytes, types.EncodingProto3JSON) if tc.expError == nil { - suite.Require().NoError(errDeserialize, tc.name) + s.Require().NoError(errDeserialize, tc.name) for i, msg := range msgs { - suite.Require().Equal(tc.expMsgs[i], msg) + s.Require().Equal(tc.expMsgs[i], msg) } } else { - suite.Require().ErrorIs(errDeserialize, tc.expError, tc.name) + s.Require().ErrorIs(errDeserialize, tc.expError, tc.name) } }) } } -func (suite 
*TypesTestSuite) TestUnsupportedEncodingType() { +func (s *TypesTestSuite) TestUnsupportedEncodingType() { msgs := []proto.Message{ &banktypes.MsgSend{ FromAddress: TestOwnerAddress, @@ -454,17 +454,17 @@ func (suite *TypesTestSuite) TestUnsupportedEncodingType() { }, } - bz, err := types.SerializeCosmosTx(suite.chainA.Codec, msgs, "unsupported") - suite.Require().ErrorIs(err, types.ErrInvalidCodec) - suite.Require().Nil(bz) + bz, err := types.SerializeCosmosTx(s.chainA.Codec, msgs, "unsupported") + s.Require().ErrorIs(err, types.ErrInvalidCodec) + s.Require().Nil(bz) - data, err := types.SerializeCosmosTx(suite.chainA.Codec, msgs, types.EncodingProtobuf) - suite.Require().NoError(err) + data, err := types.SerializeCosmosTx(s.chainA.Codec, msgs, types.EncodingProtobuf) + s.Require().NoError(err) - _, err = types.DeserializeCosmosTx(suite.chainA.Codec, data, "unsupported") - suite.Require().ErrorIs(err, types.ErrInvalidCodec) + _, err = types.DeserializeCosmosTx(s.chainA.Codec, data, "unsupported") + s.Require().ErrorIs(err, types.ErrInvalidCodec) // verify that protobuf encoding still works otherwise: - _, err = types.DeserializeCosmosTx(suite.chainA.Codec, data, types.EncodingProtobuf) - suite.Require().NoError(err) + _, err = types.DeserializeCosmosTx(s.chainA.Codec, data, types.EncodingProtobuf) + s.Require().NoError(err) } diff --git a/modules/apps/27-interchain-accounts/types/expected_keepers.go b/modules/apps/27-interchain-accounts/types/expected_keepers.go index 941f7531f76..4f4c194d153 100644 --- a/modules/apps/27-interchain-accounts/types/expected_keepers.go +++ b/modules/apps/27-interchain-accounts/types/expected_keepers.go @@ -4,10 +4,10 @@ import ( "context" sdk "github.com/cosmos/cosmos-sdk/types" - paramtypes "github.com/cosmos/cosmos-sdk/x/params/types" connectiontypes "github.com/cosmos/ibc-go/v10/modules/core/03-connection/types" channeltypes "github.com/cosmos/ibc-go/v10/modules/core/04-channel/types" + porttypes "github.com/cosmos/ibc-go/v10/modules/core/05-port/types" ) // AccountKeeper defines the expected account keeper @@ -21,14 +21,9 @@ type AccountKeeper interface { // ChannelKeeper defines the expected IBC channel keeper type ChannelKeeper interface { + porttypes.ICS4Wrapper GetChannel(ctx sdk.Context, srcPort, srcChan string) (channel channeltypes.Channel, found bool) GetNextSequenceSend(ctx sdk.Context, portID, channelID string) (uint64, bool) GetConnection(ctx sdk.Context, connectionID string) (connectiontypes.ConnectionEnd, error) GetAllChannelsWithPortPrefix(ctx sdk.Context, portPrefix string) []channeltypes.IdentifiedChannel } - -// ParamSubspace defines the expected Subspace interface for module parameters. 
-type ParamSubspace interface { - GetParamSet(ctx sdk.Context, ps paramtypes.ParamSet) - GetParamSetIfExists(ctx sdk.Context, ps paramtypes.ParamSet) -} diff --git a/modules/apps/27-interchain-accounts/types/keys_test.go b/modules/apps/27-interchain-accounts/types/keys_test.go index 6729e89eb64..c2c480f1ed7 100644 --- a/modules/apps/27-interchain-accounts/types/keys_test.go +++ b/modules/apps/27-interchain-accounts/types/keys_test.go @@ -7,17 +7,17 @@ import ( ibctesting "github.com/cosmos/ibc-go/v10/testing" ) -func (suite *TypesTestSuite) TestKeyActiveChannel() { +func (s *TypesTestSuite) TestKeyActiveChannel() { key := types.KeyActiveChannel("port-id", "connection-id") - suite.Require().Equal("activeChannel/port-id/connection-id", string(key)) + s.Require().Equal("activeChannel/port-id/connection-id", string(key)) } -func (suite *TypesTestSuite) TestKeyOwnerAccount() { +func (s *TypesTestSuite) TestKeyOwnerAccount() { key := types.KeyOwnerAccount("port-id", "connection-id") - suite.Require().Equal("owner/port-id/connection-id", string(key)) + s.Require().Equal("owner/port-id/connection-id", string(key)) } -func (suite *TypesTestSuite) TestKeyIsMiddlewareEnabled() { +func (s *TypesTestSuite) TestKeyIsMiddlewareEnabled() { key := types.KeyIsMiddlewareEnabled(ibctesting.MockPort, ibctesting.FirstChannelID) - suite.Require().Equal(fmt.Sprintf("%s/%s/%s", types.IsMiddlewareEnabledPrefix, ibctesting.MockPort, ibctesting.FirstChannelID), string(key)) + s.Require().Equal(fmt.Sprintf("%s/%s/%s", types.IsMiddlewareEnabledPrefix, ibctesting.MockPort, ibctesting.FirstChannelID), string(key)) } diff --git a/modules/apps/27-interchain-accounts/types/metadata_test.go b/modules/apps/27-interchain-accounts/types/metadata_test.go index bec7fa72731..21a3afcbf61 100644 --- a/modules/apps/27-interchain-accounts/types/metadata_test.go +++ b/modules/apps/27-interchain-accounts/types/metadata_test.go @@ -7,7 +7,7 @@ import ( ) // use TestVersion as metadata being compared against -func (suite *TypesTestSuite) TestIsPreviousMetadataEqual() { +func (s *TypesTestSuite) TestIsPreviousMetadataEqual() { var ( metadata types.Metadata previousVersion string @@ -22,7 +22,7 @@ func (suite *TypesTestSuite) TestIsPreviousMetadataEqual() { "success", func() { versionBytes, err := types.ModuleCdc.MarshalJSON(&metadata) - suite.Require().NoError(err) + s.Require().NoError(err) previousVersion = string(versionBytes) }, true, @@ -33,7 +33,7 @@ func (suite *TypesTestSuite) TestIsPreviousMetadataEqual() { metadata.Address = "" versionBytes, err := types.ModuleCdc.MarshalJSON(&metadata) - suite.Require().NoError(err) + s.Require().NoError(err) previousVersion = string(versionBytes) }, true, @@ -51,7 +51,7 @@ func (suite *TypesTestSuite) TestIsPreviousMetadataEqual() { metadata.Encoding = "invalid-encoding-format" versionBytes, err := types.ModuleCdc.MarshalJSON(&metadata) - suite.Require().NoError(err) + s.Require().NoError(err) previousVersion = string(versionBytes) }, false, @@ -62,7 +62,7 @@ func (suite *TypesTestSuite) TestIsPreviousMetadataEqual() { metadata.Encoding = types.EncodingProto3JSON versionBytes, err := types.ModuleCdc.MarshalJSON(&metadata) - suite.Require().NoError(err) + s.Require().NoError(err) previousVersion = string(versionBytes) }, false, @@ -73,7 +73,7 @@ func (suite *TypesTestSuite) TestIsPreviousMetadataEqual() { metadata.TxType = "invalid-tx-type" versionBytes, err := types.ModuleCdc.MarshalJSON(&metadata) - suite.Require().NoError(err) + s.Require().NoError(err) previousVersion = string(versionBytes) 
}, false, @@ -84,7 +84,7 @@ func (suite *TypesTestSuite) TestIsPreviousMetadataEqual() { metadata.ControllerConnectionId = "connection-10" versionBytes, err := types.ModuleCdc.MarshalJSON(&metadata) - suite.Require().NoError(err) + s.Require().NoError(err) previousVersion = string(versionBytes) }, false, @@ -95,7 +95,7 @@ func (suite *TypesTestSuite) TestIsPreviousMetadataEqual() { metadata.HostConnectionId = "connection-10" versionBytes, err := types.ModuleCdc.MarshalJSON(&metadata) - suite.Require().NoError(err) + s.Require().NoError(err) previousVersion = string(versionBytes) }, false, @@ -106,7 +106,7 @@ func (suite *TypesTestSuite) TestIsPreviousMetadataEqual() { metadata.Version = "invalid version" versionBytes, err := types.ModuleCdc.MarshalJSON(&metadata) - suite.Require().NoError(err) + s.Require().NoError(err) previousVersion = string(versionBytes) }, false, @@ -114,10 +114,10 @@ func (suite *TypesTestSuite) TestIsPreviousMetadataEqual() { } for _, tc := range testCases { - suite.Run(tc.name, func() { - suite.SetupTest() // reset + s.Run(tc.name, func() { + s.SetupTest() // reset - path := ibctesting.NewPath(suite.chainA, suite.chainB) + path := ibctesting.NewPath(s.chainA, s.chainB) path.SetupConnections() expectedMetadata := types.NewMetadata(types.Version, ibctesting.FirstConnectionID, ibctesting.FirstConnectionID, TestOwnerAddress, types.EncodingProtobuf, types.TxTypeSDKMultiMsg) @@ -128,15 +128,15 @@ func (suite *TypesTestSuite) TestIsPreviousMetadataEqual() { equal := types.IsPreviousMetadataEqual(previousVersion, expectedMetadata) if tc.expEqual { - suite.Require().True(equal) + s.Require().True(equal) } else { - suite.Require().False(equal) + s.Require().False(equal) } }) } } -func (suite *TypesTestSuite) TestValidateControllerMetadata() { +func (s *TypesTestSuite) TestValidateControllerMetadata() { var metadata types.Metadata testCases := []struct { @@ -264,10 +264,10 @@ func (suite *TypesTestSuite) TestValidateControllerMetadata() { } for _, tc := range testCases { - suite.Run(tc.name, func() { - suite.SetupTest() // reset + s.Run(tc.name, func() { + s.SetupTest() // reset - path := ibctesting.NewPath(suite.chainA, suite.chainB) + path := ibctesting.NewPath(s.chainA, s.chainB) path.SetupConnections() metadata = types.NewMetadata(types.Version, ibctesting.FirstConnectionID, ibctesting.FirstConnectionID, TestOwnerAddress, types.EncodingProtobuf, types.TxTypeSDKMultiMsg) @@ -275,23 +275,23 @@ func (suite *TypesTestSuite) TestValidateControllerMetadata() { tc.malleate() // malleate mutates test data err := types.ValidateControllerMetadata( - suite.chainA.GetContext(), - suite.chainA.App.GetIBCKeeper().ChannelKeeper, + s.chainA.GetContext(), + s.chainA.App.GetIBCKeeper().ChannelKeeper, []string{ibctesting.FirstConnectionID}, metadata, ) if tc.expErr == nil { - suite.Require().NoError(err, tc.name) + s.Require().NoError(err, tc.name) } else { - suite.Require().Error(err, tc.name) - suite.Require().ErrorIs(err, tc.expErr) + s.Require().Error(err, tc.name) + s.Require().ErrorIs(err, tc.expErr) } }) } } -func (suite *TypesTestSuite) TestValidateHostMetadata() { +func (s *TypesTestSuite) TestValidateHostMetadata() { var metadata types.Metadata testCases := []struct { @@ -419,10 +419,10 @@ func (suite *TypesTestSuite) TestValidateHostMetadata() { } for _, tc := range testCases { - suite.Run(tc.name, func() { - suite.SetupTest() // reset + s.Run(tc.name, func() { + s.SetupTest() // reset - path := ibctesting.NewPath(suite.chainA, suite.chainB) + path := ibctesting.NewPath(s.chainA, 
s.chainB) path.SetupConnections() metadata = types.NewMetadata(types.Version, ibctesting.FirstConnectionID, ibctesting.FirstConnectionID, TestOwnerAddress, types.EncodingProtobuf, types.TxTypeSDKMultiMsg) @@ -430,16 +430,16 @@ func (suite *TypesTestSuite) TestValidateHostMetadata() { tc.malleate() // malleate mutates test data err := types.ValidateHostMetadata( - suite.chainA.GetContext(), - suite.chainA.App.GetIBCKeeper().ChannelKeeper, + s.chainA.GetContext(), + s.chainA.App.GetIBCKeeper().ChannelKeeper, []string{ibctesting.FirstConnectionID}, metadata, ) if tc.expError == nil { - suite.Require().NoError(err, tc.name) + s.Require().NoError(err, tc.name) } else { - suite.Require().ErrorIs(err, tc.expError) + s.Require().ErrorIs(err, tc.expError) } }) } diff --git a/modules/apps/27-interchain-accounts/types/packet_test.go b/modules/apps/27-interchain-accounts/types/packet_test.go index 1dd1075a887..694ec172d6a 100644 --- a/modules/apps/27-interchain-accounts/types/packet_test.go +++ b/modules/apps/27-interchain-accounts/types/packet_test.go @@ -7,7 +7,7 @@ import ( ibctesting "github.com/cosmos/ibc-go/v10/testing" ) -func (suite *TypesTestSuite) TestValidateBasic() { +func (s *TypesTestSuite) TestValidateBasic() { testCases := []struct { name string packetData types.InterchainAccountPacketData @@ -69,22 +69,22 @@ func (suite *TypesTestSuite) TestValidateBasic() { } for _, tc := range testCases { - suite.Run(tc.name, func() { - suite.SetupTest() // reset + s.Run(tc.name, func() { + s.SetupTest() // reset err := tc.packetData.ValidateBasic() if tc.expErr == nil { - suite.Require().NoError(err) + s.Require().NoError(err) } else { - suite.Require().Error(err) - suite.Require().ErrorIs(err, tc.expErr) + s.Require().Error(err) + s.Require().ErrorIs(err, tc.expErr) } }) } } -func (suite *TypesTestSuite) TestGetPacketSender() { +func (s *TypesTestSuite) TestGetPacketSender() { testCases := []struct { name string srcPortID string @@ -108,13 +108,12 @@ func (suite *TypesTestSuite) TestGetPacketSender() { } for _, tc := range testCases { - packetData := types.InterchainAccountPacketData{} - suite.Require().Equal(tc.expSender, packetData.GetPacketSender(tc.srcPortID)) + s.Require().Equal(tc.expSender, packetData.GetPacketSender(tc.srcPortID)) } } -func (suite *TypesTestSuite) TestPacketDataProvider() { +func (s *TypesTestSuite) TestPacketDataProvider() { expCallbackAddr := ibctesting.TestAccAddress testCases := []struct { @@ -175,13 +174,12 @@ func (suite *TypesTestSuite) TestPacketDataProvider() { } for _, tc := range testCases { - customData := tc.packetData.GetCustomPacketData("src_callback") - suite.Require().Equal(tc.expCustomData, customData) + s.Require().Equal(tc.expCustomData, customData) } } -func (suite *TypesTestSuite) TestPacketDataUnmarshalerInterface() { +func (s *TypesTestSuite) TestPacketDataUnmarshalerInterface() { expPacketData := types.InterchainAccountPacketData{ Type: types.EXECUTE_TX, Data: []byte("data"), @@ -190,14 +188,14 @@ func (suite *TypesTestSuite) TestPacketDataUnmarshalerInterface() { var packetData types.InterchainAccountPacketData err := packetData.UnmarshalJSON(expPacketData.GetBytes()) - suite.Require().NoError(err) - suite.Require().Equal(expPacketData, packetData) + s.Require().NoError(err) + s.Require().Equal(expPacketData, packetData) // test invalid packet data invalidPacketDataBytes := []byte("invalid packet data") var invalidPacketData types.InterchainAccountPacketData err = packetData.UnmarshalJSON(invalidPacketDataBytes) - suite.Require().Error(err) - 
suite.Require().Equal(types.InterchainAccountPacketData{}, invalidPacketData) + s.Require().Error(err) + s.Require().Equal(types.InterchainAccountPacketData{}, invalidPacketData) } diff --git a/modules/apps/27-interchain-accounts/types/port_test.go b/modules/apps/27-interchain-accounts/types/port_test.go index b49c088fc20..75e33e6efba 100644 --- a/modules/apps/27-interchain-accounts/types/port_test.go +++ b/modules/apps/27-interchain-accounts/types/port_test.go @@ -5,7 +5,7 @@ import ( ibctesting "github.com/cosmos/ibc-go/v10/testing" ) -func (suite *TypesTestSuite) TestNewControllerPortID() { +func (s *TypesTestSuite) TestNewControllerPortID() { var ( path *ibctesting.Path owner = TestOwnerAddress @@ -34,10 +34,10 @@ func (suite *TypesTestSuite) TestNewControllerPortID() { } for _, tc := range testCases { - suite.Run(tc.name, func() { - suite.SetupTest() // reset + s.Run(tc.name, func() { + s.SetupTest() // reset - path = ibctesting.NewPath(suite.chainA, suite.chainB) + path = ibctesting.NewPath(s.chainA, s.chainB) path.Setup() tc.malleate() // malleate mutates test data @@ -45,12 +45,12 @@ func (suite *TypesTestSuite) TestNewControllerPortID() { portID, err := types.NewControllerPortID(owner) if tc.expErr == nil { - suite.Require().NoError(err, tc.name) - suite.Require().Equal(tc.expValue, portID) + s.Require().NoError(err, tc.name) + s.Require().Equal(tc.expValue, portID) } else { - suite.Require().Error(err, tc.name) - suite.Require().Empty(portID) - suite.Require().ErrorIs(err, tc.expErr) + s.Require().Error(err, tc.name) + s.Require().Empty(portID) + s.Require().ErrorIs(err, tc.expErr) } }) } diff --git a/modules/apps/callbacks/CHANGELOG.md b/modules/apps/callbacks/CHANGELOG.md deleted file mode 100644 index 03614679a1e..00000000000 --- a/modules/apps/callbacks/CHANGELOG.md +++ /dev/null @@ -1,69 +0,0 @@ - - -# Changelog - -## [Unreleased] - -### Dependencies - -* [\#6828](https://github.com/cosmos/ibc-go/pull/6828) Bump Cosmos SDK to v0.50.9. -* [\#6193](https://github.com/cosmos/ibc-go/pull/6193) Bump `cosmossdk.io/store` to v1.1.0. -* [\#7247](https://github.com/cosmos/ibc-go/pull/7247) Bump CometBFT to v0.38.12. - -### API Breaking - -* (apps/callbacks) [\#7000](https://github.com/cosmos/ibc-go/pull/7000) Add base application version to contract keeper callbacks. - -### State Machine Breaking - -### Improvements - -### Features - -### Bug Fixes - - -## [v0.2.0+ibc-go-v8.0](https://github.com/cosmos/ibc-go/releases/tag/modules%2Fapps%2Fcallbacks%2Fv0.2.0%2Bibc-go-v8.0) - 2023-11-15 - -### Bug Fixes - -* [\#4568](https://github.com/cosmos/ibc-go/pull/4568) Include error in event that is emitted when the callback cannot be executed due to a panic or an out of gas error. Packet is only sent if the `IBCSendPacketCallback` returns nil explicitly. - - -## [v0.1.0+ibc-go-v7.3](https://github.com/cosmos/ibc-go/releases/tag/modules%2Fapps%2Fcallbacks%2Fv0.1.0%2Bibc-go-v7.3) - 2023-08-31 - -### Features - -* [\#3939](https://github.com/cosmos/ibc-go/pull/3939) feat(callbacks): ADR8 implementation. 
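The callbacks changes below all revolve around the `src_callback` entry that users place in the ICS-20 transfer memo. A minimal sketch of composing such a memo; only the JSON shape (address, optional string-encoded gas_limit, optional hex calldata) follows the memos exercised in the tests below, and every concrete value here is a placeholder:

package main

import (
	"encoding/json"
	"fmt"
)

// callbackData mirrors the fields seen in the test memos: a contract address,
// an optional gas limit (string-encoded) and optional hex-encoded calldata.
type callbackData struct {
	Address  string `json:"address"`
	GasLimit string `json:"gas_limit,omitempty"`
	Calldata string `json:"calldata,omitempty"`
}

func main() {
	memo := map[string]callbackData{
		"src_callback": {
			Address:  "cosmos1examplecontract", // placeholder contract address
			GasLimit: "200000",                 // placeholder gas limit
		},
	}
	bz, err := json.Marshal(memo)
	if err != nil {
		panic(err)
	}
	// prints e.g. {"src_callback":{"address":"cosmos1examplecontract","gas_limit":"200000"}}
	fmt.Println(string(bz))
}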
diff --git a/modules/apps/callbacks/callbacks_test.go b/modules/apps/callbacks/callbacks_test.go index 8e18c9f114b..c288583e1cc 100644 --- a/modules/apps/callbacks/callbacks_test.go +++ b/modules/apps/callbacks/callbacks_test.go @@ -183,8 +183,8 @@ func (s *CallbacksTestSuite) AssertCallbackCounters(callbackType types.CallbackT switch callbackType { case "none": - s.Require().Len(sourceCounters, 0) - s.Require().Len(destCounters, 0) + s.Require().Empty(sourceCounters) + s.Require().Empty(destCounters) case types.CallbackTypeSendPacket: s.Require().Len(sourceCounters, 1) @@ -195,10 +195,10 @@ func (s *CallbacksTestSuite) AssertCallbackCounters(callbackType types.CallbackT s.Require().Equal(1, sourceCounters[types.CallbackTypeSendPacket]) s.Require().Equal(1, sourceCounters[types.CallbackTypeAcknowledgementPacket]) - s.Require().Len(destCounters, 0) + s.Require().Empty(destCounters) case types.CallbackTypeReceivePacket: - s.Require().Len(sourceCounters, 0) + s.Require().Empty(sourceCounters) s.Require().Len(destCounters, 1) s.Require().Equal(1, destCounters[types.CallbackTypeReceivePacket]) @@ -207,7 +207,7 @@ func (s *CallbacksTestSuite) AssertCallbackCounters(callbackType types.CallbackT s.Require().Equal(1, sourceCounters[types.CallbackTypeSendPacket]) s.Require().Equal(1, sourceCounters[types.CallbackTypeTimeoutPacket]) - s.Require().Len(destCounters, 0) + s.Require().Empty(destCounters) default: s.FailNow(fmt.Sprintf("invalid callback type %s", callbackType)) diff --git a/modules/apps/callbacks/ibc_middleware.go b/modules/apps/callbacks/ibc_middleware.go index 3c5e84be64d..df36f7171a1 100644 --- a/modules/apps/callbacks/ibc_middleware.go +++ b/modules/apps/callbacks/ibc_middleware.go @@ -22,7 +22,7 @@ var ( // IBCMiddleware implements the ICS26 callbacks for the ibc-callbacks middleware given // the underlying application. type IBCMiddleware struct { - app types.CallbacksCompatibleModule + app porttypes.PacketUnmarshalerModule ics4Wrapper porttypes.ICS4Wrapper contractKeeper types.ContractKeeper @@ -37,18 +37,8 @@ type IBCMiddleware struct { // NewIBCMiddleware creates a new IBCMiddleware given the keeper and underlying application. // The underlying application must implement the required callback interfaces. func NewIBCMiddleware( - app porttypes.IBCModule, ics4Wrapper porttypes.ICS4Wrapper, contractKeeper types.ContractKeeper, maxCallbackGas uint64, -) IBCMiddleware { - packetDataUnmarshalerApp, ok := app.(types.CallbacksCompatibleModule) - if !ok { - panic(fmt.Errorf("underlying application does not implement %T", (*types.CallbacksCompatibleModule)(nil))) - } - - if ics4Wrapper == nil { - panic(errors.New("ICS4Wrapper cannot be nil")) - } - +) *IBCMiddleware { if contractKeeper == nil { panic(errors.New("contract keeper cannot be nil")) } @@ -57,21 +47,39 @@ func NewIBCMiddleware( panic(errors.New("maxCallbackGas cannot be zero")) } - return IBCMiddleware{ - app: packetDataUnmarshalerApp, - ics4Wrapper: ics4Wrapper, + return &IBCMiddleware{ contractKeeper: contractKeeper, maxCallbackGas: maxCallbackGas, } } -// WithICS4Wrapper sets the ICS4Wrapper. This function may be used after the +// SetICS4Wrapper sets the ICS4Wrapper. This function may be used after the // middleware's creation to set the middleware which is above this module in // the IBC application stack. 
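// Taken together with SetUnderlyingApplication below, these setters replace the
// constructor arguments removed above. A minimal sketch, assuming a transfer (or
// other) IBC module, an ICS4 wrapper and a contract keeper are already in scope
// (transferModule, wrapper, contractKeeper and maxCallbackGas are illustrative
// names only, not part of this change):
//
//	cb := NewIBCMiddleware(contractKeeper, maxCallbackGas)
//	cb.SetUnderlyingApplication(transferModule) // must implement porttypes.PacketUnmarshalerModule
//	cb.SetICS4Wrapper(wrapper)                  // e.g. the channel keeper, or the next middleware up the stack
//
// Both setters panic on nil input, so this wiring has to happen before the stack
// handles any packets.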
-func (im *IBCMiddleware) WithICS4Wrapper(wrapper porttypes.ICS4Wrapper) { +func (im *IBCMiddleware) SetICS4Wrapper(wrapper porttypes.ICS4Wrapper) { + if wrapper == nil { + panic("ICS4Wrapper cannot be nil") + } im.ics4Wrapper = wrapper } +// SetUnderlyingApplication sets the underlying IBC module. This function may be used after +// the middleware's creation to set the ibc module which is below this middleware. +func (im *IBCMiddleware) SetUnderlyingApplication(app porttypes.IBCModule) { + if app == nil { + panic(errors.New("underlying application cannot be nil")) + } + if im.app != nil { + panic(errors.New("underlying application already set")) + } + // the underlying application must implement the PacketUnmarshalerModule interface + pdApp, ok := app.(porttypes.PacketUnmarshalerModule) + if !ok { + panic(fmt.Errorf("underlying application must implement PacketUnmarshalerModule, got %T", app)) + } + im.app = pdApp +} + // GetICS4Wrapper returns the ICS4Wrapper. func (im *IBCMiddleware) GetICS4Wrapper() porttypes.ICS4Wrapper { return im.ics4Wrapper @@ -81,7 +89,7 @@ func (im *IBCMiddleware) GetICS4Wrapper() porttypes.ICS4Wrapper { // It defers to the underlying application and then calls the contract callback. // If the contract callback returns an error, panics, or runs out of gas, then // the packet send is rejected. -func (im IBCMiddleware) SendPacket( +func (im *IBCMiddleware) SendPacket( ctx sdk.Context, sourcePort string, sourceChannel string, @@ -128,7 +136,7 @@ func (im IBCMiddleware) SendPacket( // It defers to the underlying application and then calls the contract callback. // If the contract callback runs out of gas and may be retried with a higher gas limit then the state changes are // reverted via a panic. -func (im IBCMiddleware) OnAcknowledgementPacket( +func (im *IBCMiddleware) OnAcknowledgementPacket( ctx sdk.Context, channelVersion string, packet channeltypes.Packet, @@ -175,7 +183,7 @@ func (im IBCMiddleware) OnAcknowledgementPacket( // It defers to the underlying application and then calls the contract callback. // If the contract callback runs out of gas and may be retried with a higher gas limit then the state changes are // reverted via a panic. -func (im IBCMiddleware) OnTimeoutPacket(ctx sdk.Context, channelVersion string, packet channeltypes.Packet, relayer sdk.AccAddress) error { +func (im *IBCMiddleware) OnTimeoutPacket(ctx sdk.Context, channelVersion string, packet channeltypes.Packet, relayer sdk.AccAddress) error { err := im.app.OnTimeoutPacket(ctx, channelVersion, packet, relayer) if err != nil { return err @@ -214,7 +222,7 @@ func (im IBCMiddleware) OnTimeoutPacket(ctx sdk.Context, channelVersion string, // It defers to the underlying application and then calls the contract callback. // If the contract callback runs out of gas and may be retried with a higher gas limit then the state changes are // reverted via a panic. -func (im IBCMiddleware) OnRecvPacket(ctx sdk.Context, channelVersion string, packet channeltypes.Packet, relayer sdk.AccAddress) ibcexported.Acknowledgement { +func (im *IBCMiddleware) OnRecvPacket(ctx sdk.Context, channelVersion string, packet channeltypes.Packet, relayer sdk.AccAddress) ibcexported.Acknowledgement { ack := im.app.OnRecvPacket(ctx, channelVersion, packet, relayer) // if ack is nil (asynchronous acknowledgements), then the callback will be handled in WriteAcknowledgement // if ack is not successful, all state changes are reverted. 
If a packet cannot be received, then there is @@ -261,7 +269,7 @@ func (im IBCMiddleware) OnRecvPacket(ctx sdk.Context, channelVersion string, pac // It defers to the underlying application and then calls the contract callback. // If the contract callback runs out of gas and may be retried with a higher gas limit then the state changes are // reverted via a panic. -func (im IBCMiddleware) WriteAcknowledgement( +func (im *IBCMiddleware) WriteAcknowledgement( ctx sdk.Context, packet ibcexported.PacketI, ack ibcexported.Acknowledgement, @@ -303,7 +311,7 @@ func (im IBCMiddleware) WriteAcknowledgement( } // OnChanOpenInit defers to the underlying application -func (im IBCMiddleware) OnChanOpenInit( +func (im *IBCMiddleware) OnChanOpenInit( ctx sdk.Context, channelOrdering channeltypes.Order, connectionHops []string, @@ -316,7 +324,7 @@ func (im IBCMiddleware) OnChanOpenInit( } // OnChanOpenTry defers to the underlying application -func (im IBCMiddleware) OnChanOpenTry( +func (im *IBCMiddleware) OnChanOpenTry( ctx sdk.Context, channelOrdering channeltypes.Order, connectionHops []string, portID, @@ -328,7 +336,7 @@ func (im IBCMiddleware) OnChanOpenTry( } // OnChanOpenAck defers to the underlying application -func (im IBCMiddleware) OnChanOpenAck( +func (im *IBCMiddleware) OnChanOpenAck( ctx sdk.Context, portID, channelID, @@ -339,28 +347,28 @@ func (im IBCMiddleware) OnChanOpenAck( } // OnChanOpenConfirm defers to the underlying application -func (im IBCMiddleware) OnChanOpenConfirm(ctx sdk.Context, portID, channelID string) error { +func (im *IBCMiddleware) OnChanOpenConfirm(ctx sdk.Context, portID, channelID string) error { return im.app.OnChanOpenConfirm(ctx, portID, channelID) } // OnChanCloseInit defers to the underlying application -func (im IBCMiddleware) OnChanCloseInit(ctx sdk.Context, portID, channelID string) error { +func (im *IBCMiddleware) OnChanCloseInit(ctx sdk.Context, portID, channelID string) error { return im.app.OnChanCloseInit(ctx, portID, channelID) } // OnChanCloseConfirm defers to the underlying application -func (im IBCMiddleware) OnChanCloseConfirm(ctx sdk.Context, portID, channelID string) error { +func (im *IBCMiddleware) OnChanCloseConfirm(ctx sdk.Context, portID, channelID string) error { return im.app.OnChanCloseConfirm(ctx, portID, channelID) } // GetAppVersion implements the ICS4Wrapper interface. Callbacks has no version, // so the call is deferred to the underlying application. -func (im IBCMiddleware) GetAppVersion(ctx sdk.Context, portID, channelID string) (string, bool) { +func (im *IBCMiddleware) GetAppVersion(ctx sdk.Context, portID, channelID string) (string, bool) { return im.ics4Wrapper.GetAppVersion(ctx, portID, channelID) } // UnmarshalPacketData defers to the underlying app to unmarshal the packet data. // This function implements the optional PacketDataUnmarshaler interface. 
-func (im IBCMiddleware) UnmarshalPacketData(ctx sdk.Context, portID string, channelID string, bz []byte) (any, string, error) { +func (im *IBCMiddleware) UnmarshalPacketData(ctx sdk.Context, portID string, channelID string, bz []byte) (any, string, error) { return im.app.UnmarshalPacketData(ctx, portID, channelID, bz) } diff --git a/modules/apps/callbacks/ibc_middleware_test.go b/modules/apps/callbacks/ibc_middleware_test.go index 4aa7f87bb92..49c2986e5e1 100644 --- a/modules/apps/callbacks/ibc_middleware_test.go +++ b/modules/apps/callbacks/ibc_middleware_test.go @@ -35,35 +35,21 @@ func (s *CallbacksTestSuite) TestNewIBCMiddleware() { { "success", func() { - _ = ibccallbacks.NewIBCMiddleware(ibcmock.IBCModule{}, &channelkeeper.Keeper{}, simapp.ContractKeeper{}, maxCallbackGas) + _ = ibccallbacks.NewIBCMiddleware(simapp.ContractKeeper{}, maxCallbackGas) }, nil, }, - { - "panics with nil underlying app", - func() { - _ = ibccallbacks.NewIBCMiddleware(nil, &channelkeeper.Keeper{}, simapp.ContractKeeper{}, maxCallbackGas) - }, - fmt.Errorf("underlying application does not implement %T", (*types.CallbacksCompatibleModule)(nil)), - }, { "panics with nil contract keeper", func() { - _ = ibccallbacks.NewIBCMiddleware(ibcmock.IBCModule{}, &channelkeeper.Keeper{}, nil, maxCallbackGas) + _ = ibccallbacks.NewIBCMiddleware(nil, maxCallbackGas) }, errors.New("contract keeper cannot be nil"), }, - { - "panics with nil ics4Wrapper", - func() { - _ = ibccallbacks.NewIBCMiddleware(ibcmock.IBCModule{}, nil, simapp.ContractKeeper{}, maxCallbackGas) - }, - errors.New("ICS4Wrapper cannot be nil"), - }, { "panics with zero maxCallbackGas", func() { - _ = ibccallbacks.NewIBCMiddleware(ibcmock.IBCModule{}, &channelkeeper.Keeper{}, simapp.ContractKeeper{}, uint64(0)) + _ = ibccallbacks.NewIBCMiddleware(simapp.ContractKeeper{}, uint64(0)) }, errors.New("maxCallbackGas cannot be zero"), }, @@ -80,20 +66,41 @@ func (s *CallbacksTestSuite) TestNewIBCMiddleware() { } } -func (s *CallbacksTestSuite) TestWithICS4Wrapper() { +func (s *CallbacksTestSuite) TestSetICS4Wrapper() { s.setupChains() cbsMiddleware := ibccallbacks.IBCMiddleware{} s.Require().Nil(cbsMiddleware.GetICS4Wrapper()) - cbsMiddleware.WithICS4Wrapper(s.chainA.App.GetIBCKeeper().ChannelKeeper) + s.Require().Panics(func() { + cbsMiddleware.SetICS4Wrapper(nil) + }, "expected panic when setting nil ICS4Wrapper") + + cbsMiddleware.SetICS4Wrapper(s.chainA.App.GetIBCKeeper().ChannelKeeper) ics4Wrapper := cbsMiddleware.GetICS4Wrapper() s.Require().IsType((*channelkeeper.Keeper)(nil), ics4Wrapper) } +func (s *CallbacksTestSuite) TestSetUnderlyingApplication() { + s.setupChains() + + cbsMiddleware := ibccallbacks.IBCMiddleware{} + + s.Require().Panics(func() { + cbsMiddleware.SetUnderlyingApplication(nil) + }, "expected panic when setting nil underlying application") + + cbsMiddleware.SetUnderlyingApplication(&ibcmock.IBCModule{}) + + s.Require().Panics(func() { + cbsMiddleware.SetUnderlyingApplication(&ibcmock.IBCModule{}) + }, "expected panic when setting underlying application a second time") +} + func (s *CallbacksTestSuite) TestSendPacket() { var packetData transfertypes.FungibleTokenPacketData + var callbackExecuted bool testCases := []struct { name string @@ -127,7 +134,7 @@ func (s *CallbacksTestSuite) TestSendPacket() { }, "none", // improperly formatted callback data should result in no callback execution false, - types.ErrCallbackAddressNotFound, + types.ErrInvalidCallbackData, }, { "failure: ics4Wrapper SendPacket call fails", @@ -165,6 +172,46 @@ func 
(s *CallbacksTestSuite) TestSendPacket() { false, errorsmod.Wrapf(types.ErrCallbackOutOfGas, "ibc %s callback out of gas", types.CallbackTypeSendPacket), }, + { + "failure: callback address invalid", + func() { + packetData.Memo = fmt.Sprintf(`{"src_callback": {"address":%d}}`, 50) + callbackExecuted = false // callback should not be executed + }, + types.CallbackTypeSendPacket, + false, + types.ErrInvalidCallbackData, + }, + { + "failure: callback gas limit invalid", + func() { + packetData.Memo = fmt.Sprintf(`{"src_callback": {"address":"%s", "gas_limit":%d}}`, simapp.SuccessContract, 50) + callbackExecuted = false // callback should not be executed + }, + types.CallbackTypeSendPacket, + false, + types.ErrInvalidCallbackData, + }, + { + "failure: callback calldata invalid", + func() { + packetData.Memo = fmt.Sprintf(`{"src_callback": {"address":"%s", "gas_limit":"%d", "calldata":%d}}`, simapp.SuccessContract, 50, 50) + callbackExecuted = false // callback should not be executed + }, + types.CallbackTypeSendPacket, + false, + types.ErrInvalidCallbackData, + }, + { + "failure: callback calldata hex invalid", + func() { + packetData.Memo = fmt.Sprintf(`{"src_callback": {"address":"%s", "gas_limit":"%d", "calldata":"%s"}}`, simapp.SuccessContract, 50, "calldata") + callbackExecuted = false // callback should not be executed + }, + types.CallbackTypeSendPacket, + false, + types.ErrInvalidCallbackData, + }, } for _, tc := range testCases { @@ -180,6 +227,7 @@ func (s *CallbacksTestSuite) TestSendPacket() { ibctesting.TestAccAddress, fmt.Sprintf(`{"src_callback": {"address": "%s"}}`, simapp.SuccessContract), ) + callbackExecuted = true tc.malleate() @@ -214,11 +262,13 @@ func (s *CallbacksTestSuite) TestSendPacket() { default: sendPacket() - s.Require().ErrorIs(err, tc.expValue.(error)) + s.Require().ErrorIs(tc.expValue.(error), err) s.Require().Equal(uint64(0), seq) } - s.AssertHasExecutedExpectedCallback(tc.callbackType, expPass) + if callbackExecuted { + s.AssertHasExecutedExpectedCallback(tc.callbackType, expPass) + } }) } } @@ -279,7 +329,7 @@ func (s *CallbacksTestSuite) TestOnAcknowledgementPacket() { packet.Data = packetData.GetBytes() }, noExecution, - types.ErrCallbackAddressNotFound, + types.ErrInvalidCallbackData, }, { "failure: callback execution reach out of gas, but sufficient gas provided by relayer", @@ -351,18 +401,14 @@ func (s *CallbacksTestSuite) TestOnAcknowledgementPacket() { return transferStack.OnAcknowledgementPacket(ctx, s.path.EndpointA.GetChannel().Version, packet, ack, s.chainA.SenderAccount.GetAddress()) } - switch tc.expError { - case nil: + switch { + case tc.expError == nil: err := onAcknowledgementPacket() s.Require().Nil(err) - - case panicError: - s.Require().PanicsWithValue(storetypes.ErrorOutOfGas{ - Descriptor: fmt.Sprintf("ibc %s callback out of gas; commitGasLimit: %d", types.CallbackTypeAcknowledgementPacket, userGasLimit), - }, func() { + case errors.Is(tc.expError, panicError): + s.Require().PanicsWithValue(storetypes.ErrorOutOfGas{Descriptor: fmt.Sprintf("ibc %s callback out of gas; commitGasLimit: %d", types.CallbackTypeAcknowledgementPacket, userGasLimit)}, func() { _ = onAcknowledgementPacket() }) - default: err := onAcknowledgementPacket() s.Require().ErrorIs(err, tc.expError) @@ -392,6 +438,9 @@ func (s *CallbacksTestSuite) TestOnAcknowledgementPacket() { ) s.Require().True(exists) s.Require().Contains(ctx.EventManager().Events().ToABCIEvents(), expEvent) + + default: + s.T().Fatalf("unexpected expResult: %v", tc.expResult) } }) } @@ -449,7 
+498,7 @@ func (s *CallbacksTestSuite) TestOnTimeoutPacket() { packet.Data = packetData.GetBytes() }, noExecution, - types.ErrCallbackAddressNotFound, + types.ErrInvalidCallbackData, }, { "failure: callback execution reach out of gas, but sufficient gas provided by relayer", @@ -563,6 +612,9 @@ func (s *CallbacksTestSuite) TestOnTimeoutPacket() { ) s.Require().True(exists) s.Require().Contains(ctx.EventManager().Events().ToABCIEvents(), expEvent) + + default: + s.T().Fatalf("unexpected expResult: %v", tc.expResult) } }) } @@ -624,7 +676,7 @@ func (s *CallbacksTestSuite) TestOnRecvPacket() { packet.Data = packetData.GetBytes() }, noExecution, - channeltypes.NewErrorAcknowledgement(types.ErrCallbackAddressNotFound), + channeltypes.NewErrorAcknowledgement(types.ErrInvalidCallbackData), }, { "failure: callback execution reach out of gas, but sufficient gas provided by relayer", @@ -736,6 +788,9 @@ func (s *CallbacksTestSuite) TestOnRecvPacket() { ) s.Require().True(exists) s.Require().Contains(ctx.EventManager().Events().ToABCIEvents(), expEvent) + + default: + s.T().Fatalf("unexpected expResult: %v", tc.expResult) } }) } @@ -782,7 +837,7 @@ func (s *CallbacksTestSuite) TestWriteAcknowledgement() { packet.Data = packetData.GetBytes() }, "none", // improperly formatted callback data should result in no callback execution - types.ErrCallbackAddressNotFound, + types.ErrInvalidCallbackData, }, { "failure: ics4Wrapper WriteAcknowledgement call fails", @@ -841,7 +896,6 @@ func (s *CallbacksTestSuite) TestWriteAcknowledgement() { if exists { s.Require().Contains(ctx.EventManager().Events().ToABCIEvents(), expEvent) } - } else { s.Require().ErrorIs(err, tc.expError) } @@ -998,7 +1052,7 @@ func (s *CallbacksTestSuite) TestUnmarshalPacketDataV1() { transferStack, ok := s.chainA.App.GetIBCKeeper().PortKeeper.Route(transfertypes.ModuleName) s.Require().True(ok) - unmarshalerStack, ok := transferStack.(types.CallbacksCompatibleModule) + unmarshalerStack, ok := transferStack.(porttypes.PacketUnmarshalerModule) s.Require().True(ok) expPacketDataICS20V1 := transfertypes.FungibleTokenPacketData{ diff --git a/modules/apps/callbacks/testing/simapp/app.go b/modules/apps/callbacks/testing/simapp/app.go index fa502fd59ff..33c8f7d2dd1 100644 --- a/modules/apps/callbacks/testing/simapp/app.go +++ b/modules/apps/callbacks/testing/simapp/app.go @@ -66,10 +66,6 @@ import ( "github.com/cosmos/cosmos-sdk/x/mint" mintkeeper "github.com/cosmos/cosmos-sdk/x/mint/keeper" minttypes "github.com/cosmos/cosmos-sdk/x/mint/types" - "github.com/cosmos/cosmos-sdk/x/params" - paramsclient "github.com/cosmos/cosmos-sdk/x/params/client" - paramskeeper "github.com/cosmos/cosmos-sdk/x/params/keeper" - paramstypes "github.com/cosmos/cosmos-sdk/x/params/types" "github.com/cosmos/cosmos-sdk/x/slashing" slashingkeeper "github.com/cosmos/cosmos-sdk/x/slashing/keeper" slashingtypes "github.com/cosmos/cosmos-sdk/x/slashing/types" @@ -139,7 +135,6 @@ type SimApp struct { // keys to access the substores keys map[string]*storetypes.KVStoreKey - tkeys map[string]*storetypes.TransientStoreKey memKeys map[string]*storetypes.MemoryStoreKey // keepers @@ -151,11 +146,10 @@ type SimApp struct { DistrKeeper distrkeeper.Keeper GovKeeper govkeeper.Keeper UpgradeKeeper *upgradekeeper.Keeper - ParamsKeeper paramskeeper.Keeper IBCKeeper *ibckeeper.Keeper // IBC Keeper must be a pointer in the app, so we can SetRouter on it correctly - ICAControllerKeeper icacontrollerkeeper.Keeper - ICAHostKeeper icahostkeeper.Keeper - TransferKeeper ibctransferkeeper.Keeper + 
ICAControllerKeeper *icacontrollerkeeper.Keeper + ICAHostKeeper *icahostkeeper.Keeper + TransferKeeper *ibctransferkeeper.Keeper ConsensusParamsKeeper consensusparamkeeper.Keeper // mock contract keeper used for testing @@ -247,7 +241,7 @@ func NewSimApp( keys := storetypes.NewKVStoreKeys( authtypes.StoreKey, banktypes.StoreKey, stakingtypes.StoreKey, minttypes.StoreKey, distrtypes.StoreKey, slashingtypes.StoreKey, - govtypes.StoreKey, paramstypes.StoreKey, ibcexported.StoreKey, upgradetypes.StoreKey, + govtypes.StoreKey, ibcexported.StoreKey, upgradetypes.StoreKey, ibctransfertypes.StoreKey, icacontrollertypes.StoreKey, icahosttypes.StoreKey, consensusparamtypes.StoreKey, ) @@ -257,7 +251,6 @@ func NewSimApp( panic(err) } - tkeys := storetypes.NewTransientStoreKeys(paramstypes.TStoreKey) memKeys := storetypes.NewMemoryStoreKeys(ibcmock.MemStoreKey) app := &SimApp{ @@ -267,12 +260,9 @@ func NewSimApp( txConfig: txConfig, interfaceRegistry: interfaceRegistry, keys: keys, - tkeys: tkeys, memKeys: memKeys, } - app.ParamsKeeper = initParamsKeeper(appCodec, legacyAmino, keys[paramstypes.StoreKey], tkeys[paramstypes.TStoreKey]) - // set the BaseApp's parameter store app.ConsensusParamsKeeper = consensusparamkeeper.NewKeeper(appCodec, runtime.NewKVStoreService(keys[consensusparamtypes.StoreKey]), authtypes.NewModuleAddress(govtypes.ModuleName).String(), runtime.EventService{}) bApp.SetParamStore(app.ConsensusParamsKeeper.ParamsStore) @@ -317,7 +307,7 @@ func NewSimApp( app.UpgradeKeeper = upgradekeeper.NewKeeper(skipUpgradeHeights, runtime.NewKVStoreService(keys[upgradetypes.StoreKey]), appCodec, homePath, app.BaseApp, authtypes.NewModuleAddress(govtypes.ModuleName).String()) app.IBCKeeper = ibckeeper.NewKeeper( - appCodec, runtime.NewKVStoreService(keys[ibcexported.StoreKey]), app.GetSubspace(ibcexported.ModuleName), app.UpgradeKeeper, authtypes.NewModuleAddress(govtypes.ModuleName).String(), + appCodec, runtime.NewKVStoreService(keys[ibcexported.StoreKey]), app.UpgradeKeeper, authtypes.NewModuleAddress(govtypes.ModuleName).String(), ) // NOTE: The mock ContractKeeper is only created for testing. @@ -342,8 +332,7 @@ func NewSimApp( // ICA Controller keeper app.ICAControllerKeeper = icacontrollerkeeper.NewKeeper( - appCodec, runtime.NewKVStoreService(keys[icacontrollertypes.StoreKey]), app.GetSubspace(icacontrollertypes.SubModuleName), - app.IBCKeeper.ChannelKeeper, + appCodec, runtime.NewKVStoreService(keys[icacontrollertypes.StoreKey]), app.IBCKeeper.ChannelKeeper, app.MsgServiceRouter(), authtypes.NewModuleAddress(govtypes.ModuleName).String(), @@ -351,8 +340,7 @@ func NewSimApp( // ICA Host keeper app.ICAHostKeeper = icahostkeeper.NewKeeper( - appCodec, runtime.NewKVStoreService(keys[icahosttypes.StoreKey]), app.GetSubspace(icahosttypes.SubModuleName), - app.IBCKeeper.ChannelKeeper, + appCodec, runtime.NewKVStoreService(keys[icahosttypes.StoreKey]), app.IBCKeeper.ChannelKeeper, app.AccountKeeper, app.MsgServiceRouter(), app.GRPCQueryRouter(), authtypes.NewModuleAddress(govtypes.ModuleName).String(), @@ -368,8 +356,9 @@ func NewSimApp( // Create Transfer Keeper // NOTE: the Transfer Keeper's ICS4Wrapper can later be replaced. 
app.TransferKeeper = ibctransferkeeper.NewKeeper( - appCodec, runtime.NewKVStoreService(keys[ibctransfertypes.StoreKey]), app.GetSubspace(ibctransfertypes.ModuleName), - app.IBCKeeper.ChannelKeeper, + appCodec, + app.AccountKeeper.AddressCodec(), + runtime.NewKVStoreService(keys[ibctransfertypes.StoreKey]), app.IBCKeeper.ChannelKeeper, app.MsgServiceRouter(), app.AccountKeeper, app.BankKeeper, @@ -399,41 +388,27 @@ func NewSimApp( // - Transfer // create IBC module from bottom to top of stack - var transferStack porttypes.IBCModule - transferStack = transfer.NewIBCModule(app.TransferKeeper) - transferStack = ibccallbacks.NewIBCMiddleware(transferStack, app.IBCKeeper.ChannelKeeper, app.MockContractKeeper, maxCallbackGas) - var transferICS4Wrapper porttypes.ICS4Wrapper - transferICS4Wrapper, ok := transferStack.(porttypes.ICS4Wrapper) - if !ok { - panic(fmt.Errorf("cannot convert %T to %T", transferStack, transferICS4Wrapper)) - } - - // Since the callbacks middleware itself is an ics4wrapper, it needs to be passed to the transfer keeper - app.TransferKeeper.WithICS4Wrapper(transferICS4Wrapper) + transferStack := porttypes.NewIBCStackBuilder(app.IBCKeeper.ChannelKeeper) + transferStack.Base(transfer.NewIBCModule(app.TransferKeeper)).Next( + ibccallbacks.NewIBCMiddleware(app.MockContractKeeper, maxCallbackGas), + ) // Add transfer stack to IBC Router - ibcRouter.AddRoute(ibctransfertypes.ModuleName, transferStack) + ibcRouter.AddRoute(ibctransfertypes.ModuleName, transferStack.Build()) // Create Interchain Accounts Stack // SendPacket, since it is originating from the application to core IBC: // icaControllerKeeper.SendTx -> callbacks.SendPacket -> channel.SendPacket // initialize ICA module with mock module as the authentication module on the controller side - var icaControllerStack porttypes.IBCModule - icaControllerStack = ibcmock.NewIBCModule(&mockModule, ibcmock.NewIBCApp("")) - app.ICAAuthModule, ok = icaControllerStack.(ibcmock.IBCModule) - if !ok { - panic(fmt.Errorf("cannot convert %T to %T", icaControllerStack, app.ICAAuthModule)) - } - icaControllerStack = icacontroller.NewIBCMiddlewareWithAuth(icaControllerStack, app.ICAControllerKeeper) - icaControllerStack = ibccallbacks.NewIBCMiddleware(icaControllerStack, app.IBCKeeper.ChannelKeeper, app.MockContractKeeper, maxCallbackGas) - var icaICS4Wrapper porttypes.ICS4Wrapper - icaICS4Wrapper, ok = icaControllerStack.(porttypes.ICS4Wrapper) - if !ok { - panic(fmt.Errorf("cannot convert %T to %T", icaControllerStack, icaICS4Wrapper)) - } - // Since the callbacks middleware itself is an ics4wrapper, it needs to be passed to the ica controller keeper - app.ICAControllerKeeper.WithICS4Wrapper(icaICS4Wrapper) + icaControllerStack := porttypes.NewIBCStackBuilder(app.IBCKeeper.ChannelKeeper) + + icaControllerStack.Base(ibcmock.NewIBCModule(&mockModule, ibcmock.NewIBCApp(""))).Next( + icacontroller.NewIBCMiddleware(app.ICAControllerKeeper), + ).Next( + ibccallbacks.NewIBCMiddleware(app.MockContractKeeper, maxCallbackGas), + ) + icaControllerApp := icaControllerStack.Build() // RecvPacket, message that originates from core IBC and goes down to app, the flow is: // channel.RecvPacket -> icaHost.OnRecvPacket @@ -442,9 +417,9 @@ func NewSimApp( // Add host, controller & ica auth modules to IBC router ibcRouter. - AddRoute(icacontrollertypes.SubModuleName, icaControllerStack). + AddRoute(icacontrollertypes.SubModuleName, icaControllerApp). AddRoute(icahosttypes.SubModuleName, icaHostStack). 
- AddRoute(ibcmock.ModuleName+icacontrollertypes.SubModuleName, icaControllerStack) // ica with mock auth module stack route to ica (top level of middleware stack) + AddRoute(ibcmock.ModuleName+icacontrollertypes.SubModuleName, icaControllerApp) // ica with mock auth module stack route to ica (top level of middleware stack) // OnRecvPacket, message that originates from core IBC and goes down to app, the flow is the otherway // channel.RecvPacket -> callbacks.OnRecvPacket -> mockModule.OnRecvPacket @@ -478,22 +453,21 @@ func NewSimApp( app.AccountKeeper, app.StakingKeeper, app, txConfig, ), - auth.NewAppModule(appCodec, app.AccountKeeper, authsims.RandomGenesisAccounts, app.GetSubspace(authtypes.ModuleName)), + auth.NewAppModule(appCodec, app.AccountKeeper, authsims.RandomGenesisAccounts, nil), vesting.NewAppModule(app.AccountKeeper, app.BankKeeper), - bank.NewAppModule(appCodec, app.BankKeeper, app.AccountKeeper, app.GetSubspace(banktypes.ModuleName)), - gov.NewAppModule(appCodec, &app.GovKeeper, app.AccountKeeper, app.BankKeeper, app.GetSubspace(govtypes.ModuleName)), - mint.NewAppModule(appCodec, app.MintKeeper, app.AccountKeeper, nil, app.GetSubspace(minttypes.ModuleName)), - slashing.NewAppModule(appCodec, app.SlashingKeeper, app.AccountKeeper, app.BankKeeper, app.StakingKeeper, app.GetSubspace(slashingtypes.ModuleName), app.interfaceRegistry), - distr.NewAppModule(appCodec, app.DistrKeeper, app.AccountKeeper, app.BankKeeper, app.StakingKeeper, app.GetSubspace(distrtypes.ModuleName)), - staking.NewAppModule(appCodec, app.StakingKeeper, app.AccountKeeper, app.BankKeeper, app.GetSubspace(stakingtypes.ModuleName)), + bank.NewAppModule(appCodec, app.BankKeeper, app.AccountKeeper, nil), + gov.NewAppModule(appCodec, &app.GovKeeper, app.AccountKeeper, app.BankKeeper, nil), + mint.NewAppModule(appCodec, app.MintKeeper, app.AccountKeeper, nil, nil), + slashing.NewAppModule(appCodec, app.SlashingKeeper, app.AccountKeeper, app.BankKeeper, app.StakingKeeper, nil, app.interfaceRegistry), + distr.NewAppModule(appCodec, app.DistrKeeper, app.AccountKeeper, app.BankKeeper, app.StakingKeeper, nil), + staking.NewAppModule(appCodec, app.StakingKeeper, app.AccountKeeper, app.BankKeeper, nil), upgrade.NewAppModule(app.UpgradeKeeper, app.AccountKeeper.AddressCodec()), - params.NewAppModule(app.ParamsKeeper), consensus.NewAppModule(appCodec, app.ConsensusParamsKeeper), // IBC modules ibc.NewAppModule(app.IBCKeeper), transfer.NewAppModule(app.TransferKeeper), - ica.NewAppModule(&app.ICAControllerKeeper, &app.ICAHostKeeper), + ica.NewAppModule(app.ICAControllerKeeper, app.ICAHostKeeper), mockModule, // IBC light clients @@ -509,11 +483,7 @@ func NewSimApp( app.ModuleManager, map[string]module.AppModuleBasic{ genutiltypes.ModuleName: genutil.NewAppModuleBasic(genutiltypes.DefaultMessageValidator), - govtypes.ModuleName: gov.NewAppModuleBasic( - []govclient.ProposalHandler{ - paramsclient.ProposalHandler, - }, - ), + govtypes.ModuleName: gov.NewAppModuleBasic([]govclient.ProposalHandler{}), }) app.BasicModuleManager.RegisterLegacyAminoCodec(legacyAmino) app.BasicModuleManager.RegisterInterfaces(interfaceRegistry) @@ -557,7 +527,7 @@ func NewSimApp( banktypes.ModuleName, distrtypes.ModuleName, stakingtypes.ModuleName, slashingtypes.ModuleName, govtypes.ModuleName, minttypes.ModuleName, ibcexported.ModuleName, genutiltypes.ModuleName, ibctransfertypes.ModuleName, - icatypes.ModuleName, ibcmock.ModuleName, paramstypes.ModuleName, upgradetypes.ModuleName, + icatypes.ModuleName, ibcmock.ModuleName, 
upgradetypes.ModuleName, vestingtypes.ModuleName, consensusparamtypes.ModuleName, } app.ModuleManager.SetOrderInitGenesis(genesisModuleOrder...) @@ -580,7 +550,7 @@ func NewSimApp( // NOTE: this is not required apps that don't use the simulator for fuzz testing // transactions overrideModules := map[string]module.AppModuleSimulation{ - authtypes.ModuleName: auth.NewAppModule(app.appCodec, app.AccountKeeper, authsims.RandomGenesisAccounts, app.GetSubspace(authtypes.ModuleName)), + authtypes.ModuleName: auth.NewAppModule(app.appCodec, app.AccountKeeper, authsims.RandomGenesisAccounts, nil), } app.simulationManager = module.NewSimulationManagerFromAppModules(app.ModuleManager.Modules, overrideModules) @@ -588,7 +558,6 @@ func NewSimApp( // initialize stores app.MountKVStores(keys) - app.MountTransientStores(tkeys) app.MountMemoryStores(memKeys) // initialize BaseApp @@ -762,14 +731,6 @@ func (app *SimApp) GetStoreKeys() []storetypes.StoreKey { return keys } -// GetSubspace returns a param subspace for a given module name. -// -// NOTE: This is solely to be used for testing purposes. -func (app *SimApp) GetSubspace(moduleName string) paramstypes.Subspace { - subspace, _ := app.ParamsKeeper.GetSubspace(moduleName) - return subspace -} - // SimulationManager implements the SimulationApp interface func (app *SimApp) SimulationManager() *module.SimulationManager { return app.simulationManager @@ -841,20 +802,6 @@ func BlockedAddresses() map[string]bool { return modAccAddrs } -// initParamsKeeper init params keeper and its subspaces -func initParamsKeeper(appCodec codec.BinaryCodec, legacyAmino *codec.LegacyAmino, key, tkey storetypes.StoreKey) paramskeeper.Keeper { - paramsKeeper := paramskeeper.NewKeeper(appCodec, legacyAmino, key, tkey) - - // TODO: ibc module subspaces can be removed after migration of params - // https://github.com/cosmos/ibc-go/issues/2010 - paramsKeeper.Subspace(ibctransfertypes.ModuleName) - paramsKeeper.Subspace(ibcexported.ModuleName) - paramsKeeper.Subspace(icacontrollertypes.SubModuleName) - paramsKeeper.Subspace(icahosttypes.SubModuleName) - - return paramsKeeper -} - // IBC TestingApp functions // GetBaseApp implements the TestingApp interface. diff --git a/modules/apps/callbacks/testing/simapp/export.go b/modules/apps/callbacks/testing/simapp/export.go index 6ce02a08309..b42545c3433 100644 --- a/modules/apps/callbacks/testing/simapp/export.go +++ b/modules/apps/callbacks/testing/simapp/export.go @@ -69,7 +69,7 @@ func (app *SimApp) prepForZeroHeightGenesis(ctx sdk.Context, jailAllowedAddrs [] /* Handle fee distribution state. */ // withdraw all validator commission - err := app.StakingKeeper.IterateValidators(ctx, func(_ int64, val stakingtypes.ValidatorI) (stop bool) { + err := app.StakingKeeper.IterateValidators(ctx, func(_ int64, val stakingtypes.ValidatorI) bool { valBz, err := app.StakingKeeper.ValidatorAddressCodec().StringToBytes(val.GetOperator()) if err != nil { panic(err) @@ -111,7 +111,7 @@ func (app *SimApp) prepForZeroHeightGenesis(ctx sdk.Context, jailAllowedAddrs [] ctx = ctx.WithBlockHeight(0) // reinitialize all validators - err = app.StakingKeeper.IterateValidators(ctx, func(_ int64, val stakingtypes.ValidatorI) (stop bool) { + err = app.StakingKeeper.IterateValidators(ctx, func(_ int64, val stakingtypes.ValidatorI) bool { valBz, err := sdk.ValAddressFromBech32(val.GetOperator()) if err != nil { panic(err) @@ -162,7 +162,7 @@ func (app *SimApp) prepForZeroHeightGenesis(ctx sdk.Context, jailAllowedAddrs [] /* Handle staking state. 
*/ // iterate through redelegations, reset creation height - err = app.StakingKeeper.IterateRedelegations(ctx, func(_ int64, red stakingtypes.Redelegation) (stop bool) { + err = app.StakingKeeper.IterateRedelegations(ctx, func(_ int64, red stakingtypes.Redelegation) bool { for i := range red.Entries { red.Entries[i].CreationHeight = 0 } @@ -177,7 +177,7 @@ func (app *SimApp) prepForZeroHeightGenesis(ctx sdk.Context, jailAllowedAddrs [] } // iterate through unbonding delegations, reset creation height - err = app.StakingKeeper.IterateUnbondingDelegations(ctx, func(_ int64, ubd stakingtypes.UnbondingDelegation) (stop bool) { + err = app.StakingKeeper.IterateUnbondingDelegations(ctx, func(_ int64, ubd stakingtypes.UnbondingDelegation) bool { for i := range ubd.Entries { ubd.Entries[i].CreationHeight = 0 } @@ -228,7 +228,7 @@ func (app *SimApp) prepForZeroHeightGenesis(ctx sdk.Context, jailAllowedAddrs [] // reset start height on signing infos err = app.SlashingKeeper.IterateValidatorSigningInfos( ctx, - func(addr sdk.ConsAddress, info slashingtypes.ValidatorSigningInfo) (stop bool) { + func(addr sdk.ConsAddress, info slashingtypes.ValidatorSigningInfo) bool { info.StartHeight = 0 err = app.SlashingKeeper.SetValidatorSigningInfo(ctx, addr, info) if err != nil { diff --git a/modules/apps/callbacks/types/callbacks.go b/modules/apps/callbacks/types/callbacks.go index edb7b1e7ba1..7e4b182155c 100644 --- a/modules/apps/callbacks/types/callbacks.go +++ b/modules/apps/callbacks/types/callbacks.go @@ -1,14 +1,16 @@ package types import ( + "encoding/hex" "strconv" "strings" + errorsmod "cosmossdk.io/errors" + sdk "github.com/cosmos/cosmos-sdk/types" channeltypes "github.com/cosmos/ibc-go/v10/modules/core/04-channel/types" porttypes "github.com/cosmos/ibc-go/v10/modules/core/05-port/types" - "github.com/cosmos/ibc-go/v10/modules/core/api" ibcexported "github.com/cosmos/ibc-go/v10/modules/core/exported" ) @@ -43,20 +45,6 @@ keeper to verify that the packet sender is the same as the callback address if d */ -// CallbacksCompatibleModule is an interface that combines the IBCModule and PacketDataUnmarshaler -// interfaces to assert that the underlying application supports both. -type CallbacksCompatibleModule interface { - porttypes.IBCModule - porttypes.PacketDataUnmarshaler -} - -// CallbacksCompatibleModuleV2 is an interface that combines the IBCModuleV2 and PacketDataUnmarshaler -// interfaces to assert that the underlying application supports both. -type CallbacksCompatibleModuleV2 interface { - api.IBCModule - api.PacketDataUnmarshaler -} - // CallbackData is the callback data parsed from the packet. type CallbackData struct { // CallbackAddress is the address of the callback actor. @@ -74,6 +62,9 @@ type CallbackData struct { CommitGasLimit uint64 // ApplicationVersion is the base application version. ApplicationVersion string + // Calldata is the calldata to be passed to the callback actor. + // This may be empty but if it is not empty, it should be the calldata sent to the callback actor. + Calldata []byte } // GetSourceCallbackData parses the packet data and returns the source callback data. 
@@ -117,7 +108,7 @@ func GetCallbackData( version, srcPortID string, remainingGas, maxGas uint64, callbackKey string, -) (cbData CallbackData, isCbPacket bool, err error) { +) (CallbackData, bool, error) { packetDataProvider, ok := packetData.(ibcexported.PacketDataProvider) if !ok { return CallbackData{}, false, ErrNotPacketDataProvider @@ -129,9 +120,9 @@ func GetCallbackData( } // get the callback address from the callback data - callbackAddress := getCallbackAddress(callbackData) - if strings.TrimSpace(callbackAddress) == "" { - return CallbackData{}, true, ErrCallbackAddressNotFound + callbackAddress, err := getCallbackAddress(callbackData) + if err != nil || strings.TrimSpace(callbackAddress) == "" { + return CallbackData{}, true, ErrInvalidCallbackData } // retrieve packet sender from packet data if possible and if needed @@ -144,7 +135,15 @@ func GetCallbackData( } // get the gas limit from the callback data - executionGasLimit, commitGasLimit := computeExecAndCommitGasLimit(callbackData, remainingGas, maxGas) + executionGasLimit, commitGasLimit, err := computeExecAndCommitGasLimit(callbackData, remainingGas, maxGas) + if err != nil { + return CallbackData{}, true, err + } + + callData, err := getCalldata(callbackData) + if err != nil { + return CallbackData{}, true, err + } return CallbackData{ CallbackAddress: callbackAddress, @@ -152,12 +151,16 @@ func GetCallbackData( SenderAddress: packetSender, CommitGasLimit: commitGasLimit, ApplicationVersion: version, + Calldata: callData, }, true, nil } -func computeExecAndCommitGasLimit(callbackData map[string]any, remainingGas, maxGas uint64) (uint64, uint64) { +func computeExecAndCommitGasLimit(callbackData map[string]any, remainingGas, maxGas uint64) (uint64, uint64, error) { // get the gas limit from the callback data - commitGasLimit := getUserDefinedGasLimit(callbackData) + commitGasLimit, err := getUserDefinedGasLimit(callbackData) + if err != nil { + return 0, 0, err + } // ensure user defined gas limit does not exceed the max gas limit if commitGasLimit == 0 || commitGasLimit > maxGas { @@ -168,7 +171,7 @@ func computeExecAndCommitGasLimit(callbackData map[string]any, remainingGas, max // in this case, the callback execution may be retried upon failure executionGasLimit := min(remainingGas, commitGasLimit) - return executionGasLimit, commitGasLimit + return executionGasLimit, commitGasLimit, nil } // getUserDefinedGasLimit returns the custom gas limit provided for callbacks if it is @@ -179,19 +182,26 @@ func computeExecAndCommitGasLimit(callbackData map[string]any, remainingGas, max // { "{callbackKey}": { ... , "gas_limit": {stringForCallback} } // // Note: the user defined gas limit must be set as a string and not a json number. 
-func getUserDefinedGasLimit(callbackData map[string]any) uint64 { +func getUserDefinedGasLimit(callbackData map[string]any) (uint64, error) { // the gas limit must be specified as a string and not a json number - gasLimit, ok := callbackData[UserDefinedGasLimitKey].(string) + gasLimit, ok := callbackData[UserDefinedGasLimitKey] if !ok { - return 0 + return 0, nil + } + gasLimitStr, ok := gasLimit.(string) + if !ok { + return 0, errorsmod.Wrapf(ErrInvalidCallbackData, "gas limit [%v] must be a string", gasLimit) + } + if gasLimitStr == "" { + return 0, nil } - userGas, err := strconv.ParseUint(gasLimit, 10, 64) + userGas, err := strconv.ParseUint(gasLimitStr, 10, 64) if err != nil { - return 0 + return 0, errorsmod.Wrapf(ErrInvalidCallbackData, "gas limit must be a valid uint64: %s", err) } - return userGas + return userGas, nil } // getCallbackAddress returns the callback address if it is specified in the callback data. @@ -203,13 +213,34 @@ func getUserDefinedGasLimit(callbackData map[string]any) uint64 { // // ADR-8 middleware should callback on the returned address if it is a PacketActor // (i.e. smart contract that accepts IBC callbacks). -func getCallbackAddress(callbackData map[string]any) string { +func getCallbackAddress(callbackData map[string]any) (string, error) { callbackAddress, ok := callbackData[CallbackAddressKey].(string) if !ok { - return "" + return "", errorsmod.Wrapf(ErrInvalidCallbackData, "callback address must be a string") } - return callbackAddress + return callbackAddress, nil +} + +// getCalldata returns the calldata if it is specified in the callback data. +func getCalldata(callbackData map[string]any) ([]byte, error) { + calldataAny, ok := callbackData[CalldataKey] + if !ok { + return nil, nil + } + calldataStr, ok := calldataAny.(string) + if !ok { + return nil, errorsmod.Wrapf(ErrInvalidCallbackData, "calldata must be a string") + } + if calldataStr == "" { + return nil, nil + } + + calldata, err := hex.DecodeString(calldataStr) + if err != nil { + return nil, errorsmod.Wrapf(ErrInvalidCallbackData, "calldata must be a valid hex string: %s", err) + } + return calldata, nil } // AllowRetry returns true if the callback execution gas limit is less than the commit gas limit. 
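A compact, hedged illustration of the stricter memo parsing introduced above; the bech32 address is a placeholder, while the keys and helper names are the ones defined in this file.

    // memo attached to an ICS-20 transfer
    memo := `{"src_callback": {"address": "cosmos1...", "gas_limit": "200000", "calldata": "63616c6c64617461"}}`
    // getCallbackAddress     -> "cosmos1..."       (missing or non-string address: ErrInvalidCallbackData)
    // getUserDefinedGasLimit -> 200000             (a JSON number or non-numeric string now returns
    //                                               ErrInvalidCallbackData instead of silently falling back to 0)
    // getCalldata            -> []byte("calldata") (hex.DecodeString of "63616c6c64617461"; non-hex input
    //                                               also returns ErrInvalidCallbackData)
    _ = memo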
diff --git a/modules/apps/callbacks/types/callbacks_test.go b/modules/apps/callbacks/types/callbacks_test.go index d8ac06d5c46..470351f1f26 100644 --- a/modules/apps/callbacks/types/callbacks_test.go +++ b/modules/apps/callbacks/types/callbacks_test.go @@ -228,6 +228,79 @@ func (s *CallbacksTypesTestSuite) TestGetCallbackData() { true, nil, }, + { + "success: source callback with calldata", + func() { + remainingGas = 2_000_000 + version = transfertypes.V1 + packetData = transfertypes.FungibleTokenPacketData{ + Denom: ibctesting.TestCoin.Denom, + Amount: ibctesting.TestCoin.Amount.String(), + Sender: sender, + Receiver: receiver, + Memo: fmt.Sprintf(`{"src_callback": {"address": "%s", "calldata": "%x"}}`, sender, []byte("calldata")), + } + }, + types.CallbackData{ + CallbackAddress: sender, + SenderAddress: sender, + ExecutionGasLimit: 1_000_000, + CommitGasLimit: 1_000_000, + ApplicationVersion: transfertypes.V1, + Calldata: []byte("calldata"), + }, + true, + nil, + }, + { + "success: source callback with empty calldata", + func() { + remainingGas = 2_000_000 + version = transfertypes.V1 + packetData = transfertypes.FungibleTokenPacketData{ + Denom: ibctesting.TestCoin.Denom, + Amount: ibctesting.TestCoin.Amount.String(), + Sender: sender, + Receiver: receiver, + Memo: fmt.Sprintf(`{"src_callback": {"address": "%s", "calldata": ""}}`, sender), + } + }, + types.CallbackData{ + CallbackAddress: sender, + SenderAddress: sender, + ExecutionGasLimit: 1_000_000, + CommitGasLimit: 1_000_000, + ApplicationVersion: transfertypes.V1, + Calldata: nil, + }, + true, + nil, + }, + { + "success: dest callback with calldata", + func() { + callbackKey = types.DestinationCallbackKey + remainingGas = 2_000_000 + version = transfertypes.V1 + packetData = transfertypes.FungibleTokenPacketData{ + Denom: ibctesting.TestCoin.Denom, + Amount: ibctesting.TestCoin.Amount.String(), + Sender: sender, + Receiver: receiver, + Memo: fmt.Sprintf(`{"dest_callback": {"address": "%s", "calldata": "%x"}}`, sender, []byte("calldata")), + } + }, + types.CallbackData{ + CallbackAddress: sender, + SenderAddress: "", + ExecutionGasLimit: 1_000_000, + CommitGasLimit: 1_000_000, + ApplicationVersion: transfertypes.V1, + Calldata: []byte("calldata"), + }, + true, + nil, + }, { "failure: packet data does not implement PacketDataProvider", func() { @@ -267,7 +340,7 @@ func (s *CallbacksTypesTestSuite) TestGetCallbackData() { }, types.CallbackData{}, true, - types.ErrCallbackAddressNotFound, + types.ErrInvalidCallbackData, }, { "failure: space address", @@ -283,7 +356,7 @@ func (s *CallbacksTypesTestSuite) TestGetCallbackData() { }, types.CallbackData{}, true, - types.ErrCallbackAddressNotFound, + types.ErrInvalidCallbackData, }, { @@ -343,6 +416,57 @@ func (s *CallbacksTypesTestSuite) TestGetCallbackData() { false, types.ErrNotPacketDataProvider, }, + { + "failure: invalid gasLimit", + func() { + remainingGas = 2_000_000 + version = transfertypes.V1 + packetData = transfertypes.FungibleTokenPacketData{ + Denom: ibctesting.TestCoin.Denom, + Amount: ibctesting.TestCoin.Amount.String(), + Sender: sender, + Receiver: receiver, + Memo: fmt.Sprintf(`{"src_callback": {"address": "%s", "gas_limit": "invalid"}}`, sender), + } + }, + types.CallbackData{}, + true, + types.ErrInvalidCallbackData, + }, + { + "failure: invalid calldata", + func() { + remainingGas = 2_000_000 + version = transfertypes.V1 + packetData = transfertypes.FungibleTokenPacketData{ + Denom: ibctesting.TestCoin.Denom, + Amount: ibctesting.TestCoin.Amount.String(), + Sender: 
sender, + Receiver: receiver, + Memo: fmt.Sprintf(`{"src_callback": {"address": "%s", "calldata": "invalid"}}`, sender), + } + }, + types.CallbackData{}, + true, + types.ErrInvalidCallbackData, + }, + { + "failure: invalid calldata is number", + func() { + remainingGas = 2_000_000 + version = transfertypes.V1 + packetData = transfertypes.FungibleTokenPacketData{ + Denom: ibctesting.TestCoin.Denom, + Amount: ibctesting.TestCoin.Amount.String(), + Sender: sender, + Receiver: receiver, + Memo: fmt.Sprintf(`{"src_callback": {"address": "%s", "calldata": 10}}`, sender), + } + }, + types.CallbackData{}, + true, + types.ErrInvalidCallbackData, + }, } for _, tc := range testCases { @@ -365,6 +489,11 @@ func (s *CallbacksTypesTestSuite) TestGetCallbackData() { expAllowRetry := tc.expCallbackData.ExecutionGasLimit < tc.expCallbackData.CommitGasLimit s.Require().Equal(expAllowRetry, callbackData.AllowRetry(), tc.name) + + // check if the callback calldata is correctly unmarshalled + if len(tc.expCallbackData.Calldata) > 0 { + s.Require().Equal([]byte("calldata"), callbackData.Calldata, tc.name) + } } else { s.Require().ErrorIs(err, tc.expError, tc.name) } @@ -465,7 +594,7 @@ func (s *CallbacksTypesTestSuite) TestGetDestSourceCallbackDataTransfer() { transferStack, ok := s.chainA.App.GetIBCKeeper().PortKeeper.Route(transfertypes.ModuleName) s.Require().True(ok) - packetUnmarshaler, ok := transferStack.(types.CallbacksCompatibleModule) + packetUnmarshaler, ok := transferStack.(porttypes.PacketUnmarshalerModule) s.Require().True(ok) s.path.Setup() @@ -496,6 +625,7 @@ func (s *CallbacksTypesTestSuite) TestGetCallbackAddress() { name string packetData ibcexported.PacketDataProvider expAddress string + expError error }{ { "success: memo has callbacks in json struct and properly formatted src_callback_address which does not match packet sender", @@ -507,6 +637,7 @@ func (s *CallbacksTypesTestSuite) TestGetCallbackAddress() { Memo: fmt.Sprintf(`{"src_callback": {"address": "%s"}}`, receiver), }, receiver, + nil, }, { "success: valid src_callback address specified in memo that matches sender", @@ -518,6 +649,7 @@ func (s *CallbacksTypesTestSuite) TestGetCallbackAddress() { Memo: fmt.Sprintf(`{"src_callback": {"address": "%s"}}`, sender), }, sender, + nil, }, { "failure: memo is empty", @@ -529,6 +661,7 @@ func (s *CallbacksTypesTestSuite) TestGetCallbackAddress() { Memo: "", }, "", + nil, }, { "failure: memo is not json string", @@ -540,6 +673,7 @@ func (s *CallbacksTypesTestSuite) TestGetCallbackAddress() { Memo: "memo", }, "", + nil, }, { "failure: memo has empty src_callback object", @@ -551,6 +685,7 @@ func (s *CallbacksTypesTestSuite) TestGetCallbackAddress() { Memo: `{"src_callback": {}}`, }, "", + types.ErrInvalidCallbackData, }, { "failure: memo does not have callbacks in json struct", @@ -562,6 +697,7 @@ func (s *CallbacksTypesTestSuite) TestGetCallbackAddress() { Memo: `{"Key": 10}`, }, "", + nil, }, { "failure: memo has src_callback in json struct but does not have address key", @@ -573,6 +709,7 @@ func (s *CallbacksTypesTestSuite) TestGetCallbackAddress() { Memo: `{"src_callback": {"Key": 10}}`, }, "", + types.ErrInvalidCallbackData, }, { "failure: memo has src_callback in json struct but does not have string value for address key", @@ -584,6 +721,7 @@ func (s *CallbacksTypesTestSuite) TestGetCallbackAddress() { Memo: `{"src_callback": {"address": 10}}`, }, "", + types.ErrInvalidCallbackData, }, } @@ -591,7 +729,15 @@ func (s *CallbacksTypesTestSuite) TestGetCallbackAddress() { s.Run(tc.name, 
func() { callbackData, ok := tc.packetData.GetCustomPacketData(types.SourceCallbackKey).(map[string]any) s.Require().Equal(ok, callbackData != nil) - s.Require().Equal(tc.expAddress, types.GetCallbackAddress(callbackData), tc.name) + if ok { + address, err := types.GetCallbackAddress(callbackData) + if tc.expError != nil { + s.Require().ErrorIs(err, tc.expError, tc.name) + } else { + s.Require().NoError(err, tc.name) + s.Require().Equal(tc.expAddress, address, tc.name) + } + } }) } } @@ -606,6 +752,7 @@ func (s *CallbacksTypesTestSuite) TestUserDefinedGasLimit() { name string packetData ibcexported.PacketDataProvider expUserGas uint64 + expError error }{ { "success: memo is empty", @@ -617,6 +764,7 @@ func (s *CallbacksTypesTestSuite) TestUserDefinedGasLimit() { Memo: "", }, 0, + nil, }, { "success: memo has user defined gas limit", @@ -628,6 +776,7 @@ func (s *CallbacksTypesTestSuite) TestUserDefinedGasLimit() { Memo: `{"src_callback": {"gas_limit": "100"}}`, }, 100, + nil, }, { "success: user defined gas limit is zero", @@ -639,6 +788,7 @@ func (s *CallbacksTypesTestSuite) TestUserDefinedGasLimit() { Memo: `{"src_callback": {"gas_limit": "0"}}`, }, 0, + nil, }, { "failure: memo has empty src_callback object", @@ -650,6 +800,7 @@ func (s *CallbacksTypesTestSuite) TestUserDefinedGasLimit() { Memo: `{"src_callback": {}}`, }, 0, + nil, }, { "failure: memo has user defined gas limit as json number", @@ -661,6 +812,7 @@ func (s *CallbacksTypesTestSuite) TestUserDefinedGasLimit() { Memo: `{"src_callback": {"gas_limit": 100}}`, }, 0, + types.ErrInvalidCallbackData, }, { "failure: memo has user defined gas limit as negative", @@ -672,6 +824,7 @@ func (s *CallbacksTypesTestSuite) TestUserDefinedGasLimit() { Memo: `{"src_callback": {"gas_limit": "-100"}}`, }, 0, + types.ErrInvalidCallbackData, }, { "failure: memo has user defined gas limit as string", @@ -683,6 +836,7 @@ func (s *CallbacksTypesTestSuite) TestUserDefinedGasLimit() { Memo: `{"src_callback": {"gas_limit": "invalid"}}`, }, 0, + types.ErrInvalidCallbackData, }, { "failure: memo has user defined gas limit as empty string", @@ -694,6 +848,7 @@ func (s *CallbacksTypesTestSuite) TestUserDefinedGasLimit() { Memo: `{"src_callback": {"gas_limit": ""}}`, }, 0, + nil, }, { "failure: malformed memo", @@ -705,6 +860,7 @@ func (s *CallbacksTypesTestSuite) TestUserDefinedGasLimit() { Memo: `invalid`, }, 0, + nil, }, } @@ -712,7 +868,13 @@ func (s *CallbacksTypesTestSuite) TestUserDefinedGasLimit() { s.Run(tc.name, func() { callbackData, ok := tc.packetData.GetCustomPacketData(types.SourceCallbackKey).(map[string]any) s.Require().Equal(ok, callbackData != nil) - s.Require().Equal(tc.expUserGas, types.GetUserDefinedGasLimit(callbackData), tc.name) + userGas, err := types.GetUserDefinedGasLimit(callbackData) + if tc.expError != nil { + s.Require().ErrorIs(err, tc.expError, tc.name) + } else { + s.Require().NoError(err, tc.name) + s.Require().Equal(tc.expUserGas, userGas, tc.name) + } }) } } diff --git a/modules/apps/callbacks/types/errors.go b/modules/apps/callbacks/types/errors.go index df2d2ef2938..07d5a5c9ef5 100644 --- a/modules/apps/callbacks/types/errors.go +++ b/modules/apps/callbacks/types/errors.go @@ -11,4 +11,5 @@ var ( ErrCallbackAddressNotFound = errorsmod.Register(ModuleName, 5, "callback address not found in packet data") ErrCallbackOutOfGas = errorsmod.Register(ModuleName, 6, "callback out of gas") ErrCallbackPanic = errorsmod.Register(ModuleName, 7, "callback panic") + ErrInvalidCallbackData = errorsmod.Register(ModuleName, 8, "invalid 
callback data") ) diff --git a/modules/apps/callbacks/types/export_test.go b/modules/apps/callbacks/types/export_test.go index 382ff6e6231..e55fdf3c854 100644 --- a/modules/apps/callbacks/types/export_test.go +++ b/modules/apps/callbacks/types/export_test.go @@ -5,11 +5,11 @@ package types */ // GetCallbackAddress is a wrapper around getCallbackAddress to allow the function to be directly called in tests. -func GetCallbackAddress(callbackData map[string]any) string { +func GetCallbackAddress(callbackData map[string]any) (string, error) { return getCallbackAddress(callbackData) } // GetUserDefinedGasLimit is a wrapper around getUserDefinedGasLimit to allow the function to be directly called in tests. -func GetUserDefinedGasLimit(callbackData map[string]any) uint64 { +func GetUserDefinedGasLimit(callbackData map[string]any) (uint64, error) { return getUserDefinedGasLimit(callbackData) } diff --git a/modules/apps/callbacks/types/keys.go b/modules/apps/callbacks/types/keys.go index d07613cc466..9ab3f5c323c 100644 --- a/modules/apps/callbacks/types/keys.go +++ b/modules/apps/callbacks/types/keys.go @@ -28,4 +28,6 @@ const ( // The expected format for ICS20 and ICS27 memo field is as follows: // { "{callbackKey}": { ... , "gas_limit": {stringForCallback} } UserDefinedGasLimitKey = "gas_limit" + // CalldataKey is the key used to store the calldata in the callback packet data. + CalldataKey = "calldata" ) diff --git a/modules/apps/callbacks/v2/ibc_middleware.go b/modules/apps/callbacks/v2/ibc_middleware.go index f15b8230890..d31f5d628f9 100644 --- a/modules/apps/callbacks/v2/ibc_middleware.go +++ b/modules/apps/callbacks/v2/ibc_middleware.go @@ -42,7 +42,7 @@ func (rack RecvAcknowledgement) Acknowledgement() []byte { // IBCMiddleware implements the IBC v2 middleware interface // with the underlying application. type IBCMiddleware struct { - app types.CallbacksCompatibleModuleV2 + app api.PacketUnmarshalerModuleV2 writeAckWrapper api.WriteAcknowledgementWrapper contractKeeper types.ContractKeeper @@ -60,10 +60,10 @@ type IBCMiddleware struct { func NewIBCMiddleware( app api.IBCModule, writeAckWrapper api.WriteAcknowledgementWrapper, contractKeeper types.ContractKeeper, chanKeeperV2 types.ChannelKeeperV2, maxCallbackGas uint64, -) IBCMiddleware { - packetDataUnmarshalerApp, ok := app.(types.CallbacksCompatibleModuleV2) +) *IBCMiddleware { + packetDataUnmarshalerApp, ok := app.(api.PacketUnmarshalerModuleV2) if !ok { - panic(fmt.Errorf("underlying application does not implement %T", (*types.CallbacksCompatibleModule)(nil))) + panic(fmt.Errorf("underlying application does not implement %T", (*api.PacketUnmarshalerModuleV2)(nil))) } if contractKeeper == nil { @@ -82,7 +82,7 @@ func NewIBCMiddleware( panic(errors.New("maxCallbackGas cannot be zero")) } - return IBCMiddleware{ + return &IBCMiddleware{ app: packetDataUnmarshalerApp, writeAckWrapper: writeAckWrapper, contractKeeper: contractKeeper, @@ -105,7 +105,7 @@ func (im *IBCMiddleware) GetWriteAckWrapper() api.WriteAcknowledgementWrapper { // It defers to the underlying application and then calls the contract callback. // If the contract callback returns an error, panics, or runs out of gas, then // the packet send is rejected. -func (im IBCMiddleware) OnSendPacket( +func (im *IBCMiddleware) OnSendPacket( ctx sdk.Context, sourceClient string, destinationClient string, @@ -158,7 +158,7 @@ func (im IBCMiddleware) OnSendPacket( // It defers to the underlying application and then calls the contract callback. 
// If the contract callback runs out of gas and may be retried with a higher gas limit then the state changes are // reverted via a panic. -func (im IBCMiddleware) OnRecvPacket( +func (im *IBCMiddleware) OnRecvPacket( ctx sdk.Context, sourceClient string, destinationClient string, @@ -233,7 +233,7 @@ func (im IBCMiddleware) OnRecvPacket( // It defers to the underlying application and then calls the contract callback. // If the contract callback runs out of gas and may be retried with a higher gas limit then the state changes are // reverted via a panic. -func (im IBCMiddleware) OnAcknowledgementPacket( +func (im *IBCMiddleware) OnAcknowledgementPacket( ctx sdk.Context, sourceClient string, destinationClient string, @@ -306,7 +306,7 @@ func (im IBCMiddleware) OnAcknowledgementPacket( // If the contract callback runs out of gas and may be retried with a higher gas limit then the state changes are // reverted via a panic. // OnTimeoutPacket is executed when a packet has timed out on the receiving chain. -func (im IBCMiddleware) OnTimeoutPacket( +func (im *IBCMiddleware) OnTimeoutPacket( ctx sdk.Context, sourceClient string, destinationClient string, @@ -371,7 +371,7 @@ func (im IBCMiddleware) OnTimeoutPacket( // It defers to the underlying application and then calls the contract callback. // If the contract callback runs out of gas and may be retried with a higher gas limit then the state changes are // reverted via a panic. -func (im IBCMiddleware) WriteAcknowledgement( +func (im *IBCMiddleware) WriteAcknowledgement( ctx sdk.Context, clientID string, sequence uint64, diff --git a/modules/apps/callbacks/v2/ibc_middleware_test.go b/modules/apps/callbacks/v2/ibc_middleware_test.go index 417e4052f79..750a41f955b 100644 --- a/modules/apps/callbacks/v2/ibc_middleware_test.go +++ b/modules/apps/callbacks/v2/ibc_middleware_test.go @@ -12,7 +12,7 @@ import ( "github.com/cosmos/ibc-go/v10/modules/apps/callbacks/testing/simapp" "github.com/cosmos/ibc-go/v10/modules/apps/callbacks/types" - v2 "github.com/cosmos/ibc-go/v10/modules/apps/callbacks/v2" + callbacksv2 "github.com/cosmos/ibc-go/v10/modules/apps/callbacks/v2" transfertypes "github.com/cosmos/ibc-go/v10/modules/apps/transfer/types" channeltypes "github.com/cosmos/ibc-go/v10/modules/core/04-channel/types" channelkeeperv2 "github.com/cosmos/ibc-go/v10/modules/core/04-channel/v2/keeper" @@ -33,42 +33,42 @@ func (s *CallbacksTestSuite) TestNewIBCMiddleware() { { "success", func() { - _ = v2.NewIBCMiddleware(ibcmockv2.IBCModule{}, &channelkeeperv2.Keeper{}, simapp.ContractKeeper{}, &channelkeeperv2.Keeper{}, maxCallbackGas) + _ = callbacksv2.NewIBCMiddleware(ibcmockv2.IBCModule{}, &channelkeeperv2.Keeper{}, simapp.ContractKeeper{}, &channelkeeperv2.Keeper{}, maxCallbackGas) }, nil, }, { "panics with nil ics4wrapper", func() { - _ = v2.NewIBCMiddleware(ibcmockv2.IBCModule{}, nil, simapp.ContractKeeper{}, &channelkeeperv2.Keeper{}, maxCallbackGas) + _ = callbacksv2.NewIBCMiddleware(ibcmockv2.IBCModule{}, nil, simapp.ContractKeeper{}, &channelkeeperv2.Keeper{}, maxCallbackGas) }, errors.New("write acknowledgement wrapper cannot be nil"), }, { "panics with nil underlying app", func() { - _ = v2.NewIBCMiddleware(nil, &channelkeeperv2.Keeper{}, simapp.ContractKeeper{}, &channelkeeperv2.Keeper{}, maxCallbackGas) + _ = callbacksv2.NewIBCMiddleware(nil, &channelkeeperv2.Keeper{}, simapp.ContractKeeper{}, &channelkeeperv2.Keeper{}, maxCallbackGas) }, - fmt.Errorf("underlying application does not implement %T", (*types.CallbacksCompatibleModule)(nil)), + 
fmt.Errorf("underlying application does not implement %T", (*api.PacketUnmarshalerModuleV2)(nil)), }, { "panics with nil contract keeper", func() { - _ = v2.NewIBCMiddleware(ibcmockv2.IBCModule{}, &channelkeeperv2.Keeper{}, nil, &channelkeeperv2.Keeper{}, maxCallbackGas) + _ = callbacksv2.NewIBCMiddleware(ibcmockv2.IBCModule{}, &channelkeeperv2.Keeper{}, nil, &channelkeeperv2.Keeper{}, maxCallbackGas) }, errors.New("contract keeper cannot be nil"), }, { "panics with nil channel v2 keeper", func() { - _ = v2.NewIBCMiddleware(ibcmockv2.IBCModule{}, &channelkeeperv2.Keeper{}, simapp.ContractKeeper{}, nil, maxCallbackGas) + _ = callbacksv2.NewIBCMiddleware(ibcmockv2.IBCModule{}, &channelkeeperv2.Keeper{}, simapp.ContractKeeper{}, nil, maxCallbackGas) }, errors.New("channel keeper v2 cannot be nil"), }, { "panics with zero maxCallbackGas", func() { - _ = v2.NewIBCMiddleware(ibcmockv2.IBCModule{}, &channelkeeperv2.Keeper{}, simapp.ContractKeeper{}, &channelkeeperv2.Keeper{}, uint64(0)) + _ = callbacksv2.NewIBCMiddleware(ibcmockv2.IBCModule{}, &channelkeeperv2.Keeper{}, simapp.ContractKeeper{}, &channelkeeperv2.Keeper{}, uint64(0)) }, errors.New("maxCallbackGas cannot be zero"), }, @@ -88,7 +88,7 @@ func (s *CallbacksTestSuite) TestNewIBCMiddleware() { func (s *CallbacksTestSuite) TestWithWriteAckWrapper() { s.setupChains() - cbsMiddleware := v2.IBCMiddleware{} + cbsMiddleware := callbacksv2.IBCMiddleware{} s.Require().Nil(cbsMiddleware.GetWriteAckWrapper()) cbsMiddleware.WithWriteAckWrapper(s.chainA.App.GetIBCKeeper().ChannelKeeperV2) @@ -132,7 +132,7 @@ func (s *CallbacksTestSuite) TestSendPacket() { }, "none", // improperly formatted callback data should result in no callback execution false, - types.ErrCallbackAddressNotFound, + types.ErrInvalidCallbackData, }, { "failure: callback execution fails", @@ -274,7 +274,7 @@ func (s *CallbacksTestSuite) TestOnAcknowledgementPacket() { packetData.Memo = `{"src_callback": {"address": ""}}` }, noExecution, - types.ErrCallbackAddressNotFound, + types.ErrInvalidCallbackData, }, { "failure: callback execution reach out of gas, but sufficient gas provided by relayer", @@ -338,18 +338,14 @@ func (s *CallbacksTestSuite) TestOnAcknowledgementPacket() { return cbs.OnAcknowledgementPacket(ctx, s.path.EndpointA.ClientID, s.path.EndpointB.ClientID, 1, ack, payload, s.chainA.SenderAccount.GetAddress()) } - switch tc.expError { - case nil: + switch { + case tc.expError == nil: err := onAcknowledgementPacket() s.Require().Nil(err) - - case panicError: - s.Require().PanicsWithValue(storetypes.ErrorOutOfGas{ - Descriptor: fmt.Sprintf("ibc %s callback out of gas; commitGasLimit: %d", types.CallbackTypeAcknowledgementPacket, userGasLimit), - }, func() { + case errors.Is(tc.expError, panicError): + s.Require().PanicsWithValue(storetypes.ErrorOutOfGas{Descriptor: fmt.Sprintf("ibc %s callback out of gas; commitGasLimit: %d", types.CallbackTypeAcknowledgementPacket, userGasLimit)}, func() { _ = onAcknowledgementPacket() }) - default: err := onAcknowledgementPacket() s.Require().ErrorIs(err, tc.expError) @@ -379,6 +375,9 @@ func (s *CallbacksTestSuite) TestOnAcknowledgementPacket() { ) s.Require().True(exists) s.Require().Contains(ctx.EventManager().Events().ToABCIEvents(), expEvent) + + default: + s.T().Fatalf("unexpected expResult: %v", tc.expResult) } }) } @@ -433,7 +432,7 @@ func (s *CallbacksTestSuite) TestOnTimeoutPacket() { packetData.Memo = `{"src_callback": {"address": ""}}` }, noExecution, - types.ErrCallbackAddressNotFound, + types.ErrInvalidCallbackData, }, { 
"failure: callback execution reach out of gas, but sufficient gas provided by relayer", @@ -545,6 +544,9 @@ func (s *CallbacksTestSuite) TestOnTimeoutPacket() { ) s.Require().True(exists) s.Require().Contains(ctx.EventManager().Events().ToABCIEvents(), expEvent) + + default: + s.T().Fatalf("unexpected expResult: %v", tc.expResult) } }) } @@ -713,6 +715,9 @@ func (s *CallbacksTestSuite) TestOnRecvPacket() { ) s.Require().True(exists) s.Require().Contains(ctx.EventManager().Events().ToABCIEvents(), expEvent) + + default: + s.T().Fatalf("unexpected expResult: %v", tc.expResult) } }) } @@ -758,7 +763,7 @@ func (s *CallbacksTestSuite) TestWriteAcknowledgement() { packetData.Memo = `{"dest_callback": {"address": ""}}` }, "none", // improperly formatted callback data should result in no callback execution - types.ErrCallbackAddressNotFound, + types.ErrInvalidCallbackData, }, { "failure: ics4Wrapper WriteAcknowledgement call fails", @@ -839,7 +844,6 @@ func (s *CallbacksTestSuite) TestWriteAcknowledgement() { if exists { s.Require().Contains(ctx.EventManager().Events().ToABCIEvents(), expEvent) } - } else { s.Require().ErrorIs(err, tc.expError) } diff --git a/modules/apps/callbacks/v2/v2_test.go b/modules/apps/callbacks/v2/v2_test.go index f28a583353a..5e4deb2a04e 100644 --- a/modules/apps/callbacks/v2/v2_test.go +++ b/modules/apps/callbacks/v2/v2_test.go @@ -111,8 +111,8 @@ func (s *CallbacksTestSuite) AssertCallbackCounters(callbackType types.CallbackT switch callbackType { case "none": - s.Require().Len(sourceCounters, 0) - s.Require().Len(destCounters, 0) + s.Require().Empty(sourceCounters) + s.Require().Empty(destCounters) case types.CallbackTypeSendPacket: s.Require().Len(sourceCounters, 1) @@ -123,10 +123,10 @@ func (s *CallbacksTestSuite) AssertCallbackCounters(callbackType types.CallbackT s.Require().Equal(1, sourceCounters[types.CallbackTypeSendPacket]) s.Require().Equal(1, sourceCounters[types.CallbackTypeAcknowledgementPacket]) - s.Require().Len(destCounters, 0) + s.Require().Empty(destCounters) case types.CallbackTypeReceivePacket: - s.Require().Len(sourceCounters, 0) + s.Require().Empty(sourceCounters) s.Require().Len(destCounters, 1) s.Require().Equal(1, destCounters[types.CallbackTypeReceivePacket]) @@ -135,7 +135,7 @@ func (s *CallbacksTestSuite) AssertCallbackCounters(callbackType types.CallbackT s.Require().Equal(1, sourceCounters[types.CallbackTypeSendPacket]) s.Require().Equal(1, sourceCounters[types.CallbackTypeTimeoutPacket]) - s.Require().Len(destCounters, 0) + s.Require().Empty(destCounters) default: s.FailNow(fmt.Sprintf("invalid callback type %s", callbackType)) diff --git a/modules/apps/packet-forward-middleware/ibc_middleware.go b/modules/apps/packet-forward-middleware/ibc_middleware.go new file mode 100644 index 00000000000..2d12eb41900 --- /dev/null +++ b/modules/apps/packet-forward-middleware/ibc_middleware.go @@ -0,0 +1,387 @@ +package packetforward + +import ( + "errors" + "fmt" + "time" + + "github.com/hashicorp/go-metrics" + + errorsmod "cosmossdk.io/errors" + sdkmath "cosmossdk.io/math" + + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/cosmos/cosmos-sdk/types/address" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" + + "github.com/cosmos/ibc-go/v10/modules/apps/packet-forward-middleware/keeper" + "github.com/cosmos/ibc-go/v10/modules/apps/packet-forward-middleware/types" + transfertypes "github.com/cosmos/ibc-go/v10/modules/apps/transfer/types" + clienttypes "github.com/cosmos/ibc-go/v10/modules/core/02-client/types" + channeltypes 
"github.com/cosmos/ibc-go/v10/modules/core/04-channel/types" + porttypes "github.com/cosmos/ibc-go/v10/modules/core/05-port/types" + ibcexported "github.com/cosmos/ibc-go/v10/modules/core/exported" +) + +var ( + _ porttypes.Middleware = &IBCMiddleware{} + _ porttypes.PacketUnmarshalerModule = &IBCMiddleware{} +) + +// IBCMiddleware implements the ICS26 callbacks for the forward middleware given the +// forward keeper and the underlying application. +type IBCMiddleware struct { + app porttypes.PacketUnmarshalerModule + keeper *keeper.Keeper + + retriesOnTimeout uint8 + forwardTimeout time.Duration +} + +// NewIBCMiddleware creates a new IBCMiddleware given the keeper and underlying application. +func NewIBCMiddleware(k *keeper.Keeper, retriesOnTimeout uint8, forwardTimeout time.Duration) *IBCMiddleware { + return &IBCMiddleware{ + keeper: k, + retriesOnTimeout: retriesOnTimeout, + forwardTimeout: forwardTimeout, + } +} + +// OnChanOpenInit implements the IBCModule interface. +func (im *IBCMiddleware) OnChanOpenInit(ctx sdk.Context, order channeltypes.Order, connectionHops []string, portID string, channelID string, counterparty channeltypes.Counterparty, version string) (string, error) { + return im.app.OnChanOpenInit(ctx, order, connectionHops, portID, channelID, counterparty, version) +} + +// OnChanOpenTry implements the IBCModule interface. +func (im *IBCMiddleware) OnChanOpenTry(ctx sdk.Context, order channeltypes.Order, connectionHops []string, portID, channelID string, counterparty channeltypes.Counterparty, counterpartyVersion string) (string, error) { + return im.app.OnChanOpenTry(ctx, order, connectionHops, portID, channelID, counterparty, counterpartyVersion) +} + +// OnChanOpenAck implements the IBCModule interface. +func (im *IBCMiddleware) OnChanOpenAck(ctx sdk.Context, portID, channelID string, counterpartyChannelID string, counterpartyVersion string) error { + return im.app.OnChanOpenAck(ctx, portID, channelID, counterpartyChannelID, counterpartyVersion) +} + +// OnChanOpenConfirm implements the IBCModule interface. +func (im *IBCMiddleware) OnChanOpenConfirm(ctx sdk.Context, portID, channelID string) error { + return im.app.OnChanOpenConfirm(ctx, portID, channelID) +} + +// OnChanCloseInit implements the IBCModule interface. +func (im *IBCMiddleware) OnChanCloseInit(ctx sdk.Context, portID, channelID string) error { + return im.app.OnChanCloseInit(ctx, portID, channelID) +} + +// OnChanCloseConfirm implements the IBCModule interface. +func (im *IBCMiddleware) OnChanCloseConfirm(ctx sdk.Context, portID, channelID string) error { + return im.app.OnChanCloseConfirm(ctx, portID, channelID) +} + +// UnmarshalPacketData implements PacketDataUnmarshaler. +func (im *IBCMiddleware) UnmarshalPacketData(ctx sdk.Context, portID string, channelID string, bz []byte) (any, string, error) { + return im.app.UnmarshalPacketData(ctx, portID, channelID, bz) +} + +func getDenomForThisChain(port, channel, counterpartyPort, counterpartyChannel string, denom transfertypes.Denom) string { + if denom.HasPrefix(counterpartyPort, counterpartyChannel) { + // unwind denom + denom.Trace = denom.Trace[1:] + if len(denom.Trace) == 0 { + // denom is now unwound back to native denom + return denom.Path() + } + // denom is still IBC denom + return denom.IBCDenom() + } + // append port and channel from this chain to denom + trace := []transfertypes.Hop{transfertypes.NewHop(port, channel)} + denom.Trace = append(trace, denom.Trace...) 
+ + return denom.IBCDenom() +} + +// getBoolFromAny returns the bool value is any is a valid bool, otherwise false. +func getBoolFromAny(value any) bool { + if value == nil { + return false + } + boolVal, ok := value.(bool) + if !ok { + return false + } + return boolVal +} + +// GetReceiver returns the receiver address for a given channel and original sender. +// it overrides the receiver address to be a hash of the channel/origSender so that +// the receiver address is deterministic and can be used to identify the sender on the +// initial chain. +func GetReceiver(channel string, originalSender string) (string, error) { + senderStr := fmt.Sprintf("%s/%s", channel, originalSender) + senderHash32 := address.Hash(types.ModuleName, []byte(senderStr)) + sender := sdk.AccAddress(senderHash32[:20]) + bech32Prefix := sdk.GetConfig().GetBech32AccountAddrPrefix() + return sdk.Bech32ifyAddressBytes(bech32Prefix, sender) +} + +// newErrorAcknowledgement returns an error that identifies PFM and provides the error. +// It's okay if these errors are non-deterministic, because they will not be committed to state, only emitted as events. +func newErrorAcknowledgement(err error) channeltypes.Acknowledgement { + return channeltypes.Acknowledgement{ + Response: &channeltypes.Acknowledgement_Error{ + Error: fmt.Sprintf("packet-forward-middleware error: %s", err.Error()), + }, + } +} + +// OnRecvPacket checks the memo field on this packet and if the metadata inside's root key indicates this packet +// should be handled by the swap middleware it attempts to perform a swap. If the swap is successful +// the underlying application's OnRecvPacket callback is invoked, an ack error is returned otherwise. +func (im *IBCMiddleware) OnRecvPacket(ctx sdk.Context, channelVersion string, packet channeltypes.Packet, relayer sdk.AccAddress) ibcexported.Acknowledgement { + logger := im.keeper.Logger(ctx) + + var data transfertypes.FungibleTokenPacketData + if err := transfertypes.ModuleCdc.UnmarshalJSON(packet.GetData(), &data); err != nil { + logger.Debug(fmt.Sprintf("packetForwardMiddleware OnRecvPacket payload is not a FungibleTokenPacketData: %s", err.Error())) + return im.app.OnRecvPacket(ctx, channelVersion, packet, relayer) + } + + transferDetail, err := transfertypes.PacketDataV1ToV2(data) + if err != nil { + logger.Error(fmt.Sprintf("packetForwardMiddleware OnRecvPacket could not convert FungibleTokenPacketData to InternalRepresentation: %s", err.Error())) + return im.app.OnRecvPacket(ctx, channelVersion, packet, relayer) + } + + logger.Debug("packetForwardMiddleware OnRecvPacket", + "sequence", packet.Sequence, + "src-channel", packet.SourceChannel, + "src-port", packet.SourcePort, + "dst-channel", packet.DestinationChannel, + "dst-port", packet.DestinationPort, + "amount", data.Amount, + "denom", data.Denom, + "memo", data.Memo, + ) + + packetMetadata, isPFM, err := types.GetPacketMetadataFromPacketdata(data) + if err != nil && !isPFM { + // not a packet that should be forwarded + logger.Debug("packetForwardMiddleware OnRecvPacket forward metadata does not exist") + return im.app.OnRecvPacket(ctx, channelVersion, packet, relayer) + } + if err != nil && isPFM { + logger.Error("packetForwardMiddleware OnRecvPacket error parsing forward metadata", "error", err) + return newErrorAcknowledgement(fmt.Errorf("error parsing forward metadata: %w", err)) + } + + metadata := packetMetadata.Forward + + goCtx := ctx.Context() + nonrefundable := getBoolFromAny(goCtx.Value(types.NonrefundableKey{})) + + if err := 
metadata.Validate(); err != nil { + logger.Error("packetForwardMiddleware OnRecvPacket forward metadata is invalid", "error", err) + return newErrorAcknowledgement(err) + } + + // override the receiver so that senders cannot move funds through arbitrary addresses. + overrideReceiver, err := GetReceiver(packet.DestinationChannel, data.Sender) + if err != nil { + logger.Error("packetForwardMiddleware OnRecvPacket failed to construct override receiver", "error", err) + return newErrorAcknowledgement(fmt.Errorf("failed to construct override receiver: %w", err)) + } + + if err := im.receiveFunds(ctx, channelVersion, packet, data, overrideReceiver, relayer); err != nil { + logger.Error("packetForwardMiddleware OnRecvPacket error receiving packet", "error", err) + return newErrorAcknowledgement(fmt.Errorf("error receiving packet: %w", err)) + } + + // if this packet's token denom is already the base denom for some native token on this chain, + // we do not need to do any further composition of the denom before forwarding the packet + denomOnThisChain := getDenomForThisChain(packet.DestinationPort, packet.DestinationChannel, packet.SourcePort, packet.SourceChannel, transferDetail.Token.Denom) + + amountInt, ok := sdkmath.NewIntFromString(transferDetail.Token.Amount) + if !ok { + logger.Error("packetForwardMiddleware OnRecvPacket error parsing amount for forward", "amount", transferDetail.Token.Amount) + return newErrorAcknowledgement(fmt.Errorf("error parsing amount for forward: %s", transferDetail.Token.Amount)) + } + + token := sdk.NewCoin(denomOnThisChain, amountInt) + + timeout := metadata.Timeout + + if timeout.Nanoseconds() <= 0 { + timeout = im.forwardTimeout + } + + var retries uint8 + if metadata.Retries != nil { + retries = *metadata.Retries + } else { + retries = im.retriesOnTimeout + } + + err = im.keeper.ForwardTransferPacket(ctx, nil, packet, data.Sender, overrideReceiver, metadata, token, retries, timeout, []metrics.Label{}, nonrefundable) + if err != nil { + logger.Error("packetForwardMiddleware OnRecvPacket error forwarding packet", "error", err) + return newErrorAcknowledgement(err) + } + + // returning nil ack will prevent WriteAcknowledgement from occurring for forwarded packet. + // This is intentional so that the acknowledgement will be written later based on the ack/timeout of the forwarded packet. + return nil +} + +// receiveFunds receives funds from the packet into the override receiver +// address and returns an error if the funds cannot be received. +func (im *IBCMiddleware) receiveFunds(ctx sdk.Context, channelVersion string, packet channeltypes.Packet, data transfertypes.FungibleTokenPacketData, overrideReceiver string, relayer sdk.AccAddress) error { + overrideData := data + overrideData.Receiver = overrideReceiver + overrideData.Memo = "" // Memo explicitly emptied. + + overrideDataBz := transfertypes.ModuleCdc.MustMarshalJSON(&overrideData) + + overridePacket := packet + overridePacket.Data = overrideDataBz // Override data. + ack := im.app.OnRecvPacket(ctx, channelVersion, overridePacket, relayer) + if ack == nil { + return errors.New("ack is nil") + } + + if !ack.Success() { + return fmt.Errorf("ack error: %s", string(ack.Acknowledgement())) + } + + return nil +} + +// OnAcknowledgementPacket implements the IBCModule interface. 
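To make the receive path above concrete: the root memo key is presumably "forward" (matching the packetMetadata.Forward field used above), and receiver/port/channel mirror ForwardMetadata as built in the tests later in this diff; timeout and retries are optional and fall back to forwardTimeout and retriesOnTimeout. Before forwarding, the funds are first credited to the deterministic override receiver derived above from address.Hash(types.ModuleName, "channel/originalSender") truncated to 20 bytes, so the same (channel, sender) pair always maps to the same local address. A minimal sketch, with a placeholder address:

    memo := `{"forward": {"receiver": "cosmos1...", "port": "transfer", "channel": "channel-0"}}`
    _ = memo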
+func (im *IBCMiddleware) OnAcknowledgementPacket(ctx sdk.Context, channelVersion string, packet channeltypes.Packet, acknowledgement []byte, relayer sdk.AccAddress) error { + var data transfertypes.FungibleTokenPacketData + if err := transfertypes.ModuleCdc.UnmarshalJSON(packet.GetData(), &data); err != nil { + im.keeper.Logger(ctx).Error("packetForwardMiddleware error parsing packet data from ack packet", + "sequence", packet.Sequence, + "src-channel", packet.SourceChannel, + "src-port", packet.SourcePort, + "dst-channel", packet.DestinationChannel, + "dst-port", packet.DestinationPort, + "error", err, + ) + return im.app.OnAcknowledgementPacket(ctx, channelVersion, packet, acknowledgement, relayer) + } + transferDetail, err := transfertypes.PacketDataV1ToV2(data) + if err != nil { + im.keeper.Logger(ctx).Error("packetForwardMiddleware error converting FungibleTokenPacket to InternalRepresentation") + return im.app.OnAcknowledgementPacket(ctx, channelVersion, packet, acknowledgement, relayer) + } + + im.keeper.Logger(ctx).Debug("packetForwardMiddleware OnAcknowledgementPacket", + "sequence", packet.Sequence, + "src-channel", packet.SourceChannel, + "src-port", packet.SourcePort, + "dst-channel", packet.DestinationChannel, + "dst-port", packet.DestinationPort, + "amount", data.Amount, + "denom", data.Denom, + ) + + var ack channeltypes.Acknowledgement + if err := channeltypes.SubModuleCdc.UnmarshalJSON(acknowledgement, &ack); err != nil { + return errorsmod.Wrapf(sdkerrors.ErrUnknownRequest, "cannot unmarshal ICS-20 transfer packet acknowledgement: %v", err) + } + + inFlightPacket, err := im.keeper.GetInflightPacket(ctx, packet) + if err != nil { + return err + } + + if inFlightPacket != nil { + im.keeper.RemoveInFlightPacket(ctx, packet) + // this is a forwarded packet, so override handling to avoid refund from being processed. + return im.keeper.WriteAcknowledgementForForwardedPacket(ctx, packet, transferDetail, inFlightPacket, ack) + } + + return im.app.OnAcknowledgementPacket(ctx, channelVersion, packet, acknowledgement, relayer) +} + +// OnTimeoutPacket implements the IBCModule interface. 
+func (im *IBCMiddleware) OnTimeoutPacket(ctx sdk.Context, channelVersion string, packet channeltypes.Packet, relayer sdk.AccAddress) error { + var data transfertypes.FungibleTokenPacketData + if err := transfertypes.ModuleCdc.UnmarshalJSON(packet.GetData(), &data); err != nil { + im.keeper.Logger(ctx).Error("packetForwardMiddleware error parsing packet data from timeout packet", + "sequence", packet.Sequence, + "src-channel", packet.SourceChannel, + "src-port", packet.SourcePort, + "dst-channel", packet.DestinationChannel, + "dst-port", packet.DestinationPort, + "error", err, + ) + return im.app.OnTimeoutPacket(ctx, channelVersion, packet, relayer) + } + + transferDetail, err := transfertypes.PacketDataV1ToV2(data) + if err != nil { + im.keeper.Logger(ctx).Error("packetForwardMiddleware error converting FungibleTokenPacket to InternalRepresentation") + return im.app.OnTimeoutPacket(ctx, channelVersion, packet, relayer) + } + + im.keeper.Logger(ctx).Debug("packetForwardMiddleware OnTimeoutPacket", + "sequence", packet.Sequence, + "src-channel", packet.SourceChannel, + "src-port", packet.SourcePort, + "dst-channel", packet.DestinationChannel, + "dst-port", packet.DestinationPort, + "amount", data.Amount, + "denom", data.Denom, + ) + + inFlightPacket, err := im.keeper.TimeoutShouldRetry(ctx, packet) + if inFlightPacket != nil { + im.keeper.RemoveInFlightPacket(ctx, packet) + if err != nil { + // this is a forwarded packet, so override handling to avoid refund from being processed on this chain. + // WriteAcknowledgement with proxied ack to return success/fail to previous chain. + return im.keeper.WriteAcknowledgementForForwardedPacket(ctx, packet, transferDetail, inFlightPacket, newErrorAcknowledgement(err)) + } + // timeout should be retried. In order to do that, we need to handle this timeout to refund on this chain first. + if err := im.app.OnTimeoutPacket(ctx, channelVersion, packet, relayer); err != nil { + return err + } + return im.keeper.RetryTimeout(ctx, packet.SourceChannel, packet.SourcePort, transferDetail, inFlightPacket) + } + + return im.app.OnTimeoutPacket(ctx, channelVersion, packet, relayer) +} + +// SendPacket implements the ICS4 Wrapper interface. +func (im *IBCMiddleware) SendPacket(ctx sdk.Context, sourcePort, sourceChannel string, timeoutHeight clienttypes.Height, timeoutTimestamp uint64, data []byte) (uint64, error) { + return im.keeper.SendPacket(ctx, sourcePort, sourceChannel, timeoutHeight, timeoutTimestamp, data) +} + +// WriteAcknowledgement implements the ICS4 Wrapper interface. 
+func (im *IBCMiddleware) WriteAcknowledgement(ctx sdk.Context, packet ibcexported.PacketI, ack ibcexported.Acknowledgement) error { + return im.keeper.WriteAcknowledgement(ctx, packet, ack) +} + +func (im *IBCMiddleware) GetAppVersion(ctx sdk.Context, portID, channelID string) (string, bool) { + return im.keeper.GetAppVersion(ctx, portID, channelID) +} + +func (im *IBCMiddleware) SetICS4Wrapper(wrapper porttypes.ICS4Wrapper) { + if wrapper == nil { + panic("ICS4Wrapper cannot be nil") + } + im.keeper.WithICS4Wrapper(wrapper) +} + +func (im *IBCMiddleware) SetUnderlyingApplication(app porttypes.IBCModule) { + if im.app != nil { + panic("underlying application already set") + } + // the underlying application must implement the PacketUnmarshalerModule interface + pdApp, ok := app.(porttypes.PacketUnmarshalerModule) + if !ok { + panic(fmt.Errorf("underlying application must implement PacketUnmarshalerModule, got %T", app)) + } + im.app = pdApp +} diff --git a/modules/apps/packet-forward-middleware/ibc_middleware_test.go b/modules/apps/packet-forward-middleware/ibc_middleware_test.go new file mode 100644 index 00000000000..a917767693c --- /dev/null +++ b/modules/apps/packet-forward-middleware/ibc_middleware_test.go @@ -0,0 +1,279 @@ +package packetforward_test + +import ( + "testing" + + "github.com/stretchr/testify/suite" + + packetforward "github.com/cosmos/ibc-go/v10/modules/apps/packet-forward-middleware" + packetforwardkeeper "github.com/cosmos/ibc-go/v10/modules/apps/packet-forward-middleware/keeper" + packetforwardtypes "github.com/cosmos/ibc-go/v10/modules/apps/packet-forward-middleware/types" + transfertypes "github.com/cosmos/ibc-go/v10/modules/apps/transfer/types" + channeltypes "github.com/cosmos/ibc-go/v10/modules/core/04-channel/types" + porttypes "github.com/cosmos/ibc-go/v10/modules/core/05-port/types" + ibctesting "github.com/cosmos/ibc-go/v10/testing" + ibcmock "github.com/cosmos/ibc-go/v10/testing/mock" +) + +type PFMTestSuite struct { + suite.Suite + + coordinator *ibctesting.Coordinator + + chainA *ibctesting.TestChain + chainB *ibctesting.TestChain + chainC *ibctesting.TestChain + + pathAB *ibctesting.Path + pathBC *ibctesting.Path +} + +func TestPFMTestSuite(t *testing.T) { + suite.Run(t, new(PFMTestSuite)) +} + +// setupChains sets up a coordinator with 3 test chains. 
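A hypothetical wiring sketch tying the middleware defined in the file above to the stack builder from the simapp changes earlier in this diff. Only NewIBCMiddleware's signature, the builder calls, and DefaultForwardTransferPacketTimeoutTimestamp are taken from this diff; the PFMKeeper field name comes from the tests below, and whether an application composes the pieces exactly this way (and whether Build() drives the SetUnderlyingApplication/SetICS4Wrapper setters shown above) is an assumption.

    pfm := packetforward.NewIBCMiddleware(app.PFMKeeper, 0, packetforwardkeeper.DefaultForwardTransferPacketTimeoutTimestamp)
    transferStack := porttypes.NewIBCStackBuilder(app.IBCKeeper.ChannelKeeper)
    transferStack.Base(transfer.NewIBCModule(app.TransferKeeper)).Next(pfm) // assumed: Build() wires the setters defined above
    ibcRouter.AddRoute(ibctransfertypes.ModuleName, transferStack.Build())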
+func (s *PFMTestSuite) setupChains() { + s.coordinator = ibctesting.NewCoordinator(s.T(), 3) + s.chainA = s.coordinator.GetChain(ibctesting.GetChainID(1)) + s.chainB = s.coordinator.GetChain(ibctesting.GetChainID(2)) + s.chainC = s.coordinator.GetChain(ibctesting.GetChainID(3)) + + s.pathAB = ibctesting.NewTransferPath(s.chainA, s.chainB) + s.pathAB.Setup() + + s.pathBC = ibctesting.NewTransferPath(s.chainB, s.chainC) + s.pathBC.Setup() +} + +func (s *PFMTestSuite) TestSetICS4Wrapper() { + s.setupChains() + + pfm := s.pktForwardMiddleware(s.chainA) + + s.Require().Panics(func() { + pfm.SetICS4Wrapper(nil) + }, "ICS4Wrapper cannot be nil") + + s.Require().NotPanics(func() { + pfm.SetICS4Wrapper(s.chainA.App.GetIBCKeeper().ChannelKeeper) + }, "ICS4Wrapper should be set without panic") +} + +func (s *PFMTestSuite) TestSetUnderlyingApplication() { + s.setupChains() + + pfmKeeper := s.chainA.GetSimApp().PFMKeeper + + pfm := packetforward.NewIBCMiddleware(pfmKeeper, 0, packetforwardkeeper.DefaultForwardTransferPacketTimeoutTimestamp) + + s.Require().Panics(func() { + pfm.SetUnderlyingApplication(nil) + }, "underlying application cannot be nil") + + s.Require().NotPanics(func() { + pfm.SetUnderlyingApplication(&ibcmock.IBCModule{}) + }, "underlying application should be set without panic") + + s.Require().Panics(func() { + pfm.SetUnderlyingApplication(&ibcmock.IBCModule{}) + }, "underlying application should not be set again") +} + +func (s *PFMTestSuite) TestOnRecvPacket_NonfungibleToken() { + s.setupChains() + + ctx := s.chainA.GetContext() + version := s.pathAB.EndpointA.GetChannel().Version + relayerAddr := s.chainA.SenderAccount.GetAddress() + + pfm := s.pktForwardMiddleware(s.chainA) + ack := pfm.OnRecvPacket(ctx, version, channeltypes.Packet{}, relayerAddr) + s.Require().False(ack.Success()) + + expectedAck := &channeltypes.Acknowledgement{} + err := s.chainA.Codec.UnmarshalJSON(ack.Acknowledgement(), expectedAck) + s.Require().NoError(err) + + // Transfer keeper returns this error if the packet received is not a fungible token. 
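+ // (channeltypes.Packet{} above carries no data, so the transfer application cannot unmarshal
+ // FungibleTokenPacketData and PFM simply passes its error acknowledgement through.)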
+ s.Require().Equal("ABCI code: 12: error handling packet: see events for details", expectedAck.GetError()) +} + +func (s *PFMTestSuite) TestOnRecvPacket_NoMemo() { + s.setupChains() + + ctx := s.chainA.GetContext() + version := s.pathAB.EndpointA.GetChannel().Version + relayerAddr := s.chainA.SenderAccount.GetAddress() + receiverAddr := s.chainB.SenderAccount.GetAddress() + + packet := s.transferPacket(relayerAddr.String(), receiverAddr.String(), s.pathAB, 0, "{}") + + pfm := s.pktForwardMiddleware(s.chainA) + ack := pfm.OnRecvPacket(ctx, version, packet, relayerAddr) + s.Require().True(ack.Success()) + + expectedAck := &channeltypes.Acknowledgement{} + err := s.chainA.Codec.UnmarshalJSON(ack.Acknowledgement(), expectedAck) + s.Require().NoError(err) + + s.Require().Empty(expectedAck.GetError()) + s.Require().ElementsMatch([]byte{1}, expectedAck.GetResult()) +} + +func (s *PFMTestSuite) TestOnRecvPacket_InvalidReceiver() { + s.setupChains() + + ctx := s.chainA.GetContext() + version := s.pathAB.EndpointA.GetChannel().Version + relayerAddr := s.chainA.SenderAccount.GetAddress() + + packet := s.transferPacket(relayerAddr.String(), "", s.pathAB, 0, "") + + pfm := s.pktForwardMiddleware(s.chainA) + ack := pfm.OnRecvPacket(ctx, version, packet, relayerAddr) + s.Require().False(ack.Success()) + + expectedAck := &channeltypes.Acknowledgement{} + err := s.chainA.Codec.UnmarshalJSON(ack.Acknowledgement(), expectedAck) + s.Require().NoError(err) + + s.Require().Equal("ABCI code: 5: error handling packet: see events for details", expectedAck.GetError()) + s.Require().Empty(expectedAck.GetResult()) +} + +func (s *PFMTestSuite) TestOnRecvPacket_NoForward() { + s.setupChains() + + ctx := s.chainA.GetContext() + version := s.pathAB.EndpointA.GetChannel().Version + + senderAddr := s.chainA.SenderAccount.GetAddress() + receiverAddr := s.chainB.SenderAccount.GetAddress() + + packet := s.transferPacket(senderAddr.String(), receiverAddr.String(), s.pathAB, 0, "") + + pfm := s.pktForwardMiddleware(s.chainA) + ack := pfm.OnRecvPacket(ctx, version, packet, senderAddr) + s.Require().True(ack.Success()) + + expectedAck := &channeltypes.Acknowledgement{} + err := s.chainA.Codec.UnmarshalJSON(ack.Acknowledgement(), expectedAck) + s.Require().NoError(err) + s.Require().Empty(expectedAck.GetError()) + + s.Require().Equal([]byte{1}, expectedAck.GetResult()) +} + +func (s *PFMTestSuite) TestOnRecvPacket_RecvPacketFailed() { + s.setupChains() + + transferKeeper := s.chainA.GetSimApp().TransferKeeper + ctx := s.chainA.GetContext() + transferKeeper.SetParams(ctx, transfertypes.Params{ReceiveEnabled: false}) + + version := s.pathAB.EndpointA.GetChannel().Version + + senderAddr := s.chainA.SenderAccount.GetAddress() + receiverAddr := s.chainB.SenderAccount.GetAddress() + metadata := &packetforwardtypes.PacketMetadata{ + Forward: packetforwardtypes.ForwardMetadata{ + Receiver: receiverAddr.String(), + Port: s.pathAB.EndpointA.ChannelConfig.PortID, + Channel: s.pathAB.EndpointA.ChannelID, + }, + } + metadataJSON, err := metadata.ToMemo() + s.Require().NoError(err) + packet := s.transferPacket(senderAddr.String(), receiverAddr.String(), s.pathAB, 0, metadataJSON) + + pfm := s.pktForwardMiddleware(s.chainA) + ack := pfm.OnRecvPacket(ctx, version, packet, senderAddr) + s.Require().False(ack.Success()) + + expectedAck := &channeltypes.Acknowledgement{} + + err = s.chainA.Codec.UnmarshalJSON(ack.Acknowledgement(), expectedAck) + s.Require().NoError(err) + s.Require().Equal("packet-forward-middleware error: error receiving packet: 
ack error: {\"error\":\"ABCI code: 8: error handling packet: see events for details\"}", expectedAck.GetError()) + + s.Require().Equal([]byte(nil), expectedAck.GetResult()) +} + +func (s *PFMTestSuite) TestOnRecvPacket_ForwardNoFee() { + s.setupChains() + + senderAddr := s.chainA.SenderAccount.GetAddress() + receiverAddr := s.chainC.SenderAccount.GetAddress() + metadata := &packetforwardtypes.PacketMetadata{ + Forward: packetforwardtypes.ForwardMetadata{ + Receiver: receiverAddr.String(), + Port: s.pathBC.EndpointA.ChannelConfig.PortID, + Channel: s.pathBC.EndpointA.ChannelID, + }, + } + metadataJSON, err := metadata.ToMemo() + s.Require().NoError(err) + packet := s.transferPacket(senderAddr.String(), receiverAddr.String(), s.pathAB, 0, metadataJSON) + version := s.pathAB.EndpointA.GetChannel().Version + ctxB := s.chainB.GetContext() + + pfmB := s.pktForwardMiddleware(s.chainB) + ack := pfmB.OnRecvPacket(ctxB, version, packet, senderAddr) + s.Require().Nil(ack) + + // Check that chain C has received the packet + ctxC := s.chainC.GetContext() + packet = s.transferPacket(senderAddr.String(), receiverAddr.String(), s.pathBC, 0, "") + version = s.pathBC.EndpointA.GetChannel().Version + + pfmC := s.pktForwardMiddleware(s.chainC) + ack = pfmC.OnRecvPacket(ctxC, version, packet, senderAddr) + s.Require().NotNil(ack) + + // Ack on chainC + packet = s.transferPacket(senderAddr.String(), receiverAddr.String(), s.pathBC, 1, "") + err = pfmC.OnAcknowledgementPacket(ctxC, version, packet, ack.Acknowledgement(), senderAddr) + s.Require().NoError(err) + + // Ack on ChainB + err = pfmB.OnAcknowledgementPacket(ctxB, version, packet, ack.Acknowledgement(), senderAddr) + s.Require().NoError(err) +} + +func (s *PFMTestSuite) pktForwardMiddleware(chain *ibctesting.TestChain) *packetforward.IBCMiddleware { + pfmKeeper := chain.GetSimApp().PFMKeeper + + ibcModule, ok := chain.App.GetIBCKeeper().PortKeeper.Route(transfertypes.ModuleName) + s.Require().True(ok) + + transferStack, ok := ibcModule.(porttypes.PacketUnmarshalerModule) + s.Require().True(ok) + + ibcMiddleware := packetforward.NewIBCMiddleware(pfmKeeper, 0, packetforwardkeeper.DefaultForwardTransferPacketTimeoutTimestamp) + ibcMiddleware.SetUnderlyingApplication(transferStack) + return ibcMiddleware +} + +func (s *PFMTestSuite) transferPacket(sender string, receiver string, path *ibctesting.Path, seq uint64, metadata string) channeltypes.Packet { + s.T().Helper() + tokenPacket := transfertypes.FungibleTokenPacketData{ + Denom: "uatom", + Amount: "100", + Sender: sender, + Receiver: receiver, + Memo: metadata, + } + + tokenData, err := transfertypes.ModuleCdc.MarshalJSON(&tokenPacket) + s.Require().NoError(err) + + return channeltypes.Packet{ + SourcePort: path.EndpointA.ChannelConfig.PortID, + SourceChannel: path.EndpointA.ChannelID, + DestinationPort: path.EndpointB.ChannelConfig.PortID, + DestinationChannel: path.EndpointB.ChannelID, + Data: tokenData, + Sequence: seq, + } +} diff --git a/modules/apps/packet-forward-middleware/keeper/genesis.go b/modules/apps/packet-forward-middleware/keeper/genesis.go new file mode 100644 index 00000000000..18112c2c8b9 --- /dev/null +++ b/modules/apps/packet-forward-middleware/keeper/genesis.go @@ -0,0 +1,39 @@ +package keeper + +import ( + sdk "github.com/cosmos/cosmos-sdk/types" + + "github.com/cosmos/ibc-go/v10/modules/apps/packet-forward-middleware/types" +) + +// TODO: Write unit tests #8321 + +// InitGenesis +func (k *Keeper) InitGenesis(ctx sdk.Context, state types.GenesisState) { + // Initialize store refund 
path for forwarded packets in genesis state that have not yet been acked. + store := k.storeService.OpenKVStore(ctx) + for key, value := range state.InFlightPackets { + bz := k.cdc.MustMarshal(&value) + if err := store.Set([]byte(key), bz); err != nil { + panic(err) + } + } +} + +// ExportGenesis +func (k *Keeper) ExportGenesis(ctx sdk.Context) *types.GenesisState { + store := k.storeService.OpenKVStore(ctx) + + inFlightPackets := make(map[string]types.InFlightPacket) + + itr, err := store.Iterator(nil, nil) + if err != nil { + panic(err) + } + for ; itr.Valid(); itr.Next() { + var inFlightPacket types.InFlightPacket + k.cdc.MustUnmarshal(itr.Value(), &inFlightPacket) + inFlightPackets[string(itr.Key())] = inFlightPacket + } + return &types.GenesisState{InFlightPackets: inFlightPackets} +} diff --git a/modules/apps/packet-forward-middleware/keeper/genesis_test.go b/modules/apps/packet-forward-middleware/keeper/genesis_test.go new file mode 100644 index 00000000000..3612d4d07e0 --- /dev/null +++ b/modules/apps/packet-forward-middleware/keeper/genesis_test.go @@ -0,0 +1,45 @@ +package keeper_test + +import "github.com/cosmos/ibc-go/v10/modules/apps/packet-forward-middleware/types" + +func (s *KeeperTestSuite) TestGenesis() { + sampleInflight := types.InFlightPacket{ + PacketData: []byte{1}, + OriginalSenderAddress: "senderAddress", + RefundChannelId: "refundChainID", + RefundPortId: "refundPortID", + RefundSequence: 1, + PacketSrcPortId: "SourcePort", + PacketSrcChannelId: "SourceChannel", + + PacketTimeoutTimestamp: 1010101010, + PacketTimeoutHeight: "16-200", + + RetriesRemaining: 2, + Timeout: 10101010101, + Nonrefundable: false, + } + + key := types.RefundPacketKey(sampleInflight.PacketSrcChannelId, sampleInflight.PacketSrcPortId, sampleInflight.RefundSequence) + keeper := s.chainA.GetSimApp().PFMKeeper + err := keeper.SetInflightPacket(s.chainA.GetContext(), sampleInflight.PacketSrcChannelId, sampleInflight.PacketSrcPortId, sampleInflight.RefundSequence, &sampleInflight) + s.Require().NoError(err) + + genState := keeper.ExportGenesis(s.chainA.GetContext()) + s.Require().Len(genState.InFlightPackets, 1) + + genesisInflight := genState.InFlightPackets[string(key)] + + s.Require().Equal(genesisInflight, sampleInflight) + + keeper.RemoveInFlightPacket(s.chainA.GetContext(), sampleInflight.ChannelPacket()) + inflightFromStore, err := keeper.GetInflightPacket(s.chainA.GetContext(), sampleInflight.ChannelPacket()) + s.Require().NoError(err) + s.Require().Nil(inflightFromStore) + + keeper.InitGenesis(s.chainA.GetContext(), *genState) + + inflightFromStore, err = keeper.GetInflightPacket(s.chainA.GetContext(), sampleInflight.ChannelPacket()) + s.Require().NoError(err) + s.Require().Equal(sampleInflight, *inflightFromStore) +} diff --git a/modules/apps/packet-forward-middleware/keeper/keeper.go b/modules/apps/packet-forward-middleware/keeper/keeper.go new file mode 100644 index 00000000000..10488219f0c --- /dev/null +++ b/modules/apps/packet-forward-middleware/keeper/keeper.go @@ -0,0 +1,457 @@ +package keeper + +import ( + "errors" + "fmt" + "time" + + "github.com/hashicorp/go-metrics" + + "cosmossdk.io/core/address" + corestore "cosmossdk.io/core/store" + errorsmod "cosmossdk.io/errors" + "cosmossdk.io/log" + sdkmath "cosmossdk.io/math" + + "github.com/cosmos/cosmos-sdk/codec" + "github.com/cosmos/cosmos-sdk/telemetry" + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/cosmos/cosmos-sdk/types/bech32" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" + + 
"github.com/cosmos/ibc-go/v10/modules/apps/packet-forward-middleware/types" + transfertypes "github.com/cosmos/ibc-go/v10/modules/apps/transfer/types" + clienttypes "github.com/cosmos/ibc-go/v10/modules/core/02-client/types" + channeltypes "github.com/cosmos/ibc-go/v10/modules/core/04-channel/types" + porttypes "github.com/cosmos/ibc-go/v10/modules/core/05-port/types" + ibcexported "github.com/cosmos/ibc-go/v10/modules/core/exported" + coremetrics "github.com/cosmos/ibc-go/v10/modules/core/metrics" +) + +var ( + // DefaultTransferPacketTimeoutHeight is the timeout height following IBC defaults + DefaultTransferPacketTimeoutHeight = clienttypes.NewHeight(0, 0) + + // DefaultForwardTransferPacketTimeoutTimestamp is the timeout timestamp following IBC defaults + DefaultForwardTransferPacketTimeoutTimestamp = time.Duration(10) * time.Minute +) + +// Keeper defines the packet forward middleware keeper +type Keeper struct { + storeService corestore.KVStoreService + cdc codec.BinaryCodec + addressCodec address.Codec + + transferKeeper types.TransferKeeper + channelKeeper types.ChannelKeeper + bankKeeper types.BankKeeper + ics4Wrapper porttypes.ICS4Wrapper + + // the address capable of executing a MsgUpdateParams message. Typically, this + // should be the x/gov module account. + authority string +} + +// NewKeeper creates a new forward Keeper instance +func NewKeeper(cdc codec.BinaryCodec, addressCodec address.Codec, storeService corestore.KVStoreService, transferKeeper types.TransferKeeper, channelKeeper types.ChannelKeeper, bankKeeper types.BankKeeper, authority string, +) *Keeper { + return &Keeper{ + cdc: cdc, + addressCodec: addressCodec, + storeService: storeService, + transferKeeper: transferKeeper, + // Defaults to using the channel keeper as the ICS4Wrapper + // This can be overridden later with WithICS4Wrapper (e.g. by the middleware stack wiring) + ics4Wrapper: channelKeeper, + channelKeeper: channelKeeper, + bankKeeper: bankKeeper, + authority: authority, + } +} + +// WithICS4Wrapper sets the ICS4Wrapper for the keeper. +func (k *Keeper) WithICS4Wrapper(ics4Wrapper porttypes.ICS4Wrapper) { + k.ics4Wrapper = ics4Wrapper +} + +// GetAuthority returns the module's authority. +func (k *Keeper) GetAuthority() string { + return k.authority +} + +// SetICS4Wrapper sets the ICS4 wrapper. +func (k *Keeper) SetICS4Wrapper(ics4Wrapper porttypes.ICS4Wrapper) { + k.ics4Wrapper = ics4Wrapper +} + +// ICS4Wrapper gets the ICS4 Wrapper for PFM. +func (k *Keeper) ICS4Wrapper() porttypes.ICS4Wrapper { + return k.ics4Wrapper +} + +// Logger returns a module-specific logger. +func (*Keeper) Logger(ctx sdk.Context) log.Logger { + return ctx.Logger().With("module", "x/"+ibcexported.ModuleName+"-"+types.ModuleName) +} + +// moveFundsToUserRecoverableAccount will move the funds from the escrow account to the user recoverable account +// this is only used when the maximum timeouts have been reached or there is an acknowledgement error and the packet is nonrefundable, +// i.e. an operation has occurred to make the original packet funds inaccessible to the user, e.g. a swap. +// We cannot refund the funds back to the original chain, so we move them to an account on this chain that the user can access. 
+func (k *Keeper) moveFundsToUserRecoverableAccount(ctx sdk.Context, packet channeltypes.Packet, token transfertypes.Token, inFlightPacket *types.InFlightPacket) error { + amount, ok := sdkmath.NewIntFromString(token.GetAmount()) + if !ok { + return fmt.Errorf("failed to parse amount from packet data for forward recovery: %s", token.GetAmount()) + } + denom := token.GetDenom() + coin := sdk.NewCoin(denom.IBCDenom(), amount) + + userAccount, err := k.userRecoverableAccount(inFlightPacket) + if err != nil { + return fmt.Errorf("failed to get user recoverable account: %w", err) + } + + if !denom.HasPrefix(packet.SourcePort, packet.SourceChannel) { + // mint vouchers back to sender + if err := k.bankKeeper.MintCoins(ctx, transfertypes.ModuleName, sdk.NewCoins(coin)); err != nil { + return err + } + + if err := k.bankKeeper.SendCoinsFromModuleToAccount(ctx, transfertypes.ModuleName, userAccount, sdk.NewCoins(coin)); err != nil { + panic(fmt.Sprintf("unable to send coins from module to account despite previously minting coins to module account: %v", err)) + } + return nil + } + + escrowAddress := transfertypes.GetEscrowAddress(packet.SourcePort, packet.SourceChannel) + + if err := k.bankKeeper.SendCoins(ctx, escrowAddress, userAccount, sdk.NewCoins(coin)); err != nil { + return fmt.Errorf("failed to send coins from escrow account to user recoverable account: %w", err) + } + + // update the total escrow amount for the denom. + k.unescrowToken(ctx, coin) + + return nil +} + +// userRecoverableAccount finds an account on this chain that the original sender of the packet can recover funds from. +// If the destination receiver of the original packet is a valid bech32 address for this chain, we use that address. +// Otherwise, if the sender of the original packet is a valid bech32 address for another chain, we translate that address to this chain. +// Note that for the fallback, the coin type of the source chain sender account must be compatible with this chain. +func (k *Keeper) userRecoverableAccount(inFlightPacket *types.InFlightPacket) (sdk.AccAddress, error) { + var originalData transfertypes.FungibleTokenPacketData + err := transfertypes.ModuleCdc.UnmarshalJSON(inFlightPacket.PacketData, &originalData) + if err == nil { // if NO error + sender, err := k.addressCodec.StringToBytes(originalData.Receiver) + if err == nil { // if NO error + return sender, nil + } + } + + _, sender, fallbackErr := bech32.DecodeAndConvert(inFlightPacket.OriginalSenderAddress) + if fallbackErr == nil { // if NO error + return sender, nil + } + + return nil, fmt.Errorf("failed to decode bech32 addresses: %w", errors.Join(err, fallbackErr)) +} + +func (k *Keeper) WriteAcknowledgementForForwardedPacket(ctx sdk.Context, packet channeltypes.Packet, transferDetail transfertypes.InternalTransferRepresentation, inFlightPacket *types.InFlightPacket, ack channeltypes.Acknowledgement) error { + // Lookup module by channel capability + _, found := k.channelKeeper.GetChannel(ctx, inFlightPacket.RefundPortId, inFlightPacket.RefundChannelId) + if !found { + return errors.New("could not retrieve module from port-id") + } + + if ack.Success() { + return k.ics4Wrapper.WriteAcknowledgement(ctx, inFlightPacket.ChannelPacket(), ack) + } + + // For forwarded packets, the funds were moved into an escrow account if the denom originated on this chain. + // On an ack error or timeout on a forwarded packet, the funds in the escrow account + // should be moved to the other escrow account on the other side or burned. 
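+ // Concretely, per the code below: coins escrowed for the forward hop are either moved into the
+ // refund channel's escrow account or burned, while coins that were burned on the forward hop are
+ // minted back into the refund escrow and the total-escrow counter is increased accordingly.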
+ + // If this packet is non-refundable due to some action that took place between the initial ibc transfer and the forward + // we write a successful ack containing details on what happened regardless of ack error or timeout + if inFlightPacket.Nonrefundable { + // We are not allowed to refund back to the source chain. + // attempt to move funds to user recoverable account on this chain. + if err := k.moveFundsToUserRecoverableAccount(ctx, packet, transferDetail.Token, inFlightPacket); err != nil { + return err + } + + ackResult := fmt.Sprintf("packet forward failed after point of no return: %s", ack.GetError()) + newAck := channeltypes.NewResultAcknowledgement([]byte(ackResult)) + + return k.ics4Wrapper.WriteAcknowledgement(ctx, inFlightPacket.ChannelPacket(), newAck) + } + + amount, ok := sdkmath.NewIntFromString(transferDetail.Token.GetAmount()) + if !ok { + return fmt.Errorf("failed to parse amount from packet data for forward refund: %s", transferDetail.Token.GetAmount()) + } + + denom := transferDetail.Token.GetDenom() + coin := sdk.NewCoin(denom.IBCDenom(), amount) + + escrowAddress := transfertypes.GetEscrowAddress(packet.SourcePort, packet.SourceChannel) + refundEscrowAddress := transfertypes.GetEscrowAddress(inFlightPacket.RefundPortId, inFlightPacket.RefundChannelId) + + newToken := sdk.NewCoins(coin) + + // Sender chain is source + if !denom.HasPrefix(packet.SourcePort, packet.SourceChannel) { + // funds were moved to escrow account for transfer, so they need to either: + // - move to the other escrow account, in the case of native denom + // - burn + if !denom.HasPrefix(inFlightPacket.RefundPortId, inFlightPacket.RefundChannelId) { + // transfer funds from escrow account for forwarded packet to escrow account going back for refund. + if err := k.bankKeeper.SendCoins(ctx, escrowAddress, refundEscrowAddress, newToken); err != nil { + return fmt.Errorf("failed to send coins from escrow account to refund escrow account: %w", err) + } + } else { + // Transfer the coins from the escrow account to the module account and burn them. + if err := k.bankKeeper.SendCoinsFromAccountToModule(ctx, escrowAddress, transfertypes.ModuleName, newToken); err != nil { + return fmt.Errorf("failed to send coins from escrow to module account for burn: %w", err) + } + + if err := k.bankKeeper.BurnCoins(ctx, transfertypes.ModuleName, newToken); err != nil { + // NOTE: should not happen as the module account was + // retrieved on the step above and it has enough balance + // to burn. + panic(fmt.Sprintf("cannot burn coins after a successful send from escrow account to module account: %v", err)) + } + + k.unescrowToken(ctx, coin) + } + } else { + // Funds in the escrow account were burned, + // so on a timeout or acknowledgement error we need to mint the funds back to the escrow account. 
+ if err := k.bankKeeper.MintCoins(ctx, transfertypes.ModuleName, newToken); err != nil { + return fmt.Errorf("cannot mint coins to the %s module account: %w", transfertypes.ModuleName, err) + } + + if err := k.bankKeeper.SendCoinsFromModuleToAccount(ctx, transfertypes.ModuleName, refundEscrowAddress, newToken); err != nil { + return fmt.Errorf("cannot send coins from the %s module to the escrow account %s: %w", transfertypes.ModuleName, refundEscrowAddress, err) + } + + currentTotalEscrow := k.transferKeeper.GetTotalEscrowForDenom(ctx, coin.GetDenom()) + newTotalEscrow := currentTotalEscrow.Add(coin) + k.transferKeeper.SetTotalEscrowForDenom(ctx, newTotalEscrow) + } + + return k.ics4Wrapper.WriteAcknowledgement(ctx, inFlightPacket.ChannelPacket(), ack) +} + +// unescrowToken will update the total escrow by deducting the unescrowed token +// from the current total escrow. +func (k *Keeper) unescrowToken(ctx sdk.Context, token sdk.Coin) { + currentTotalEscrow := k.transferKeeper.GetTotalEscrowForDenom(ctx, token.GetDenom()) + newTotalEscrow := currentTotalEscrow.Sub(token) + k.transferKeeper.SetTotalEscrowForDenom(ctx, newTotalEscrow) +} + +func (k *Keeper) ForwardTransferPacket(ctx sdk.Context, inFlightPacket *types.InFlightPacket, srcPacket channeltypes.Packet, srcPacketSender, receiver string, metadata types.ForwardMetadata, token sdk.Coin, maxRetries uint8, timeoutDelta time.Duration, labels []metrics.Label, nonrefundable bool) error { + memo := "" + + // set memo for next transfer with next from this transfer. + if metadata.Next != nil { + var err error + memo, err = metadata.Next.ToMemo() + if err != nil { + k.Logger(ctx).Error("packetForwardMiddleware error marshaling next as JSON", "error", err) + return errorsmod.Wrap(sdkerrors.ErrJSONMarshal, err.Error()) + } + } + + k.Logger(ctx).Debug("packetForwardMiddleware ForwardTransferPacket", + "port", metadata.Port, + "channel", metadata.Channel, + "sender", receiver, + "receiver", metadata.Receiver, + "amount", token.Amount.String(), + "denom", token.Denom, + ) + + msgTransfer := transfertypes.NewMsgTransfer(metadata.Port, metadata.Channel, token, receiver, metadata.Receiver, DefaultTransferPacketTimeoutHeight, uint64(ctx.BlockTime().UnixNano())+uint64(timeoutDelta.Nanoseconds()), memo) + // send tokens to destination + res, err := k.transferKeeper.Transfer(ctx, msgTransfer) + if err != nil { + k.Logger(ctx).Error("packetForwardMiddleware ForwardTransferPacket error", + "port", metadata.Port, + "channel", metadata.Channel, + "sender", receiver, + "receiver", metadata.Receiver, + "amount", token.Amount.String(), + "denom", token.Denom, + "error", err, + ) + // TODO: Should probably have custom errors! 
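+ // For now every failure from the transfer keeper is surfaced as ErrInsufficientFunds with the
+ // underlying error message wrapped in.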
+ return errorsmod.Wrap(sdkerrors.ErrInsufficientFunds, err.Error()) + } + + // Store the following information in keeper: + // key - information about forwarded packet: src_channel (parsedReceiver.Channel), src_port (parsedReceiver.Port), sequence + // value - information about original packet for refunding if necessary: retries, srcPacketSender, srcPacket.DestinationChannel, srcPacket.DestinationPort + if inFlightPacket == nil { + inFlightPacket = &types.InFlightPacket{ + PacketData: srcPacket.Data, + OriginalSenderAddress: srcPacketSender, + RefundChannelId: srcPacket.DestinationChannel, + RefundPortId: srcPacket.DestinationPort, + RefundSequence: srcPacket.Sequence, + PacketSrcPortId: srcPacket.SourcePort, + PacketSrcChannelId: srcPacket.SourceChannel, + + PacketTimeoutTimestamp: srcPacket.TimeoutTimestamp, + PacketTimeoutHeight: srcPacket.TimeoutHeight.String(), + + RetriesRemaining: int32(maxRetries), + Timeout: uint64(timeoutDelta.Nanoseconds()), + Nonrefundable: nonrefundable, + } + } else { + inFlightPacket.RetriesRemaining-- + } + + if err := k.SetInflightPacket(ctx, metadata.Channel, metadata.Port, res.Sequence, inFlightPacket); err != nil { + return err + } + + defer func() { + if token.Amount.IsInt64() { + telemetry.SetGaugeWithLabels([]string{"tx", "msg", "ibc", "transfer"}, float32(token.Amount.Int64()), []metrics.Label{telemetry.NewLabel(coremetrics.LabelDenom, token.Denom)}) + } + + telemetry.IncrCounterWithLabels([]string{"ibc", types.ModuleName, "send"}, 1, labels) + }() + return nil +} + +// TimeoutShouldRetry returns inFlightPacket and no error if retry should be attempted. Error is returned if IBC refund should occur. +func (k *Keeper) TimeoutShouldRetry(ctx sdk.Context, packet channeltypes.Packet) (*types.InFlightPacket, error) { + inFlightPacket, err := k.GetInflightPacket(ctx, packet) + if err != nil { + return nil, err + } + + // Not a forwarded packet. Ignore. 
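+ // (GetInflightPacket returns nil, nil when nothing is stored under the packet's refund key.)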
+ if inFlightPacket == nil { + // nolint:nilnil + return nil, nil + } + + if inFlightPacket.RetriesRemaining <= 0 { + key := types.RefundPacketKey(packet.SourceChannel, packet.SourcePort, packet.Sequence) + k.Logger(ctx).Error("packetForwardMiddleware reached max retries for packet", + "key", string(key), + "original-sender-address", inFlightPacket.OriginalSenderAddress, + "refund-channel-id", inFlightPacket.RefundChannelId, + "refund-port-id", inFlightPacket.RefundPortId, + ) + + return inFlightPacket, fmt.Errorf("giving up on packet on channel (%s) port (%s) after max retries", inFlightPacket.RefundChannelId, inFlightPacket.RefundPortId) + } + + return inFlightPacket, nil +} + +func (k *Keeper) RetryTimeout(ctx sdk.Context, channel, port string, transferDetail transfertypes.InternalTransferRepresentation, inFlightPacket *types.InFlightPacket) error { + // send transfer again + metadata := types.ForwardMetadata{ + Receiver: transferDetail.Receiver, + Channel: channel, + Port: port, + } + + if transferDetail.Memo != "" { + next, _, err := types.GetPacketMetadataFromPacketdata(transferDetail) + if err != nil { + k.Logger(ctx).Error("packetForwardMiddleware error getting next from transfer detail memo", "error", err) + } + + metadata.Next = &next + } + + amount, ok := sdkmath.NewIntFromString(transferDetail.Token.GetAmount()) + if !ok { + k.Logger(ctx).Error("packetForwardMiddleware error parsing amount from string for packetforward retry on timeout", + "original-sender-address", inFlightPacket.OriginalSenderAddress, + "refund-channel-id", inFlightPacket.RefundChannelId, + "refund-port-id", inFlightPacket.RefundPortId, + "retries-remaining", inFlightPacket.RetriesRemaining, + "amount", transferDetail.Token.GetAmount(), + ) + return fmt.Errorf("error parsing amount from string for packetforward retry: %s", transferDetail.Token.GetAmount()) + } + + ibcDenom := transferDetail.Token.Denom.IBCDenom() + + token := sdk.NewCoin(ibcDenom, amount) + + // srcPacket and srcPacketSender are empty because inFlightPacket is non-nil. + return k.ForwardTransferPacket(ctx, inFlightPacket, channeltypes.Packet{}, "", transferDetail.Sender, metadata, token, uint8(inFlightPacket.RetriesRemaining), time.Duration(inFlightPacket.Timeout)*time.Nanosecond, nil, inFlightPacket.Nonrefundable) +} + +func (k *Keeper) SetInflightPacket(ctx sdk.Context, channel, port string, sequence uint64, packet *types.InFlightPacket) error { + key := types.RefundPacketKey(channel, port, sequence) + store := k.storeService.OpenKVStore(ctx) + bz := k.cdc.MustMarshal(packet) + return store.Set(key, bz) +} + +func (k *Keeper) GetInflightPacket(ctx sdk.Context, packet channeltypes.Packet) (*types.InFlightPacket, error) { + store := k.storeService.OpenKVStore(ctx) + key := types.RefundPacketKey(packet.SourceChannel, packet.SourcePort, packet.Sequence) + bz, err := store.Get(key) + if err != nil { + return nil, err + } + if len(bz) == 0 { + // nolint:nilnil + return nil, nil + } + var inFlightPacket types.InFlightPacket + k.cdc.MustUnmarshal(bz, &inFlightPacket) + return &inFlightPacket, nil +} + +func (k *Keeper) RemoveInFlightPacket(ctx sdk.Context, packet channeltypes.Packet) { + store := k.storeService.OpenKVStore(ctx) + key := types.RefundPacketKey(packet.SourceChannel, packet.SourcePort, packet.Sequence) + hasKey, err := store.Has(key) + if err != nil { + panic(err) + } + if !hasKey { + // not a forwarded packet, ignore. + return + } + + // done with packet key now, delete. 
+ if err := store.Delete(key); err != nil { + panic(err) + } +} + +// SendPacket wraps IBC ChannelKeeper's SendPacket function +func (k *Keeper) SendPacket(ctx sdk.Context, sourcePort, sourceChannel string, timeoutHeight clienttypes.Height, timeoutTimestamp uint64, data []byte) (uint64, error) { + return k.ics4Wrapper.SendPacket(ctx, sourcePort, sourceChannel, timeoutHeight, timeoutTimestamp, data) +} + +// WriteAcknowledgement wraps IBC ICS4Wrapper WriteAcknowledgement function. +// ICS29 WriteAcknowledgement is used for asynchronous acknowledgements. +func (k *Keeper) WriteAcknowledgement(ctx sdk.Context, packet ibcexported.PacketI, acknowledgement ibcexported.Acknowledgement) error { + return k.ics4Wrapper.WriteAcknowledgement(ctx, packet, acknowledgement) +} + +// WriteAcknowledgement wraps IBC ICS4Wrapper GetAppVersion function. +func (k *Keeper) GetAppVersion(ctx sdk.Context, portID, channelID string) (string, bool) { + return k.ics4Wrapper.GetAppVersion(ctx, portID, channelID) +} + +// LookupModuleByChannel wraps ChannelKeeper LookupModuleByChannel function. +func (k *Keeper) GetChannel(ctx sdk.Context, portID, channelID string) (channeltypes.Channel, bool) { + return k.channelKeeper.GetChannel(ctx, portID, channelID) +} diff --git a/modules/apps/packet-forward-middleware/keeper/keeper_test.go b/modules/apps/packet-forward-middleware/keeper/keeper_test.go new file mode 100644 index 00000000000..d376e04419b --- /dev/null +++ b/modules/apps/packet-forward-middleware/keeper/keeper_test.go @@ -0,0 +1,368 @@ +package keeper_test + +import ( + "bytes" + "context" + "encoding/hex" + "fmt" + "testing" + "time" + + testifysuite "github.com/stretchr/testify/suite" + + "github.com/cosmos/cosmos-sdk/runtime" + sdk "github.com/cosmos/cosmos-sdk/types" + bankkeeper "github.com/cosmos/cosmos-sdk/x/bank/keeper" + + cmtbytes "github.com/cometbft/cometbft/libs/bytes" + + "github.com/cosmos/ibc-go/v10/modules/apps/packet-forward-middleware/keeper" + pfmtypes "github.com/cosmos/ibc-go/v10/modules/apps/packet-forward-middleware/types" + transfertypes "github.com/cosmos/ibc-go/v10/modules/apps/transfer/types" + clienttypes "github.com/cosmos/ibc-go/v10/modules/core/02-client/types" + channeltypes "github.com/cosmos/ibc-go/v10/modules/core/04-channel/types" + ibctesting "github.com/cosmos/ibc-go/v10/testing" + ibcmock "github.com/cosmos/ibc-go/v10/testing/mock" +) + +type KeeperTestSuite struct { + testifysuite.Suite + + coordinator *ibctesting.Coordinator + + // testing chains used for convenience and readability + chainA *ibctesting.TestChain + chainB *ibctesting.TestChain + chainC *ibctesting.TestChain +} + +func (s *KeeperTestSuite) SetupTest() { + s.coordinator = ibctesting.NewCoordinator(s.T(), 3) + s.chainA = s.coordinator.GetChain(ibctesting.GetChainID(1)) + s.chainB = s.coordinator.GetChain(ibctesting.GetChainID(2)) + s.chainC = s.coordinator.GetChain(ibctesting.GetChainID(3)) +} + +func TestKeeperTestSuite(t *testing.T) { + testifysuite.Run(t, new(KeeperTestSuite)) +} + +func (s *KeeperTestSuite) TestWriteAcknowledgementForForwardedPacket() { + fundAcc := func(ctx sdk.Context, bk bankkeeper.Keeper, acc sdk.AccAddress) { + coins := sdk.NewCoins(sdk.NewInt64Coin(sdk.DefaultBondDenom, 10000000000)) + err := bk.MintCoins(ctx, "transfer", coins) + s.Require().NoError(err) + err = bk.SendCoinsFromModuleToAccount(ctx, "transfer", acc, coins) + s.Require().NoError(err) + } + + var expectedAckBz []byte + + tests := []struct { + name string + ack channeltypes.Acknowledgement + malleate func() + 
nonRefundable bool + }{ + { + name: "Ack success -> propagated to ics4 wrapper", + ack: channeltypes.NewResultAcknowledgement([]byte{1}), + nonRefundable: false, + }, + { + name: "Ack error + Non refundable -> Asset moved to recoverable account then propagate ack to ics4 wrapper", + ack: channeltypes.NewErrorAcknowledgement(nil), + malleate: func() { + ack := channeltypes.NewErrorAcknowledgement(nil) + ackResult := fmt.Sprintf("packet forward failed after point of no return: %s", ack.GetError()) + newAck := channeltypes.NewResultAcknowledgement([]byte(ackResult)) + expectedAckBz = channeltypes.CommitAcknowledgement(newAck.Acknowledgement()) + }, + nonRefundable: true, + }, + { + name: "Ack error + Refundable -> Escrow coin then propagate ack to ics4 wrapper", + ack: channeltypes.NewErrorAcknowledgement(nil), + nonRefundable: false, + }, + } + + for _, tc := range tests { + s.Run(tc.name, func() { + s.SetupTest() + + pathBC := ibctesting.NewTransferPath(s.chainB, s.chainC) + pathBC.Setup() + + ctxB := s.chainB.GetContext() + pfmKeeperB := s.chainB.GetSimApp().PFMKeeper + + ctxC := s.chainC.GetContext() + pfmKeeperC := s.chainC.GetSimApp().PFMKeeper + + srcPacket := channeltypes.Packet{ + Data: []byte{1}, + Sequence: 1, + SourcePort: pathBC.EndpointA.ChannelConfig.PortID, + SourceChannel: pathBC.EndpointA.ChannelID, + DestinationPort: pathBC.EndpointB.ChannelConfig.PortID, + DestinationChannel: pathBC.EndpointB.ChannelID, + TimeoutHeight: clienttypes.Height{ + RevisionNumber: 10, + RevisionHeight: 100, + }, + TimeoutTimestamp: 10101001, + } + + retries := uint8(2) + timeout := time.Duration(1010101010) + + initialSender := s.chainA.SenderAccount.GetAddress() + // Simulate an "Override Receiver" on destination chain. + intermediateAcc := s.chainB.SenderAccounts[1].SenderAccount.GetAddress() + finalReceiver := s.chainB.SenderAccount.GetAddress() + + metadata := pfmtypes.ForwardMetadata{ + Receiver: finalReceiver.String(), + Port: pathBC.EndpointA.ChannelConfig.PortID, + Channel: pathBC.EndpointA.ChannelID, + Timeout: timeout, + Retries: &retries, + Next: nil, + } + + fundAcc(ctxB, s.chainB.GetSimApp().BankKeeper, intermediateAcc) + + err := pfmKeeperB.ForwardTransferPacket(ctxB, nil, srcPacket, initialSender.String(), intermediateAcc.String(), metadata, ibctesting.TestCoin, 2, timeout, nil, tc.nonRefundable) + s.Require().NoError(err) + + inflightPacket, err := pfmKeeperB.GetInflightPacket(ctxB, srcPacket) + s.Require().NoError(err) + + token := transfertypes.Token{ + Denom: transfertypes.ExtractDenomFromPath(ibctesting.TestCoin.GetDenom()), + Amount: ibctesting.DefaultCoinAmount.String(), + } + data := transfertypes.NewInternalTransferRepresentation(token, initialSender.String(), finalReceiver.String(), "") + expectedAckBz = channeltypes.CommitAcknowledgement(tc.ack.Acknowledgement()) + if tc.malleate != nil { + tc.malleate() + } + + // Escrow on chainC + escrow := transfertypes.GetEscrowAddress(srcPacket.SourcePort, srcPacket.SourceChannel) + fundAcc(ctxC, s.chainC.GetSimApp().BankKeeper, escrow) + + err = pfmKeeperC.WriteAcknowledgementForForwardedPacket(ctxC, srcPacket, data, inflightPacket, tc.ack) + s.Require().NoError(err) + + ackBZFromStore := s.chainC.GetAcknowledgement(srcPacket) + s.Require().True(bytes.Equal(expectedAckBz, ackBZFromStore)) + }) + } +} + +func (s *KeeperTestSuite) TestForwardTransferPacket() { + var ( + pfmKeeper *keeper.Keeper + initialSender string + finalReceiver string + ) + tests := []struct { + name string + malleate func() + }{ + { + name: "success: standard 
cosmos address", + malleate: func() {}, + }, + { + name: "success: with hex address codec", + malleate: func() { + pfmKeeper = keeper.NewKeeper(s.chainA.GetSimApp().AppCodec(), ibcmock.TestAddressCodec{}, runtime.NewKVStoreService(s.chainA.GetSimApp().GetKey(pfmtypes.StoreKey)), &transferMock{}, s.chainA.GetSimApp().IBCKeeper.ChannelKeeper, s.chainA.GetSimApp().BankKeeper, "authority") + + initialSender = hex.EncodeToString(s.chainA.SenderAccount.GetAddress().Bytes()) + finalReceiver = hex.EncodeToString(s.chainB.SenderAccount.GetAddress().Bytes()) + }, + }, + } + + for _, tc := range tests { + s.Run(tc.name, func() { + s.SetupTest() + path := ibctesting.NewTransferPath(s.chainA, s.chainB) + path.Setup() + + pfmKeeper = keeper.NewKeeper(s.chainA.GetSimApp().AppCodec(), s.chainA.GetSimApp().AccountKeeper.AddressCodec(), runtime.NewKVStoreService(s.chainA.GetSimApp().GetKey(pfmtypes.StoreKey)), &transferMock{}, s.chainA.GetSimApp().IBCKeeper.ChannelKeeper, s.chainA.GetSimApp().BankKeeper, "authority") + + ctx := s.chainA.GetContext() + srcPacket := channeltypes.Packet{ + Data: []byte{1}, + Sequence: 1, + SourcePort: path.EndpointA.ChannelConfig.PortID, + SourceChannel: path.EndpointA.ChannelID, + DestinationPort: path.EndpointB.ChannelConfig.PortID, + DestinationChannel: path.EndpointB.ChannelID, + TimeoutHeight: clienttypes.Height{ + RevisionNumber: 10, + RevisionHeight: 100, + }, + TimeoutTimestamp: 10101001, + } + + retries := uint8(2) + timeout := time.Duration(1010101010) + nonRefundable := false + + metadata := pfmtypes.ForwardMetadata{ + Receiver: "first-receiver", + Port: path.EndpointA.ChannelConfig.PortID, + Channel: path.EndpointA.ChannelID, + Timeout: timeout, + Retries: &retries, + Next: nil, + } + + initialSender = s.chainA.SenderAccount.GetAddress().String() + finalReceiver = s.chainB.SenderAccount.GetAddress().String() + + tc.malleate() + + err := pfmKeeper.ForwardTransferPacket(ctx, nil, srcPacket, initialSender, finalReceiver, metadata, sdk.NewInt64Coin("denom", 1000), 2, timeout, nil, nonRefundable) + s.Require().NoError(err) + + // Get the inflight packer + inflightPacket, err := pfmKeeper.GetInflightPacket(ctx, srcPacket) + s.Require().NoError(err) + + s.Require().Equal(inflightPacket.RetriesRemaining, int32(retries)) + + // Call the same function again with inflight packet. Num retries should decrease. 
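+ // (When handed an existing InFlightPacket, ForwardTransferPacket decrements RetriesRemaining in
+ // place instead of creating a new entry.)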
+ err = pfmKeeper.ForwardTransferPacket(ctx, inflightPacket, srcPacket, initialSender, finalReceiver, metadata, sdk.NewInt64Coin("denom", 1000), 2, timeout, nil, nonRefundable) + s.Require().NoError(err) + + // Get the inflight packer + inflightPacket2, err := pfmKeeper.GetInflightPacket(ctx, srcPacket) + s.Require().NoError(err) + + s.Require().Equal(inflightPacket.RetriesRemaining, inflightPacket2.RetriesRemaining) + s.Require().Equal(int32(retries-1), inflightPacket.RetriesRemaining) + }) + } +} + +func (s *KeeperTestSuite) TestForwardTransferPacketWithNext() { + s.SetupTest() + path := ibctesting.NewTransferPath(s.chainA, s.chainB) + path.Setup() + + pfmKeeper := keeper.NewKeeper(s.chainA.GetSimApp().AppCodec(), s.chainA.GetSimApp().AccountKeeper.AddressCodec(), runtime.NewKVStoreService(s.chainA.GetSimApp().GetKey(pfmtypes.StoreKey)), &transferMock{}, s.chainA.GetSimApp().IBCKeeper.ChannelKeeper, s.chainA.GetSimApp().BankKeeper, "authority") + ctx := s.chainA.GetContext() + srcPacket := channeltypes.Packet{ + Data: []byte{1}, + Sequence: 1, + SourcePort: path.EndpointA.ChannelConfig.PortID, + SourceChannel: path.EndpointA.ChannelID, + DestinationPort: path.EndpointB.ChannelConfig.PortID, + DestinationChannel: path.EndpointB.ChannelID, + TimeoutHeight: clienttypes.Height{ + RevisionNumber: 10, + RevisionHeight: 100, + }, + TimeoutTimestamp: 10101001, + } + + retries := uint8(2) + timeout := time.Duration(1010101010) + nonRefundable := false + + // Test with valid metadata.Next - it should be a *PacketMetadata + nextPacketMetadata := &pfmtypes.PacketMetadata{ + Forward: pfmtypes.ForwardMetadata{ + Receiver: "next-receiver", + Port: "port-1", + Channel: "channel-1", + Timeout: timeout, + Retries: &retries, + Next: nil, + }, + } + + metadata := pfmtypes.ForwardMetadata{ + Receiver: "first-receiver", + Port: path.EndpointA.ChannelConfig.PortID, + Channel: path.EndpointA.ChannelID, + Timeout: timeout, + Retries: &retries, + Next: nextPacketMetadata, + } + + initialSender := s.chainA.SenderAccount.GetAddress() + finalReceiver := s.chainB.SenderAccount.GetAddress() + + err := pfmKeeper.ForwardTransferPacket(ctx, nil, srcPacket, initialSender.String(), finalReceiver.String(), metadata, sdk.NewInt64Coin("denom", 1000), 2, timeout, nil, nonRefundable) + s.Require().NoError(err) + + // Verify the inflight packet was created + inflightPacket, err := pfmKeeper.GetInflightPacket(ctx, srcPacket) + s.Require().NoError(err) + s.Require().NotNil(inflightPacket) + s.Require().Equal(inflightPacket.RetriesRemaining, int32(retries)) +} + +func (s *KeeperTestSuite) TestRetryTimeoutErrorGettingNext() { + s.SetupTest() + path := ibctesting.NewTransferPath(s.chainA, s.chainB) + path.Setup() + + pfmKeeper := keeper.NewKeeper(s.chainA.GetSimApp().AppCodec(), s.chainA.GetSimApp().AccountKeeper.AddressCodec(), runtime.NewKVStoreService(s.chainA.GetSimApp().GetKey(pfmtypes.StoreKey)), &transferMock{}, s.chainA.GetSimApp().IBCKeeper.ChannelKeeper, s.chainA.GetSimApp().BankKeeper, "authority") + ctx := s.chainA.GetContext() + + // Create a transfer detail with invalid memo that will cause GetPacketMetadataFromPacketdata to fail + transferDetail := transfertypes.InternalTransferRepresentation{ + Token: transfertypes.Token{ + Denom: transfertypes.Denom{Base: "denom"}, + Amount: "1000", + }, + Sender: "sender", + Receiver: "receiver", + Memo: `{"invalid_json": malformed}`, // This will cause JSON parsing to fail + } + + inFlightPacket := &pfmtypes.InFlightPacket{ + RetriesRemaining: 1, + Timeout: 1000, + Nonrefundable: 
false, + } + + err := pfmKeeper.RetryTimeout(ctx, path.EndpointA.ChannelID, path.EndpointA.ChannelConfig.PortID, transferDetail, inFlightPacket) + // The function should still succeed since it only logs the error and continues + s.Require().NoError(err) +} + +type transferMock struct{} + +func (*transferMock) Transfer(_ context.Context, _ *transfertypes.MsgTransfer) (*transfertypes.MsgTransferResponse, error) { + return &transfertypes.MsgTransferResponse{ + Sequence: 1, + }, nil +} + +func (*transferMock) GetDenom(_ sdk.Context, _ cmtbytes.HexBytes) (transfertypes.Denom, bool) { + return transfertypes.Denom{}, false +} + +func (*transferMock) GetTotalEscrowForDenom(ctx sdk.Context, denom string) sdk.Coin { + return sdk.Coin{} +} + +func (*transferMock) SetTotalEscrowForDenom(ctx sdk.Context, coin sdk.Coin) { +} + +func (*transferMock) DenomPathFromHash(ctx sdk.Context, ibcDenom string) (string, error) { + return "", nil +} + +func (*transferMock) GetPort(ctx sdk.Context) string { + return "" +} diff --git a/modules/apps/packet-forward-middleware/keeper/migrator.go b/modules/apps/packet-forward-middleware/keeper/migrator.go new file mode 100644 index 00000000000..4c3e78a2357 --- /dev/null +++ b/modules/apps/packet-forward-middleware/keeper/migrator.go @@ -0,0 +1,24 @@ +package keeper + +import ( + sdk "github.com/cosmos/cosmos-sdk/types" + + "github.com/cosmos/ibc-go/v10/modules/apps/packet-forward-middleware/migrations/v3" +) + +// Migrator is a struct for handling in-place state migrations. +type Migrator struct { + keeper *Keeper +} + +func NewMigrator(k *Keeper) Migrator { + return Migrator{ + keeper: k, + } +} + +// Migrate2to3 migrates the module state from the consensus version 2 to +// version 3 +func (m Migrator) Migrate2to3(ctx sdk.Context) error { + return v3.Migrate(ctx, m.keeper.bankKeeper, m.keeper.channelKeeper, m.keeper.transferKeeper) +} diff --git a/modules/apps/packet-forward-middleware/keeper/migrator_test.go b/modules/apps/packet-forward-middleware/keeper/migrator_test.go new file mode 100644 index 00000000000..a19cb6b0e68 --- /dev/null +++ b/modules/apps/packet-forward-middleware/keeper/migrator_test.go @@ -0,0 +1,147 @@ +package keeper_test + +import ( + "math/rand" + "time" + + sdk "github.com/cosmos/cosmos-sdk/types" + + pfmkeeper "github.com/cosmos/ibc-go/v10/modules/apps/packet-forward-middleware/keeper" + "github.com/cosmos/ibc-go/v10/modules/apps/packet-forward-middleware/migrations/v3" + pfmtypes "github.com/cosmos/ibc-go/v10/modules/apps/packet-forward-middleware/types" + transfertypes "github.com/cosmos/ibc-go/v10/modules/apps/transfer/types" + ibctesting "github.com/cosmos/ibc-go/v10/testing" +) + +func (s *KeeperTestSuite) TestMigrator() { + retries := uint8(2) + var ( + accA, accB, accC, port string + firstHopMetadata *pfmtypes.PacketMetadata + err error + nextMemo string + pathAB, pathBC *ibctesting.Path + ) + + tests := []struct { + name string + malleate func() + shouldEmpty bool + }{ + { + name: "A -> B -> C. A and B escrowed", + malleate: func() { + firstHopMetadata = &pfmtypes.PacketMetadata{ + Forward: pfmtypes.ForwardMetadata{ + Receiver: accC, + Port: port, + Channel: pathBC.EndpointA.ChannelID, + Timeout: time.Duration(100000000000), + Retries: &retries, + }, + } + nextMemo, err = firstHopMetadata.ToMemo() + s.Require().NoError(err) + }, + shouldEmpty: false, + }, + { + name: "A -> B -> A. 
Everything unescrowed", + malleate: func() { + firstHopMetadata = &pfmtypes.PacketMetadata{ + Forward: pfmtypes.ForwardMetadata{ + Receiver: accA, + Port: port, + Channel: pathAB.EndpointB.ChannelID, + Timeout: time.Duration(100000000000), + Retries: &retries, + }, + } + nextMemo, err = firstHopMetadata.ToMemo() + s.Require().NoError(err) + }, + shouldEmpty: true, + }, + } + for _, tc := range tests { + s.Run(tc.name, func() { + s.SetupTest() + + accA = s.chainA.SenderAccount.GetAddress().String() + accB = s.chainB.SenderAccount.GetAddress().String() + accC = s.chainC.SenderAccount.GetAddress().String() + + pathAB = ibctesting.NewTransferPath(s.chainA, s.chainB) + pathAB.Setup() + + pathBC = ibctesting.NewTransferPath(s.chainB, s.chainC) + pathBC.Setup() + + transferKeeperA := s.chainA.GetSimApp().TransferKeeper + transferKeeperB := s.chainB.GetSimApp().TransferKeeper + + port = pathBC.EndpointA.ChannelConfig.PortID + + ctxA := s.chainA.GetContext() + ctxB := s.chainB.GetContext() + + denomA := transfertypes.Denom{Base: sdk.DefaultBondDenom} + randSendAmt := int64(rand.Intn(10000000000) + 1000000) + sendCoin := sdk.NewInt64Coin(denomA.IBCDenom(), randSendAmt) + + tc.malleate() // Hammer time!!! + + transferMsg := transfertypes.NewMsgTransfer(port, pathAB.EndpointA.ChannelID, sendCoin, accA, accB, s.chainB.GetTimeoutHeight(), 0, nextMemo) + result, err := s.chainA.SendMsgs(transferMsg) + s.Require().NoError(err) + + // Transfer escrowed on chainA and sent amount to chainB + totalEscrowA, _ := v3.TotalEscrow(ctxA, s.chainA.GetSimApp().BankKeeper, s.chainA.App.GetIBCKeeper().ChannelKeeper, port) + s.Require().Equal(randSendAmt, totalEscrowA[0].Amount.Int64()) + + // ChainB has no escrow until the packet is relayed. + totalEscrowB, _ := v3.TotalEscrow(ctxB, s.chainB.GetSimApp().BankKeeper, s.chainB.App.GetIBCKeeper().ChannelKeeper, port) + s.Require().Empty(totalEscrowB) + + packet, err := ibctesting.ParseV1PacketFromEvents(result.Events) + s.Require().NoError(err) + + err = pathAB.RelayPacket(packet) + s.Require().ErrorContains(err, "acknowledgement event attribute not found") + + // After the relay, we have amount escrowed on chainB + totalEscrowA, _ = v3.TotalEscrow(ctxA, s.chainA.GetSimApp().BankKeeper, s.chainA.App.GetIBCKeeper().ChannelKeeper, port) + s.Require().Equal(randSendAmt, totalEscrowA[0].Amount.Int64()) + + totalEscrowB, _ = v3.TotalEscrow(ctxB, s.chainB.GetSimApp().BankKeeper, s.chainB.App.GetIBCKeeper().ChannelKeeper, port) + if tc.shouldEmpty { + s.Require().Empty(totalEscrowB) + } else { + s.Require().Equal(randSendAmt, totalEscrowB[0].Amount.Int64()) + } + + // Artificially set escrow balance to 0. So that we can show that after the migration, balances are restored. 
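+ // (Migrate2to3 recomputes the per-denom totals from the escrow accounts' bank balances, so the
+ // counters zeroed here are expected to be restored by the migration.)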
+ transferKeeperA.SetTotalEscrowForDenom(ctxA, sdk.NewInt64Coin(totalEscrowA[0].Denom, 0)) + if !tc.shouldEmpty { + transferKeeperB.SetTotalEscrowForDenom(ctxB, sdk.NewInt64Coin(totalEscrowB[0].Denom, 0)) + } + + // Run the migration + migratorA := pfmkeeper.NewMigrator(s.chainA.GetSimApp().PFMKeeper) + err = migratorA.Migrate2to3(ctxA) + s.Require().NoError(err) + + migratorB := pfmkeeper.NewMigrator(s.chainB.GetSimApp().PFMKeeper) + err = migratorB.Migrate2to3(ctxB) + s.Require().NoError(err) + + denomEscrowA := transferKeeperA.GetTotalEscrowForDenom(ctxA, totalEscrowA[0].Denom) + s.Require().Equal(randSendAmt, denomEscrowA.Amount.Int64()) + + if !tc.shouldEmpty { + denomEscrowB := transferKeeperB.GetTotalEscrowForDenom(ctxB, totalEscrowB[0].Denom) + s.Require().Equal(randSendAmt, denomEscrowB.Amount.Int64()) + } + }) + } +} diff --git a/modules/apps/packet-forward-middleware/migrations/v3/migrate.go b/modules/apps/packet-forward-middleware/migrations/v3/migrate.go new file mode 100644 index 00000000000..13349db70d8 --- /dev/null +++ b/modules/apps/packet-forward-middleware/migrations/v3/migrate.go @@ -0,0 +1,55 @@ +package v3 + +import ( + sdk "github.com/cosmos/cosmos-sdk/types" + + "github.com/cosmos/ibc-go/v10/modules/apps/packet-forward-middleware/types" + transfertypes "github.com/cosmos/ibc-go/v10/modules/apps/transfer/types" + channeltypes "github.com/cosmos/ibc-go/v10/modules/core/04-channel/types" +) + +// Migrate migrates the x/packetforward module state from the consensus version +// 2 to version 3 +func Migrate(ctx sdk.Context, bankKeeper types.BankKeeper, channelKeeper types.ChannelKeeper, transferKeeper types.TransferKeeper) error { + logger := ctx.Logger() + + portID := transferKeeper.GetPort(ctx) + expectedTotalEscrowed, transferChannels := TotalEscrow(ctx, bankKeeper, channelKeeper, portID) + + logger.Info( + "Calculated expected total escrowed from escrow account bank balances", + "num channels", len(transferChannels), + "bank total escrowed", expectedTotalEscrowed, + ) + + // 4. Set the total escrowed for each denom + for _, totalEscrowCoin := range expectedTotalEscrowed { + prevDenomEscrow := transferKeeper.GetTotalEscrowForDenom(ctx, totalEscrowCoin.Denom) + + transferKeeper.SetTotalEscrowForDenom(ctx, totalEscrowCoin) + + logger.Info( + "Corrected total escrow for denom to match escrow account bank balances", + "denom", totalEscrowCoin.Denom, + "previous escrow", prevDenomEscrow, + "new escrow", totalEscrowCoin, + ) + } + + return nil +} + +func TotalEscrow(ctx sdk.Context, bankKeeper types.BankKeeper, channelKeeper types.ChannelKeeper, portID string) (sdk.Coins, []channeltypes.IdentifiedChannel) { + expectedTotalEscrowed := sdk.NewCoins() + // 1. Iterate over all IBC transfer channels + transferChannels := channelKeeper.GetAllChannelsWithPortPrefix(ctx, portID) + for _, channel := range transferChannels { + // 2. For each channel, get the escrow address and corresponding bank balance + escrowAddress := transfertypes.GetEscrowAddress(portID, channel.ChannelId) + bankBalances := bankKeeper.GetAllBalances(ctx, escrowAddress) + + // 3. Aggregate the bank balances to calculate the expected total escrowed + expectedTotalEscrowed = expectedTotalEscrowed.Add(bankBalances...) 
+ } + return expectedTotalEscrowed, transferChannels +} diff --git a/modules/apps/packet-forward-middleware/module.go b/modules/apps/packet-forward-middleware/module.go new file mode 100644 index 00000000000..a0f94e7b0e4 --- /dev/null +++ b/modules/apps/packet-forward-middleware/module.go @@ -0,0 +1,130 @@ +package packetforward + +import ( + "encoding/json" + "fmt" + + "github.com/grpc-ecosystem/grpc-gateway/runtime" + "github.com/spf13/cobra" + + "github.com/cosmos/cosmos-sdk/client" + "github.com/cosmos/cosmos-sdk/codec" + codectypes "github.com/cosmos/cosmos-sdk/codec/types" + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/cosmos/cosmos-sdk/types/module" + + abci "github.com/cometbft/cometbft/abci/types" + + "github.com/cosmos/ibc-go/v10/modules/apps/packet-forward-middleware/keeper" + "github.com/cosmos/ibc-go/v10/modules/apps/packet-forward-middleware/types" +) + +var ( + _ module.AppModule = AppModule{} + _ module.AppModuleBasic = AppModuleBasic{} +) + +// AppModuleBasic is the packetforward AppModuleBasic +type AppModuleBasic struct{} + +// Name implements AppModuleBasic interface +func (AppModuleBasic) Name() string { + return types.ModuleName +} + +// RegisterLegacyAminoCodec implements AppModuleBasic interface +func (AppModuleBasic) RegisterLegacyAminoCodec(cdc *codec.LegacyAmino) { + types.RegisterLegacyAminoCodec(cdc) +} + +// RegisterInterfaces registers module concrete types into protobuf Any. +func (AppModuleBasic) RegisterInterfaces(r codectypes.InterfaceRegistry) { + types.RegisterInterfaces(r) +} + +// DefaultGenesis returns default genesis state as raw bytes for the ibc +// packetforward module. +func (AppModuleBasic) DefaultGenesis(cdc codec.JSONCodec) json.RawMessage { + return cdc.MustMarshalJSON(types.DefaultGenesisState()) +} + +// ValidateGenesis performs genesis state validation for the packetforward module. +func (AppModuleBasic) ValidateGenesis(cdc codec.JSONCodec, _ client.TxEncodingConfig, bz json.RawMessage) error { + var gs types.GenesisState + if err := cdc.UnmarshalJSON(bz, &gs); err != nil { + return fmt.Errorf("failed to unmarshal %s genesis state: %w", types.ModuleName, err) + } + + return gs.Validate() +} + +// RegisterGRPCGatewayRoutes registers the gRPC Gateway routes for the packetforward module. +func (AppModuleBasic) RegisterGRPCGatewayRoutes(clientCtx client.Context, mux *runtime.ServeMux) { +} + +// GetTxCmd implements AppModuleBasic interface +func (AppModuleBasic) GetTxCmd() *cobra.Command { + return nil +} + +// GetQueryCmd implements AppModuleBasic interface +func (AppModuleBasic) GetQueryCmd() *cobra.Command { + return nil +} + +// AppModule represents the AppModule for this module +type AppModule struct { + AppModuleBasic + keeper *keeper.Keeper +} + +// NewAppModule creates a new packetforward module +func NewAppModule(k *keeper.Keeper) AppModule { + return AppModule{ + keeper: k, + } +} + +// IsOnePerModuleType implements the depinject.OnePerModuleType interface. +func (AppModule) IsOnePerModuleType() {} + +// IsAppModule implements the appmodule.AppModule interface. +func (AppModule) IsAppModule() {} + +// QuerierRoute implements the AppModule interface +func (AppModule) QuerierRoute() string { + return types.QuerierRoute +} + +// RegisterServices registers module services. 
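+// Only the consensus version 2 -> 3 store migration is registered here; the module exposes no
+// Msg or Query services of its own.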
+func (am AppModule) RegisterServices(cfg module.Configurator) { + m := keeper.NewMigrator(am.keeper) + if err := cfg.RegisterMigration(types.ModuleName, 2, m.Migrate2to3); err != nil { + panic(fmt.Sprintf("failed to migrate x/%s from version 2 to 3: %v", types.ModuleName, err)) + } +} + +// InitGenesis performs genesis initialization for the packetforward module. It returns +// no validator updates. +func (am AppModule) InitGenesis(ctx sdk.Context, cdc codec.JSONCodec, data json.RawMessage) []abci.ValidatorUpdate { + err := am.ValidateGenesis(cdc, nil, data) + if err != nil { + panic(err) + } + + var genesisState types.GenesisState + cdc.MustUnmarshalJSON(data, &genesisState) + am.keeper.InitGenesis(ctx, genesisState) + + return []abci.ValidatorUpdate{} +} + +// ExportGenesis returns the exported genesis state as raw bytes for the packetforward +// module. +func (am AppModule) ExportGenesis(ctx sdk.Context, cdc codec.JSONCodec) json.RawMessage { + gs := am.keeper.ExportGenesis(ctx) + return cdc.MustMarshalJSON(gs) +} + +// ConsensusVersion implements AppModule/ConsensusVersion. +func (AppModule) ConsensusVersion() uint64 { return 3 } diff --git a/modules/apps/packet-forward-middleware/types/codec.go b/modules/apps/packet-forward-middleware/types/codec.go new file mode 100644 index 00000000000..3a4f6648512 --- /dev/null +++ b/modules/apps/packet-forward-middleware/types/codec.go @@ -0,0 +1,28 @@ +package types + +import ( + "github.com/cosmos/cosmos-sdk/codec" + "github.com/cosmos/cosmos-sdk/codec/types" + cryptocodec "github.com/cosmos/cosmos-sdk/crypto/codec" + sdk "github.com/cosmos/cosmos-sdk/types" +) + +var amino = codec.NewLegacyAmino() + +func init() { + RegisterLegacyAminoCodec(amino) + cryptocodec.RegisterCrypto(amino) + sdk.RegisterLegacyAminoCodec(amino) + + // Register all Amino interfaces and concrete types on the authz Amino codec + // so that this can later be used to properly serialize MsgGrant and MsgExec + // instances. + // RegisterLegacyAminoCodec(authzcodec.Amino) // TODO(bez): Investigate this. 
+} + +// RegisterLegacyAminoCodec registers concrete types on the LegacyAmino codec +func RegisterLegacyAminoCodec(cdc *codec.LegacyAmino) { +} + +func RegisterInterfaces(registry types.InterfaceRegistry) { +} diff --git a/modules/apps/packet-forward-middleware/types/errors.go b/modules/apps/packet-forward-middleware/types/errors.go new file mode 100644 index 00000000000..aa63493fafd --- /dev/null +++ b/modules/apps/packet-forward-middleware/types/errors.go @@ -0,0 +1,10 @@ +package types + +import ( + errorsmod "cosmossdk.io/errors" +) + +var ( + ErrMetadataKeyNotFound = errorsmod.Register(ModuleName, 1, "metadata key not found in packet data") + ErrInvalidForwardMetadata = errorsmod.Register(ModuleName, 2, "invalid forward metadata") +) diff --git a/modules/apps/packet-forward-middleware/types/expected_keepers.go b/modules/apps/packet-forward-middleware/types/expected_keepers.go new file mode 100644 index 00000000000..72d29fbeb72 --- /dev/null +++ b/modules/apps/packet-forward-middleware/types/expected_keepers.go @@ -0,0 +1,46 @@ +package types + +import ( + "context" + + sdk "github.com/cosmos/cosmos-sdk/types" + + cmtbytes "github.com/cometbft/cometbft/libs/bytes" + + transfertypes "github.com/cosmos/ibc-go/v10/modules/apps/transfer/types" + channeltypes "github.com/cosmos/ibc-go/v10/modules/core/04-channel/types" + porttypes "github.com/cosmos/ibc-go/v10/modules/core/05-port/types" +) + +// TransferKeeper defines the expected transfer keeper +type TransferKeeper interface { + Transfer(ctx context.Context, msg *transfertypes.MsgTransfer) (*transfertypes.MsgTransferResponse, error) + GetDenom(ctx sdk.Context, denomHash cmtbytes.HexBytes) (transfertypes.Denom, bool) + GetTotalEscrowForDenom(ctx sdk.Context, denom string) sdk.Coin + SetTotalEscrowForDenom(ctx sdk.Context, coin sdk.Coin) + DenomPathFromHash(ctx sdk.Context, ibcDenom string) (string, error) + + // Only used for v3 migration + GetPort(ctx sdk.Context) string +} + +// ChannelKeeper defines the expected IBC channel keeper +type ChannelKeeper interface { + porttypes.ICS4Wrapper + GetChannel(ctx sdk.Context, srcPort, srcChan string) (channel channeltypes.Channel, found bool) + + // Only used for v3 migration + GetAllChannelsWithPortPrefix(ctx sdk.Context, portPrefix string) []channeltypes.IdentifiedChannel +} + +// BankKeeper defines the expected bank keeper +type BankKeeper interface { + SendCoins(ctx context.Context, fromAddr sdk.AccAddress, toAddr sdk.AccAddress, amt sdk.Coins) error + SendCoinsFromModuleToAccount(ctx context.Context, senderModule string, recipientAddr sdk.AccAddress, amt sdk.Coins) error + SendCoinsFromAccountToModule(ctx context.Context, senderAddr sdk.AccAddress, recipientModule string, amt sdk.Coins) error + MintCoins(ctx context.Context, moduleName string, amt sdk.Coins) error + BurnCoins(ctx context.Context, moduleName string, amt sdk.Coins) error + + // Only used for v3 migration + GetAllBalances(ctx context.Context, addr sdk.AccAddress) sdk.Coins +} diff --git a/modules/apps/packet-forward-middleware/types/forward.go b/modules/apps/packet-forward-middleware/types/forward.go new file mode 100644 index 00000000000..7c6132c7cfe --- /dev/null +++ b/modules/apps/packet-forward-middleware/types/forward.go @@ -0,0 +1,207 @@ +package types + +import ( + "encoding/json" + "errors" + "fmt" + "time" + + errorsmod "cosmossdk.io/errors" + + host "github.com/cosmos/ibc-go/v10/modules/core/24-host" + ibcexported "github.com/cosmos/ibc-go/v10/modules/core/exported" +) + +// PacketMetadata represents the metadata for 
a packet in the packet-forward middleware. +// Do not use this type directly with json encoding/decoding, as it is not json serializable. +// Instead use the provided helper methods to convert it, or use the Metadata keys defined in this package. +type PacketMetadata struct { + Forward ForwardMetadata +} + +// ForwardMetadata represents the metadata for forwarding a packet. +// Do not use this type directly with json encoding/decoding, as it is not json serializable. +// Instead use the provided helper methods to convert it, or use the Metadata keys defined in this package. +type ForwardMetadata struct { + Receiver string + Port string + Channel string + Timeout time.Duration + Retries *uint8 + + Next *PacketMetadata // Next is a pointer to allow nil values +} + +func (m ForwardMetadata) Validate() error { + if m.Receiver == "" { + return errors.New("failed to validate metadata. receiver cannot be empty") + } + if err := host.PortIdentifierValidator(m.Port); err != nil { + return fmt.Errorf("failed to validate metadata: %w", err) + } + if err := host.ChannelIdentifierValidator(m.Channel); err != nil { + return fmt.Errorf("failed to validate metadata: %w", err) + } + + return nil +} + +func (m ForwardMetadata) ToMap() map[string]any { + forwardMetadataMap := map[string]any{ + ForwardReceiverKey: m.Receiver, + ForwardPortKey: m.Port, + ForwardChannelKey: m.Channel, + } + + if m.Timeout > 0 { + forwardMetadataMap[ForwardTimeoutKey] = m.Timeout + } + + if m.Retries != nil { + forwardMetadataMap[ForwardRetriesKey] = *m.Retries + } + + if m.Next != nil { + forwardMetadataMap[ForwardNextKey] = m.Next.toMap() + } + + return forwardMetadataMap +} + +func (m PacketMetadata) toMap() map[string]any { + packetMetadataMap := map[string]any{ + ForwardMetadataKey: m.Forward.ToMap(), + } + + return packetMetadataMap +} + +func (m PacketMetadata) ToMemo() (string, error) { + packetMetadataMap := map[string]any{ + ForwardMetadataKey: m.Forward.ToMap(), + } + + packetMetadataJSON, err := json.Marshal(packetMetadataMap) + if err != nil { + return "", err + } + + return string(packetMetadataJSON), nil +} + +func GetPacketMetadataFromPacketdata(transferDetail ibcexported.PacketDataProvider) (PacketMetadata, bool, error) { + forwardData, ok := transferDetail.GetCustomPacketData(ForwardMetadataKey).(map[string]any) + if forwardData == nil || !ok { + return PacketMetadata{}, false, errorsmod.Wrapf(ErrMetadataKeyNotFound, "key %s not found in packet data", ForwardMetadataKey) + } + + forwardMetadata, err := getForwardMetadata(forwardData) + if err != nil { + return PacketMetadata{}, true, errorsmod.Wrapf(err, "failed to get forward metadata from packet data") + } + + return PacketMetadata{ + Forward: forwardMetadata, + }, true, nil +} + +func getForwardMetadata(forwardData map[string]any) (ForwardMetadata, error) { + receiver, ok := forwardData[ForwardReceiverKey].(string) + if !ok { + return ForwardMetadata{}, errorsmod.Wrapf(ErrMetadataKeyNotFound, "key %s not found in packet data", ForwardReceiverKey) + } + + port, ok := forwardData[ForwardPortKey].(string) + if !ok { + return ForwardMetadata{}, errorsmod.Wrapf(ErrMetadataKeyNotFound, "key %s not found in packet data", ForwardPortKey) + } + + channel, ok := forwardData[ForwardChannelKey].(string) + if !ok { + return ForwardMetadata{}, errorsmod.Wrapf(ErrMetadataKeyNotFound, "key %s not found in packet data", ForwardChannelKey) + } + + var err error + timeout := time.Duration(0) + timeoutData, ok := forwardData[ForwardTimeoutKey] + if ok { + timeout, err = 
parseDuration(timeoutData) + if err != nil { + return ForwardMetadata{}, err + } + } + + var retries *uint8 + retriesData, ok := forwardData[ForwardRetriesKey] + if ok { + retriesFloat, ok := retriesData.(float64) + if !ok { + return ForwardMetadata{}, errorsmod.Wrapf(ErrInvalidForwardMetadata, "key %s has invalid type, expected number", ForwardRetriesKey) + } + if retriesFloat < 0 || retriesFloat > 255 { + return ForwardMetadata{}, errors.New("retries must be between 0 and 255") + } + retriesU8 := uint8(retriesFloat) + retries = &retriesU8 + } + + var next *PacketMetadata + nextDataAny, ok := forwardData[ForwardNextKey] + if ok { + nextData, err := getForwardMetadataFromNext(nextDataAny) + if err != nil { + return ForwardMetadata{}, errorsmod.Wrapf(err, "failed to get next data") + } + + nextForward, err := getForwardMetadata(nextData) + if err != nil { + return ForwardMetadata{}, errorsmod.Wrapf(err, "failed to get next forward metadata from packet data") + } + + next = &PacketMetadata{ + Forward: nextForward, + } + } + + return ForwardMetadata{ + Receiver: receiver, + Port: port, + Channel: channel, + Timeout: timeout, + Retries: retries, + Next: next, + }, nil +} + +func getForwardMetadataFromNext(nextData any) (map[string]any, error) { + var packetMetadataMap map[string]any + packetMetadataMap, ok := nextData.(map[string]any) + if !ok { + nextDataStr, ok := nextData.(string) + if !ok { + return nil, errorsmod.Wrapf(ErrInvalidForwardMetadata, "next forward metadata is not a valid map or string") + } + + if err := json.Unmarshal([]byte(nextDataStr), &packetMetadataMap); err != nil { + return nil, errorsmod.Wrapf(ErrInvalidForwardMetadata, "failed to unmarshal next forward metadata: %s", err.Error()) + } + } + + forwardData, ok := packetMetadataMap[ForwardMetadataKey].(map[string]any) + if !ok { + return nil, errorsmod.Wrapf(ErrMetadataKeyNotFound, "key %s not found in next forward metadata", ForwardMetadataKey) + } + + return forwardData, nil +} + +func parseDuration(duration any) (time.Duration, error) { + switch value := duration.(type) { + case float64: + return time.Duration(value), nil + case string: + return time.ParseDuration(value) + default: + return 0, errors.New("invalid duration") + } +} diff --git a/modules/apps/packet-forward-middleware/types/forward_test.go b/modules/apps/packet-forward-middleware/types/forward_test.go new file mode 100644 index 00000000000..c68a9b3d35c --- /dev/null +++ b/modules/apps/packet-forward-middleware/types/forward_test.go @@ -0,0 +1,491 @@ +package types_test + +import ( + "encoding/json" + "testing" + "time" + + "github.com/stretchr/testify/require" + + "github.com/cosmos/ibc-go/v10/modules/apps/packet-forward-middleware/types" +) + +func TestTimeoutUnmarshalJSON(t *testing.T) { + const memo = "{\"forward\":{\"receiver\":\"noble1f4cur2krsua2th9kkp7n0zje4stea4p9tu70u8\",\"port\":\"transfer\",\"channel\":\"channel-0\",\"timeout\": 60000000000}}" + var packetMetadata types.PacketMetadata + + err := json.Unmarshal([]byte(memo), &packetMetadata) + require.NoError(t, err) + + timeoutBz, err := json.Marshal(packetMetadata.Forward.Timeout) + require.NoError(t, err) + + require.Equal(t, "60000000000", string(timeoutBz)) +} + +// Additional tests for improved coverage +func TestValidateForwardMetadata(t *testing.T) { + tests := []struct { + name string + metadata types.ForwardMetadata + expectErr bool + }{ + { + name: "valid metadata", + metadata: types.ForwardMetadata{ + Receiver: "validaddress", + Port: "validport", + Channel: "validchannel", + 
Timeout: time.Duration(0), + Retries: nil, + }, + expectErr: false, + }, + { + name: "empty receiver", + metadata: types.ForwardMetadata{ + Receiver: "", + Port: "validport", + Channel: "validchannel", + Timeout: time.Duration(0), + Retries: nil, + }, + expectErr: true, + }, + { + name: "invalid port", + metadata: types.ForwardMetadata{ + Receiver: "validaddress", + Port: "!nv@lidport", + Channel: "validchannel", + Timeout: time.Duration(0), + Retries: nil, + }, + expectErr: true, + }, + { + name: "invalid channel", + metadata: types.ForwardMetadata{ + Receiver: "validaddress", + Port: "validport", + Channel: "invalid|channel", + Timeout: time.Duration(0), + Retries: nil, + }, + expectErr: true, + }, + { + name: "valid metadata with next", + metadata: types.ForwardMetadata{ + Receiver: "validaddress", + Port: "validport", + Channel: "validchannel", + Timeout: time.Duration(0), + Retries: nil, + Next: &types.PacketMetadata{ + Forward: types.ForwardMetadata{ + Receiver: "nextreceiver", + Port: "nextport", + Channel: "nextchannel", + }, + }, + }, + expectErr: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + err := tt.metadata.Validate() + if tt.expectErr { + require.Error(t, err) + } else { + require.NoError(t, err) + } + }) + } +} + +func TestForwardMetadataToMap(t *testing.T) { + metadata := types.ForwardMetadata{ + Receiver: "receiver", + Port: "port", + Channel: "channel", + Timeout: time.Duration(30), + Retries: func(v uint8) *uint8 { return &v }(2), + } + + m := metadata.ToMap() + require.Equal(t, "receiver", m["receiver"]) + require.Equal(t, "port", m["port"]) + require.Equal(t, "channel", m["channel"]) + require.Equal(t, time.Duration(30), m["timeout"]) + require.Equal(t, uint8(2), m["retries"]) + + // Include Next metadata + metadata.Next = &types.PacketMetadata{ + Forward: types.ForwardMetadata{ + Receiver: "nextreceiver", + Port: "nextport", + Channel: "nextchannel", + }, + } + + m = metadata.ToMap() + next, ok := m["next"].(map[string]any)["forward"].(map[string]any) + require.True(t, ok) + require.Equal(t, "nextreceiver", next["receiver"]) +} + +func TestPacketMetadataToMemo(t *testing.T) { + metadata := types.PacketMetadata{ + Forward: types.ForwardMetadata{ + Receiver: "receiver", + Port: "port", + Channel: "channel", + Timeout: time.Duration(30), + Retries: func(v uint8) *uint8 { return &v }(2), + }, + } + + memo, err := metadata.ToMemo() + require.NoError(t, err) + require.Contains(t, memo, "receiver") + require.Contains(t, memo, "port") + require.Contains(t, memo, "channel") +} + +func TestGetPacketMetadataFromPacketdata(t *testing.T) { + // Create a mock PacketDataProvider with forward metadata + mockProvider := &MockPacketDataProvider{ + customData: map[string]any{ + "forward": map[string]any{ + "receiver": "test-receiver", + "port": "test-port", + "channel": "test-channel", + }, + }, + } + + packetMetadata, hasForward, err := types.GetPacketMetadataFromPacketdata(mockProvider) + require.NoError(t, err) + require.True(t, hasForward) + require.Equal(t, "test-receiver", packetMetadata.Forward.Receiver) + require.Equal(t, "test-port", packetMetadata.Forward.Port) + require.Equal(t, "test-channel", packetMetadata.Forward.Channel) + + // Test with missing forward key + mockProviderNoForward := &MockPacketDataProvider{ + customData: map[string]any{}, + } + + _, hasForward, err = types.GetPacketMetadataFromPacketdata(mockProviderNoForward) + require.Error(t, err) + require.False(t, hasForward) +} + +// MockPacketDataProvider for testing +type 
MockPacketDataProvider struct { + customData map[string]any +} + +func (m *MockPacketDataProvider) GetCustomPacketData(key string) any { + return m.customData[key] +} + +// Tests for nested Next metadata parsing (covering functionality from original commented tests) +func TestGetPacketMetadataWithNestedNext(t *testing.T) { + // Test parsing nested Next metadata as a map (equivalent to JSON object) + mockProvider := &MockPacketDataProvider{ + customData: map[string]any{ + "forward": map[string]any{ + "receiver": "noble1f4cur2krsua2th9kkp7n0zje4stea4p9tu70u8", + "port": "transfer", + "channel": "channel-0", + "timeout": float64(0), + "next": map[string]any{ + "forward": map[string]any{ + "receiver": "noble1l505zhahp24v5jsmps9vs5asah759fdce06sfp", + "port": "transfer", + "channel": "channel-0", + "timeout": float64(0), + }, + }, + }, + }, + } + + packetMetadata, hasForward, err := types.GetPacketMetadataFromPacketdata(mockProvider) + require.NoError(t, err) + require.True(t, hasForward) + require.Equal(t, "noble1f4cur2krsua2th9kkp7n0zje4stea4p9tu70u8", packetMetadata.Forward.Receiver) + require.Equal(t, "transfer", packetMetadata.Forward.Port) + require.Equal(t, "channel-0", packetMetadata.Forward.Channel) + require.Equal(t, time.Duration(0), packetMetadata.Forward.Timeout) + + // Verify nested Next metadata + require.NotNil(t, packetMetadata.Forward.Next) + require.Equal(t, "noble1l505zhahp24v5jsmps9vs5asah759fdce06sfp", packetMetadata.Forward.Next.Forward.Receiver) + require.Equal(t, "transfer", packetMetadata.Forward.Next.Forward.Port) + require.Equal(t, "channel-0", packetMetadata.Forward.Next.Forward.Channel) + require.Equal(t, time.Duration(0), packetMetadata.Forward.Next.Forward.Timeout) +} + +func TestGetPacketMetadataWithStringNext(t *testing.T) { + // Test parsing nested Next metadata as a JSON string + nextJSON := `{"forward":{"receiver":"noble1l505zhahp24v5jsmps9vs5asah759fdce06sfp","port":"transfer","channel":"channel-0","timeout":0}}` + + mockProvider := &MockPacketDataProvider{ + customData: map[string]any{ + "forward": map[string]any{ + "receiver": "noble1f4cur2krsua2th9kkp7n0zje4stea4p9tu70u8", + "port": "transfer", + "channel": "channel-0", + "timeout": float64(0), + "next": nextJSON, // Next as JSON string + }, + }, + } + + packetMetadata, hasForward, err := types.GetPacketMetadataFromPacketdata(mockProvider) + require.NoError(t, err) + require.True(t, hasForward) + require.Equal(t, "noble1f4cur2krsua2th9kkp7n0zje4stea4p9tu70u8", packetMetadata.Forward.Receiver) + + // Verify nested Next metadata parsed from JSON string + require.NotNil(t, packetMetadata.Forward.Next) + require.Equal(t, "noble1l505zhahp24v5jsmps9vs5asah759fdce06sfp", packetMetadata.Forward.Next.Forward.Receiver) + require.Equal(t, "transfer", packetMetadata.Forward.Next.Forward.Port) + require.Equal(t, "channel-0", packetMetadata.Forward.Next.Forward.Channel) +} + +func TestGetPacketMetadataTimeoutParsing(t *testing.T) { + // Test parsing timeout as string duration (like "60s") + mockProvider := &MockPacketDataProvider{ + customData: map[string]any{ + "forward": map[string]any{ + "receiver": "noble1f4cur2krsua2th9kkp7n0zje4stea4p9tu70u8", + "port": "transfer", + "channel": "channel-0", + "timeout": "60s", // Timeout as string + }, + }, + } + + packetMetadata, hasForward, err := types.GetPacketMetadataFromPacketdata(mockProvider) + require.NoError(t, err) + require.True(t, hasForward) + require.Equal(t, "noble1f4cur2krsua2th9kkp7n0zje4stea4p9tu70u8", packetMetadata.Forward.Receiver) + require.Equal(t, 
60*time.Second, packetMetadata.Forward.Timeout) + + // Test parsing timeout as float64 (nanoseconds) + mockProviderFloat := &MockPacketDataProvider{ + customData: map[string]any{ + "forward": map[string]any{ + "receiver": "noble1f4cur2krsua2th9kkp7n0zje4stea4p9tu70u8", + "port": "transfer", + "channel": "channel-0", + "timeout": float64(60000000000), // 60 seconds in nanoseconds + }, + }, + } + + packetMetadata, hasForward, err = types.GetPacketMetadataFromPacketdata(mockProviderFloat) + require.NoError(t, err) + require.True(t, hasForward) + require.Equal(t, time.Duration(60000000000), packetMetadata.Forward.Timeout) +} + +func TestGetPacketMetadataRetriesParsing(t *testing.T) { + // Test parsing retries field + mockProvider := &MockPacketDataProvider{ + customData: map[string]any{ + "forward": map[string]any{ + "receiver": "test-receiver", + "port": "test-port", + "channel": "test-channel", + "retries": float64(5), // Retries as float64 + }, + }, + } + + packetMetadata, hasForward, err := types.GetPacketMetadataFromPacketdata(mockProvider) + require.NoError(t, err) + require.True(t, hasForward) + require.NotNil(t, packetMetadata.Forward.Retries) + require.Equal(t, uint8(5), *packetMetadata.Forward.Retries) +} + +func TestGetPacketMetadataErrorCases(t *testing.T) { + tests := []struct { + name string + customData map[string]any + expectedError string + expectedHasForward bool + }{ + { + name: "invalid timeout string", + customData: map[string]any{ + "forward": map[string]any{ + "receiver": "test-receiver", + "port": "test-port", + "channel": "test-channel", + "timeout": "invalid-duration", + }, + }, + expectedError: "time: invalid duration", + expectedHasForward: true, + }, + { + name: "retries value too high", + customData: map[string]any{ + "forward": map[string]any{ + "receiver": "test-receiver", + "port": "test-port", + "channel": "test-channel", + "retries": float64(300), // > 255 + }, + }, + expectedError: "retries must be between 0 and 255", + expectedHasForward: true, + }, + { + name: "invalid retries type", + customData: map[string]any{ + "forward": map[string]any{ + "receiver": "test-receiver", + "port": "test-port", + "channel": "test-channel", + "retries": "not-a-number", // Invalid type + }, + }, + expectedError: "key retries has invalid type, expected number", + expectedHasForward: true, + }, + { + name: "invalid next JSON string", + customData: map[string]any{ + "forward": map[string]any{ + "receiver": "test-receiver", + "port": "test-port", + "channel": "test-channel", + "next": "invalid json", + }, + }, + expectedError: "failed to unmarshal next forward metadata", + expectedHasForward: true, + }, + { + name: "missing receiver", + customData: map[string]any{ + "forward": map[string]any{ + "port": "test-port", + "channel": "test-channel", + }, + }, + expectedError: "receiver", + expectedHasForward: true, + }, + { + name: "missing port", + customData: map[string]any{ + "forward": map[string]any{ + "receiver": "test-receiver", + "channel": "test-channel", + }, + }, + expectedError: "port", + expectedHasForward: true, + }, + { + name: "missing channel", + customData: map[string]any{ + "forward": map[string]any{ + "receiver": "test-receiver", + "port": "test-port", + }, + }, + expectedError: "channel", + expectedHasForward: true, + }, + { + name: "nested forward metadata error", + customData: map[string]any{ + "forward": map[string]any{ + "receiver": "test-receiver", + "port": "test-port", + "channel": "test-channel", + "next": map[string]any{ + "forward": map[string]any{ + 
"receiver": "nested-receiver", + "port": "nested-port", + // Missing required "channel" key + }, + }, + }, + }, + expectedError: "failed to get next forward metadata from packet data", + expectedHasForward: true, + }, + { + name: "invalid next type", + customData: map[string]any{ + "forward": map[string]any{ + "receiver": "test-receiver", + "port": "test-port", + "channel": "test-channel", + "next": 42, // Invalid type (not map or string) + }, + }, + expectedError: "next forward metadata is not a valid map or string", + expectedHasForward: true, + }, + { + name: "missing forward key in next metadata", + customData: map[string]any{ + "forward": map[string]any{ + "receiver": "test-receiver", + "port": "test-port", + "channel": "test-channel", + "next": map[string]any{ + "other_key": "some_value", // Missing "forward" key + }, + }, + }, + expectedError: "key forward not found in next forward metadata", + expectedHasForward: true, + }, + { + name: "invalid timeout type", + customData: map[string]any{ + "forward": map[string]any{ + "receiver": "test-receiver", + "port": "test-port", + "channel": "test-channel", + "timeout": true, // Invalid type (boolean instead of duration) + }, + }, + expectedError: "invalid duration", + expectedHasForward: true, + }, + { + name: "missing forward key entirely", + customData: map[string]any{}, + expectedError: "key forward not found in packet data", + expectedHasForward: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + mockProvider := &MockPacketDataProvider{ + customData: tt.customData, + } + + _, hasForward, err := types.GetPacketMetadataFromPacketdata(mockProvider) + require.Error(t, err) + require.Equal(t, tt.expectedHasForward, hasForward) + require.Contains(t, err.Error(), tt.expectedError) + }) + } +} diff --git a/modules/apps/packet-forward-middleware/types/genesis.go b/modules/apps/packet-forward-middleware/types/genesis.go new file mode 100644 index 00000000000..41d18f3e1ee --- /dev/null +++ b/modules/apps/packet-forward-middleware/types/genesis.go @@ -0,0 +1,19 @@ +package types + +import "errors" + +// DefaultGenesisState returns a GenesisState with an empty map of in-flight packets. +func DefaultGenesisState() *GenesisState { + return &GenesisState{ + InFlightPackets: make(map[string]InFlightPacket), + } +} + +// Validate performs basic genesis state validation returning an error upon any failure. +func (gs GenesisState) Validate() error { + if gs.InFlightPackets == nil { + return errors.New("in-flight packets cannot be nil") + } + + return nil +} diff --git a/modules/apps/packet-forward-middleware/types/genesis.pb.go b/modules/apps/packet-forward-middleware/types/genesis.pb.go new file mode 100644 index 00000000000..42e605ca3ff --- /dev/null +++ b/modules/apps/packet-forward-middleware/types/genesis.pb.go @@ -0,0 +1,1131 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: ibc/applications/packet_forward_middleware/v1/genesis.proto + +package types + +import ( + fmt "fmt" + _ "github.com/cosmos/gogoproto/gogoproto" + proto "github.com/cosmos/gogoproto/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// GenesisState defines the packetforward genesis state +type GenesisState struct { + // key - information about forwarded packet: src_channel + // (parsedReceiver.Channel), src_port (parsedReceiver.Port), sequence value - + // information about original packet for refunding if necessary: retries, + // srcPacketSender, srcPacket.DestinationChannel, srcPacket.DestinationPort + InFlightPackets map[string]InFlightPacket `protobuf:"bytes,2,rep,name=in_flight_packets,json=inFlightPackets,proto3" json:"in_flight_packets" yaml:"in_flight_packets" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (m *GenesisState) Reset() { *m = GenesisState{} } +func (m *GenesisState) String() string { return proto.CompactTextString(m) } +func (*GenesisState) ProtoMessage() {} +func (*GenesisState) Descriptor() ([]byte, []int) { + return fileDescriptor_421a822166afb238, []int{0} +} +func (m *GenesisState) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GenesisState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_GenesisState.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *GenesisState) XXX_Merge(src proto.Message) { + xxx_messageInfo_GenesisState.Merge(m, src) +} +func (m *GenesisState) XXX_Size() int { + return m.Size() +} +func (m *GenesisState) XXX_DiscardUnknown() { + xxx_messageInfo_GenesisState.DiscardUnknown(m) +} + +var xxx_messageInfo_GenesisState proto.InternalMessageInfo + +func (m *GenesisState) GetInFlightPackets() map[string]InFlightPacket { + if m != nil { + return m.InFlightPackets + } + return nil +} + +// InFlightPacket contains information about original packet for +// writing the acknowledgement and refunding if necessary. 
+type InFlightPacket struct { + OriginalSenderAddress string `protobuf:"bytes,1,opt,name=original_sender_address,json=originalSenderAddress,proto3" json:"original_sender_address,omitempty"` + RefundChannelId string `protobuf:"bytes,2,opt,name=refund_channel_id,json=refundChannelId,proto3" json:"refund_channel_id,omitempty"` + RefundPortId string `protobuf:"bytes,3,opt,name=refund_port_id,json=refundPortId,proto3" json:"refund_port_id,omitempty"` + PacketSrcChannelId string `protobuf:"bytes,4,opt,name=packet_src_channel_id,json=packetSrcChannelId,proto3" json:"packet_src_channel_id,omitempty"` + PacketSrcPortId string `protobuf:"bytes,5,opt,name=packet_src_port_id,json=packetSrcPortId,proto3" json:"packet_src_port_id,omitempty"` + PacketTimeoutTimestamp uint64 `protobuf:"varint,6,opt,name=packet_timeout_timestamp,json=packetTimeoutTimestamp,proto3" json:"packet_timeout_timestamp,omitempty"` + PacketTimeoutHeight string `protobuf:"bytes,7,opt,name=packet_timeout_height,json=packetTimeoutHeight,proto3" json:"packet_timeout_height,omitempty"` + PacketData []byte `protobuf:"bytes,8,opt,name=packet_data,json=packetData,proto3" json:"packet_data,omitempty"` + RefundSequence uint64 `protobuf:"varint,9,opt,name=refund_sequence,json=refundSequence,proto3" json:"refund_sequence,omitempty"` + RetriesRemaining int32 `protobuf:"varint,10,opt,name=retries_remaining,json=retriesRemaining,proto3" json:"retries_remaining,omitempty"` + Timeout uint64 `protobuf:"varint,11,opt,name=timeout,proto3" json:"timeout,omitempty"` + Nonrefundable bool `protobuf:"varint,12,opt,name=nonrefundable,proto3" json:"nonrefundable,omitempty"` +} + +func (m *InFlightPacket) Reset() { *m = InFlightPacket{} } +func (m *InFlightPacket) String() string { return proto.CompactTextString(m) } +func (*InFlightPacket) ProtoMessage() {} +func (*InFlightPacket) Descriptor() ([]byte, []int) { + return fileDescriptor_421a822166afb238, []int{1} +} +func (m *InFlightPacket) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *InFlightPacket) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_InFlightPacket.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *InFlightPacket) XXX_Merge(src proto.Message) { + xxx_messageInfo_InFlightPacket.Merge(m, src) +} +func (m *InFlightPacket) XXX_Size() int { + return m.Size() +} +func (m *InFlightPacket) XXX_DiscardUnknown() { + xxx_messageInfo_InFlightPacket.DiscardUnknown(m) +} + +var xxx_messageInfo_InFlightPacket proto.InternalMessageInfo + +func (m *InFlightPacket) GetOriginalSenderAddress() string { + if m != nil { + return m.OriginalSenderAddress + } + return "" +} + +func (m *InFlightPacket) GetRefundChannelId() string { + if m != nil { + return m.RefundChannelId + } + return "" +} + +func (m *InFlightPacket) GetRefundPortId() string { + if m != nil { + return m.RefundPortId + } + return "" +} + +func (m *InFlightPacket) GetPacketSrcChannelId() string { + if m != nil { + return m.PacketSrcChannelId + } + return "" +} + +func (m *InFlightPacket) GetPacketSrcPortId() string { + if m != nil { + return m.PacketSrcPortId + } + return "" +} + +func (m *InFlightPacket) GetPacketTimeoutTimestamp() uint64 { + if m != nil { + return m.PacketTimeoutTimestamp + } + return 0 +} + +func (m *InFlightPacket) GetPacketTimeoutHeight() string { + if m != nil { + return m.PacketTimeoutHeight + } + return "" +} + +func (m 
*InFlightPacket) GetPacketData() []byte { + if m != nil { + return m.PacketData + } + return nil +} + +func (m *InFlightPacket) GetRefundSequence() uint64 { + if m != nil { + return m.RefundSequence + } + return 0 +} + +func (m *InFlightPacket) GetRetriesRemaining() int32 { + if m != nil { + return m.RetriesRemaining + } + return 0 +} + +func (m *InFlightPacket) GetTimeout() uint64 { + if m != nil { + return m.Timeout + } + return 0 +} + +func (m *InFlightPacket) GetNonrefundable() bool { + if m != nil { + return m.Nonrefundable + } + return false +} + +func init() { + proto.RegisterType((*GenesisState)(nil), "ibc.applications.packet_forward_middleware.v1.GenesisState") + proto.RegisterMapType((map[string]InFlightPacket)(nil), "ibc.applications.packet_forward_middleware.v1.GenesisState.InFlightPacketsEntry") + proto.RegisterType((*InFlightPacket)(nil), "ibc.applications.packet_forward_middleware.v1.InFlightPacket") +} + +func init() { + proto.RegisterFile("ibc/applications/packet_forward_middleware/v1/genesis.proto", fileDescriptor_421a822166afb238) +} + +var fileDescriptor_421a822166afb238 = []byte{ + // 597 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x94, 0x4b, 0x6e, 0xdb, 0x3c, + 0x14, 0x85, 0x4d, 0xe7, 0x4d, 0xfb, 0xcf, 0x83, 0x7f, 0xd2, 0x12, 0x19, 0x38, 0x82, 0x11, 0xa0, + 0x42, 0x03, 0x4b, 0x75, 0x0a, 0x14, 0x41, 0x8a, 0x0e, 0x9a, 0x3e, 0x3d, 0x0b, 0xe4, 0x8c, 0x3a, + 0x11, 0x68, 0x89, 0x91, 0x89, 0x48, 0xa4, 0x4a, 0xd2, 0x0e, 0x3c, 0xec, 0x0e, 0xba, 0x82, 0x76, + 0x11, 0xdd, 0x44, 0x86, 0x19, 0x76, 0x14, 0x14, 0xf1, 0x0e, 0xba, 0x82, 0x42, 0x22, 0x95, 0xda, + 0x68, 0x3b, 0xc8, 0xc8, 0xf4, 0x3d, 0xf7, 0x7c, 0xf7, 0xe8, 0x42, 0x22, 0x7c, 0xce, 0x06, 0x91, + 0x4f, 0xf2, 0x3c, 0x65, 0x11, 0xd1, 0x4c, 0x70, 0xe5, 0xe7, 0x24, 0xba, 0xa0, 0x3a, 0x3c, 0x17, + 0xf2, 0x92, 0xc8, 0x38, 0xcc, 0x58, 0x1c, 0xa7, 0xf4, 0x92, 0x48, 0xea, 0x8f, 0xbb, 0x7e, 0x42, + 0x39, 0x55, 0x4c, 0x79, 0xb9, 0x14, 0x5a, 0xa0, 0x0e, 0x1b, 0x44, 0xde, 0xac, 0xd9, 0xfb, 0xa7, + 0xd9, 0x1b, 0x77, 0x77, 0xb7, 0x13, 0x91, 0x88, 0xd2, 0xe9, 0x17, 0x27, 0x03, 0x69, 0x7f, 0xab, + 0xc3, 0xe6, 0x3b, 0x83, 0xed, 0x6b, 0xa2, 0x29, 0xfa, 0x02, 0xe0, 0x16, 0xe3, 0xe1, 0x79, 0xca, + 0x92, 0xa1, 0x0e, 0x0d, 0x51, 0xe1, 0xba, 0xb3, 0xe0, 0x36, 0x0e, 0x4f, 0xbd, 0x7b, 0x8d, 0xf4, + 0x66, 0xc1, 0x5e, 0x8f, 0xbf, 0x2d, 0x99, 0xa7, 0x06, 0xf9, 0x86, 0x6b, 0x39, 0x39, 0x71, 0xae, + 0x6e, 0xf6, 0x6a, 0x3f, 0x6f, 0xf6, 0xf0, 0x84, 0x64, 0xe9, 0x71, 0xfb, 0x8f, 0xc1, 0xed, 0x60, + 0x83, 0xcd, 0xfb, 0x76, 0x3f, 0x01, 0xb8, 0xfd, 0x37, 0x16, 0xda, 0x84, 0x0b, 0x17, 0x74, 0x82, + 0x81, 0x03, 0xdc, 0xb5, 0xa0, 0x38, 0xa2, 0x3e, 0x5c, 0x1a, 0x93, 0x74, 0x44, 0x71, 0xdd, 0x01, + 0x6e, 0xe3, 0xf0, 0xc5, 0x3d, 0xe3, 0xcf, 0x4f, 0x09, 0x0c, 0xeb, 0xb8, 0x7e, 0x04, 0xda, 0x5f, + 0x17, 0xe1, 0xfa, 0xbc, 0x8a, 0x9e, 0xc1, 0x87, 0x42, 0xb2, 0x84, 0x71, 0x92, 0x86, 0x8a, 0xf2, + 0x98, 0xca, 0x90, 0xc4, 0xb1, 0xa4, 0x4a, 0xd9, 0x44, 0x3b, 0x95, 0xdc, 0x2f, 0xd5, 0x97, 0x46, + 0x44, 0x8f, 0xe1, 0x96, 0xa4, 0xe7, 0x23, 0x1e, 0x87, 0xd1, 0x90, 0x70, 0x4e, 0xd3, 0x90, 0xc5, + 0x65, 0xde, 0xb5, 0x60, 0xc3, 0x08, 0xaf, 0x4c, 0xbd, 0x17, 0xa3, 0x7d, 0xb8, 0x6e, 0x7b, 0x73, + 0x21, 0x75, 0xd1, 0xb8, 0x50, 0x36, 0x36, 0x4d, 0xf5, 0x54, 0x48, 0xdd, 0x8b, 0x51, 0x17, 0xee, + 0xd8, 0xc7, 0x52, 0x32, 0x9a, 0xa5, 0x2e, 0x96, 0xcd, 0xc8, 0x88, 0x7d, 0x19, 0xfd, 0x06, 0x1f, + 0x40, 0x34, 0x63, 0xa9, 0xe0, 0x4b, 0x26, 0xc5, 0x5d, 0xbf, 0xe5, 0x1f, 0x41, 0x6c, 0x9b, 0x35, + 0xcb, 0xa8, 0x18, 0x99, 0x5f, 
0xa5, 0x49, 0x96, 0xe3, 0x65, 0x07, 0xb8, 0x8b, 0xc1, 0x03, 0xa3, + 0x9f, 0x19, 0xf9, 0xac, 0x52, 0xd1, 0xe1, 0x5d, 0xb2, 0xca, 0x39, 0xa4, 0xc5, 0x0a, 0xf1, 0x4a, + 0x39, 0xe9, 0xff, 0x39, 0xdb, 0xfb, 0x52, 0x42, 0x7b, 0xb0, 0x61, 0x3d, 0x31, 0xd1, 0x04, 0xaf, + 0x3a, 0xc0, 0x6d, 0x06, 0xd0, 0x94, 0x5e, 0x13, 0x4d, 0xd0, 0x23, 0x68, 0xf7, 0x14, 0x2a, 0xfa, + 0x71, 0x44, 0x79, 0x44, 0xf1, 0x5a, 0x99, 0xc2, 0xee, 0xaa, 0x6f, 0xab, 0xe8, 0xa0, 0xd8, 0xb4, + 0x96, 0x8c, 0xaa, 0x50, 0xd2, 0x8c, 0x30, 0xce, 0x78, 0x82, 0xa1, 0x03, 0xdc, 0xa5, 0x60, 0xd3, + 0x0a, 0x41, 0x55, 0x47, 0x18, 0xae, 0xd8, 0x8c, 0xb8, 0x51, 0xd2, 0xaa, 0xbf, 0x68, 0x1f, 0xfe, + 0xc7, 0x05, 0x37, 0x6c, 0x32, 0x48, 0x29, 0x6e, 0x3a, 0xc0, 0x5d, 0x0d, 0xe6, 0x8b, 0x27, 0xd1, + 0xd5, 0x6d, 0x0b, 0x5c, 0xdf, 0xb6, 0xc0, 0x8f, 0xdb, 0x16, 0xf8, 0x3c, 0x6d, 0xd5, 0xae, 0xa7, + 0xad, 0xda, 0xf7, 0x69, 0xab, 0xf6, 0xa1, 0x97, 0x30, 0x3d, 0x1c, 0x0d, 0xbc, 0x48, 0x64, 0x7e, + 0x24, 0x54, 0x26, 0x94, 0xcf, 0x06, 0x51, 0x27, 0x11, 0xfe, 0xb8, 0xfb, 0xc4, 0xcf, 0x44, 0x3c, + 0x4a, 0xa9, 0x2a, 0x2e, 0x85, 0xea, 0x32, 0xe8, 0xd8, 0xb7, 0xb3, 0x33, 0x73, 0x19, 0xe8, 0x49, + 0x4e, 0xd5, 0x60, 0xb9, 0xfc, 0x86, 0x9f, 0xfe, 0x0a, 0x00, 0x00, 0xff, 0xff, 0x12, 0x34, 0xfe, + 0x19, 0x47, 0x04, 0x00, 0x00, +} + +func (m *GenesisState) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GenesisState) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GenesisState) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.InFlightPackets) > 0 { + for k := range m.InFlightPackets { + v := m.InFlightPackets[k] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenesis(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintGenesis(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenesis(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x12 + } + } + return len(dAtA) - i, nil +} + +func (m *InFlightPacket) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *InFlightPacket) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *InFlightPacket) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Nonrefundable { + i-- + if m.Nonrefundable { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x60 + } + if m.Timeout != 0 { + i = encodeVarintGenesis(dAtA, i, uint64(m.Timeout)) + i-- + dAtA[i] = 0x58 + } + if m.RetriesRemaining != 0 { + i = encodeVarintGenesis(dAtA, i, uint64(m.RetriesRemaining)) + i-- + dAtA[i] = 0x50 + } + if m.RefundSequence != 0 { + i = encodeVarintGenesis(dAtA, i, uint64(m.RefundSequence)) + i-- + dAtA[i] = 0x48 + } + if len(m.PacketData) > 0 { + i -= len(m.PacketData) + copy(dAtA[i:], m.PacketData) + i = encodeVarintGenesis(dAtA, i, uint64(len(m.PacketData))) + i-- + dAtA[i] = 0x42 + } + if len(m.PacketTimeoutHeight) > 0 { + i -= len(m.PacketTimeoutHeight) + copy(dAtA[i:], m.PacketTimeoutHeight) + i = encodeVarintGenesis(dAtA, i, 
uint64(len(m.PacketTimeoutHeight))) + i-- + dAtA[i] = 0x3a + } + if m.PacketTimeoutTimestamp != 0 { + i = encodeVarintGenesis(dAtA, i, uint64(m.PacketTimeoutTimestamp)) + i-- + dAtA[i] = 0x30 + } + if len(m.PacketSrcPortId) > 0 { + i -= len(m.PacketSrcPortId) + copy(dAtA[i:], m.PacketSrcPortId) + i = encodeVarintGenesis(dAtA, i, uint64(len(m.PacketSrcPortId))) + i-- + dAtA[i] = 0x2a + } + if len(m.PacketSrcChannelId) > 0 { + i -= len(m.PacketSrcChannelId) + copy(dAtA[i:], m.PacketSrcChannelId) + i = encodeVarintGenesis(dAtA, i, uint64(len(m.PacketSrcChannelId))) + i-- + dAtA[i] = 0x22 + } + if len(m.RefundPortId) > 0 { + i -= len(m.RefundPortId) + copy(dAtA[i:], m.RefundPortId) + i = encodeVarintGenesis(dAtA, i, uint64(len(m.RefundPortId))) + i-- + dAtA[i] = 0x1a + } + if len(m.RefundChannelId) > 0 { + i -= len(m.RefundChannelId) + copy(dAtA[i:], m.RefundChannelId) + i = encodeVarintGenesis(dAtA, i, uint64(len(m.RefundChannelId))) + i-- + dAtA[i] = 0x12 + } + if len(m.OriginalSenderAddress) > 0 { + i -= len(m.OriginalSenderAddress) + copy(dAtA[i:], m.OriginalSenderAddress) + i = encodeVarintGenesis(dAtA, i, uint64(len(m.OriginalSenderAddress))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintGenesis(dAtA []byte, offset int, v uint64) int { + offset -= sovGenesis(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *GenesisState) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.InFlightPackets) > 0 { + for k, v := range m.InFlightPackets { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenesis(uint64(len(k))) + 1 + l + sovGenesis(uint64(l)) + n += mapEntrySize + 1 + sovGenesis(uint64(mapEntrySize)) + } + } + return n +} + +func (m *InFlightPacket) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.OriginalSenderAddress) + if l > 0 { + n += 1 + l + sovGenesis(uint64(l)) + } + l = len(m.RefundChannelId) + if l > 0 { + n += 1 + l + sovGenesis(uint64(l)) + } + l = len(m.RefundPortId) + if l > 0 { + n += 1 + l + sovGenesis(uint64(l)) + } + l = len(m.PacketSrcChannelId) + if l > 0 { + n += 1 + l + sovGenesis(uint64(l)) + } + l = len(m.PacketSrcPortId) + if l > 0 { + n += 1 + l + sovGenesis(uint64(l)) + } + if m.PacketTimeoutTimestamp != 0 { + n += 1 + sovGenesis(uint64(m.PacketTimeoutTimestamp)) + } + l = len(m.PacketTimeoutHeight) + if l > 0 { + n += 1 + l + sovGenesis(uint64(l)) + } + l = len(m.PacketData) + if l > 0 { + n += 1 + l + sovGenesis(uint64(l)) + } + if m.RefundSequence != 0 { + n += 1 + sovGenesis(uint64(m.RefundSequence)) + } + if m.RetriesRemaining != 0 { + n += 1 + sovGenesis(uint64(m.RetriesRemaining)) + } + if m.Timeout != 0 { + n += 1 + sovGenesis(uint64(m.Timeout)) + } + if m.Nonrefundable { + n += 2 + } + return n +} + +func sovGenesis(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenesis(x uint64) (n int) { + return sovGenesis(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *GenesisState) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return 
fmt.Errorf("proto: GenesisState: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GenesisState: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field InFlightPackets", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenesis + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenesis + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.InFlightPackets == nil { + m.InFlightPackets = make(map[string]InFlightPacket) + } + var mapkey string + mapvalue := &InFlightPacket{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenesis + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenesis + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenesis + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthGenesis + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &InFlightPacket{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenesis(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenesis + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.InFlightPackets[mapkey] = *mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenesis(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenesis + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *InFlightPacket) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: InFlightPacket: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: InFlightPacket: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field OriginalSenderAddress", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenesis + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenesis + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.OriginalSenderAddress = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RefundChannelId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenesis + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenesis + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RefundChannelId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RefundPortId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenesis + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenesis + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RefundPortId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PacketSrcChannelId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenesis + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenesis + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PacketSrcChannelId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PacketSrcPortId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + 
iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenesis + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenesis + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PacketSrcPortId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PacketTimeoutTimestamp", wireType) + } + m.PacketTimeoutTimestamp = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.PacketTimeoutTimestamp |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PacketTimeoutHeight", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenesis + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenesis + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PacketTimeoutHeight = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PacketData", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthGenesis + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthGenesis + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PacketData = append(m.PacketData[:0], dAtA[iNdEx:postIndex]...) 
+ if m.PacketData == nil { + m.PacketData = []byte{} + } + iNdEx = postIndex + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RefundSequence", wireType) + } + m.RefundSequence = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.RefundSequence |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 10: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RetriesRemaining", wireType) + } + m.RetriesRemaining = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.RetriesRemaining |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 11: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Timeout", wireType) + } + m.Timeout = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Timeout |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 12: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Nonrefundable", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Nonrefundable = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipGenesis(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenesis + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenesis(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenesis + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenesis + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenesis + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenesis + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenesis + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenesis + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenesis = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenesis = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenesis = 
fmt.Errorf("proto: unexpected end of group") +) diff --git a/modules/apps/packet-forward-middleware/types/keys.go b/modules/apps/packet-forward-middleware/types/keys.go new file mode 100644 index 00000000000..b5b74d9a425 --- /dev/null +++ b/modules/apps/packet-forward-middleware/types/keys.go @@ -0,0 +1,30 @@ +package types + +import "fmt" + +const ( + // ModuleName defines the module name + // NOTE: There is a spelling mistake in the module name that came from the original implementation + // and is currently kept for backward compatibility. Consideration for renaming done in #8388 + ModuleName = "packetfowardmiddleware" + + // StoreKey is the store key string for IBC transfer + StoreKey = ModuleName + + // QuerierRoute is the querier route for IBC transfer + QuerierRoute = ModuleName + + ForwardMetadataKey = "forward" + ForwardReceiverKey = "receiver" + ForwardPortKey = "port" + ForwardChannelKey = "channel" + ForwardTimeoutKey = "timeout" + ForwardRetriesKey = "retries" + ForwardNextKey = "next" +) + +type NonrefundableKey struct{} + +func RefundPacketKey(channelID, portID string, sequence uint64) []byte { + return fmt.Appendf(nil, "%s/%s/%d", channelID, portID, sequence) +} diff --git a/modules/apps/packet-forward-middleware/types/types.go b/modules/apps/packet-forward-middleware/types/types.go new file mode 100644 index 00000000000..a63a6049d4a --- /dev/null +++ b/modules/apps/packet-forward-middleware/types/types.go @@ -0,0 +1,19 @@ +package types + +import ( + clienttypes "github.com/cosmos/ibc-go/v10/modules/core/02-client/types" + channeltypes "github.com/cosmos/ibc-go/v10/modules/core/04-channel/types" +) + +func (ifp *InFlightPacket) ChannelPacket() channeltypes.Packet { + return channeltypes.Packet{ + Data: ifp.PacketData, + Sequence: ifp.RefundSequence, + SourcePort: ifp.PacketSrcPortId, + SourceChannel: ifp.PacketSrcChannelId, + DestinationPort: ifp.RefundPortId, + DestinationChannel: ifp.RefundChannelId, + TimeoutHeight: clienttypes.MustParseHeight(ifp.PacketTimeoutHeight), + TimeoutTimestamp: ifp.PacketTimeoutTimestamp, + } +} diff --git a/modules/apps/rate-limiting/client/cli/cli.go b/modules/apps/rate-limiting/client/cli/cli.go new file mode 100644 index 00000000000..c9a614c84ac --- /dev/null +++ b/modules/apps/rate-limiting/client/cli/cli.go @@ -0,0 +1,27 @@ +package cli + +import ( + "github.com/spf13/cobra" + + "github.com/cosmos/cosmos-sdk/client" +) + +// GetQueryCmd returns the cli query commands for this module. 
+func GetQueryCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "ratelimiting", + Short: "IBC ratelimiting querying subcommands", + DisableFlagParsing: true, + SuggestionsMinimumDistance: 2, + RunE: client.ValidateCmd, + } + + cmd.AddCommand( + GetCmdQueryRateLimit(), + GetCmdQueryAllRateLimits(), + GetCmdQueryRateLimitsByChainID(), + GetCmdQueryAllBlacklistedDenoms(), + GetCmdQueryAllWhitelistedAddresses(), + ) + return cmd +} diff --git a/modules/apps/rate-limiting/client/cli/query.go b/modules/apps/rate-limiting/client/cli/query.go new file mode 100644 index 00000000000..702b451d017 --- /dev/null +++ b/modules/apps/rate-limiting/client/cli/query.go @@ -0,0 +1,196 @@ +package cli + +import ( + "context" + "fmt" + "strings" + + "github.com/spf13/cobra" + + "github.com/cosmos/cosmos-sdk/client" + "github.com/cosmos/cosmos-sdk/client/flags" + "github.com/cosmos/cosmos-sdk/version" + + "github.com/cosmos/ibc-go/v10/modules/apps/rate-limiting/types" +) + +const ( + FlagDenom = "denom" +) + +// GetCmdQueryRateLimit implements a command to query rate limits by channel-id or client-id and denom +func GetCmdQueryRateLimit() *cobra.Command { + cmd := &cobra.Command{ + Use: "rate-limit [channel-or-client-id]", + Short: "Query rate limits from a given channel-id/client-id and denom", + Long: strings.TrimSpace( + fmt.Sprintf(`Query rate limits from a given channel-id/client-id and denom. +If the denom flag is omitted, all rate limits for the given channel-id/client-id are returned. + +Example: + $ %s query %s rate-limit [channel-or-client-id] + $ %s query %s rate-limit [channel-or-client-id] --denom=[denom] +`, + version.AppName, types.ModuleName, version.AppName, types.ModuleName, + ), + ), + Args: cobra.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + channelOrClientID := args[0] + denom, err := cmd.Flags().GetString(FlagDenom) + if err != nil { + return err + } + + clientCtx, err := client.GetClientQueryContext(cmd) + if err != nil { + return err + } + queryClient := types.NewQueryClient(clientCtx) + + // Query all rate limits for the channel/client ID if denom is not specified. + if denom == "" { + req := &types.QueryRateLimitsByChannelOrClientIDRequest{ + ChannelOrClientId: channelOrClientID, + } + res, err := queryClient.RateLimitsByChannelOrClientID(context.Background(), req) + if err != nil { + return err + } + return clientCtx.PrintProto(res) + } + + // Query specific rate limit if denom is provided + req := &types.QueryRateLimitRequest{ + Denom: denom, + ChannelOrClientId: channelOrClientID, + } + res, err := queryClient.RateLimit(context.Background(), req) + if err != nil { + return err + } + + return clientCtx.PrintProto(res.RateLimit) + }, + } + + cmd.Flags().String(FlagDenom, "", "The denom identifying a specific rate limit") + flags.AddQueryFlagsToCmd(cmd) + + return cmd +} + +// GetCmdQueryAllRateLimits return all available rate limits. 
+func GetCmdQueryAllRateLimits() *cobra.Command { + cmd := &cobra.Command{ + Use: "list-rate-limits", + Short: "Query for all rate limits", + Args: cobra.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + clientCtx, err := client.GetClientQueryContext(cmd) + if err != nil { + return err + } + queryClient := types.NewQueryClient(clientCtx) + + req := &types.QueryAllRateLimitsRequest{} + res, err := queryClient.AllRateLimits(context.Background(), req) + if err != nil { + return err + } + + return clientCtx.PrintProto(res) + }, + } + + flags.AddQueryFlagsToCmd(cmd) + + return cmd +} + +// GetCmdQueryRateLimitsByChainID return all rate limits that exist between this chain +// and the specified ChainId +func GetCmdQueryRateLimitsByChainID() *cobra.Command { + cmd := &cobra.Command{ + Use: "rate-limits-by-chain [chain-id]", + Short: "Query for all rate limits associated with the channels/clients connecting to the given ChainID", + Args: cobra.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + chainID := args[0] + + clientCtx, err := client.GetClientQueryContext(cmd) + if err != nil { + return err + } + queryClient := types.NewQueryClient(clientCtx) + + req := &types.QueryRateLimitsByChainIDRequest{ + ChainId: chainID, + } + res, err := queryClient.RateLimitsByChainID(context.Background(), req) + if err != nil { + return err + } + + return clientCtx.PrintProto(res) + }, + } + + flags.AddQueryFlagsToCmd(cmd) + + return cmd +} + +// GetCmdQueryAllBlacklistedDenoms returns the command to query all blacklisted denoms +func GetCmdQueryAllBlacklistedDenoms() *cobra.Command { + cmd := &cobra.Command{ + Use: "list-blacklisted-denoms", + Short: "Query for all blacklisted denoms", + Args: cobra.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + clientCtx, err := client.GetClientQueryContext(cmd) + if err != nil { + return err + } + queryClient := types.NewQueryClient(clientCtx) + + req := &types.QueryAllBlacklistedDenomsRequest{} + res, err := queryClient.AllBlacklistedDenoms(context.Background(), req) + if err != nil { + return err + } + + return clientCtx.PrintProto(res) + }, + } + + flags.AddQueryFlagsToCmd(cmd) + return cmd +} + +// GetCmdQueryAllWhitelistedAddresses returns the command to query all whitelisted address pairs +func GetCmdQueryAllWhitelistedAddresses() *cobra.Command { + cmd := &cobra.Command{ + Use: "list-whitelisted-addresses", + Short: "Query for all whitelisted address pairs", + Args: cobra.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + clientCtx, err := client.GetClientQueryContext(cmd) + if err != nil { + return err + } + queryClient := types.NewQueryClient(clientCtx) + + req := &types.QueryAllWhitelistedAddressesRequest{} + res, err := queryClient.AllWhitelistedAddresses(context.Background(), req) + if err != nil { + return err + } + + return clientCtx.PrintProto(res) + }, + } + + flags.AddQueryFlagsToCmd(cmd) + return cmd +} diff --git a/modules/apps/rate-limiting/doc.go b/modules/apps/rate-limiting/doc.go new file mode 100644 index 00000000000..75c737aaa3a --- /dev/null +++ b/modules/apps/rate-limiting/doc.go @@ -0,0 +1,8 @@ +/* +Package rate-limiting implements a middleware to rate limit IBC transfers +between different chains to prevent excessive token flow in either direction. +This module monitors and enforces configurable rate limits on token transfers +across IBC channels to protect chains from economic attacks or unintended +token drainage. 
+*/ +package ratelimiting diff --git a/modules/apps/rate-limiting/ibc_middleware.go b/modules/apps/rate-limiting/ibc_middleware.go new file mode 100644 index 00000000000..b94cc89101d --- /dev/null +++ b/modules/apps/rate-limiting/ibc_middleware.go @@ -0,0 +1,144 @@ +package ratelimiting + +import ( + "fmt" + + sdk "github.com/cosmos/cosmos-sdk/types" + + "github.com/cosmos/ibc-go/v10/modules/apps/rate-limiting/keeper" + clienttypes "github.com/cosmos/ibc-go/v10/modules/core/02-client/types" + channeltypes "github.com/cosmos/ibc-go/v10/modules/core/04-channel/types" + porttypes "github.com/cosmos/ibc-go/v10/modules/core/05-port/types" + ibcexported "github.com/cosmos/ibc-go/v10/modules/core/exported" +) + +var ( + _ porttypes.Middleware = (*IBCMiddleware)(nil) + _ porttypes.PacketUnmarshalerModule = (*IBCMiddleware)(nil) +) + +// IBCMiddleware implements the ICS26 callbacks for the rate-limiting middleware. +type IBCMiddleware struct { + app porttypes.PacketUnmarshalerModule + keeper *keeper.Keeper +} + +// NewIBCMiddleware creates a new IBCMiddleware given the rate-limiting keeper. +// The underlying application and ICS4 wrapper are wired separately via SetUnderlyingApplication and SetICS4Wrapper. +func NewIBCMiddleware(k *keeper.Keeper) *IBCMiddleware { + return &IBCMiddleware{ + keeper: k, + } +} + +// OnChanOpenInit implements the IBCMiddleware interface. Call underlying app's OnChanOpenInit. +func (im *IBCMiddleware) OnChanOpenInit(ctx sdk.Context, order channeltypes.Order, connectionHops []string, portID string, channelID string, counterparty channeltypes.Counterparty, version string) (string, error) { + return im.app.OnChanOpenInit(ctx, order, connectionHops, portID, channelID, counterparty, version) +} + +// OnChanOpenTry implements the IBCMiddleware interface. Call underlying app's OnChanOpenTry. +func (im *IBCMiddleware) OnChanOpenTry(ctx sdk.Context, order channeltypes.Order, connectionHops []string, portID, channelID string, counterparty channeltypes.Counterparty, counterpartyVersion string) (string, error) { + return im.app.OnChanOpenTry(ctx, order, connectionHops, portID, channelID, counterparty, counterpartyVersion) +} + +// OnChanOpenAck implements the IBCMiddleware interface. Call underlying app's OnChanOpenAck. +func (im *IBCMiddleware) OnChanOpenAck(ctx sdk.Context, portID, channelID string, counterpartyChannelID string, counterpartyVersion string) error { + return im.app.OnChanOpenAck(ctx, portID, channelID, counterpartyChannelID, counterpartyVersion) +} + +// OnChanOpenConfirm implements the IBCMiddleware interface. Call underlying app's OnChanOpenConfirm. +func (im *IBCMiddleware) OnChanOpenConfirm(ctx sdk.Context, portID, channelID string) error { + return im.app.OnChanOpenConfirm(ctx, portID, channelID) +} + +// OnChanCloseInit implements the IBCMiddleware interface. Call underlying app's OnChanCloseInit. +func (im *IBCMiddleware) OnChanCloseInit(ctx sdk.Context, portID, channelID string) error { + return im.app.OnChanCloseInit(ctx, portID, channelID) +} + +// OnChanCloseConfirm implements the IBCMiddleware interface. Call underlying app's OnChanCloseConfirm. +func (im *IBCMiddleware) OnChanCloseConfirm(ctx sdk.Context, portID, channelID string) error { + return im.app.OnChanCloseConfirm(ctx, portID, channelID) +} + +// OnRecvPacket implements the IBCMiddleware interface. +// Rate limits the incoming packet. If the packet is allowed, call underlying app's OnRecvPacket.
+func (im *IBCMiddleware) OnRecvPacket(ctx sdk.Context, channelVersion string, packet channeltypes.Packet, relayer sdk.AccAddress) ibcexported.Acknowledgement { + if err := im.keeper.ReceiveRateLimitedPacket(ctx, packet); err != nil { + im.keeper.Logger(ctx).Error("Receive packet rate limited", "error", err) + return channeltypes.NewErrorAcknowledgement(err) + } + + // If the packet was not rate-limited, pass it down to the underlying app's OnRecvPacket callback + return im.app.OnRecvPacket(ctx, channelVersion, packet, relayer) +} + +// OnAcknowledgementPacket implements the IBCMiddleware interface. +// If the acknowledgement was an error, revert the outflow amount. +// Then, call underlying app's OnAcknowledgementPacket. +func (im *IBCMiddleware) OnAcknowledgementPacket(ctx sdk.Context, channelVersion string, packet channeltypes.Packet, acknowledgement []byte, relayer sdk.AccAddress) error { + if err := im.keeper.AcknowledgeRateLimitedPacket(ctx, packet, acknowledgement); err != nil { + im.keeper.Logger(ctx).Error("Rate limit OnAcknowledgementPacket failed", "error", err) + } + + return im.app.OnAcknowledgementPacket(ctx, channelVersion, packet, acknowledgement, relayer) +} + +// OnTimeoutPacket implements the IBCMiddleware interface. +// Revert the outflow amount. Then, call underlying app's OnTimeoutPacket. +func (im *IBCMiddleware) OnTimeoutPacket(ctx sdk.Context, channelVersion string, packet channeltypes.Packet, relayer sdk.AccAddress) error { + if err := im.keeper.TimeoutRateLimitedPacket(ctx, packet); err != nil { + im.keeper.Logger(ctx).Error("Rate limit OnTimeoutPacket failed", "error", err) + } + + return im.app.OnTimeoutPacket(ctx, channelVersion, packet, relayer) +} + +// SendPacket implements the ICS4 Wrapper interface. +// It calls the keeper's SendRateLimitedPacket function first to check the rate limit. +// If the packet is allowed, it then calls the underlying ICS4Wrapper SendPacket. +func (im *IBCMiddleware) SendPacket(ctx sdk.Context, sourcePort string, sourceChannel string, timeoutHeight clienttypes.Height, timeoutTimestamp uint64, data []byte) (uint64, error) { + err := im.keeper.SendRateLimitedPacket(ctx, sourcePort, sourceChannel, timeoutHeight, timeoutTimestamp, data) + if err != nil { + im.keeper.Logger(ctx).Error("ICS20 packet send was denied by rate limiter", "error", err) + return 0, err + } + + return im.keeper.SendPacket(ctx, sourcePort, sourceChannel, timeoutHeight, timeoutTimestamp, data) +} + +// WriteAcknowledgement implements the ICS4 Wrapper interface. +// It calls the underlying ICS4Wrapper. +func (im *IBCMiddleware) WriteAcknowledgement(ctx sdk.Context, packet ibcexported.PacketI, ack ibcexported.Acknowledgement) error { + return im.keeper.WriteAcknowledgement(ctx, packet, ack) +} + +// GetAppVersion implements the ICS4 Wrapper interface. +// It calls the underlying ICS4Wrapper. +func (im *IBCMiddleware) GetAppVersion(ctx sdk.Context, portID, channelID string) (string, bool) { + return im.keeper.GetAppVersion(ctx, portID, channelID) +} + +// UnmarshalPacketData implements the PacketDataUnmarshaler interface. +// It defers to the underlying app to unmarshal the packet data. 
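Because the constructor above receives only the keeper, the ICS4 wrapper and the underlying application are attached afterwards through the setters shown further down in this file. The sketch below shows one way an application might assemble the stack; the helper name, its parameters, and the surrounding app wiring are illustrative assumptions, not part of this diff.

package app // illustrative placement; actual application wiring will differ

import (
	ratelimiting "github.com/cosmos/ibc-go/v10/modules/apps/rate-limiting"
	ratelimitkeeper "github.com/cosmos/ibc-go/v10/modules/apps/rate-limiting/keeper"
	porttypes "github.com/cosmos/ibc-go/v10/modules/core/05-port/types"
)

// newRateLimitedTransferStack stacks the rate-limiting middleware on top of an existing
// transfer IBC module. The underlying app must also implement PacketUnmarshalerModule,
// otherwise SetUnderlyingApplication panics (see the setter later in this file).
func newRateLimitedTransferStack(k *ratelimitkeeper.Keeper, transferStack porttypes.IBCModule, ics4 porttypes.ICS4Wrapper) porttypes.IBCModule {
	mw := ratelimiting.NewIBCMiddleware(k)
	mw.SetICS4Wrapper(ics4)
	mw.SetUnderlyingApplication(transferStack)
	return mw
}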
+func (im *IBCMiddleware) UnmarshalPacketData(ctx sdk.Context, portID string, channelID string, bz []byte) (any, string, error) { + return im.app.UnmarshalPacketData(ctx, portID, channelID, bz) +} + +func (im *IBCMiddleware) SetICS4Wrapper(wrapper porttypes.ICS4Wrapper) { + if wrapper == nil { + panic("ICS4Wrapper cannot be nil") + } + im.keeper.SetICS4Wrapper(wrapper) +} + +func (im *IBCMiddleware) SetUnderlyingApplication(app porttypes.IBCModule) { + if im.app != nil { + panic("underlying application already set") + } + // the underlying application must implement the PacketUnmarshalerModule interface + pdApp, ok := app.(porttypes.PacketUnmarshalerModule) + if !ok { + panic(fmt.Errorf("underlying application must implement PacketUnmarshalerModule, got %T", app)) + } + im.app = pdApp +} diff --git a/modules/apps/rate-limiting/keeper/abci.go b/modules/apps/rate-limiting/keeper/abci.go new file mode 100644 index 00000000000..6be8f86478b --- /dev/null +++ b/modules/apps/rate-limiting/keeper/abci.go @@ -0,0 +1,26 @@ +package keeper + +import ( + sdk "github.com/cosmos/cosmos-sdk/types" +) + +// Before each hour epoch, check if any of the rate limits have expired, +// and reset them if they have +func (k *Keeper) BeginBlocker(ctx sdk.Context) { + epochStarting, epochNumber, err := k.CheckHourEpochStarting(ctx) + if err != nil { + k.Logger(ctx).Error("BeginBlocker", "error", err) + return + } + if !epochStarting { + return + } + for _, rateLimit := range k.GetAllRateLimits(ctx) { + if rateLimit.Quota.DurationHours == 0 || epochNumber%rateLimit.Quota.DurationHours != 0 { + continue + } + if err := k.ResetRateLimit(ctx, rateLimit.Path.Denom, rateLimit.Path.ChannelOrClientId); err != nil { + k.Logger(ctx).Error("Unable to reset quota", "Denom", rateLimit.Path.Denom, "ChannelOrClientId", rateLimit.Path.ChannelOrClientId, "error", err) + } + } +} diff --git a/modules/apps/rate-limiting/keeper/abci_test.go b/modules/apps/rate-limiting/keeper/abci_test.go new file mode 100644 index 00000000000..d97545d0673 --- /dev/null +++ b/modules/apps/rate-limiting/keeper/abci_test.go @@ -0,0 +1,100 @@ +package keeper_test + +import ( + "fmt" + "time" + + sdkmath "cosmossdk.io/math" + + "github.com/cosmos/ibc-go/v10/modules/apps/rate-limiting/types" +) + +// Store a rate limit with a non-zero flow for each duration +func (s *KeeperTestSuite) resetRateLimits(denom string, durations []uint64, nonZeroFlow int64) { + // Add/reset rate limit with a quota duration hours for each duration in the list + for i, duration := range durations { + channelID := fmt.Sprintf("channel-%d", i) + + s.chainA.GetSimApp().RateLimitKeeper.SetRateLimit(s.chainA.GetContext(), types.RateLimit{ + Path: &types.Path{ + Denom: denom, + ChannelOrClientId: channelID, + }, + Quota: &types.Quota{ + DurationHours: duration, + }, + Flow: &types.Flow{ + Inflow: sdkmath.NewInt(nonZeroFlow), + Outflow: sdkmath.NewInt(nonZeroFlow), + ChannelValue: sdkmath.NewInt(100), + }, + }) + } +} + +func (s *KeeperTestSuite) TestBeginBlocker_NoPanic() { + err := s.chainA.GetSimApp().RateLimitKeeper.SetHourEpoch(s.chainA.GetContext(), types.HourEpoch{ + Duration: 0, + }) + s.Require().NoError(err) + s.Require().NotPanics(func() { + s.chainA.GetSimApp().RateLimitKeeper.BeginBlocker(s.chainA.GetContext()) + }) +} + +func (s *KeeperTestSuite) TestBeginBlocker_ReturnsWhenEpochInPast() { + err := s.chainA.GetSimApp().RateLimitKeeper.SetHourEpoch(s.chainA.GetContext(), types.HourEpoch{ + Duration: time.Minute, + EpochStartTime: time.Now().Add(time.Hour * -1), + }) + 
s.Require().NoError(err) + s.Require().NotPanics(func() { + s.chainA.GetSimApp().RateLimitKeeper.BeginBlocker(s.chainA.GetContext()) + }) +} + +func (s *KeeperTestSuite) TestBeginBlocker() { + // We'll create three rate limits with different durations + // And then pass in epoch ids that will cause each to trigger a reset in order + // i.e. epochId 2 will only cause duration 2 to trigger (2 % 2 == 0; and 9 % 2 != 0; 25 % 2 != 0), + // epochId 9, will only cause duration 3 to trigger (9 % 2 != 0; and 9 % 3 == 0; 25 % 3 != 0) + // epochId 25, will only cause duration 5 to trigger (9 % 5 != 0; and 9 % 5 != 0; 25 % 5 == 0) + durations := []uint64{2, 3, 5} + epochIDs := []uint64{2, 9, 25} + nonZeroFlow := int64(10) + + blockTime := time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC) + s.coordinator.SetTime(blockTime) + + for i, epochID := range epochIDs { + // First reset the rate limits to they have a non-zero flow + s.resetRateLimits(denom, durations, nonZeroFlow) + + duration := durations[i] + channelIDFromResetRateLimit := fmt.Sprintf("channel-%d", i) + + // Setup epochs so that the hook triggers + // (epoch start time + duration must be before block time) + err := s.chainA.GetSimApp().RateLimitKeeper.SetHourEpoch(s.chainA.GetContext(), types.HourEpoch{ + EpochNumber: epochID - 1, + Duration: time.Minute, + EpochStartTime: blockTime.Add(-2 * time.Minute), + }) + s.Require().NoError(err) + s.chainA.GetSimApp().RateLimitKeeper.BeginBlocker(s.chainA.GetContext()) + + // Check rate limits (only one rate limit should reset for each hook trigger) + rateLimits := s.chainA.GetSimApp().RateLimitKeeper.GetAllRateLimits(s.chainA.GetContext()) + for _, rateLimit := range rateLimits { + context := fmt.Sprintf("duration: %d, epoch: %d", duration, epochID) + + if rateLimit.Path.ChannelOrClientId == channelIDFromResetRateLimit { + s.Require().Equal(int64(0), rateLimit.Flow.Inflow.Int64(), "inflow was not reset to 0 - %s", context) + s.Require().Equal(int64(0), rateLimit.Flow.Outflow.Int64(), "outflow was not reset to 0 - %s", context) + } else { + s.Require().Equal(nonZeroFlow, rateLimit.Flow.Inflow.Int64(), "inflow should have been left unchanged - %s", context) + s.Require().Equal(nonZeroFlow, rateLimit.Flow.Outflow.Int64(), "outflow should have been left unchanged - %s", context) + } + } + } +} diff --git a/modules/apps/rate-limiting/keeper/blacklist.go b/modules/apps/rate-limiting/keeper/blacklist.go new file mode 100644 index 00000000000..ad64b9748b8 --- /dev/null +++ b/modules/apps/rate-limiting/keeper/blacklist.go @@ -0,0 +1,54 @@ +package keeper + +import ( + "cosmossdk.io/store/prefix" + + "github.com/cosmos/cosmos-sdk/runtime" + sdk "github.com/cosmos/cosmos-sdk/types" + + "github.com/cosmos/ibc-go/v10/modules/apps/rate-limiting/types" +) + +// Adds a denom to a blacklist to prevent all IBC transfers with that denom +func (k *Keeper) AddDenomToBlacklist(ctx sdk.Context, denom string) { + adapter := runtime.KVStoreAdapter(k.storeService.OpenKVStore(ctx)) + store := prefix.NewStore(adapter, types.DenomBlacklistKeyPrefix) + key := []byte(denom) + store.Set(key, []byte{1}) +} + +// Removes a denom from a blacklist to re-enable IBC transfers for that denom +func (k *Keeper) RemoveDenomFromBlacklist(ctx sdk.Context, denom string) { + adapter := runtime.KVStoreAdapter(k.storeService.OpenKVStore(ctx)) + store := prefix.NewStore(adapter, types.DenomBlacklistKeyPrefix) + key := []byte(denom) + store.Delete(key) +} + +// Check if a denom is currently blacklisted +func (k *Keeper) IsDenomBlacklisted(ctx sdk.Context, 
denom string) bool { + adapter := runtime.KVStoreAdapter(k.storeService.OpenKVStore(ctx)) + store := prefix.NewStore(adapter, types.DenomBlacklistKeyPrefix) + + key := []byte(denom) + value := store.Get(key) + found := len(value) != 0 + + return found +} + +// Get all the blacklisted denoms +func (k *Keeper) GetAllBlacklistedDenoms(ctx sdk.Context) []string { + adapter := runtime.KVStoreAdapter(k.storeService.OpenKVStore(ctx)) + store := prefix.NewStore(adapter, types.DenomBlacklistKeyPrefix) + + iterator := store.Iterator(nil, nil) + defer iterator.Close() + + allBlacklistedDenoms := []string{} + for ; iterator.Valid(); iterator.Next() { + allBlacklistedDenoms = append(allBlacklistedDenoms, string(iterator.Key())) + } + + return allBlacklistedDenoms +} diff --git a/modules/apps/rate-limiting/keeper/blacklist_test.go b/modules/apps/rate-limiting/keeper/blacklist_test.go new file mode 100644 index 00000000000..358c541b973 --- /dev/null +++ b/modules/apps/rate-limiting/keeper/blacklist_test.go @@ -0,0 +1,47 @@ +package keeper_test + +import "slices" + +func (s *KeeperTestSuite) TestDenomBlacklist() { + allDenoms := []string{"denom1", "denom2", "denom3", "denom4"} + denomsToBlacklist := []string{"denom1", "denom3"} + + // No denoms are currently blacklisted + for _, denom := range allDenoms { + isBlacklisted := s.chainA.GetSimApp().RateLimitKeeper.IsDenomBlacklisted(s.chainA.GetContext(), denom) + s.Require().False(isBlacklisted, "%s should not be blacklisted yet", denom) + } + + // Blacklist two denoms + for _, denom := range denomsToBlacklist { + s.chainA.GetSimApp().RateLimitKeeper.AddDenomToBlacklist(s.chainA.GetContext(), denom) + } + + // Confirm half the list was blacklisted and the others were not + for _, denom := range allDenoms { + isBlacklisted := s.chainA.GetSimApp().RateLimitKeeper.IsDenomBlacklisted(s.chainA.GetContext(), denom) + + if slices.Contains(denomsToBlacklist, denom) { + s.Require().True(isBlacklisted, "%s should have been blacklisted", denom) + continue + } + s.Require().False(isBlacklisted, "%s should not have been blacklisted", denom) + } + actualBlacklistedDenoms := s.chainA.GetSimApp().RateLimitKeeper.GetAllBlacklistedDenoms(s.chainA.GetContext()) + s.Require().Len(actualBlacklistedDenoms, len(denomsToBlacklist), "number of blacklisted denoms") + s.Require().ElementsMatch(denomsToBlacklist, actualBlacklistedDenoms, "list of blacklisted denoms") + + // Finally, remove denoms from blacklist and confirm they were removed + for _, denom := range denomsToBlacklist { + s.chainA.GetSimApp().RateLimitKeeper.RemoveDenomFromBlacklist(s.chainA.GetContext(), denom) + } + for _, denom := range allDenoms { + isBlacklisted := s.chainA.GetSimApp().RateLimitKeeper.IsDenomBlacklisted(s.chainA.GetContext(), denom) + + if slices.Contains(denomsToBlacklist, denom) { + s.Require().False(isBlacklisted, "%s should have been removed from the blacklist", denom) + continue + } + s.Require().False(isBlacklisted, "%s should never have been blacklisted", denom) + } +} diff --git a/modules/apps/rate-limiting/keeper/epoch.go b/modules/apps/rate-limiting/keeper/epoch.go new file mode 100644 index 00000000000..297ec7eb1a6 --- /dev/null +++ b/modules/apps/rate-limiting/keeper/epoch.go @@ -0,0 +1,72 @@ +package keeper + +import ( + errorsmod "cosmossdk.io/errors" + + "github.com/cosmos/cosmos-sdk/runtime" + sdk "github.com/cosmos/cosmos-sdk/types" + + "github.com/cosmos/ibc-go/v10/modules/apps/rate-limiting/types" +) + +// Stores the hour epoch +func (k *Keeper) SetHourEpoch(ctx sdk.Context, epoch 
types.HourEpoch) error { + store := runtime.KVStoreAdapter(k.storeService.OpenKVStore(ctx)) + epochBz, err := k.cdc.Marshal(&epoch) + if err != nil { + return err + } + store.Set(types.HourEpochKey, epochBz) + return nil +} + +// Reads the hour epoch from the store +// Returns a zero-value epoch and logs an error if the epoch is not found or fails to unmarshal. +func (k *Keeper) GetHourEpoch(ctx sdk.Context) (types.HourEpoch, error) { + store := runtime.KVStoreAdapter(k.storeService.OpenKVStore(ctx)) + + var epoch types.HourEpoch + epochBz := store.Get(types.HourEpochKey) + if len(epochBz) == 0 { + return types.HourEpoch{}, types.ErrEpochNotFound + } + + if err := k.cdc.Unmarshal(epochBz, &epoch); err != nil { + return types.HourEpoch{}, errorsmod.Wrapf(types.ErrUnmarshalEpoch, "error: %s", err.Error()) + } + + return epoch, nil +} + +// Checks if it's time to start the new hour epoch. +// This function returns epochStarting, epochNumber and a possible error. +func (k *Keeper) CheckHourEpochStarting(ctx sdk.Context) (bool, uint64, error) { + hourEpoch, err := k.GetHourEpoch(ctx) + if err != nil { + return false, 0, err + } + + // If GetHourEpoch returned a zero-value epoch (due to error or missing key), + // we cannot proceed with the check. + if hourEpoch.Duration == 0 || hourEpoch.EpochStartTime.IsZero() { + return false, 0, errorsmod.Wrapf(types.ErrInvalidEpoce, "cannot check hour epoch starting. epoch: %v", hourEpoch) + } + + // If the block time is later than the current epoch start time + epoch duration, + // move onto the next epoch by incrementing the epoch number, height, and start time + currentEpochEndTime := hourEpoch.EpochStartTime.Add(hourEpoch.Duration) + shouldNextEpochStart := ctx.BlockTime().After(currentEpochEndTime) + if shouldNextEpochStart { + hourEpoch.EpochNumber++ + hourEpoch.EpochStartTime = currentEpochEndTime + hourEpoch.EpochStartHeight = ctx.BlockHeight() + + if err := k.SetHourEpoch(ctx, hourEpoch); err != nil { + return false, 0, err + } + return true, hourEpoch.EpochNumber, nil + } + + // Otherwise, indicate that a new epoch is not starting + return false, 0, nil +} diff --git a/modules/apps/rate-limiting/keeper/epoch_test.go b/modules/apps/rate-limiting/keeper/epoch_test.go new file mode 100644 index 00000000000..a648adff685 --- /dev/null +++ b/modules/apps/rate-limiting/keeper/epoch_test.go @@ -0,0 +1,131 @@ +package keeper_test + +import ( + "time" + + "github.com/cosmos/ibc-go/v10/modules/apps/rate-limiting/types" +) + +// Tests Get/Set Hour epoch +func (s *KeeperTestSuite) TestHourEpoch() { + expectedHourEpoch := types.HourEpoch{ + Duration: time.Hour, + EpochNumber: 1, + EpochStartTime: time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC), + EpochStartHeight: 10, + } + err := s.chainA.GetSimApp().RateLimitKeeper.SetHourEpoch(s.chainA.GetContext(), expectedHourEpoch) + s.Require().NoError(err) + + actualHourEpoch, err := s.chainA.GetSimApp().RateLimitKeeper.GetHourEpoch(s.chainA.GetContext()) + s.Require().NoError(err) + s.Require().Equal(expectedHourEpoch, actualHourEpoch, "hour epoch") +} + +func (s *KeeperTestSuite) TestCheckHourEpochStarting() { + epochStartTime := time.Date(2024, 1, 1, 0, 0, 1, 0, time.UTC) + blockHeight := int64(10) + duration := time.Minute + + initialEpoch := types.HourEpoch{ + EpochNumber: 10, + EpochStartTime: epochStartTime, + Duration: duration, + } + nextEpoch := types.HourEpoch{ + EpochNumber: initialEpoch.EpochNumber + 1, // epoch number increments + EpochStartTime: epochStartTime.Add(duration), // start time increments by 
duration + EpochStartHeight: blockHeight, // height gets current block height + Duration: duration, + } + + testCases := []struct { + name string + blockTime time.Time + expectedEpochStarting bool + initialEpoch types.HourEpoch + err error + }{ + { + name: "in middle of epoch", + blockTime: epochStartTime.Add(duration / 2), // halfway through epoch + expectedEpochStarting: false, + initialEpoch: initialEpoch, + err: nil, + }, + { + name: "right before epoch boundary", + blockTime: epochStartTime.Add(duration).Add(-1 * time.Second), // 1 second before epoch + expectedEpochStarting: false, + initialEpoch: initialEpoch, + err: nil, + }, + { + name: "at epoch boundary", + blockTime: epochStartTime.Add(duration), // at epoch boundary + expectedEpochStarting: false, + initialEpoch: initialEpoch, + err: nil, + }, + { + name: "right after epoch boundary", + blockTime: epochStartTime.Add(duration).Add(time.Second), // one second after epoch boundary + expectedEpochStarting: true, + initialEpoch: initialEpoch, + err: nil, + }, + { + name: "in middle of next epoch", + blockTime: epochStartTime.Add(duration).Add(duration / 2), // halfway through next epoch + expectedEpochStarting: true, + initialEpoch: initialEpoch, + err: nil, + }, + { + name: "next epoch skipped", + blockTime: epochStartTime.Add(duration * 10), // way after next epoch (still increments only once) + expectedEpochStarting: true, + initialEpoch: initialEpoch, + err: nil, + }, + { + name: "error - invalid epoch", + blockTime: epochStartTime.Add(duration * 10), // way after next epoch (still increments only once) + expectedEpochStarting: true, + initialEpoch: types.HourEpoch{ + Duration: 0, + }, + err: types.ErrInvalidEpoce, + }, + } + + // Set the block height to blockHeight + s.coordinator.CommitNBlocks(s.chainA, uint64(blockHeight)-uint64(s.chainA.App.LastBlockHeight()+1)) + + for _, tc := range testCases { + s.Run(tc.name, func() { + s.coordinator.SetTime(tc.blockTime) + + err := s.chainA.GetSimApp().RateLimitKeeper.SetHourEpoch(s.chainA.GetContext(), tc.initialEpoch) + s.Require().NoError(err) + + actualStarting, actualEpochNumber, err := s.chainA.GetSimApp().RateLimitKeeper.CheckHourEpochStarting(s.chainA.GetContext()) + if tc.err != nil { + s.Require().ErrorIs(err, tc.err) + return + } + s.Require().NoError(err) + s.Require().Equal(tc.expectedEpochStarting, actualStarting, "epoch starting") + + expectedEpoch := tc.initialEpoch + if tc.expectedEpochStarting { + expectedEpoch = nextEpoch + s.Require().Equal(expectedEpoch.EpochNumber, actualEpochNumber, "epoch number") + } + + actualHourEpoch, err := s.chainA.GetSimApp().RateLimitKeeper.GetHourEpoch(s.chainA.GetContext()) + s.Require().NoError(err) + s.Require().Equal(expectedEpoch, actualHourEpoch, "hour epoch") + }) + } +} diff --git a/modules/apps/rate-limiting/keeper/events.go b/modules/apps/rate-limiting/keeper/events.go new file mode 100644 index 00000000000..f37b0356a5f --- /dev/null +++ b/modules/apps/rate-limiting/keeper/events.go @@ -0,0 +1,27 @@ +package keeper + +import ( + "strings" + + sdkmath "cosmossdk.io/math" + + sdk "github.com/cosmos/cosmos-sdk/types" + + "github.com/cosmos/ibc-go/v10/modules/apps/rate-limiting/types" +) + +// If the rate limit is exceeded or the denom is blacklisted, we emit an event +func EmitTransferDeniedEvent(ctx sdk.Context, reason, denom, channelOrClientID string, direction types.PacketDirection, amount sdkmath.Int, err error) { + ctx.EventManager().EmitEvent( + sdk.NewEvent( + types.EventTransferDenied, + 
sdk.NewAttribute(sdk.AttributeKeyModule, types.ModuleName), + sdk.NewAttribute(types.AttributeKeyReason, reason), + sdk.NewAttribute(types.AttributeKeyAction, strings.ToLower(direction.String())), // packet_send or packet_recv + sdk.NewAttribute(types.AttributeKeyDenom, denom), + sdk.NewAttribute(types.AttributeKeyChannelOrClient, channelOrClientID), + sdk.NewAttribute(types.AttributeKeyAmount, amount.String()), + sdk.NewAttribute(types.AttributeKeyError, err.Error()), + ), + ) +} diff --git a/modules/apps/rate-limiting/keeper/flow.go b/modules/apps/rate-limiting/keeper/flow.go new file mode 100644 index 00000000000..3d6bc5f92bf --- /dev/null +++ b/modules/apps/rate-limiting/keeper/flow.go @@ -0,0 +1,74 @@ +package keeper + +import ( + errorsmod "cosmossdk.io/errors" + sdkmath "cosmossdk.io/math" + + sdk "github.com/cosmos/cosmos-sdk/types" + + "github.com/cosmos/ibc-go/v10/modules/apps/rate-limiting/types" +) + +// The total value on a given path (aka, the denominator in the percentage calculation) +// is the total supply of the given denom +func (k *Keeper) GetChannelValue(ctx sdk.Context, denom string) sdkmath.Int { + return k.bankKeeper.GetSupply(ctx, denom).Amount +} + +// CheckRateLimitAndUpdateFlow checks whether the given packet will exceed the rate limit. +// Called by OnRecvPacket and OnSendPacket +func (k *Keeper) CheckRateLimitAndUpdateFlow(ctx sdk.Context, direction types.PacketDirection, packetInfo RateLimitedPacketInfo) (bool, error) { + denom := packetInfo.Denom + channelOrClientID := packetInfo.ChannelID + amount := packetInfo.Amount + + // First check if the denom is blacklisted + if k.IsDenomBlacklisted(ctx, denom) { + err := errorsmod.Wrapf(types.ErrDenomIsBlacklisted, "denom %s is blacklisted", denom) + EmitTransferDeniedEvent(ctx, types.EventBlacklistedDenom, denom, channelOrClientID, direction, amount, err) + return false, err + } + + // If there's no rate limit yet for this denom, no action is necessary + rateLimit, found := k.GetRateLimit(ctx, denom, channelOrClientID) + if !found { + return false, nil + } + + // Check if the sender/receiver pair is whitelisted + // If so, return a success without modifying the quota + if k.IsAddressPairWhitelisted(ctx, packetInfo.Sender, packetInfo.Receiver) { + return false, nil + } + + // Update the flow object with the change in amount + if err := rateLimit.UpdateFlow(direction, amount); err != nil { + // If the rate limit was exceeded, emit an event + EmitTransferDeniedEvent(ctx, types.EventRateLimitExceeded, denom, channelOrClientID, direction, amount, err) + return false, err + } + + // If there's no quota error, update the rate limit object in the store with the new flow + k.SetRateLimit(ctx, rateLimit) + + return true, nil +} + +// If a SendPacket fails or times out, undo the outflow increment that happened during the send +func (k *Keeper) UndoSendPacket(ctx sdk.Context, channelOrClientID string, sequence uint64, denom string, amount sdkmath.Int) error { + rateLimit, found := k.GetRateLimit(ctx, denom, channelOrClientID) + if !found { + return nil + } + + // If the packet was sent during this quota, decrement the outflow + // Otherwise, it can be ignored + if k.CheckPacketSentDuringCurrentQuota(ctx, channelOrClientID, sequence) { + rateLimit.Flow.Outflow = rateLimit.Flow.Outflow.Sub(amount) + k.SetRateLimit(ctx, rateLimit) + + k.RemovePendingSendPacket(ctx, channelOrClientID, sequence) + } + + return nil +} diff --git a/modules/apps/rate-limiting/keeper/flow_test.go b/modules/apps/rate-limiting/keeper/flow_test.go new 
file mode 100644 index 00000000000..26d22f4f80e --- /dev/null +++ b/modules/apps/rate-limiting/keeper/flow_test.go @@ -0,0 +1,449 @@ +package keeper_test + +import ( + sdkmath "cosmossdk.io/math" + + sdk "github.com/cosmos/cosmos-sdk/types" + minttypes "github.com/cosmos/cosmos-sdk/x/mint/types" + + "github.com/cosmos/ibc-go/v10/modules/apps/rate-limiting/keeper" + "github.com/cosmos/ibc-go/v10/modules/apps/rate-limiting/types" +) + +type action struct { + direction types.PacketDirection + amount int64 + addToBlacklist bool + removeFromBlacklist bool + addToWhitelist bool + removeFromWhitelist bool + skipFlowUpdate bool + expectedError string +} + +type checkRateLimitTestCase struct { + name string + actions []action +} + +func (s *KeeperTestSuite) TestGetChannelValue() { + supply := sdkmath.NewInt(100) + + // Mint coins to increase the supply, which will increase the channel value + err := s.chainA.GetSimApp().BankKeeper.MintCoins(s.chainA.GetContext(), minttypes.ModuleName, sdk.NewCoins(sdk.NewCoin(denom, supply))) + s.Require().NoError(err) + + expected := supply + actual := s.chainA.GetSimApp().RateLimitKeeper.GetChannelValue(s.chainA.GetContext(), denom) + s.Require().Equal(expected, actual) +} + +// Adds a rate limit object to the store in preparation for the check rate limit tests +func (s *KeeperTestSuite) SetupCheckRateLimitAndUpdateFlowTest() { + channelValue := sdkmath.NewInt(100) + maxPercentSend := sdkmath.NewInt(10) + maxPercentRecv := sdkmath.NewInt(10) + + s.chainA.GetSimApp().RateLimitKeeper.SetRateLimit(s.chainA.GetContext(), types.RateLimit{ + Path: &types.Path{ + Denom: denom, + ChannelOrClientId: channelID, + }, + Quota: &types.Quota{ + MaxPercentSend: maxPercentSend, + MaxPercentRecv: maxPercentRecv, + DurationHours: 1, + }, + Flow: &types.Flow{ + Inflow: sdkmath.ZeroInt(), + Outflow: sdkmath.ZeroInt(), + ChannelValue: channelValue, + }, + }) + + s.chainA.GetSimApp().RateLimitKeeper.RemoveDenomFromBlacklist(s.chainA.GetContext(), denom) + s.chainA.GetSimApp().RateLimitKeeper.RemoveWhitelistedAddressPair(s.chainA.GetContext(), sender, receiver) +} + +// Helper function to check the rate limit across a series of transfers +func (s *KeeperTestSuite) processCheckRateLimitAndUpdateFlowTestCase(tc checkRateLimitTestCase) { + s.SetupCheckRateLimitAndUpdateFlowTest() + + expectedInflow := sdkmath.NewInt(0) + expectedOutflow := sdkmath.NewInt(0) + for i, action := range tc.actions { + if action.addToBlacklist { + s.chainA.GetSimApp().RateLimitKeeper.AddDenomToBlacklist(s.chainA.GetContext(), denom) + continue + } + + if action.removeFromBlacklist { + s.chainA.GetSimApp().RateLimitKeeper.RemoveDenomFromBlacklist(s.chainA.GetContext(), denom) + continue + } + + if action.addToWhitelist { + s.chainA.GetSimApp().RateLimitKeeper.SetWhitelistedAddressPair(s.chainA.GetContext(), types.WhitelistedAddressPair{ + Sender: sender, + Receiver: receiver, + }) + continue + } + + if action.removeFromWhitelist { + s.chainA.GetSimApp().RateLimitKeeper.RemoveWhitelistedAddressPair(s.chainA.GetContext(), sender, receiver) + continue + } + + amount := sdkmath.NewInt(action.amount) + packetInfo := keeper.RateLimitedPacketInfo{ + ChannelID: channelID, + Denom: denom, + Amount: amount, + Sender: sender, + Receiver: receiver, + } + updatedFlow, err := s.chainA.GetSimApp().RateLimitKeeper.CheckRateLimitAndUpdateFlow(s.chainA.GetContext(), action.direction, packetInfo) + + // Each action optionally errors or skips a flow update + if action.expectedError != "" { + s.Require().ErrorContains(err, 
action.expectedError, tc.name+" - action: #%d - error", i) + } else { + s.Require().NoError(err, tc.name+" - action: #%d - no error", i) + + expectedUpdateFlow := !action.skipFlowUpdate + s.Require().Equal(expectedUpdateFlow, updatedFlow, tc.name+" - action: #%d - updated flow", i) + + if expectedUpdateFlow { + if action.direction == types.PACKET_RECV { + expectedInflow = expectedInflow.Add(amount) + } else { + expectedOutflow = expectedOutflow.Add(amount) + } + } + } + + // Confirm flow is updated properly (or left as is if the theshold was exceeded) + rateLimit, found := s.chainA.GetSimApp().RateLimitKeeper.GetRateLimit(s.chainA.GetContext(), denom, channelID) + s.Require().True(found) + s.Require().Equal(expectedInflow.Int64(), rateLimit.Flow.Inflow.Int64(), tc.name+" - action: #%d - inflow", i) + s.Require().Equal(expectedOutflow.Int64(), rateLimit.Flow.Outflow.Int64(), tc.name+" - action: #%d - outflow", i) + } +} + +func (s *KeeperTestSuite) TestCheckRateLimitAndUpdateFlow_UnidirectionalFlow() { + testCases := []checkRateLimitTestCase{ + { + name: "send_under_threshold", + actions: []action{ + {direction: types.PACKET_SEND, amount: 5}, + {direction: types.PACKET_SEND, amount: 5}, + }, + }, + { + name: "send_over_threshold", + actions: []action{ + {direction: types.PACKET_SEND, amount: 5}, + { + direction: types.PACKET_SEND, amount: 6, + expectedError: "Outflow exceeds quota", + }, + }, + }, + { + name: "recv_under_threshold", + actions: []action{ + {direction: types.PACKET_RECV, amount: 5}, + {direction: types.PACKET_RECV, amount: 5}, + }, + }, + { + name: "recv_over_threshold", + actions: []action{ + {direction: types.PACKET_RECV, amount: 5}, + { + direction: types.PACKET_RECV, amount: 6, + expectedError: "Inflow exceeds quota", + }, + }, + }, + } + + for _, tc := range testCases { + s.Run(tc.name, func() { + s.processCheckRateLimitAndUpdateFlowTestCase(tc) + }) + } +} + +func (s *KeeperTestSuite) TestCheckRateLimitAndUpdatedFlow_BidirectionalFlow() { + testCases := []checkRateLimitTestCase{ + { + name: "send_then_recv_under_threshold", + actions: []action{ + {direction: types.PACKET_SEND, amount: 6}, + {direction: types.PACKET_RECV, amount: 6}, + {direction: types.PACKET_SEND, amount: 6}, + {direction: types.PACKET_RECV, amount: 6}, + }, + }, + { + name: "recv_then_send_under_threshold", + actions: []action{ + {direction: types.PACKET_RECV, amount: 6}, + {direction: types.PACKET_SEND, amount: 6}, + {direction: types.PACKET_RECV, amount: 6}, + {direction: types.PACKET_SEND, amount: 6}, + }, + }, + { + name: "send_then_recv_over_inflow", + actions: []action{ + {direction: types.PACKET_SEND, amount: 2}, + {direction: types.PACKET_RECV, amount: 6}, + {direction: types.PACKET_SEND, amount: 2}, + {direction: types.PACKET_RECV, amount: 6}, + {direction: types.PACKET_SEND, amount: 2}, + { + direction: types.PACKET_RECV, amount: 6, + expectedError: "Inflow exceeds quota", + }, + }, + }, + { + name: "send_then_recv_over_outflow", + actions: []action{ + {direction: types.PACKET_SEND, amount: 6}, + {direction: types.PACKET_RECV, amount: 2}, + {direction: types.PACKET_SEND, amount: 6}, + {direction: types.PACKET_SEND, amount: 1, expectedError: "Outflow exceeds quota"}, + }, + }, + { + name: "recv_then_send_over_inflow", + actions: []action{ + {direction: types.PACKET_RECV, amount: 6}, + {direction: types.PACKET_SEND, amount: 2}, + {direction: types.PACKET_RECV, amount: 6}, + {direction: types.PACKET_RECV, amount: 1, expectedError: "Inflow exceeds quota"}, + }, + }, + { + name: 
"recv_then_send_over_outflow", + actions: []action{ + {direction: types.PACKET_RECV, amount: 2}, + {direction: types.PACKET_SEND, amount: 6}, + {direction: types.PACKET_RECV, amount: 2}, + {direction: types.PACKET_SEND, amount: 6}, + {direction: types.PACKET_RECV, amount: 2}, + {direction: types.PACKET_SEND, amount: 6, expectedError: "Outflow exceeds quota"}, + }, + }, + } + + for _, tc := range testCases { + s.Run(tc.name, func() { + s.processCheckRateLimitAndUpdateFlowTestCase(tc) + }) + } +} + +func (s *KeeperTestSuite) TestCheckRateLimitAndUpdatedFlow_DenomBlacklist() { + testCases := []checkRateLimitTestCase{ + { + name: "add_then_remove_from_blacklist", + actions: []action{ + {direction: types.PACKET_RECV, amount: 6}, + {direction: types.PACKET_SEND, amount: 6}, + {addToBlacklist: true}, + {removeFromBlacklist: true}, + {direction: types.PACKET_RECV, amount: 6}, + {direction: types.PACKET_SEND, amount: 6}, + }, + }, + { + name: "send_recv_blacklist_send", + actions: []action{ + {direction: types.PACKET_SEND, amount: 6}, + {direction: types.PACKET_RECV, amount: 6}, + {addToBlacklist: true}, + { + direction: types.PACKET_SEND, amount: 6, + expectedError: types.ErrDenomIsBlacklisted.Error(), + }, + }, + }, + { + name: "send_recv_blacklist_recv", + actions: []action{ + {direction: types.PACKET_SEND, amount: 6}, + {direction: types.PACKET_RECV, amount: 6}, + {addToBlacklist: true}, + { + direction: types.PACKET_RECV, amount: 6, + expectedError: types.ErrDenomIsBlacklisted.Error(), + }, + }, + }, + { + name: "recv_send_blacklist_send", + actions: []action{ + {direction: types.PACKET_RECV, amount: 6}, + {direction: types.PACKET_SEND, amount: 6}, + {addToBlacklist: true}, + { + direction: types.PACKET_SEND, amount: 6, + expectedError: types.ErrDenomIsBlacklisted.Error(), + }, + }, + }, + { + name: "recv_send_blacklist_recv", + actions: []action{ + {direction: types.PACKET_RECV, amount: 6}, + {direction: types.PACKET_SEND, amount: 6}, + {addToBlacklist: true}, + { + direction: types.PACKET_RECV, amount: 6, + expectedError: types.ErrDenomIsBlacklisted.Error(), + }, + }, + }, + } + + for _, tc := range testCases { + s.Run(tc.name, func() { + s.processCheckRateLimitAndUpdateFlowTestCase(tc) + }) + } +} + +func (s *KeeperTestSuite) TestCheckRateLimitAndUpdatedFlow_AddressWhitelist() { + testCases := []checkRateLimitTestCase{ + { + name: "send_whitelist_send", + actions: []action{ + {direction: types.PACKET_SEND, amount: 6}, + {addToWhitelist: true}, + {direction: types.PACKET_SEND, amount: 6, skipFlowUpdate: true}, + }, + }, + { + name: "recv_whitelist_recv", + actions: []action{ + {direction: types.PACKET_RECV, amount: 6}, + {addToWhitelist: true}, + {direction: types.PACKET_RECV, amount: 6, skipFlowUpdate: true}, + }, + }, + { + name: "send_send_whitelist_send", + actions: []action{ + {direction: types.PACKET_SEND, amount: 6}, + {direction: types.PACKET_SEND, amount: 6, expectedError: "Outflow exceeds quota"}, + {addToWhitelist: true}, + {direction: types.PACKET_SEND, amount: 6, skipFlowUpdate: true}, + }, + }, + { + name: "recv_recv_whitelist_recv", + actions: []action{ + {direction: types.PACKET_RECV, amount: 6}, + {direction: types.PACKET_RECV, amount: 6, expectedError: "Inflow exceeds quota"}, + {addToWhitelist: true}, + {direction: types.PACKET_RECV, amount: 6, skipFlowUpdate: true}, + }, + }, + { + name: "send_recv_send_whitelist_send", + actions: []action{ + {direction: types.PACKET_SEND, amount: 6}, + {direction: types.PACKET_RECV, amount: 6}, + {direction: types.PACKET_SEND, amount: 
6}, + {addToWhitelist: true}, + {direction: types.PACKET_SEND, amount: 6, skipFlowUpdate: true}, + }, + }, + { + name: "recv_send_recv_whitelist_recv", + actions: []action{ + {direction: types.PACKET_RECV, amount: 6}, + {direction: types.PACKET_SEND, amount: 6}, + {direction: types.PACKET_RECV, amount: 6}, + {addToWhitelist: true}, + {direction: types.PACKET_RECV, amount: 6, skipFlowUpdate: true}, + }, + }, + { + name: "add_then_remove_whitelist_recv", + actions: []action{ + {direction: types.PACKET_RECV, amount: 6}, + {addToWhitelist: true}, + {removeFromWhitelist: true}, + {direction: types.PACKET_RECV, amount: 6, expectedError: "Inflow exceeds quota"}, + }, + }, + { + name: "add_then_remove_whitelist_send", + actions: []action{ + {direction: types.PACKET_SEND, amount: 6}, + {addToWhitelist: true}, + {removeFromWhitelist: true}, + {direction: types.PACKET_SEND, amount: 6, expectedError: "Outflow exceeds quota"}, + }, + }, + } + + for _, tc := range testCases { + s.Run(tc.name, func() { + s.processCheckRateLimitAndUpdateFlowTestCase(tc) + }) + } +} + +func (s *KeeperTestSuite) TestUndoSendPacket() { + // Helper function to check the rate limit outflow amount + checkOutflow := func(channelId, denom string, expectedAmount sdkmath.Int) { + rateLimit, found := s.chainA.GetSimApp().RateLimitKeeper.GetRateLimit(s.chainA.GetContext(), denom, channelId) + s.Require().True(found, "rate limit should have been found") + s.Require().Equal(expectedAmount.Int64(), rateLimit.Flow.Outflow.Int64(), "outflow - channel: %s, denom: %s", channelId, denom) + } + + // Create two rate limits + initialOutflow := sdkmath.NewInt(100) + packetSendAmount := sdkmath.NewInt(10) + rateLimit1 := types.RateLimit{ + Path: &types.Path{Denom: denom, ChannelOrClientId: channelID}, + Flow: &types.Flow{Outflow: initialOutflow}, + } + rateLimit2 := types.RateLimit{ + Path: &types.Path{Denom: "different-denom", ChannelOrClientId: "different-channel"}, + Flow: &types.Flow{Outflow: initialOutflow}, + } + s.chainA.GetSimApp().RateLimitKeeper.SetRateLimit(s.chainA.GetContext(), rateLimit1) + s.chainA.GetSimApp().RateLimitKeeper.SetRateLimit(s.chainA.GetContext(), rateLimit2) + + // Store a pending packet sequence number of 2 for the first rate limit + s.chainA.GetSimApp().RateLimitKeeper.SetPendingSendPacket(s.chainA.GetContext(), channelID, 2) + + // Undo a send of 10 from the first rate limit, with sequence 1 + // If should NOT modify the outflow since sequence 1 was not sent in the current quota + err := s.chainA.GetSimApp().RateLimitKeeper.UndoSendPacket(s.chainA.GetContext(), channelID, 1, denom, packetSendAmount) + s.Require().NoError(err, "no error expected when undoing send packet sequence 1") + + checkOutflow(channelID, denom, initialOutflow) + + // Now undo a send from the same rate limit with sequence 2 + // If should decrement the outflow since 2 is in the current quota + err = s.chainA.GetSimApp().RateLimitKeeper.UndoSendPacket(s.chainA.GetContext(), channelID, 2, denom, packetSendAmount) + s.Require().NoError(err, "no error expected when undoing send packet sequence 2") + + checkOutflow(channelID, denom, initialOutflow.Sub(packetSendAmount)) + + // Confirm the outflow of the second rate limit has not been touched + checkOutflow("different-channel", "different-denom", initialOutflow) + + // Confirm sequence number was removed + found := s.chainA.GetSimApp().RateLimitKeeper.CheckPacketSentDuringCurrentQuota(s.chainA.GetContext(), channelID, 2) + s.Require().False(found, "packet sequence number should have been removed") 
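The UpdateFlow implementation that enforces these quotas is defined outside this hunk; judging from the test expectations above, the check nets inflow against outflow and compares the result to a percentage of the channel value. The sketch below is a hedged reconstruction inferred purely from those expectations, so the function name and exact formula are assumptions rather than the module's code.

package ratelimitsketch // illustrative only

import sdkmath "cosmossdk.io/math"

// wouldExceedSendQuota returns true when adding amount to the quota-period net outflow
// (outflow minus inflow) would exceed MaxPercentSend percent of the channel value.
// With the fixtures above (ChannelValue=100, MaxPercentSend=10), sends of 5 then 6 are
// denied on the second send, while a send of 6 after a receive of 6 is still allowed.
func wouldExceedSendQuota(inflow, outflow, amount, channelValue, maxPercentSend sdkmath.Int) bool {
	threshold := channelValue.Mul(maxPercentSend).Quo(sdkmath.NewInt(100))
	netOutflow := outflow.Sub(inflow).Add(amount)
	return netOutflow.GT(threshold)
}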
+} diff --git a/modules/apps/rate-limiting/keeper/genesis.go b/modules/apps/rate-limiting/keeper/genesis.go new file mode 100644 index 00000000000..6c03662a4af --- /dev/null +++ b/modules/apps/rate-limiting/keeper/genesis.go @@ -0,0 +1,65 @@ +package keeper + +import ( + "time" + + sdk "github.com/cosmos/cosmos-sdk/types" + + "github.com/cosmos/ibc-go/v10/modules/apps/rate-limiting/types" +) + +// InitGenesis initializes the rate-limiting module's state from a provided genesis state. +func (k *Keeper) InitGenesis(ctx sdk.Context, state types.GenesisState) { + // Set rate limits, blacklists, and whitelists + for _, rateLimit := range state.RateLimits { + k.SetRateLimit(ctx, rateLimit) + } + for _, denom := range state.BlacklistedDenoms { + k.AddDenomToBlacklist(ctx, denom) + } + for _, addressPair := range state.WhitelistedAddressPairs { + k.SetWhitelistedAddressPair(ctx, addressPair) + } + + // Set pending sequence numbers - validating that they're in right format of {channelId}/{sequenceNumber} + for _, pendingPacketID := range state.PendingSendPacketSequenceNumbers { + channelOrClientID, sequence, err := types.ParsePendingPacketID(pendingPacketID) + if err != nil { + panic(err.Error()) + } + k.SetPendingSendPacket(ctx, channelOrClientID, sequence) + } + + // If the hour epoch has been initialized already (epoch number != 0), validate and then use it + if state.HourEpoch.EpochNumber > 0 { + if err := k.SetHourEpoch(ctx, state.HourEpoch); err != nil { + panic(err) + } + } else { + // If the hour epoch has not been initialized yet, set it so that the epoch number matches + // the current hour and the start time is precisely on the hour + state.HourEpoch.EpochNumber = uint64(ctx.BlockTime().Hour()) //nolint:gosec + state.HourEpoch.EpochStartTime = ctx.BlockTime().Truncate(time.Hour) + state.HourEpoch.EpochStartHeight = ctx.BlockHeight() + if err := k.SetHourEpoch(ctx, state.HourEpoch); err != nil { + panic(err) + } + } +} + +// ExportGenesis returns the rate-limiting module's exported genesis. 
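For reference, here is a hedged example of a hand-written genesis state for this module, using only field names that appear in this file and its test; the denom, channel, and numeric values are illustrative.

package genesissketch // illustrative only

import (
	sdkmath "cosmossdk.io/math"

	"github.com/cosmos/ibc-go/v10/modules/apps/rate-limiting/types"
)

// exampleGenesisState builds a GenesisState with one rate limit, one blacklisted denom,
// and one whitelisted address pair. HourEpoch is left zero-valued so that InitGenesis
// derives the epoch number and start time from the current block time, as shown above.
func exampleGenesisState() types.GenesisState {
	return types.GenesisState{
		RateLimits: []types.RateLimit{{
			Path:  &types.Path{Denom: "uatom", ChannelOrClientId: "channel-0"},
			Quota: &types.Quota{MaxPercentSend: sdkmath.NewInt(10), MaxPercentRecv: sdkmath.NewInt(10), DurationHours: 24},
			Flow:  &types.Flow{Inflow: sdkmath.ZeroInt(), Outflow: sdkmath.ZeroInt(), ChannelValue: sdkmath.NewInt(1_000_000)},
		}},
		BlacklistedDenoms:                []string{"denomA"},
		WhitelistedAddressPairs:          []types.WhitelistedAddressPair{{Sender: "senderA", Receiver: "receiverA"}},
		PendingSendPacketSequenceNumbers: []string{"channel-0/1"},
	}
}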
+func (k *Keeper) ExportGenesis(ctx sdk.Context) *types.GenesisState { + rateLimits := k.GetAllRateLimits(ctx) + hourEpoch, err := k.GetHourEpoch(ctx) + if err != nil { + panic(err) + } + + return &types.GenesisState{ + RateLimits: rateLimits, + BlacklistedDenoms: k.GetAllBlacklistedDenoms(ctx), + WhitelistedAddressPairs: k.GetAllWhitelistedAddressPairs(ctx), + PendingSendPacketSequenceNumbers: k.GetAllPendingSendPackets(ctx), + HourEpoch: hourEpoch, + } +} diff --git a/modules/apps/rate-limiting/keeper/genesis_test.go b/modules/apps/rate-limiting/keeper/genesis_test.go new file mode 100644 index 00000000000..162c5057f77 --- /dev/null +++ b/modules/apps/rate-limiting/keeper/genesis_test.go @@ -0,0 +1,110 @@ +package keeper_test + +import ( + "strconv" + "time" + + sdkmath "cosmossdk.io/math" + + "github.com/cosmos/ibc-go/v10/modules/apps/rate-limiting/types" +) + +func createRateLimits() []types.RateLimit { + rateLimits := []types.RateLimit{} + for i := int64(1); i <= 3; i++ { + suffix := strconv.Itoa(int(i)) + rateLimit := types.RateLimit{ + Path: &types.Path{Denom: "denom-" + suffix, ChannelOrClientId: "channel-" + suffix}, + Quota: &types.Quota{MaxPercentSend: sdkmath.NewInt(i), MaxPercentRecv: sdkmath.NewInt(i), DurationHours: uint64(i)}, //nolint:gosec + Flow: &types.Flow{Inflow: sdkmath.NewInt(i), Outflow: sdkmath.NewInt(i), ChannelValue: sdkmath.NewInt(i)}, + } + + rateLimits = append(rateLimits, rateLimit) + } + return rateLimits +} + +func (s *KeeperTestSuite) TestGenesis() { + currentHour := 13 + blockTime := time.Date(2024, 1, 1, currentHour, 55, 8, 0, time.UTC) // 13:55:08 + blockHeight := int64(10) + + testCases := []struct { + name string + genesisState types.GenesisState + firstEpoch bool + panicError string + }{ + { + name: "valid default state", + genesisState: *types.DefaultGenesis(), + firstEpoch: true, + }, + { + name: "valid custom state", + genesisState: types.GenesisState{ + RateLimits: createRateLimits(), + WhitelistedAddressPairs: []types.WhitelistedAddressPair{ + {Sender: "senderA", Receiver: "receiverA"}, + {Sender: "senderB", Receiver: "receiverB"}, + }, + BlacklistedDenoms: []string{"denomA", "denomB"}, + PendingSendPacketSequenceNumbers: []string{"channel-0/1", "channel-2/3"}, + HourEpoch: types.HourEpoch{ + EpochNumber: 1, + EpochStartTime: blockTime, + Duration: time.Minute, + EpochStartHeight: 1, + }, + }, + firstEpoch: false, + }, + { + name: "invalid packet sequence - wrong delimiter", + genesisState: types.GenesisState{ + RateLimits: createRateLimits(), + PendingSendPacketSequenceNumbers: []string{"channel-0/1", "channel-2|3"}, + }, + panicError: "invalid pending send packet (channel-2|3), must be of form: {channelId}/{sequenceNumber}", + }, + } + + // Establish base height and time before the loop + s.coordinator.CommitNBlocks(s.chainA, uint64(blockHeight-s.chainA.App.LastBlockHeight()+1)) + s.coordinator.SetTime(blockTime) + + for _, tc := range testCases { + s.Run(tc.name, func() { + if tc.panicError != "" { + s.Require().PanicsWithValue(tc.panicError, func() { + s.chainA.GetSimApp().RateLimitKeeper.InitGenesis(s.chainA.GetContext(), tc.genesisState) + }) + return + } + s.chainA.GetSimApp().RateLimitKeeper.InitGenesis(s.chainA.GetContext(), tc.genesisState) + + // If the hour epoch was not initialized in the raw genState, + // it will be initialized during InitGenesis + expectedGenesis := tc.genesisState + + // For the default genesis with firstEpoch=true, InitGenesis will set the HourEpoch fields + // based on the current block time and height + if 
tc.firstEpoch { + // Get the context to retrieve current height + ctx := s.chainA.GetContext() + + // For a new epoch, InitGenesis will: + // - Set EpochNumber to current hour (13 from blockTime) + // - Set EpochStartTime to the truncated hour (13:00:00) + // - Set EpochStartHeight to current block height + expectedGenesis.HourEpoch.EpochNumber = uint64(blockTime.Hour()) + expectedGenesis.HourEpoch.EpochStartTime = blockTime.Truncate(time.Hour) + expectedGenesis.HourEpoch.EpochStartHeight = ctx.BlockHeight() + } + + // Check that the exported state matches the imported state + exportedState := s.chainA.GetSimApp().RateLimitKeeper.ExportGenesis(s.chainA.GetContext()) + s.Require().Equal(expectedGenesis, *exportedState, "exported genesis state") + }) + } +} diff --git a/modules/apps/rate-limiting/keeper/grpc_query.go b/modules/apps/rate-limiting/keeper/grpc_query.go new file mode 100644 index 00000000000..123f527beca --- /dev/null +++ b/modules/apps/rate-limiting/keeper/grpc_query.go @@ -0,0 +1,135 @@ +package keeper + +import ( + "context" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + errorsmod "cosmossdk.io/errors" + + sdk "github.com/cosmos/cosmos-sdk/types" + + "github.com/cosmos/ibc-go/v10/modules/apps/rate-limiting/types" + transfertypes "github.com/cosmos/ibc-go/v10/modules/apps/transfer/types" + "github.com/cosmos/ibc-go/v10/modules/core/exported" + tmclient "github.com/cosmos/ibc-go/v10/modules/light-clients/07-tendermint" +) + +var _ types.QueryServer = Querier{} + +type Querier struct { + k *Keeper +} + +func NewQuerier(keeper *Keeper) Querier { + return Querier{k: keeper} +} + +// Query all rate limits +func (k Querier) AllRateLimits(c context.Context, req *types.QueryAllRateLimitsRequest) (*types.QueryAllRateLimitsResponse, error) { + if req == nil { + return nil, status.Error(codes.InvalidArgument, "invalid request") + } + + ctx := sdk.UnwrapSDKContext(c) + + rateLimits := k.k.GetAllRateLimits(ctx) + return &types.QueryAllRateLimitsResponse{RateLimits: rateLimits}, nil +} + +// Query a rate limit by denom and channelId +func (k Querier) RateLimit(c context.Context, req *types.QueryRateLimitRequest) (*types.QueryRateLimitResponse, error) { + if req == nil { + return nil, status.Error(codes.InvalidArgument, "invalid request") + } + + ctx := sdk.UnwrapSDKContext(c) + + rateLimit, found := k.k.GetRateLimit(ctx, req.Denom, req.ChannelOrClientId) + if !found { + return &types.QueryRateLimitResponse{}, nil + } + return &types.QueryRateLimitResponse{RateLimit: &rateLimit}, nil +} + +// Query all rate limits for a given chain +func (k Querier) RateLimitsByChainID(c context.Context, req *types.QueryRateLimitsByChainIDRequest) (*types.QueryRateLimitsByChainIDResponse, error) { + if req == nil { + return nil, status.Error(codes.InvalidArgument, "invalid request") + } + + ctx := sdk.UnwrapSDKContext(c) + + rateLimits := make([]types.RateLimit, 0) + for _, rateLimit := range k.k.GetAllRateLimits(ctx) { + // Determine the client state from the channel Id + _, clientState, err := k.k.channelKeeper.GetChannelClientState(ctx, transfertypes.PortID, rateLimit.Path.ChannelOrClientId) + if err != nil { + var ok bool + clientState, ok = k.k.clientKeeper.GetClientState(ctx, rateLimit.Path.ChannelOrClientId) + if !ok { + return &types.QueryRateLimitsByChainIDResponse{}, errorsmod.Wrapf(types.ErrInvalidClientState, "Unable to fetch client state from channel or client Id %s", rateLimit.Path.ChannelOrClientId) + } + } + + // Check if the client state is a tendermint client + if 
clientState.ClientType() != exported.Tendermint { + continue + } + + // Type assert to tendermint client state + tmClientState, ok := clientState.(*tmclient.ClientState) + if !ok { + // This should never happen if ClientType() == Tendermint, but check anyway + continue + } + + // If the chain ID matches, add the rate limit to the returned list + if tmClientState.GetChainID() == req.ChainId { + rateLimits = append(rateLimits, rateLimit) + } + } + + return &types.QueryRateLimitsByChainIDResponse{RateLimits: rateLimits}, nil +} + +// Query all rate limits for a given channel +func (k Querier) RateLimitsByChannelOrClientID(c context.Context, req *types.QueryRateLimitsByChannelOrClientIDRequest) (*types.QueryRateLimitsByChannelOrClientIDResponse, error) { + if req == nil { + return nil, status.Error(codes.InvalidArgument, "invalid request") + } + + ctx := sdk.UnwrapSDKContext(c) + + rateLimits := make([]types.RateLimit, 0) + for _, rateLimit := range k.k.GetAllRateLimits(ctx) { + if rateLimit.Path.ChannelOrClientId == req.ChannelOrClientId { + rateLimits = append(rateLimits, rateLimit) + } + } + + return &types.QueryRateLimitsByChannelOrClientIDResponse{RateLimits: rateLimits}, nil +} + +// Query all blacklisted denoms +func (k Querier) AllBlacklistedDenoms(c context.Context, req *types.QueryAllBlacklistedDenomsRequest) (*types.QueryAllBlacklistedDenomsResponse, error) { + if req == nil { + return nil, status.Error(codes.InvalidArgument, "invalid request") + } + + ctx := sdk.UnwrapSDKContext(c) + blacklistedDenoms := k.k.GetAllBlacklistedDenoms(ctx) + return &types.QueryAllBlacklistedDenomsResponse{Denoms: blacklistedDenoms}, nil +} + +// Query all whitelisted addresses +func (k Querier) AllWhitelistedAddresses(c context.Context, req *types.QueryAllWhitelistedAddressesRequest) (*types.QueryAllWhitelistedAddressesResponse, error) { + if req == nil { + return nil, status.Error(codes.InvalidArgument, "invalid request") + } + + ctx := sdk.UnwrapSDKContext(c) + whitelistedAddresses := k.k.GetAllWhitelistedAddressPairs(ctx) + return &types.QueryAllWhitelistedAddressesResponse{AddressPairs: whitelistedAddresses}, nil +} diff --git a/modules/apps/rate-limiting/keeper/grpc_query_test.go b/modules/apps/rate-limiting/keeper/grpc_query_test.go new file mode 100644 index 00000000000..8d24614a76b --- /dev/null +++ b/modules/apps/rate-limiting/keeper/grpc_query_test.go @@ -0,0 +1,125 @@ +package keeper_test + +import ( + "fmt" + "time" + + "github.com/cosmos/ibc-go/v10/modules/apps/rate-limiting/keeper" + "github.com/cosmos/ibc-go/v10/modules/apps/rate-limiting/types" + transfertypes "github.com/cosmos/ibc-go/v10/modules/apps/transfer/types" + clienttypes "github.com/cosmos/ibc-go/v10/modules/core/02-client/types" + connectiontypes "github.com/cosmos/ibc-go/v10/modules/core/03-connection/types" + channeltypes "github.com/cosmos/ibc-go/v10/modules/core/04-channel/types" + ibctmtypes "github.com/cosmos/ibc-go/v10/modules/light-clients/07-tendermint" +) + +// Add three rate limits on different channels +// Each should have a different chainId +func (s *KeeperTestSuite) setupQueryRateLimitTests() []types.RateLimit { + s.T().Helper() + + rateLimits := []types.RateLimit{} + for i := range int64(3) { + clientID := fmt.Sprintf("07-tendermint-%d", i) + chainID := fmt.Sprintf("chain-%d", i) + connectionID := fmt.Sprintf("connection-%d", i) + channelID := fmt.Sprintf("channel-%d", i) + + // First register the client, connection, and channel (so we can map back to chainId) + // Nothing in the client state matters besides 
the chainId + clientState := ibctmtypes.NewClientState(chainID, ibctmtypes.Fraction{}, time.Duration(0), time.Duration(0), time.Duration(0), clienttypes.Height{}, nil, nil) + connection := connectiontypes.ConnectionEnd{ClientId: clientID} + channel := channeltypes.Channel{ConnectionHops: []string{connectionID}} + + s.chainA.GetSimApp().IBCKeeper.ClientKeeper.SetClientState(s.chainA.GetContext(), clientID, clientState) + s.chainA.GetSimApp().IBCKeeper.ConnectionKeeper.SetConnection(s.chainA.GetContext(), connectionID, connection) + s.chainA.GetSimApp().IBCKeeper.ChannelKeeper.SetChannel(s.chainA.GetContext(), transfertypes.PortID, channelID, channel) + + // Then add the rate limit + rateLimit := types.RateLimit{ + Path: &types.Path{Denom: "denom", ChannelOrClientId: channelID}, + } + s.chainA.GetSimApp().RateLimitKeeper.SetRateLimit(s.chainA.GetContext(), rateLimit) + rateLimits = append(rateLimits, rateLimit) + } + return rateLimits +} + +func (s *KeeperTestSuite) TestQueryAllRateLimits() { + querier := keeper.NewQuerier(s.chainA.GetSimApp().RateLimitKeeper) + expectedRateLimits := s.setupQueryRateLimitTests() + queryResponse, err := querier.AllRateLimits(s.chainA.GetContext(), &types.QueryAllRateLimitsRequest{}) + s.Require().NoError(err) + s.Require().ElementsMatch(expectedRateLimits, queryResponse.RateLimits) +} + +func (s *KeeperTestSuite) TestQueryRateLimit() { + querier := keeper.NewQuerier(s.chainA.GetSimApp().RateLimitKeeper) + allRateLimits := s.setupQueryRateLimitTests() + for _, expectedRateLimit := range allRateLimits { + queryResponse, err := querier.RateLimit(s.chainA.GetContext(), &types.QueryRateLimitRequest{ + Denom: expectedRateLimit.Path.Denom, + ChannelOrClientId: expectedRateLimit.Path.ChannelOrClientId, + }) + s.Require().NoError(err, "no error expected when querying rate limit on channel: %s", expectedRateLimit.Path.ChannelOrClientId) + s.Require().Equal(expectedRateLimit, *queryResponse.RateLimit) + } +} + +func (s *KeeperTestSuite) TestQueryRateLimitsByChainId() { + querier := keeper.NewQuerier(s.chainA.GetSimApp().RateLimitKeeper) + allRateLimits := s.setupQueryRateLimitTests() + for i, expectedRateLimit := range allRateLimits { + chainID := fmt.Sprintf("chain-%d", i) + queryResponse, err := querier.RateLimitsByChainID(s.chainA.GetContext(), &types.QueryRateLimitsByChainIDRequest{ + ChainId: chainID, + }) + s.Require().NoError(err, "no error expected when querying rate limit on chain: %s", chainID) + s.Require().Len(queryResponse.RateLimits, 1) + s.Require().Equal(expectedRateLimit, queryResponse.RateLimits[0]) + } +} + +func (s *KeeperTestSuite) TestQueryRateLimitsByChannelOrClientId() { + querier := keeper.NewQuerier(s.chainA.GetSimApp().RateLimitKeeper) + allRateLimits := s.setupQueryRateLimitTests() + for i, expectedRateLimit := range allRateLimits { + channelID := fmt.Sprintf("channel-%d", i) + queryResponse, err := querier.RateLimitsByChannelOrClientID(s.chainA.GetContext(), &types.QueryRateLimitsByChannelOrClientIDRequest{ + ChannelOrClientId: channelID, + }) + s.Require().NoError(err, "no error expected when querying rate limit on channel: %s", channelID) + s.Require().Len(queryResponse.RateLimits, 1) + s.Require().Equal(expectedRateLimit, queryResponse.RateLimits[0]) + } +} + +func (s *KeeperTestSuite) TestQueryAllBlacklistedDenoms() { + querier := keeper.NewQuerier(s.chainA.GetSimApp().RateLimitKeeper) + s.chainA.GetSimApp().RateLimitKeeper.AddDenomToBlacklist(s.chainA.GetContext(), "denom-A") + 
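Once this module is wired into a running chain, the queriers above are reachable not only through keeper.NewQuerier (as in these tests) but also over gRPC via the protobuf-generated query client. A minimal client-side sketch follows; the NewQueryClient constructor and method names are assumed from the usual gogoproto codegen conventions, and the endpoint address and chain ID are illustrative.

package main

import (
	"context"
	"fmt"
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"

	ratelimittypes "github.com/cosmos/ibc-go/v10/modules/apps/rate-limiting/types"
)

func main() {
	// Dial a node's gRPC endpoint (address is illustrative).
	conn, err := grpc.Dial("localhost:9090", grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// NewQueryClient is assumed to exist per the standard protobuf codegen for Query services.
	queryClient := ratelimittypes.NewQueryClient(conn)

	// Fetch every rate limit whose channel ultimately maps to the given counterparty chain ID,
	// mirroring the channel -> connection -> client lookup performed by the querier above.
	res, err := queryClient.RateLimitsByChainID(context.Background(), &ratelimittypes.QueryRateLimitsByChainIDRequest{
		ChainId: "chain-0",
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, rl := range res.RateLimits {
		fmt.Printf("denom=%s channel=%s\n", rl.Path.Denom, rl.Path.ChannelOrClientId)
	}
}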
s.chainA.GetSimApp().RateLimitKeeper.AddDenomToBlacklist(s.chainA.GetContext(), "denom-B") + + queryResponse, err := querier.AllBlacklistedDenoms(s.chainA.GetContext(), &types.QueryAllBlacklistedDenomsRequest{}) + s.Require().NoError(err, "no error expected when querying blacklisted denoms") + s.Require().Equal([]string{"denom-A", "denom-B"}, queryResponse.Denoms) +} + +func (s *KeeperTestSuite) TestQueryAllWhitelistedAddresses() { + querier := keeper.NewQuerier(s.chainA.GetSimApp().RateLimitKeeper) + s.chainA.GetSimApp().RateLimitKeeper.SetWhitelistedAddressPair(s.chainA.GetContext(), types.WhitelistedAddressPair{ + Sender: "address-A", + Receiver: "address-B", + }) + s.chainA.GetSimApp().RateLimitKeeper.SetWhitelistedAddressPair(s.chainA.GetContext(), types.WhitelistedAddressPair{ + Sender: "address-C", + Receiver: "address-D", + }) + queryResponse, err := querier.AllWhitelistedAddresses(s.chainA.GetContext(), &types.QueryAllWhitelistedAddressesRequest{}) + s.Require().NoError(err, "no error expected when querying whitelisted addresses") + + expectedWhitelist := []types.WhitelistedAddressPair{ + {Sender: "address-A", Receiver: "address-B"}, + {Sender: "address-C", Receiver: "address-D"}, + } + s.Require().Equal(expectedWhitelist, queryResponse.AddressPairs) +} diff --git a/modules/apps/rate-limiting/keeper/ics4.go b/modules/apps/rate-limiting/keeper/ics4.go new file mode 100644 index 00000000000..4930be7916d --- /dev/null +++ b/modules/apps/rate-limiting/keeper/ics4.go @@ -0,0 +1,20 @@ +package keeper + +import ( + sdk "github.com/cosmos/cosmos-sdk/types" + + clienttypes "github.com/cosmos/ibc-go/v10/modules/core/02-client/types" + "github.com/cosmos/ibc-go/v10/modules/core/exported" +) + +func (k *Keeper) SendPacket(ctx sdk.Context, sourcePort string, sourceChannel string, timeoutHeight clienttypes.Height, timeoutTimestamp uint64, data []byte) (uint64, error) { + return k.ics4Wrapper.SendPacket(ctx, sourcePort, sourceChannel, timeoutHeight, timeoutTimestamp, data) +} + +func (k *Keeper) WriteAcknowledgement(ctx sdk.Context, packet exported.PacketI, ack exported.Acknowledgement) error { + return k.ics4Wrapper.WriteAcknowledgement(ctx, packet, ack) +} + +func (k *Keeper) GetAppVersion(ctx sdk.Context, portID, channelID string) (string, bool) { + return k.ics4Wrapper.GetAppVersion(ctx, portID, channelID) +} diff --git a/modules/apps/rate-limiting/keeper/keeper.go b/modules/apps/rate-limiting/keeper/keeper.go new file mode 100644 index 00000000000..4f8313c5e5d --- /dev/null +++ b/modules/apps/rate-limiting/keeper/keeper.go @@ -0,0 +1,73 @@ +package keeper + +import ( + "errors" + "fmt" + "strings" + + "cosmossdk.io/core/address" + corestore "cosmossdk.io/core/store" + "cosmossdk.io/log" + + "github.com/cosmos/cosmos-sdk/codec" + sdk "github.com/cosmos/cosmos-sdk/types" + + "github.com/cosmos/ibc-go/v10/modules/apps/rate-limiting/types" + porttypes "github.com/cosmos/ibc-go/v10/modules/core/05-port/types" +) + +// Keeper maintains the link to storage and exposes getter/setter methods for the various parts of the state machine +type Keeper struct { + storeService corestore.KVStoreService + cdc codec.BinaryCodec + addressCodec address.Codec + + ics4Wrapper porttypes.ICS4Wrapper + channelKeeper types.ChannelKeeper + clientKeeper types.ClientKeeper + + bankKeeper types.BankKeeper + authority string +} + +// NewKeeper creates a new rate-limiting Keeper instance +func NewKeeper(cdc codec.BinaryCodec, addressCodec address.Codec, storeService corestore.KVStoreService, channelKeeper 
types.ChannelKeeper, clientKeeper types.ClientKeeper, bankKeeper types.BankKeeper, authority string) *Keeper { + if strings.TrimSpace(authority) == "" { + panic(errors.New("authority must be non-empty")) + } + + return &Keeper{ + cdc: cdc, + addressCodec: addressCodec, + storeService: storeService, + // Defaults to using the channel keeper as the ICS4Wrapper + // This can be overridden later with WithICS4Wrapper (e.g. by the middleware stack wiring) + ics4Wrapper: channelKeeper, + channelKeeper: channelKeeper, + clientKeeper: clientKeeper, + bankKeeper: bankKeeper, + authority: authority, + } +} + +// SetICS4Wrapper sets the ICS4Wrapper. +// It is used after the middleware is created since the keeper needs the underlying module's SendPacket capability, +// creating a dependency cycle. +func (k *Keeper) SetICS4Wrapper(ics4Wrapper porttypes.ICS4Wrapper) { + k.ics4Wrapper = ics4Wrapper +} + +// ICS4Wrapper returns the ICS4Wrapper to send packets downstream. +func (k *Keeper) ICS4Wrapper() porttypes.ICS4Wrapper { + return k.ics4Wrapper +} + +// GetAuthority returns the module's authority. +func (k *Keeper) GetAuthority() string { + return k.authority +} + +// Logger returns a module-specific logger. +func (*Keeper) Logger(ctx sdk.Context) log.Logger { + return ctx.Logger().With("module", fmt.Sprintf("x/%s", types.ModuleName)) +} diff --git a/modules/apps/rate-limiting/keeper/keeper_test.go b/modules/apps/rate-limiting/keeper/keeper_test.go new file mode 100644 index 00000000000..963d95c8333 --- /dev/null +++ b/modules/apps/rate-limiting/keeper/keeper_test.go @@ -0,0 +1,101 @@ +package keeper_test + +import ( + "testing" + + testifysuite "github.com/stretchr/testify/suite" + + "github.com/cosmos/cosmos-sdk/runtime" + + "github.com/cosmos/ibc-go/v10/modules/apps/rate-limiting/keeper" + ratelimittypes "github.com/cosmos/ibc-go/v10/modules/apps/rate-limiting/types" + ibctesting "github.com/cosmos/ibc-go/v10/testing" + ibcmock "github.com/cosmos/ibc-go/v10/testing/mock" +) + +type KeeperTestSuite struct { + testifysuite.Suite + + coordinator *ibctesting.Coordinator + + chainA *ibctesting.TestChain + chainB *ibctesting.TestChain + chainC *ibctesting.TestChain +} + +func (s *KeeperTestSuite) SetupTest() { + s.coordinator = ibctesting.NewCoordinator(s.T(), 3) + s.chainA = s.coordinator.GetChain(ibctesting.GetChainID(1)) + s.chainB = s.coordinator.GetChain(ibctesting.GetChainID(2)) + s.chainC = s.coordinator.GetChain(ibctesting.GetChainID(3)) +} + +func TestKeeperTestSuite(t *testing.T) { + testifysuite.Run(t, new(KeeperTestSuite)) +} + +func (s *KeeperTestSuite) TestNewKeeper() { + testCases := []struct { + name string + instantiateFn func() + panicMsg string + }{ + { + name: "success", + instantiateFn: func() { + keeper.NewKeeper( + s.chainA.GetSimApp().AppCodec(), + s.chainA.GetSimApp().AccountKeeper.AddressCodec(), + runtime.NewKVStoreService(s.chainA.GetSimApp().GetKey(ratelimittypes.StoreKey)), + s.chainA.GetSimApp().IBCKeeper.ChannelKeeper, + s.chainA.GetSimApp().IBCKeeper.ClientKeeper, // Add clientKeeper + s.chainA.GetSimApp().BankKeeper, + s.chainA.GetSimApp().ICAHostKeeper.GetAuthority(), + ) + }, + panicMsg: "", + }, + { + name: "success: custom address codec", + instantiateFn: func() { + keeper.NewKeeper( + s.chainA.GetSimApp().AppCodec(), + ibcmock.TestAddressCodec{}, + runtime.NewKVStoreService(s.chainA.GetSimApp().GetKey(ratelimittypes.StoreKey)), + s.chainA.GetSimApp().IBCKeeper.ChannelKeeper, + s.chainA.GetSimApp().IBCKeeper.ClientKeeper, // Add clientKeeper + 
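For context on how NewKeeper and SetICS4Wrapper above are meant to be used together: the keeper is built once during app wiring, the middleware wraps the transfer stack, and the ICS4 wrapper can be swapped in afterwards. A rough wiring sketch follows; the NewIBCMiddleware constructor shape is an assumption (only the IBCMiddleware type itself appears later in this diff), so treat the whole function as illustrative rather than the app's actual wiring.

package wiring

import (
	"cosmossdk.io/core/address"
	corestore "cosmossdk.io/core/store"

	"github.com/cosmos/cosmos-sdk/codec"

	ratelimiting "github.com/cosmos/ibc-go/v10/modules/apps/rate-limiting"
	ratelimitkeeper "github.com/cosmos/ibc-go/v10/modules/apps/rate-limiting/keeper"
	ratelimittypes "github.com/cosmos/ibc-go/v10/modules/apps/rate-limiting/types"
	porttypes "github.com/cosmos/ibc-go/v10/modules/core/05-port/types"
)

// newRateLimitedTransferStack sketches one possible way to wire the keeper and middleware.
func newRateLimitedTransferStack(
	cdc codec.BinaryCodec,
	addrCodec address.Codec,
	storeService corestore.KVStoreService,
	channelKeeper ratelimittypes.ChannelKeeper,
	clientKeeper ratelimittypes.ClientKeeper,
	bankKeeper ratelimittypes.BankKeeper,
	authority string,
	transferStack porttypes.IBCModule,
	ics4Wrapper porttypes.ICS4Wrapper,
) porttypes.IBCModule {
	// The keeper initially forwards packets through the channel keeper (see NewKeeper above).
	k := ratelimitkeeper.NewKeeper(cdc, addrCodec, storeService, channelKeeper, clientKeeper, bankKeeper, authority)

	// Hypothetical constructor shape: wrap the transfer stack with the rate-limiting middleware.
	stack := ratelimiting.NewIBCMiddleware(k, transferStack)

	// Override the default ICS4 wrapper with whatever should sit between this keeper
	// and core IBC (for example another middleware's keeper), per SetICS4Wrapper above.
	k.SetICS4Wrapper(ics4Wrapper)

	return stack
}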
s.chainA.GetSimApp().BankKeeper, + s.chainA.GetSimApp().ICAHostKeeper.GetAuthority(), + ) + }, + panicMsg: "", + }, + { + name: "failure: empty authority", + instantiateFn: func() { + keeper.NewKeeper( + s.chainA.GetSimApp().AppCodec(), + s.chainA.GetSimApp().AccountKeeper.AddressCodec(), + runtime.NewKVStoreService(s.chainA.GetSimApp().GetKey(ratelimittypes.StoreKey)), + s.chainA.GetSimApp().IBCKeeper.ChannelKeeper, + s.chainA.GetSimApp().IBCKeeper.ClientKeeper, // clientKeeper + s.chainA.GetSimApp().BankKeeper, + "", // empty authority + ) + }, + panicMsg: "authority must be non-empty", + }, + } + + for _, tc := range testCases { + s.SetupTest() + + s.Run(tc.name, func() { + if tc.panicMsg == "" { + s.Require().NotPanics(tc.instantiateFn) + } else { + s.Require().PanicsWithError(tc.panicMsg, tc.instantiateFn) + } + }) + } +} diff --git a/modules/apps/rate-limiting/keeper/msg_server.go b/modules/apps/rate-limiting/keeper/msg_server.go new file mode 100644 index 00000000000..37985cfde84 --- /dev/null +++ b/modules/apps/rate-limiting/keeper/msg_server.go @@ -0,0 +1,102 @@ +package keeper + +import ( + "context" + + errorsmod "cosmossdk.io/errors" + + sdk "github.com/cosmos/cosmos-sdk/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" + govtypes "github.com/cosmos/cosmos-sdk/x/gov/types" + + "github.com/cosmos/ibc-go/v10/modules/apps/rate-limiting/types" +) + +type msgServer struct { + *Keeper +} + +// NewMsgServerImpl returns an implementation of the ratelimit MsgServer interface +func NewMsgServerImpl(keeper *Keeper) types.MsgServer { + return &msgServer{Keeper: keeper} +} + +var _ types.MsgServer = msgServer{} + +// Adds a new rate limit. Fails if the rate limit already exists or the channel value is 0 +func (k msgServer) AddRateLimit(goCtx context.Context, msg *types.MsgAddRateLimit) (*types.MsgAddRateLimitResponse, error) { + _, err := k.addressCodec.StringToBytes(msg.Signer) + if err != nil { + return nil, errorsmod.Wrapf(sdkerrors.ErrInvalidAddress, "invalid signer address: %s", msg.Signer) + } + + if k.authority != msg.Signer { + return nil, errorsmod.Wrapf(govtypes.ErrInvalidSigner, "invalid authority; expected %s, got %s", k.authority, msg.Signer) + } + + ctx := sdk.UnwrapSDKContext(goCtx) + if err := k.Keeper.AddRateLimit(ctx, msg); err != nil { + return nil, err + } + + return &types.MsgAddRateLimitResponse{}, nil +} + +// Updates an existing rate limit. Fails if the rate limit doesn't exist +func (k msgServer) UpdateRateLimit(goCtx context.Context, msg *types.MsgUpdateRateLimit) (*types.MsgUpdateRateLimitResponse, error) { + _, err := k.addressCodec.StringToBytes(msg.Signer) + if err != nil { + return nil, errorsmod.Wrapf(sdkerrors.ErrInvalidAddress, "invalid signer address: %s", msg.Signer) + } + + if k.authority != msg.Signer { + return nil, errorsmod.Wrapf(govtypes.ErrInvalidSigner, "invalid authority; expected %s, got %s", k.authority, msg.Signer) + } + + ctx := sdk.UnwrapSDKContext(goCtx) + if err := k.Keeper.UpdateRateLimit(ctx, msg); err != nil { + return nil, err + } + + return &types.MsgUpdateRateLimitResponse{}, nil +} + +// Removes a rate limit. 
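Since every handler in this message server rejects any signer other than the keeper's authority (the x/gov module account in the tests below), these messages are intended to be executed via governance rather than sent directly from a user account. A small sketch of constructing a valid MsgAddRateLimit; the denom, channel, and quota values are illustrative only.

package main

import (
	"fmt"

	sdkmath "cosmossdk.io/math"

	authtypes "github.com/cosmos/cosmos-sdk/x/auth/types"
	govtypes "github.com/cosmos/cosmos-sdk/x/gov/types"

	ratelimittypes "github.com/cosmos/ibc-go/v10/modules/apps/rate-limiting/types"
)

func main() {
	// The keeper's authority is typically the gov module account, so the Signer
	// must be that address for the authority check in AddRateLimit to pass.
	authority := authtypes.NewModuleAddress(govtypes.ModuleName).String()

	msg := ratelimittypes.MsgAddRateLimit{
		Signer:            authority,
		Denom:             "uatom",            // illustrative denom
		ChannelOrClientId: "channel-0",        // illustrative channel identifier
		MaxPercentSend:    sdkmath.NewInt(20), // outflow capped at 20% of channel value
		MaxPercentRecv:    sdkmath.NewInt(10), // inflow capped at 10% of channel value
		DurationHours:     24,                 // quota window length
	}

	// In practice this message is wrapped in a governance proposal and executed by x/gov;
	// submitting it directly from an ordinary account fails with ErrInvalidSigner.
	fmt.Printf("%+v\n", msg)
}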
Fails if the rate limit doesn't exist +func (k msgServer) RemoveRateLimit(goCtx context.Context, msg *types.MsgRemoveRateLimit) (*types.MsgRemoveRateLimitResponse, error) { + _, err := k.addressCodec.StringToBytes(msg.Signer) + if err != nil { + return nil, errorsmod.Wrapf(sdkerrors.ErrInvalidAddress, "invalid signer address: %s", msg.Signer) + } + + if k.authority != msg.Signer { + return nil, errorsmod.Wrapf(govtypes.ErrInvalidSigner, "invalid authority; expected %s, got %s", k.authority, msg.Signer) + } + + ctx := sdk.UnwrapSDKContext(goCtx) + _, found := k.GetRateLimit(ctx, msg.Denom, msg.ChannelOrClientId) + if !found { + return nil, types.ErrRateLimitNotFound + } + + k.Keeper.RemoveRateLimit(ctx, msg.Denom, msg.ChannelOrClientId) + return &types.MsgRemoveRateLimitResponse{}, nil +} + +// Resets the flow on a rate limit. Fails if the rate limit doesn't exist +func (k msgServer) ResetRateLimit(goCtx context.Context, msg *types.MsgResetRateLimit) (*types.MsgResetRateLimitResponse, error) { + _, err := k.addressCodec.StringToBytes(msg.Signer) + if err != nil { + return nil, errorsmod.Wrapf(sdkerrors.ErrInvalidAddress, "invalid signer address: %s", msg.Signer) + } + + if k.authority != msg.Signer { + return nil, errorsmod.Wrapf(govtypes.ErrInvalidSigner, "invalid authority; expected %s, got %s", k.authority, msg.Signer) + } + + ctx := sdk.UnwrapSDKContext(goCtx) + if err := k.Keeper.ResetRateLimit(ctx, msg.Denom, msg.ChannelOrClientId); err != nil { + return nil, err + } + + return &types.MsgResetRateLimitResponse{}, nil +} diff --git a/modules/apps/rate-limiting/keeper/msg_server_test.go b/modules/apps/rate-limiting/keeper/msg_server_test.go new file mode 100644 index 00000000000..27c38dc1ce2 --- /dev/null +++ b/modules/apps/rate-limiting/keeper/msg_server_test.go @@ -0,0 +1,246 @@ +package keeper_test + +import ( + errorsmod "cosmossdk.io/errors" + sdkmath "cosmossdk.io/math" + + sdk "github.com/cosmos/cosmos-sdk/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" + authtypes "github.com/cosmos/cosmos-sdk/x/auth/types" + govtypes "github.com/cosmos/cosmos-sdk/x/gov/types" + minttypes "github.com/cosmos/cosmos-sdk/x/mint/types" + + "github.com/cosmos/ibc-go/v10/modules/apps/rate-limiting/keeper" + "github.com/cosmos/ibc-go/v10/modules/apps/rate-limiting/types" + transfertypes "github.com/cosmos/ibc-go/v10/modules/apps/transfer/types" + channeltypes "github.com/cosmos/ibc-go/v10/modules/core/04-channel/types" + ibctesting "github.com/cosmos/ibc-go/v10/testing" +) + +var ( + authority = authtypes.NewModuleAddress(govtypes.ModuleName).String() + + addRateLimitMsg = types.MsgAddRateLimit{ + Signer: authority, + Denom: "denom", + ChannelOrClientId: "channel-0", + MaxPercentRecv: sdkmath.NewInt(10), + MaxPercentSend: sdkmath.NewInt(20), + DurationHours: 30, + } + + updateRateLimitMsg = types.MsgUpdateRateLimit{ + Signer: authority, + Denom: "denom", + ChannelOrClientId: "channel-0", + MaxPercentRecv: sdkmath.NewInt(20), + MaxPercentSend: sdkmath.NewInt(30), + DurationHours: 40, + } + + removeRateLimitMsg = types.MsgRemoveRateLimit{ + Signer: authority, + Denom: "denom", + ChannelOrClientId: "channel-0", + } + + resetRateLimitMsg = types.MsgResetRateLimit{ + Signer: authority, + Denom: "denom", + ChannelOrClientId: "channel-0", + } +) + +// Helper function to create a channel and prevent a channel not exists error +func (s *KeeperTestSuite) createChannel(channelID string) { + s.chainA.GetSimApp().IBCKeeper.ChannelKeeper.SetChannel(s.chainA.GetContext(), transfertypes.PortID, 
channelID, channeltypes.Channel{}) +} + +// Helper function to mint tokens and create channel value to prevent a zero channel value error +func (s *KeeperTestSuite) createChannelValue(_ string, channelValue sdkmath.Int) { + err := s.chainA.GetSimApp().BankKeeper.MintCoins(s.chainA.GetContext(), minttypes.ModuleName, sdk.NewCoins(sdk.NewCoin(addRateLimitMsg.Denom, channelValue))) + s.Require().NoError(err) +} + +// Helper function to add a rate limit with an optional error expectation +func (s *KeeperTestSuite) addRateLimit(msgAddRateLimit types.MsgAddRateLimit, expectedErr *errorsmod.Error) { + msgServer := keeper.NewMsgServerImpl(s.chainA.GetSimApp().RateLimitKeeper) + _, actualErr := msgServer.AddRateLimit(s.chainA.GetContext(), &msgAddRateLimit) + + // If it should have been added successfully, confirm no error + // and confirm the rate limit was created + if expectedErr == nil { + s.Require().NoError(actualErr) + + _, found := s.chainA.GetSimApp().RateLimitKeeper.GetRateLimit(s.chainA.GetContext(), addRateLimitMsg.Denom, addRateLimitMsg.ChannelOrClientId) + s.Require().True(found) + } else { + // If it should have failed, check the error + s.Require().ErrorIs(actualErr, expectedErr) + } +} + +// Helper function to add a rate limit successfully +func (s *KeeperTestSuite) addRateLimitSuccessful(msgAddRateLimit types.MsgAddRateLimit) { + s.addRateLimit(msgAddRateLimit, nil) +} + +// Helper function to add a rate limit with an expected error +func (s *KeeperTestSuite) addRateLimitWithError(msgAddRateLimit types.MsgAddRateLimit, expectedErr *errorsmod.Error) { + s.addRateLimit(msgAddRateLimit, expectedErr) +} + +func (s *KeeperTestSuite) TestMsgServer_AddRateLimit() { + denom := addRateLimitMsg.Denom + channelID := addRateLimitMsg.ChannelOrClientId + channelValue := sdkmath.NewInt(100) + + // First try to add a rate limit when there's no channel value, it will fail + s.addRateLimitWithError(addRateLimitMsg, types.ErrZeroChannelValue) + + // Create channel value + s.createChannelValue(denom, channelValue) + + // Then try to add a rate limit before the channel has been created, it will also fail + s.addRateLimitWithError(addRateLimitMsg, types.ErrChannelNotFound) + + // Create the channel + s.createChannel(channelID) + + // Now add a rate limit successfully + s.addRateLimitSuccessful(addRateLimitMsg) + + // Finally, try to add the same rate limit again - it should fail + s.addRateLimitWithError(addRateLimitMsg, types.ErrRateLimitAlreadyExists) + + // Verify that signer == authority required + invalidSignerMsg := addRateLimitMsg + invalidSignerMsg.Signer = s.chainA.SenderAccount.GetAddress().String() + s.addRateLimitWithError(invalidSignerMsg, govtypes.ErrInvalidSigner) + + // Verify that valid signer required + invalidSignerMsg.Signer = ibctesting.InvalidID + s.addRateLimitWithError(invalidSignerMsg, sdkerrors.ErrInvalidAddress) +} + +func (s *KeeperTestSuite) TestMsgServer_UpdateRateLimit() { + denom := updateRateLimitMsg.Denom + channelID := updateRateLimitMsg.ChannelOrClientId + channelValue := sdkmath.NewInt(100) + + msgServer := keeper.NewMsgServerImpl(s.chainA.GetSimApp().RateLimitKeeper) + + // Create channel and channel value + s.createChannel(channelID) + s.createChannelValue(denom, channelValue) + + // Attempt to update a rate limit that does not exist + _, err := msgServer.UpdateRateLimit(s.chainA.GetContext(), &updateRateLimitMsg) + s.Require().Equal(err, types.ErrRateLimitNotFound) + + // Add a rate limit successfully + s.addRateLimitSuccessful(addRateLimitMsg) + + // Update the 
rate limit successfully + _, err = msgServer.UpdateRateLimit(s.chainA.GetContext(), &updateRateLimitMsg) + s.Require().NoError(err) + + // Check ratelimit quota is updated correctly + updatedRateLimit, found := s.chainA.GetSimApp().RateLimitKeeper.GetRateLimit(s.chainA.GetContext(), denom, channelID) + s.Require().True(found) + s.Require().Equal(updatedRateLimit.Quota, &types.Quota{ + MaxPercentSend: updateRateLimitMsg.MaxPercentSend, + MaxPercentRecv: updateRateLimitMsg.MaxPercentRecv, + DurationHours: updateRateLimitMsg.DurationHours, + }) + + // Attempt to update a rate limit that has invalid authority + invalidSignerMsg := updateRateLimitMsg + invalidSignerMsg.Signer = s.chainA.SenderAccount.GetAddress().String() + _, err = msgServer.UpdateRateLimit(s.chainA.GetContext(), &invalidSignerMsg) + s.Require().ErrorIs(err, govtypes.ErrInvalidSigner) + + // Verify that valid signer required + invalidSignerMsg.Signer = ibctesting.InvalidID + _, err = msgServer.UpdateRateLimit(s.chainA.GetContext(), &invalidSignerMsg) + s.Require().ErrorIs(err, sdkerrors.ErrInvalidAddress) +} + +func (s *KeeperTestSuite) TestMsgServer_RemoveRateLimit() { + denom := removeRateLimitMsg.Denom + channelID := removeRateLimitMsg.ChannelOrClientId + channelValue := sdkmath.NewInt(100) + + msgServer := keeper.NewMsgServerImpl(s.chainA.GetSimApp().RateLimitKeeper) + + s.createChannel(channelID) + s.createChannelValue(denom, channelValue) + + // Attempt to remove a rate limit that does not exist + _, err := msgServer.RemoveRateLimit(s.chainA.GetContext(), &removeRateLimitMsg) + s.Require().Equal(err, types.ErrRateLimitNotFound) + + // Add a rate limit successfully + s.addRateLimitSuccessful(addRateLimitMsg) + + // Remove the rate limit successfully + _, err = msgServer.RemoveRateLimit(s.chainA.GetContext(), &removeRateLimitMsg) + s.Require().NoError(err) + + // Confirm it was removed + _, found := s.chainA.GetSimApp().RateLimitKeeper.GetRateLimit(s.chainA.GetContext(), denom, channelID) + s.Require().False(found) + + // Attempt to Remove a rate limit that has invalid authority + invalidSignerMsg := removeRateLimitMsg + invalidSignerMsg.Signer = s.chainA.SenderAccount.GetAddress().String() + _, err = msgServer.RemoveRateLimit(s.chainA.GetContext(), &invalidSignerMsg) + s.Require().ErrorIs(err, govtypes.ErrInvalidSigner) + + // Verify that valid signer required + invalidSignerMsg.Signer = ibctesting.InvalidID + _, err = msgServer.RemoveRateLimit(s.chainA.GetContext(), &invalidSignerMsg) + s.Require().ErrorIs(err, sdkerrors.ErrInvalidAddress) +} + +func (s *KeeperTestSuite) TestMsgServer_ResetRateLimit() { + denom := resetRateLimitMsg.Denom + channelID := resetRateLimitMsg.ChannelOrClientId + channelValue := sdkmath.NewInt(100) + + msgServer := keeper.NewMsgServerImpl(s.chainA.GetSimApp().RateLimitKeeper) + + s.createChannel(channelID) + s.createChannelValue(denom, channelValue) + + // Attempt to reset a rate limit that does not exist + _, err := msgServer.ResetRateLimit(s.chainA.GetContext(), &resetRateLimitMsg) + s.Require().Equal(err, types.ErrRateLimitNotFound) + + // Add a rate limit successfully + s.addRateLimitSuccessful(addRateLimitMsg) + + // Reset the rate limit successfully + _, err = msgServer.ResetRateLimit(s.chainA.GetContext(), &resetRateLimitMsg) + s.Require().NoError(err) + + // Check ratelimit quota is flow correctly + resetRateLimit, found := s.chainA.GetSimApp().RateLimitKeeper.GetRateLimit(s.chainA.GetContext(), denom, channelID) + s.Require().True(found) + s.Require().Equal(resetRateLimit.Flow, 
&types.Flow{ + Inflow: sdkmath.ZeroInt(), + Outflow: sdkmath.ZeroInt(), + ChannelValue: channelValue, + }) + + // Attempt to Remove a rate limit that has invalid authority + invalidSignerMsg := resetRateLimitMsg + invalidSignerMsg.Signer = s.chainA.SenderAccount.GetAddress().String() + _, err = msgServer.ResetRateLimit(s.chainA.GetContext(), &invalidSignerMsg) + s.Require().ErrorIs(err, govtypes.ErrInvalidSigner) + + // Verify that valid signer required + invalidSignerMsg.Signer = ibctesting.InvalidID + _, err = msgServer.ResetRateLimit(s.chainA.GetContext(), &invalidSignerMsg) + s.Require().ErrorIs(err, sdkerrors.ErrInvalidAddress) +} diff --git a/modules/apps/rate-limiting/keeper/packet.go b/modules/apps/rate-limiting/keeper/packet.go new file mode 100644 index 00000000000..75eb3ed3bc3 --- /dev/null +++ b/modules/apps/rate-limiting/keeper/packet.go @@ -0,0 +1,269 @@ +package keeper + +import ( + "bytes" + "encoding/json" + "fmt" + "strings" + + errorsmod "cosmossdk.io/errors" + sdkmath "cosmossdk.io/math" + + sdk "github.com/cosmos/cosmos-sdk/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" + + "github.com/cosmos/ibc-go/v10/modules/apps/rate-limiting/types" + transfertypes "github.com/cosmos/ibc-go/v10/modules/apps/transfer/types" + clienttypes "github.com/cosmos/ibc-go/v10/modules/core/02-client/types" + channeltypes "github.com/cosmos/ibc-go/v10/modules/core/04-channel/types" + channeltypesv2 "github.com/cosmos/ibc-go/v10/modules/core/04-channel/v2/types" +) + +type RateLimitedPacketInfo struct { + ChannelID string + Denom string + Amount sdkmath.Int + Sender string + Receiver string +} + +// CheckAcknowledementSucceeded unmarshals IBC Acknowledgements, and determines +// whether the tx was successful +func (k *Keeper) CheckAcknowledementSucceeded(ctx sdk.Context, ack []byte) (bool, error) { + // Check if the ack is the IBC v2 universal error acknowledgement + if bytes.Equal(ack, channeltypesv2.ErrorAcknowledgement[:]) { + return false, nil + } + + // Unmarshal the raw ack response + var acknowledgement channeltypes.Acknowledgement + if err := transfertypes.ModuleCdc.UnmarshalJSON(ack, &acknowledgement); err != nil { + return false, errorsmod.Wrapf(sdkerrors.ErrUnknownRequest, "cannot unmarshal ICS-20 transfer packet acknowledgement: %s", err.Error()) + } + + // The ack can come back as either AcknowledgementResult or AcknowledgementError + // If it comes back as AcknowledgementResult, the messages are encoded differently depending on the SDK version + switch response := acknowledgement.Response.(type) { + case *channeltypes.Acknowledgement_Result: + if len(response.Result) == 0 { + return false, errorsmod.Wrapf(channeltypes.ErrInvalidAcknowledgement, "acknowledgement result cannot be empty") + } + return true, nil + + case *channeltypes.Acknowledgement_Error: + k.Logger(ctx).Error(fmt.Sprintf("acknowledgement error: %s", response.Error)) + return false, nil + + default: + return false, errorsmod.Wrapf(channeltypes.ErrInvalidAcknowledgement, "unsupported acknowledgement response field type %T", response) + } +} + +// ParseDenomFromSendPacket parses the denom from the Send Packet. +// The denom that the rate limiter will use for a SEND packet depends on whether +// it was a NATIVE token (e.g. ustrd, stuatom, etc.) or NON-NATIVE token (e.g. ibc/...)... +// +// We can identify if the token is native or not by parsing the trace denom from the packet +// If the token is NATIVE, it will not have a prefix (e.g. ustrd), +// and if it is NON-NATIVE, it will have a prefix (e.g. 
transfer/channel-2/uosmo) +// +// For NATIVE denoms, return as is (e.g. ustrd) +// For NON-NATIVE denoms, take the ibc hash (e.g. hash "transfer/channel-2/usoms" into "ibc/...") +func ParseDenomFromSendPacket(packet transfertypes.FungibleTokenPacketData) string { + // Check if the denom is already an IBC denom (starts with "ibc/") + if strings.HasPrefix(packet.Denom, "ibc/") { + return packet.Denom + } + + // Determine the denom by looking at the denom trace path + denom := transfertypes.ExtractDenomFromPath(packet.Denom) + return denom.IBCDenom() +} + +// ParseDenomFromRecvPacket parses the denom from the Recv Packet that will be used by the rate limit module. +// The denom that the rate limiter will use for a RECEIVE packet depends on whether it was a source or sink. +// +// Sink: The token moves forward, to a chain different than its previous hop +// The new port and channel are APPENDED to the denom trace. +// (e.g. A -> B, B is a sink) (e.g. A -> B -> C, C is a sink) +// +// Source: The token moves backwards (i.e. revisits the last chain it was sent from) +// The port and channel are REMOVED from the denom trace - undoing the last hop. +// (e.g. A -> B -> A, A is a source) (e.g. A -> B -> C -> B, B is a source) +// +// If the chain is acting as a SINK: We add on the port and channel and hash it +// Ex1: uosmo sent from Osmosis to Stride +// Packet Denom: uosmo +// -> Add Prefix: transfer/channel-X/uosmo +// -> Hash: ibc/... +// +// Ex2: ujuno sent from Osmosis to Stride +// PacketDenom: transfer/channel-Y/ujuno (channel-Y is the Juno <> Osmosis channel) +// -> Add Prefix: transfer/channel-X/transfer/channel-Y/ujuno +// -> Hash: ibc/... +// +// If the chain is acting as a SOURCE: First, remove the prefix. Then if there is still a denom trace, hash it +// Ex1: ustrd sent back to Stride from Osmosis +// Packet Denom: transfer/channel-X/ustrd +// -> Remove Prefix: ustrd +// -> Leave as is: ustrd +// +// Ex2: juno was sent to Stride, then to Osmosis, then back to Stride +// Packet Denom: transfer/channel-X/transfer/channel-Z/ujuno +// -> Remove Prefix: transfer/channel-Z/ujuno +// -> Hash: ibc/... +func ParseDenomFromRecvPacket(packet channeltypes.Packet, packetData transfertypes.FungibleTokenPacketData) string { + sourcePort := packet.SourcePort + sourceChannel := packet.SourceChannel + + // To determine the denom, first check whether Stride is acting as source + // Build the source prefix and check if the denom starts with it + hop := transfertypes.NewHop(sourcePort, sourceChannel) + sourcePrefix := hop.String() + "/" + + if strings.HasPrefix(packetData.Denom, sourcePrefix) { + // Remove the source prefix (e.g. transfer/channel-X/transfer/channel-Z/ujuno -> transfer/channel-Z/ujuno) + unprefixedDenom := packetData.Denom[len(sourcePrefix):] + + // Native assets will have an empty trace path and can be returned as is + denom := transfertypes.ExtractDenomFromPath(unprefixedDenom) + return denom.IBCDenom() + } + // Prefix the destination channel - this will contain the trailing slash (e.g. 
transfer/channel-X/) + destinationPrefix := transfertypes.NewHop(packet.GetDestPort(), packet.GetDestChannel()) + prefixedDenom := destinationPrefix.String() + "/" + packetData.Denom + + // Hash the denom trace + denom := transfertypes.ExtractDenomFromPath(prefixedDenom) + return denom.IBCDenom() +} + +// ParsePacketInfo parses the sender and channelId and denom for the corresponding RateLimit object, and +// the sender/receiver/transfer amount +// +// The channelID should always be used as the key for the RateLimit object (not the counterparty channelID) +// For a SEND packet, the channelID is the SOURCE channel +// For a RECEIVE packet, the channelID is the DESTINATION channel +// +// The Source and Destination are defined from the perspective of a packet recipient. +func ParsePacketInfo(packet channeltypes.Packet, direction types.PacketDirection) (RateLimitedPacketInfo, error) { + var packetData transfertypes.FungibleTokenPacketData + if err := json.Unmarshal(packet.GetData(), &packetData); err != nil { + return RateLimitedPacketInfo{}, err + } + + var channelID, denom string + if direction == types.PACKET_SEND { + channelID = packet.GetSourceChannel() + denom = ParseDenomFromSendPacket(packetData) + } else { + channelID = packet.GetDestChannel() + denom = ParseDenomFromRecvPacket(packet, packetData) + } + + amount, ok := sdkmath.NewIntFromString(packetData.Amount) + if !ok { + return RateLimitedPacketInfo{}, + errorsmod.Wrapf(sdkerrors.ErrInvalidRequest, "Unable to cast packet amount '%s' to sdkmath.Int", packetData.Amount) + } + + packetInfo := RateLimitedPacketInfo{ + ChannelID: channelID, + Denom: denom, + Amount: amount, + Sender: packetData.Sender, + Receiver: packetData.Receiver, + } + + return packetInfo, nil +} + +// Middleware implementation for SendPacket with rate limiting +// Checks whether the rate limit has been exceeded - and if it hasn't, sends the packet +func (k *Keeper) SendRateLimitedPacket(ctx sdk.Context, sourcePort, sourceChannel string, timeoutHeight clienttypes.Height, timeoutTimestamp uint64, data []byte) error { + seq, found := k.channelKeeper.GetNextSequenceSend(ctx, sourcePort, sourceChannel) + if !found { + return errorsmod.Wrapf(channeltypes.ErrSequenceSendNotFound, "source port: %s, source channel: %s", sourcePort, sourceChannel) + } + + packet := channeltypes.Packet{ + Sequence: seq, + SourcePort: sourcePort, + SourceChannel: sourceChannel, + TimeoutHeight: timeoutHeight, + TimeoutTimestamp: timeoutTimestamp, + Data: data, + } + + packetInfo, err := ParsePacketInfo(packet, types.PACKET_SEND) + if err != nil { + return err + } + + // Check if the packet would exceed the outflow rate limit + updatedFlow, err := k.CheckRateLimitAndUpdateFlow(ctx, types.PACKET_SEND, packetInfo) + if err != nil { + return err + } + + // Store the sequence number of the packet so that if the transfer fails, + // we can identify if it was sent during this quota and can revert the outflow + if updatedFlow { + k.SetPendingSendPacket(ctx, packetInfo.ChannelID, packet.Sequence) + } + + return nil +} + +// Middleware implementation for RecvPacket with rate limiting +// Checks whether the rate limit has been exceeded - and if it hasn't, allows the packet +func (k *Keeper) ReceiveRateLimitedPacket(ctx sdk.Context, packet channeltypes.Packet) error { + packetInfo, err := ParsePacketInfo(packet, types.PACKET_RECV) + if err != nil { + // If the packet data is unparseable, we can't apply rate limiting. 
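To make the send/receive parsing rules above concrete, here is a standalone sketch exercising the two exported parsers with made-up channel IDs; the outputs noted in comments describe the expected shapes rather than captured program output.

package main

import (
	"fmt"

	ratelimitkeeper "github.com/cosmos/ibc-go/v10/modules/apps/rate-limiting/keeper"
	transfertypes "github.com/cosmos/ibc-go/v10/modules/apps/transfer/types"
	channeltypes "github.com/cosmos/ibc-go/v10/modules/core/04-channel/types"
)

func main() {
	// SEND of a native token: the denom is used as-is.
	native := transfertypes.FungibleTokenPacketData{Denom: "ustrd"}
	fmt.Println(ratelimitkeeper.ParseDenomFromSendPacket(native)) // "ustrd"

	// SEND of a non-native token: the full trace is hashed into an ibc/... denom.
	nonNative := transfertypes.FungibleTokenPacketData{Denom: "transfer/channel-2/uosmo"}
	fmt.Println(ratelimitkeeper.ParseDenomFromSendPacket(nonNative)) // "ibc/<sha256 of the trace>"

	// RECV where the receiving chain acts as a sink: the destination port/channel is
	// prefixed onto the trace before hashing.
	packet := channeltypes.Packet{
		SourcePort:         "transfer",
		SourceChannel:      "channel-100",
		DestinationPort:    "transfer",
		DestinationChannel: "channel-0",
	}
	recvData := transfertypes.FungibleTokenPacketData{Denom: "uosmo"}
	fmt.Println(ratelimitkeeper.ParseDenomFromRecvPacket(packet, recvData)) // hash of "transfer/channel-0/uosmo"
}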
+ // Log the error and allow the packet to proceed to the underlying app + // which is responsible for handling invalid packet data. + k.Logger(ctx).Error("Unable to parse packet data for rate limiting", "error", err) + return nil // Returning nil allows the packet to continue down the stack + } + + // If parsing was successful, check the rate limit + _, err = k.CheckRateLimitAndUpdateFlow(ctx, types.PACKET_RECV, packetInfo) + // If CheckRateLimitAndUpdateFlow returns an error (e.g., quota exceeded), return it to generate an error ack. + return err +} + +// AcknowledgeRateLimitedPacket implements for OnAckPacket for porttypes.Middleware. +// If the packet failed, we should decrement the Outflow. +func (k *Keeper) AcknowledgeRateLimitedPacket(ctx sdk.Context, packet channeltypes.Packet, acknowledgement []byte) error { + ackSuccess, err := k.CheckAcknowledementSucceeded(ctx, acknowledgement) + if err != nil { + return err + } + + // Parse the denom, channelId, and amount from the packet + packetInfo, err := ParsePacketInfo(packet, types.PACKET_SEND) + if err != nil { + return err + } + + // If the ack was successful, remove the pending packet + if ackSuccess { + k.RemovePendingSendPacket(ctx, packetInfo.ChannelID, packet.Sequence) + return nil + } + + // If the ack failed, undo the change to the rate limit Outflow + return k.UndoSendPacket(ctx, packetInfo.ChannelID, packet.Sequence, packetInfo.Denom, packetInfo.Amount) +} + +// Middleware implementation for OnAckPacket with rate limiting +// The Outflow should be decremented from the failed packet +func (k *Keeper) TimeoutRateLimitedPacket(ctx sdk.Context, packet channeltypes.Packet) error { + packetInfo, err := ParsePacketInfo(packet, types.PACKET_SEND) + if err != nil { + return err + } + + return k.UndoSendPacket(ctx, packetInfo.ChannelID, packet.Sequence, packetInfo.Denom, packetInfo.Amount) +} diff --git a/modules/apps/rate-limiting/keeper/packet_test.go b/modules/apps/rate-limiting/keeper/packet_test.go new file mode 100644 index 00000000000..28406207864 --- /dev/null +++ b/modules/apps/rate-limiting/keeper/packet_test.go @@ -0,0 +1,789 @@ +package keeper_test + +import ( + "crypto/sha256" + "encoding/json" + "fmt" + "testing" + "time" + + "github.com/stretchr/testify/require" + + sdkmath "cosmossdk.io/math" + + sdk "github.com/cosmos/cosmos-sdk/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" + + tmbytes "github.com/cometbft/cometbft/libs/bytes" + + ratelimiting "github.com/cosmos/ibc-go/v10/modules/apps/rate-limiting" + "github.com/cosmos/ibc-go/v10/modules/apps/rate-limiting/keeper" + "github.com/cosmos/ibc-go/v10/modules/apps/rate-limiting/types" + transfertypes "github.com/cosmos/ibc-go/v10/modules/apps/transfer/types" + clienttypes "github.com/cosmos/ibc-go/v10/modules/core/02-client/types" + channeltypes "github.com/cosmos/ibc-go/v10/modules/core/04-channel/types" + channeltypesv2 "github.com/cosmos/ibc-go/v10/modules/core/04-channel/v2/types" + ibctesting "github.com/cosmos/ibc-go/v10/testing" +) + +const ( + transferPort = "transfer" + uosmo = "uosmo" + ujuno = "ujuno" + ustrd = "ustrd" + stuatom = "stuatom" + channelOnStride = "channel-0" + channelOnHost = "channel-1" +) + +func hashDenomTrace(denomTrace string) string { + trace32byte := sha256.Sum256([]byte(denomTrace)) + var traceTmByte tmbytes.HexBytes = trace32byte[:] + return fmt.Sprintf("ibc/%s", traceTmByte) +} + +func TestParseDenomFromSendPacket(t *testing.T) { + testCases := []struct { + name string + packetDenomTrace string + expectedDenom string + }{ 
+ // Native assets stay as is + { + name: "ustrd", + packetDenomTrace: ustrd, + expectedDenom: ustrd, + }, + { + name: "stuatom", + packetDenomTrace: stuatom, + expectedDenom: stuatom, + }, + // Non-native assets are hashed + { + name: "uosmo_one_hop", + packetDenomTrace: "transfer/channel-0/usomo", + expectedDenom: hashDenomTrace("transfer/channel-0/usomo"), + }, + { + name: "uosmo_two_hops", + packetDenomTrace: "transfer/channel-2/transfer/channel-1/usomo", + expectedDenom: hashDenomTrace("transfer/channel-2/transfer/channel-1/usomo"), + }, + // IBC denoms are passed through as is + { + name: "ibc_denom", + packetDenomTrace: "ibc/27394FB092D2ECCD56123C74F36E4C1F926001CEADA9CA97EA622B25F41E5EB2", + expectedDenom: "ibc/27394FB092D2ECCD56123C74F36E4C1F926001CEADA9CA97EA622B25F41E5EB2", + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + packet := transfertypes.FungibleTokenPacketData{ + Denom: tc.packetDenomTrace, + } + + parsedDenom := keeper.ParseDenomFromSendPacket(packet) + require.Equal(t, tc.expectedDenom, parsedDenom, tc.name) + }) + } +} + +func TestParseDenomFromRecvPacket(t *testing.T) { + osmoChannelOnStride := "channel-0" + strideChannelOnOsmo := "channel-100" + junoChannelOnOsmo := "channel-200" + junoChannelOnStride := "channel-300" + + testCases := []struct { + name string + packetDenomTrace string + sourceChannel string + destinationChannel string + expectedDenom string + }{ + // Sink asset one hop away: + // uosmo sent from Osmosis to Stride (uosmo) + // -> tack on prefix (transfer/channel-0/uosmo) and hash + { + name: "sink_one_hop", + packetDenomTrace: uosmo, + sourceChannel: strideChannelOnOsmo, + destinationChannel: osmoChannelOnStride, + expectedDenom: hashDenomTrace(fmt.Sprintf("%s/%s/%s", transferPort, osmoChannelOnStride, uosmo)), + }, + // Sink asset two hops away: + // ujuno sent from Juno to Osmosis to Stride (transfer/channel-200/ujuno) + // -> tack on prefix (transfer/channel-0/transfer/channel-200/ujuno) and hash + { + name: "sink_two_hops", + packetDenomTrace: fmt.Sprintf("%s/%s/%s", transferPort, junoChannelOnOsmo, ujuno), + sourceChannel: strideChannelOnOsmo, + destinationChannel: osmoChannelOnStride, + expectedDenom: hashDenomTrace(fmt.Sprintf("%s/%s/%s/%s/%s", transferPort, osmoChannelOnStride, transferPort, junoChannelOnOsmo, ujuno)), + }, + // Native source assets + // ustrd sent from Stride to Osmosis and then back to Stride (transfer/channel-0/ustrd) + // -> remove prefix and leave as is (ustrd) + { + name: "native_source", + packetDenomTrace: fmt.Sprintf("%s/%s/%s", transferPort, strideChannelOnOsmo, ustrd), + sourceChannel: strideChannelOnOsmo, + destinationChannel: osmoChannelOnStride, + expectedDenom: ustrd, + }, + // Non-native source assets + // ujuno was sent from Juno to Stride, then to Osmosis, then back to Stride (transfer/channel-0/transfer/channel-300/ujuno) + // -> remove prefix (transfer/channel-300/ujuno) and hash + { + name: "non_native_source", + packetDenomTrace: fmt.Sprintf("%s/%s/%s/%s/%s", transferPort, strideChannelOnOsmo, transferPort, junoChannelOnStride, ujuno), + sourceChannel: strideChannelOnOsmo, + destinationChannel: osmoChannelOnStride, + expectedDenom: hashDenomTrace(fmt.Sprintf("%s/%s/%s", transferPort, junoChannelOnStride, ujuno)), + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + packet := channeltypes.Packet{ + SourcePort: transferPort, + DestinationPort: transferPort, + SourceChannel: tc.sourceChannel, + DestinationChannel: tc.destinationChannel, + } 
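A side note on the hashDenomTrace helper defined above: it is expected to agree with what the transfer module computes for a full trace path, which can be cross-checked directly. A small sketch, with a made-up trace string:

package main

import (
	"crypto/sha256"
	"fmt"

	tmbytes "github.com/cometbft/cometbft/libs/bytes"

	transfertypes "github.com/cosmos/ibc-go/v10/modules/apps/transfer/types"
)

func main() {
	trace := "transfer/channel-0/uosmo"

	// Test-helper style: sha256 the trace and prefix with "ibc/".
	sum := sha256.Sum256([]byte(trace))
	manual := fmt.Sprintf("ibc/%s", tmbytes.HexBytes(sum[:]))

	// Production style, as used by ParseDenomFromSendPacket: parse the trace and take its IBC denom.
	viaTransfer := transfertypes.ExtractDenomFromPath(trace).IBCDenom()

	// The two are expected to print the same ibc/... hash.
	fmt.Println(manual)
	fmt.Println(viaTransfer)
}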
+ packetData := transfertypes.FungibleTokenPacketData{ + Denom: tc.packetDenomTrace, + } + + parsedDenom := keeper.ParseDenomFromRecvPacket(packet, packetData) + require.Equal(t, tc.expectedDenom, parsedDenom, tc.name) + }) + } +} + +func (s *KeeperTestSuite) TestParsePacketInfo() { + sourceChannel := "channel-100" + destinationChannel := "channel-200" + denom := "denom" + amountString := "100" + amountInt := sdkmath.NewInt(100) + sender := "sender" + receiver := "receiver" + + packetData, err := json.Marshal(transfertypes.FungibleTokenPacketData{ + Denom: denom, + Amount: amountString, + Sender: sender, + Receiver: receiver, + }) + s.Require().NoError(err) + + packet := channeltypes.Packet{ + SourcePort: transferPort, + SourceChannel: sourceChannel, + DestinationPort: transferPort, + DestinationChannel: destinationChannel, + Data: packetData, + } + + // Send 'denom' from channel-100 (stride) -> channel-200 + // Since the 'denom' is native, it's kept as is for the rate limit object + expectedSendPacketInfo := keeper.RateLimitedPacketInfo{ + ChannelID: sourceChannel, + Denom: denom, + Amount: amountInt, + Sender: sender, + Receiver: receiver, + } + actualSendPacketInfo, err := keeper.ParsePacketInfo(packet, types.PACKET_SEND) + s.Require().NoError(err, "no error expected when parsing send packet") + s.Require().Equal(expectedSendPacketInfo, actualSendPacketInfo, "send packet") + + // Receive 'denom' from channel-100 -> channel-200 (stride) + // The stride channel (channel-200) should be tacked onto the end and the denom should be hashed + expectedRecvPacketInfo := keeper.RateLimitedPacketInfo{ + ChannelID: destinationChannel, + Denom: hashDenomTrace(fmt.Sprintf("transfer/%s/%s", destinationChannel, denom)), + Amount: amountInt, + Sender: sender, + Receiver: receiver, + } + actualRecvPacketInfo, err := keeper.ParsePacketInfo(packet, types.PACKET_RECV) + s.Require().NoError(err, "no error expected when parsing recv packet") + s.Require().Equal(expectedRecvPacketInfo, actualRecvPacketInfo, "recv packet") +} + +func (s *KeeperTestSuite) TestCheckAcknowledgementSucceeded() { + testCases := []struct { + name string + ack []byte + wantSuccess bool + wantErr error + }{ + { + name: "success legacy format", + ack: func() []byte { + return transfertypes.ModuleCdc.MustMarshalJSON(&channeltypes.Acknowledgement{ + Response: &channeltypes.Acknowledgement_Result{Result: []byte{1}}, + }) + }(), + wantSuccess: true, + wantErr: nil, + }, + { + name: "failed legacy format - empty result", + ack: func() []byte { + return transfertypes.ModuleCdc.MustMarshalJSON(&channeltypes.Acknowledgement{ + Response: &channeltypes.Acknowledgement_Result{}, + }) + }(), + wantSuccess: false, + wantErr: channeltypes.ErrInvalidAcknowledgement, + }, + { + name: "failed legacy format", + ack: func() []byte { + return transfertypes.ModuleCdc.MustMarshalJSON(&channeltypes.Acknowledgement{ + Response: &channeltypes.Acknowledgement_Error{Error: "some error"}, + }) + }(), + wantSuccess: false, + wantErr: nil, + }, + { + name: "failed v2 format", + ack: channeltypesv2.ErrorAcknowledgement[:], + wantSuccess: false, + wantErr: nil, + }, + { + name: "invalid format", + ack: []byte("invalid ack"), + wantSuccess: false, + wantErr: sdkerrors.ErrUnknownRequest, + }, + } + + for _, tc := range testCases { + s.Run(tc.name, func() { + success, err := s.chainA.GetSimApp().RateLimitKeeper.CheckAcknowledementSucceeded(s.chainA.GetContext(), tc.ack) + + if tc.wantErr != nil { + s.Require().ErrorIs(err, tc.wantErr, tc.name) + } else { + 
s.Require().NoError(err, "unexpected error for %s", tc.name) + } + + s.Require().Equal(tc.wantSuccess, success, + "expected success=%v for %s", tc.wantSuccess, tc.name) + }) + } +} + +func (s *KeeperTestSuite) createRateLimitCloseToQuota(denom string, channelID string, direction types.PacketDirection) { + channelValue := sdkmath.NewInt(100) + threshold := sdkmath.NewInt(10) + + // Set inflow/outflow close to threshold, depending on which direction we're going in + inflow := sdkmath.ZeroInt() + outflow := sdkmath.ZeroInt() + if direction == types.PACKET_RECV { + inflow = sdkmath.NewInt(9) + } else { + outflow = sdkmath.NewInt(9) + } + + // Store rate limit + s.chainA.GetSimApp().RateLimitKeeper.SetRateLimit(s.chainA.GetContext(), types.RateLimit{ + Path: &types.Path{ + Denom: denom, + ChannelOrClientId: channelID, + }, + Quota: &types.Quota{ + MaxPercentSend: threshold, + MaxPercentRecv: threshold, + }, + Flow: &types.Flow{ + Inflow: inflow, + Outflow: outflow, + ChannelValue: channelValue, + }, + }) +} + +func (s *KeeperTestSuite) TestSendRateLimitedPacket() { + // For send packets, the source will be stride and the destination will be the host + denom := ustrd + sourceChannel := channelOnStride + amountToExceed := "5" + sequence := uint64(10) + + // Create rate limit (for SEND, use SOURCE channel) + s.createRateLimitCloseToQuota(denom, sourceChannel, types.PACKET_SEND) + + // This packet should cause an Outflow quota exceed error + packetData, err := json.Marshal(transfertypes.FungibleTokenPacketData{Denom: denom, Amount: amountToExceed}) + s.Require().NoError(err) + + s.chainA.GetSimApp().IBCKeeper.ChannelKeeper.SetNextSequenceSend(s.chainA.GetContext(), transferPort, sourceChannel, sequence) + // We check for a quota error because it doesn't appear until the end of the function + // We're avoiding checking for a success here because we can get a false positive if the rate limit doesn't exist + err = s.chainA.GetSimApp().RateLimitKeeper.SendRateLimitedPacket(s.chainA.GetContext(), transferPort, sourceChannel, clienttypes.Height{}, 0, packetData) + s.Require().ErrorIs(err, types.ErrQuotaExceeded, "error type") + s.Require().ErrorContains(err, "Outflow exceeds quota", "error text") + + // Reset the rate limit and try again + err = s.chainA.GetSimApp().RateLimitKeeper.ResetRateLimit(s.chainA.GetContext(), denom, channelID) + s.Require().NoError(err, "no error expected when resetting rate limit") + + err = s.chainA.GetSimApp().RateLimitKeeper.SendRateLimitedPacket(s.chainA.GetContext(), transferPort, sourceChannel, clienttypes.Height{}, 0, packetData) + s.Require().NoError(err, "no error expected when sending packet after reset") + + // Check that the pending packet was stored + found := s.chainA.GetSimApp().RateLimitKeeper.CheckPacketSentDuringCurrentQuota(s.chainA.GetContext(), sourceChannel, sequence) + s.Require().True(found, "pending send packet") +} + +func (s *KeeperTestSuite) TestReceiveRateLimitedPacket() { + // For receive packets, the source will be the host and the destination will be stride + packetDenom := uosmo + sourceChannel := channelOnHost + destinationChannel := channelOnStride + amountToExceed := "5" + + // When the packet is received, the port and channel prefix will be added and the denom will be hashed + // before the rate limit is found from the store + rateLimitDenom := hashDenomTrace(fmt.Sprintf("%s/%s/%s", transferPort, channelOnStride, packetDenom)) + + // Create rate limit (for RECV, use DESTINATION channel) + s.createRateLimitCloseToQuota(rateLimitDenom, 
destinationChannel, types.PACKET_RECV) + + // This packet should cause an Outflow quota exceed error + packetData, err := json.Marshal(transfertypes.FungibleTokenPacketData{Denom: packetDenom, Amount: amountToExceed}) + s.Require().NoError(err) + packet := channeltypes.Packet{ + SourcePort: transferPort, + SourceChannel: sourceChannel, + DestinationPort: transferPort, + DestinationChannel: destinationChannel, + Data: packetData, + } + + // We check for a quota error because it doesn't appear until the end of the function + // We're avoiding checking for a success here because we can get a false positive if the rate limit doesn't exist + err = s.chainA.GetSimApp().RateLimitKeeper.ReceiveRateLimitedPacket(s.chainA.GetContext(), packet) + s.Require().ErrorIs(err, types.ErrQuotaExceeded, "error type") + s.Require().ErrorContains(err, "Inflow exceeds quota", "error text") +} + +func (s *KeeperTestSuite) TestAcknowledgeRateLimitedPacket_AckSuccess() { + // For ack packets, the source will be stride and the destination will be the host + denom := ustrd + sourceChannel := channelOnStride + destinationChannel := channelOnHost + sequence := uint64(10) + + // Create rate limit - the flow and quota does not matter for this test + s.chainA.GetSimApp().RateLimitKeeper.SetRateLimit(s.chainA.GetContext(), types.RateLimit{ + Path: &types.Path{Denom: denom, ChannelOrClientId: channelID}, + }) + + // Store the pending packet for this sequence number + s.chainA.GetSimApp().RateLimitKeeper.SetPendingSendPacket(s.chainA.GetContext(), sourceChannel, sequence) + + // Build the ack packet + packetData, err := json.Marshal(transfertypes.FungibleTokenPacketData{Denom: denom, Amount: "10"}) + s.Require().NoError(err) + packet := channeltypes.Packet{ + SourcePort: transferPort, + SourceChannel: sourceChannel, + DestinationPort: transferPort, + DestinationChannel: destinationChannel, + Data: packetData, + Sequence: sequence, + } + ackSuccess := transfertypes.ModuleCdc.MustMarshalJSON(&channeltypes.Acknowledgement{ + Response: &channeltypes.Acknowledgement_Result{Result: []byte{1}}, + }) + + // Call AckPacket with the successful ack + err = s.chainA.GetSimApp().RateLimitKeeper.AcknowledgeRateLimitedPacket(s.chainA.GetContext(), packet, ackSuccess) + s.Require().NoError(err, "no error expected during AckPacket") + + // Confirm the pending packet was removed + found := s.chainA.GetSimApp().RateLimitKeeper.CheckPacketSentDuringCurrentQuota(s.chainA.GetContext(), sourceChannel, sequence) + s.Require().False(found, "send packet should have been removed") +} + +func (s *KeeperTestSuite) TestAcknowledgeRateLimitedPacket_AckFailure() { + // For ack packets, the source will be stride and the destination will be the host + denom := ustrd + sourceChannel := channelOnStride + destinationChannel := channelOnHost + initialOutflow := sdkmath.NewInt(100) + packetAmount := sdkmath.NewInt(10) + sequence := uint64(10) + + // Create rate limit - only outflow is needed to this tests + s.chainA.GetSimApp().RateLimitKeeper.SetRateLimit(s.chainA.GetContext(), types.RateLimit{ + Path: &types.Path{Denom: denom, ChannelOrClientId: channelID}, + Flow: &types.Flow{Outflow: initialOutflow}, + }) + + // Store the pending packet for this sequence number + s.chainA.GetSimApp().RateLimitKeeper.SetPendingSendPacket(s.chainA.GetContext(), sourceChannel, sequence) + + // Build the ack packet + packetData, err := json.Marshal(transfertypes.FungibleTokenPacketData{Denom: denom, Amount: packetAmount.String()}) + s.Require().NoError(err) + packet := 
channeltypes.Packet{ + SourcePort: transferPort, + SourceChannel: sourceChannel, + DestinationPort: transferPort, + DestinationChannel: destinationChannel, + Data: packetData, + Sequence: sequence, + } + ackFailure := transfertypes.ModuleCdc.MustMarshalJSON(&channeltypes.Acknowledgement{ + Response: &channeltypes.Acknowledgement_Error{Error: "error"}, + }) + + // Call OnTimeoutPacket with the failed ack + err = s.chainA.GetSimApp().RateLimitKeeper.AcknowledgeRateLimitedPacket(s.chainA.GetContext(), packet, ackFailure) + s.Require().NoError(err, "no error expected during AckPacket") + + // Confirm the pending packet was removed + found := s.chainA.GetSimApp().RateLimitKeeper.CheckPacketSentDuringCurrentQuota(s.chainA.GetContext(), sourceChannel, sequence) + s.Require().False(found, "send packet should have been removed") + + // Confirm the flow was adjusted + rateLimit, found := s.chainA.GetSimApp().RateLimitKeeper.GetRateLimit(s.chainA.GetContext(), denom, sourceChannel) + s.Require().True(found) + s.Require().Equal(initialOutflow.Sub(packetAmount).Int64(), rateLimit.Flow.Outflow.Int64(), "outflow") +} + +func (s *KeeperTestSuite) TestTimeoutRateLimitedPacket() { + // For timeout packets, the source will be stride and the destination will be the host + denom := ustrd + sourceChannel := channelOnStride + destinationChannel := channelOnHost + initialOutflow := sdkmath.NewInt(100) + packetAmount := sdkmath.NewInt(10) + sequence := uint64(10) + + // Create rate limit - only outflow is needed to this tests + s.chainA.GetSimApp().RateLimitKeeper.SetRateLimit(s.chainA.GetContext(), types.RateLimit{ + Path: &types.Path{Denom: denom, ChannelOrClientId: channelID}, + Flow: &types.Flow{Outflow: initialOutflow}, + }) + + // Store the pending packet for this sequence number + s.chainA.GetSimApp().RateLimitKeeper.SetPendingSendPacket(s.chainA.GetContext(), sourceChannel, sequence) + + // Build the timeout packet + packetData, err := json.Marshal(transfertypes.FungibleTokenPacketData{Denom: denom, Amount: packetAmount.String()}) + s.Require().NoError(err) + packet := channeltypes.Packet{ + SourcePort: transferPort, + SourceChannel: sourceChannel, + DestinationPort: transferPort, + DestinationChannel: destinationChannel, + Data: packetData, + Sequence: sequence, + } + + // Call OnTimeoutPacket - the outflow should get decremented + err = s.chainA.GetSimApp().RateLimitKeeper.TimeoutRateLimitedPacket(s.chainA.GetContext(), packet) + s.Require().NoError(err, "no error expected when calling timeout packet") + + expectedOutflow := initialOutflow.Sub(packetAmount) + rateLimit, found := s.chainA.GetSimApp().RateLimitKeeper.GetRateLimit(s.chainA.GetContext(), denom, channelID) + s.Require().True(found) + s.Require().Equal(expectedOutflow.Int64(), rateLimit.Flow.Outflow.Int64(), "outflow decremented") + + // Check that the pending packet has been removed + found = s.chainA.GetSimApp().RateLimitKeeper.CheckPacketSentDuringCurrentQuota(s.chainA.GetContext(), channelID, sequence) + s.Require().False(found, "pending packet should have been removed") + + // Call OnTimeoutPacket again with a different sequence number + // (to simulate a timeout that arrived in a different quota window from where the send occurred) + // The outflow should not change + packet.Sequence-- + err = s.chainA.GetSimApp().RateLimitKeeper.TimeoutRateLimitedPacket(s.chainA.GetContext(), packet) + s.Require().NoError(err, "no error expected when calling timeout packet again") + + rateLimit, found = 
s.chainA.GetSimApp().RateLimitKeeper.GetRateLimit(s.chainA.GetContext(), denom, channelID) + s.Require().True(found) + s.Require().Equal(expectedOutflow.Int64(), rateLimit.Flow.Outflow.Int64(), "outflow should not have changed") +} + +// --- Middleware Tests --- + +// TestOnRecvPacket_Allowed tests the middleware's OnRecvPacket when the packet is allowed +func (s *KeeperTestSuite) TestOnRecvPacket_Allowed() { + path := ibctesting.NewTransferPath(s.chainA, s.chainB) + path.Setup() + + // Define recipient and calculate expected voucher denom on chain B + recipientAddr := s.chainB.SenderAccount.GetAddress() + voucherDenomStr := hashDenomTrace(fmt.Sprintf("%s/%s/%s", transferPort, path.EndpointB.ChannelID, uosmo)) + + // Fund recipient account with native denom + fundAmount := sdkmath.NewInt(1000000) + bondDenom, err := s.chainB.GetSimApp().StakingKeeper.BondDenom(s.chainB.GetContext()) + s.Require().NoError(err, "getting bond denom failed") + fundCoins := sdk.NewCoins(sdk.NewCoin(bondDenom, fundAmount)) + // Mint native denom to transfer module + err = s.chainB.GetSimApp().BankKeeper.MintCoins(s.chainB.GetContext(), transfertypes.ModuleName, fundCoins) + s.Require().NoError(err, "minting native denom coins to transfer module failed") + // Send native denom from transfer module to recipient + err = s.chainB.GetSimApp().BankKeeper.SendCoinsFromModuleToAccount(s.chainB.GetContext(), transfertypes.ModuleName, recipientAddr, fundCoins) + s.Require().NoError(err, "funding recipient account with native denom failed") + + // Create the test packet data + testAmountStr := "10" + testAmountInt, _ := sdkmath.NewIntFromString(testAmountStr) + packetDataBz, err := json.Marshal(transfertypes.FungibleTokenPacketData{ + Denom: uosmo, + Amount: testAmountStr, + Sender: s.chainA.SenderAccount.GetAddress().String(), + Receiver: recipientAddr.String(), + }) + s.Require().NoError(err) + + // Set the rate limit using the voucher denom string + simulatedSupply := sdkmath.NewInt(1000) // Keep simulated supply for rate limit calculation + s.chainB.GetSimApp().RateLimitKeeper.SetRateLimit(s.chainB.GetContext(), types.RateLimit{ + Path: &types.Path{Denom: voucherDenomStr, ChannelOrClientId: path.EndpointB.ChannelID}, + Quota: &types.Quota{MaxPercentRecv: sdkmath.NewInt(100), DurationHours: 1}, // High quota + Flow: &types.Flow{Inflow: sdkmath.ZeroInt(), Outflow: sdkmath.ZeroInt(), ChannelValue: simulatedSupply}, + }) + + timeoutTS := uint64(s.coordinator.CurrentTime.Add(time.Hour).UnixNano()) + // Commit the packet on chain A so that RelayPacket can find the commitment + seq, err := path.EndpointA.SendPacket(clienttypes.ZeroHeight(), timeoutTS, packetDataBz) + s.Require().NoError(err, "sending packet on chain A failed") + + packet := channeltypes.Packet{ + Sequence: seq, + SourcePort: path.EndpointA.ChannelConfig.PortID, + SourceChannel: path.EndpointA.ChannelID, + DestinationPort: path.EndpointB.ChannelConfig.PortID, + DestinationChannel: path.EndpointB.ChannelID, + Data: packetDataBz, + TimeoutHeight: clienttypes.ZeroHeight(), + TimeoutTimestamp: timeoutTS, + } + + // Relay the packet. This will call OnRecvPacket on chain B through the integrated middleware stack. 
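For reference while reading these middleware tests: the quotas are percentages of the recorded ChannelValue, so the earlier createRateLimitCloseToQuota fixture (10% quota, channel value 100, existing flow 9) rejects an additional 5, while the 100% quota used just above admits the transfer. The sketch below spells out that implied receive-side check; the helper is hypothetical and the exact boundary and rounding behaviour are not taken from this diff.

package main

import (
	"fmt"

	sdkmath "cosmossdk.io/math"

	"github.com/cosmos/ibc-go/v10/modules/apps/rate-limiting/types"
)

// exceedsRecvQuota is a hypothetical helper mirroring how the test fixtures behave:
// a receive is rejected when Inflow + amount would exceed MaxPercentRecv percent
// of the channel value.
func exceedsRecvQuota(rl types.RateLimit, amount sdkmath.Int) bool {
	threshold := rl.Flow.ChannelValue.Mul(rl.Quota.MaxPercentRecv).Quo(sdkmath.NewInt(100))
	return rl.Flow.Inflow.Add(amount).GT(threshold)
}

func main() {
	nearQuota := types.RateLimit{
		Quota: &types.Quota{MaxPercentRecv: sdkmath.NewInt(10)}, // 10% of channel value
		Flow: &types.Flow{
			Inflow:       sdkmath.NewInt(9),
			ChannelValue: sdkmath.NewInt(100), // threshold = 10
		},
	}
	freshQuota := types.RateLimit{
		Quota: &types.Quota{MaxPercentRecv: sdkmath.NewInt(10)},
		Flow:  &types.Flow{Inflow: sdkmath.ZeroInt(), ChannelValue: sdkmath.NewInt(100)},
	}

	fmt.Println(exceedsRecvQuota(nearQuota, sdkmath.NewInt(5)))  // true: 9 + 5 exceeds the threshold of 10
	fmt.Println(exceedsRecvQuota(freshQuota, sdkmath.NewInt(5))) // false: 0 + 5 is within the threshold
}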
+ err = path.RelayPacket(packet) + s.Require().NoError(err, "relaying packet failed") + + // Check acknowledgement on chain B + ackBz, found := s.chainB.GetSimApp().IBCKeeper.ChannelKeeper.GetPacketAcknowledgement(s.chainB.GetContext(), packet.GetDestPort(), packet.GetDestChannel(), packet.GetSequence()) + s.Require().True(found, "acknowledgement not found") + s.Require().NotNil(ackBz, "ack should not be nil") + + expectedAck := channeltypes.NewResultAcknowledgement([]byte{1}) + expBz := channeltypes.CommitAcknowledgement(expectedAck.Acknowledgement()) + s.Require().Equal(expBz, ackBz) + + // Check flow was updated + rateLimit, found := s.chainB.GetSimApp().RateLimitKeeper.GetRateLimit(s.chainB.GetContext(), voucherDenomStr, path.EndpointB.ChannelID) + s.Require().True(found) + s.Require().Equal(testAmountInt.Int64(), rateLimit.Flow.Inflow.Int64(), "inflow should be updated") +} + +// TestOnRecvPacket_Denied tests the middleware's OnRecvPacket when the packet is denied +func (s *KeeperTestSuite) TestOnRecvPacket_Denied() { + path := ibctesting.NewTransferPath(s.chainA, s.chainB) + path.Setup() + + // Create rate limit with zero quota for recv + rateLimitDenom := hashDenomTrace(fmt.Sprintf("%s/%s/%s", transferPort, path.EndpointB.ChannelID, sdk.DefaultBondDenom)) + s.chainB.GetSimApp().RateLimitKeeper.SetRateLimit(s.chainB.GetContext(), types.RateLimit{ + Path: &types.Path{Denom: rateLimitDenom, ChannelOrClientId: path.EndpointB.ChannelID}, + Quota: &types.Quota{MaxPercentRecv: sdkmath.ZeroInt(), DurationHours: 1}, // Zero quota + Flow: &types.Flow{Inflow: sdkmath.ZeroInt(), Outflow: sdkmath.ZeroInt(), ChannelValue: sdkmath.NewInt(1000)}, + }) + + sender := s.chainA.SenderAccount.GetAddress() + receiver := s.chainB.SenderAccount.GetAddress() + sendCoin := ibctesting.TestCoin + + // Create packet data + packetDataBz, err := json.Marshal(transfertypes.FungibleTokenPacketData{ + Denom: sendCoin.Denom, + Amount: sendCoin.Amount.String(), + Sender: sender.String(), + Receiver: receiver.String(), + }) + s.Require().NoError(err) + + timeoutTS := uint64(s.coordinator.CurrentTime.Add(time.Hour).UnixNano()) + timeoutHeight := clienttypes.ZeroHeight() + sourcePort := path.EndpointA.ChannelConfig.PortID + sourceChannel := path.EndpointA.ChannelID + senderInitialBal := s.chainA.GetSimApp().BankKeeper.GetBalance(s.chainA.GetContext(), sender, sdk.DefaultBondDenom) + + // Commit the packet on chain A so that RelayPacket can find the commitment + transferMsg := transfertypes.NewMsgTransfer(sourcePort, sourceChannel, sendCoin, sender.String(), receiver.String(), timeoutHeight, timeoutTS, "") + resp, err := s.chainA.GetSimApp().TransferKeeper.Transfer(s.chainA.GetContext(), transferMsg) + s.Require().NoError(err) + + // After sending the transfer, "sendCoin" should be taken from the sender to escrow. + senderIntermedBal := s.chainA.GetSimApp().BankKeeper.GetBalance(s.chainA.GetContext(), sender, sdk.DefaultBondDenom) + s.Require().Equal(senderInitialBal.Sub(sendCoin), senderIntermedBal) + + // Manually commit the block on chain A + s.coordinator.CommitBlock(s.chainA) + + packet := channeltypes.Packet{ + Sequence: resp.Sequence, + SourcePort: sourcePort, + SourceChannel: sourceChannel, + DestinationPort: path.EndpointB.ChannelConfig.PortID, + DestinationChannel: path.EndpointB.ChannelID, + Data: packetDataBz, + TimeoutHeight: timeoutHeight, + TimeoutTimestamp: timeoutTS, + } + + // Relay the packet. This will call OnRecvPacket on chain B through the integrated middleware stack.
+ err = path.RelayPacket(packet) + s.Require().NoError(err, "relaying packet failed") + + // Check acknowledgement on chain B + ackBytes, found := s.chainB.GetSimApp().IBCKeeper.ChannelKeeper.GetPacketAcknowledgement(s.chainB.GetContext(), packet.GetDestPort(), packet.GetDestChannel(), packet.GetSequence()) + s.Require().True(found, "acknowledgement not found") + s.Require().NotNil(ackBytes, "ack bytes should not be nil") + + expectedAck := channeltypes.NewErrorAcknowledgement(types.ErrQuotaExceeded) + expBz := channeltypes.CommitAcknowledgement(expectedAck.Acknowledgement()) + s.Require().Equal(expBz, ackBytes) + + // Check flow was NOT updated + rateLimit, found := s.chainB.GetSimApp().RateLimitKeeper.GetRateLimit(s.chainB.GetContext(), rateLimitDenom, path.EndpointB.ChannelID) + s.Require().True(found) + s.Require().True(rateLimit.Flow.Inflow.IsZero(), "inflow should NOT be updated") + + // Sender should be refunded + senderEndBal := s.chainA.GetSimApp().BankKeeper.GetBalance(s.chainA.GetContext(), sender, sdk.DefaultBondDenom) + s.Require().Equal(senderInitialBal, senderEndBal) +} + +// TestSendPacket_Allowed tests the middleware's SendPacket when the packet is allowed by directly calling the middleware +func (s *KeeperTestSuite) TestSendPacket_Allowed() { + path := ibctesting.NewTransferPath(s.chainA, s.chainB) + path.Setup() + + // Create rate limit with sufficient quota + rateLimitDenom := ustrd // Native denom + s.chainA.GetSimApp().RateLimitKeeper.SetRateLimit(s.chainA.GetContext(), types.RateLimit{ + Path: &types.Path{Denom: rateLimitDenom, ChannelOrClientId: path.EndpointA.ChannelID}, + Quota: &types.Quota{MaxPercentSend: sdkmath.NewInt(100), DurationHours: 1}, // High quota + Flow: &types.Flow{Inflow: sdkmath.ZeroInt(), Outflow: sdkmath.ZeroInt(), ChannelValue: sdkmath.NewInt(1000)}, + }) + + timeoutTimestamp := uint64(s.coordinator.CurrentTime.Add(time.Hour).UnixNano()) + amount := sdkmath.NewInt(10) + + // Create packet data + packetData := transfertypes.FungibleTokenPacketData{ + Denom: ustrd, + Amount: amount.String(), + Sender: s.chainA.SenderAccount.GetAddress().String(), + Receiver: s.chainB.SenderAccount.GetAddress().String(), + Memo: "", + } + packetDataBz, err := json.Marshal(packetData) + s.Require().NoError(err) + + // We need the transfer keeper's ICS4Wrapper which *is* the ratelimiting middleware + middleware, ok := s.chainA.GetSimApp().PFMKeeper.ICS4Wrapper().(*ratelimiting.IBCMiddleware) + s.Require().Truef(ok, "PFM's ICS4Wrapper should be the Rate Limit Middleware. 
Found %T", s.chainA.GetSimApp().TransferKeeper.GetICS4Wrapper()) + + // Directly call the middleware's SendPacket + seq, err := middleware.SendPacket( + s.chainA.GetContext(), + path.EndpointA.ChannelConfig.PortID, + path.EndpointA.ChannelID, + clienttypes.ZeroHeight(), // timeout height + timeoutTimestamp, + packetDataBz, + ) + + // Assert SendPacket succeeded + s.Require().NoError(err, "middleware.SendPacket should succeed") + s.Require().Equal(uint64(1), seq, "sequence should be 1") + + // Commit block and update context to ensure state updates are visible + s.coordinator.CommitBlock(s.chainA) + ctx := s.chainA.GetContext() // Get the latest context after commit + + // Check flow was updated using the latest context + rateLimit, found := s.chainA.GetSimApp().RateLimitKeeper.GetRateLimit(ctx, rateLimitDenom, path.EndpointA.ChannelID) + s.Require().True(found) + s.Require().Equal(amount.Int64(), rateLimit.Flow.Outflow.Int64(), "outflow should be updated") + + // Check pending packet was stored using the latest context + found = s.chainA.GetSimApp().RateLimitKeeper.CheckPacketSentDuringCurrentQuota(ctx, path.EndpointA.ChannelID, seq) + s.Require().True(found, "pending packet should be stored") +} + +// TestSendPacket_Denied tests the middleware's SendPacket when the packet is denied by directly calling the middleware +func (s *KeeperTestSuite) TestSendPacket_Denied() { + path := ibctesting.NewTransferPath(s.chainA, s.chainB) + path.Setup() + + // Create rate limit with a tiny quota that will be exceeded + rateLimitDenom := ustrd // Native denom + s.chainA.GetSimApp().RateLimitKeeper.SetRateLimit(s.chainA.GetContext(), types.RateLimit{ + Path: &types.Path{Denom: rateLimitDenom, ChannelOrClientId: path.EndpointA.ChannelID}, + Quota: &types.Quota{MaxPercentSend: sdkmath.NewInt(1), DurationHours: 1}, // Set quota to 1% (will allow < 10 with ChannelValue 1000) + Flow: &types.Flow{Inflow: sdkmath.ZeroInt(), Outflow: sdkmath.ZeroInt(), ChannelValue: sdkmath.NewInt(1000)}, + }) + + timeoutTimestamp := uint64(s.coordinator.CurrentTime.Add(time.Hour).UnixNano()) + amount := sdkmath.NewInt(11) // amount 11 will exceed 1% of 1000 (threshold is 10, check is GT) + + // Create packet data + packetData := transfertypes.FungibleTokenPacketData{ + Denom: ustrd, + Amount: amount.String(), + Sender: s.chainA.SenderAccount.GetAddress().String(), + Receiver: s.chainB.SenderAccount.GetAddress().String(), + Memo: "", + } + packetDataBz, err := json.Marshal(packetData) + s.Require().NoError(err) + + // Get the middleware instance + middleware, ok := s.chainA.GetSimApp().PFMKeeper.ICS4Wrapper().(*ratelimiting.IBCMiddleware) + s.Require().Truef(ok, "Packet forward middleware keeper's ICS4Wrapper should be the RateLimit middleware. 
Found: %T", middleware) + + // Directly call the middleware's SendPacket + _, err = middleware.SendPacket( + s.chainA.GetContext(), + path.EndpointA.ChannelConfig.PortID, + path.EndpointA.ChannelID, + clienttypes.ZeroHeight(), // timeout height + timeoutTimestamp, + packetDataBz, + ) + + // Check error is quota exceeded + s.Require().Error(err, "middleware.SendPacket should fail") + s.Require().ErrorIs(err, types.ErrQuotaExceeded, "error should be quota exceeded") + + // Commit block and update context + s.coordinator.CommitBlock(s.chainA) + ctx := s.chainA.GetContext() // Get latest context + + // Check flow was NOT updated + rateLimit, found := s.chainA.GetSimApp().RateLimitKeeper.GetRateLimit(ctx, rateLimitDenom, path.EndpointA.ChannelID) + s.Require().True(found) + s.Require().True(rateLimit.Flow.Outflow.IsZero(), "outflow should NOT be updated") +} diff --git a/modules/apps/rate-limiting/keeper/pending_send.go b/modules/apps/rate-limiting/keeper/pending_send.go new file mode 100644 index 00000000000..86ac633751e --- /dev/null +++ b/modules/apps/rate-limiting/keeper/pending_send.go @@ -0,0 +1,80 @@ +package keeper + +import ( + "encoding/binary" + "fmt" + "strings" + + "cosmossdk.io/store/prefix" + storetypes "cosmossdk.io/store/types" + + "github.com/cosmos/cosmos-sdk/runtime" + sdk "github.com/cosmos/cosmos-sdk/types" + + "github.com/cosmos/ibc-go/v10/modules/apps/rate-limiting/types" +) + +// Sets the sequence number of a packet that was just sent +func (k *Keeper) SetPendingSendPacket(ctx sdk.Context, channelID string, sequence uint64) { + adapter := runtime.KVStoreAdapter(k.storeService.OpenKVStore(ctx)) + store := prefix.NewStore(adapter, types.PendingSendPacketPrefix) + key := types.PendingSendPacketKey(channelID, sequence) + store.Set(key, []byte{1}) +} + +// Remove a pending packet sequence number from the store +// Used after the ack or timeout for a packet has been received +func (k *Keeper) RemovePendingSendPacket(ctx sdk.Context, channelID string, sequence uint64) { + adapter := runtime.KVStoreAdapter(k.storeService.OpenKVStore(ctx)) + store := prefix.NewStore(adapter, types.PendingSendPacketPrefix) + key := types.PendingSendPacketKey(channelID, sequence) + store.Delete(key) +} + +// Checks whether the packet sequence number is in the store - indicating that it was +// sent during the current quota +func (k *Keeper) CheckPacketSentDuringCurrentQuota(ctx sdk.Context, channelID string, sequence uint64) bool { + adapter := runtime.KVStoreAdapter(k.storeService.OpenKVStore(ctx)) + store := prefix.NewStore(adapter, types.PendingSendPacketPrefix) + key := types.PendingSendPacketKey(channelID, sequence) + valueBz := store.Get(key) + found := len(valueBz) != 0 + return found +} + +// Get all pending packet sequence numbers +func (k *Keeper) GetAllPendingSendPackets(ctx sdk.Context) []string { + adapter := runtime.KVStoreAdapter(k.storeService.OpenKVStore(ctx)) + store := prefix.NewStore(adapter, types.PendingSendPacketPrefix) + + iterator := store.Iterator(nil, nil) + defer iterator.Close() + + pendingPackets := make([]string, 0) + for ; iterator.Valid(); iterator.Next() { + key := iterator.Key() + + channelID := string(key[:types.PendingSendPacketChannelLength]) + channelID = strings.TrimRight(channelID, "\x00") // removes null bytes from suffix + sequence := binary.BigEndian.Uint64(key[types.PendingSendPacketChannelLength:]) + + packetID := fmt.Sprintf("%s/%d", channelID, sequence) + pendingPackets = append(pendingPackets, packetID) + } + + return pendingPackets +} + +// Remove 
all pending sequence numbers from the store +// This is executed when the quota resets +func (k *Keeper) RemoveAllChannelPendingSendPackets(ctx sdk.Context, channelID string) { + adapter := runtime.KVStoreAdapter(k.storeService.OpenKVStore(ctx)) + store := prefix.NewStore(adapter, types.PendingSendPacketPrefix) + + iterator := storetypes.KVStorePrefixIterator(store, []byte(channelID)) + defer iterator.Close() + + for ; iterator.Valid(); iterator.Next() { + store.Delete(iterator.Key()) + } +} diff --git a/modules/apps/rate-limiting/keeper/pending_send_test.go b/modules/apps/rate-limiting/keeper/pending_send_test.go new file mode 100644 index 00000000000..f5066da5030 --- /dev/null +++ b/modules/apps/rate-limiting/keeper/pending_send_test.go @@ -0,0 +1,40 @@ +package keeper_test + +import "fmt" + +func (s *KeeperTestSuite) TestPendingSendPacketPrefix() { + // Store 5 packets across two channels + sendPackets := []string{} + for _, channelID := range []string{"channel-0", "channel-1"} { + for sequence := range uint64(5) { + s.chainA.GetSimApp().RateLimitKeeper.SetPendingSendPacket(s.chainA.GetContext(), channelID, sequence) + sendPackets = append(sendPackets, fmt.Sprintf("%s/%d", channelID, sequence)) + } + } + + // Check that each sequence number is found + for _, channelID := range []string{"channel-0", "channel-1"} { + for sequence := range uint64(5) { + found := s.chainA.GetSimApp().RateLimitKeeper.CheckPacketSentDuringCurrentQuota(s.chainA.GetContext(), channelID, sequence) + s.Require().True(found, "send packet should have been found - channel %s, sequence: %d", channelID, sequence) + } + } + + // Check lookup of all sequence numbers + actualSendPackets := s.chainA.GetSimApp().RateLimitKeeper.GetAllPendingSendPackets(s.chainA.GetContext()) + s.Require().Equal(sendPackets, actualSendPackets, "all send packets") + + // Remove sequence number 0 from each channel and all remaining sequence numbers from channel-0 + s.chainA.GetSimApp().RateLimitKeeper.RemovePendingSendPacket(s.chainA.GetContext(), "channel-0", 0) + s.chainA.GetSimApp().RateLimitKeeper.RemovePendingSendPacket(s.chainA.GetContext(), "channel-1", 0) + s.chainA.GetSimApp().RateLimitKeeper.RemoveAllChannelPendingSendPackets(s.chainA.GetContext(), "channel-0") + + // Check that only the remaining sequences are found + for _, channelID := range []string{"channel-0", "channel-1"} { + for sequence := range uint64(5) { + expected := (channelID == "channel-1") && (sequence != 0) + actual := s.chainA.GetSimApp().RateLimitKeeper.CheckPacketSentDuringCurrentQuota(s.chainA.GetContext(), channelID, sequence) + s.Require().Equal(expected, actual, "send packet after removal - channel: %s, sequence: %d", channelID, sequence) + } + } +} diff --git a/modules/apps/rate-limiting/keeper/rate_limit.go b/modules/apps/rate-limiting/keeper/rate_limit.go new file mode 100644 index 00000000000..79937a564cf --- /dev/null +++ b/modules/apps/rate-limiting/keeper/rate_limit.go @@ -0,0 +1,175 @@ +package keeper + +import ( + sdkmath "cosmossdk.io/math" + "cosmossdk.io/store/prefix" + + "github.com/cosmos/cosmos-sdk/runtime" + sdk "github.com/cosmos/cosmos-sdk/types" + + "github.com/cosmos/ibc-go/v10/modules/apps/rate-limiting/types" + transfertypes "github.com/cosmos/ibc-go/v10/modules/apps/transfer/types" + ibcexported "github.com/cosmos/ibc-go/v10/modules/core/exported" +) + +// Stores/Updates a rate limit object in the store +func (k *Keeper) SetRateLimit(ctx sdk.Context, rateLimit types.RateLimit) { + adapter := runtime.KVStoreAdapter(k.storeService.OpenKVStore(ctx)) + store :=
prefix.NewStore(adapter, types.RateLimitKeyPrefix) + + rateLimitKey := types.RateLimitItemKey(rateLimit.Path.Denom, rateLimit.Path.ChannelOrClientId) + rateLimitValue := k.cdc.MustMarshal(&rateLimit) + + store.Set(rateLimitKey, rateLimitValue) +} + +// Removes a rate limit object from the store using denom and channel-id +func (k *Keeper) RemoveRateLimit(ctx sdk.Context, denom string, channelID string) { + adapter := runtime.KVStoreAdapter(k.storeService.OpenKVStore(ctx)) + store := prefix.NewStore(adapter, types.RateLimitKeyPrefix) + rateLimitKey := types.RateLimitItemKey(denom, channelID) + store.Delete(rateLimitKey) +} + +// Grabs and returns a rate limit object from the store using denom and channel-id +func (k *Keeper) GetRateLimit(ctx sdk.Context, denom string, channelID string) (types.RateLimit, bool) { + adapter := runtime.KVStoreAdapter(k.storeService.OpenKVStore(ctx)) + store := prefix.NewStore(adapter, types.RateLimitKeyPrefix) + + rateLimitKey := types.RateLimitItemKey(denom, channelID) + rateLimitValue := store.Get(rateLimitKey) + + if len(rateLimitValue) == 0 { + return types.RateLimit{}, false + } + + var rateLimit types.RateLimit + k.cdc.MustUnmarshal(rateLimitValue, &rateLimit) + return rateLimit, true +} + +// Returns all rate limits stored +func (k *Keeper) GetAllRateLimits(ctx sdk.Context) []types.RateLimit { + adapter := runtime.KVStoreAdapter(k.storeService.OpenKVStore(ctx)) + store := prefix.NewStore(adapter, types.RateLimitKeyPrefix) + + iterator := store.Iterator(nil, nil) + defer iterator.Close() + + allRateLimits := []types.RateLimit{} + for ; iterator.Valid(); iterator.Next() { + rateLimit := types.RateLimit{} + if err := k.cdc.Unmarshal(iterator.Value(), &rateLimit); err != nil { + // Log the error and skip this entry if unmarshalling fails + k.Logger(ctx).Error("failed to unmarshal rate limit", "key", string(iterator.Key()), "error", err) + continue + } + allRateLimits = append(allRateLimits, rateLimit) + } + + return allRateLimits +} + +// Adds a new rate limit. Fails if the rate limit already exists or the channel value is 0 +func (k *Keeper) AddRateLimit(ctx sdk.Context, msg *types.MsgAddRateLimit) error { + channelValue := k.GetChannelValue(ctx, msg.Denom) + if channelValue.IsZero() { + return types.ErrZeroChannelValue + } + + _, found := k.GetRateLimit(ctx, msg.Denom, msg.ChannelOrClientId) + if found { + return types.ErrRateLimitAlreadyExists + } + + // Confirm the channel or client exists + _, found = k.channelKeeper.GetChannel(ctx, transfertypes.PortID, msg.ChannelOrClientId) + if !found { + // Check if the channelId is actually a clientId + status := k.clientKeeper.GetClientStatus(ctx, msg.ChannelOrClientId) + // If the status is Unauthorized or Unknown, it means the client doesn't exist or is invalid + if status == ibcexported.Unknown || status == ibcexported.Unauthorized { + // Return specific error indicating neither channel nor client was found + return types.ErrChannelNotFound + } + // If status is Active, Expired, or Frozen, the client exists, proceed. 
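+ // Note: ChannelOrClientId may therefore be either a channel identifier (e.g. "channel-0") or a client identifier; only IDs that resolve to neither a channel nor a known client are rejected.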
+ } + + // Create and store the rate limit object + path := types.Path{ + Denom: msg.Denom, + ChannelOrClientId: msg.ChannelOrClientId, + } + quota := types.Quota{ + MaxPercentSend: msg.MaxPercentSend, + MaxPercentRecv: msg.MaxPercentRecv, + DurationHours: msg.DurationHours, + } + flow := types.Flow{ + Inflow: sdkmath.ZeroInt(), + Outflow: sdkmath.ZeroInt(), + ChannelValue: channelValue, + } + + k.SetRateLimit(ctx, types.RateLimit{ + Path: &path, + Quota: &quota, + Flow: &flow, + }) + + return nil +} + +// Updates an existing rate limit. Fails if the rate limit doesn't exist +func (k *Keeper) UpdateRateLimit(ctx sdk.Context, msg *types.MsgUpdateRateLimit) error { + _, found := k.GetRateLimit(ctx, msg.Denom, msg.ChannelOrClientId) + if !found { + return types.ErrRateLimitNotFound + } + + // Update the rate limit object with the new quota information + // The flow should also get reset to 0 + path := types.Path{ + Denom: msg.Denom, + ChannelOrClientId: msg.ChannelOrClientId, + } + quota := types.Quota{ + MaxPercentSend: msg.MaxPercentSend, + MaxPercentRecv: msg.MaxPercentRecv, + DurationHours: msg.DurationHours, + } + flow := types.Flow{ + Inflow: sdkmath.ZeroInt(), + Outflow: sdkmath.ZeroInt(), + ChannelValue: k.GetChannelValue(ctx, msg.Denom), + } + + k.SetRateLimit(ctx, types.RateLimit{ + Path: &path, + Quota: &quota, + Flow: &flow, + }) + + return nil +} + +// Reset the rate limit after expiration +// The inflow and outflow should get reset to 0, the channelValue should be updated, +// and all pending send packet sequence numbers should be removed +func (k *Keeper) ResetRateLimit(ctx sdk.Context, denom string, channelID string) error { + rateLimit, found := k.GetRateLimit(ctx, denom, channelID) + if !found { + return types.ErrRateLimitNotFound + } + + flow := types.Flow{ + Inflow: sdkmath.ZeroInt(), + Outflow: sdkmath.ZeroInt(), + ChannelValue: k.GetChannelValue(ctx, denom), + } + rateLimit.Flow = &flow + + k.SetRateLimit(ctx, rateLimit) + k.RemoveAllChannelPendingSendPackets(ctx, channelID) + return nil +} diff --git a/modules/apps/rate-limiting/keeper/rate_limit_test.go b/modules/apps/rate-limiting/keeper/rate_limit_test.go new file mode 100644 index 00000000000..cc5bd6379fa --- /dev/null +++ b/modules/apps/rate-limiting/keeper/rate_limit_test.go @@ -0,0 +1,131 @@ +package keeper_test + +import ( + "strconv" + + sdkmath "cosmossdk.io/math" + + sdk "github.com/cosmos/cosmos-sdk/types" + + "github.com/cosmos/ibc-go/v10/modules/apps/rate-limiting/types" + transfertypes "github.com/cosmos/ibc-go/v10/modules/apps/transfer/types" + ibctesting "github.com/cosmos/ibc-go/v10/testing" +) + +const ( + denom = "denom" + channelID = "channel-0" + sender = "sender" + receiver = "receiver" +) + +// Helper function to create 5 rate limit objects with various attributes +func (s *KeeperTestSuite) createRateLimits() []types.RateLimit { + rateLimits := []types.RateLimit{} + for i := 1; i <= 5; i++ { + suffix := strconv.Itoa(i) + rateLimit := types.RateLimit{ + Path: &types.Path{Denom: "denom-" + suffix, ChannelOrClientId: "channel-" + suffix}, + Flow: &types.Flow{Inflow: sdkmath.NewInt(10), Outflow: sdkmath.NewInt(10)}, + } + + rateLimits = append(rateLimits, rateLimit) + s.chainA.GetSimApp().RateLimitKeeper.SetRateLimit(s.chainA.GetContext(), rateLimit) + } + return rateLimits +} + +func (s *KeeperTestSuite) TestGetRateLimit() { + rateLimits := s.createRateLimits() + + expectedRateLimit := rateLimits[0] + denom := expectedRateLimit.Path.Denom + channelID := expectedRateLimit.Path.ChannelOrClientId + 
actualRateLimit, found := s.chainA.GetSimApp().RateLimitKeeper.GetRateLimit(s.chainA.GetContext(), denom, channelID) + s.Require().True(found, "element should have been found, but was not") + s.Require().Equal(expectedRateLimit, actualRateLimit) +} + +func (s *KeeperTestSuite) TestRemoveRateLimit() { + rateLimits := s.createRateLimits() + + rateLimitToRemove := rateLimits[0] + denomToRemove := rateLimitToRemove.Path.Denom + channelIDToRemove := rateLimitToRemove.Path.ChannelOrClientId + + s.chainA.GetSimApp().RateLimitKeeper.RemoveRateLimit(s.chainA.GetContext(), denomToRemove, channelIDToRemove) + _, found := s.chainA.GetSimApp().RateLimitKeeper.GetRateLimit(s.chainA.GetContext(), denomToRemove, channelIDToRemove) + s.Require().False(found, "the removed element should not have been found, but it was") +} + +func (s *KeeperTestSuite) TestResetRateLimit() { + rateLimits := s.createRateLimits() + + rateLimitToReset := rateLimits[0] + denomToRemove := rateLimitToReset.Path.Denom + channelIDToRemove := rateLimitToReset.Path.ChannelOrClientId + + err := s.chainA.GetSimApp().RateLimitKeeper.ResetRateLimit(s.chainA.GetContext(), denomToRemove, channelIDToRemove) + s.Require().NoError(err) + + rateLimit, found := s.chainA.GetSimApp().RateLimitKeeper.GetRateLimit(s.chainA.GetContext(), denomToRemove, channelIDToRemove) + s.Require().True(found, "element should have been found, but was not") + s.Require().Zero(rateLimit.Flow.Inflow.Int64(), "Inflow should have been reset to 0") + s.Require().Zero(rateLimit.Flow.Outflow.Int64(), "Outflow should have been reset to 0") +} + +func (s *KeeperTestSuite) TestGetAllRateLimits() { + expectedRateLimits := s.createRateLimits() + actualRateLimits := s.chainA.GetSimApp().RateLimitKeeper.GetAllRateLimits(s.chainA.GetContext()) + s.Require().Len(actualRateLimits, len(expectedRateLimits)) + s.Require().ElementsMatch(expectedRateLimits, actualRateLimits, "all rate limits") +} + +func (s *KeeperTestSuite) TestAddRateLimit_ClientId() { + // Setup client between chain A and chain B + path := ibctesting.NewPath(s.chainA, s.chainB) + s.coordinator.SetupClients(path) + clientID := path.EndpointA.ClientID + + // Mock GetChannelValue to return non-zero + // Note: This might require adjusting the test suite setup if GetChannelValue isn't easily mockable. + // For now, assume it works or the underlying bank keeper has supply. + // A more robust test might involve actually sending tokens. 
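+ // GetChannelValue is assumed here to be derived from the denom's total supply, so the mint below is sufficient to make the channel value non-zero.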
+ // Mint some tokens for the denom to ensure channel value is non-zero + mintAmount := sdkmath.NewInt(1000) + mintCoins := sdk.NewCoins(sdk.NewCoin("clientdenom", mintAmount)) + // Mint the coins to the transfer module account + err := s.chainA.GetSimApp().BankKeeper.MintCoins(s.chainA.GetContext(), transfertypes.ModuleName, mintCoins) + s.Require().NoError(err, "minting coins failed") + + msg := &types.MsgAddRateLimit{ + Signer: s.chainA.GetSimApp().RateLimitKeeper.GetAuthority(), + Denom: "clientdenom", + ChannelOrClientId: clientID, + MaxPercentSend: sdkmath.NewInt(10), + MaxPercentRecv: sdkmath.NewInt(10), + DurationHours: 24, + } + + // Add the rate limit using the client ID + err = s.chainA.GetSimApp().RateLimitKeeper.AddRateLimit(s.chainA.GetContext(), msg) + s.Require().NoError(err, "adding rate limit with client ID should succeed") + + // Verify the rate limit was stored correctly + _, found := s.chainA.GetSimApp().RateLimitKeeper.GetRateLimit(s.chainA.GetContext(), msg.Denom, clientID) + s.Require().True(found, "rate limit added with client ID should be found") + + // Test adding with an invalid ID (neither channel nor client) + invalidID := "invalid-id" + msgInvalid := &types.MsgAddRateLimit{ + Signer: s.chainA.GetSimApp().RateLimitKeeper.GetAuthority(), + Denom: "clientdenom", + ChannelOrClientId: invalidID, + MaxPercentSend: sdkmath.NewInt(10), + MaxPercentRecv: sdkmath.NewInt(10), + DurationHours: 24, + } + err = s.chainA.GetSimApp().RateLimitKeeper.AddRateLimit(s.chainA.GetContext(), msgInvalid) + s.Require().ErrorIs(err, types.ErrChannelNotFound, "adding rate limit with invalid ID should fail") +} diff --git a/modules/apps/rate-limiting/keeper/whitelist.go b/modules/apps/rate-limiting/keeper/whitelist.go new file mode 100644 index 00000000000..28fc4487e2c --- /dev/null +++ b/modules/apps/rate-limiting/keeper/whitelist.go @@ -0,0 +1,58 @@ +package keeper + +import ( + "cosmossdk.io/store/prefix" + + "github.com/cosmos/cosmos-sdk/runtime" + sdk "github.com/cosmos/cosmos-sdk/types" + + "github.com/cosmos/ibc-go/v10/modules/apps/rate-limiting/types" +) + +// Adds a pair of sender and receiver addresses to the whitelist to allow all +// IBC transfers between those addresses to skip all flow calculations +func (k *Keeper) SetWhitelistedAddressPair(ctx sdk.Context, whitelist types.WhitelistedAddressPair) { + adapter := runtime.KVStoreAdapter(k.storeService.OpenKVStore(ctx)) + store := prefix.NewStore(adapter, types.AddressWhitelistKeyPrefix) + key := types.AddressWhitelistKey(whitelist.Sender, whitelist.Receiver) + value := k.cdc.MustMarshal(&whitelist) + store.Set(key, value) +} + +// Removes a whitelisted address pair so that its transfers are counted in the quota +func (k *Keeper) RemoveWhitelistedAddressPair(ctx sdk.Context, sender, receiver string) { + adapter := runtime.KVStoreAdapter(k.storeService.OpenKVStore(ctx)) + store := prefix.NewStore(adapter, types.AddressWhitelistKeyPrefix) + key := types.AddressWhitelistKey(sender, receiver) + store.Delete(key) +} + +// Check if a sender/receiver address pair is currently whitelisted +func (k *Keeper) IsAddressPairWhitelisted(ctx sdk.Context, sender, receiver string) bool { + adapter := runtime.KVStoreAdapter(k.storeService.OpenKVStore(ctx)) + store := prefix.NewStore(adapter, types.AddressWhitelistKeyPrefix) + + key := types.AddressWhitelistKey(sender, receiver) + value := store.Get(key) + found := len(value) != 0 + + return found +} + +// Get all the whitelisted addresses +func (k *Keeper)
GetAllWhitelistedAddressPairs(ctx sdk.Context) []types.WhitelistedAddressPair { + adapter := runtime.KVStoreAdapter(k.storeService.OpenKVStore(ctx)) + store := prefix.NewStore(adapter, types.AddressWhitelistKeyPrefix) + + iterator := store.Iterator(nil, nil) + defer iterator.Close() + + allWhitelistedAddresses := []types.WhitelistedAddressPair{} + for ; iterator.Valid(); iterator.Next() { + whitelist := types.WhitelistedAddressPair{} + k.cdc.MustUnmarshal(iterator.Value(), &whitelist) + allWhitelistedAddresses = append(allWhitelistedAddresses, whitelist) + } + + return allWhitelistedAddresses +} diff --git a/modules/apps/rate-limiting/keeper/whitelist_test.go b/modules/apps/rate-limiting/keeper/whitelist_test.go new file mode 100644 index 00000000000..7bf24b0a2be --- /dev/null +++ b/modules/apps/rate-limiting/keeper/whitelist_test.go @@ -0,0 +1,54 @@ +package keeper_test + +import ( + "github.com/cosmos/ibc-go/v10/modules/apps/rate-limiting/types" +) + +func (s *KeeperTestSuite) TestAddressWhitelist() { + // Store addresses in whitelist + expectedWhitelist := []types.WhitelistedAddressPair{ + {Sender: "sender-1", Receiver: "receiver-1"}, + {Sender: "sender-2", Receiver: "receiver-2"}, + {Sender: "sender-3", Receiver: "receiver-3"}, + } + for _, addressPair := range expectedWhitelist { + s.chainA.GetSimApp().RateLimitKeeper.SetWhitelistedAddressPair(s.chainA.GetContext(), addressPair) + } + + // Confirm that each was found + for _, addressPair := range expectedWhitelist { + found := s.chainA.GetSimApp().RateLimitKeeper.IsAddressPairWhitelisted(s.chainA.GetContext(), addressPair.Sender, addressPair.Receiver) + s.Require().True(found, "address pair should have been whitelisted (%s/%s)", + addressPair.Sender, addressPair.Receiver) + } + + // Confirm that both the sender and receiver must match for the pair to be whitelisted + for _, addressPair := range expectedWhitelist { + found := s.chainA.GetSimApp().RateLimitKeeper.IsAddressPairWhitelisted(s.chainA.GetContext(), addressPair.Sender, "fake-receiver") + s.Require().False(found, "address pair should not have been whitelisted (%s/%s)", + addressPair.Sender, "fake-receiver") + + found = s.chainA.GetSimApp().RateLimitKeeper.IsAddressPairWhitelisted(s.chainA.GetContext(), "fake-sender", addressPair.Receiver) + s.Require().False(found, "address pair should not have been whitelisted (%s/%s)", + "fake-sender", addressPair.Receiver) + } + + // Check GetAll + actualWhitelist := s.chainA.GetSimApp().RateLimitKeeper.GetAllWhitelistedAddressPairs(s.chainA.GetContext()) + s.Require().Equal(expectedWhitelist, actualWhitelist, "whitelist get all") + + // Finally, remove each from whitelist + for _, addressPair := range expectedWhitelist { + s.chainA.GetSimApp().RateLimitKeeper.RemoveWhitelistedAddressPair(s.chainA.GetContext(), addressPair.Sender, addressPair.Receiver) + } + + // Confirm there are no longer any whitelisted pairs + actualWhitelist = s.chainA.GetSimApp().RateLimitKeeper.GetAllWhitelistedAddressPairs(s.chainA.GetContext()) + s.Require().Empty(actualWhitelist, "whitelist should have been cleared") + + for _, addressPair := range expectedWhitelist { + found := s.chainA.GetSimApp().RateLimitKeeper.IsAddressPairWhitelisted(s.chainA.GetContext(), addressPair.Sender, addressPair.Receiver) + s.Require().False(found, "address pair should no longer be whitelisted (%s/%s)", + addressPair.Sender, addressPair.Receiver) + } +} diff --git a/modules/apps/rate-limiting/module.go b/modules/apps/rate-limiting/module.go new file mode 100644 index
00000000000..b3ddafb7865 --- /dev/null +++ b/modules/apps/rate-limiting/module.go @@ -0,0 +1,137 @@ +package ratelimiting + +import ( + "context" + "encoding/json" + "fmt" + + "github.com/grpc-ecosystem/grpc-gateway/runtime" + "github.com/spf13/cobra" + + "cosmossdk.io/core/appmodule" + + "github.com/cosmos/cosmos-sdk/client" + "github.com/cosmos/cosmos-sdk/codec" + codectypes "github.com/cosmos/cosmos-sdk/codec/types" + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/cosmos/cosmos-sdk/types/module" + + "github.com/cosmos/ibc-go/v10/modules/apps/rate-limiting/client/cli" + "github.com/cosmos/ibc-go/v10/modules/apps/rate-limiting/keeper" + "github.com/cosmos/ibc-go/v10/modules/apps/rate-limiting/types" +) + +var ( + _ module.AppModule = (*AppModule)(nil) + _ module.AppModuleBasic = (*AppModuleBasic)(nil) + _ module.HasGenesis = (*AppModule)(nil) + _ module.HasName = (*AppModule)(nil) + _ module.HasConsensusVersion = (*AppModule)(nil) + _ module.HasServices = (*AppModule)(nil) + _ appmodule.AppModule = (*AppModule)(nil) + _ appmodule.HasBeginBlocker = (*AppModule)(nil) + + // Note: IBCMiddleware implements porttypes.Middleware and porttypes.ICS4Wrapper +) + +// AppModuleBasic is the rate-limiting AppModuleBasic +type AppModuleBasic struct{} + +// Name implements AppModuleBasic interface +func (AppModuleBasic) Name() string { + return types.ModuleName +} + +// IsOnePerModuleType implements the depinject.OnePerModuleType interface. +func (AppModule) IsOnePerModuleType() {} + +// IsAppModule implements the appmodule.AppModule interface. +func (AppModule) IsAppModule() {} + +// RegisterLegacyAminoCodec implements AppModuleBasic interface +func (AppModuleBasic) RegisterLegacyAminoCodec(cdc *codec.LegacyAmino) { + types.RegisterLegacyAminoCodec(cdc) +} + +// RegisterInterfaces registers module concrete types into protobuf Any. +func (AppModuleBasic) RegisterInterfaces(registry codectypes.InterfaceRegistry) { + types.RegisterInterfaces(registry) +} + +// DefaultGenesis returns default genesis state as raw bytes for the rate-limiting +// module. +func (AppModuleBasic) DefaultGenesis(cdc codec.JSONCodec) json.RawMessage { + return cdc.MustMarshalJSON(types.DefaultGenesis()) +} + +// ValidateGenesis performs genesis state validation for the rate-limiting module. +func (AppModuleBasic) ValidateGenesis(cdc codec.JSONCodec, config client.TxEncodingConfig, bz json.RawMessage) error { + var gs types.GenesisState + if err := cdc.UnmarshalJSON(bz, &gs); err != nil { + return fmt.Errorf("failed to unmarshal %s genesis state: %w", types.ModuleName, err) + } + + return gs.Validate() +} + +// RegisterGRPCGatewayRoutes registers the gRPC Gateway routes for the rate-limiting module. +func (AppModuleBasic) RegisterGRPCGatewayRoutes(clientCtx client.Context, mux *runtime.ServeMux) { + if err := types.RegisterQueryHandlerClient(context.Background(), mux, types.NewQueryClient(clientCtx)); err != nil { + panic(err) + } +} + +// GetTxCmd implements AppModuleBasic interface +func (AppModuleBasic) GetTxCmd() *cobra.Command { + return nil +} + +// GetQueryCmd implements AppModuleBasic interface +func (AppModuleBasic) GetQueryCmd() *cobra.Command { + return cli.GetQueryCmd() +} + +// AppModule represents the AppModule for this module +type AppModule struct { + AppModuleBasic + keeper *keeper.Keeper +} + +// NewAppModule creates a new rate-limiting module +func NewAppModule(k *keeper.Keeper) AppModule { + return AppModule{ + keeper: k, + } +} + +// RegisterServices registers module services. 
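+// It wires the rate-limiting Msg and Query servers onto the module configurator.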
+func (am AppModule) RegisterServices(cfg module.Configurator) { + types.RegisterMsgServer(cfg.MsgServer(), keeper.NewMsgServerImpl(am.keeper)) // Use the msgServer implementation + types.RegisterQueryServer(cfg.QueryServer(), keeper.NewQuerier(am.keeper)) +} + +// InitGenesis performs genesis initialization for the rate-limiting module. It returns +// no validator updates. +func (am AppModule) InitGenesis(ctx sdk.Context, cdc codec.JSONCodec, data json.RawMessage) { + var genesisState types.GenesisState + cdc.MustUnmarshalJSON(data, &genesisState) + am.keeper.InitGenesis(ctx, genesisState) +} + +// ExportGenesis returns the exported genesis state as raw bytes for the rate-limiting +// module. +func (am AppModule) ExportGenesis(ctx sdk.Context, cdc codec.JSONCodec) json.RawMessage { + gs := am.keeper.ExportGenesis(ctx) + return cdc.MustMarshalJSON(gs) +} + +// ConsensusVersion implements AppModule/ConsensusVersion defining the current version of rate-limiting. +func (AppModule) ConsensusVersion() uint64 { return 1 } + +// BeginBlock implements the AppModule interface +func (am AppModule) BeginBlock(ctx context.Context) error { + sdkCtx := sdk.UnwrapSDKContext(ctx) + am.keeper.BeginBlocker(sdkCtx) + // we do not want to raise an error in block processing if rate limit reset fails + return nil +} diff --git a/modules/apps/rate-limiting/types/codec.go b/modules/apps/rate-limiting/types/codec.go new file mode 100644 index 00000000000..7f9d7959e9a --- /dev/null +++ b/modules/apps/rate-limiting/types/codec.go @@ -0,0 +1,34 @@ +package types + +import ( + "github.com/cosmos/cosmos-sdk/codec" + codectypes "github.com/cosmos/cosmos-sdk/codec/types" + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/cosmos/cosmos-sdk/types/msgservice" +) + +// RegisterLegacyAminoCodec registers the necessary rate-limiting interfaces and concrete types +// on the provided LegacyAmino codec. These types are used for Amino JSON serialization. +func RegisterLegacyAminoCodec(cdc *codec.LegacyAmino) { + cdc.RegisterConcrete(&MsgAddRateLimit{}, "ratelimit/MsgAddRateLimit", nil) + cdc.RegisterConcrete(&MsgUpdateRateLimit{}, "ratelimit/MsgUpdateRateLimit", nil) + cdc.RegisterConcrete(&MsgRemoveRateLimit{}, "ratelimit/MsgRemoveRateLimit", nil) + cdc.RegisterConcrete(&MsgResetRateLimit{}, "ratelimit/MsgResetRateLimit", nil) +} + +// RegisterInterfaces registers the rate-limiting interfaces types with the interface registry +func RegisterInterfaces(registry codectypes.InterfaceRegistry) { + registry.RegisterImplementations( + (*sdk.Msg)(nil), + &MsgAddRateLimit{}, + &MsgUpdateRateLimit{}, + &MsgRemoveRateLimit{}, + &MsgResetRateLimit{}, + ) + + msgservice.RegisterMsgServiceDesc(registry, &_Msg_serviceDesc) +} + +// ModuleCdc references the global rate-limiting module codec. Note, the codec should +// ONLY be used in certain instances of tests and for JSON encoding. 
+var ModuleCdc = codec.NewProtoCodec(codectypes.NewInterfaceRegistry()) diff --git a/modules/apps/rate-limiting/types/codec_test.go b/modules/apps/rate-limiting/types/codec_test.go new file mode 100644 index 00000000000..9fefd780e59 --- /dev/null +++ b/modules/apps/rate-limiting/types/codec_test.go @@ -0,0 +1,62 @@ +package types_test + +import ( + "testing" + + "github.com/stretchr/testify/require" + + sdk "github.com/cosmos/cosmos-sdk/types" + moduletestutil "github.com/cosmos/cosmos-sdk/types/module/testutil" + + ratelimiting "github.com/cosmos/ibc-go/v10/modules/apps/rate-limiting" + "github.com/cosmos/ibc-go/v10/modules/apps/rate-limiting/types" +) + +func TestCodecTypeRegistration(t *testing.T) { + testCases := []struct { + name string + typeURL string + errMsg string + }{ + { + "success: MsgAddRateLimit", + sdk.MsgTypeURL(&types.MsgAddRateLimit{}), + "", + }, + { + "success: MsgUpdateRateLimit", + sdk.MsgTypeURL(&types.MsgUpdateRateLimit{}), + "", + }, + { + "success: MsgRemoveRateLimit", + sdk.MsgTypeURL(&types.MsgRemoveRateLimit{}), + "", + }, + { + "success: MsgResetRateLimit", + sdk.MsgTypeURL(&types.MsgResetRateLimit{}), + "", + }, + { + "type not registered on codec", + "ibc.invalid.MsgTypeURL", + "unable to resolve type URL", + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + encodingCfg := moduletestutil.MakeTestEncodingConfig(ratelimiting.AppModuleBasic{}) + msg, err := encodingCfg.Codec.InterfaceRegistry().Resolve(tc.typeURL) + + if tc.errMsg == "" { + require.NotNil(t, msg) + require.NoError(t, err) + } else { + require.Nil(t, msg) + require.ErrorContains(t, err, tc.errMsg) + } + }) + } +} diff --git a/modules/apps/rate-limiting/types/errors.go b/modules/apps/rate-limiting/types/errors.go new file mode 100644 index 00000000000..bd2f1f1c4f3 --- /dev/null +++ b/modules/apps/rate-limiting/types/errors.go @@ -0,0 +1,19 @@ +package types + +import ( + errorsmod "cosmossdk.io/errors" +) + +var ( + ErrRateLimitAlreadyExists = errorsmod.Register(ModuleName, 1, "ratelimit key duplicated") + ErrRateLimitNotFound = errorsmod.Register(ModuleName, 2, "rate limit not found") + ErrZeroChannelValue = errorsmod.Register(ModuleName, 3, "channel value is zero") + ErrQuotaExceeded = errorsmod.Register(ModuleName, 4, "quota exceeded") + ErrInvalidClientState = errorsmod.Register(ModuleName, 5, "unable to determine client state from channelId") + ErrChannelNotFound = errorsmod.Register(ModuleName, 6, "channel does not exist") + ErrDenomIsBlacklisted = errorsmod.Register(ModuleName, 7, "denom is blacklisted") + ErrUnsupportedAttribute = errorsmod.Register(ModuleName, 8, "unsupported attribute") + ErrEpochNotFound = errorsmod.Register(ModuleName, 9, "hour epoch not found in store") + ErrUnmarshalEpoch = errorsmod.Register(ModuleName, 10, "could not unmarshal epochBz") + ErrInvalidEpoce = errorsmod.Register(ModuleName, 11, "invalid hour epoch") +) diff --git a/modules/apps/rate-limiting/types/events.go b/modules/apps/rate-limiting/types/events.go new file mode 100644 index 00000000000..1bf6f59583e --- /dev/null +++ b/modules/apps/rate-limiting/types/events.go @@ -0,0 +1,16 @@ +package types + +var ( + EventTransferDenied = "transfer_denied" + + EventRateLimitExceeded = "rate_limit_exceeded" + EventBlacklistedDenom = "blacklisted_denom" + + AttributeKeyReason = "reason" + AttributeKeyModule = "module" + AttributeKeyAction = "action" + AttributeKeyDenom = "denom" + AttributeKeyChannelOrClient = "channel_or_client" + AttributeKeyAmount = "amount" + AttributeKeyError 
= "error" +) diff --git a/modules/apps/rate-limiting/types/expected_keepers.go b/modules/apps/rate-limiting/types/expected_keepers.go new file mode 100644 index 00000000000..916638db9b4 --- /dev/null +++ b/modules/apps/rate-limiting/types/expected_keepers.go @@ -0,0 +1,30 @@ +package types + +import ( + "context" + + sdk "github.com/cosmos/cosmos-sdk/types" + + channeltypes "github.com/cosmos/ibc-go/v10/modules/core/04-channel/types" + porttypes "github.com/cosmos/ibc-go/v10/modules/core/05-port/types" + "github.com/cosmos/ibc-go/v10/modules/core/exported" +) + +// BankKeeper defines the expected bank keeper +type BankKeeper interface { + GetSupply(ctx context.Context, denom string) sdk.Coin +} + +// ChannelKeeper defines the expected IBC channel keeper +type ChannelKeeper interface { + porttypes.ICS4Wrapper + GetChannel(ctx sdk.Context, srcPort, srcChan string) (channel channeltypes.Channel, found bool) + GetChannelClientState(ctx sdk.Context, portID, channelID string) (clientID string, clientState exported.ClientState, err error) + GetNextSequenceSend(ctx sdk.Context, sourcePort, sourceChannel string) (uint64, bool) +} + +// ClientKeeper defines the expected IBC client keeper +type ClientKeeper interface { + GetClientState(ctx sdk.Context, clientID string) (exported.ClientState, bool) + GetClientStatus(ctx sdk.Context, clientID string) exported.Status +} diff --git a/modules/apps/rate-limiting/types/flow.go b/modules/apps/rate-limiting/types/flow.go new file mode 100644 index 00000000000..916eed11d6b --- /dev/null +++ b/modules/apps/rate-limiting/types/flow.go @@ -0,0 +1,43 @@ +package types + +import ( + errorsmod "cosmossdk.io/errors" + sdkmath "cosmossdk.io/math" +) + +// Initializes a new flow from the channel value +func NewFlow(channelValue sdkmath.Int) Flow { + flow := Flow{ + ChannelValue: channelValue, + Inflow: sdkmath.ZeroInt(), + Outflow: sdkmath.ZeroInt(), + } + + return flow +} + +// Adds an amount to the rate limit's flow after an incoming packet was received +// Returns an error if the new inflow will cause the rate limit to exceed its quota +func (f *Flow) AddInflow(amount sdkmath.Int, quota Quota) error { + netInflow := f.Inflow.Sub(f.Outflow).Add(amount) + + if quota.CheckExceedsQuota(PACKET_RECV, netInflow, f.ChannelValue) { + return errorsmod.Wrapf(ErrQuotaExceeded, "Inflow exceeds quota - Net Inflow: %v, Channel Value: %v, Threshold: %v%%", netInflow, f.ChannelValue, quota.MaxPercentRecv) + } + + f.Inflow = f.Inflow.Add(amount) + return nil +} + +// Adds an amount to the rate limit's flow after a packet was sent +// Returns an error if the new outflow will cause the rate limit to exceed its quota +func (f *Flow) AddOutflow(amount sdkmath.Int, quota Quota) error { + netOutflow := f.Outflow.Sub(f.Inflow).Add(amount) + + if quota.CheckExceedsQuota(PACKET_SEND, netOutflow, f.ChannelValue) { + return errorsmod.Wrapf(ErrQuotaExceeded, "Outflow exceeds quota - Net Outflow: %v, Channel Value: %v, Threshold: %v%%", netOutflow, f.ChannelValue, quota.MaxPercentSend) + } + + f.Outflow = f.Outflow.Add(amount) + return nil +} diff --git a/modules/apps/rate-limiting/types/genesis.go b/modules/apps/rate-limiting/types/genesis.go new file mode 100644 index 00000000000..6d6840d52f3 --- /dev/null +++ b/modules/apps/rate-limiting/types/genesis.go @@ -0,0 +1,70 @@ +package types + +import ( + "errors" + "fmt" + "strconv" + "strings" + "time" + + errorsmod "cosmossdk.io/errors" +) + +// Splits a pending send packet of the form {channelId}/{sequenceNumber} into the channel Id +// and 
sequence number respectively +func ParsePendingPacketID(pendingPacketID string) (string, uint64, error) { + splits := strings.Split(pendingPacketID, "/") + if len(splits) != 2 { + return "", 0, fmt.Errorf("invalid pending send packet (%s), must be of form: {channelId}/{sequenceNumber}", pendingPacketID) + } + channelID := splits[0] + sequenceString := splits[1] + + sequence, err := strconv.ParseUint(sequenceString, 10, 64) + if err != nil { + return "", 0, errorsmod.Wrapf(err, "unable to parse sequence number (%s) from pending send packet, %s", sequenceString, err) + } + + return channelID, sequence, nil +} + +// DefaultGenesis returns the default rate-limiting genesis state +func DefaultGenesis() *GenesisState { + return &GenesisState{ + RateLimits: []RateLimit{}, + WhitelistedAddressPairs: []WhitelistedAddressPair{}, + BlacklistedDenoms: make([]string, 0), + PendingSendPacketSequenceNumbers: make([]string, 0), + HourEpoch: HourEpoch{ + EpochNumber: 0, + Duration: time.Hour, + }, + } +} + +// Validate performs basic genesis state validation returning an error upon any +// failure. +func (gs GenesisState) Validate() error { + for _, pendingPacketID := range gs.PendingSendPacketSequenceNumbers { + if _, _, err := ParsePendingPacketID(pendingPacketID); err != nil { + return err + } + } + + // Verify the hour epoch duration is specified + if gs.HourEpoch.Duration == 0 { + return errors.New("hour epoch duration must be specified") + } + + // If the hour epoch has been initialized already (epoch number != 0), validate and then use it + if gs.HourEpoch.EpochNumber > 0 { + if gs.HourEpoch.EpochStartTime.Equal(time.Time{}) { + return errors.New("if hour epoch number is non-empty, epoch time must be initialized") + } + if gs.HourEpoch.EpochStartHeight == 0 { + return errors.New("if hour epoch number is non-empty, epoch height must be initialized") + } + } + + return nil +} diff --git a/modules/apps/rate-limiting/types/genesis.pb.go b/modules/apps/rate-limiting/types/genesis.pb.go new file mode 100644 index 00000000000..82f9be449d3 --- /dev/null +++ b/modules/apps/rate-limiting/types/genesis.pb.go @@ -0,0 +1,569 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: ibc/applications/rate_limiting/v1/genesis.proto + +package types + +import ( + fmt "fmt" + _ "github.com/cosmos/gogoproto/gogoproto" + proto "github.com/cosmos/gogoproto/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// GenesisState defines the ratelimit module's genesis state.
+type GenesisState struct { + RateLimits []RateLimit `protobuf:"bytes,1,rep,name=rate_limits,json=rateLimits,proto3" json:"rate_limits"` + WhitelistedAddressPairs []WhitelistedAddressPair `protobuf:"bytes,2,rep,name=whitelisted_address_pairs,json=whitelistedAddressPairs,proto3" json:"whitelisted_address_pairs"` + BlacklistedDenoms []string `protobuf:"bytes,3,rep,name=blacklisted_denoms,json=blacklistedDenoms,proto3" json:"blacklisted_denoms,omitempty"` + PendingSendPacketSequenceNumbers []string `protobuf:"bytes,4,rep,name=pending_send_packet_sequence_numbers,json=pendingSendPacketSequenceNumbers,proto3" json:"pending_send_packet_sequence_numbers,omitempty"` + HourEpoch HourEpoch `protobuf:"bytes,5,opt,name=hour_epoch,json=hourEpoch,proto3" json:"hour_epoch"` +} + +func (m *GenesisState) Reset() { *m = GenesisState{} } +func (m *GenesisState) String() string { return proto.CompactTextString(m) } +func (*GenesisState) ProtoMessage() {} +func (*GenesisState) Descriptor() ([]byte, []int) { + return fileDescriptor_0f0dbc611075e553, []int{0} +} +func (m *GenesisState) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GenesisState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_GenesisState.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *GenesisState) XXX_Merge(src proto.Message) { + xxx_messageInfo_GenesisState.Merge(m, src) +} +func (m *GenesisState) XXX_Size() int { + return m.Size() +} +func (m *GenesisState) XXX_DiscardUnknown() { + xxx_messageInfo_GenesisState.DiscardUnknown(m) +} + +var xxx_messageInfo_GenesisState proto.InternalMessageInfo + +func (m *GenesisState) GetRateLimits() []RateLimit { + if m != nil { + return m.RateLimits + } + return nil +} + +func (m *GenesisState) GetWhitelistedAddressPairs() []WhitelistedAddressPair { + if m != nil { + return m.WhitelistedAddressPairs + } + return nil +} + +func (m *GenesisState) GetBlacklistedDenoms() []string { + if m != nil { + return m.BlacklistedDenoms + } + return nil +} + +func (m *GenesisState) GetPendingSendPacketSequenceNumbers() []string { + if m != nil { + return m.PendingSendPacketSequenceNumbers + } + return nil +} + +func (m *GenesisState) GetHourEpoch() HourEpoch { + if m != nil { + return m.HourEpoch + } + return HourEpoch{} +} + +func init() { + proto.RegisterType((*GenesisState)(nil), "ibc.applications.rate_limiting.v1.GenesisState") +} + +func init() { + proto.RegisterFile("ibc/applications/rate_limiting/v1/genesis.proto", fileDescriptor_0f0dbc611075e553) +} + +var fileDescriptor_0f0dbc611075e553 = []byte{ + // 400 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x52, 0x3d, 0x6f, 0xd4, 0x30, + 0x18, 0x4e, 0x48, 0x41, 0xaa, 0xcb, 0x42, 0x84, 0x44, 0xe8, 0x10, 0x02, 0x62, 0xe8, 0xc0, 0xc5, + 0x1c, 0x88, 0x81, 0x81, 0x01, 0x04, 0x82, 0x01, 0x55, 0xe5, 0x32, 0x54, 0x62, 0xb1, 0x1c, 0xfb, + 0x55, 0x62, 0x35, 0xb1, 0x8d, 0x5f, 0xe7, 0x2a, 0xc4, 0x9f, 0xe0, 0xb7, 0xf0, 0x2b, 0x3a, 0x76, + 0x64, 0x42, 0xe8, 0xee, 0x8f, 0xa0, 0x7c, 0xd0, 0x0f, 0xa9, 0xd2, 0xdd, 0x66, 0xbf, 0xcf, 0xd7, + 0x6b, 0xeb, 0x21, 0x54, 0x95, 0x82, 0x72, 0x6b, 0x1b, 0x25, 0xb8, 0x57, 0x46, 0x23, 0x75, 0xdc, + 0x03, 0x6b, 0x54, 0xab, 0xbc, 0xd2, 0x15, 0x5d, 0xce, 0x69, 0x05, 0x1a, 0x50, 0x61, 0x6e, 0x9d, + 0xf1, 0x26, 0x7e, 0xac, 0x4a, 0x91, 0x5f, 0x15, 0xe4, 0xd7, 0x04, 0xf9, 0x72, 0xbe, 0x7f, 0xbf, 
+ 0x32, 0x95, 0x19, 0xd8, 0xb4, 0x3f, 0x8d, 0xc2, 0xfd, 0x57, 0x9b, 0x93, 0xae, 0x3b, 0x0d, 0xb2, + 0x27, 0xbf, 0x22, 0x72, 0xf7, 0xe3, 0xb8, 0x41, 0xe1, 0xb9, 0x87, 0xb8, 0x20, 0x7b, 0x97, 0x3c, + 0x4c, 0xc2, 0x2c, 0x3a, 0xd8, 0x7b, 0xf1, 0x2c, 0xdf, 0xb8, 0x56, 0xbe, 0xe0, 0x1e, 0x3e, 0xf7, + 0xf7, 0x77, 0x3b, 0x67, 0x7f, 0x1e, 0x05, 0x0b, 0xe2, 0xfe, 0x0f, 0x30, 0xfe, 0x41, 0x1e, 0x9e, + 0xd6, 0xca, 0x43, 0xa3, 0xd0, 0x83, 0x64, 0x5c, 0x4a, 0x07, 0x88, 0xcc, 0x72, 0xe5, 0x30, 0xb9, + 0x35, 0x44, 0xbc, 0xde, 0x22, 0xe2, 0xf8, 0xd2, 0xe3, 0xed, 0x68, 0x71, 0xc4, 0x95, 0x9b, 0xf2, + 0x1e, 0x9c, 0xde, 0x88, 0x62, 0x3c, 0x23, 0x71, 0xd9, 0x70, 0x71, 0x32, 0x85, 0x4b, 0xd0, 0xa6, + 0xc5, 0x24, 0xca, 0xa2, 0x83, 0xdd, 0xc5, 0xbd, 0x2b, 0xc8, 0xfb, 0x01, 0x88, 0x0f, 0xc9, 0x53, + 0x0b, 0x5a, 0x2a, 0x5d, 0x31, 0x04, 0x2d, 0x99, 0xe5, 0xe2, 0x04, 0x3c, 0x43, 0xf8, 0xd6, 0x81, + 0x16, 0xc0, 0x74, 0xd7, 0x96, 0xe0, 0x30, 0xd9, 0x19, 0x0c, 0xb2, 0x89, 0x5b, 0x80, 0x96, 0x47, + 0x03, 0xb3, 0x98, 0x88, 0x87, 0x23, 0x2f, 0xfe, 0x42, 0x48, 0x6d, 0x3a, 0xc7, 0xc0, 0x1a, 0x51, + 0x27, 0xb7, 0xb3, 0x70, 0xcb, 0xff, 0xfc, 0x64, 0x3a, 0xf7, 0xa1, 0xd7, 0x4c, 0xef, 0xdb, 0xad, + 0x2f, 0x06, 0xc7, 0x67, 0xab, 0x34, 0x3c, 0x5f, 0xa5, 0xe1, 0xdf, 0x55, 0x1a, 0xfe, 0x5c, 0xa7, + 0xc1, 0xf9, 0x3a, 0x0d, 0x7e, 0xaf, 0xd3, 0xe0, 0xeb, 0x9b, 0x4a, 0xf9, 0xba, 0x2b, 0x73, 0x61, + 0x5a, 0x2a, 0x0c, 0xb6, 0x06, 0xfb, 0x06, 0xce, 0x2a, 0x43, 0x97, 0xf3, 0xe7, 0xb4, 0x35, 0xb2, + 0x6b, 0x00, 0xfb, 0x9a, 0x8c, 0xf5, 0x98, 0x5d, 0xd4, 0xc3, 0x7f, 0xb7, 0x80, 0xe5, 0x9d, 0xa1, + 0x14, 0x2f, 0xff, 0x05, 0x00, 0x00, 0xff, 0xff, 0x74, 0x8b, 0x54, 0x89, 0xb7, 0x02, 0x00, 0x00, +} + +func (m *GenesisState) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GenesisState) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GenesisState) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.HourEpoch.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenesis(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + if len(m.PendingSendPacketSequenceNumbers) > 0 { + for iNdEx := len(m.PendingSendPacketSequenceNumbers) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.PendingSendPacketSequenceNumbers[iNdEx]) + copy(dAtA[i:], m.PendingSendPacketSequenceNumbers[iNdEx]) + i = encodeVarintGenesis(dAtA, i, uint64(len(m.PendingSendPacketSequenceNumbers[iNdEx]))) + i-- + dAtA[i] = 0x22 + } + } + if len(m.BlacklistedDenoms) > 0 { + for iNdEx := len(m.BlacklistedDenoms) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.BlacklistedDenoms[iNdEx]) + copy(dAtA[i:], m.BlacklistedDenoms[iNdEx]) + i = encodeVarintGenesis(dAtA, i, uint64(len(m.BlacklistedDenoms[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + if len(m.WhitelistedAddressPairs) > 0 { + for iNdEx := len(m.WhitelistedAddressPairs) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.WhitelistedAddressPairs[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenesis(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.RateLimits) > 0 { + for iNdEx := len(m.RateLimits) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.RateLimits[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = 
encodeVarintGenesis(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func encodeVarintGenesis(dAtA []byte, offset int, v uint64) int { + offset -= sovGenesis(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *GenesisState) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.RateLimits) > 0 { + for _, e := range m.RateLimits { + l = e.Size() + n += 1 + l + sovGenesis(uint64(l)) + } + } + if len(m.WhitelistedAddressPairs) > 0 { + for _, e := range m.WhitelistedAddressPairs { + l = e.Size() + n += 1 + l + sovGenesis(uint64(l)) + } + } + if len(m.BlacklistedDenoms) > 0 { + for _, s := range m.BlacklistedDenoms { + l = len(s) + n += 1 + l + sovGenesis(uint64(l)) + } + } + if len(m.PendingSendPacketSequenceNumbers) > 0 { + for _, s := range m.PendingSendPacketSequenceNumbers { + l = len(s) + n += 1 + l + sovGenesis(uint64(l)) + } + } + l = m.HourEpoch.Size() + n += 1 + l + sovGenesis(uint64(l)) + return n +} + +func sovGenesis(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenesis(x uint64) (n int) { + return sovGenesis(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *GenesisState) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GenesisState: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GenesisState: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RateLimits", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenesis + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenesis + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RateLimits = append(m.RateLimits, RateLimit{}) + if err := m.RateLimits[len(m.RateLimits)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field WhitelistedAddressPairs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenesis + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenesis + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.WhitelistedAddressPairs = append(m.WhitelistedAddressPairs, WhitelistedAddressPair{}) + if err := m.WhitelistedAddressPairs[len(m.WhitelistedAddressPairs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType 
!= 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BlacklistedDenoms", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenesis + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenesis + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.BlacklistedDenoms = append(m.BlacklistedDenoms, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PendingSendPacketSequenceNumbers", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenesis + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenesis + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PendingSendPacketSequenceNumbers = append(m.PendingSendPacketSequenceNumbers, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field HourEpoch", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenesis + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenesis + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.HourEpoch.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenesis(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenesis + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenesis(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenesis + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenesis + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenesis + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenesis + } + iNdEx += length + case 3: + depth++ + case 
4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenesis + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenesis + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenesis = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenesis = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenesis = fmt.Errorf("proto: unexpected end of group") +) diff --git a/modules/apps/rate-limiting/types/genesis_test.go b/modules/apps/rate-limiting/types/genesis_test.go new file mode 100644 index 00000000000..628cb2962bb --- /dev/null +++ b/modules/apps/rate-limiting/types/genesis_test.go @@ -0,0 +1,97 @@ +package types_test + +import ( + "testing" + "time" + + "github.com/stretchr/testify/require" + + "github.com/cosmos/ibc-go/v10/modules/apps/rate-limiting/types" +) + +func TestValidateGenesis(t *testing.T) { + currentHour := 13 + blockTime := time.Date(2024, 1, 1, currentHour, 55, 8, 0, time.UTC) // 13:55:08 + + testCases := []struct { + name string + genesisState types.GenesisState + expectedError string + }{ + { + name: "valid default state", + genesisState: *types.DefaultGenesis(), + }, + { + name: "valid custom state", + genesisState: types.GenesisState{ + WhitelistedAddressPairs: []types.WhitelistedAddressPair{ + {Sender: "senderA", Receiver: "receiverA"}, + {Sender: "senderB", Receiver: "receiverB"}, + }, + BlacklistedDenoms: []string{"denomA", "denomB"}, + PendingSendPacketSequenceNumbers: []string{"channel-0/1", "channel-2/3"}, + HourEpoch: types.HourEpoch{ + EpochNumber: 1, + EpochStartTime: blockTime, + Duration: time.Minute, + EpochStartHeight: 1, + }, + }, + }, + { + name: "invalid packet sequence - wrong delimiter", + genesisState: types.GenesisState{ + PendingSendPacketSequenceNumbers: []string{"channel-0/1", "channel-2|3"}, + }, + expectedError: "invalid pending send packet (channel-2|3), must be of form: {channelId}/{sequenceNumber}", + }, + { + name: "invalid packet sequence - invalid sequence", + genesisState: types.GenesisState{ + PendingSendPacketSequenceNumbers: []string{"channel-0/1", "channel-2/X"}, + }, + expectedError: "unable to parse sequence number (X) from pending send packet", + }, + { + name: "invalid hour epoch - no duration", + genesisState: types.GenesisState{ + HourEpoch: types.HourEpoch{}, + }, + expectedError: "hour epoch duration must be specified", + }, + { + name: "invalid hour epoch - no epoch time", + genesisState: types.GenesisState{ + HourEpoch: types.HourEpoch{ + EpochNumber: 1, + EpochStartHeight: 1, + Duration: time.Minute, + }, + }, + expectedError: "if hour epoch number is non-empty, epoch time must be initialized", + }, + { + name: "invalid hour epoch - no epoch height", + genesisState: types.GenesisState{ + HourEpoch: types.HourEpoch{ + EpochNumber: 1, + EpochStartTime: blockTime, + Duration: time.Minute, + }, + }, + expectedError: "if hour epoch number is non-empty, epoch height must be initialized", + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + err := tc.genesisState.Validate() + if tc.expectedError != "" { + require.ErrorContains(t, err, tc.expectedError) + } else { + require.NoError(t, err) + } + }) + } +} diff --git a/modules/apps/rate-limiting/types/keys.go b/modules/apps/rate-limiting/types/keys.go new file mode 100644 index 00000000000..1ba017419bd --- /dev/null +++ 
b/modules/apps/rate-limiting/types/keys.go @@ -0,0 +1,57 @@ +package types + +import ( + "encoding/binary" +) + +const ( + // ModuleName defines the IBC rate-limiting name + ModuleName = "ratelimiting" + + // StoreKey is the store key string for IBC rate-limiting + StoreKey = ModuleName + + // RouterKey is the message route for IBC rate-limiting + RouterKey = ModuleName + + // QuerierRoute is the querier route for IBC rate-limiting + QuerierRoute = ModuleName +) + +func bytes(p string) []byte { + return []byte(p) +} + +var ( + RateLimitKeyPrefix = bytes("rate-limit") + PendingSendPacketPrefix = bytes("pending-send-packet") + DenomBlacklistKeyPrefix = bytes("denom-blacklist") + // TODO: Fix IBCGO-2368 + AddressWhitelistKeyPrefix = bytes("address-blacklist") + HourEpochKey = bytes("hour-epoch") + + PendingSendPacketChannelLength = 16 +) + +// Get the rate limit byte key built from the denom and channelId +func RateLimitItemKey(denom string, channelID string) []byte { + return append(bytes(denom), bytes(channelID)...) +} + +// Get the pending send packet key from the channel ID and sequence number +// The channel ID must be fixed length to allow for extracting the underlying +// values from a key +func PendingSendPacketKey(channelID string, sequenceNumber uint64) []byte { + channelIDBz := make([]byte, PendingSendPacketChannelLength) + copy(channelIDBz, channelID) + + sequenceNumberBz := make([]byte, 8) + binary.BigEndian.PutUint64(sequenceNumberBz, sequenceNumber) + + return append(channelIDBz, sequenceNumberBz...) +} + +// Get the whitelist path key from a sender and receiver address +func AddressWhitelistKey(sender, receiver string) []byte { + return append(bytes(sender), bytes(receiver)...) +} diff --git a/modules/apps/rate-limiting/types/msgs.go b/modules/apps/rate-limiting/types/msgs.go new file mode 100644 index 00000000000..c296690f196 --- /dev/null +++ b/modules/apps/rate-limiting/types/msgs.go @@ -0,0 +1,193 @@ +package types + +import ( + "regexp" + "strings" + + errorsmod "cosmossdk.io/errors" + sdkmath "cosmossdk.io/math" + + sdk "github.com/cosmos/cosmos-sdk/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" + + clienttypes "github.com/cosmos/ibc-go/v10/modules/core/02-client/types" +) + +var ( + _ sdk.Msg = &MsgAddRateLimit{} + _ sdk.Msg = &MsgUpdateRateLimit{} + _ sdk.Msg = &MsgRemoveRateLimit{} + _ sdk.Msg = &MsgResetRateLimit{} +) + +// ---------------------------------------------- +// MsgAddRateLimit +// ---------------------------------------------- + +func NewMsgAddRateLimit(denom, channelOrClientID string, maxPercentSend sdkmath.Int, maxPercentRecv sdkmath.Int, durationHours uint64) *MsgAddRateLimit { + return &MsgAddRateLimit{ + Denom: denom, + ChannelOrClientId: channelOrClientID, + MaxPercentSend: maxPercentSend, + MaxPercentRecv: maxPercentRecv, + DurationHours: durationHours, + } +} + +func (msg *MsgAddRateLimit) ValidateBasic() error { + if strings.TrimSpace(msg.Signer) == "" { + return errorsmod.Wrap(sdkerrors.ErrInvalidAddress, "missing sender address") + } + + if msg.Denom == "" { + return errorsmod.Wrapf(sdkerrors.ErrInvalidRequest, "invalid denom (%s)", msg.Denom) + } + + matched, err := regexp.MatchString(`^channel-\d+$`, msg.ChannelOrClientId) + if err != nil { + return errorsmod.Wrapf(sdkerrors.ErrInvalidRequest, "unable to verify channel-id (%s)", msg.ChannelOrClientId) + } + if !matched && !clienttypes.IsValidClientID(msg.ChannelOrClientId) { + return errorsmod.Wrapf(sdkerrors.ErrInvalidRequest, + "invalid channel or client-id (%s), must be of 
the format 'channel-{N}' or a valid client-id", msg.ChannelOrClientId) + } + + if msg.MaxPercentSend.GT(sdkmath.NewInt(100)) || msg.MaxPercentSend.LT(sdkmath.ZeroInt()) { + return errorsmod.Wrapf(sdkerrors.ErrInvalidRequest, + "max-percent-send percent must be between 0 and 100 (inclusively), Provided: %v", msg.MaxPercentSend) + } + + if msg.MaxPercentRecv.GT(sdkmath.NewInt(100)) || msg.MaxPercentRecv.LT(sdkmath.ZeroInt()) { + return errorsmod.Wrapf(sdkerrors.ErrInvalidRequest, + "max-percent-recv percent must be between 0 and 100 (inclusively), Provided: %v", msg.MaxPercentRecv) + } + + if msg.MaxPercentRecv.IsZero() && msg.MaxPercentSend.IsZero() { + return errorsmod.Wrapf(sdkerrors.ErrInvalidRequest, + "either the max send or max receive threshold must be greater than 0") + } + + if msg.DurationHours == 0 { + return errorsmod.Wrapf(sdkerrors.ErrInvalidRequest, "duration can not be zero") + } + + return nil +} + +// ---------------------------------------------- +// MsgUpdateRateLimit +// ---------------------------------------------- + +func NewMsgUpdateRateLimit(denom, channelOrClientID string, maxPercentSend sdkmath.Int, maxPercentRecv sdkmath.Int, durationHours uint64) *MsgUpdateRateLimit { + return &MsgUpdateRateLimit{ + Denom: denom, + ChannelOrClientId: channelOrClientID, + MaxPercentSend: maxPercentSend, + MaxPercentRecv: maxPercentRecv, + DurationHours: durationHours, + } +} + +func (msg *MsgUpdateRateLimit) ValidateBasic() error { + if strings.TrimSpace(msg.Signer) == "" { + return errorsmod.Wrap(sdkerrors.ErrInvalidAddress, "missing sender address") + } + + if msg.Denom == "" { + return errorsmod.Wrapf(sdkerrors.ErrInvalidRequest, "invalid denom (%s)", msg.Denom) + } + + matched, err := regexp.MatchString(`^channel-\d+$`, msg.ChannelOrClientId) + if err != nil { + return errorsmod.Wrapf(sdkerrors.ErrInvalidRequest, "unable to verify channel-id (%s)", msg.ChannelOrClientId) + } + if !matched && !clienttypes.IsValidClientID(msg.ChannelOrClientId) { + return errorsmod.Wrapf(sdkerrors.ErrInvalidRequest, + "invalid channel or client-id (%s), must be of the format 'channel-{N}' or a valid client-id", msg.ChannelOrClientId) + } + + if msg.MaxPercentSend.GT(sdkmath.NewInt(100)) || msg.MaxPercentSend.LT(sdkmath.ZeroInt()) { + return errorsmod.Wrapf(sdkerrors.ErrInvalidRequest, + "max-percent-send percent must be between 0 and 100 (inclusively), Provided: %v", msg.MaxPercentSend) + } + + if msg.MaxPercentRecv.GT(sdkmath.NewInt(100)) || msg.MaxPercentRecv.LT(sdkmath.ZeroInt()) { + return errorsmod.Wrapf(sdkerrors.ErrInvalidRequest, + "max-percent-recv percent must be between 0 and 100 (inclusively), Provided: %v", msg.MaxPercentRecv) + } + + if msg.MaxPercentRecv.IsZero() && msg.MaxPercentSend.IsZero() { + return errorsmod.Wrapf(sdkerrors.ErrInvalidRequest, + "either the max send or max receive threshold must be greater than 0") + } + + if msg.DurationHours == 0 { + return errorsmod.Wrapf(sdkerrors.ErrInvalidRequest, "duration can not be zero") + } + + return nil +} + +// ---------------------------------------------- +// MsgRemoveRateLimit +// ---------------------------------------------- + +func NewMsgRemoveRateLimit(denom, channelOrClientID string) *MsgRemoveRateLimit { + return &MsgRemoveRateLimit{ + Denom: denom, + ChannelOrClientId: channelOrClientID, + } +} + +func (msg *MsgRemoveRateLimit) ValidateBasic() error { + if strings.TrimSpace(msg.Signer) == "" { + return errorsmod.Wrap(sdkerrors.ErrInvalidAddress, "missing sender address") + } + + if msg.Denom == "" { + return 
errorsmod.Wrapf(sdkerrors.ErrInvalidRequest, "invalid denom (%s)", msg.Denom) + } + + matched, err := regexp.MatchString(`^channel-\d+$`, msg.ChannelOrClientId) + if err != nil { + return errorsmod.Wrapf(sdkerrors.ErrInvalidRequest, "unable to verify channel-id (%s)", msg.ChannelOrClientId) + } + if !matched && !clienttypes.IsValidClientID(msg.ChannelOrClientId) { + return errorsmod.Wrapf(sdkerrors.ErrInvalidRequest, + "invalid channel or client-id (%s), must be of the format 'channel-{N}' or a valid client-id", msg.ChannelOrClientId) + } + + return nil +} + +// ---------------------------------------------- +// MsgResetRateLimit +// ---------------------------------------------- + +func NewMsgResetRateLimit(denom, channelOrClientID string) *MsgResetRateLimit { + return &MsgResetRateLimit{ + Denom: denom, + ChannelOrClientId: channelOrClientID, + } +} + +func (msg *MsgResetRateLimit) ValidateBasic() error { + if strings.TrimSpace(msg.Signer) == "" { + return errorsmod.Wrap(sdkerrors.ErrInvalidAddress, "missing sender address") + } + + if msg.Denom == "" { + return errorsmod.Wrapf(sdkerrors.ErrInvalidRequest, "invalid denom (%s)", msg.Denom) + } + + matched, err := regexp.MatchString(`^channel-\d+$`, msg.ChannelOrClientId) + if err != nil { + return errorsmod.Wrapf(sdkerrors.ErrInvalidRequest, "unable to verify channel-id (%s)", msg.ChannelOrClientId) + } + if !matched && !clienttypes.IsValidClientID(msg.ChannelOrClientId) { + return errorsmod.Wrapf(sdkerrors.ErrInvalidRequest, + "invalid channel or client-id (%s), must be of the format 'channel-{N}' or a valid client-id", msg.ChannelOrClientId) + } + + return nil +} diff --git a/modules/apps/rate-limiting/types/msgs_test.go b/modules/apps/rate-limiting/types/msgs_test.go new file mode 100644 index 00000000000..067d2c37994 --- /dev/null +++ b/modules/apps/rate-limiting/types/msgs_test.go @@ -0,0 +1,493 @@ +package types_test + +import ( + "testing" + + "github.com/stretchr/testify/suite" + + sdkmath "cosmossdk.io/math" + + "github.com/cosmos/ibc-go/v10/modules/apps/rate-limiting/types" +) + +type MsgsTestSuite struct { + suite.Suite + + authority string + randomAddress string + validChannelID string + validClientID string +} + +func (s *MsgsTestSuite) SetupTest() { + s.authority = "cosmos10h9stc5v6ntgeygf5xf945njqq5h32r53uquvw" + s.randomAddress = "cosmos10h9stc5v6ntgeygf5xf945njqq5h32r53uquvw" + s.validChannelID = "channel-0" + s.validClientID = "07-tendermint-0" +} + +func TestMsgsTestSuite(t *testing.T) { + suite.Run(t, new(MsgsTestSuite)) +} + +func (s *MsgsTestSuite) TestMsgAddRateLimit() { + testCases := []struct { + name string + msg *types.MsgAddRateLimit + expPass bool + }{ + { + name: "success: valid add msg with channel id", + msg: &types.MsgAddRateLimit{ + Signer: s.authority, + Denom: "uatom", + ChannelOrClientId: s.validChannelID, + MaxPercentSend: sdkmath.NewInt(10), + MaxPercentRecv: sdkmath.NewInt(10), + DurationHours: 24, + }, + expPass: true, + }, + { + name: "success: valid add msg with client id", + msg: &types.MsgAddRateLimit{ + Signer: s.authority, + Denom: "uatom", + ChannelOrClientId: s.validClientID, + MaxPercentSend: sdkmath.NewInt(10), + MaxPercentRecv: sdkmath.NewInt(10), + DurationHours: 24, + }, + expPass: true, + }, + { + name: "success: invalid authority", + msg: &types.MsgAddRateLimit{ + Signer: "invalid", + Denom: "uatom", + ChannelOrClientId: s.validChannelID, + MaxPercentSend: sdkmath.NewInt(10), + MaxPercentRecv: sdkmath.NewInt(10), + DurationHours: 24, + }, + expPass: true, // Note: validate basic only 
checks the signer is not empty, not if it's a valid authority + }, + { + name: "success: empty authority", + msg: &types.MsgAddRateLimit{ + Signer: "", + Denom: "uatom", + ChannelOrClientId: s.validChannelID, + MaxPercentSend: sdkmath.NewInt(10), + MaxPercentRecv: sdkmath.NewInt(10), + DurationHours: 24, + }, + expPass: false, + }, + { + name: "failure: denom can't be empty", + msg: &types.MsgAddRateLimit{ + Signer: s.authority, + Denom: "", + ChannelOrClientId: s.validChannelID, + MaxPercentSend: sdkmath.NewInt(10), + MaxPercentRecv: sdkmath.NewInt(10), + DurationHours: 24, + }, + expPass: false, + }, + { + name: "failure: invalid client ID", + msg: &types.MsgAddRateLimit{ + Signer: s.authority, + Denom: "uatom", + ChannelOrClientId: "invalid-client-id", + MaxPercentSend: sdkmath.NewInt(10), + MaxPercentRecv: sdkmath.NewInt(10), + DurationHours: 24, + }, + expPass: false, + }, + { + name: "failure: invalid channel ID", + msg: &types.MsgAddRateLimit{ + Signer: s.authority, + Denom: "uatom", + ChannelOrClientId: "channel", + MaxPercentSend: sdkmath.NewInt(10), + MaxPercentRecv: sdkmath.NewInt(10), + DurationHours: 24, + }, + expPass: false, + }, + { + name: "failure: max percent send > 100", + msg: &types.MsgAddRateLimit{ + Signer: s.authority, + Denom: "uatom", + ChannelOrClientId: s.validChannelID, + MaxPercentSend: sdkmath.NewInt(101), + MaxPercentRecv: sdkmath.NewInt(10), + DurationHours: 24, + }, + expPass: false, + }, + { + name: "failure: max percent recv > 100", + msg: &types.MsgAddRateLimit{ + Signer: s.authority, + Denom: "uatom", + ChannelOrClientId: s.validChannelID, + MaxPercentSend: sdkmath.NewInt(10), + MaxPercentRecv: sdkmath.NewInt(101), + DurationHours: 24, + }, + expPass: false, + }, + { + name: "failure: send and recv both zero", + msg: &types.MsgAddRateLimit{ + Signer: s.authority, + Denom: "uatom", + ChannelOrClientId: s.validChannelID, + MaxPercentSend: sdkmath.ZeroInt(), + MaxPercentRecv: sdkmath.ZeroInt(), + DurationHours: 24, + }, + expPass: false, + }, + { + name: "failure: duration is zero hours", + msg: &types.MsgAddRateLimit{ + Signer: s.authority, + Denom: "uatom", + ChannelOrClientId: s.validChannelID, + MaxPercentSend: sdkmath.NewInt(10), + MaxPercentRecv: sdkmath.NewInt(10), + DurationHours: 0, + }, + expPass: false, + }, + } + + for i, tc := range testCases { + err := tc.msg.ValidateBasic() + if tc.expPass { + s.Require().NoError(err, "valid test case %d failed: %s", i, tc.name) + } else { + s.Require().Error(err, "invalid test case %d passed: %s", i, tc.name) + } + } +} + +func (s *MsgsTestSuite) TestMsgUpdateRateLimit() { + testCases := []struct { + name string + msg *types.MsgUpdateRateLimit + expPass bool + }{ + { + name: "success: valid add msg with channel id", + msg: &types.MsgUpdateRateLimit{ + Signer: s.authority, + Denom: "uatom", + ChannelOrClientId: s.validChannelID, + MaxPercentSend: sdkmath.NewInt(10), + MaxPercentRecv: sdkmath.NewInt(10), + DurationHours: 24, + }, + expPass: true, + }, + { + name: "success: valid add msg with client id", + msg: &types.MsgUpdateRateLimit{ + Signer: s.authority, + Denom: "uatom", + ChannelOrClientId: s.validClientID, + MaxPercentSend: sdkmath.NewInt(10), + MaxPercentRecv: sdkmath.NewInt(10), + DurationHours: 24, + }, + expPass: true, + }, + { + name: "success: invalid authority", + msg: &types.MsgUpdateRateLimit{ + Signer: "invalid", + Denom: "uatom", + ChannelOrClientId: s.validChannelID, + MaxPercentSend: sdkmath.NewInt(10), + MaxPercentRecv: sdkmath.NewInt(10), + DurationHours: 24, + }, + expPass: true, // 
Note: validate basic only checks the signer is not empty, not if it's a valid authority + }, + { + name: "success: empty authority", + msg: &types.MsgUpdateRateLimit{ + Signer: "", + Denom: "uatom", + ChannelOrClientId: s.validChannelID, + MaxPercentSend: sdkmath.NewInt(10), + MaxPercentRecv: sdkmath.NewInt(10), + DurationHours: 24, + }, + expPass: false, + }, + { + name: "failure: denom can't be empty", + msg: &types.MsgUpdateRateLimit{ + Signer: s.authority, + Denom: "", + ChannelOrClientId: s.validChannelID, + MaxPercentSend: sdkmath.NewInt(10), + MaxPercentRecv: sdkmath.NewInt(10), + DurationHours: 24, + }, + expPass: false, + }, + { + name: "failure: invalid client ID", + msg: &types.MsgUpdateRateLimit{ + Signer: s.authority, + Denom: "uatom", + ChannelOrClientId: "invalid-client-id", + MaxPercentSend: sdkmath.NewInt(10), + MaxPercentRecv: sdkmath.NewInt(10), + DurationHours: 24, + }, + expPass: false, + }, + { + name: "failure: invalid channel ID", + msg: &types.MsgUpdateRateLimit{ + Signer: s.authority, + Denom: "uatom", + ChannelOrClientId: "channel", + MaxPercentSend: sdkmath.NewInt(10), + MaxPercentRecv: sdkmath.NewInt(10), + DurationHours: 24, + }, + expPass: false, + }, + { + name: "failure: max percent send > 100", + msg: &types.MsgUpdateRateLimit{ + Signer: s.authority, + Denom: "uatom", + ChannelOrClientId: s.validChannelID, + MaxPercentSend: sdkmath.NewInt(101), + MaxPercentRecv: sdkmath.NewInt(10), + DurationHours: 24, + }, + expPass: false, + }, + { + name: "failure: max percent recv > 100", + msg: &types.MsgUpdateRateLimit{ + Signer: s.authority, + Denom: "uatom", + ChannelOrClientId: s.validChannelID, + MaxPercentSend: sdkmath.NewInt(10), + MaxPercentRecv: sdkmath.NewInt(101), + DurationHours: 24, + }, + expPass: false, + }, + { + name: "failure: send and recv both zero", + msg: &types.MsgUpdateRateLimit{ + Signer: s.authority, + Denom: "uatom", + ChannelOrClientId: s.validChannelID, + MaxPercentSend: sdkmath.ZeroInt(), + MaxPercentRecv: sdkmath.ZeroInt(), + DurationHours: 24, + }, + expPass: false, + }, + { + name: "failure: duration is zero hours", + msg: &types.MsgUpdateRateLimit{ + Signer: s.authority, + Denom: "uatom", + ChannelOrClientId: s.validChannelID, + MaxPercentSend: sdkmath.NewInt(10), + MaxPercentRecv: sdkmath.NewInt(10), + DurationHours: 0, + }, + expPass: false, + }, + } + + for i, tc := range testCases { + err := tc.msg.ValidateBasic() + if tc.expPass { + s.Require().NoError(err, "valid test case %d failed: %s", i, tc.name) + } else { + s.Require().Error(err, "invalid test case %d passed: %s", i, tc.name) + } + } +} + +func (s *MsgsTestSuite) TestMsgRemoveRateLimit() { + testCases := []struct { + name string + msg *types.MsgRemoveRateLimit + expPass bool + }{ + { + name: "success: valid add msg with channel id", + msg: &types.MsgRemoveRateLimit{ + Signer: s.authority, + Denom: "uatom", + ChannelOrClientId: s.validChannelID, + }, + expPass: true, + }, + { + name: "success: valid add msg with client id", + msg: &types.MsgRemoveRateLimit{ + Signer: s.authority, + Denom: "uatom", + ChannelOrClientId: s.validClientID, + }, + expPass: true, + }, + { + name: "success: invalid authority", + msg: &types.MsgRemoveRateLimit{ + Signer: "invalid", + Denom: "uatom", + ChannelOrClientId: s.validChannelID, + }, + expPass: true, // Note: validate basic only checks the signer is not empty, not if it's a valid authority + }, + { + name: "success: empty authority", + msg: &types.MsgRemoveRateLimit{ + Signer: "", + Denom: "uatom", + ChannelOrClientId: s.validChannelID, + 
}, + expPass: false, + }, + { + name: "failure: denom can't be empty", + msg: &types.MsgRemoveRateLimit{ + Signer: s.authority, + Denom: "", + ChannelOrClientId: s.validChannelID, + }, + expPass: false, + }, + { + name: "failure: invalid client ID", + msg: &types.MsgRemoveRateLimit{ + Signer: s.authority, + Denom: "uatom", + ChannelOrClientId: "invalid-client-id", + }, + expPass: false, + }, + { + name: "failure: invalid channel ID", + msg: &types.MsgRemoveRateLimit{ + Signer: s.authority, + Denom: "uatom", + ChannelOrClientId: "channel", + }, + expPass: false, + }, + } + + for i, tc := range testCases { + err := tc.msg.ValidateBasic() + if tc.expPass { + s.Require().NoError(err, "valid test case %d failed: %s", i, tc.name) + } else { + s.Require().Error(err, "invalid test case %d passed: %s", i, tc.name) + } + } +} + +func (s *MsgsTestSuite) TestMsgResetRateLimit() { + testCases := []struct { + name string + msg *types.MsgResetRateLimit + expPass bool + }{ + { + name: "success: valid add msg with channel id", + msg: &types.MsgResetRateLimit{ + Signer: s.authority, + Denom: "uatom", + ChannelOrClientId: s.validChannelID, + }, + expPass: true, + }, + { + name: "success: valid add msg with client id", + msg: &types.MsgResetRateLimit{ + Signer: s.authority, + Denom: "uatom", + ChannelOrClientId: s.validClientID, + }, + expPass: true, + }, + { + name: "success: invalid authority", + msg: &types.MsgResetRateLimit{ + Signer: "invalid", + Denom: "uatom", + ChannelOrClientId: s.validChannelID, + }, + expPass: true, // Note: validate basic only checks the signer is not empty, not if it's a valid authority + }, + { + name: "success: empty authority", + msg: &types.MsgResetRateLimit{ + Signer: "", + Denom: "uatom", + ChannelOrClientId: s.validChannelID, + }, + expPass: false, + }, + { + name: "failure: denom can't be empty", + msg: &types.MsgResetRateLimit{ + Signer: s.authority, + Denom: "", + ChannelOrClientId: s.validChannelID, + }, + expPass: false, + }, + { + name: "failure: invalid client ID", + msg: &types.MsgResetRateLimit{ + Signer: s.authority, + Denom: "uatom", + ChannelOrClientId: "invalid-client-id", + }, + expPass: false, + }, + { + name: "failure: invalid channel ID", + msg: &types.MsgResetRateLimit{ + Signer: s.authority, + Denom: "uatom", + ChannelOrClientId: "channel", + }, + expPass: false, + }, + } + + for i, tc := range testCases { + err := tc.msg.ValidateBasic() + if tc.expPass { + s.Require().NoError(err, "valid test case %d failed: %s", i, tc.name) + } else { + s.Require().Error(err, "invalid test case %d passed: %s", i, tc.name) + } + } +} diff --git a/modules/apps/rate-limiting/types/query.pb.go b/modules/apps/rate-limiting/types/query.pb.go new file mode 100644 index 00000000000..8d91405093a --- /dev/null +++ b/modules/apps/rate-limiting/types/query.pb.go @@ -0,0 +1,2482 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: ibc/applications/rate_limiting/v1/query.proto + +package types + +import ( + context "context" + fmt "fmt" + _ "github.com/cosmos/gogoproto/gogoproto" + grpc1 "github.com/cosmos/gogoproto/grpc" + proto "github.com/cosmos/gogoproto/proto" + _ "google.golang.org/genproto/googleapis/api/annotations" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// Queries all rate limits +type QueryAllRateLimitsRequest struct { +} + +func (m *QueryAllRateLimitsRequest) Reset() { *m = QueryAllRateLimitsRequest{} } +func (m *QueryAllRateLimitsRequest) String() string { return proto.CompactTextString(m) } +func (*QueryAllRateLimitsRequest) ProtoMessage() {} +func (*QueryAllRateLimitsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_f55a91bf266ae0f7, []int{0} +} +func (m *QueryAllRateLimitsRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryAllRateLimitsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryAllRateLimitsRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryAllRateLimitsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryAllRateLimitsRequest.Merge(m, src) +} +func (m *QueryAllRateLimitsRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryAllRateLimitsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryAllRateLimitsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryAllRateLimitsRequest proto.InternalMessageInfo + +// QueryAllRateLimitsResponse returns all the rate limits stored on the chain. +type QueryAllRateLimitsResponse struct { + RateLimits []RateLimit `protobuf:"bytes,1,rep,name=rate_limits,json=rateLimits,proto3" json:"rate_limits"` +} + +func (m *QueryAllRateLimitsResponse) Reset() { *m = QueryAllRateLimitsResponse{} } +func (m *QueryAllRateLimitsResponse) String() string { return proto.CompactTextString(m) } +func (*QueryAllRateLimitsResponse) ProtoMessage() {} +func (*QueryAllRateLimitsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_f55a91bf266ae0f7, []int{1} +} +func (m *QueryAllRateLimitsResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryAllRateLimitsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryAllRateLimitsResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryAllRateLimitsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryAllRateLimitsResponse.Merge(m, src) +} +func (m *QueryAllRateLimitsResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryAllRateLimitsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryAllRateLimitsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryAllRateLimitsResponse proto.InternalMessageInfo + +func (m *QueryAllRateLimitsResponse) GetRateLimits() []RateLimit { + if m != nil { + return m.RateLimits + } + return nil +} + +// Queries a specific rate limit by channel ID and denom +type QueryRateLimitRequest struct { + Denom string `protobuf:"bytes,1,opt,name=denom,proto3" json:"denom,omitempty"` + ChannelOrClientId string `protobuf:"bytes,2,opt,name=channel_or_client_id,json=channelOrClientId,proto3" json:"channel_or_client_id,omitempty"` +} + +func (m 
*QueryRateLimitRequest) Reset() { *m = QueryRateLimitRequest{} } +func (m *QueryRateLimitRequest) String() string { return proto.CompactTextString(m) } +func (*QueryRateLimitRequest) ProtoMessage() {} +func (*QueryRateLimitRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_f55a91bf266ae0f7, []int{2} +} +func (m *QueryRateLimitRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryRateLimitRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryRateLimitRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryRateLimitRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryRateLimitRequest.Merge(m, src) +} +func (m *QueryRateLimitRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryRateLimitRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryRateLimitRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryRateLimitRequest proto.InternalMessageInfo + +func (m *QueryRateLimitRequest) GetDenom() string { + if m != nil { + return m.Denom + } + return "" +} + +func (m *QueryRateLimitRequest) GetChannelOrClientId() string { + if m != nil { + return m.ChannelOrClientId + } + return "" +} + +// QueryRateLimitResponse returns a rate limit by denom and channel_or_client_id combination. +type QueryRateLimitResponse struct { + RateLimit *RateLimit `protobuf:"bytes,1,opt,name=rate_limit,json=rateLimit,proto3" json:"rate_limit,omitempty"` +} + +func (m *QueryRateLimitResponse) Reset() { *m = QueryRateLimitResponse{} } +func (m *QueryRateLimitResponse) String() string { return proto.CompactTextString(m) } +func (*QueryRateLimitResponse) ProtoMessage() {} +func (*QueryRateLimitResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_f55a91bf266ae0f7, []int{3} +} +func (m *QueryRateLimitResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryRateLimitResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryRateLimitResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryRateLimitResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryRateLimitResponse.Merge(m, src) +} +func (m *QueryRateLimitResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryRateLimitResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryRateLimitResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryRateLimitResponse proto.InternalMessageInfo + +func (m *QueryRateLimitResponse) GetRateLimit() *RateLimit { + if m != nil { + return m.RateLimit + } + return nil +} + +// Queries all the rate limits for a given chain +type QueryRateLimitsByChainIDRequest struct { + ChainId string `protobuf:"bytes,1,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"` +} + +func (m *QueryRateLimitsByChainIDRequest) Reset() { *m = QueryRateLimitsByChainIDRequest{} } +func (m *QueryRateLimitsByChainIDRequest) String() string { return proto.CompactTextString(m) } +func (*QueryRateLimitsByChainIDRequest) ProtoMessage() {} +func (*QueryRateLimitsByChainIDRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_f55a91bf266ae0f7, []int{4} +} +func (m *QueryRateLimitsByChainIDRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m 
*QueryRateLimitsByChainIDRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryRateLimitsByChainIDRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryRateLimitsByChainIDRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryRateLimitsByChainIDRequest.Merge(m, src) +} +func (m *QueryRateLimitsByChainIDRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryRateLimitsByChainIDRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryRateLimitsByChainIDRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryRateLimitsByChainIDRequest proto.InternalMessageInfo + +func (m *QueryRateLimitsByChainIDRequest) GetChainId() string { + if m != nil { + return m.ChainId + } + return "" +} + +// QueryRateLimitsByChainIDResponse returns all rate-limits by a chain. +type QueryRateLimitsByChainIDResponse struct { + RateLimits []RateLimit `protobuf:"bytes,1,rep,name=rate_limits,json=rateLimits,proto3" json:"rate_limits"` +} + +func (m *QueryRateLimitsByChainIDResponse) Reset() { *m = QueryRateLimitsByChainIDResponse{} } +func (m *QueryRateLimitsByChainIDResponse) String() string { return proto.CompactTextString(m) } +func (*QueryRateLimitsByChainIDResponse) ProtoMessage() {} +func (*QueryRateLimitsByChainIDResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_f55a91bf266ae0f7, []int{5} +} +func (m *QueryRateLimitsByChainIDResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryRateLimitsByChainIDResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryRateLimitsByChainIDResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryRateLimitsByChainIDResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryRateLimitsByChainIDResponse.Merge(m, src) +} +func (m *QueryRateLimitsByChainIDResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryRateLimitsByChainIDResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryRateLimitsByChainIDResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryRateLimitsByChainIDResponse proto.InternalMessageInfo + +func (m *QueryRateLimitsByChainIDResponse) GetRateLimits() []RateLimit { + if m != nil { + return m.RateLimits + } + return nil +} + +// Queries all the rate limits for a given channel or client ID +type QueryRateLimitsByChannelOrClientIDRequest struct { + ChannelOrClientId string `protobuf:"bytes,1,opt,name=channel_or_client_id,json=channelOrClientId,proto3" json:"channel_or_client_id,omitempty"` +} + +func (m *QueryRateLimitsByChannelOrClientIDRequest) Reset() { + *m = QueryRateLimitsByChannelOrClientIDRequest{} +} +func (m *QueryRateLimitsByChannelOrClientIDRequest) String() string { + return proto.CompactTextString(m) +} +func (*QueryRateLimitsByChannelOrClientIDRequest) ProtoMessage() {} +func (*QueryRateLimitsByChannelOrClientIDRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_f55a91bf266ae0f7, []int{6} +} +func (m *QueryRateLimitsByChannelOrClientIDRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryRateLimitsByChannelOrClientIDRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return 
xxx_messageInfo_QueryRateLimitsByChannelOrClientIDRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryRateLimitsByChannelOrClientIDRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryRateLimitsByChannelOrClientIDRequest.Merge(m, src) +} +func (m *QueryRateLimitsByChannelOrClientIDRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryRateLimitsByChannelOrClientIDRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryRateLimitsByChannelOrClientIDRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryRateLimitsByChannelOrClientIDRequest proto.InternalMessageInfo + +func (m *QueryRateLimitsByChannelOrClientIDRequest) GetChannelOrClientId() string { + if m != nil { + return m.ChannelOrClientId + } + return "" +} + +// QueryRateLimitsByChannelOrClientIDResponse returns all rate-limits by a channel or client id. +type QueryRateLimitsByChannelOrClientIDResponse struct { + RateLimits []RateLimit `protobuf:"bytes,1,rep,name=rate_limits,json=rateLimits,proto3" json:"rate_limits"` +} + +func (m *QueryRateLimitsByChannelOrClientIDResponse) Reset() { + *m = QueryRateLimitsByChannelOrClientIDResponse{} +} +func (m *QueryRateLimitsByChannelOrClientIDResponse) String() string { + return proto.CompactTextString(m) +} +func (*QueryRateLimitsByChannelOrClientIDResponse) ProtoMessage() {} +func (*QueryRateLimitsByChannelOrClientIDResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_f55a91bf266ae0f7, []int{7} +} +func (m *QueryRateLimitsByChannelOrClientIDResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryRateLimitsByChannelOrClientIDResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryRateLimitsByChannelOrClientIDResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryRateLimitsByChannelOrClientIDResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryRateLimitsByChannelOrClientIDResponse.Merge(m, src) +} +func (m *QueryRateLimitsByChannelOrClientIDResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryRateLimitsByChannelOrClientIDResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryRateLimitsByChannelOrClientIDResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryRateLimitsByChannelOrClientIDResponse proto.InternalMessageInfo + +func (m *QueryRateLimitsByChannelOrClientIDResponse) GetRateLimits() []RateLimit { + if m != nil { + return m.RateLimits + } + return nil +} + +// Queries all blacklisted denoms +type QueryAllBlacklistedDenomsRequest struct { +} + +func (m *QueryAllBlacklistedDenomsRequest) Reset() { *m = QueryAllBlacklistedDenomsRequest{} } +func (m *QueryAllBlacklistedDenomsRequest) String() string { return proto.CompactTextString(m) } +func (*QueryAllBlacklistedDenomsRequest) ProtoMessage() {} +func (*QueryAllBlacklistedDenomsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_f55a91bf266ae0f7, []int{8} +} +func (m *QueryAllBlacklistedDenomsRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryAllBlacklistedDenomsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryAllBlacklistedDenomsRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := 
m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryAllBlacklistedDenomsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryAllBlacklistedDenomsRequest.Merge(m, src) +} +func (m *QueryAllBlacklistedDenomsRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryAllBlacklistedDenomsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryAllBlacklistedDenomsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryAllBlacklistedDenomsRequest proto.InternalMessageInfo + +// QueryAllBlacklistedDenomsResponse returns all the blacklisted denosm. +type QueryAllBlacklistedDenomsResponse struct { + Denoms []string `protobuf:"bytes,1,rep,name=denoms,proto3" json:"denoms,omitempty"` +} + +func (m *QueryAllBlacklistedDenomsResponse) Reset() { *m = QueryAllBlacklistedDenomsResponse{} } +func (m *QueryAllBlacklistedDenomsResponse) String() string { return proto.CompactTextString(m) } +func (*QueryAllBlacklistedDenomsResponse) ProtoMessage() {} +func (*QueryAllBlacklistedDenomsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_f55a91bf266ae0f7, []int{9} +} +func (m *QueryAllBlacklistedDenomsResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryAllBlacklistedDenomsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryAllBlacklistedDenomsResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryAllBlacklistedDenomsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryAllBlacklistedDenomsResponse.Merge(m, src) +} +func (m *QueryAllBlacklistedDenomsResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryAllBlacklistedDenomsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryAllBlacklistedDenomsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryAllBlacklistedDenomsResponse proto.InternalMessageInfo + +func (m *QueryAllBlacklistedDenomsResponse) GetDenoms() []string { + if m != nil { + return m.Denoms + } + return nil +} + +// Queries all whitelisted address pairs +type QueryAllWhitelistedAddressesRequest struct { +} + +func (m *QueryAllWhitelistedAddressesRequest) Reset() { *m = QueryAllWhitelistedAddressesRequest{} } +func (m *QueryAllWhitelistedAddressesRequest) String() string { return proto.CompactTextString(m) } +func (*QueryAllWhitelistedAddressesRequest) ProtoMessage() {} +func (*QueryAllWhitelistedAddressesRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_f55a91bf266ae0f7, []int{10} +} +func (m *QueryAllWhitelistedAddressesRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryAllWhitelistedAddressesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryAllWhitelistedAddressesRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryAllWhitelistedAddressesRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryAllWhitelistedAddressesRequest.Merge(m, src) +} +func (m *QueryAllWhitelistedAddressesRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryAllWhitelistedAddressesRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryAllWhitelistedAddressesRequest.DiscardUnknown(m) +} + +var 
xxx_messageInfo_QueryAllWhitelistedAddressesRequest proto.InternalMessageInfo + +// QueryAllWhitelistedAddressesResponse returns all whitelisted pairs. +type QueryAllWhitelistedAddressesResponse struct { + AddressPairs []WhitelistedAddressPair `protobuf:"bytes,1,rep,name=address_pairs,json=addressPairs,proto3" json:"address_pairs"` +} + +func (m *QueryAllWhitelistedAddressesResponse) Reset() { *m = QueryAllWhitelistedAddressesResponse{} } +func (m *QueryAllWhitelistedAddressesResponse) String() string { return proto.CompactTextString(m) } +func (*QueryAllWhitelistedAddressesResponse) ProtoMessage() {} +func (*QueryAllWhitelistedAddressesResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_f55a91bf266ae0f7, []int{11} +} +func (m *QueryAllWhitelistedAddressesResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryAllWhitelistedAddressesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryAllWhitelistedAddressesResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryAllWhitelistedAddressesResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryAllWhitelistedAddressesResponse.Merge(m, src) +} +func (m *QueryAllWhitelistedAddressesResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryAllWhitelistedAddressesResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryAllWhitelistedAddressesResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryAllWhitelistedAddressesResponse proto.InternalMessageInfo + +func (m *QueryAllWhitelistedAddressesResponse) GetAddressPairs() []WhitelistedAddressPair { + if m != nil { + return m.AddressPairs + } + return nil +} + +func init() { + proto.RegisterType((*QueryAllRateLimitsRequest)(nil), "ibc.applications.rate_limiting.v1.QueryAllRateLimitsRequest") + proto.RegisterType((*QueryAllRateLimitsResponse)(nil), "ibc.applications.rate_limiting.v1.QueryAllRateLimitsResponse") + proto.RegisterType((*QueryRateLimitRequest)(nil), "ibc.applications.rate_limiting.v1.QueryRateLimitRequest") + proto.RegisterType((*QueryRateLimitResponse)(nil), "ibc.applications.rate_limiting.v1.QueryRateLimitResponse") + proto.RegisterType((*QueryRateLimitsByChainIDRequest)(nil), "ibc.applications.rate_limiting.v1.QueryRateLimitsByChainIDRequest") + proto.RegisterType((*QueryRateLimitsByChainIDResponse)(nil), "ibc.applications.rate_limiting.v1.QueryRateLimitsByChainIDResponse") + proto.RegisterType((*QueryRateLimitsByChannelOrClientIDRequest)(nil), "ibc.applications.rate_limiting.v1.QueryRateLimitsByChannelOrClientIDRequest") + proto.RegisterType((*QueryRateLimitsByChannelOrClientIDResponse)(nil), "ibc.applications.rate_limiting.v1.QueryRateLimitsByChannelOrClientIDResponse") + proto.RegisterType((*QueryAllBlacklistedDenomsRequest)(nil), "ibc.applications.rate_limiting.v1.QueryAllBlacklistedDenomsRequest") + proto.RegisterType((*QueryAllBlacklistedDenomsResponse)(nil), "ibc.applications.rate_limiting.v1.QueryAllBlacklistedDenomsResponse") + proto.RegisterType((*QueryAllWhitelistedAddressesRequest)(nil), "ibc.applications.rate_limiting.v1.QueryAllWhitelistedAddressesRequest") + proto.RegisterType((*QueryAllWhitelistedAddressesResponse)(nil), "ibc.applications.rate_limiting.v1.QueryAllWhitelistedAddressesResponse") +} + +func init() { + proto.RegisterFile("ibc/applications/rate_limiting/v1/query.proto", fileDescriptor_f55a91bf266ae0f7) +} + 
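For readers unfamiliar with the generated Query service above, here is a minimal client-side usage sketch. It is illustrative only — not part of this PR or of the generated file — and the gRPC endpoint address and connection setup are assumptions; it simply dials a node and queries the rate limit for a denom on a given channel or client ID through the generated QueryClient.

package main

import (
	"context"
	"fmt"
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"

	"github.com/cosmos/ibc-go/v10/modules/apps/rate-limiting/types"
)

func main() {
	// Dial a node's gRPC endpoint (the address is hypothetical).
	conn, err := grpc.Dial("localhost:9090", grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// NewQueryClient accepts any connection satisfying the gogoproto grpc
	// ClientConn interface, which *grpc.ClientConn does.
	client := types.NewQueryClient(conn)

	// Query the rate limit configured for a denom on a specific channel or client ID.
	resp, err := client.RateLimit(context.Background(), &types.QueryRateLimitRequest{
		Denom:             "uatom",
		ChannelOrClientId: "channel-0",
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(resp.RateLimit)
}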
+var fileDescriptor_f55a91bf266ae0f7 = []byte{ + // 742 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x56, 0x41, 0x4f, 0x13, 0x4d, + 0x18, 0xee, 0xf2, 0x7d, 0xf0, 0x7d, 0x7d, 0x91, 0x83, 0x63, 0x45, 0x58, 0xb5, 0xc0, 0x2a, 0x11, + 0x8d, 0xed, 0x0a, 0xc6, 0x28, 0x0a, 0x31, 0xb4, 0x04, 0x25, 0x22, 0x62, 0x4d, 0x24, 0x31, 0xc6, + 0x66, 0xba, 0x3b, 0x69, 0x27, 0x6e, 0x77, 0x96, 0x9d, 0x2d, 0xa4, 0x31, 0x1c, 0xf4, 0xcc, 0xc1, + 0xc4, 0x9f, 0xe3, 0x1f, 0xe0, 0x48, 0xe2, 0x85, 0x93, 0x31, 0xc5, 0x93, 0x07, 0x7f, 0x83, 0xd9, + 0xd9, 0xe9, 0x96, 0xc2, 0xb6, 0xb4, 0x45, 0x6e, 0xdd, 0x9d, 0xf7, 0x7d, 0xde, 0xe7, 0x79, 0xf6, + 0x9d, 0x27, 0x85, 0x14, 0x2d, 0x18, 0x3a, 0x76, 0x1c, 0x8b, 0x1a, 0xd8, 0xa3, 0xcc, 0xe6, 0xba, + 0x8b, 0x3d, 0x92, 0xb7, 0x68, 0x99, 0x7a, 0xd4, 0x2e, 0xea, 0x9b, 0xd3, 0xfa, 0x46, 0x85, 0xb8, + 0xd5, 0xb4, 0xe3, 0x32, 0x8f, 0xa1, 0x09, 0x5a, 0x30, 0xd2, 0x87, 0xcb, 0xd3, 0x4d, 0xe5, 0xe9, + 0xcd, 0x69, 0x35, 0x51, 0x64, 0x45, 0x26, 0xaa, 0x75, 0xff, 0x57, 0xd0, 0xa8, 0x5e, 0x29, 0x32, + 0x56, 0xb4, 0x88, 0x8e, 0x1d, 0xaa, 0x63, 0xdb, 0x66, 0x9e, 0x6c, 0x0f, 0x4e, 0xef, 0x9d, 0xcc, + 0xa2, 0x79, 0x8e, 0x68, 0xd3, 0x2e, 0xc3, 0xe8, 0x4b, 0x9f, 0xdc, 0x82, 0x65, 0xe5, 0xb0, 0x47, + 0x56, 0xfc, 0x53, 0x9e, 0x23, 0x1b, 0x15, 0xc2, 0x3d, 0x6d, 0x03, 0xd4, 0xa8, 0x43, 0xee, 0x30, + 0x9b, 0x13, 0xf4, 0x0a, 0x06, 0x1b, 0x88, 0x7c, 0x44, 0x19, 0xff, 0x67, 0x6a, 0x70, 0xe6, 0x76, + 0xfa, 0x44, 0x79, 0xe9, 0x10, 0x2b, 0xf3, 0xef, 0xee, 0xf7, 0xb1, 0x58, 0x0e, 0xdc, 0x10, 0x5c, + 0x7b, 0x07, 0x17, 0xc5, 0xc8, 0xb0, 0x46, 0x72, 0x41, 0x09, 0xe8, 0x37, 0x89, 0xcd, 0xca, 0x23, + 0xca, 0xb8, 0x32, 0x15, 0xcf, 0x05, 0x0f, 0x48, 0x87, 0x84, 0x51, 0xc2, 0xb6, 0x4d, 0xac, 0x3c, + 0x73, 0xf3, 0x86, 0x45, 0x89, 0xed, 0xe5, 0xa9, 0x39, 0xd2, 0x27, 0x8a, 0xce, 0xcb, 0xb3, 0x17, + 0x6e, 0x56, 0x9c, 0x2c, 0x9b, 0x1a, 0x81, 0xe1, 0xa3, 0xf8, 0x52, 0xce, 0x33, 0x80, 0x06, 0x53, + 0x31, 0xa5, 0x4b, 0x35, 0xb9, 0x78, 0xa8, 0x43, 0x9b, 0x83, 0xb1, 0xe6, 0x31, 0x3c, 0x53, 0xcd, + 0x96, 0x30, 0xb5, 0x97, 0x17, 0xeb, 0x82, 0x46, 0xe1, 0x7f, 0xc3, 0x7f, 0xe3, 0xd3, 0x0d, 0x34, + 0xfd, 0x27, 0x9e, 0x97, 0x4d, 0x6d, 0x0b, 0xc6, 0x5b, 0x77, 0x9f, 0xa5, 0xfb, 0x6f, 0xe1, 0x66, + 0xd4, 0xe0, 0x26, 0x0f, 0x43, 0x01, 0xad, 0xbc, 0x57, 0x5a, 0x79, 0xff, 0x51, 0x81, 0x5b, 0x9d, + 0xc0, 0x9f, 0xa5, 0x42, 0x4d, 0x5a, 0xbb, 0x60, 0x59, 0x19, 0x0b, 0x1b, 0xef, 0x2d, 0xca, 0x3d, + 0x62, 0x2e, 0xfa, 0xcb, 0x14, 0xae, 0xfd, 0x23, 0x98, 0x68, 0x53, 0x23, 0xd9, 0x0d, 0xc3, 0x80, + 0x58, 0xc1, 0x80, 0x58, 0x3c, 0x27, 0x9f, 0xb4, 0x49, 0xb8, 0x56, 0x6f, 0x5e, 0x2f, 0x51, 0x8f, + 0x04, 0xcd, 0x0b, 0xa6, 0xe9, 0x12, 0xce, 0x49, 0x38, 0x63, 0x47, 0x81, 0xeb, 0xed, 0xeb, 0xe4, + 0x1c, 0x13, 0x86, 0x70, 0xf0, 0x32, 0xef, 0x60, 0xea, 0xd6, 0x7d, 0x98, 0xed, 0xc0, 0x87, 0xe3, + 0xb8, 0x6b, 0x98, 0xba, 0xd2, 0x94, 0x73, 0xb8, 0xf1, 0x8a, 0xcf, 0xfc, 0x02, 0xe8, 0x17, 0x74, + 0xd0, 0x57, 0x05, 0x86, 0x9a, 0xee, 0x3b, 0x9a, 0xeb, 0x60, 0x54, 0xcb, 0x0c, 0x51, 0xe7, 0x7b, + 0xec, 0x0e, 0xe4, 0x6b, 0xa9, 0x4f, 0xdf, 0x7e, 0x7e, 0xe9, 0xbb, 0x81, 0x26, 0x75, 0x99, 0x6f, + 0x41, 0xae, 0xa5, 0x8e, 0xe6, 0x5a, 0xb0, 0x24, 0x68, 0x5f, 0x81, 0x78, 0x88, 0x82, 0x1e, 0x74, + 0x3a, 0xfb, 0x68, 0xda, 0xa8, 0xb3, 0x3d, 0x74, 0x4a, 0xc6, 0xaf, 0x05, 0xe3, 0x35, 0xb4, 0xda, + 0x09, 0xe3, 0x43, 0xbf, 0x3e, 0x44, 0x5d, 0xa6, 0x6d, 0xbd, 0x50, 0xcd, 0x07, 0x51, 0x57, 0x53, + 0xe0, 0x42, 0x44, 0x20, 0xa0, 0x4c, 0xd7, 0x54, 0x8f, 0x65, 0x91, 0x9a, 0x3d, 0x15, 0x86, 0x14, + 0x9e, 0x11, 0xc2, 0xe7, 
0xd0, 0xc3, 0xee, 0x84, 0x73, 0xa1, 0x5c, 0xe4, 0xe0, 0x36, 0xda, 0xe9, + 0x83, 0xab, 0x6d, 0xd3, 0x01, 0xad, 0xf4, 0x48, 0x35, 0x32, 0xc3, 0xd4, 0xe7, 0x7f, 0x09, 0x4d, + 0x5a, 0xb0, 0x2a, 0x2c, 0x78, 0x8a, 0x96, 0x7a, 0xb1, 0xe0, 0xf8, 0xc7, 0xf7, 0xbf, 0x79, 0x22, + 0x2a, 0x85, 0x50, 0xb6, 0x8b, 0x5b, 0xd5, 0x2a, 0xe7, 0xd4, 0xc5, 0xd3, 0x81, 0x48, 0xcd, 0x8f, + 0x85, 0xe6, 0x59, 0x74, 0xbf, 0x23, 0xcd, 0x85, 0x06, 0x4e, 0xb0, 0xd7, 0x1c, 0xfd, 0x56, 0xe0, + 0x52, 0x8b, 0x14, 0x44, 0x4b, 0x5d, 0x50, 0x6c, 0x13, 0xb7, 0xea, 0x93, 0x53, 0xe3, 0xf4, 0xb4, + 0xe4, 0x5b, 0x0d, 0xa8, 0x3c, 0xae, 0x63, 0x65, 0xd6, 0x77, 0x6b, 0x49, 0x65, 0xaf, 0x96, 0x54, + 0x7e, 0xd4, 0x92, 0xca, 0xe7, 0x83, 0x64, 0x6c, 0xef, 0x20, 0x19, 0xdb, 0x3f, 0x48, 0xc6, 0xde, + 0xcc, 0x17, 0xa9, 0x57, 0xaa, 0x14, 0xd2, 0x06, 0x2b, 0xeb, 0x06, 0xe3, 0x65, 0xc6, 0xfd, 0x31, + 0xa9, 0x22, 0xd3, 0x37, 0xa7, 0xef, 0xe8, 0x65, 0x66, 0x56, 0x2c, 0xc2, 0xa3, 0xa6, 0x7a, 0x55, + 0x87, 0xf0, 0xc2, 0x80, 0xf8, 0x4f, 0x77, 0xf7, 0x4f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x19, 0xfa, + 0xfe, 0xd6, 0x92, 0x0a, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// QueryClient is the client API for Query service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type QueryClient interface { + // Queries all rate limits + AllRateLimits(ctx context.Context, in *QueryAllRateLimitsRequest, opts ...grpc.CallOption) (*QueryAllRateLimitsResponse, error) + // Queries a specific rate limit by channel ID and denom + // Ex: + // - /ratelimit/{channel_or_client_id}/by_denom?denom={denom} + RateLimit(ctx context.Context, in *QueryRateLimitRequest, opts ...grpc.CallOption) (*QueryRateLimitResponse, error) + // Queries all the rate limits for a given chain + RateLimitsByChainID(ctx context.Context, in *QueryRateLimitsByChainIDRequest, opts ...grpc.CallOption) (*QueryRateLimitsByChainIDResponse, error) + // Queries all the rate limits for a given channel ID + RateLimitsByChannelOrClientID(ctx context.Context, in *QueryRateLimitsByChannelOrClientIDRequest, opts ...grpc.CallOption) (*QueryRateLimitsByChannelOrClientIDResponse, error) + // Queries all blacklisted denoms + AllBlacklistedDenoms(ctx context.Context, in *QueryAllBlacklistedDenomsRequest, opts ...grpc.CallOption) (*QueryAllBlacklistedDenomsResponse, error) + // Queries all whitelisted address pairs + AllWhitelistedAddresses(ctx context.Context, in *QueryAllWhitelistedAddressesRequest, opts ...grpc.CallOption) (*QueryAllWhitelistedAddressesResponse, error) +} + +type queryClient struct { + cc grpc1.ClientConn +} + +func NewQueryClient(cc grpc1.ClientConn) QueryClient { + return &queryClient{cc} +} + +func (c *queryClient) AllRateLimits(ctx context.Context, in *QueryAllRateLimitsRequest, opts ...grpc.CallOption) (*QueryAllRateLimitsResponse, error) { + out := new(QueryAllRateLimitsResponse) + err := c.cc.Invoke(ctx, "/ibc.applications.rate_limiting.v1.Query/AllRateLimits", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *queryClient) RateLimit(ctx context.Context, in *QueryRateLimitRequest, opts ...grpc.CallOption) (*QueryRateLimitResponse, error) { + out := new(QueryRateLimitResponse) + err := c.cc.Invoke(ctx, "/ibc.applications.rate_limiting.v1.Query/RateLimit", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *queryClient) RateLimitsByChainID(ctx context.Context, in *QueryRateLimitsByChainIDRequest, opts ...grpc.CallOption) (*QueryRateLimitsByChainIDResponse, error) { + out := new(QueryRateLimitsByChainIDResponse) + err := c.cc.Invoke(ctx, "/ibc.applications.rate_limiting.v1.Query/RateLimitsByChainID", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *queryClient) RateLimitsByChannelOrClientID(ctx context.Context, in *QueryRateLimitsByChannelOrClientIDRequest, opts ...grpc.CallOption) (*QueryRateLimitsByChannelOrClientIDResponse, error) { + out := new(QueryRateLimitsByChannelOrClientIDResponse) + err := c.cc.Invoke(ctx, "/ibc.applications.rate_limiting.v1.Query/RateLimitsByChannelOrClientID", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *queryClient) AllBlacklistedDenoms(ctx context.Context, in *QueryAllBlacklistedDenomsRequest, opts ...grpc.CallOption) (*QueryAllBlacklistedDenomsResponse, error) { + out := new(QueryAllBlacklistedDenomsResponse) + err := c.cc.Invoke(ctx, "/ibc.applications.rate_limiting.v1.Query/AllBlacklistedDenoms", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *queryClient) AllWhitelistedAddresses(ctx context.Context, in *QueryAllWhitelistedAddressesRequest, opts ...grpc.CallOption) (*QueryAllWhitelistedAddressesResponse, error) { + out := new(QueryAllWhitelistedAddressesResponse) + err := c.cc.Invoke(ctx, "/ibc.applications.rate_limiting.v1.Query/AllWhitelistedAddresses", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// QueryServer is the server API for Query service. +type QueryServer interface { + // Queries all rate limits + AllRateLimits(context.Context, *QueryAllRateLimitsRequest) (*QueryAllRateLimitsResponse, error) + // Queries a specific rate limit by channel ID and denom + // Ex: + // - /ratelimit/{channel_or_client_id}/by_denom?denom={denom} + RateLimit(context.Context, *QueryRateLimitRequest) (*QueryRateLimitResponse, error) + // Queries all the rate limits for a given chain + RateLimitsByChainID(context.Context, *QueryRateLimitsByChainIDRequest) (*QueryRateLimitsByChainIDResponse, error) + // Queries all the rate limits for a given channel ID + RateLimitsByChannelOrClientID(context.Context, *QueryRateLimitsByChannelOrClientIDRequest) (*QueryRateLimitsByChannelOrClientIDResponse, error) + // Queries all blacklisted denoms + AllBlacklistedDenoms(context.Context, *QueryAllBlacklistedDenomsRequest) (*QueryAllBlacklistedDenomsResponse, error) + // Queries all whitelisted address pairs + AllWhitelistedAddresses(context.Context, *QueryAllWhitelistedAddressesRequest) (*QueryAllWhitelistedAddressesResponse, error) +} + +// UnimplementedQueryServer can be embedded to have forward compatible implementations. 
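As a point of reference, the following hypothetical sketch (assumed, not taken from this PR) shows how a server-side implementation could embed the UnimplementedQueryServer declared just below to stay forward compatible — new RPCs added to the Query service later will not break compilation — while overriding only the methods it actually implements. The package and type names are assumptions for the example.

package keeper // hypothetical package for the sketch

import (
	"context"

	"github.com/cosmos/ibc-go/v10/modules/apps/rate-limiting/types"
)

// rateLimitQueryServer embeds UnimplementedQueryServer; unimplemented RPCs
// fall back to the generated stubs returning codes.Unimplemented.
type rateLimitQueryServer struct {
	types.UnimplementedQueryServer
}

// Compile-time assertion that the pointer type satisfies QueryServer.
var _ types.QueryServer = (*rateLimitQueryServer)(nil)

// AllRateLimits overrides the generated stub with a trivial response.
func (*rateLimitQueryServer) AllRateLimits(ctx context.Context, req *types.QueryAllRateLimitsRequest) (*types.QueryAllRateLimitsResponse, error) {
	return &types.QueryAllRateLimitsResponse{RateLimits: []types.RateLimit{}}, nil
}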
+type UnimplementedQueryServer struct { +} + +func (*UnimplementedQueryServer) AllRateLimits(ctx context.Context, req *QueryAllRateLimitsRequest) (*QueryAllRateLimitsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method AllRateLimits not implemented") +} +func (*UnimplementedQueryServer) RateLimit(ctx context.Context, req *QueryRateLimitRequest) (*QueryRateLimitResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method RateLimit not implemented") +} +func (*UnimplementedQueryServer) RateLimitsByChainID(ctx context.Context, req *QueryRateLimitsByChainIDRequest) (*QueryRateLimitsByChainIDResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method RateLimitsByChainID not implemented") +} +func (*UnimplementedQueryServer) RateLimitsByChannelOrClientID(ctx context.Context, req *QueryRateLimitsByChannelOrClientIDRequest) (*QueryRateLimitsByChannelOrClientIDResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method RateLimitsByChannelOrClientID not implemented") +} +func (*UnimplementedQueryServer) AllBlacklistedDenoms(ctx context.Context, req *QueryAllBlacklistedDenomsRequest) (*QueryAllBlacklistedDenomsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method AllBlacklistedDenoms not implemented") +} +func (*UnimplementedQueryServer) AllWhitelistedAddresses(ctx context.Context, req *QueryAllWhitelistedAddressesRequest) (*QueryAllWhitelistedAddressesResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method AllWhitelistedAddresses not implemented") +} + +func RegisterQueryServer(s grpc1.Server, srv QueryServer) { + s.RegisterService(&_Query_serviceDesc, srv) +} + +func _Query_AllRateLimits_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryAllRateLimitsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).AllRateLimits(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/ibc.applications.rate_limiting.v1.Query/AllRateLimits", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).AllRateLimits(ctx, req.(*QueryAllRateLimitsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Query_RateLimit_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryRateLimitRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).RateLimit(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/ibc.applications.rate_limiting.v1.Query/RateLimit", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).RateLimit(ctx, req.(*QueryRateLimitRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Query_RateLimitsByChainID_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryRateLimitsByChainIDRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).RateLimitsByChainID(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/ibc.applications.rate_limiting.v1.Query/RateLimitsByChainID", + } + handler := func(ctx context.Context, req 
interface{}) (interface{}, error) { + return srv.(QueryServer).RateLimitsByChainID(ctx, req.(*QueryRateLimitsByChainIDRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Query_RateLimitsByChannelOrClientID_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryRateLimitsByChannelOrClientIDRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).RateLimitsByChannelOrClientID(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/ibc.applications.rate_limiting.v1.Query/RateLimitsByChannelOrClientID", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).RateLimitsByChannelOrClientID(ctx, req.(*QueryRateLimitsByChannelOrClientIDRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Query_AllBlacklistedDenoms_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryAllBlacklistedDenomsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).AllBlacklistedDenoms(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/ibc.applications.rate_limiting.v1.Query/AllBlacklistedDenoms", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).AllBlacklistedDenoms(ctx, req.(*QueryAllBlacklistedDenomsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Query_AllWhitelistedAddresses_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryAllWhitelistedAddressesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).AllWhitelistedAddresses(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/ibc.applications.rate_limiting.v1.Query/AllWhitelistedAddresses", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).AllWhitelistedAddresses(ctx, req.(*QueryAllWhitelistedAddressesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var Query_serviceDesc = _Query_serviceDesc +var _Query_serviceDesc = grpc.ServiceDesc{ + ServiceName: "ibc.applications.rate_limiting.v1.Query", + HandlerType: (*QueryServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "AllRateLimits", + Handler: _Query_AllRateLimits_Handler, + }, + { + MethodName: "RateLimit", + Handler: _Query_RateLimit_Handler, + }, + { + MethodName: "RateLimitsByChainID", + Handler: _Query_RateLimitsByChainID_Handler, + }, + { + MethodName: "RateLimitsByChannelOrClientID", + Handler: _Query_RateLimitsByChannelOrClientID_Handler, + }, + { + MethodName: "AllBlacklistedDenoms", + Handler: _Query_AllBlacklistedDenoms_Handler, + }, + { + MethodName: "AllWhitelistedAddresses", + Handler: _Query_AllWhitelistedAddresses_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "ibc/applications/rate_limiting/v1/query.proto", +} + +func (m *QueryAllRateLimitsRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m 
*QueryAllRateLimitsRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryAllRateLimitsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *QueryAllRateLimitsResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryAllRateLimitsResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryAllRateLimitsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.RateLimits) > 0 { + for iNdEx := len(m.RateLimits) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.RateLimits[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *QueryRateLimitRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryRateLimitRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryRateLimitRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ChannelOrClientId) > 0 { + i -= len(m.ChannelOrClientId) + copy(dAtA[i:], m.ChannelOrClientId) + i = encodeVarintQuery(dAtA, i, uint64(len(m.ChannelOrClientId))) + i-- + dAtA[i] = 0x12 + } + if len(m.Denom) > 0 { + i -= len(m.Denom) + copy(dAtA[i:], m.Denom) + i = encodeVarintQuery(dAtA, i, uint64(len(m.Denom))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *QueryRateLimitResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryRateLimitResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryRateLimitResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.RateLimit != nil { + { + size, err := m.RateLimit.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *QueryRateLimitsByChainIDRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryRateLimitsByChainIDRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryRateLimitsByChainIDRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ChainId) > 0 { + i -= len(m.ChainId) + copy(dAtA[i:], m.ChainId) + i = encodeVarintQuery(dAtA, i, uint64(len(m.ChainId))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *QueryRateLimitsByChainIDResponse) Marshal() (dAtA 
[]byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryRateLimitsByChainIDResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryRateLimitsByChainIDResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.RateLimits) > 0 { + for iNdEx := len(m.RateLimits) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.RateLimits[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *QueryRateLimitsByChannelOrClientIDRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryRateLimitsByChannelOrClientIDRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryRateLimitsByChannelOrClientIDRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ChannelOrClientId) > 0 { + i -= len(m.ChannelOrClientId) + copy(dAtA[i:], m.ChannelOrClientId) + i = encodeVarintQuery(dAtA, i, uint64(len(m.ChannelOrClientId))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *QueryRateLimitsByChannelOrClientIDResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryRateLimitsByChannelOrClientIDResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryRateLimitsByChannelOrClientIDResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.RateLimits) > 0 { + for iNdEx := len(m.RateLimits) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.RateLimits[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *QueryAllBlacklistedDenomsRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryAllBlacklistedDenomsRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryAllBlacklistedDenomsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *QueryAllBlacklistedDenomsResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryAllBlacklistedDenomsResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryAllBlacklistedDenomsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = 
i + var l int + _ = l + if len(m.Denoms) > 0 { + for iNdEx := len(m.Denoms) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Denoms[iNdEx]) + copy(dAtA[i:], m.Denoms[iNdEx]) + i = encodeVarintQuery(dAtA, i, uint64(len(m.Denoms[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *QueryAllWhitelistedAddressesRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryAllWhitelistedAddressesRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryAllWhitelistedAddressesRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *QueryAllWhitelistedAddressesResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryAllWhitelistedAddressesResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryAllWhitelistedAddressesResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.AddressPairs) > 0 { + for iNdEx := len(m.AddressPairs) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.AddressPairs[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func encodeVarintQuery(dAtA []byte, offset int, v uint64) int { + offset -= sovQuery(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *QueryAllRateLimitsRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *QueryAllRateLimitsResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.RateLimits) > 0 { + for _, e := range m.RateLimits { + l = e.Size() + n += 1 + l + sovQuery(uint64(l)) + } + } + return n +} + +func (m *QueryRateLimitRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Denom) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + l = len(m.ChannelOrClientId) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func (m *QueryRateLimitResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.RateLimit != nil { + l = m.RateLimit.Size() + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func (m *QueryRateLimitsByChainIDRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ChainId) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func (m *QueryRateLimitsByChainIDResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.RateLimits) > 0 { + for _, e := range m.RateLimits { + l = e.Size() + n += 1 + l + sovQuery(uint64(l)) + } + } + return n +} + +func (m *QueryRateLimitsByChannelOrClientIDRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ChannelOrClientId) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func (m *QueryRateLimitsByChannelOrClientIDResponse) Size() (n int) { 
+ if m == nil { + return 0 + } + var l int + _ = l + if len(m.RateLimits) > 0 { + for _, e := range m.RateLimits { + l = e.Size() + n += 1 + l + sovQuery(uint64(l)) + } + } + return n +} + +func (m *QueryAllBlacklistedDenomsRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *QueryAllBlacklistedDenomsResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Denoms) > 0 { + for _, s := range m.Denoms { + l = len(s) + n += 1 + l + sovQuery(uint64(l)) + } + } + return n +} + +func (m *QueryAllWhitelistedAddressesRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *QueryAllWhitelistedAddressesResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.AddressPairs) > 0 { + for _, e := range m.AddressPairs { + l = e.Size() + n += 1 + l + sovQuery(uint64(l)) + } + } + return n +} + +func sovQuery(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozQuery(x uint64) (n int) { + return sovQuery(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *QueryAllRateLimitsRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryAllRateLimitsRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryAllRateLimitsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryAllRateLimitsResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryAllRateLimitsResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryAllRateLimitsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RateLimits", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RateLimits = append(m.RateLimits, RateLimit{}) + if err := 
m.RateLimits[len(m.RateLimits)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryRateLimitRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryRateLimitRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryRateLimitRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Denom", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Denom = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ChannelOrClientId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ChannelOrClientId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryRateLimitResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryRateLimitResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryRateLimitResponse: illegal tag %d (wire type %d)", fieldNum, wire) 
+ } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RateLimit", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RateLimit == nil { + m.RateLimit = &RateLimit{} + } + if err := m.RateLimit.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryRateLimitsByChainIDRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryRateLimitsByChainIDRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryRateLimitsByChainIDRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ChainId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ChainId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryRateLimitsByChainIDResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryRateLimitsByChainIDResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryRateLimitsByChainIDResponse: illegal tag %d (wire type %d)", fieldNum, wire) + 
} + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RateLimits", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RateLimits = append(m.RateLimits, RateLimit{}) + if err := m.RateLimits[len(m.RateLimits)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryRateLimitsByChannelOrClientIDRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryRateLimitsByChannelOrClientIDRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryRateLimitsByChannelOrClientIDRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ChannelOrClientId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ChannelOrClientId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryRateLimitsByChannelOrClientIDResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryRateLimitsByChannelOrClientIDResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: 
QueryRateLimitsByChannelOrClientIDResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RateLimits", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RateLimits = append(m.RateLimits, RateLimit{}) + if err := m.RateLimits[len(m.RateLimits)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryAllBlacklistedDenomsRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryAllBlacklistedDenomsRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryAllBlacklistedDenomsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryAllBlacklistedDenomsResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryAllBlacklistedDenomsResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryAllBlacklistedDenomsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Denoms", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return 
ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Denoms = append(m.Denoms, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryAllWhitelistedAddressesRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryAllWhitelistedAddressesRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryAllWhitelistedAddressesRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryAllWhitelistedAddressesResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryAllWhitelistedAddressesResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryAllWhitelistedAddressesResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AddressPairs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AddressPairs = append(m.AddressPairs, WhitelistedAddressPair{}) + if err := m.AddressPairs[len(m.AddressPairs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipQuery(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var 
wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowQuery + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowQuery + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowQuery + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthQuery + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupQuery + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthQuery + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthQuery = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowQuery = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupQuery = fmt.Errorf("proto: unexpected end of group") +) diff --git a/modules/apps/rate-limiting/types/query.pb.gw.go b/modules/apps/rate-limiting/types/query.pb.gw.go new file mode 100644 index 00000000000..1d6baa9b321 --- /dev/null +++ b/modules/apps/rate-limiting/types/query.pb.gw.go @@ -0,0 +1,604 @@ +// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT. +// source: ibc/applications/rate_limiting/v1/query.proto + +/* +Package types is a reverse proxy. + +It translates gRPC into RESTful JSON APIs. 
+*/ +package types + +import ( + "context" + "io" + "net/http" + + "github.com/golang/protobuf/descriptor" + "github.com/golang/protobuf/proto" + "github.com/grpc-ecosystem/grpc-gateway/runtime" + "github.com/grpc-ecosystem/grpc-gateway/utilities" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" +) + +// Suppress "imported and not used" errors +var _ codes.Code +var _ io.Reader +var _ status.Status +var _ = runtime.String +var _ = utilities.NewDoubleArray +var _ = descriptor.ForMessage +var _ = metadata.Join + +func request_Query_AllRateLimits_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryAllRateLimitsRequest + var metadata runtime.ServerMetadata + + msg, err := client.AllRateLimits(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Query_AllRateLimits_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryAllRateLimitsRequest + var metadata runtime.ServerMetadata + + msg, err := server.AllRateLimits(ctx, &protoReq) + return msg, metadata, err + +} + +var ( + filter_Query_RateLimit_0 = &utilities.DoubleArray{Encoding: map[string]int{"channel_or_client_id": 0}, Base: []int{1, 1, 0}, Check: []int{0, 1, 2}} +) + +func request_Query_RateLimit_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryRateLimitRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["channel_or_client_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "channel_or_client_id") + } + + protoReq.ChannelOrClientId, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "channel_or_client_id", err) + } + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_RateLimit_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.RateLimit(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Query_RateLimit_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryRateLimitRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["channel_or_client_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "channel_or_client_id") + } + + protoReq.ChannelOrClientId, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "channel_or_client_id", err) + } + + if err := req.ParseForm(); err != nil 
{ + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_RateLimit_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.RateLimit(ctx, &protoReq) + return msg, metadata, err + +} + +func request_Query_RateLimitsByChainID_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryRateLimitsByChainIDRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["chain_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "chain_id") + } + + protoReq.ChainId, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "chain_id", err) + } + + msg, err := client.RateLimitsByChainID(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Query_RateLimitsByChainID_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryRateLimitsByChainIDRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["chain_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "chain_id") + } + + protoReq.ChainId, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "chain_id", err) + } + + msg, err := server.RateLimitsByChainID(ctx, &protoReq) + return msg, metadata, err + +} + +func request_Query_RateLimitsByChannelOrClientID_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryRateLimitsByChannelOrClientIDRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["channel_or_client_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "channel_or_client_id") + } + + protoReq.ChannelOrClientId, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "channel_or_client_id", err) + } + + msg, err := client.RateLimitsByChannelOrClientID(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Query_RateLimitsByChannelOrClientID_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryRateLimitsByChannelOrClientIDRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["channel_or_client_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "channel_or_client_id") + } + + protoReq.ChannelOrClientId, err = runtime.String(val) + + if err 
!= nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "channel_or_client_id", err) + } + + msg, err := server.RateLimitsByChannelOrClientID(ctx, &protoReq) + return msg, metadata, err + +} + +func request_Query_AllBlacklistedDenoms_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryAllBlacklistedDenomsRequest + var metadata runtime.ServerMetadata + + msg, err := client.AllBlacklistedDenoms(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Query_AllBlacklistedDenoms_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryAllBlacklistedDenomsRequest + var metadata runtime.ServerMetadata + + msg, err := server.AllBlacklistedDenoms(ctx, &protoReq) + return msg, metadata, err + +} + +func request_Query_AllWhitelistedAddresses_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryAllWhitelistedAddressesRequest + var metadata runtime.ServerMetadata + + msg, err := client.AllWhitelistedAddresses(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Query_AllWhitelistedAddresses_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryAllWhitelistedAddressesRequest + var metadata runtime.ServerMetadata + + msg, err := server.AllWhitelistedAddresses(ctx, &protoReq) + return msg, metadata, err + +} + +// RegisterQueryHandlerServer registers the http handlers for service Query to "mux". +// UnaryRPC :call QueryServer directly. +// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. +// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterQueryHandlerFromEndpoint instead. +func RegisterQueryHandlerServer(ctx context.Context, mux *runtime.ServeMux, server QueryServer) error { + + mux.Handle("GET", pattern_Query_AllRateLimits_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Query_AllRateLimits_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_AllRateLimits_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_Query_RateLimit_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Query_RateLimit_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_RateLimit_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_Query_RateLimitsByChainID_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Query_RateLimitsByChainID_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_RateLimitsByChainID_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_Query_RateLimitsByChannelOrClientID_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Query_RateLimitsByChannelOrClientID_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_RateLimitsByChannelOrClientID_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_Query_AllBlacklistedDenoms_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Query_AllBlacklistedDenoms_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_AllBlacklistedDenoms_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_Query_AllWhitelistedAddresses_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Query_AllWhitelistedAddresses_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_AllWhitelistedAddresses_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + return nil +} + +// RegisterQueryHandlerFromEndpoint is same as RegisterQueryHandler but +// automatically dials to "endpoint" and closes the connection when "ctx" gets done. +func RegisterQueryHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { + conn, err := grpc.Dial(endpoint, opts...) + if err != nil { + return err + } + defer func() { + if err != nil { + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + return + } + go func() { + <-ctx.Done() + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + }() + }() + + return RegisterQueryHandler(ctx, mux, conn) +} + +// RegisterQueryHandler registers the http handlers for service Query to "mux". +// The handlers forward requests to the grpc endpoint over "conn". +func RegisterQueryHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { + return RegisterQueryHandlerClient(ctx, mux, NewQueryClient(conn)) +} + +// RegisterQueryHandlerClient registers the http handlers for service Query +// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "QueryClient". +// Note: the gRPC framework executes interceptors within the gRPC handler. 
If the passed in "QueryClient" +// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in +// "QueryClient" to call the correct interceptors. +func RegisterQueryHandlerClient(ctx context.Context, mux *runtime.ServeMux, client QueryClient) error { + + mux.Handle("GET", pattern_Query_AllRateLimits_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Query_AllRateLimits_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_AllRateLimits_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_Query_RateLimit_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Query_RateLimit_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_RateLimit_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_Query_RateLimitsByChainID_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Query_RateLimitsByChainID_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_RateLimitsByChainID_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_Query_RateLimitsByChannelOrClientID_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Query_RateLimitsByChannelOrClientID_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_RateLimitsByChannelOrClientID_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_Query_AllBlacklistedDenoms_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Query_AllBlacklistedDenoms_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_AllBlacklistedDenoms_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_Query_AllWhitelistedAddresses_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Query_AllWhitelistedAddresses_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_AllWhitelistedAddresses_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + return nil +} + +var ( + pattern_Query_AllRateLimits_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4}, []string{"ibc", "apps", "rate-limiting", "v1", "ratelimits"}, "", runtime.AssumeColonVerbOpt(false))) + + pattern_Query_RateLimit_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4, 2, 4, 1, 0, 4, 1, 5, 5, 2, 6}, []string{"ibc", "apps", "rate-limiting", "v1", "ratelimit", "channel_or_client_id", "by_denom"}, "", runtime.AssumeColonVerbOpt(false))) + + pattern_Query_RateLimitsByChainID_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4, 2, 5, 1, 0, 4, 1, 5, 6}, []string{"ibc", "apps", "rate-limiting", "v1", "ratelimit", "ratelimits", "chain_id"}, "", runtime.AssumeColonVerbOpt(false))) + + pattern_Query_RateLimitsByChannelOrClientID_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4, 2, 5, 1, 0, 4, 1, 5, 6}, []string{"ibc", "apps", "rate-limiting", "v1", "ratelimit", "ratelimits", "channel_or_client_id"}, "", runtime.AssumeColonVerbOpt(false))) + + pattern_Query_AllBlacklistedDenoms_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4, 2, 5}, []string{"ibc", "apps", "rate-limiting", "v1", "ratelimit", "blacklisted_denoms"}, "", runtime.AssumeColonVerbOpt(false))) + + pattern_Query_AllWhitelistedAddresses_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4, 2, 5}, []string{"ibc", "apps", "rate-limiting", "v1", "ratelimit", "whitelisted_addresses"}, "", runtime.AssumeColonVerbOpt(false))) +) + +var ( + forward_Query_AllRateLimits_0 = runtime.ForwardResponseMessage + + forward_Query_RateLimit_0 = runtime.ForwardResponseMessage + + forward_Query_RateLimitsByChainID_0 = runtime.ForwardResponseMessage + + forward_Query_RateLimitsByChannelOrClientID_0 = runtime.ForwardResponseMessage + + 
forward_Query_AllBlacklistedDenoms_0 = runtime.ForwardResponseMessage + + forward_Query_AllWhitelistedAddresses_0 = runtime.ForwardResponseMessage +) diff --git a/modules/apps/rate-limiting/types/quota.go b/modules/apps/rate-limiting/types/quota.go new file mode 100644 index 00000000000..ac89e1713c4 --- /dev/null +++ b/modules/apps/rate-limiting/types/quota.go @@ -0,0 +1,23 @@ +package types + +import ( + sdkmath "cosmossdk.io/math" +) + +// CheckExceedsQuota checks if new in/out flow is going to reach the max in/out or not +func (q *Quota) CheckExceedsQuota(direction PacketDirection, amount sdkmath.Int, totalValue sdkmath.Int) bool { + // If there's no channel value (this should be almost impossible), it means there is no + // supply of the asset, so we shouldn't prevent inflows/outflows + if totalValue.IsZero() { + return false + } + var threshold sdkmath.Int + if direction == PACKET_RECV { + threshold = totalValue.Mul(q.MaxPercentRecv).Quo(sdkmath.NewInt(100)) + } else { + threshold = totalValue.Mul(q.MaxPercentSend).Quo(sdkmath.NewInt(100)) + } + + // Revert to GT check as in the original reference module + return amount.GT(threshold) +} diff --git a/modules/apps/rate-limiting/types/quota_test.go b/modules/apps/rate-limiting/types/quota_test.go new file mode 100644 index 00000000000..d441b7609b9 --- /dev/null +++ b/modules/apps/rate-limiting/types/quota_test.go @@ -0,0 +1,80 @@ +package types_test + +import ( + "testing" + + "github.com/stretchr/testify/require" + + sdkmath "cosmossdk.io/math" + + "github.com/cosmos/ibc-go/v10/modules/apps/rate-limiting/types" +) + +func TestCheckExceedsQuota(t *testing.T) { + totalValue := sdkmath.NewInt(100) + amountUnderThreshold := sdkmath.NewInt(5) + amountOverThreshold := sdkmath.NewInt(15) + quota := types.Quota{ + MaxPercentRecv: sdkmath.NewInt(10), + MaxPercentSend: sdkmath.NewInt(10), + DurationHours: uint64(1), + } + + tests := []struct { + name string + direction types.PacketDirection + amount sdkmath.Int + totalValue sdkmath.Int + exceeded bool + }{ + { + name: "inflow exceeded threshold", + direction: types.PACKET_RECV, + amount: amountOverThreshold, + totalValue: totalValue, + exceeded: true, + }, + { + name: "inflow did not exceed threshold", + direction: types.PACKET_RECV, + amount: amountUnderThreshold, + totalValue: totalValue, + exceeded: false, + }, + { + name: "outflow exceeded threshold", + direction: types.PACKET_SEND, + amount: amountOverThreshold, + totalValue: totalValue, + exceeded: true, + }, + { + name: "outflow did not exceed threshold", + direction: types.PACKET_SEND, + amount: amountUnderThreshold, + totalValue: totalValue, + exceeded: false, + }, + { + name: "zero channel value send", + direction: types.PACKET_SEND, + amount: amountOverThreshold, + totalValue: sdkmath.ZeroInt(), + exceeded: false, + }, + { + name: "zero channel value recv", + direction: types.PACKET_RECV, + amount: amountOverThreshold, + totalValue: sdkmath.ZeroInt(), + exceeded: false, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + res := quota.CheckExceedsQuota(test.direction, test.amount, test.totalValue) + require.Equal(t, res, test.exceeded, "test: %s", test.name) + }) + } +} diff --git a/modules/apps/rate-limiting/types/rate_limiting.pb.go b/modules/apps/rate-limiting/types/rate_limiting.pb.go new file mode 100644 index 00000000000..b6f43336a86 --- /dev/null +++ b/modules/apps/rate-limiting/types/rate_limiting.pb.go @@ -0,0 +1,1779 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. 
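For reference, the threshold arithmetic used by CheckExceedsQuota above can be worked through in isolation. The standalone sketch below is illustrative only and not part of the change set (the package, values, and prints are assumptions): with a 10% quota against a channel value of 100 the threshold is 10, and because the check uses a strict GT comparison, a flow of exactly 10 is still allowed while 11 is rate limited.

package main

import (
	"fmt"

	sdkmath "cosmossdk.io/math"
)

func main() {
	// Illustrative values: channel value 100, quota 10%.
	totalValue := sdkmath.NewInt(100)
	maxPercent := sdkmath.NewInt(10)

	// threshold = totalValue * maxPercent / 100 = 10
	threshold := totalValue.Mul(maxPercent).Quo(sdkmath.NewInt(100))

	fmt.Println(sdkmath.NewInt(10).GT(threshold)) // false: exactly at the threshold is not "exceeded"
	fmt.Println(sdkmath.NewInt(11).GT(threshold)) // true: anything above the threshold is
}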
+// source: ibc/applications/rate_limiting/v1/rate_limiting.proto + +package types + +import ( + cosmossdk_io_math "cosmossdk.io/math" + fmt "fmt" + _ "github.com/cosmos/gogoproto/gogoproto" + proto "github.com/cosmos/gogoproto/proto" + github_com_cosmos_gogoproto_types "github.com/cosmos/gogoproto/types" + _ "google.golang.org/protobuf/types/known/durationpb" + _ "google.golang.org/protobuf/types/known/timestamppb" + io "io" + math "math" + math_bits "math/bits" + time "time" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf +var _ = time.Kitchen + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// PacketDirection defines whether the transfer packet is being sent from +// this chain or is being received on this chain +type PacketDirection int32 + +const ( + PACKET_SEND PacketDirection = 0 + PACKET_RECV PacketDirection = 1 +) + +var PacketDirection_name = map[int32]string{ + 0: "PACKET_SEND", + 1: "PACKET_RECV", +} + +var PacketDirection_value = map[string]int32{ + "PACKET_SEND": 0, + "PACKET_RECV": 1, +} + +func (x PacketDirection) String() string { + return proto.EnumName(PacketDirection_name, int32(x)) +} + +func (PacketDirection) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_bf22d2adece00654, []int{0} +} + +// Path holds the denom and channelID that define the rate limited route +type Path struct { + Denom string `protobuf:"bytes,1,opt,name=denom,proto3" json:"denom,omitempty"` + ChannelOrClientId string `protobuf:"bytes,2,opt,name=channel_or_client_id,json=channelOrClientId,proto3" json:"channel_or_client_id,omitempty"` +} + +func (m *Path) Reset() { *m = Path{} } +func (m *Path) String() string { return proto.CompactTextString(m) } +func (*Path) ProtoMessage() {} +func (*Path) Descriptor() ([]byte, []int) { + return fileDescriptor_bf22d2adece00654, []int{0} +} +func (m *Path) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Path) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Path.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Path) XXX_Merge(src proto.Message) { + xxx_messageInfo_Path.Merge(m, src) +} +func (m *Path) XXX_Size() int { + return m.Size() +} +func (m *Path) XXX_DiscardUnknown() { + xxx_messageInfo_Path.DiscardUnknown(m) +} + +var xxx_messageInfo_Path proto.InternalMessageInfo + +func (m *Path) GetDenom() string { + if m != nil { + return m.Denom + } + return "" +} + +func (m *Path) GetChannelOrClientId() string { + if m != nil { + return m.ChannelOrClientId + } + return "" +} + +// Quota defines the rate limit thresholds for transfer packets +type Quota struct { + // MaxPercentSend defines the threshold for outflows + // The threshold is defined as a percentage (e.g. 10 indicates 10%) + MaxPercentSend cosmossdk_io_math.Int `protobuf:"bytes,1,opt,name=max_percent_send,json=maxPercentSend,proto3,customtype=cosmossdk.io/math.Int" json:"max_percent_send"` + // MaxPercentSend defines the threshold for inflows + // The threshold is defined as a percentage (e.g. 
10 indicates 10%) + MaxPercentRecv cosmossdk_io_math.Int `protobuf:"bytes,2,opt,name=max_percent_recv,json=maxPercentRecv,proto3,customtype=cosmossdk.io/math.Int" json:"max_percent_recv"` + // DurationHours specifies the number of hours before the rate limit + // is reset (e.g. 24 indicates that the rate limit is reset each day) + DurationHours uint64 `protobuf:"varint,3,opt,name=duration_hours,json=durationHours,proto3" json:"duration_hours,omitempty"` +} + +func (m *Quota) Reset() { *m = Quota{} } +func (m *Quota) String() string { return proto.CompactTextString(m) } +func (*Quota) ProtoMessage() {} +func (*Quota) Descriptor() ([]byte, []int) { + return fileDescriptor_bf22d2adece00654, []int{1} +} +func (m *Quota) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Quota) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Quota.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Quota) XXX_Merge(src proto.Message) { + xxx_messageInfo_Quota.Merge(m, src) +} +func (m *Quota) XXX_Size() int { + return m.Size() +} +func (m *Quota) XXX_DiscardUnknown() { + xxx_messageInfo_Quota.DiscardUnknown(m) +} + +var xxx_messageInfo_Quota proto.InternalMessageInfo + +func (m *Quota) GetDurationHours() uint64 { + if m != nil { + return m.DurationHours + } + return 0 +} + +// Flow tracks all the inflows and outflows of a channel. +type Flow struct { + // Inflow defines the total amount of inbound transfers for the given + // rate limit in the current window + Inflow cosmossdk_io_math.Int `protobuf:"bytes,1,opt,name=inflow,proto3,customtype=cosmossdk.io/math.Int" json:"inflow"` + // Outflow defines the total amount of outbound transfers for the given + // rate limit in the current window + Outflow cosmossdk_io_math.Int `protobuf:"bytes,2,opt,name=outflow,proto3,customtype=cosmossdk.io/math.Int" json:"outflow"` + // ChannelValue stores the total supply of the denom at the start of + // the rate limit. 
This is used as the denominator when checking + // the rate limit threshold + // The ChannelValue is fixed for the duration of the rate limit window + ChannelValue cosmossdk_io_math.Int `protobuf:"bytes,3,opt,name=channel_value,json=channelValue,proto3,customtype=cosmossdk.io/math.Int" json:"channel_value"` +} + +func (m *Flow) Reset() { *m = Flow{} } +func (m *Flow) String() string { return proto.CompactTextString(m) } +func (*Flow) ProtoMessage() {} +func (*Flow) Descriptor() ([]byte, []int) { + return fileDescriptor_bf22d2adece00654, []int{2} +} +func (m *Flow) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Flow) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Flow.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Flow) XXX_Merge(src proto.Message) { + xxx_messageInfo_Flow.Merge(m, src) +} +func (m *Flow) XXX_Size() int { + return m.Size() +} +func (m *Flow) XXX_DiscardUnknown() { + xxx_messageInfo_Flow.DiscardUnknown(m) +} + +var xxx_messageInfo_Flow proto.InternalMessageInfo + +// RateLimit stores all the context about a given rate limit, including +// the relevant denom and channel, rate limit thresholds, and current +// progress towards the limits +type RateLimit struct { + Path *Path `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` + Quota *Quota `protobuf:"bytes,2,opt,name=quota,proto3" json:"quota,omitempty"` + Flow *Flow `protobuf:"bytes,3,opt,name=flow,proto3" json:"flow,omitempty"` +} + +func (m *RateLimit) Reset() { *m = RateLimit{} } +func (m *RateLimit) String() string { return proto.CompactTextString(m) } +func (*RateLimit) ProtoMessage() {} +func (*RateLimit) Descriptor() ([]byte, []int) { + return fileDescriptor_bf22d2adece00654, []int{3} +} +func (m *RateLimit) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RateLimit) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_RateLimit.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *RateLimit) XXX_Merge(src proto.Message) { + xxx_messageInfo_RateLimit.Merge(m, src) +} +func (m *RateLimit) XXX_Size() int { + return m.Size() +} +func (m *RateLimit) XXX_DiscardUnknown() { + xxx_messageInfo_RateLimit.DiscardUnknown(m) +} + +var xxx_messageInfo_RateLimit proto.InternalMessageInfo + +func (m *RateLimit) GetPath() *Path { + if m != nil { + return m.Path + } + return nil +} + +func (m *RateLimit) GetQuota() *Quota { + if m != nil { + return m.Quota + } + return nil +} + +func (m *RateLimit) GetFlow() *Flow { + if m != nil { + return m.Flow + } + return nil +} + +// WhitelistedAddressPair represents a sender-receiver combo that is +// not subject to rate limit restrictions +type WhitelistedAddressPair struct { + Sender string `protobuf:"bytes,1,opt,name=sender,proto3" json:"sender,omitempty"` + Receiver string `protobuf:"bytes,2,opt,name=receiver,proto3" json:"receiver,omitempty"` +} + +func (m *WhitelistedAddressPair) Reset() { *m = WhitelistedAddressPair{} } +func (m *WhitelistedAddressPair) String() string { return proto.CompactTextString(m) } +func (*WhitelistedAddressPair) ProtoMessage() {} +func (*WhitelistedAddressPair) Descriptor() ([]byte, []int) { + return fileDescriptor_bf22d2adece00654, []int{4} +} +func (m 
*WhitelistedAddressPair) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *WhitelistedAddressPair) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_WhitelistedAddressPair.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *WhitelistedAddressPair) XXX_Merge(src proto.Message) { + xxx_messageInfo_WhitelistedAddressPair.Merge(m, src) +} +func (m *WhitelistedAddressPair) XXX_Size() int { + return m.Size() +} +func (m *WhitelistedAddressPair) XXX_DiscardUnknown() { + xxx_messageInfo_WhitelistedAddressPair.DiscardUnknown(m) +} + +var xxx_messageInfo_WhitelistedAddressPair proto.InternalMessageInfo + +func (m *WhitelistedAddressPair) GetSender() string { + if m != nil { + return m.Sender + } + return "" +} + +func (m *WhitelistedAddressPair) GetReceiver() string { + if m != nil { + return m.Receiver + } + return "" +} + +// HourEpoch is the epoch type. +type HourEpoch struct { + EpochNumber uint64 `protobuf:"varint,1,opt,name=epoch_number,json=epochNumber,proto3" json:"epoch_number,omitempty"` + Duration time.Duration `protobuf:"bytes,2,opt,name=duration,proto3,stdduration" json:"duration,omitempty"` + EpochStartTime time.Time `protobuf:"bytes,3,opt,name=epoch_start_time,json=epochStartTime,proto3,stdtime" json:"epoch_start_time"` + EpochStartHeight int64 `protobuf:"varint,4,opt,name=epoch_start_height,json=epochStartHeight,proto3" json:"epoch_start_height,omitempty"` +} + +func (m *HourEpoch) Reset() { *m = HourEpoch{} } +func (m *HourEpoch) String() string { return proto.CompactTextString(m) } +func (*HourEpoch) ProtoMessage() {} +func (*HourEpoch) Descriptor() ([]byte, []int) { + return fileDescriptor_bf22d2adece00654, []int{5} +} +func (m *HourEpoch) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HourEpoch) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_HourEpoch.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *HourEpoch) XXX_Merge(src proto.Message) { + xxx_messageInfo_HourEpoch.Merge(m, src) +} +func (m *HourEpoch) XXX_Size() int { + return m.Size() +} +func (m *HourEpoch) XXX_DiscardUnknown() { + xxx_messageInfo_HourEpoch.DiscardUnknown(m) +} + +var xxx_messageInfo_HourEpoch proto.InternalMessageInfo + +func (m *HourEpoch) GetEpochNumber() uint64 { + if m != nil { + return m.EpochNumber + } + return 0 +} + +func (m *HourEpoch) GetDuration() time.Duration { + if m != nil { + return m.Duration + } + return 0 +} + +func (m *HourEpoch) GetEpochStartTime() time.Time { + if m != nil { + return m.EpochStartTime + } + return time.Time{} +} + +func (m *HourEpoch) GetEpochStartHeight() int64 { + if m != nil { + return m.EpochStartHeight + } + return 0 +} + +func init() { + proto.RegisterEnum("ibc.applications.rate_limiting.v1.PacketDirection", PacketDirection_name, PacketDirection_value) + proto.RegisterType((*Path)(nil), "ibc.applications.rate_limiting.v1.Path") + proto.RegisterType((*Quota)(nil), "ibc.applications.rate_limiting.v1.Quota") + proto.RegisterType((*Flow)(nil), "ibc.applications.rate_limiting.v1.Flow") + proto.RegisterType((*RateLimit)(nil), "ibc.applications.rate_limiting.v1.RateLimit") + proto.RegisterType((*WhitelistedAddressPair)(nil), 
"ibc.applications.rate_limiting.v1.WhitelistedAddressPair") + proto.RegisterType((*HourEpoch)(nil), "ibc.applications.rate_limiting.v1.HourEpoch") +} + +func init() { + proto.RegisterFile("ibc/applications/rate_limiting/v1/rate_limiting.proto", fileDescriptor_bf22d2adece00654) +} + +var fileDescriptor_bf22d2adece00654 = []byte{ + // 709 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x54, 0xcf, 0x4f, 0xd4, 0x40, + 0x14, 0xde, 0xc2, 0x82, 0x30, 0xcb, 0x8f, 0x75, 0x82, 0x64, 0xdd, 0xc4, 0x2e, 0x6c, 0x62, 0xdc, + 0x18, 0x68, 0x05, 0x43, 0x8c, 0x31, 0x9a, 0xb0, 0xb0, 0x0a, 0x11, 0x71, 0x2d, 0x08, 0x89, 0x97, + 0x66, 0x76, 0x3a, 0xb4, 0x13, 0xda, 0x4e, 0x9d, 0x4e, 0x17, 0x38, 0x7b, 0xf1, 0xc8, 0xd1, 0xbb, + 0xff, 0x85, 0x27, 0x2f, 0x26, 0x1c, 0x39, 0x1a, 0x0f, 0x68, 0xe0, 0xe6, 0x5f, 0x61, 0x66, 0xda, + 0x22, 0xe0, 0x81, 0xf5, 0x36, 0xf3, 0xde, 0xf7, 0x7d, 0xf3, 0xe6, 0xcd, 0xf7, 0x06, 0x2c, 0xd0, + 0x0e, 0x36, 0x51, 0x14, 0xf9, 0x14, 0x23, 0x41, 0x59, 0x18, 0x9b, 0x1c, 0x09, 0x62, 0xfb, 0x34, + 0xa0, 0x82, 0x86, 0xae, 0xd9, 0x9d, 0xbb, 0x1c, 0x30, 0x22, 0xce, 0x04, 0x83, 0xd3, 0xb4, 0x83, + 0x8d, 0x8b, 0x34, 0xe3, 0x32, 0xaa, 0x3b, 0x57, 0x9d, 0x70, 0x99, 0xcb, 0x14, 0xda, 0x94, 0xab, + 0x94, 0x58, 0xd5, 0x5d, 0xc6, 0x5c, 0x9f, 0x98, 0x6a, 0xd7, 0x49, 0x76, 0x4c, 0x27, 0xe1, 0x4a, + 0x21, 0xcb, 0xd7, 0xae, 0xe6, 0x05, 0x0d, 0x48, 0x2c, 0x50, 0x10, 0xa5, 0x80, 0xfa, 0x2b, 0x50, + 0x6c, 0x23, 0xe1, 0xc1, 0x09, 0x30, 0xe0, 0x90, 0x90, 0x05, 0x15, 0x6d, 0x4a, 0x6b, 0x0c, 0x5b, + 0xe9, 0x06, 0x9a, 0x60, 0x02, 0x7b, 0x28, 0x0c, 0x89, 0x6f, 0x33, 0x6e, 0x63, 0x9f, 0x92, 0x50, + 0xd8, 0xd4, 0xa9, 0xf4, 0x29, 0xd0, 0xcd, 0x2c, 0xf7, 0x9a, 0x2f, 0xa9, 0xcc, 0xaa, 0x53, 0xff, + 0xaa, 0x81, 0x81, 0x37, 0x09, 0x13, 0x08, 0xbe, 0x00, 0xe5, 0x00, 0xed, 0xdb, 0x11, 0xe1, 0x58, + 0x92, 0x62, 0x12, 0x3a, 0xa9, 0x76, 0xf3, 0xce, 0xd1, 0x49, 0xad, 0xf0, 0xe3, 0xa4, 0x76, 0x0b, + 0xb3, 0x38, 0x60, 0x71, 0xec, 0xec, 0x1a, 0x94, 0x99, 0x01, 0x12, 0x9e, 0xb1, 0x1a, 0x0a, 0x6b, + 0x2c, 0x40, 0xfb, 0xed, 0x94, 0xb5, 0x41, 0x42, 0xe7, 0xaa, 0x10, 0x27, 0xb8, 0x9b, 0x9e, 0xff, + 0x1f, 0x42, 0x16, 0xc1, 0x5d, 0x78, 0x17, 0x8c, 0xe5, 0xdd, 0xb1, 0x3d, 0x96, 0xf0, 0xb8, 0xd2, + 0x3f, 0xa5, 0x35, 0x8a, 0xd6, 0x68, 0x1e, 0x5d, 0x91, 0xc1, 0xfa, 0x17, 0x0d, 0x14, 0x9f, 0xfb, + 0x6c, 0x0f, 0x2e, 0x80, 0x41, 0x1a, 0xee, 0xf8, 0x6c, 0xaf, 0xb7, 0xba, 0x33, 0x30, 0x7c, 0x04, + 0x6e, 0xb0, 0x44, 0x28, 0x5e, 0x4f, 0x65, 0xe6, 0x68, 0xd8, 0x04, 0xa3, 0x79, 0xb3, 0xbb, 0xc8, + 0x4f, 0x88, 0x2a, 0xef, 0x5a, 0xfa, 0x48, 0xc6, 0xd9, 0x92, 0x94, 0xfa, 0x37, 0x0d, 0x0c, 0x5b, + 0x48, 0x90, 0x35, 0xe9, 0x1c, 0xf8, 0x04, 0x14, 0x23, 0x24, 0x3c, 0x55, 0x7f, 0x69, 0xfe, 0x9e, + 0x71, 0xad, 0xcb, 0x0c, 0xe9, 0x05, 0x4b, 0x91, 0xe0, 0x33, 0x30, 0xf0, 0x5e, 0xbe, 0xa4, 0xba, + 0x45, 0x69, 0xbe, 0xd1, 0x03, 0x5b, 0xbd, 0xbc, 0x95, 0xd2, 0xe4, 0xe1, 0xaa, 0x09, 0xfd, 0x3d, + 0x1f, 0x2e, 0xbb, 0x6e, 0x29, 0x52, 0x7d, 0x0d, 0x4c, 0x6e, 0x7b, 0x54, 0x10, 0x9f, 0xc6, 0x82, + 0x38, 0x8b, 0x8e, 0xc3, 0x49, 0x1c, 0xb7, 0x11, 0xe5, 0x70, 0x12, 0x0c, 0x4a, 0x2f, 0x11, 0x9e, + 0x39, 0x35, 0xdb, 0xc1, 0x2a, 0x18, 0xe2, 0x04, 0x13, 0xda, 0x25, 0x3c, 0xb3, 0xe7, 0xf9, 0xbe, + 0xfe, 0xa1, 0x0f, 0x0c, 0xcb, 0xc7, 0x6d, 0x45, 0x0c, 0x7b, 0x70, 0x1a, 0x8c, 0x10, 0xb9, 0xb0, + 0xc3, 0x24, 0xe8, 0x64, 0x3a, 0x45, 0xab, 0xa4, 0x62, 0xeb, 0x2a, 0x04, 0xdf, 0x82, 0xa1, 0xdc, + 0x14, 0xd9, 0xf5, 0x6f, 0x1b, 0xe9, 0x24, 0x19, 0xf9, 0x24, 0x19, 0xcb, 0x19, 0xa0, 0xa9, 0xcb, + 0x07, 0xfa, 0x7d, 0x52, 0x83, 0x39, 
0x65, 0x86, 0x05, 0x54, 0x90, 0x20, 0x12, 0x07, 0x9f, 0x7e, + 0xd6, 0x34, 0xeb, 0x5c, 0x0a, 0xae, 0x83, 0x72, 0x7a, 0x72, 0x2c, 0x10, 0x17, 0xb6, 0x9c, 0xc5, + 0xac, 0x3d, 0xd5, 0x7f, 0xe4, 0x37, 0xf3, 0x41, 0x6d, 0x0e, 0x49, 0xfd, 0x43, 0xa9, 0x34, 0xa6, + 0xd8, 0x1b, 0x92, 0x2c, 0xd3, 0x70, 0x06, 0xc0, 0x8b, 0x7a, 0x1e, 0xa1, 0xae, 0x27, 0x2a, 0xc5, + 0x29, 0xad, 0xd1, 0x6f, 0x95, 0xff, 0x62, 0x57, 0x54, 0xfc, 0xfe, 0x63, 0x30, 0xde, 0x46, 0x78, + 0x97, 0x88, 0x65, 0xca, 0x09, 0x56, 0x05, 0x8d, 0x83, 0x52, 0x7b, 0x71, 0xe9, 0x65, 0x6b, 0xd3, + 0xde, 0x68, 0xad, 0x2f, 0x97, 0x0b, 0x17, 0x02, 0x56, 0x6b, 0x69, 0xab, 0xac, 0x55, 0x8b, 0x1f, + 0x3f, 0xeb, 0x85, 0xe6, 0xf6, 0xd1, 0xa9, 0xae, 0x1d, 0x9f, 0xea, 0xda, 0xaf, 0x53, 0x5d, 0x3b, + 0x3c, 0xd3, 0x0b, 0xc7, 0x67, 0x7a, 0xe1, 0xfb, 0x99, 0x5e, 0x78, 0xf7, 0xd4, 0xa5, 0xc2, 0x4b, + 0x3a, 0x06, 0x66, 0x81, 0x99, 0x1a, 0xd4, 0xa4, 0x1d, 0x3c, 0xeb, 0x32, 0xb3, 0x3b, 0xf7, 0xc0, + 0x0c, 0x98, 0x93, 0xf8, 0x24, 0x96, 0x3f, 0x62, 0xfa, 0x13, 0xce, 0x9e, 0xff, 0x84, 0xe2, 0x20, + 0x22, 0x71, 0x67, 0x50, 0xdd, 0xf7, 0xe1, 0x9f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x1e, 0x14, 0x0f, + 0x18, 0x38, 0x05, 0x00, 0x00, +} + +func (m *Path) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Path) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Path) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ChannelOrClientId) > 0 { + i -= len(m.ChannelOrClientId) + copy(dAtA[i:], m.ChannelOrClientId) + i = encodeVarintRateLimiting(dAtA, i, uint64(len(m.ChannelOrClientId))) + i-- + dAtA[i] = 0x12 + } + if len(m.Denom) > 0 { + i -= len(m.Denom) + copy(dAtA[i:], m.Denom) + i = encodeVarintRateLimiting(dAtA, i, uint64(len(m.Denom))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Quota) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Quota) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Quota) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.DurationHours != 0 { + i = encodeVarintRateLimiting(dAtA, i, uint64(m.DurationHours)) + i-- + dAtA[i] = 0x18 + } + { + size := m.MaxPercentRecv.Size() + i -= size + if _, err := m.MaxPercentRecv.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + i = encodeVarintRateLimiting(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size := m.MaxPercentSend.Size() + i -= size + if _, err := m.MaxPercentSend.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + i = encodeVarintRateLimiting(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *Flow) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Flow) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Flow) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size := m.ChannelValue.Size() + i -= 
size + if _, err := m.ChannelValue.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + i = encodeVarintRateLimiting(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size := m.Outflow.Size() + i -= size + if _, err := m.Outflow.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + i = encodeVarintRateLimiting(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size := m.Inflow.Size() + i -= size + if _, err := m.Inflow.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + i = encodeVarintRateLimiting(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *RateLimit) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RateLimit) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RateLimit) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Flow != nil { + { + size, err := m.Flow.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRateLimiting(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.Quota != nil { + { + size, err := m.Quota.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRateLimiting(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Path != nil { + { + size, err := m.Path.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRateLimiting(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *WhitelistedAddressPair) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WhitelistedAddressPair) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *WhitelistedAddressPair) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Receiver) > 0 { + i -= len(m.Receiver) + copy(dAtA[i:], m.Receiver) + i = encodeVarintRateLimiting(dAtA, i, uint64(len(m.Receiver))) + i-- + dAtA[i] = 0x12 + } + if len(m.Sender) > 0 { + i -= len(m.Sender) + copy(dAtA[i:], m.Sender) + i = encodeVarintRateLimiting(dAtA, i, uint64(len(m.Sender))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *HourEpoch) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HourEpoch) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HourEpoch) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.EpochStartHeight != 0 { + i = encodeVarintRateLimiting(dAtA, i, uint64(m.EpochStartHeight)) + i-- + dAtA[i] = 0x20 + } + n4, err4 := github_com_cosmos_gogoproto_types.StdTimeMarshalTo(m.EpochStartTime, dAtA[i-github_com_cosmos_gogoproto_types.SizeOfStdTime(m.EpochStartTime):]) + if err4 != nil { + return 0, err4 + } + i -= n4 + i = encodeVarintRateLimiting(dAtA, i, uint64(n4)) + i-- + dAtA[i] = 0x1a + n5, err5 := 
github_com_cosmos_gogoproto_types.StdDurationMarshalTo(m.Duration, dAtA[i-github_com_cosmos_gogoproto_types.SizeOfStdDuration(m.Duration):]) + if err5 != nil { + return 0, err5 + } + i -= n5 + i = encodeVarintRateLimiting(dAtA, i, uint64(n5)) + i-- + dAtA[i] = 0x12 + if m.EpochNumber != 0 { + i = encodeVarintRateLimiting(dAtA, i, uint64(m.EpochNumber)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func encodeVarintRateLimiting(dAtA []byte, offset int, v uint64) int { + offset -= sovRateLimiting(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *Path) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Denom) + if l > 0 { + n += 1 + l + sovRateLimiting(uint64(l)) + } + l = len(m.ChannelOrClientId) + if l > 0 { + n += 1 + l + sovRateLimiting(uint64(l)) + } + return n +} + +func (m *Quota) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.MaxPercentSend.Size() + n += 1 + l + sovRateLimiting(uint64(l)) + l = m.MaxPercentRecv.Size() + n += 1 + l + sovRateLimiting(uint64(l)) + if m.DurationHours != 0 { + n += 1 + sovRateLimiting(uint64(m.DurationHours)) + } + return n +} + +func (m *Flow) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Inflow.Size() + n += 1 + l + sovRateLimiting(uint64(l)) + l = m.Outflow.Size() + n += 1 + l + sovRateLimiting(uint64(l)) + l = m.ChannelValue.Size() + n += 1 + l + sovRateLimiting(uint64(l)) + return n +} + +func (m *RateLimit) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Path != nil { + l = m.Path.Size() + n += 1 + l + sovRateLimiting(uint64(l)) + } + if m.Quota != nil { + l = m.Quota.Size() + n += 1 + l + sovRateLimiting(uint64(l)) + } + if m.Flow != nil { + l = m.Flow.Size() + n += 1 + l + sovRateLimiting(uint64(l)) + } + return n +} + +func (m *WhitelistedAddressPair) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Sender) + if l > 0 { + n += 1 + l + sovRateLimiting(uint64(l)) + } + l = len(m.Receiver) + if l > 0 { + n += 1 + l + sovRateLimiting(uint64(l)) + } + return n +} + +func (m *HourEpoch) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.EpochNumber != 0 { + n += 1 + sovRateLimiting(uint64(m.EpochNumber)) + } + l = github_com_cosmos_gogoproto_types.SizeOfStdDuration(m.Duration) + n += 1 + l + sovRateLimiting(uint64(l)) + l = github_com_cosmos_gogoproto_types.SizeOfStdTime(m.EpochStartTime) + n += 1 + l + sovRateLimiting(uint64(l)) + if m.EpochStartHeight != 0 { + n += 1 + sovRateLimiting(uint64(m.EpochStartHeight)) + } + return n +} + +func sovRateLimiting(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozRateLimiting(x uint64) (n int) { + return sovRateLimiting(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *Path) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRateLimiting + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Path: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Path: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch 
fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Denom", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRateLimiting + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRateLimiting + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthRateLimiting + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Denom = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ChannelOrClientId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRateLimiting + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRateLimiting + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthRateLimiting + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ChannelOrClientId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRateLimiting(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRateLimiting + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Quota) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRateLimiting + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Quota: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Quota: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxPercentSend", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRateLimiting + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRateLimiting + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthRateLimiting + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.MaxPercentSend.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxPercentRecv", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRateLimiting + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << 
shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRateLimiting + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthRateLimiting + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.MaxPercentRecv.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DurationHours", wireType) + } + m.DurationHours = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRateLimiting + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.DurationHours |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipRateLimiting(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRateLimiting + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Flow) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRateLimiting + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Flow: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Flow: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Inflow", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRateLimiting + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRateLimiting + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthRateLimiting + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Inflow.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Outflow", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRateLimiting + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRateLimiting + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthRateLimiting + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Outflow.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ChannelValue", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRateLimiting + } + if iNdEx >= l { + 
return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRateLimiting + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthRateLimiting + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ChannelValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRateLimiting(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRateLimiting + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RateLimit) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRateLimiting + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RateLimit: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RateLimit: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRateLimiting + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRateLimiting + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRateLimiting + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Path == nil { + m.Path = &Path{} + } + if err := m.Path.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Quota", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRateLimiting + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRateLimiting + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRateLimiting + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Quota == nil { + m.Quota = &Quota{} + } + if err := m.Quota.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Flow", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRateLimiting + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRateLimiting + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRateLimiting + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if 
m.Flow == nil { + m.Flow = &Flow{} + } + if err := m.Flow.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRateLimiting(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRateLimiting + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *WhitelistedAddressPair) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRateLimiting + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WhitelistedAddressPair: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WhitelistedAddressPair: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Sender", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRateLimiting + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRateLimiting + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthRateLimiting + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Sender = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Receiver", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRateLimiting + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRateLimiting + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthRateLimiting + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Receiver = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRateLimiting(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRateLimiting + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HourEpoch) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRateLimiting + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HourEpoch: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: 
HourEpoch: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field EpochNumber", wireType) + } + m.EpochNumber = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRateLimiting + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.EpochNumber |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Duration", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRateLimiting + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRateLimiting + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRateLimiting + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := github_com_cosmos_gogoproto_types.StdDurationUnmarshal(&m.Duration, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EpochStartTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRateLimiting + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRateLimiting + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRateLimiting + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := github_com_cosmos_gogoproto_types.StdTimeUnmarshal(&m.EpochStartTime, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field EpochStartHeight", wireType) + } + m.EpochStartHeight = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRateLimiting + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.EpochStartHeight |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipRateLimiting(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRateLimiting + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipRateLimiting(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowRateLimiting + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowRateLimiting + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowRateLimiting + } + if iNdEx >= l { + return 0, 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthRateLimiting + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupRateLimiting + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthRateLimiting + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthRateLimiting = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowRateLimiting = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupRateLimiting = fmt.Errorf("proto: unexpected end of group") +) diff --git a/modules/apps/rate-limiting/types/ratelimit.go b/modules/apps/rate-limiting/types/ratelimit.go new file mode 100644 index 00000000000..8b587870d0f --- /dev/null +++ b/modules/apps/rate-limiting/types/ratelimit.go @@ -0,0 +1,19 @@ +package types + +import ( + errorsmod "cosmossdk.io/errors" + sdkmath "cosmossdk.io/math" + + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" +) + +func (r *RateLimit) UpdateFlow(direction PacketDirection, amount sdkmath.Int) error { + switch direction { + case PACKET_SEND: + return r.Flow.AddOutflow(amount, *r.Quota) + case PACKET_RECV: + return r.Flow.AddInflow(amount, *r.Quota) + default: + return errorsmod.Wrapf(sdkerrors.ErrInvalidRequest, "invalid packet direction (%s)", direction.String()) + } +} diff --git a/modules/apps/rate-limiting/types/ratelimit_test.go b/modules/apps/rate-limiting/types/ratelimit_test.go new file mode 100644 index 00000000000..7cb0465cf89 --- /dev/null +++ b/modules/apps/rate-limiting/types/ratelimit_test.go @@ -0,0 +1,20 @@ +package types_test + +import ( + "strings" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/cosmos/ibc-go/v10/modules/apps/rate-limiting/types" +) + +func TestToLowerOnPacketDirection(t *testing.T) { + send := types.PACKET_SEND + lower := strings.ToLower(send.String()) + require.Equal(t, "packet_send", lower) + + recv := types.PACKET_RECV + lower = strings.ToLower(recv.String()) + require.Equal(t, "packet_recv", lower) +} diff --git a/modules/apps/rate-limiting/types/tx.pb.go b/modules/apps/rate-limiting/types/tx.pb.go new file mode 100644 index 00000000000..69264c1eb0d --- /dev/null +++ b/modules/apps/rate-limiting/types/tx.pb.go @@ -0,0 +1,2241 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: ibc/applications/rate_limiting/v1/tx.proto + +package types + +import ( + context "context" + cosmossdk_io_math "cosmossdk.io/math" + fmt "fmt" + _ "github.com/cosmos/cosmos-proto" + _ "github.com/cosmos/cosmos-sdk/types/msgservice" + _ "github.com/cosmos/cosmos-sdk/types/tx/amino" + _ "github.com/cosmos/gogoproto/gogoproto" + grpc1 "github.com/cosmos/gogoproto/grpc" + proto "github.com/cosmos/gogoproto/proto" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
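A minimal sketch of how UpdateFlow above is meant to be driven from packet-handling code. The concrete path, quota, and amounts are illustrative, and it assumes that Flow.AddInflow/AddOutflow (defined elsewhere in this change set) record the amount against the running flow and return an error once the quota would be exceeded.

package main

import (
	"fmt"

	sdkmath "cosmossdk.io/math"

	"github.com/cosmos/ibc-go/v10/modules/apps/rate-limiting/types"
)

func main() {
	// Illustrative rate limit: 10% in each direction, reset every 24 hours,
	// over a channel whose total denom supply (ChannelValue) is 1000.
	rateLimit := types.RateLimit{
		Path:  &types.Path{Denom: "uatom", ChannelOrClientId: "channel-0"},
		Quota: &types.Quota{MaxPercentSend: sdkmath.NewInt(10), MaxPercentRecv: sdkmath.NewInt(10), DurationHours: 24},
		Flow:  &types.Flow{Inflow: sdkmath.ZeroInt(), Outflow: sdkmath.ZeroInt(), ChannelValue: sdkmath.NewInt(1000)},
	}

	// On a send packet, UpdateFlow dispatches to Flow.AddOutflow with the quota.
	// An error here is assumed to mean the transfer would breach the quota.
	if err := rateLimit.UpdateFlow(types.PACKET_SEND, sdkmath.NewInt(50)); err != nil {
		fmt.Println("packet rejected:", err)
		return
	}

	// If the call succeeded, the running outflow has been updated.
	fmt.Println("outflow is now", rateLimit.Flow.Outflow)
}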
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// Gov tx to add a new rate limit +type MsgAddRateLimit struct { + // signer defines the x/gov module account address or other authority signing the message + Signer string `protobuf:"bytes,1,opt,name=signer,proto3" json:"signer,omitempty"` + // Denom for the rate limit, as it appears on the rate limited chain + // When rate limiting a non-native token, this will be an ibc denom + Denom string `protobuf:"bytes,2,opt,name=denom,proto3" json:"denom,omitempty"` + // ChannelId for the rate limit, on the side of the rate limited chain + ChannelOrClientId string `protobuf:"bytes,3,opt,name=channel_or_client_id,json=channelOrClientId,proto3" json:"channel_or_client_id,omitempty"` + // MaxPercentSend defines the threshold for outflows + // The threshold is defined as a percentage (e.g. 10 indicates 10%) + MaxPercentSend cosmossdk_io_math.Int `protobuf:"bytes,4,opt,name=max_percent_send,json=maxPercentSend,proto3,customtype=cosmossdk.io/math.Int" json:"max_percent_send"` + // MaxPercentSend defines the threshold for inflows + // The threshold is defined as a percentage (e.g. 10 indicates 10%) + MaxPercentRecv cosmossdk_io_math.Int `protobuf:"bytes,5,opt,name=max_percent_recv,json=maxPercentRecv,proto3,customtype=cosmossdk.io/math.Int" json:"max_percent_recv"` + // DurationHours specifies the number of hours before the rate limit + // is reset (e.g. 24 indicates that the rate limit is reset each day) + DurationHours uint64 `protobuf:"varint,6,opt,name=duration_hours,json=durationHours,proto3" json:"duration_hours,omitempty"` +} + +func (m *MsgAddRateLimit) Reset() { *m = MsgAddRateLimit{} } +func (m *MsgAddRateLimit) String() string { return proto.CompactTextString(m) } +func (*MsgAddRateLimit) ProtoMessage() {} +func (*MsgAddRateLimit) Descriptor() ([]byte, []int) { + return fileDescriptor_5bbfc0abda512109, []int{0} +} +func (m *MsgAddRateLimit) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgAddRateLimit) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgAddRateLimit.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgAddRateLimit) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgAddRateLimit.Merge(m, src) +} +func (m *MsgAddRateLimit) XXX_Size() int { + return m.Size() +} +func (m *MsgAddRateLimit) XXX_DiscardUnknown() { + xxx_messageInfo_MsgAddRateLimit.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgAddRateLimit proto.InternalMessageInfo + +func (m *MsgAddRateLimit) GetSigner() string { + if m != nil { + return m.Signer + } + return "" +} + +func (m *MsgAddRateLimit) GetDenom() string { + if m != nil { + return m.Denom + } + return "" +} + +func (m *MsgAddRateLimit) GetChannelOrClientId() string { + if m != nil { + return m.ChannelOrClientId + } + return "" +} + +func (m *MsgAddRateLimit) GetDurationHours() uint64 { + if m != nil { + return m.DurationHours + } + return 0 +} + +// MsgAddRateLimitResponse is the return type for AddRateLimit function. 
+type MsgAddRateLimitResponse struct { +} + +func (m *MsgAddRateLimitResponse) Reset() { *m = MsgAddRateLimitResponse{} } +func (m *MsgAddRateLimitResponse) String() string { return proto.CompactTextString(m) } +func (*MsgAddRateLimitResponse) ProtoMessage() {} +func (*MsgAddRateLimitResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_5bbfc0abda512109, []int{1} +} +func (m *MsgAddRateLimitResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgAddRateLimitResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgAddRateLimitResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgAddRateLimitResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgAddRateLimitResponse.Merge(m, src) +} +func (m *MsgAddRateLimitResponse) XXX_Size() int { + return m.Size() +} +func (m *MsgAddRateLimitResponse) XXX_DiscardUnknown() { + xxx_messageInfo_MsgAddRateLimitResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgAddRateLimitResponse proto.InternalMessageInfo + +// Gov tx to update an existing rate limit +type MsgUpdateRateLimit struct { + // signer defines the x/gov module account address or other authority signing the message + Signer string `protobuf:"bytes,1,opt,name=signer,proto3" json:"signer,omitempty"` + // Denom for the rate limit, as it appears on the rate limited chain + // When rate limiting a non-native token, this will be an ibc denom + Denom string `protobuf:"bytes,2,opt,name=denom,proto3" json:"denom,omitempty"` + // ChannelId for the rate limit, on the side of the rate limited chain + ChannelOrClientId string `protobuf:"bytes,3,opt,name=channel_or_client_id,json=channelOrClientId,proto3" json:"channel_or_client_id,omitempty"` + // MaxPercentSend defines the threshold for outflows + // The threshold is defined as a percentage (e.g. 10 indicates 10%) + MaxPercentSend cosmossdk_io_math.Int `protobuf:"bytes,4,opt,name=max_percent_send,json=maxPercentSend,proto3,customtype=cosmossdk.io/math.Int" json:"max_percent_send"` + // MaxPercentSend defines the threshold for inflows + // The threshold is defined as a percentage (e.g. 10 indicates 10%) + MaxPercentRecv cosmossdk_io_math.Int `protobuf:"bytes,5,opt,name=max_percent_recv,json=maxPercentRecv,proto3,customtype=cosmossdk.io/math.Int" json:"max_percent_recv"` + // DurationHours specifies the number of hours before the rate limit + // is reset (e.g. 
24 indicates that the rate limit is reset each day) + DurationHours uint64 `protobuf:"varint,6,opt,name=duration_hours,json=durationHours,proto3" json:"duration_hours,omitempty"` +} + +func (m *MsgUpdateRateLimit) Reset() { *m = MsgUpdateRateLimit{} } +func (m *MsgUpdateRateLimit) String() string { return proto.CompactTextString(m) } +func (*MsgUpdateRateLimit) ProtoMessage() {} +func (*MsgUpdateRateLimit) Descriptor() ([]byte, []int) { + return fileDescriptor_5bbfc0abda512109, []int{2} +} +func (m *MsgUpdateRateLimit) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgUpdateRateLimit) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgUpdateRateLimit.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgUpdateRateLimit) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgUpdateRateLimit.Merge(m, src) +} +func (m *MsgUpdateRateLimit) XXX_Size() int { + return m.Size() +} +func (m *MsgUpdateRateLimit) XXX_DiscardUnknown() { + xxx_messageInfo_MsgUpdateRateLimit.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgUpdateRateLimit proto.InternalMessageInfo + +func (m *MsgUpdateRateLimit) GetSigner() string { + if m != nil { + return m.Signer + } + return "" +} + +func (m *MsgUpdateRateLimit) GetDenom() string { + if m != nil { + return m.Denom + } + return "" +} + +func (m *MsgUpdateRateLimit) GetChannelOrClientId() string { + if m != nil { + return m.ChannelOrClientId + } + return "" +} + +func (m *MsgUpdateRateLimit) GetDurationHours() uint64 { + if m != nil { + return m.DurationHours + } + return 0 +} + +// MsgUpdateRateLimitResponse is the return type for UpdateRateLimit. 
+type MsgUpdateRateLimitResponse struct { +} + +func (m *MsgUpdateRateLimitResponse) Reset() { *m = MsgUpdateRateLimitResponse{} } +func (m *MsgUpdateRateLimitResponse) String() string { return proto.CompactTextString(m) } +func (*MsgUpdateRateLimitResponse) ProtoMessage() {} +func (*MsgUpdateRateLimitResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_5bbfc0abda512109, []int{3} +} +func (m *MsgUpdateRateLimitResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgUpdateRateLimitResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgUpdateRateLimitResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgUpdateRateLimitResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgUpdateRateLimitResponse.Merge(m, src) +} +func (m *MsgUpdateRateLimitResponse) XXX_Size() int { + return m.Size() +} +func (m *MsgUpdateRateLimitResponse) XXX_DiscardUnknown() { + xxx_messageInfo_MsgUpdateRateLimitResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgUpdateRateLimitResponse proto.InternalMessageInfo + +// Gov tx to remove a rate limit +type MsgRemoveRateLimit struct { + // signer defines the x/gov module account address or other authority signing the message + Signer string `protobuf:"bytes,1,opt,name=signer,proto3" json:"signer,omitempty"` + // Denom for the rate limit, as it appears on the rate limited chain + // When rate limiting a non-native token, this will be an ibc denom + Denom string `protobuf:"bytes,2,opt,name=denom,proto3" json:"denom,omitempty"` + // ChannelId for the rate limit, on the side of the rate limited chain + ChannelOrClientId string `protobuf:"bytes,3,opt,name=channel_or_client_id,json=channelOrClientId,proto3" json:"channel_or_client_id,omitempty"` +} + +func (m *MsgRemoveRateLimit) Reset() { *m = MsgRemoveRateLimit{} } +func (m *MsgRemoveRateLimit) String() string { return proto.CompactTextString(m) } +func (*MsgRemoveRateLimit) ProtoMessage() {} +func (*MsgRemoveRateLimit) Descriptor() ([]byte, []int) { + return fileDescriptor_5bbfc0abda512109, []int{4} +} +func (m *MsgRemoveRateLimit) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgRemoveRateLimit) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgRemoveRateLimit.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgRemoveRateLimit) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgRemoveRateLimit.Merge(m, src) +} +func (m *MsgRemoveRateLimit) XXX_Size() int { + return m.Size() +} +func (m *MsgRemoveRateLimit) XXX_DiscardUnknown() { + xxx_messageInfo_MsgRemoveRateLimit.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgRemoveRateLimit proto.InternalMessageInfo + +func (m *MsgRemoveRateLimit) GetSigner() string { + if m != nil { + return m.Signer + } + return "" +} + +func (m *MsgRemoveRateLimit) GetDenom() string { + if m != nil { + return m.Denom + } + return "" +} + +func (m *MsgRemoveRateLimit) GetChannelOrClientId() string { + if m != nil { + return m.ChannelOrClientId + } + return "" +} + +// MsgRemoveRateLimitResponse is the response type for RemoveRateLimit +type MsgRemoveRateLimitResponse struct { +} + +func (m *MsgRemoveRateLimitResponse) Reset() { *m = MsgRemoveRateLimitResponse{} } 
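Note (editorial sketch, not part of this PR): the governance messages defined in this file are meant to be submitted through x/gov. A minimal sketch of how a proposal author might construct one follows; the denom, channel identifier, thresholds, helper name, and package name are illustrative assumptions only.

package docs // illustrative only

import (
	sdkmath "cosmossdk.io/math"

	types "github.com/cosmos/ibc-go/v10/modules/apps/rate-limiting/types"
)

// exampleAddRateLimitMsg builds a MsgAddRateLimit that caps ICS20 flows on
// channel-0 at 10% of channel value in each direction, resetting every 24 hours.
// The signer is assumed to be the x/gov module account (authority) address.
func exampleAddRateLimitMsg(govAuthority string) *types.MsgAddRateLimit {
	return &types.MsgAddRateLimit{
		Signer:            govAuthority,
		Denom:             "uatom",            // native denom, or an ibc/... denom for non-native tokens
		ChannelOrClientId: "channel-0",
		MaxPercentSend:    sdkmath.NewInt(10), // 10% outflow threshold
		MaxPercentRecv:    sdkmath.NewInt(10), // 10% inflow threshold
		DurationHours:     24,                 // quota window resets daily
	}
}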
+func (m *MsgRemoveRateLimitResponse) String() string { return proto.CompactTextString(m) } +func (*MsgRemoveRateLimitResponse) ProtoMessage() {} +func (*MsgRemoveRateLimitResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_5bbfc0abda512109, []int{5} +} +func (m *MsgRemoveRateLimitResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgRemoveRateLimitResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgRemoveRateLimitResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgRemoveRateLimitResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgRemoveRateLimitResponse.Merge(m, src) +} +func (m *MsgRemoveRateLimitResponse) XXX_Size() int { + return m.Size() +} +func (m *MsgRemoveRateLimitResponse) XXX_DiscardUnknown() { + xxx_messageInfo_MsgRemoveRateLimitResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgRemoveRateLimitResponse proto.InternalMessageInfo + +// Gov tx to reset the flow on a rate limit +type MsgResetRateLimit struct { + // signer defines the x/gov module account address or other authority signing the message + Signer string `protobuf:"bytes,1,opt,name=signer,proto3" json:"signer,omitempty"` + // Denom for the rate limit, as it appears on the rate limited chain + // When rate limiting a non-native token, this will be an ibc denom + Denom string `protobuf:"bytes,2,opt,name=denom,proto3" json:"denom,omitempty"` + // ChannelId for the rate limit, on the side of the rate limited chain + ChannelOrClientId string `protobuf:"bytes,3,opt,name=channel_or_client_id,json=channelOrClientId,proto3" json:"channel_or_client_id,omitempty"` +} + +func (m *MsgResetRateLimit) Reset() { *m = MsgResetRateLimit{} } +func (m *MsgResetRateLimit) String() string { return proto.CompactTextString(m) } +func (*MsgResetRateLimit) ProtoMessage() {} +func (*MsgResetRateLimit) Descriptor() ([]byte, []int) { + return fileDescriptor_5bbfc0abda512109, []int{6} +} +func (m *MsgResetRateLimit) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgResetRateLimit) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgResetRateLimit.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgResetRateLimit) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgResetRateLimit.Merge(m, src) +} +func (m *MsgResetRateLimit) XXX_Size() int { + return m.Size() +} +func (m *MsgResetRateLimit) XXX_DiscardUnknown() { + xxx_messageInfo_MsgResetRateLimit.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgResetRateLimit proto.InternalMessageInfo + +func (m *MsgResetRateLimit) GetSigner() string { + if m != nil { + return m.Signer + } + return "" +} + +func (m *MsgResetRateLimit) GetDenom() string { + if m != nil { + return m.Denom + } + return "" +} + +func (m *MsgResetRateLimit) GetChannelOrClientId() string { + if m != nil { + return m.ChannelOrClientId + } + return "" +} + +// MsgResetRateLimitResponse is the response type for ResetRateLimit. 
+type MsgResetRateLimitResponse struct { +} + +func (m *MsgResetRateLimitResponse) Reset() { *m = MsgResetRateLimitResponse{} } +func (m *MsgResetRateLimitResponse) String() string { return proto.CompactTextString(m) } +func (*MsgResetRateLimitResponse) ProtoMessage() {} +func (*MsgResetRateLimitResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_5bbfc0abda512109, []int{7} +} +func (m *MsgResetRateLimitResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgResetRateLimitResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgResetRateLimitResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgResetRateLimitResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgResetRateLimitResponse.Merge(m, src) +} +func (m *MsgResetRateLimitResponse) XXX_Size() int { + return m.Size() +} +func (m *MsgResetRateLimitResponse) XXX_DiscardUnknown() { + xxx_messageInfo_MsgResetRateLimitResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgResetRateLimitResponse proto.InternalMessageInfo + +func init() { + proto.RegisterType((*MsgAddRateLimit)(nil), "ibc.applications.rate_limiting.v1.MsgAddRateLimit") + proto.RegisterType((*MsgAddRateLimitResponse)(nil), "ibc.applications.rate_limiting.v1.MsgAddRateLimitResponse") + proto.RegisterType((*MsgUpdateRateLimit)(nil), "ibc.applications.rate_limiting.v1.MsgUpdateRateLimit") + proto.RegisterType((*MsgUpdateRateLimitResponse)(nil), "ibc.applications.rate_limiting.v1.MsgUpdateRateLimitResponse") + proto.RegisterType((*MsgRemoveRateLimit)(nil), "ibc.applications.rate_limiting.v1.MsgRemoveRateLimit") + proto.RegisterType((*MsgRemoveRateLimitResponse)(nil), "ibc.applications.rate_limiting.v1.MsgRemoveRateLimitResponse") + proto.RegisterType((*MsgResetRateLimit)(nil), "ibc.applications.rate_limiting.v1.MsgResetRateLimit") + proto.RegisterType((*MsgResetRateLimitResponse)(nil), "ibc.applications.rate_limiting.v1.MsgResetRateLimitResponse") +} + +func init() { + proto.RegisterFile("ibc/applications/rate_limiting/v1/tx.proto", fileDescriptor_5bbfc0abda512109) +} + +var fileDescriptor_5bbfc0abda512109 = []byte{ + // 627 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x55, 0xcf, 0x6b, 0x13, 0x41, + 0x14, 0xce, 0xf6, 0x17, 0x38, 0x68, 0x6b, 0x97, 0x48, 0x37, 0xdb, 0xba, 0xad, 0x01, 0xa1, 0x46, + 0xbb, 0xd3, 0x56, 0xbd, 0x14, 0x7b, 0x68, 0x3d, 0x68, 0xc1, 0xa2, 0x6c, 0x11, 0xc1, 0xcb, 0xb2, + 0xd9, 0x19, 0x36, 0x83, 0x99, 0x99, 0x65, 0x67, 0xb2, 0xc4, 0x8b, 0x88, 0x08, 0x82, 0x27, 0xff, + 0x0b, 0xaf, 0x39, 0x78, 0x11, 0xff, 0x81, 0x1e, 0x8b, 0x27, 0xf1, 0x50, 0x24, 0x39, 0xe4, 0xe6, + 0xc9, 0x3f, 0x40, 0xf6, 0x47, 0x42, 0x32, 0x41, 0x9a, 0xe6, 0xd4, 0x83, 0x97, 0x90, 0xf9, 0xde, + 0xfb, 0xde, 0x7e, 0x6f, 0xbe, 0x99, 0x79, 0xa0, 0x42, 0xaa, 0x3e, 0xf4, 0xc2, 0xb0, 0x4e, 0x7c, + 0x4f, 0x12, 0xce, 0x04, 0x8c, 0x3c, 0x89, 0xdd, 0x3a, 0xa1, 0x44, 0x12, 0x16, 0xc0, 0x78, 0x0b, + 0xca, 0xa6, 0x1d, 0x46, 0x5c, 0x72, 0xfd, 0x06, 0xa9, 0xfa, 0xf6, 0x60, 0xae, 0x3d, 0x94, 0x6b, + 0xc7, 0x5b, 0xe6, 0xa2, 0x47, 0x09, 0xe3, 0x30, 0xfd, 0xcd, 0x58, 0xe6, 0x92, 0xcf, 0x05, 0xe5, + 0x02, 0x52, 0x91, 0x56, 0xa3, 0x22, 0xc8, 0x03, 0xa5, 0x2c, 0xe0, 0xa6, 0x2b, 0x98, 0x2d, 0xf2, + 0x50, 0x31, 0xe0, 0x01, 0xcf, 0xf0, 0xe4, 0x5f, 0x86, 0x96, 0x7f, 0x4f, 0x81, 0x85, 0x43, 0x11, + 0xec, 0x21, 0xe4, 0x78, 
0x12, 0x3f, 0x49, 0x3e, 0xab, 0x6f, 0x82, 0x39, 0x41, 0x02, 0x86, 0x23, + 0x43, 0x5b, 0xd3, 0xd6, 0x2f, 0xed, 0x1b, 0xdf, 0xbf, 0x6c, 0x14, 0xf3, 0x5a, 0x7b, 0x08, 0x45, + 0x58, 0x88, 0x23, 0x19, 0x11, 0x16, 0x38, 0x79, 0x9e, 0x5e, 0x04, 0xb3, 0x08, 0x33, 0x4e, 0x8d, + 0xa9, 0x84, 0xe0, 0x64, 0x0b, 0x1d, 0x82, 0xa2, 0x5f, 0xf3, 0x18, 0xc3, 0x75, 0x97, 0x47, 0xae, + 0x5f, 0x27, 0x98, 0x49, 0x97, 0x20, 0x63, 0x3a, 0x4d, 0x5a, 0xcc, 0x63, 0x4f, 0xa3, 0x87, 0x69, + 0xe4, 0x00, 0xe9, 0x8f, 0xc0, 0x55, 0xea, 0x35, 0xdd, 0x10, 0x47, 0x7e, 0x92, 0x2a, 0x30, 0x43, + 0xc6, 0x4c, 0x2a, 0xe1, 0xfa, 0xf1, 0xe9, 0x6a, 0xe1, 0xe7, 0xe9, 0xea, 0xb5, 0x4c, 0x86, 0x40, + 0xaf, 0x6c, 0xc2, 0x21, 0xf5, 0x64, 0xcd, 0x3e, 0x60, 0xd2, 0x99, 0xa7, 0x5e, 0xf3, 0x59, 0xc6, + 0x3a, 0xc2, 0x6c, 0xa4, 0x50, 0x84, 0xfd, 0xd8, 0x98, 0x3d, 0x67, 0x21, 0x07, 0xfb, 0xb1, 0x7e, + 0x13, 0xcc, 0xa3, 0x46, 0x94, 0x3a, 0xe3, 0xd6, 0x78, 0x23, 0x12, 0xc6, 0xdc, 0x9a, 0xb6, 0x3e, + 0xe3, 0x5c, 0xe9, 0xa1, 0x8f, 0x13, 0x70, 0xe7, 0xd6, 0xbb, 0x6e, 0xab, 0x92, 0x6f, 0xc6, 0xc7, + 0x6e, 0xab, 0x52, 0x4a, 0x4c, 0x4c, 0x3d, 0x84, 0xca, 0xe6, 0x96, 0x4b, 0x60, 0x49, 0x81, 0x1c, + 0x2c, 0x42, 0xce, 0x04, 0x2e, 0xff, 0x99, 0x02, 0xfa, 0xa1, 0x08, 0x9e, 0x87, 0xc8, 0x93, 0xf8, + 0xbf, 0x1d, 0x93, 0xdb, 0x71, 0x47, 0xb1, 0x63, 0x65, 0xc8, 0x0e, 0x65, 0x7f, 0xcb, 0x2b, 0xc0, + 0x1c, 0x45, 0xfb, 0xa6, 0x7c, 0xd3, 0x52, 0x53, 0x1c, 0x4c, 0x79, 0x7c, 0x01, 0x4c, 0x39, 0xa3, + 0x37, 0x45, 0x66, 0xde, 0x9b, 0x82, 0xf6, 0x7b, 0xfb, 0xaa, 0x81, 0xc5, 0x34, 0x2c, 0xb0, 0xbc, + 0x00, 0xad, 0xdd, 0x56, 0x5a, 0x5b, 0x56, 0x5a, 0x1b, 0x54, 0x59, 0x5e, 0x06, 0xa5, 0x11, 0xb0, + 0xd7, 0xd8, 0xf6, 0xe7, 0x19, 0x30, 0x7d, 0x28, 0x02, 0xfd, 0x0d, 0xb8, 0x3c, 0xf4, 0xb2, 0x6d, + 0xdb, 0x67, 0x3e, 0xb7, 0xb6, 0x72, 0x3b, 0xcd, 0x9d, 0xf3, 0x73, 0x7a, 0x3a, 0xf4, 0x0f, 0x1a, + 0x58, 0x50, 0xaf, 0xf3, 0xfd, 0xf1, 0xea, 0x29, 0x34, 0x73, 0x77, 0x22, 0xda, 0x90, 0x12, 0xf5, + 0x0c, 0x8f, 0xa9, 0x44, 0xa1, 0x8d, 0xab, 0xe4, 0x1f, 0x87, 0x4e, 0x7f, 0xaf, 0x81, 0x79, 0xe5, + 0xc4, 0xdd, 0x1b, 0xb7, 0xe2, 0x20, 0xcb, 0x7c, 0x30, 0x09, 0xab, 0x27, 0xc3, 0x9c, 0x7d, 0xdb, + 0x6d, 0x55, 0xb4, 0xfd, 0x17, 0xc7, 0x6d, 0x4b, 0x3b, 0x69, 0x5b, 0xda, 0xaf, 0xb6, 0xa5, 0x7d, + 0xea, 0x58, 0x85, 0x93, 0x8e, 0x55, 0xf8, 0xd1, 0xb1, 0x0a, 0x2f, 0x77, 0x03, 0x22, 0x6b, 0x8d, + 0xaa, 0xed, 0x73, 0x9a, 0x0f, 0x52, 0x48, 0xaa, 0xfe, 0x46, 0xc0, 0x61, 0xbc, 0xb5, 0x09, 0x29, + 0x47, 0x8d, 0x3a, 0x16, 0xc9, 0x98, 0xcf, 0xc6, 0xfb, 0x46, 0x7f, 0xbc, 0xcb, 0xd7, 0x21, 0x16, + 0xd5, 0xb9, 0x74, 0xbe, 0xde, 0xfd, 0x1b, 0x00, 0x00, 0xff, 0xff, 0x13, 0x14, 0xda, 0xc9, 0x0d, + 0x08, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// MsgClient is the client API for Msg service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. 
+type MsgClient interface { + // Gov tx to add a new rate limit + AddRateLimit(ctx context.Context, in *MsgAddRateLimit, opts ...grpc.CallOption) (*MsgAddRateLimitResponse, error) + // Gov tx to update an existing rate limit + UpdateRateLimit(ctx context.Context, in *MsgUpdateRateLimit, opts ...grpc.CallOption) (*MsgUpdateRateLimitResponse, error) + // Gov tx to remove a rate limit + RemoveRateLimit(ctx context.Context, in *MsgRemoveRateLimit, opts ...grpc.CallOption) (*MsgRemoveRateLimitResponse, error) + // Gov tx to reset the flow on a rate limit + ResetRateLimit(ctx context.Context, in *MsgResetRateLimit, opts ...grpc.CallOption) (*MsgResetRateLimitResponse, error) +} + +type msgClient struct { + cc grpc1.ClientConn +} + +func NewMsgClient(cc grpc1.ClientConn) MsgClient { + return &msgClient{cc} +} + +func (c *msgClient) AddRateLimit(ctx context.Context, in *MsgAddRateLimit, opts ...grpc.CallOption) (*MsgAddRateLimitResponse, error) { + out := new(MsgAddRateLimitResponse) + err := c.cc.Invoke(ctx, "/ibc.applications.rate_limiting.v1.Msg/AddRateLimit", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *msgClient) UpdateRateLimit(ctx context.Context, in *MsgUpdateRateLimit, opts ...grpc.CallOption) (*MsgUpdateRateLimitResponse, error) { + out := new(MsgUpdateRateLimitResponse) + err := c.cc.Invoke(ctx, "/ibc.applications.rate_limiting.v1.Msg/UpdateRateLimit", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *msgClient) RemoveRateLimit(ctx context.Context, in *MsgRemoveRateLimit, opts ...grpc.CallOption) (*MsgRemoveRateLimitResponse, error) { + out := new(MsgRemoveRateLimitResponse) + err := c.cc.Invoke(ctx, "/ibc.applications.rate_limiting.v1.Msg/RemoveRateLimit", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *msgClient) ResetRateLimit(ctx context.Context, in *MsgResetRateLimit, opts ...grpc.CallOption) (*MsgResetRateLimitResponse, error) { + out := new(MsgResetRateLimitResponse) + err := c.cc.Invoke(ctx, "/ibc.applications.rate_limiting.v1.Msg/ResetRateLimit", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// MsgServer is the server API for Msg service. +type MsgServer interface { + // Gov tx to add a new rate limit + AddRateLimit(context.Context, *MsgAddRateLimit) (*MsgAddRateLimitResponse, error) + // Gov tx to update an existing rate limit + UpdateRateLimit(context.Context, *MsgUpdateRateLimit) (*MsgUpdateRateLimitResponse, error) + // Gov tx to remove a rate limit + RemoveRateLimit(context.Context, *MsgRemoveRateLimit) (*MsgRemoveRateLimitResponse, error) + // Gov tx to reset the flow on a rate limit + ResetRateLimit(context.Context, *MsgResetRateLimit) (*MsgResetRateLimitResponse, error) +} + +// UnimplementedMsgServer can be embedded to have forward compatible implementations. 
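Note (editorial sketch, not part of this PR): a hedged sketch of how this Msg service might be wired into the module using the RegisterMsgServer helper defined later in this file; the package name, function name, and the assumption that the keeper (or a wrapper around it) satisfies MsgServer are illustrative.

package ratelimiting // illustrative module wiring only

import (
	"github.com/cosmos/cosmos-sdk/types/module"

	types "github.com/cosmos/ibc-go/v10/modules/apps/rate-limiting/types"
)

// registerMsgService hooks a MsgServer implementation into the app's Msg router,
// following the usual Cosmos SDK AppModule.RegisterServices pattern.
func registerMsgService(cfg module.Configurator, srv types.MsgServer) {
	types.RegisterMsgServer(cfg.MsgServer(), srv)
}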
+type UnimplementedMsgServer struct { +} + +func (*UnimplementedMsgServer) AddRateLimit(ctx context.Context, req *MsgAddRateLimit) (*MsgAddRateLimitResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method AddRateLimit not implemented") +} +func (*UnimplementedMsgServer) UpdateRateLimit(ctx context.Context, req *MsgUpdateRateLimit) (*MsgUpdateRateLimitResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method UpdateRateLimit not implemented") +} +func (*UnimplementedMsgServer) RemoveRateLimit(ctx context.Context, req *MsgRemoveRateLimit) (*MsgRemoveRateLimitResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method RemoveRateLimit not implemented") +} +func (*UnimplementedMsgServer) ResetRateLimit(ctx context.Context, req *MsgResetRateLimit) (*MsgResetRateLimitResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ResetRateLimit not implemented") +} + +func RegisterMsgServer(s grpc1.Server, srv MsgServer) { + s.RegisterService(&_Msg_serviceDesc, srv) +} + +func _Msg_AddRateLimit_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MsgAddRateLimit) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MsgServer).AddRateLimit(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/ibc.applications.rate_limiting.v1.Msg/AddRateLimit", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MsgServer).AddRateLimit(ctx, req.(*MsgAddRateLimit)) + } + return interceptor(ctx, in, info, handler) +} + +func _Msg_UpdateRateLimit_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MsgUpdateRateLimit) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MsgServer).UpdateRateLimit(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/ibc.applications.rate_limiting.v1.Msg/UpdateRateLimit", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MsgServer).UpdateRateLimit(ctx, req.(*MsgUpdateRateLimit)) + } + return interceptor(ctx, in, info, handler) +} + +func _Msg_RemoveRateLimit_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MsgRemoveRateLimit) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MsgServer).RemoveRateLimit(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/ibc.applications.rate_limiting.v1.Msg/RemoveRateLimit", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MsgServer).RemoveRateLimit(ctx, req.(*MsgRemoveRateLimit)) + } + return interceptor(ctx, in, info, handler) +} + +func _Msg_ResetRateLimit_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MsgResetRateLimit) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MsgServer).ResetRateLimit(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/ibc.applications.rate_limiting.v1.Msg/ResetRateLimit", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + 
return srv.(MsgServer).ResetRateLimit(ctx, req.(*MsgResetRateLimit)) + } + return interceptor(ctx, in, info, handler) +} + +var Msg_serviceDesc = _Msg_serviceDesc +var _Msg_serviceDesc = grpc.ServiceDesc{ + ServiceName: "ibc.applications.rate_limiting.v1.Msg", + HandlerType: (*MsgServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "AddRateLimit", + Handler: _Msg_AddRateLimit_Handler, + }, + { + MethodName: "UpdateRateLimit", + Handler: _Msg_UpdateRateLimit_Handler, + }, + { + MethodName: "RemoveRateLimit", + Handler: _Msg_RemoveRateLimit_Handler, + }, + { + MethodName: "ResetRateLimit", + Handler: _Msg_ResetRateLimit_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "ibc/applications/rate_limiting/v1/tx.proto", +} + +func (m *MsgAddRateLimit) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgAddRateLimit) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgAddRateLimit) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.DurationHours != 0 { + i = encodeVarintTx(dAtA, i, uint64(m.DurationHours)) + i-- + dAtA[i] = 0x30 + } + { + size := m.MaxPercentRecv.Size() + i -= size + if _, err := m.MaxPercentRecv.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + i = encodeVarintTx(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + { + size := m.MaxPercentSend.Size() + i -= size + if _, err := m.MaxPercentSend.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + i = encodeVarintTx(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + if len(m.ChannelOrClientId) > 0 { + i -= len(m.ChannelOrClientId) + copy(dAtA[i:], m.ChannelOrClientId) + i = encodeVarintTx(dAtA, i, uint64(len(m.ChannelOrClientId))) + i-- + dAtA[i] = 0x1a + } + if len(m.Denom) > 0 { + i -= len(m.Denom) + copy(dAtA[i:], m.Denom) + i = encodeVarintTx(dAtA, i, uint64(len(m.Denom))) + i-- + dAtA[i] = 0x12 + } + if len(m.Signer) > 0 { + i -= len(m.Signer) + copy(dAtA[i:], m.Signer) + i = encodeVarintTx(dAtA, i, uint64(len(m.Signer))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *MsgAddRateLimitResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgAddRateLimitResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgAddRateLimitResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *MsgUpdateRateLimit) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgUpdateRateLimit) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgUpdateRateLimit) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.DurationHours != 0 { + i = encodeVarintTx(dAtA, i, uint64(m.DurationHours)) + i-- + dAtA[i] = 0x30 + } + { + size := m.MaxPercentRecv.Size() + i -= size + if _, err := m.MaxPercentRecv.MarshalTo(dAtA[i:]); err != nil { + return 0, 
err + } + i = encodeVarintTx(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + { + size := m.MaxPercentSend.Size() + i -= size + if _, err := m.MaxPercentSend.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + i = encodeVarintTx(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + if len(m.ChannelOrClientId) > 0 { + i -= len(m.ChannelOrClientId) + copy(dAtA[i:], m.ChannelOrClientId) + i = encodeVarintTx(dAtA, i, uint64(len(m.ChannelOrClientId))) + i-- + dAtA[i] = 0x1a + } + if len(m.Denom) > 0 { + i -= len(m.Denom) + copy(dAtA[i:], m.Denom) + i = encodeVarintTx(dAtA, i, uint64(len(m.Denom))) + i-- + dAtA[i] = 0x12 + } + if len(m.Signer) > 0 { + i -= len(m.Signer) + copy(dAtA[i:], m.Signer) + i = encodeVarintTx(dAtA, i, uint64(len(m.Signer))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *MsgUpdateRateLimitResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgUpdateRateLimitResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgUpdateRateLimitResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *MsgRemoveRateLimit) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgRemoveRateLimit) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgRemoveRateLimit) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ChannelOrClientId) > 0 { + i -= len(m.ChannelOrClientId) + copy(dAtA[i:], m.ChannelOrClientId) + i = encodeVarintTx(dAtA, i, uint64(len(m.ChannelOrClientId))) + i-- + dAtA[i] = 0x1a + } + if len(m.Denom) > 0 { + i -= len(m.Denom) + copy(dAtA[i:], m.Denom) + i = encodeVarintTx(dAtA, i, uint64(len(m.Denom))) + i-- + dAtA[i] = 0x12 + } + if len(m.Signer) > 0 { + i -= len(m.Signer) + copy(dAtA[i:], m.Signer) + i = encodeVarintTx(dAtA, i, uint64(len(m.Signer))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *MsgRemoveRateLimitResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgRemoveRateLimitResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgRemoveRateLimitResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *MsgResetRateLimit) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgResetRateLimit) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgResetRateLimit) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ChannelOrClientId) > 0 { + i -= len(m.ChannelOrClientId) + copy(dAtA[i:], m.ChannelOrClientId) + i = 
encodeVarintTx(dAtA, i, uint64(len(m.ChannelOrClientId))) + i-- + dAtA[i] = 0x1a + } + if len(m.Denom) > 0 { + i -= len(m.Denom) + copy(dAtA[i:], m.Denom) + i = encodeVarintTx(dAtA, i, uint64(len(m.Denom))) + i-- + dAtA[i] = 0x12 + } + if len(m.Signer) > 0 { + i -= len(m.Signer) + copy(dAtA[i:], m.Signer) + i = encodeVarintTx(dAtA, i, uint64(len(m.Signer))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *MsgResetRateLimitResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgResetRateLimitResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgResetRateLimitResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func encodeVarintTx(dAtA []byte, offset int, v uint64) int { + offset -= sovTx(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *MsgAddRateLimit) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Signer) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + l = len(m.Denom) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + l = len(m.ChannelOrClientId) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + l = m.MaxPercentSend.Size() + n += 1 + l + sovTx(uint64(l)) + l = m.MaxPercentRecv.Size() + n += 1 + l + sovTx(uint64(l)) + if m.DurationHours != 0 { + n += 1 + sovTx(uint64(m.DurationHours)) + } + return n +} + +func (m *MsgAddRateLimitResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *MsgUpdateRateLimit) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Signer) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + l = len(m.Denom) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + l = len(m.ChannelOrClientId) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + l = m.MaxPercentSend.Size() + n += 1 + l + sovTx(uint64(l)) + l = m.MaxPercentRecv.Size() + n += 1 + l + sovTx(uint64(l)) + if m.DurationHours != 0 { + n += 1 + sovTx(uint64(m.DurationHours)) + } + return n +} + +func (m *MsgUpdateRateLimitResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *MsgRemoveRateLimit) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Signer) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + l = len(m.Denom) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + l = len(m.ChannelOrClientId) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + return n +} + +func (m *MsgRemoveRateLimitResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *MsgResetRateLimit) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Signer) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + l = len(m.Denom) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + l = len(m.ChannelOrClientId) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + return n +} + +func (m *MsgResetRateLimitResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func sovTx(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozTx(x uint64) (n int) { + return sovTx(uint64((x << 1) ^ uint64((int64(x) >> 63)))) 
+} +func (m *MsgAddRateLimit) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgAddRateLimit: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgAddRateLimit: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Signer", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Signer = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Denom", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Denom = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ChannelOrClientId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ChannelOrClientId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxPercentSend", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.MaxPercentSend.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxPercentRecv", wireType) 
+ } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.MaxPercentRecv.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DurationHours", wireType) + } + m.DurationHours = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.DurationHours |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTx(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTx + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgAddRateLimitResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgAddRateLimitResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgAddRateLimitResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipTx(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTx + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgUpdateRateLimit) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgUpdateRateLimit: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgUpdateRateLimit: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Signer", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + 
intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Signer = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Denom", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Denom = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ChannelOrClientId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ChannelOrClientId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxPercentSend", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.MaxPercentSend.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxPercentRecv", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.MaxPercentRecv.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DurationHours", wireType) + } + m.DurationHours = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.DurationHours |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTx(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTx 
+ } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgUpdateRateLimitResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgUpdateRateLimitResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgUpdateRateLimitResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipTx(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTx + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgRemoveRateLimit) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgRemoveRateLimit: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgRemoveRateLimit: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Signer", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Signer = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Denom", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Denom = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ChannelOrClientId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + 
stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ChannelOrClientId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTx(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTx + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgRemoveRateLimitResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgRemoveRateLimitResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgRemoveRateLimitResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipTx(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTx + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgResetRateLimit) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgResetRateLimit: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgResetRateLimit: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Signer", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Signer = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Denom", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return 
ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Denom = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ChannelOrClientId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ChannelOrClientId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTx(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTx + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgResetRateLimitResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgResetRateLimitResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgResetRateLimitResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipTx(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTx + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipTx(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTx + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTx + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTx + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthTx + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupTx + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthTx + } + if depth == 
0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthTx = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowTx = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupTx = fmt.Errorf("proto: unexpected end of group") +) diff --git a/modules/apps/rate-limiting/v2/ibc_middleware.go b/modules/apps/rate-limiting/v2/ibc_middleware.go new file mode 100644 index 00000000000..ea418d3172b --- /dev/null +++ b/modules/apps/rate-limiting/v2/ibc_middleware.go @@ -0,0 +1,122 @@ +package v2 + +import ( + "encoding/json" + + sdk "github.com/cosmos/cosmos-sdk/types" + + "github.com/cosmos/ibc-go/v10/modules/apps/rate-limiting/keeper" + transfertypes "github.com/cosmos/ibc-go/v10/modules/apps/transfer/types" + clienttypes "github.com/cosmos/ibc-go/v10/modules/core/02-client/types" + channeltypes "github.com/cosmos/ibc-go/v10/modules/core/04-channel/types" + channeltypesv2 "github.com/cosmos/ibc-go/v10/modules/core/04-channel/v2/types" + "github.com/cosmos/ibc-go/v10/modules/core/api" +) + +var _ api.IBCModule = (*IBCMiddleware)(nil) + +type IBCMiddleware struct { + app api.IBCModule + keeper keeper.Keeper +} + +func NewIBCMiddleware(k keeper.Keeper, app api.IBCModule) IBCMiddleware { + return IBCMiddleware{ + app: app, + keeper: k, + } +} + +func (im IBCMiddleware) OnSendPacket(ctx sdk.Context, sourceClient string, destinationClient string, sequence uint64, payload channeltypesv2.Payload, signer sdk.AccAddress) error { + packet, err := v2ToV1Packet(payload, sourceClient, destinationClient, sequence) + if err != nil { + im.keeper.Logger(ctx).Error("ICS20 rate limiting OnSendPacket failed to convert v2 packet to v1 packet", "error", err) + return err + } + if err := im.keeper.SendRateLimitedPacket(ctx, packet.SourcePort, packet.SourceChannel, packet.TimeoutHeight, packet.TimeoutTimestamp, packet.Data); err != nil { + im.keeper.Logger(ctx).Error("ICS20 packet send was denied", "error", err) + return err + } + return im.app.OnSendPacket(ctx, sourceClient, destinationClient, sequence, payload, signer) +} + +func (im IBCMiddleware) OnRecvPacket(ctx sdk.Context, sourceClient string, destinationClient string, sequence uint64, payload channeltypesv2.Payload, relayer sdk.AccAddress) channeltypesv2.RecvPacketResult { + packet, err := v2ToV1Packet(payload, sourceClient, destinationClient, sequence) + if err != nil { + im.keeper.Logger(ctx).Error("ICS20 rate limiting OnRecvPacket failed to convert v2 packet to v1 packet", "error", err) + return channeltypesv2.RecvPacketResult{ + Status: channeltypesv2.PacketStatus_Failure, + Acknowledgement: channeltypes.NewErrorAcknowledgement(err).Acknowledgement(), + } + } + // Check if the packet would cause the rate limit to be exceeded, + // and if so, return an ack error + if err := im.keeper.ReceiveRateLimitedPacket(ctx, packet); err != nil { + im.keeper.Logger(ctx).Error("ICS20 packet receive was denied", "error", err) + return channeltypesv2.RecvPacketResult{ + Status: channeltypesv2.PacketStatus_Failure, + Acknowledgement: channeltypes.NewErrorAcknowledgement(err).Acknowledgement(), + } + } + + // If the packet was not rate-limited, pass it down to the Transfer OnRecvPacket callback + return im.app.OnRecvPacket(ctx, sourceClient, destinationClient, sequence, payload, relayer) +} + +func (im IBCMiddleware) OnTimeoutPacket(ctx sdk.Context, sourceClient string, destinationClient string, sequence uint64, payload channeltypesv2.Payload, relayer sdk.AccAddress) error { + packet, err := 
v2ToV1Packet(payload, sourceClient, destinationClient, sequence) + if err != nil { + im.keeper.Logger(ctx).Error("ICS20 rate limiting OnTimeoutPacket failed to convert v2 packet to v1 packet", "error", err) + return err + } + if err := im.keeper.TimeoutRateLimitedPacket(ctx, packet); err != nil { + im.keeper.Logger(ctx).Error("ICS20 RateLimited OnTimeoutPacket failed", "error", err) + return err + } + return im.app.OnTimeoutPacket(ctx, sourceClient, destinationClient, sequence, payload, relayer) +} + +func (im IBCMiddleware) OnAcknowledgementPacket(ctx sdk.Context, sourceClient string, destinationClient string, sequence uint64, acknowledgement []byte, payload channeltypesv2.Payload, relayer sdk.AccAddress) error { + packet, err := v2ToV1Packet(payload, sourceClient, destinationClient, sequence) + if err != nil { + im.keeper.Logger(ctx).Error("ICS20 rate limiting OnAckPacket failed to convert v2 packet to v1 packet", "error", err) + return err + } + if err := im.keeper.AcknowledgeRateLimitedPacket(ctx, packet, acknowledgement); err != nil { + im.keeper.Logger(ctx).Error("ICS20 RateLimited OnAckPacket failed", "error", err) + return err + } + return im.app.OnAcknowledgementPacket(ctx, sourceClient, destinationClient, sequence, acknowledgement, payload, relayer) +} + +// TODO: Something looks off about this, please review carefully +func v2ToV1Packet(payload channeltypesv2.Payload, sourceClient, destinationClient string, sequence uint64) (channeltypes.Packet, error) { + transferRepresentation, err := transfertypes.UnmarshalPacketData(payload.Value, payload.Version, payload.Encoding) + if err != nil { + return channeltypes.Packet{}, err + } + + packetData := transfertypes.FungibleTokenPacketData{ + Denom: transferRepresentation.Token.Denom.Path(), + Amount: transferRepresentation.Token.Amount, + Sender: transferRepresentation.Sender, + Receiver: transferRepresentation.Receiver, + Memo: transferRepresentation.Memo, + } + + packetDataBz, err := json.Marshal(packetData) + if err != nil { + return channeltypes.Packet{}, err + } + + return channeltypes.Packet{ + Sequence: sequence, + SourcePort: payload.SourcePort, + SourceChannel: sourceClient, + DestinationPort: payload.DestinationPort, + DestinationChannel: destinationClient, + Data: packetDataBz, + TimeoutHeight: clienttypes.Height{}, + TimeoutTimestamp: 0, + }, nil +} diff --git a/modules/apps/rate-limiting/v2/ibc_middleware_test.go b/modules/apps/rate-limiting/v2/ibc_middleware_test.go new file mode 100644 index 00000000000..5bce3cb442b --- /dev/null +++ b/modules/apps/rate-limiting/v2/ibc_middleware_test.go @@ -0,0 +1,138 @@ +package v2 // nolint + +import ( + "encoding/json" + "testing" + + "github.com/stretchr/testify/require" + + transfertypes "github.com/cosmos/ibc-go/v10/modules/apps/transfer/types" + channeltypesv2 "github.com/cosmos/ibc-go/v10/modules/core/04-channel/v2/types" +) + +func TestV2ToV1Packet_WithJSONEncoding(t *testing.T) { + payloadValue := transfertypes.FungibleTokenPacketData{ + Denom: "denom", + Amount: "100", + Sender: "sender", + Receiver: "receiver", + Memo: "memo", + } + payloadValueBz, err := transfertypes.MarshalPacketData(payloadValue, transfertypes.V1, transfertypes.EncodingJSON) + require.NoError(t, err) + + payload := channeltypesv2.Payload{ + SourcePort: "sourcePort", + DestinationPort: "destinationPort", + Version: transfertypes.V1, + Encoding: transfertypes.EncodingJSON, + Value: payloadValueBz, + } + + v1Packet, err := v2ToV1Packet(payload, "sourceClient", "destinationClient", 1) + require.NoError(t, err)
+ require.Equal(t, uint64(1), v1Packet.Sequence) + require.Equal(t, payload.SourcePort, v1Packet.SourcePort) + require.Equal(t, "sourceClient", v1Packet.SourceChannel) + require.Equal(t, payload.DestinationPort, v1Packet.DestinationPort) + require.Equal(t, "destinationClient", v1Packet.DestinationChannel) + + var v1PacketData transfertypes.FungibleTokenPacketData + err = json.Unmarshal(v1Packet.Data, &v1PacketData) + require.NoError(t, err) + require.Equal(t, payloadValue, v1PacketData) +} + +func TestV2ToV1Packet_WithABIEncoding(t *testing.T) { + payloadValue := transfertypes.FungibleTokenPacketData{ + Denom: "denom", + Amount: "100", + Sender: "sender", + Receiver: "receiver", + Memo: "memo", + } + + payloadValueBz, err := transfertypes.MarshalPacketData(payloadValue, transfertypes.V1, transfertypes.EncodingABI) + require.NoError(t, err) + + payload := channeltypesv2.Payload{ + SourcePort: "sourcePort", + DestinationPort: "destinationPort", + Version: transfertypes.V1, + Encoding: transfertypes.EncodingABI, + Value: payloadValueBz, + } + + v1Packet, err := v2ToV1Packet(payload, "sourceClient", "destinationClient", 1) + require.NoError(t, err) + require.Equal(t, uint64(1), v1Packet.Sequence) + require.Equal(t, payload.SourcePort, v1Packet.SourcePort) + require.Equal(t, "sourceClient", v1Packet.SourceChannel) + require.Equal(t, payload.DestinationPort, v1Packet.DestinationPort) + require.Equal(t, "destinationClient", v1Packet.DestinationChannel) + + var v1PacketData transfertypes.FungibleTokenPacketData + err = json.Unmarshal(v1Packet.Data, &v1PacketData) + require.NoError(t, err) + require.Equal(t, payloadValue, v1PacketData) +} + +func TestV2ToV1Packet_WithProtobufEncoding(t *testing.T) { + payloadValue := transfertypes.FungibleTokenPacketData{ + Denom: "denom", + Amount: "100", + Sender: "sender", + Receiver: "receiver", + Memo: "memo", + } + + payloadValueBz, err := transfertypes.MarshalPacketData(payloadValue, transfertypes.V1, transfertypes.EncodingProtobuf) + require.NoError(t, err) + + payload := channeltypesv2.Payload{ + SourcePort: "sourcePort", + DestinationPort: "destinationPort", + Version: transfertypes.V1, + Encoding: transfertypes.EncodingProtobuf, + Value: payloadValueBz, + } + + v1Packet, err := v2ToV1Packet(payload, "sourceClient", "destinationClient", 1) + require.NoError(t, err) + require.Equal(t, uint64(1), v1Packet.Sequence) + require.Equal(t, payload.SourcePort, v1Packet.SourcePort) + require.Equal(t, "sourceClient", v1Packet.SourceChannel) + require.Equal(t, payload.DestinationPort, v1Packet.DestinationPort) + require.Equal(t, "destinationClient", v1Packet.DestinationChannel) + + var v1PacketData transfertypes.FungibleTokenPacketData + err = json.Unmarshal(v1Packet.Data, &v1PacketData) + require.NoError(t, err) + require.Equal(t, payloadValue, v1PacketData) +} + +func TestV2ToV1Packet_WithNilPayload(t *testing.T) { + payload := channeltypesv2.Payload{ + SourcePort: "sourcePort", + DestinationPort: "destinationPort", + Version: transfertypes.V1, + Encoding: transfertypes.EncodingABI, + Value: nil, + } + + _, err := v2ToV1Packet(payload, "sourceClient", "destinationClient", 1) + require.Error(t, err) +} + +func TestV2ToV1Packet_WithEmptyPayload(t *testing.T) { + payload := channeltypesv2.Payload{ + SourcePort: "sourcePort", + DestinationPort: "destinationPort", + Version: transfertypes.V1, + Encoding: transfertypes.EncodingABI, + Value: []byte{}, + } + + _, err := v2ToV1Packet(payload, "sourceClient", "destinationClient", 1) + require.Error(t, err) +} diff --git 
a/modules/apps/transfer/ibc_module.go b/modules/apps/transfer/ibc_module.go index 54f6bcd1ed3..4a1d3d401e2 100644 --- a/modules/apps/transfer/ibc_module.go +++ b/modules/apps/transfer/ibc_module.go @@ -28,12 +28,12 @@ var ( // IBCModule implements the ICS26 interface for transfer given the transfer keeper. type IBCModule struct { - keeper keeper.Keeper + keeper *keeper.Keeper } // NewIBCModule creates a new IBCModule given the keeper -func NewIBCModule(k keeper.Keeper) IBCModule { - return IBCModule{ +func NewIBCModule(k *keeper.Keeper) *IBCModule { + return &IBCModule{ keeper: k, } } @@ -80,7 +80,7 @@ func (im IBCModule) OnChanOpenInit( counterparty channeltypes.Counterparty, version string, ) (string, error) { - if err := ValidateTransferChannelParams(ctx, im.keeper, order, portID, channelID); err != nil { + if err := ValidateTransferChannelParams(ctx, *im.keeper, order, portID, channelID); err != nil { return "", err } @@ -106,7 +106,7 @@ func (im IBCModule) OnChanOpenTry( counterparty channeltypes.Counterparty, counterpartyVersion string, ) (string, error) { - if err := ValidateTransferChannelParams(ctx, im.keeper, order, portID, channelID); err != nil { + if err := ValidateTransferChannelParams(ctx, *im.keeper, order, portID, channelID); err != nil { return "", err } @@ -280,3 +280,14 @@ func (im IBCModule) UnmarshalPacketData(ctx sdk.Context, portID string, channelI ftpd, err := types.UnmarshalPacketData(bz, ics20Version, "") return ftpd, ics20Version, err } + +// SetICS4Wrapper sets the ICS4Wrapper. This function may be used after +// the module's initialization to set the middleware which is above this +// module in the IBC application stack. +func (im IBCModule) SetICS4Wrapper(wrapper porttypes.ICS4Wrapper) { + if wrapper == nil { + panic("ICS4Wrapper cannot be nil") + } + + im.keeper.WithICS4Wrapper(wrapper) +} diff --git a/modules/apps/transfer/ibc_module_test.go b/modules/apps/transfer/ibc_module_test.go index d3c5eb1dbe9..9a363a91331 100644 --- a/modules/apps/transfer/ibc_module_test.go +++ b/modules/apps/transfer/ibc_module_test.go @@ -19,7 +19,19 @@ import ( ibctesting "github.com/cosmos/ibc-go/v10/testing" ) -func (suite *TransferTestSuite) TestOnChanOpenInit() { +func (s *TransferTestSuite) TestSetICS4Wrapper() { + transferModule := transfer.NewIBCModule(s.chainA.GetSimApp().TransferKeeper) + + s.Require().Panics(func() { + transferModule.SetICS4Wrapper(nil) + }, "ICS4Wrapper cannot be nil") + + s.Require().NotPanics(func() { + transferModule.SetICS4Wrapper(s.chainA.App.GetIBCKeeper().ChannelKeeper) + }, "ICS4Wrapper can be set to a non-nil value") +} + +func (s *TransferTestSuite) TestOnChanOpenInit() { var ( channel *channeltypes.Channel path *ibctesting.Path @@ -74,9 +86,9 @@ func (suite *TransferTestSuite) TestOnChanOpenInit() { } for _, tc := range testCases { - suite.Run(tc.name, func() { - suite.SetupTest() // reset - path = ibctesting.NewTransferPath(suite.chainA, suite.chainB) + s.Run(tc.name, func() { + s.SetupTest() // reset + path = ibctesting.NewTransferPath(s.chainA, s.chainB) path.SetupConnections() path.EndpointA.ChannelID = ibctesting.FirstChannelID @@ -91,23 +103,23 @@ func (suite *TransferTestSuite) TestOnChanOpenInit() { tc.malleate() // explicitly change fields in channel and testChannel - transferModule := transfer.NewIBCModule(suite.chainA.GetSimApp().TransferKeeper) - version, err := transferModule.OnChanOpenInit(suite.chainA.GetContext(), channel.Ordering, channel.ConnectionHops, + transferModule := 
transfer.NewIBCModule(s.chainA.GetSimApp().TransferKeeper) + version, err := transferModule.OnChanOpenInit(s.chainA.GetContext(), channel.Ordering, channel.ConnectionHops, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, counterparty, channel.Version, ) if tc.expError == nil { - suite.Require().NoError(err) - suite.Require().Equal(tc.expVersion, version) + s.Require().NoError(err) + s.Require().Equal(tc.expVersion, version) } else { - suite.Require().Error(err) - suite.Require().Contains(err.Error(), tc.expError.Error()) + s.Require().Error(err) + s.Require().Contains(err.Error(), tc.expError.Error()) } }) } } -func (suite *TransferTestSuite) TestOnChanOpenTry() { +func (s *TransferTestSuite) TestOnChanOpenTry() { var ( channel *channeltypes.Channel path *ibctesting.Path @@ -153,10 +165,10 @@ func (suite *TransferTestSuite) TestOnChanOpenTry() { } for _, tc := range testCases { - suite.Run(tc.name, func() { - suite.SetupTest() // reset + s.Run(tc.name, func() { + s.SetupTest() // reset - path = ibctesting.NewTransferPath(suite.chainA, suite.chainB) + path = ibctesting.NewTransferPath(s.chainA, s.chainB) path.SetupConnections() path.EndpointA.ChannelID = ibctesting.FirstChannelID @@ -170,26 +182,26 @@ func (suite *TransferTestSuite) TestOnChanOpenTry() { } counterpartyVersion = types.V1 - cbs, ok := suite.chainA.App.GetIBCKeeper().PortKeeper.Route(ibctesting.TransferPort) - suite.Require().True(ok) + cbs, ok := s.chainA.App.GetIBCKeeper().PortKeeper.Route(ibctesting.TransferPort) + s.Require().True(ok) tc.malleate() // explicitly change fields in channel and testChannel - version, err := cbs.OnChanOpenTry(suite.chainA.GetContext(), channel.Ordering, channel.ConnectionHops, + version, err := cbs.OnChanOpenTry(s.chainA.GetContext(), channel.Ordering, channel.ConnectionHops, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, channel.Counterparty, counterpartyVersion, ) if tc.expError == nil { - suite.Require().NoError(err) - suite.Require().Equal(tc.expVersion, version) + s.Require().NoError(err) + s.Require().Equal(tc.expVersion, version) } else { - suite.Require().Error(err) - suite.Require().Contains(err.Error(), tc.expError.Error()) + s.Require().Error(err) + s.Require().Contains(err.Error(), tc.expError.Error()) } }) } } -func (suite *TransferTestSuite) TestOnChanOpenAck() { +func (s *TransferTestSuite) TestOnChanOpenAck() { var counterpartyVersion string testCases := []struct { @@ -210,32 +222,32 @@ func (suite *TransferTestSuite) TestOnChanOpenAck() { } for _, tc := range testCases { - suite.Run(tc.name, func() { - suite.SetupTest() // reset + s.Run(tc.name, func() { + s.SetupTest() // reset - path := ibctesting.NewTransferPath(suite.chainA, suite.chainB) + path := ibctesting.NewTransferPath(s.chainA, s.chainB) path.SetupConnections() path.EndpointA.ChannelID = ibctesting.FirstChannelID counterpartyVersion = types.V1 - cbs, ok := suite.chainA.App.GetIBCKeeper().PortKeeper.Route(ibctesting.TransferPort) - suite.Require().True(ok) + cbs, ok := s.chainA.App.GetIBCKeeper().PortKeeper.Route(ibctesting.TransferPort) + s.Require().True(ok) tc.malleate() // explicitly change fields in channel and testChannel - err := cbs.OnChanOpenAck(suite.chainA.GetContext(), path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointA.Counterparty.ChannelID, counterpartyVersion) + err := cbs.OnChanOpenAck(s.chainA.GetContext(), path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointA.Counterparty.ChannelID, counterpartyVersion) if tc.expError == 
nil { - suite.Require().NoError(err) + s.Require().NoError(err) } else { - suite.Require().Error(err) - suite.Require().Contains(err.Error(), tc.expError.Error()) + s.Require().Error(err) + s.Require().Contains(err.Error(), tc.expError.Error()) } }) } } -func (suite *TransferTestSuite) TestOnRecvPacket() { +func (s *TransferTestSuite) TestOnRecvPacket() { // This test suite mostly covers the top-level logic of the ibc module OnRecvPacket function // The core logic is covered in keeper OnRecvPacket var ( @@ -274,7 +286,7 @@ func (suite *TransferTestSuite) TestOnRecvPacket() { { "failure: receive disabled", func() { - suite.chainB.GetSimApp().TransferKeeper.SetParams(suite.chainB.GetContext(), types.Params{ReceiveEnabled: false}) + s.chainB.GetSimApp().TransferKeeper.SetParams(s.chainB.GetContext(), types.Params{ReceiveEnabled: false}) }, channeltypes.NewErrorAcknowledgement(types.ErrReceiveDisabled), "fungible token transfers to this chain are disabled", @@ -282,10 +294,10 @@ func (suite *TransferTestSuite) TestOnRecvPacket() { } for _, tc := range testCases { - suite.Run(tc.name, func() { - suite.SetupTest() // reset + s.Run(tc.name, func() { + s.SetupTest() // reset - path = ibctesting.NewTransferPath(suite.chainA, suite.chainB) + path = ibctesting.NewTransferPath(s.chainA, s.chainB) path.Setup() token := types.Token{ @@ -295,8 +307,8 @@ func (suite *TransferTestSuite) TestOnRecvPacket() { packetData := types.NewFungibleTokenPacketData( token.Denom.Path(), token.Amount, - suite.chainA.SenderAccount.GetAddress().String(), - suite.chainB.SenderAccount.GetAddress().String(), + s.chainA.SenderAccount.GetAddress().String(), + s.chainB.SenderAccount.GetAddress().String(), "", ) @@ -317,17 +329,17 @@ func (suite *TransferTestSuite) TestOnRecvPacket() { } seq := uint64(1) - packet = channeltypes.NewPacket(packetData.GetBytes(), seq, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, clienttypes.ZeroHeight(), suite.chainA.GetTimeoutTimestamp()) + packet = channeltypes.NewPacket(packetData.GetBytes(), seq, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, clienttypes.ZeroHeight(), s.chainA.GetTimeoutTimestamp()) - ctx := suite.chainB.GetContext() - cbs, ok := suite.chainB.App.GetIBCKeeper().PortKeeper.Route(ibctesting.TransferPort) - suite.Require().True(ok) + ctx := s.chainB.GetContext() + cbs, ok := s.chainB.App.GetIBCKeeper().PortKeeper.Route(ibctesting.TransferPort) + s.Require().True(ok) tc.malleate() // change fields in packet - ack := cbs.OnRecvPacket(ctx, path.EndpointB.GetChannel().Version, packet, suite.chainB.SenderAccount.GetAddress()) + ack := cbs.OnRecvPacket(ctx, path.EndpointB.GetChannel().Version, packet, s.chainB.SenderAccount.GetAddress()) - suite.Require().Equal(tc.expAck, ack) + s.Require().Equal(tc.expAck, ack) expectedEvents := sdk.Events{ sdk.NewEvent( @@ -337,12 +349,12 @@ func (suite *TransferTestSuite) TestOnRecvPacket() { }.ToABCIEvents() expectedEvents = sdk.MarkEventsToIndex(expectedEvents, map[string]struct{}{}) - ibctesting.AssertEvents(&suite.Suite, expectedEvents, ctx.EventManager().Events().ToABCIEvents()) + ibctesting.AssertEvents(&s.Suite, expectedEvents, ctx.EventManager().Events().ToABCIEvents()) }) } } -func (suite *TransferTestSuite) TestOnAcknowledgePacket() { +func (s *TransferTestSuite) TestOnAcknowledgePacket() { var ( path *ibctesting.Path packet channeltypes.Packet @@ -400,10 +412,10 @@ func (suite 
*TransferTestSuite) TestOnAcknowledgePacket() { func() { ack = channeltypes.NewErrorAcknowledgement(ibcerrors.ErrInsufficientFunds).Acknowledgement() - cbs, ok := suite.chainA.App.GetIBCKeeper().PortKeeper.Route(ibctesting.TransferPort) - suite.Require().True(ok) + cbs, ok := s.chainA.App.GetIBCKeeper().PortKeeper.Route(ibctesting.TransferPort) + s.Require().True(ok) - suite.Require().NoError(cbs.OnAcknowledgementPacket(suite.chainA.GetContext(), path.EndpointA.GetChannel().Version, packet, ack, suite.chainA.SenderAccount.GetAddress())) + s.Require().NoError(cbs.OnAcknowledgementPacket(s.chainA.GetContext(), path.EndpointA.GetChannel().Version, packet, ack, s.chainA.SenderAccount.GetAddress())) }, errors.New("unable to unescrow tokens"), false, @@ -426,55 +438,55 @@ func (suite *TransferTestSuite) TestOnAcknowledgePacket() { } for _, tc := range testCases { - suite.Run(tc.name, func() { - suite.SetupTest() // reset + s.Run(tc.name, func() { + s.SetupTest() // reset - path = ibctesting.NewTransferPath(suite.chainA, suite.chainB) + path = ibctesting.NewTransferPath(s.chainA, s.chainB) path.Setup() - timeoutHeight := suite.chainA.GetTimeoutHeight() + timeoutHeight := s.chainA.GetTimeoutHeight() msg := types.NewMsgTransfer( path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, ibctesting.TestCoin, - suite.chainA.SenderAccount.GetAddress().String(), - suite.chainB.SenderAccount.GetAddress().String(), + s.chainA.SenderAccount.GetAddress().String(), + s.chainB.SenderAccount.GetAddress().String(), timeoutHeight, 0, "", ) - res, err := suite.chainA.SendMsgs(msg) - suite.Require().NoError(err) // message committed + res, err := s.chainA.SendMsgs(msg) + s.Require().NoError(err) // message committed packet, err = ibctesting.ParseV1PacketFromEvents(res.Events) - suite.Require().NoError(err) + s.Require().NoError(err) - cbs, ok := suite.chainA.App.GetIBCKeeper().PortKeeper.Route(ibctesting.TransferPort) - suite.Require().True(ok) + cbs, ok := s.chainA.App.GetIBCKeeper().PortKeeper.Route(ibctesting.TransferPort) + s.Require().True(ok) ack = channeltypes.NewResultAcknowledgement([]byte{byte(1)}).Acknowledgement() tc.malleate() // change fields in packet - err = cbs.OnAcknowledgementPacket(suite.chainA.GetContext(), path.EndpointA.GetChannel().Version, packet, ack, suite.chainA.SenderAccount.GetAddress()) + err = cbs.OnAcknowledgementPacket(s.chainA.GetContext(), path.EndpointA.GetChannel().Version, packet, ack, s.chainA.SenderAccount.GetAddress()) if tc.expError == nil { - suite.Require().NoError(err) + s.Require().NoError(err) if tc.expRefund { escrowAddress := types.GetEscrowAddress(packet.GetSourcePort(), packet.GetSourceChannel()) - escrowBalanceAfter := suite.chainA.GetSimApp().BankKeeper.GetBalance(suite.chainA.GetContext(), escrowAddress, sdk.DefaultBondDenom) - suite.Require().Equal(sdkmath.NewInt(0), escrowBalanceAfter.Amount) + escrowBalanceAfter := s.chainA.GetSimApp().BankKeeper.GetBalance(s.chainA.GetContext(), escrowAddress, sdk.DefaultBondDenom) + s.Require().Equal(sdkmath.NewInt(0), escrowBalanceAfter.Amount) } } else { - suite.Require().Error(err) - suite.Require().Contains(err.Error(), tc.expError.Error()) + s.Require().Error(err) + s.Require().Contains(err.Error(), tc.expError.Error()) } }) } } -func (suite *TransferTestSuite) TestOnTimeoutPacket() { +func (s *TransferTestSuite) TestOnTimeoutPacket() { var path *ibctesting.Path var packet channeltypes.Packet @@ -510,61 +522,61 @@ func (suite *TransferTestSuite) TestOnTimeoutPacket() { "already timed-out packet", 
ibctesting.TestCoin, func() { - cbs, ok := suite.chainA.App.GetIBCKeeper().PortKeeper.Route(ibctesting.TransferPort) - suite.Require().True(ok) + cbs, ok := s.chainA.App.GetIBCKeeper().PortKeeper.Route(ibctesting.TransferPort) + s.Require().True(ok) - suite.Require().NoError(cbs.OnTimeoutPacket(suite.chainA.GetContext(), path.EndpointA.GetChannel().Version, packet, suite.chainA.SenderAccount.GetAddress())) + s.Require().NoError(cbs.OnTimeoutPacket(s.chainA.GetContext(), path.EndpointA.GetChannel().Version, packet, s.chainA.SenderAccount.GetAddress())) }, errors.New("unable to unescrow tokens"), }, } for _, tc := range testCases { - suite.Run(tc.name, func() { - suite.SetupTest() // reset + s.Run(tc.name, func() { + s.SetupTest() // reset - path = ibctesting.NewTransferPath(suite.chainA, suite.chainB) + path = ibctesting.NewTransferPath(s.chainA, s.chainB) path.Setup() - timeoutHeight := suite.chainA.GetTimeoutHeight() + timeoutHeight := s.chainA.GetTimeoutHeight() msg := types.NewMsgTransfer( path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, tc.coinsToSendToB, - suite.chainA.SenderAccount.GetAddress().String(), - suite.chainB.SenderAccount.GetAddress().String(), + s.chainA.SenderAccount.GetAddress().String(), + s.chainB.SenderAccount.GetAddress().String(), timeoutHeight, 0, "", ) - res, err := suite.chainA.SendMsgs(msg) - suite.Require().NoError(err) // message committed + res, err := s.chainA.SendMsgs(msg) + s.Require().NoError(err) // message committed packet, err = ibctesting.ParseV1PacketFromEvents(res.Events) - suite.Require().NoError(err) + s.Require().NoError(err) - cbs, ok := suite.chainA.App.GetIBCKeeper().PortKeeper.Route(ibctesting.TransferPort) - suite.Require().True(ok) + cbs, ok := s.chainA.App.GetIBCKeeper().PortKeeper.Route(ibctesting.TransferPort) + s.Require().True(ok) tc.malleate() // change fields in packet - err = cbs.OnTimeoutPacket(suite.chainA.GetContext(), path.EndpointA.GetChannel().Version, packet, suite.chainA.SenderAccount.GetAddress()) + err = cbs.OnTimeoutPacket(s.chainA.GetContext(), path.EndpointA.GetChannel().Version, packet, s.chainA.SenderAccount.GetAddress()) if tc.expError == nil { - suite.Require().NoError(err) + s.Require().NoError(err) escrowAddress := types.GetEscrowAddress(packet.GetSourcePort(), packet.GetSourceChannel()) - escrowBalanceAfter := suite.chainA.GetSimApp().BankKeeper.GetBalance(suite.chainA.GetContext(), escrowAddress, sdk.DefaultBondDenom) - suite.Require().Equal(sdkmath.NewInt(0), escrowBalanceAfter.Amount) + escrowBalanceAfter := s.chainA.GetSimApp().BankKeeper.GetBalance(s.chainA.GetContext(), escrowAddress, sdk.DefaultBondDenom) + s.Require().Equal(sdkmath.NewInt(0), escrowBalanceAfter.Amount) } else { - suite.Require().Error(err) - suite.Require().Contains(err.Error(), tc.expError.Error()) + s.Require().Error(err) + s.Require().Contains(err.Error(), tc.expError.Error()) } }) } } -func (suite *TransferTestSuite) TestPacketDataUnmarshalerInterface() { +func (s *TransferTestSuite) TestPacketDataUnmarshalerInterface() { var ( sender = sdk.AccAddress(secp256k1.GenPrivKey().PubKey().Address()).String() receiver = sdk.AccAddress(secp256k1.GenPrivKey().PubKey().Address()).String() @@ -617,39 +629,39 @@ func (suite *TransferTestSuite) TestPacketDataUnmarshalerInterface() { } for _, tc := range testCases { - suite.Run(tc.name, func() { + s.Run(tc.name, func() { tc.malleate() - path := ibctesting.NewTransferPath(suite.chainA, suite.chainB) + path := ibctesting.NewTransferPath(s.chainA, s.chainB) path.Setup() - transferStack, 
ok := suite.chainA.App.GetIBCKeeper().PortKeeper.Route(types.ModuleName) - suite.Require().True(ok) + transferStack, ok := s.chainA.App.GetIBCKeeper().PortKeeper.Route(types.ModuleName) + s.Require().True(ok) unmarshalerStack, ok := transferStack.(porttypes.PacketDataUnmarshaler) - suite.Require().True(ok) + s.Require().True(ok) - packetData, version, err := unmarshalerStack.UnmarshalPacketData(suite.chainA.GetContext(), path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, data) + packetData, version, err := unmarshalerStack.UnmarshalPacketData(s.chainA.GetContext(), path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, data) if tc.expError == nil { - suite.Require().NoError(err) + s.Require().NoError(err) v2PacketData, ok := packetData.(types.InternalTransferRepresentation) - suite.Require().True(ok) - suite.Require().Equal(path.EndpointA.ChannelConfig.Version, version) + s.Require().True(ok) + s.Require().Equal(path.EndpointA.ChannelConfig.Version, version) if v1PacketData, ok := initialPacketData.(types.FungibleTokenPacketData); ok { // Note: testing of the denom trace parsing/conversion should be done as part of testing internal conversion functions - suite.Require().Equal(v1PacketData.Amount, v2PacketData.Token.Amount) - suite.Require().Equal(v1PacketData.Sender, v2PacketData.Sender) - suite.Require().Equal(v1PacketData.Receiver, v2PacketData.Receiver) - suite.Require().Equal(v1PacketData.Memo, v2PacketData.Memo) + s.Require().Equal(v1PacketData.Amount, v2PacketData.Token.Amount) + s.Require().Equal(v1PacketData.Sender, v2PacketData.Sender) + s.Require().Equal(v1PacketData.Receiver, v2PacketData.Receiver) + s.Require().Equal(v1PacketData.Memo, v2PacketData.Memo) } else { - suite.Require().Equal(initialPacketData.(types.InternalTransferRepresentation), v2PacketData) + s.Require().Equal(initialPacketData.(types.InternalTransferRepresentation), v2PacketData) } } else { - suite.Require().Error(err) - suite.Require().Contains(err.Error(), tc.expError.Error()) + s.Require().Error(err) + s.Require().Contains(err.Error(), tc.expError.Error()) } }) } diff --git a/modules/apps/transfer/internal/types/legacy_denomtrace_test.go b/modules/apps/transfer/internal/types/legacy_denomtrace_test.go index 39a89c33d5a..44a0b7b90f4 100644 --- a/modules/apps/transfer/internal/types/legacy_denomtrace_test.go +++ b/modules/apps/transfer/internal/types/legacy_denomtrace_test.go @@ -19,7 +19,6 @@ func TestDenomTrace_IBCDenom(t *testing.T) { } for _, tc := range testCases { - denom := tc.trace.IBCDenom() require.Equal(t, tc.expDenom, denom, tc.name) } diff --git a/modules/apps/transfer/keeper/export_test.go b/modules/apps/transfer/keeper/export_test.go index 9593ebb8176..3dbedb85bb7 100644 --- a/modules/apps/transfer/keeper/export_test.go +++ b/modules/apps/transfer/keeper/export_test.go @@ -1,6 +1,8 @@ package keeper import ( + "cosmossdk.io/core/address" + sdk "github.com/cosmos/cosmos-sdk/types" internaltypes "github.com/cosmos/ibc-go/v10/modules/apps/transfer/internal/types" @@ -8,17 +10,22 @@ import ( ) // SetDenomTrace is a wrapper around setDenomTrace for testing purposes. -func (k Keeper) SetDenomTrace(ctx sdk.Context, denomTrace internaltypes.DenomTrace) { +func (k *Keeper) SetDenomTrace(ctx sdk.Context, denomTrace internaltypes.DenomTrace) { k.setDenomTrace(ctx, denomTrace) } // IterateDenomTraces is a wrapper around iterateDenomTraces for testing purposes. 
-func (k Keeper) IterateDenomTraces(ctx sdk.Context, cb func(denomTrace internaltypes.DenomTrace) bool) { +func (k *Keeper) IterateDenomTraces(ctx sdk.Context, cb func(denomTrace internaltypes.DenomTrace) bool) { k.iterateDenomTraces(ctx, cb) } +// SetAddressCodec is a setter for the address codec for testing purposes. +func (k *Keeper) SetAddressCodec(addressCodec address.Codec) { + k.addressCodec = addressCodec +} + // GetAllDenomTraces returns the trace information for all the denominations. -func (k Keeper) GetAllDenomTraces(ctx sdk.Context) []internaltypes.DenomTrace { +func (k *Keeper) GetAllDenomTraces(ctx sdk.Context) []internaltypes.DenomTrace { var traces []internaltypes.DenomTrace k.iterateDenomTraces(ctx, func(denomTrace internaltypes.DenomTrace) bool { traces = append(traces, denomTrace) diff --git a/modules/apps/transfer/keeper/genesis.go b/modules/apps/transfer/keeper/genesis.go index 7b309b90897..42c9853c6be 100644 --- a/modules/apps/transfer/keeper/genesis.go +++ b/modules/apps/transfer/keeper/genesis.go @@ -7,7 +7,7 @@ import ( ) // InitGenesis initializes the ibc-transfer state and binds to PortID. -func (k Keeper) InitGenesis(ctx sdk.Context, state types.GenesisState) { +func (k *Keeper) InitGenesis(ctx sdk.Context, state types.GenesisState) { k.SetPort(ctx, state.PortId) for _, denom := range state.Denoms { @@ -25,7 +25,7 @@ func (k Keeper) InitGenesis(ctx sdk.Context, state types.GenesisState) { } // ExportGenesis exports ibc-transfer module's portID and denom trace info into its genesis state. -func (k Keeper) ExportGenesis(ctx sdk.Context) *types.GenesisState { +func (k *Keeper) ExportGenesis(ctx sdk.Context) *types.GenesisState { return &types.GenesisState{ PortId: k.GetPort(ctx), Denoms: k.GetAllDenoms(ctx), diff --git a/modules/apps/transfer/keeper/genesis_test.go b/modules/apps/transfer/keeper/genesis_test.go index 77320dc1885..2808baf5e80 100644 --- a/modules/apps/transfer/keeper/genesis_test.go +++ b/modules/apps/transfer/keeper/genesis_test.go @@ -10,7 +10,7 @@ import ( "github.com/cosmos/ibc-go/v10/modules/apps/transfer/types" ) -func (suite *KeeperTestSuite) TestGenesis() { +func (s *KeeperTestSuite) TestGenesis() { getHop := func(index uint) types.Hop { return types.NewHop("transfer", fmt.Sprintf("channelToChain%d", index)) } @@ -33,27 +33,27 @@ func (suite *KeeperTestSuite) TestGenesis() { for _, traceAndEscrowAmount := range traceAndEscrowAmounts { denom := types.NewDenom("uatom", traceAndEscrowAmount.trace...) 
denoms = append(denoms, denom) - suite.chainA.GetSimApp().TransferKeeper.SetDenom(suite.chainA.GetContext(), denom) + s.chainA.GetSimApp().TransferKeeper.SetDenom(s.chainA.GetContext(), denom) amount, ok := sdkmath.NewIntFromString(traceAndEscrowAmount.escrow) - suite.Require().True(ok) + s.Require().True(ok) escrow := sdk.NewCoin(denom.IBCDenom(), amount) escrows = append(escrows, escrow) - suite.chainA.GetSimApp().TransferKeeper.SetTotalEscrowForDenom(suite.chainA.GetContext(), escrow) + s.chainA.GetSimApp().TransferKeeper.SetTotalEscrowForDenom(s.chainA.GetContext(), escrow) } - genesis := suite.chainA.GetSimApp().TransferKeeper.ExportGenesis(suite.chainA.GetContext()) + genesis := s.chainA.GetSimApp().TransferKeeper.ExportGenesis(s.chainA.GetContext()) - suite.Require().Equal(types.PortID, genesis.PortId) - suite.Require().Equal(denoms.Sort(), genesis.Denoms) - suite.Require().Equal(escrows.Sort(), genesis.TotalEscrowed) + s.Require().Equal(types.PortID, genesis.PortId) + s.Require().Equal(denoms.Sort(), genesis.Denoms) + s.Require().Equal(escrows.Sort(), genesis.TotalEscrowed) - suite.Require().NotPanics(func() { - suite.chainA.GetSimApp().TransferKeeper.InitGenesis(suite.chainA.GetContext(), *genesis) + s.Require().NotPanics(func() { + s.chainA.GetSimApp().TransferKeeper.InitGenesis(s.chainA.GetContext(), *genesis) }) for _, denom := range denoms { - _, found := suite.chainA.GetSimApp().BankKeeper.GetDenomMetaData(suite.chainA.GetContext(), denom.IBCDenom()) - suite.Require().True(found) + _, found := s.chainA.GetSimApp().BankKeeper.GetDenomMetaData(s.chainA.GetContext(), denom.IBCDenom()) + s.Require().True(found) } } diff --git a/modules/apps/transfer/keeper/grpc_query.go b/modules/apps/transfer/keeper/grpc_query.go index b1a6a4d9fc7..c59a9714b1a 100644 --- a/modules/apps/transfer/keeper/grpc_query.go +++ b/modules/apps/transfer/keeper/grpc_query.go @@ -23,7 +23,7 @@ import ( var _ types.QueryServer = (*Keeper)(nil) // Denom implements the Query/Denom gRPC method -func (k Keeper) Denom(goCtx context.Context, req *types.QueryDenomRequest) (*types.QueryDenomResponse, error) { +func (k *Keeper) Denom(goCtx context.Context, req *types.QueryDenomRequest) (*types.QueryDenomResponse, error) { if req == nil { return nil, status.Error(codes.InvalidArgument, "empty request") } @@ -49,7 +49,7 @@ func (k Keeper) Denom(goCtx context.Context, req *types.QueryDenomRequest) (*typ } // Denoms implements the Query/Denoms gRPC method -func (k Keeper) Denoms(ctx context.Context, req *types.QueryDenomsRequest) (*types.QueryDenomsResponse, error) { +func (k *Keeper) Denoms(ctx context.Context, req *types.QueryDenomsRequest) (*types.QueryDenomsResponse, error) { if req == nil { return nil, status.Error(codes.InvalidArgument, "empty request") } @@ -77,7 +77,7 @@ func (k Keeper) Denoms(ctx context.Context, req *types.QueryDenomsRequest) (*typ } // Params implements the Query/Params gRPC method -func (k Keeper) Params(goCtx context.Context, _ *types.QueryParamsRequest) (*types.QueryParamsResponse, error) { +func (k *Keeper) Params(goCtx context.Context, _ *types.QueryParamsRequest) (*types.QueryParamsResponse, error) { ctx := sdk.UnwrapSDKContext(goCtx) params := k.GetParams(ctx) @@ -87,7 +87,7 @@ func (k Keeper) Params(goCtx context.Context, _ *types.QueryParamsRequest) (*typ } // DenomHash implements the Query/DenomHash gRPC method -func (k Keeper) DenomHash(goCtx context.Context, req *types.QueryDenomHashRequest) (*types.QueryDenomHashResponse, error) { +func (k *Keeper) DenomHash(goCtx context.Context, 
req *types.QueryDenomHashRequest) (*types.QueryDenomHashResponse, error) { if req == nil { return nil, status.Error(codes.InvalidArgument, "empty request") } @@ -115,7 +115,7 @@ func (k Keeper) DenomHash(goCtx context.Context, req *types.QueryDenomHashReques } // EscrowAddress implements the EscrowAddress gRPC method -func (k Keeper) EscrowAddress(goCtx context.Context, req *types.QueryEscrowAddressRequest) (*types.QueryEscrowAddressResponse, error) { +func (k *Keeper) EscrowAddress(goCtx context.Context, req *types.QueryEscrowAddressRequest) (*types.QueryEscrowAddressResponse, error) { if req == nil { return nil, status.Error(codes.InvalidArgument, "empty request") } @@ -141,7 +141,7 @@ func (k Keeper) EscrowAddress(goCtx context.Context, req *types.QueryEscrowAddre } // TotalEscrowForDenom implements the TotalEscrowForDenom gRPC method. -func (k Keeper) TotalEscrowForDenom(goCtx context.Context, req *types.QueryTotalEscrowForDenomRequest) (*types.QueryTotalEscrowForDenomResponse, error) { +func (k *Keeper) TotalEscrowForDenom(goCtx context.Context, req *types.QueryTotalEscrowForDenomRequest) (*types.QueryTotalEscrowForDenomResponse, error) { if req == nil { return nil, status.Error(codes.InvalidArgument, "empty request") } diff --git a/modules/apps/transfer/keeper/grpc_query_test.go b/modules/apps/transfer/keeper/grpc_query_test.go index 508f9918ef8..bb459e73530 100644 --- a/modules/apps/transfer/keeper/grpc_query_test.go +++ b/modules/apps/transfer/keeper/grpc_query_test.go @@ -13,7 +13,7 @@ import ( ibctesting "github.com/cosmos/ibc-go/v10/testing" ) -func (suite *KeeperTestSuite) TestQueryDenom() { +func (s *KeeperTestSuite) TestQueryDenom() { var ( req *types.QueryDenomRequest expDenom types.Denom @@ -32,7 +32,7 @@ func (suite *KeeperTestSuite) TestQueryDenom() { types.NewHop("transfer", "channelToA"), //nolint:goconst types.NewHop("transfer", "channelToB"), //nolint:goconst ) - suite.chainA.GetSimApp().TransferKeeper.SetDenom(suite.chainA.GetContext(), expDenom) + s.chainA.GetSimApp().TransferKeeper.SetDenom(s.chainA.GetContext(), expDenom) req = &types.QueryDenomRequest{ Hash: expDenom.IBCDenom(), @@ -48,7 +48,7 @@ func (suite *KeeperTestSuite) TestQueryDenom() { types.NewHop("transfer", "channelToA"), //nolint:goconst types.NewHop("transfer", "channelToB"), //nolint:goconst ) - suite.chainA.GetSimApp().TransferKeeper.SetDenom(suite.chainA.GetContext(), expDenom) + s.chainA.GetSimApp().TransferKeeper.SetDenom(s.chainA.GetContext(), expDenom) req = &types.QueryDenomRequest{ Hash: expDenom.Hash().String(), @@ -83,27 +83,27 @@ func (suite *KeeperTestSuite) TestQueryDenom() { } for _, tc := range testCases { - suite.Run(fmt.Sprintf("Case %s", tc.msg), func() { + s.Run(fmt.Sprintf("Case %s", tc.msg), func() { tc := tc - suite.SetupTest() // reset + s.SetupTest() // reset tc.malleate() - ctx := suite.chainA.GetContext() + ctx := s.chainA.GetContext() - res, err := suite.chainA.GetSimApp().TransferKeeper.Denom(ctx, req) + res, err := s.chainA.GetSimApp().TransferKeeper.Denom(ctx, req) if tc.expErr == nil { - suite.Require().NoError(err) - suite.Require().NotNil(res) - suite.Require().Equal(&expDenom, res.Denom) + s.Require().NoError(err) + s.Require().NotNil(res) + s.Require().Equal(&expDenom, res.Denom) } else { - ibctesting.RequireErrorIsOrContains(suite.T(), err, tc.expErr, err.Error()) + ibctesting.RequireErrorIsOrContains(s.T(), err, tc.expErr, err.Error()) } }) } } -func (suite *KeeperTestSuite) TestQueryDenoms() { +func (s *KeeperTestSuite) TestQueryDenoms() { var ( req 
*types.QueryDenomsRequest expDenoms = types.Denoms(nil) @@ -129,7 +129,7 @@ func (suite *KeeperTestSuite) TestQueryDenoms() { expDenoms = append(expDenoms, types.NewDenom("uatom", types.NewHop("transfer", "channelToA"), types.NewHop("transfer", "channelToB"))) for _, trace := range expDenoms { - suite.chainA.GetSimApp().TransferKeeper.SetDenom(suite.chainA.GetContext(), trace) + s.chainA.GetSimApp().TransferKeeper.SetDenom(s.chainA.GetContext(), trace) } req = &types.QueryDenomsRequest{ @@ -144,33 +144,33 @@ func (suite *KeeperTestSuite) TestQueryDenoms() { } for _, tc := range testCases { - suite.Run(tc.msg, func() { - suite.SetupTest() // reset + s.Run(tc.msg, func() { + s.SetupTest() // reset tc.malleate() - ctx := suite.chainA.GetContext() + ctx := s.chainA.GetContext() - res, err := suite.chainA.GetSimApp().TransferKeeper.Denoms(ctx, req) + res, err := s.chainA.GetSimApp().TransferKeeper.Denoms(ctx, req) if tc.expErr == nil { - suite.Require().NoError(err) - suite.Require().NotNil(res) - suite.Require().Equal(expDenoms.Sort(), res.Denoms) + s.Require().NoError(err) + s.Require().NotNil(res) + s.Require().Equal(expDenoms.Sort(), res.Denoms) } else { - ibctesting.RequireErrorIsOrContains(suite.T(), err, tc.expErr, err.Error()) + ibctesting.RequireErrorIsOrContains(s.T(), err, tc.expErr, err.Error()) } }) } } -func (suite *KeeperTestSuite) TestQueryParams() { - ctx := suite.chainA.GetContext() +func (s *KeeperTestSuite) TestQueryParams() { + ctx := s.chainA.GetContext() expParams := types.DefaultParams() - res, _ := suite.chainA.GetSimApp().TransferKeeper.Params(ctx, &types.QueryParamsRequest{}) - suite.Require().Equal(&expParams, res.Params) + res, _ := s.chainA.GetSimApp().TransferKeeper.Params(ctx, &types.QueryParamsRequest{}) + s.Require().Equal(&expParams, res.Params) } -func (suite *KeeperTestSuite) TestQueryDenomHash() { +func (s *KeeperTestSuite) TestQueryDenomHash() { reqDenom := types.NewDenom("uatom", types.NewHop("transfer", "channelToA"), types.NewHop("transfer", "channelToB")) var ( @@ -209,31 +209,31 @@ func (suite *KeeperTestSuite) TestQueryDenomHash() { } for _, tc := range testCases { - suite.Run(tc.msg, func() { - suite.SetupTest() // reset + s.Run(tc.msg, func() { + s.SetupTest() // reset req = &types.QueryDenomHashRequest{ Trace: reqDenom.Path(), } - suite.chainA.GetSimApp().TransferKeeper.SetDenom(suite.chainA.GetContext(), reqDenom) + s.chainA.GetSimApp().TransferKeeper.SetDenom(s.chainA.GetContext(), reqDenom) tc.malleate() - ctx := suite.chainA.GetContext() + ctx := s.chainA.GetContext() - res, err := suite.chainA.GetSimApp().TransferKeeper.DenomHash(ctx, req) + res, err := s.chainA.GetSimApp().TransferKeeper.DenomHash(ctx, req) if tc.expErr == nil { - suite.Require().NoError(err) - suite.Require().NotNil(res) - suite.Require().Equal(expHash, res.Hash) + s.Require().NoError(err) + s.Require().NotNil(res) + s.Require().Equal(expHash, res.Hash) } else { - ibctesting.RequireErrorIsOrContains(suite.T(), err, tc.expErr, err.Error()) + ibctesting.RequireErrorIsOrContains(s.T(), err, tc.expErr, err.Error()) } }) } } -func (suite *KeeperTestSuite) TestEscrowAddress() { +func (s *KeeperTestSuite) TestEscrowAddress() { var req *types.QueryEscrowAddressRequest var path *ibctesting.Path @@ -285,28 +285,28 @@ func (suite *KeeperTestSuite) TestEscrowAddress() { } for _, tc := range testCases { - suite.Run(fmt.Sprintf("Case %s", tc.msg), func() { - suite.SetupTest() // reset - path = ibctesting.NewTransferPath(suite.chainA, suite.chainB) + s.Run(fmt.Sprintf("Case %s", tc.msg), 
func() { + s.SetupTest() // reset + path = ibctesting.NewTransferPath(s.chainA, s.chainB) path.Setup() tc.malleate() - ctx := suite.chainA.GetContext() + ctx := s.chainA.GetContext() - res, err := suite.chainA.GetSimApp().TransferKeeper.EscrowAddress(ctx, req) + res, err := s.chainA.GetSimApp().TransferKeeper.EscrowAddress(ctx, req) if tc.expErr == nil { - suite.Require().NoError(err) + s.Require().NoError(err) expected := types.GetEscrowAddress(ibctesting.TransferPort, path.EndpointA.ChannelID).String() - suite.Require().Equal(expected, res.EscrowAddress) + s.Require().Equal(expected, res.EscrowAddress) } else { - ibctesting.RequireErrorIsOrContains(suite.T(), err, tc.expErr, err.Error()) + ibctesting.RequireErrorIsOrContains(s.T(), err, tc.expErr, err.Error()) } }) } } -func (suite *KeeperTestSuite) TestTotalEscrowForDenom() { +func (s *KeeperTestSuite) TestTotalEscrowForDenom() { var ( req *types.QueryTotalEscrowForDenomRequest expEscrowAmount sdkmath.Int @@ -325,7 +325,7 @@ func (suite *KeeperTestSuite) TestTotalEscrowForDenom() { } expEscrowAmount = sdkmath.NewInt(100) - suite.chainA.GetSimApp().TransferKeeper.SetTotalEscrowForDenom(suite.chainA.GetContext(), sdk.NewCoin(sdk.DefaultBondDenom, expEscrowAmount)) + s.chainA.GetSimApp().TransferKeeper.SetTotalEscrowForDenom(s.chainA.GetContext(), sdk.NewCoin(sdk.DefaultBondDenom, expEscrowAmount)) }, nil, }, @@ -334,10 +334,10 @@ func (suite *KeeperTestSuite) TestTotalEscrowForDenom() { func() { denom := types.NewDenom(sdk.DefaultBondDenom, types.NewHop("transfer", "channel-0")) - suite.chainA.GetSimApp().TransferKeeper.SetDenom(suite.chainA.GetContext(), denom) + s.chainA.GetSimApp().TransferKeeper.SetDenom(s.chainA.GetContext(), denom) expEscrowAmount, ok := sdkmath.NewIntFromString("100000000000000000000") - suite.Require().True(ok) - suite.chainA.GetSimApp().TransferKeeper.SetTotalEscrowForDenom(suite.chainA.GetContext(), sdk.NewCoin(sdk.DefaultBondDenom, expEscrowAmount)) + s.Require().True(ok) + s.chainA.GetSimApp().TransferKeeper.SetTotalEscrowForDenom(s.chainA.GetContext(), sdk.NewCoin(sdk.DefaultBondDenom, expEscrowAmount)) req = &types.QueryTotalEscrowForDenomRequest{ Denom: denom.IBCDenom(), @@ -377,21 +377,21 @@ func (suite *KeeperTestSuite) TestTotalEscrowForDenom() { } for _, tc := range testCases { - suite.Run(fmt.Sprintf("Case %s", tc.msg), func() { - suite.SetupTest() // reset + s.Run(fmt.Sprintf("Case %s", tc.msg), func() { + s.SetupTest() // reset expEscrowAmount = sdkmath.ZeroInt() tc.malleate() - ctx := suite.chainA.GetContext() + ctx := s.chainA.GetContext() - res, err := suite.chainA.GetSimApp().TransferKeeper.TotalEscrowForDenom(ctx, req) + res, err := s.chainA.GetSimApp().TransferKeeper.TotalEscrowForDenom(ctx, req) if tc.expErr == nil { - suite.Require().NoError(err) - suite.Require().Equal(expEscrowAmount, res.Amount.Amount) + s.Require().NoError(err) + s.Require().Equal(expEscrowAmount, res.Amount.Amount) } else { - ibctesting.RequireErrorIsOrContains(suite.T(), err, tc.expErr, err.Error()) - suite.Require().Error(err) + ibctesting.RequireErrorIsOrContains(s.T(), err, tc.expErr, err.Error()) + s.Require().Error(err) } }) } diff --git a/modules/apps/transfer/keeper/keeper.go b/modules/apps/transfer/keeper/keeper.go index 2d9169b9095..b0cfd050e6c 100644 --- a/modules/apps/transfer/keeper/keeper.go +++ b/modules/apps/transfer/keeper/keeper.go @@ -5,6 +5,7 @@ import ( "fmt" "strings" + "cosmossdk.io/core/address" corestore "cosmossdk.io/core/store" "cosmossdk.io/log" sdkmath "cosmossdk.io/math" @@ -25,9 +26,9 @@ 
import ( // Keeper defines the IBC fungible transfer keeper type Keeper struct { - storeService corestore.KVStoreService - cdc codec.BinaryCodec - legacySubspace types.ParamSubspace + storeService corestore.KVStoreService + cdc codec.BinaryCodec + addressCodec address.Codec ics4Wrapper porttypes.ICS4Wrapper channelKeeper types.ChannelKeeper @@ -41,17 +42,7 @@ type Keeper struct { } // NewKeeper creates a new IBC transfer Keeper instance -func NewKeeper( - cdc codec.BinaryCodec, - storeService corestore.KVStoreService, - legacySubspace types.ParamSubspace, - ics4Wrapper porttypes.ICS4Wrapper, - channelKeeper types.ChannelKeeper, - msgRouter types.MessageRouter, - authKeeper types.AccountKeeper, - bankKeeper types.BankKeeper, - authority string, -) Keeper { +func NewKeeper(cdc codec.BinaryCodec, addressCodec address.Codec, storeService corestore.KVStoreService, channelKeeper types.ChannelKeeper, msgRouter types.MessageRouter, authKeeper types.AccountKeeper, bankKeeper types.BankKeeper, authority string) *Keeper { // ensure ibc transfer module account is set if addr := authKeeper.GetModuleAddress(types.ModuleName); addr == nil { panic(errors.New("the IBC transfer module account has not been set")) @@ -61,16 +52,16 @@ func NewKeeper( panic(errors.New("authority must be non-empty")) } - return Keeper{ - cdc: cdc, - storeService: storeService, - legacySubspace: legacySubspace, - ics4Wrapper: ics4Wrapper, - channelKeeper: channelKeeper, - msgRouter: msgRouter, - AuthKeeper: authKeeper, - BankKeeper: bankKeeper, - authority: authority, + return &Keeper{ + cdc: cdc, + addressCodec: addressCodec, + storeService: storeService, + ics4Wrapper: channelKeeper, // default ICS4Wrapper is the channel keeper + channelKeeper: channelKeeper, + msgRouter: msgRouter, + AuthKeeper: authKeeper, + BankKeeper: bankKeeper, + authority: authority, } } @@ -82,22 +73,27 @@ func (k *Keeper) WithICS4Wrapper(wrapper porttypes.ICS4Wrapper) { } // GetICS4Wrapper returns the ICS4Wrapper. -func (k Keeper) GetICS4Wrapper() porttypes.ICS4Wrapper { +func (k *Keeper) GetICS4Wrapper() porttypes.ICS4Wrapper { return k.ics4Wrapper } // GetAuthority returns the transfer module's authority. -func (k Keeper) GetAuthority() string { +func (k *Keeper) GetAuthority() string { return k.authority } +// GetAddressCodec returns the address codec used by the keeper. +func (k *Keeper) GetAddressCodec() address.Codec { + return k.addressCodec +} + // Logger returns a module-specific logger. -func (Keeper) Logger(ctx sdk.Context) log.Logger { +func (*Keeper) Logger(ctx sdk.Context) log.Logger { return ctx.Logger().With("module", "x/"+exported.ModuleName+"-"+types.ModuleName) } // GetPort returns the portID for the transfer module. Used in ExportGenesis -func (k Keeper) GetPort(ctx sdk.Context) string { +func (k *Keeper) GetPort(ctx sdk.Context) string { store := k.storeService.OpenKVStore(ctx) bz, err := store.Get(types.PortKey) if err != nil { @@ -107,7 +103,7 @@ func (k Keeper) GetPort(ctx sdk.Context) string { } // SetPort sets the portID for the transfer module. Used in InitGenesis -func (k Keeper) SetPort(ctx sdk.Context, portID string) { +func (k *Keeper) SetPort(ctx sdk.Context, portID string) { store := k.storeService.OpenKVStore(ctx) if err := store.Set(types.PortKey, []byte(portID)); err != nil { panic(err) @@ -115,7 +111,7 @@ func (k Keeper) SetPort(ctx sdk.Context, portID string) { } // GetParams returns the current transfer module parameters. 
-func (k Keeper) GetParams(ctx sdk.Context) types.Params { +func (k *Keeper) GetParams(ctx sdk.Context) types.Params { store := k.storeService.OpenKVStore(ctx) bz, err := store.Get([]byte(types.ParamsKey)) if err != nil { @@ -131,7 +127,7 @@ func (k Keeper) GetParams(ctx sdk.Context) types.Params { } // SetParams sets the transfer module parameters. -func (k Keeper) SetParams(ctx sdk.Context, params types.Params) { +func (k *Keeper) SetParams(ctx sdk.Context, params types.Params) { store := k.storeService.OpenKVStore(ctx) bz := k.cdc.MustMarshal(&params) if err := store.Set([]byte(types.ParamsKey), bz); err != nil { @@ -140,7 +136,7 @@ func (k Keeper) SetParams(ctx sdk.Context, params types.Params) { } // GetDenom retrieves the denom from store given the hash of the denom. -func (k Keeper) GetDenom(ctx sdk.Context, denomHash cmtbytes.HexBytes) (types.Denom, bool) { +func (k *Keeper) GetDenom(ctx sdk.Context, denomHash cmtbytes.HexBytes) (types.Denom, bool) { store := prefix.NewStore(runtime.KVStoreAdapter(k.storeService.OpenKVStore(ctx)), types.DenomKey) bz := store.Get(denomHash) if len(bz) == 0 { @@ -154,21 +150,21 @@ func (k Keeper) GetDenom(ctx sdk.Context, denomHash cmtbytes.HexBytes) (types.De } // HasDenom checks if a the key with the given denomination hash exists on the store. -func (k Keeper) HasDenom(ctx sdk.Context, denomHash cmtbytes.HexBytes) bool { +func (k *Keeper) HasDenom(ctx sdk.Context, denomHash cmtbytes.HexBytes) bool { store := prefix.NewStore(runtime.KVStoreAdapter(k.storeService.OpenKVStore(ctx)), types.DenomKey) return store.Has(denomHash) } // SetDenom sets a new {denom hash -> denom } pair to the store. // This allows for reverse lookup of the denom given the hash. -func (k Keeper) SetDenom(ctx sdk.Context, denom types.Denom) { +func (k *Keeper) SetDenom(ctx sdk.Context, denom types.Denom) { store := prefix.NewStore(runtime.KVStoreAdapter(k.storeService.OpenKVStore(ctx)), types.DenomKey) bz := k.cdc.MustMarshal(&denom) store.Set(denom.Hash(), bz) } // GetAllDenoms returns all the denominations. -func (k Keeper) GetAllDenoms(ctx sdk.Context) types.Denoms { +func (k *Keeper) GetAllDenoms(ctx sdk.Context) types.Denoms { denoms := types.Denoms{} k.IterateDenoms(ctx, func(denom types.Denom) bool { denoms = append(denoms, denom) @@ -179,7 +175,7 @@ func (k Keeper) GetAllDenoms(ctx sdk.Context) types.Denoms { } // IterateDenoms iterates over the denominations in the store and performs a callback function. -func (k Keeper) IterateDenoms(ctx sdk.Context, cb func(denom types.Denom) bool) { +func (k *Keeper) IterateDenoms(ctx sdk.Context, cb func(denom types.Denom) bool) { store := runtime.KVStoreAdapter(k.storeService.OpenKVStore(ctx)) iterator := storetypes.KVStorePrefixIterator(store, types.DenomKey) @@ -195,7 +191,7 @@ func (k Keeper) IterateDenoms(ctx sdk.Context, cb func(denom types.Denom) bool) } // SetDenomMetadata sets an IBC token's denomination metadata -func (k Keeper) SetDenomMetadata(ctx sdk.Context, denom types.Denom) { +func (k *Keeper) SetDenomMetadata(ctx sdk.Context, denom types.Denom) { metadata := banktypes.Metadata{ Description: fmt.Sprintf("IBC token from %s", denom.Path()), DenomUnits: []*banktypes.DenomUnit{ @@ -221,7 +217,7 @@ func (k Keeper) SetDenomMetadata(ctx sdk.Context, denom types.Denom) { // // NOTE: if there is no value stored in state for the provided denom then a new Coin is returned for the denom with an initial value of zero. // This accommodates callers to simply call `Add()` on the returned Coin as an empty Coin literal (e.g.
sdk.Coin{}) will trigger a panic due to the absence of a denom. -func (k Keeper) GetTotalEscrowForDenom(ctx sdk.Context, denom string) sdk.Coin { +func (k *Keeper) GetTotalEscrowForDenom(ctx sdk.Context, denom string) sdk.Coin { store := k.storeService.OpenKVStore(ctx) bz, err := store.Get(types.TotalEscrowForDenomKey(denom)) if err != nil { @@ -240,7 +236,7 @@ func (k Keeper) GetTotalEscrowForDenom(ctx sdk.Context, denom string) sdk.Coin { // SetTotalEscrowForDenom stores the total amount of source chain tokens that are in escrow. // Amount is stored in state if and only if it is not equal to zero. The function will panic // if the amount is negative. -func (k Keeper) SetTotalEscrowForDenom(ctx sdk.Context, coin sdk.Coin) { +func (k *Keeper) SetTotalEscrowForDenom(ctx sdk.Context, coin sdk.Coin) { if coin.Amount.IsNegative() { panic(fmt.Errorf("amount cannot be negative: %s", coin.Amount)) } @@ -262,7 +258,7 @@ func (k Keeper) SetTotalEscrowForDenom(ctx sdk.Context, coin sdk.Coin) { } // GetAllTotalEscrowed returns the escrow information for all the denominations. -func (k Keeper) GetAllTotalEscrowed(ctx sdk.Context) sdk.Coins { +func (k *Keeper) GetAllTotalEscrowed(ctx sdk.Context) sdk.Coins { var escrows sdk.Coins k.IterateTokensInEscrow(ctx, []byte(types.KeyTotalEscrowPrefix), func(denomEscrow sdk.Coin) bool { escrows = escrows.Add(denomEscrow) @@ -275,7 +271,7 @@ func (k Keeper) GetAllTotalEscrowed(ctx sdk.Context) sdk.Coins { // IterateTokensInEscrow iterates over the denomination escrows in the store // and performs a callback function. Denominations for which an invalid value // (i.e. not integer) is stored, will be skipped. -func (k Keeper) IterateTokensInEscrow(ctx sdk.Context, storeprefix []byte, cb func(denomEscrow sdk.Coin) bool) { +func (k *Keeper) IterateTokensInEscrow(ctx sdk.Context, storeprefix []byte, cb func(denomEscrow sdk.Coin) bool) { store := runtime.KVStoreAdapter(k.storeService.OpenKVStore(ctx)) iterator := storetypes.KVStorePrefixIterator(store, storeprefix) @@ -300,7 +296,7 @@ func (k Keeper) IterateTokensInEscrow(ctx sdk.Context, storeprefix []byte, cb fu // IsBlockedAddr checks if the given address is allowed to send or receive tokens. // The module account is always allowed to send and receive tokens. 
-func (k Keeper) IsBlockedAddr(addr sdk.AccAddress) bool { +func (k *Keeper) IsBlockedAddr(addr sdk.AccAddress) bool { moduleAddr := k.AuthKeeper.GetModuleAddress(types.ModuleName) if addr.Equals(moduleAddr) { return false diff --git a/modules/apps/transfer/keeper/keeper_test.go b/modules/apps/transfer/keeper/keeper_test.go index c961c755227..6861cf10476 100644 --- a/modules/apps/transfer/keeper/keeper_test.go +++ b/modules/apps/transfer/keeper/keeper_test.go @@ -17,9 +17,9 @@ import ( authkeeper "github.com/cosmos/cosmos-sdk/x/auth/keeper" minttypes "github.com/cosmos/cosmos-sdk/x/mint/types" + packetforward "github.com/cosmos/ibc-go/v10/modules/apps/packet-forward-middleware" "github.com/cosmos/ibc-go/v10/modules/apps/transfer/keeper" "github.com/cosmos/ibc-go/v10/modules/apps/transfer/types" - channelkeeper "github.com/cosmos/ibc-go/v10/modules/core/04-channel/keeper" ibctesting "github.com/cosmos/ibc-go/v10/testing" ) @@ -34,21 +34,21 @@ type KeeperTestSuite struct { chainC *ibctesting.TestChain } -func (suite *KeeperTestSuite) SetupTest() { - suite.coordinator = ibctesting.NewCoordinator(suite.T(), 3) - suite.chainA = suite.coordinator.GetChain(ibctesting.GetChainID(1)) - suite.chainB = suite.coordinator.GetChain(ibctesting.GetChainID(2)) - suite.chainC = suite.coordinator.GetChain(ibctesting.GetChainID(3)) +func (s *KeeperTestSuite) SetupTest() { + s.coordinator = ibctesting.NewCoordinator(s.T(), 3) + s.chainA = s.coordinator.GetChain(ibctesting.GetChainID(1)) + s.chainB = s.coordinator.GetChain(ibctesting.GetChainID(2)) + s.chainC = s.coordinator.GetChain(ibctesting.GetChainID(3)) - queryHelper := baseapp.NewQueryServerTestHelper(suite.chainA.GetContext(), suite.chainA.GetSimApp().InterfaceRegistry()) - types.RegisterQueryServer(queryHelper, suite.chainA.GetSimApp().TransferKeeper) + queryHelper := baseapp.NewQueryServerTestHelper(s.chainA.GetContext(), s.chainA.GetSimApp().InterfaceRegistry()) + types.RegisterQueryServer(queryHelper, s.chainA.GetSimApp().TransferKeeper) } func TestKeeperTestSuite(t *testing.T) { testifysuite.Run(t, new(KeeperTestSuite)) } -func (suite *KeeperTestSuite) TestNewKeeper() { +func (s *KeeperTestSuite) TestNewKeeper() { testCases := []struct { name string instantiateFn func() @@ -56,56 +56,52 @@ func (suite *KeeperTestSuite) TestNewKeeper() { }{ {"success", func() { keeper.NewKeeper( - suite.chainA.GetSimApp().AppCodec(), - runtime.NewKVStoreService(suite.chainA.GetSimApp().GetKey(types.StoreKey)), - suite.chainA.GetSimApp().GetSubspace(types.ModuleName), - suite.chainA.GetSimApp().IBCKeeper.ChannelKeeper, - suite.chainA.GetSimApp().IBCKeeper.ChannelKeeper, - suite.chainA.GetSimApp().MsgServiceRouter(), - suite.chainA.GetSimApp().AccountKeeper, - suite.chainA.GetSimApp().BankKeeper, - suite.chainA.GetSimApp().ICAControllerKeeper.GetAuthority(), + s.chainA.GetSimApp().AppCodec(), + s.chainA.GetSimApp().AccountKeeper.AddressCodec(), + runtime.NewKVStoreService(s.chainA.GetSimApp().GetKey(types.StoreKey)), + s.chainA.GetSimApp().IBCKeeper.ChannelKeeper, + s.chainA.GetSimApp().MsgServiceRouter(), + s.chainA.GetSimApp().AccountKeeper, + s.chainA.GetSimApp().BankKeeper, + s.chainA.GetSimApp().ICAControllerKeeper.GetAuthority(), ) }, ""}, {"failure: transfer module account does not exist", func() { keeper.NewKeeper( - suite.chainA.GetSimApp().AppCodec(), - runtime.NewKVStoreService(suite.chainA.GetSimApp().GetKey(types.StoreKey)), - suite.chainA.GetSimApp().GetSubspace(types.ModuleName), - suite.chainA.GetSimApp().IBCKeeper.ChannelKeeper, - 
suite.chainA.GetSimApp().IBCKeeper.ChannelKeeper, - suite.chainA.GetSimApp().MsgServiceRouter(), + s.chainA.GetSimApp().AppCodec(), + s.chainA.GetSimApp().AccountKeeper.AddressCodec(), + runtime.NewKVStoreService(s.chainA.GetSimApp().GetKey(types.StoreKey)), + s.chainA.GetSimApp().IBCKeeper.ChannelKeeper, + s.chainA.GetSimApp().MsgServiceRouter(), authkeeper.AccountKeeper{}, // empty account keeper - suite.chainA.GetSimApp().BankKeeper, - suite.chainA.GetSimApp().ICAControllerKeeper.GetAuthority(), + s.chainA.GetSimApp().BankKeeper, + s.chainA.GetSimApp().ICAControllerKeeper.GetAuthority(), ) }, "the IBC transfer module account has not been set"}, {"failure: empty authority", func() { keeper.NewKeeper( - suite.chainA.GetSimApp().AppCodec(), - runtime.NewKVStoreService(suite.chainA.GetSimApp().GetKey(types.StoreKey)), - suite.chainA.GetSimApp().GetSubspace(types.ModuleName), - suite.chainA.GetSimApp().IBCKeeper.ChannelKeeper, - suite.chainA.GetSimApp().IBCKeeper.ChannelKeeper, - suite.chainA.GetSimApp().MsgServiceRouter(), - suite.chainA.GetSimApp().AccountKeeper, - suite.chainA.GetSimApp().BankKeeper, + s.chainA.GetSimApp().AppCodec(), + s.chainA.GetSimApp().AccountKeeper.AddressCodec(), + runtime.NewKVStoreService(s.chainA.GetSimApp().GetKey(types.StoreKey)), + s.chainA.GetSimApp().IBCKeeper.ChannelKeeper, + s.chainA.GetSimApp().MsgServiceRouter(), + s.chainA.GetSimApp().AccountKeeper, + s.chainA.GetSimApp().BankKeeper, "", // authority ) }, "authority must be non-empty"}, } for _, tc := range testCases { + s.SetupTest() - suite.SetupTest() - - suite.Run(tc.name, func() { + s.Run(tc.name, func() { if tc.panicMsg == "" { - suite.Require().NotPanics( + s.Require().NotPanics( tc.instantiateFn, ) } else { - suite.Require().PanicsWithError( + s.Require().PanicsWithError( tc.panicMsg, tc.instantiateFn, ) @@ -114,7 +110,7 @@ func (suite *KeeperTestSuite) TestNewKeeper() { } } -func (suite *KeeperTestSuite) TestSetGetTotalEscrowForDenom() { +func (s *KeeperTestSuite) TestSetGetTotalEscrowForDenom() { const denom = "atom" var expAmount sdkmath.Int @@ -152,10 +148,10 @@ func (suite *KeeperTestSuite) TestSetGetTotalEscrowForDenom() { } for _, tc := range testCases { - suite.Run(tc.name, func() { - suite.SetupTest() // reset + s.Run(tc.name, func() { + s.SetupTest() // reset expAmount = sdkmath.NewInt(100) - ctx := suite.chainA.GetContext() + ctx := s.chainA.GetContext() tc.malleate() @@ -165,30 +161,30 @@ func (suite *KeeperTestSuite) TestSetGetTotalEscrowForDenom() { } if tc.expError == nil { - suite.chainA.GetSimApp().TransferKeeper.SetTotalEscrowForDenom(ctx, coin) - total := suite.chainA.GetSimApp().TransferKeeper.GetTotalEscrowForDenom(ctx, denom) - suite.Require().Equal(expAmount, total.Amount) + s.chainA.GetSimApp().TransferKeeper.SetTotalEscrowForDenom(ctx, coin) + total := s.chainA.GetSimApp().TransferKeeper.GetTotalEscrowForDenom(ctx, denom) + s.Require().Equal(expAmount, total.Amount) - storeKey := suite.chainA.GetSimApp().GetKey(types.ModuleName) + storeKey := s.chainA.GetSimApp().GetKey(types.ModuleName) store := ctx.KVStore(storeKey) key := types.TotalEscrowForDenomKey(denom) if expAmount.IsZero() { - suite.Require().False(store.Has(key)) + s.Require().False(store.Has(key)) } else { - suite.Require().True(store.Has(key)) + s.Require().True(store.Has(key)) } } else { - suite.Require().PanicsWithError(tc.expError.Error(), func() { - suite.chainA.GetSimApp().TransferKeeper.SetTotalEscrowForDenom(ctx, coin) + s.Require().PanicsWithError(tc.expError.Error(), func() { + 
s.chainA.GetSimApp().TransferKeeper.SetTotalEscrowForDenom(ctx, coin) }) - total := suite.chainA.GetSimApp().TransferKeeper.GetTotalEscrowForDenom(ctx, denom) - suite.Require().Equal(sdkmath.ZeroInt(), total.Amount) + total := s.chainA.GetSimApp().TransferKeeper.GetTotalEscrowForDenom(ctx, denom) + s.Require().Equal(sdkmath.ZeroInt(), total.Amount) } }) } } -func (suite *KeeperTestSuite) TestGetAllDenomEscrows() { +func (s *KeeperTestSuite) TestGetAllDenomEscrows() { var ( store storetypes.KVStore cdc codec.Codec @@ -268,31 +264,31 @@ func (suite *KeeperTestSuite) TestGetAllDenomEscrows() { } for _, tc := range testCases { - suite.Run(tc.name, func() { - suite.SetupTest() // reset + s.Run(tc.name, func() { + s.SetupTest() // reset expDenomEscrows = sdk.Coins{} - ctx := suite.chainA.GetContext() + ctx := s.chainA.GetContext() - storeKey := suite.chainA.GetSimApp().GetKey(types.ModuleName) + storeKey := s.chainA.GetSimApp().GetKey(types.ModuleName) store = ctx.KVStore(storeKey) - cdc = suite.chainA.App.AppCodec() + cdc = s.chainA.App.AppCodec() tc.malleate() - denomEscrows := suite.chainA.GetSimApp().TransferKeeper.GetAllTotalEscrowed(ctx) + denomEscrows := s.chainA.GetSimApp().TransferKeeper.GetAllTotalEscrowed(ctx) if tc.expPass { - suite.Require().Len(expDenomEscrows, len(denomEscrows)) - suite.Require().ElementsMatch(expDenomEscrows, denomEscrows) + s.Require().Len(expDenomEscrows, len(denomEscrows)) + s.Require().ElementsMatch(expDenomEscrows, denomEscrows) } else { - suite.Require().Empty(denomEscrows) + s.Require().Empty(denomEscrows) } }) } } -func (suite *KeeperTestSuite) TestParams() { +func (s *KeeperTestSuite) TestParams() { testCases := []struct { name string input types.Params @@ -306,53 +302,53 @@ func (suite *KeeperTestSuite) TestParams() { } for _, tc := range testCases { - suite.Run(tc.name, func() { - suite.SetupTest() // reset - ctx := suite.chainA.GetContext() + s.Run(tc.name, func() { + s.SetupTest() // reset + ctx := s.chainA.GetContext() if tc.panicMsg == "" { - suite.chainA.GetSimApp().TransferKeeper.SetParams(ctx, tc.input) + s.chainA.GetSimApp().TransferKeeper.SetParams(ctx, tc.input) expected := tc.input - p := suite.chainA.GetSimApp().TransferKeeper.GetParams(ctx) - suite.Require().Equal(expected, p) + p := s.chainA.GetSimApp().TransferKeeper.GetParams(ctx) + s.Require().Equal(expected, p) } else { - suite.Require().PanicsWithError(tc.panicMsg, func() { - suite.chainA.GetSimApp().TransferKeeper.SetParams(ctx, tc.input) + s.Require().PanicsWithError(tc.panicMsg, func() { + s.chainA.GetSimApp().TransferKeeper.SetParams(ctx, tc.input) }) } }) } } -func (suite *KeeperTestSuite) TestUnsetParams() { - suite.SetupTest() +func (s *KeeperTestSuite) TestUnsetParams() { + s.SetupTest() - ctx := suite.chainA.GetContext() - store := suite.chainA.GetContext().KVStore(suite.chainA.GetSimApp().GetKey(types.ModuleName)) + ctx := s.chainA.GetContext() + store := s.chainA.GetContext().KVStore(s.chainA.GetSimApp().GetKey(types.ModuleName)) store.Delete([]byte(types.ParamsKey)) - suite.Require().Panics(func() { - suite.chainA.GetSimApp().TransferKeeper.GetParams(ctx) + s.Require().Panics(func() { + s.chainA.GetSimApp().TransferKeeper.GetParams(ctx) }) } -func (suite *KeeperTestSuite) TestWithICS4Wrapper() { - suite.SetupTest() +func (s *KeeperTestSuite) TestWithICS4Wrapper() { + s.SetupTest() - // test if the ics4 wrapper is the channel keeper initially - ics4Wrapper := suite.chainA.GetSimApp().TransferKeeper.GetICS4Wrapper() + // test if the ics4 wrapper is the pfm keeper initially 
+ ics4Wrapper := s.chainA.GetSimApp().TransferKeeper.GetICS4Wrapper() - _, isChannelKeeper := ics4Wrapper.(*channelkeeper.Keeper) - suite.Require().True(isChannelKeeper) - suite.Require().IsType((*channelkeeper.Keeper)(nil), ics4Wrapper) + _, isPFMKeeper := ics4Wrapper.(*packetforward.IBCMiddleware) + s.Require().True(isPFMKeeper) + s.Require().IsType((*packetforward.IBCMiddleware)(nil), ics4Wrapper) // set the ics4 wrapper to the channel keeper - suite.chainA.GetSimApp().TransferKeeper.WithICS4Wrapper(nil) - ics4Wrapper = suite.chainA.GetSimApp().TransferKeeper.GetICS4Wrapper() - suite.Require().Nil(ics4Wrapper) + s.chainA.GetSimApp().TransferKeeper.WithICS4Wrapper(nil) + ics4Wrapper = s.chainA.GetSimApp().TransferKeeper.GetICS4Wrapper() + s.Require().Nil(ics4Wrapper) } -func (suite *KeeperTestSuite) TestIsBlockedAddr() { - suite.SetupTest() +func (s *KeeperTestSuite) TestIsBlockedAddr() { + s.SetupTest() testCases := []struct { name string @@ -361,24 +357,24 @@ func (suite *KeeperTestSuite) TestIsBlockedAddr() { }{ { "transfer module account address", - suite.chainA.GetSimApp().AccountKeeper.GetModuleAddress(types.ModuleName), + s.chainA.GetSimApp().AccountKeeper.GetModuleAddress(types.ModuleName), false, }, { "regular address", - suite.chainA.SenderAccount.GetAddress(), + s.chainA.SenderAccount.GetAddress(), false, }, { "blocked address", - suite.chainA.GetSimApp().AccountKeeper.GetModuleAddress(minttypes.ModuleName), + s.chainA.GetSimApp().AccountKeeper.GetModuleAddress(minttypes.ModuleName), true, }, } for _, tc := range testCases { - suite.Run(tc.name, func() { - suite.Require().Equal(tc.expBlock, suite.chainA.GetSimApp().TransferKeeper.IsBlockedAddr(tc.addr)) + s.Run(tc.name, func() { + s.Require().Equal(tc.expBlock, s.chainA.GetSimApp().TransferKeeper.IsBlockedAddr(tc.addr)) }) } } diff --git a/modules/apps/transfer/keeper/mbt_relay_test.go b/modules/apps/transfer/keeper/mbt_relay_test.go index 2e19f901a59..a2a5f8acf57 100644 --- a/modules/apps/transfer/keeper/mbt_relay_test.go +++ b/modules/apps/transfer/keeper/mbt_relay_test.go @@ -102,7 +102,7 @@ func AddressFromTla(addr []string) string { if len(addr) != 3 { panic(errors.New("failed to convert from TLA+ address: wrong number of address components")) } - s := "" + var s string if len(addr[0]) == 0 && len(addr[1]) == 0 { //nolint:gocritic // simple address: id s = addr[2] @@ -262,7 +262,7 @@ func (bank *Bank) NonZeroString() string { // Construct a bank out of the chain bank func BankOfChain(chain *ibctesting.TestChain) Bank { bank := MakeBank() - chain.GetSimApp().BankKeeper.IterateAllBalances(chain.GetContext(), func(address sdk.AccAddress, coin sdk.Coin) (stop bool) { + chain.GetSimApp().BankKeeper.IterateAllBalances(chain.GetContext(), func(address sdk.AccAddress, coin sdk.Coin) bool { token, err := chain.GetSimApp().TransferKeeper.TokenFromCoin(chain.GetContext(), coin) if err != nil { panic(fmt.Errorf("Failed to construct token from coin: %w", err)) @@ -285,7 +285,7 @@ func (*KeeperTestSuite) CheckBankBalances(chain *ibctesting.TestChain, bankBefor return nil } -func (suite *KeeperTestSuite) TestModelBasedRelay() { +func (s *KeeperTestSuite) TestModelBasedRelay() { dirname := "model_based_tests/" files, err := os.ReadDir(dirname) if err != nil { @@ -305,30 +305,30 @@ func (suite *KeeperTestSuite) TestModelBasedRelay() { panic(fmt.Errorf("Failed to parse JSON test fixture: %w", err)) } - suite.SetupTest() - pathAtoB := ibctesting.NewTransferPath(suite.chainA, suite.chainB).DisableUniqueChannelIDs() - pathBtoC := 
ibctesting.NewTransferPath(suite.chainB, suite.chainC).DisableUniqueChannelIDs() + s.SetupTest() + pathAtoB := ibctesting.NewTransferPath(s.chainA, s.chainB).DisableUniqueChannelIDs() + pathBtoC := ibctesting.NewTransferPath(s.chainB, s.chainC).DisableUniqueChannelIDs() pathAtoB.Setup() pathBtoC.Setup() for i, tlaTc := range tlaTestCases { tc := OnRecvPacketTestCaseFromTla(tlaTc) registerDenomFn := func() { - if !suite.chainB.GetSimApp().TransferKeeper.HasDenom(suite.chainB.GetContext(), tc.packet.Data.Token.Denom.Hash()) { - suite.chainB.GetSimApp().TransferKeeper.SetDenom(suite.chainB.GetContext(), tc.packet.Data.Token.Denom) + if !s.chainB.GetSimApp().TransferKeeper.HasDenom(s.chainB.GetContext(), tc.packet.Data.Token.Denom.Hash()) { + s.chainB.GetSimApp().TransferKeeper.SetDenom(s.chainB.GetContext(), tc.packet.Data.Token.Denom) } } description := fileInfo.Name() + " # " + strconv.Itoa(i+1) - suite.Run(fmt.Sprintf("Case %s", description), func() { + s.Run(fmt.Sprintf("Case %s", description), func() { seq := uint64(1) packet := channeltypes.NewPacket([]byte("mockdata"), seq, tc.packet.SourcePort, tc.packet.SourceChannel, tc.packet.DestPort, tc.packet.DestChannel, clienttypes.NewHeight(1, 100), 0) bankBefore := BankFromBalances(tc.bankBefore) - realBankBefore := BankOfChain(suite.chainB) + realBankBefore := BankOfChain(s.chainB) // First validate the packet itself (mimics what happens when the packet is being sent and/or received) err := packet.ValidateBasic() if err != nil { - suite.Require().False(tc.pass, err.Error()) + s.Require().False(tc.pass, err.Error()) return } switch tc.handler { @@ -353,16 +353,15 @@ func (suite *KeeperTestSuite) TestModelBasedRelay() { sdk.NewCoin(denom, amount), sender.String(), tc.packet.Data.Receiver, - suite.chainA.GetTimeoutHeight(), 0, // only use timeout height + s.chainA.GetTimeoutHeight(), 0, // only use timeout height "", ) - _, err = suite.chainB.GetSimApp().TransferKeeper.Transfer(suite.chainB.GetContext(), msg) - + _, err = s.chainB.GetSimApp().TransferKeeper.Transfer(s.chainB.GetContext(), msg) } case "OnRecvPacket": - err = suite.chainB.GetSimApp().TransferKeeper.OnRecvPacket( - suite.chainB.GetContext(), + err = s.chainB.GetSimApp().TransferKeeper.OnRecvPacket( + s.chainB.GetContext(), tc.packet.Data, packet.SourcePort, packet.SourceChannel, @@ -372,31 +371,31 @@ func (suite *KeeperTestSuite) TestModelBasedRelay() { case "OnTimeoutPacket": registerDenomFn() - err = suite.chainB.GetSimApp().TransferKeeper.OnTimeoutPacket(suite.chainB.GetContext(), packet.SourcePort, packet.SourceChannel, tc.packet.Data) + err = s.chainB.GetSimApp().TransferKeeper.OnTimeoutPacket(s.chainB.GetContext(), packet.SourcePort, packet.SourceChannel, tc.packet.Data) case "OnRecvAcknowledgementResult": - err = suite.chainB.GetSimApp().TransferKeeper.OnAcknowledgementPacket( - suite.chainB.GetContext(), packet.SourcePort, packet.SourceChannel, tc.packet.Data, + err = s.chainB.GetSimApp().TransferKeeper.OnAcknowledgementPacket( + s.chainB.GetContext(), packet.SourcePort, packet.SourceChannel, tc.packet.Data, channeltypes.NewResultAcknowledgement(nil)) case "OnRecvAcknowledgementError": registerDenomFn() - err = suite.chainB.GetSimApp().TransferKeeper.OnAcknowledgementPacket( - suite.chainB.GetContext(), packet.SourcePort, packet.SourceChannel, tc.packet.Data, + err = s.chainB.GetSimApp().TransferKeeper.OnAcknowledgementPacket( + s.chainB.GetContext(), packet.SourcePort, packet.SourceChannel, tc.packet.Data, channeltypes.NewErrorAcknowledgement(errors.New("MBT Error 
Acknowledgement"))) default: err = fmt.Errorf("Unknown handler: %s", tc.handler) } if err != nil { - suite.Require().False(tc.pass, err.Error()) + s.Require().False(tc.pass, err.Error()) return } bankAfter := BankFromBalances(tc.bankAfter) expectedBankChange := bankAfter.Sub(&bankBefore) - if err := suite.CheckBankBalances(suite.chainB, &realBankBefore, &expectedBankChange); err != nil { - suite.Require().False(tc.pass, err.Error()) + if err := s.CheckBankBalances(s.chainB, &realBankBefore, &expectedBankChange); err != nil { + s.Require().False(tc.pass, err.Error()) return } - suite.Require().True(tc.pass) + s.Require().True(tc.pass) }) } } diff --git a/modules/apps/transfer/keeper/migrations.go b/modules/apps/transfer/keeper/migrations.go index 3c1c9bbd3d7..70120344ec9 100644 --- a/modules/apps/transfer/keeper/migrations.go +++ b/modules/apps/transfer/keeper/migrations.go @@ -27,20 +27,10 @@ func NewMigrator(keeper Keeper) Migrator { } } -// MigrateParams migrates the transfer module's parameters from the x/params to self store. -func (m Migrator) MigrateParams(ctx sdk.Context) error { - var params types.Params - m.keeper.legacySubspace.GetParamSet(ctx, ¶ms) - - m.keeper.SetParams(ctx, params) - m.keeper.Logger(ctx).Info("successfully migrated transfer app self-manage params") - return nil -} - // MigrateDenomMetadata sets token metadata for all the IBC denom traces func (m Migrator) MigrateDenomMetadata(ctx sdk.Context) error { m.keeper.iterateDenomTraces(ctx, - func(dt internaltypes.DenomTrace) (stop bool) { + func(dt internaltypes.DenomTrace) bool { // check if the metadata for the given denom trace does not already exist if !m.keeper.BankKeeper.HasDenomMetaData(ctx, dt.IBCDenom()) { m.keeper.setDenomMetadataWithDenomTrace(ctx, dt) @@ -80,7 +70,7 @@ func (m Migrator) MigrateDenomTraceToDenom(ctx sdk.Context) error { denomTraces []internaltypes.DenomTrace ) m.keeper.iterateDenomTraces(ctx, - func(dt internaltypes.DenomTrace) (stop bool) { + func(dt internaltypes.DenomTrace) bool { // convert denomTrace to denom denom := types.ExtractDenomFromPath(dt.GetFullDenomPath()) err := denom.Validate() @@ -114,7 +104,7 @@ func (m Migrator) MigrateDenomTraceToDenom(ctx sdk.Context) error { } // setDenomTrace sets a new {trace hash -> denom trace} pair to the store. -func (k Keeper) setDenomTrace(ctx sdk.Context, denomTrace internaltypes.DenomTrace) { +func (k *Keeper) setDenomTrace(ctx sdk.Context, denomTrace internaltypes.DenomTrace) { store := prefix.NewStore(runtime.KVStoreAdapter(k.storeService.OpenKVStore(ctx)), types.DenomTraceKey) bz := k.cdc.MustMarshal(&denomTrace) @@ -122,14 +112,14 @@ func (k Keeper) setDenomTrace(ctx sdk.Context, denomTrace internaltypes.DenomTra } // deleteDenomTrace deletes the denom trace -func (k Keeper) deleteDenomTrace(ctx sdk.Context, denomTrace internaltypes.DenomTrace) { +func (k *Keeper) deleteDenomTrace(ctx sdk.Context, denomTrace internaltypes.DenomTrace) { store := prefix.NewStore(runtime.KVStoreAdapter(k.storeService.OpenKVStore(ctx)), types.DenomTraceKey) store.Delete(denomTrace.Hash()) } // iterateDenomTraces iterates over the denomination traces in the store // and performs a callback function. 
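The iterate/Iterate helpers in these hunks all share the same callback shape (the diff only drops the named stop return): the callback returns true to end the walk early. A tiny self-contained sketch of that pattern, with hypothetical names:

package main

import (
	"fmt"
	"strings"
)

// iterate visits items in order and invokes cb for each one; a true return
// from cb stops the walk early, mirroring the keeper's Iterate* helpers.
func iterate(items []string, cb func(item string) bool) {
	for _, it := range items {
		if cb(it) {
			break
		}
	}
}

func main() {
	denoms := []string{"uatom", "transfer/channel-0/uosmo", "ibc/ABCD"}

	var collected []string
	iterate(denoms, func(d string) bool {
		collected = append(collected, d)
		return false // never stop: collect everything
	})
	fmt.Println(collected)

	iterate(denoms, func(d string) bool {
		fmt.Println("visited:", d)
		return strings.HasPrefix(d, "transfer/") // stop at the first match
	})
}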
-func (k Keeper) iterateDenomTraces(ctx sdk.Context, cb func(denomTrace internaltypes.DenomTrace) bool) { +func (k *Keeper) iterateDenomTraces(ctx sdk.Context, cb func(denomTrace internaltypes.DenomTrace) bool) { store := runtime.KVStoreAdapter(k.storeService.OpenKVStore(ctx)) iterator := storetypes.KVStorePrefixIterator(store, types.DenomTraceKey) @@ -145,7 +135,7 @@ func (k Keeper) iterateDenomTraces(ctx sdk.Context, cb func(denomTrace internalt } // setDenomMetadataWithDenomTrace sets an IBC token's denomination metadata -func (k Keeper) setDenomMetadataWithDenomTrace(ctx sdk.Context, denomTrace internaltypes.DenomTrace) { +func (k *Keeper) setDenomMetadataWithDenomTrace(ctx sdk.Context, denomTrace internaltypes.DenomTrace) { metadata := banktypes.Metadata{ Description: fmt.Sprintf("IBC token from %s", denomTrace.GetFullDenomPath()), DenomUnits: []*banktypes.DenomUnit{ diff --git a/modules/apps/transfer/keeper/migrations_test.go b/modules/apps/transfer/keeper/migrations_test.go index dda0358803d..83b97b93551 100644 --- a/modules/apps/transfer/keeper/migrations_test.go +++ b/modules/apps/transfer/keeper/migrations_test.go @@ -15,40 +15,7 @@ import ( ibctesting "github.com/cosmos/ibc-go/v10/testing" ) -func (suite *KeeperTestSuite) TestMigratorMigrateParams() { - testCases := []struct { - msg string - malleate func() - expectedParams transfertypes.Params - }{ - { - "success: default params", - func() { - params := transfertypes.DefaultParams() - subspace := suite.chainA.GetSimApp().GetSubspace(transfertypes.ModuleName) - subspace.SetParamSet(suite.chainA.GetContext(), ¶ms) // set params - }, - transfertypes.DefaultParams(), - }, - } - - for _, tc := range testCases { - suite.Run(fmt.Sprintf("case %s", tc.msg), func() { - suite.SetupTest() // reset - - tc.malleate() // explicitly set params - - migrator := transferkeeper.NewMigrator(suite.chainA.GetSimApp().TransferKeeper) - err := migrator.MigrateParams(suite.chainA.GetContext()) - suite.Require().NoError(err) - - params := suite.chainA.GetSimApp().TransferKeeper.GetParams(suite.chainA.GetContext()) - suite.Require().Equal(tc.expectedParams, params) - }) - } -} - -func (suite *KeeperTestSuite) TestMigratorMigrateDenomTraceToDenom() { +func (s *KeeperTestSuite) TestMigratorMigrateDenomTraceToDenom() { testCases := []struct { msg string malleate func() @@ -57,8 +24,8 @@ func (suite *KeeperTestSuite) TestMigratorMigrateDenomTraceToDenom() { { "success: no trace", func() { - suite.chainA.GetSimApp().TransferKeeper.SetDenomTrace( - suite.chainA.GetContext(), + s.chainA.GetSimApp().TransferKeeper.SetDenomTrace( + s.chainA.GetContext(), internaltransfertypes.DenomTrace{ BaseDenom: "uatom", }) @@ -70,8 +37,8 @@ func (suite *KeeperTestSuite) TestMigratorMigrateDenomTraceToDenom() { { "success: single trace", func() { - suite.chainA.GetSimApp().TransferKeeper.SetDenomTrace( - suite.chainA.GetContext(), + s.chainA.GetSimApp().TransferKeeper.SetDenomTrace( + s.chainA.GetContext(), internaltransfertypes.DenomTrace{ BaseDenom: "uatom", Path: "transfer/channel-49", }) @@ -83,8 +50,8 @@ func (suite *KeeperTestSuite) TestMigratorMigrateDenomTraceToDenom() { { "success: multiple trace", func() { - suite.chainA.GetSimApp().TransferKeeper.SetDenomTrace( - suite.chainA.GetContext(), + s.chainA.GetSimApp().TransferKeeper.SetDenomTrace( + s.chainA.GetContext(), internaltransfertypes.DenomTrace{ BaseDenom: "uatom", Path: "transfer/channel-49/transfer/channel-32/transfer/channel-2", }) @@ -96,23 +63,23 @@ func (suite *KeeperTestSuite) 
TestMigratorMigrateDenomTraceToDenom() { { "success: many denoms", func() { - suite.chainA.GetSimApp().TransferKeeper.SetDenomTrace( - suite.chainA.GetContext(), + s.chainA.GetSimApp().TransferKeeper.SetDenomTrace( + s.chainA.GetContext(), internaltransfertypes.DenomTrace{ BaseDenom: "uatom", Path: "transfer/channel-49", }) - suite.chainA.GetSimApp().TransferKeeper.SetDenomTrace( - suite.chainA.GetContext(), + s.chainA.GetSimApp().TransferKeeper.SetDenomTrace( + s.chainA.GetContext(), internaltransfertypes.DenomTrace{ BaseDenom: "pineapple", Path: "transfer/channel-0", }) - suite.chainA.GetSimApp().TransferKeeper.SetDenomTrace( - suite.chainA.GetContext(), + s.chainA.GetSimApp().TransferKeeper.SetDenomTrace( + s.chainA.GetContext(), internaltransfertypes.DenomTrace{ BaseDenom: "apple", Path: "transfer/channel-0", }) - suite.chainA.GetSimApp().TransferKeeper.SetDenomTrace( - suite.chainA.GetContext(), + s.chainA.GetSimApp().TransferKeeper.SetDenomTrace( + s.chainA.GetContext(), internaltransfertypes.DenomTrace{ BaseDenom: "cucumber", Path: "transfer/channel-102/transfer/channel-0", }) @@ -128,8 +95,8 @@ func (suite *KeeperTestSuite) TestMigratorMigrateDenomTraceToDenom() { { "success: two slashes in base denom", func() { - suite.chainA.GetSimApp().TransferKeeper.SetDenomTrace( - suite.chainA.GetContext(), + s.chainA.GetSimApp().TransferKeeper.SetDenomTrace( + s.chainA.GetContext(), internaltransfertypes.DenomTrace{ BaseDenom: "gamm/pool/1", Path: "transfer/channel-0", }) @@ -141,8 +108,8 @@ func (suite *KeeperTestSuite) TestMigratorMigrateDenomTraceToDenom() { { "success: one slash in base denom", func() { - suite.chainA.GetSimApp().TransferKeeper.SetDenomTrace( - suite.chainA.GetContext(), + s.chainA.GetSimApp().TransferKeeper.SetDenomTrace( + s.chainA.GetContext(), internaltransfertypes.DenomTrace{ BaseDenom: "erc/0x85bcBCd7e79Ec36f4fBBDc54F90C643d921151AA", Path: "transfer/channel-149", }) @@ -154,8 +121,8 @@ func (suite *KeeperTestSuite) TestMigratorMigrateDenomTraceToDenom() { { "success: non-standard port", func() { - suite.chainA.GetSimApp().TransferKeeper.SetDenomTrace( - suite.chainA.GetContext(), + s.chainA.GetSimApp().TransferKeeper.SetDenomTrace( + s.chainA.GetContext(), internaltransfertypes.DenomTrace{ BaseDenom: "uatom", Path: "transfer/channel-0/customport/channel-7", }) @@ -167,22 +134,22 @@ func (suite *KeeperTestSuite) TestMigratorMigrateDenomTraceToDenom() { } for _, tc := range testCases { - suite.Run(tc.msg, func() { - suite.SetupTest() // reset + s.Run(tc.msg, func() { + s.SetupTest() // reset tc.malleate() - migrator := transferkeeper.NewMigrator(suite.chainA.GetSimApp().TransferKeeper) - err := migrator.MigrateDenomTraceToDenom(suite.chainA.GetContext()) - suite.Require().NoError(err) + migrator := transferkeeper.NewMigrator(*s.chainA.GetSimApp().TransferKeeper) + err := migrator.MigrateDenomTraceToDenom(s.chainA.GetContext()) + s.Require().NoError(err) - denoms := suite.chainA.GetSimApp().TransferKeeper.GetAllDenoms(suite.chainA.GetContext()) - suite.Require().Equal(tc.expectedDenoms, denoms) + denoms := s.chainA.GetSimApp().TransferKeeper.GetAllDenoms(s.chainA.GetContext()) + s.Require().Equal(tc.expectedDenoms, denoms) // assert no leftover denom traces - suite.chainA.GetSimApp().TransferKeeper.IterateDenomTraces(suite.chainA.GetContext(), - func(dt internaltransfertypes.DenomTrace) (stop bool) { - suite.FailNow("DenomTrace key still exists", dt) + s.chainA.GetSimApp().TransferKeeper.IterateDenomTraces(s.chainA.GetContext(), + func(dt 
internaltransfertypes.DenomTrace) bool { + s.FailNow("DenomTrace key still exists", dt) return false }, ) @@ -190,7 +157,7 @@ func (suite *KeeperTestSuite) TestMigratorMigrateDenomTraceToDenom() { } } -func (suite *KeeperTestSuite) TestMigratorMigrateDenomTraceToDenomCorruptionDetection() { +func (s *KeeperTestSuite) TestMigratorMigrateDenomTraceToDenomCorruptionDetection() { testCases := []struct { name string denomTrace internaltransfertypes.DenomTrace @@ -211,20 +178,20 @@ func (suite *KeeperTestSuite) TestMigratorMigrateDenomTraceToDenomCorruptionDete }, } for _, tc := range testCases { - suite.Run(tc.name, func() { - suite.SetupTest() // reset + s.Run(tc.name, func() { + s.SetupTest() // reset - suite.chainA.GetSimApp().TransferKeeper.SetDenomTrace(suite.chainA.GetContext(), tc.denomTrace) + s.chainA.GetSimApp().TransferKeeper.SetDenomTrace(s.chainA.GetContext(), tc.denomTrace) - migrator := transferkeeper.NewMigrator(suite.chainA.GetSimApp().TransferKeeper) - suite.Panics(func() { - migrator.MigrateDenomTraceToDenom(suite.chainA.GetContext()) //nolint:errcheck // we shouldn't check the error here because we want to ensure that a panic occurs. + migrator := transferkeeper.NewMigrator(*s.chainA.GetSimApp().TransferKeeper) + s.Panics(func() { + migrator.MigrateDenomTraceToDenom(s.chainA.GetContext()) //nolint:errcheck // we shouldn't check the error here because we want to ensure that a panic occurs. }) }) } } -func (suite *KeeperTestSuite) TestMigrateTotalEscrowForDenom() { +func (s *KeeperTestSuite) TestMigrateTotalEscrowForDenom() { var ( path *ibctesting.Path denom string @@ -243,7 +210,7 @@ func (suite *KeeperTestSuite) TestMigrateTotalEscrowForDenom() { coin := ibctesting.TestCoin // funds the escrow account to have balance - suite.Require().NoError(banktestutil.FundAccount(suite.chainA.GetContext(), suite.chainA.GetSimApp().BankKeeper, escrowAddress, sdk.NewCoins(coin))) + s.Require().NoError(banktestutil.FundAccount(s.chainA.GetContext(), s.chainA.GetSimApp().BankKeeper, escrowAddress, sdk.NewCoins(coin))) }, sdkmath.NewInt(100), }, @@ -251,7 +218,7 @@ func (suite *KeeperTestSuite) TestMigrateTotalEscrowForDenom() { "success: one native denom escrowed in two channels", func() { denom = sdk.DefaultBondDenom - extraPath := ibctesting.NewTransferPath(suite.chainA, suite.chainB) + extraPath := ibctesting.NewTransferPath(s.chainA, s.chainB) extraPath.Setup() escrowAddress1 := transfertypes.GetEscrowAddress(path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID) @@ -260,8 +227,8 @@ func (suite *KeeperTestSuite) TestMigrateTotalEscrowForDenom() { coin2 := ibctesting.TestCoin // funds the escrow accounts to have balance - suite.Require().NoError(banktestutil.FundAccount(suite.chainA.GetContext(), suite.chainA.GetSimApp().BankKeeper, escrowAddress1, sdk.NewCoins(coin1))) - suite.Require().NoError(banktestutil.FundAccount(suite.chainA.GetContext(), suite.chainA.GetSimApp().BankKeeper, escrowAddress2, sdk.NewCoins(coin2))) + s.Require().NoError(banktestutil.FundAccount(s.chainA.GetContext(), s.chainA.GetSimApp().BankKeeper, escrowAddress1, sdk.NewCoins(coin1))) + s.Require().NoError(banktestutil.FundAccount(s.chainA.GetContext(), s.chainA.GetSimApp().BankKeeper, escrowAddress2, sdk.NewCoins(coin2))) }, sdkmath.NewInt(200), }, @@ -274,32 +241,32 @@ func (suite *KeeperTestSuite) TestMigrateTotalEscrowForDenom() { denom = voucherDenom.IBCDenom() // funds the escrow account to have balance - suite.Require().NoError(banktestutil.FundAccount(suite.chainA.GetContext(), 
suite.chainA.GetSimApp().BankKeeper, escrowAddress, sdk.NewCoins(coin))) + s.Require().NoError(banktestutil.FundAccount(s.chainA.GetContext(), s.chainA.GetSimApp().BankKeeper, escrowAddress, sdk.NewCoins(coin))) }, sdkmath.NewInt(100), }, } for _, tc := range testCases { - suite.Run(fmt.Sprintf("Case %s", tc.msg), func() { - suite.SetupTest() // reset + s.Run(fmt.Sprintf("Case %s", tc.msg), func() { + s.SetupTest() // reset - path = ibctesting.NewTransferPath(suite.chainA, suite.chainB) + path = ibctesting.NewTransferPath(s.chainA, s.chainB) path.Setup() tc.malleate() // explicitly fund escrow account - migrator := transferkeeper.NewMigrator(suite.chainA.GetSimApp().TransferKeeper) - suite.Require().NoError(migrator.MigrateTotalEscrowForDenom(suite.chainA.GetContext())) + migrator := transferkeeper.NewMigrator(*s.chainA.GetSimApp().TransferKeeper) + s.Require().NoError(migrator.MigrateTotalEscrowForDenom(s.chainA.GetContext())) // check that the migration set the expected amount for both native and IBC tokens - amount := suite.chainA.GetSimApp().TransferKeeper.GetTotalEscrowForDenom(suite.chainA.GetContext(), denom) - suite.Require().Equal(tc.expectedEscrowAmt, amount.Amount) + amount := s.chainA.GetSimApp().TransferKeeper.GetTotalEscrowForDenom(s.chainA.GetContext(), denom) + s.Require().Equal(tc.expectedEscrowAmt, amount.Amount) }) } } -func (suite *KeeperTestSuite) TestMigratorMigrateMetadata() { +func (s *KeeperTestSuite) TestMigratorMigrateMetadata() { var ( denomTraces []internaltransfertypes.DenomTrace expectedMetadata []banktypes.Metadata @@ -451,31 +418,31 @@ func (suite *KeeperTestSuite) TestMigratorMigrateMetadata() { } // set metadata for one of the tokens, so that it exists already in state before doing the migration - suite.chainA.GetSimApp().BankKeeper.SetDenomMetaData(suite.chainA.GetContext(), expectedMetadata[1]) + s.chainA.GetSimApp().BankKeeper.SetDenomMetaData(s.chainA.GetContext(), expectedMetadata[1]) }, }, } for _, tc := range testCases { - suite.Run(fmt.Sprintf("Case %s", tc.msg), func() { - suite.SetupTest() // reset - ctx := suite.chainA.GetContext() + s.Run(fmt.Sprintf("Case %s", tc.msg), func() { + s.SetupTest() // reset + ctx := s.chainA.GetContext() tc.malleate() for _, denomTrace := range denomTraces { - suite.chainA.GetSimApp().TransferKeeper.SetDenomTrace(ctx, denomTrace) + s.chainA.GetSimApp().TransferKeeper.SetDenomTrace(ctx, denomTrace) } // run migration - migrator := transferkeeper.NewMigrator(suite.chainA.GetSimApp().TransferKeeper) + migrator := transferkeeper.NewMigrator(*s.chainA.GetSimApp().TransferKeeper) err := migrator.MigrateDenomMetadata(ctx) - suite.Require().NoError(err) + s.Require().NoError(err) for _, expMetadata := range expectedMetadata { - denomMetadata, found := suite.chainA.GetSimApp().BankKeeper.GetDenomMetaData(ctx, expMetadata.Base) - suite.Require().True(found) - suite.Require().Equal(expMetadata, denomMetadata) + denomMetadata, found := s.chainA.GetSimApp().BankKeeper.GetDenomMetaData(ctx, expMetadata.Base) + s.Require().True(found) + s.Require().Equal(expMetadata, denomMetadata) } }) } diff --git a/modules/apps/transfer/keeper/msg_server.go b/modules/apps/transfer/keeper/msg_server.go index 0dfcea2378f..50af401910e 100644 --- a/modules/apps/transfer/keeper/msg_server.go +++ b/modules/apps/transfer/keeper/msg_server.go @@ -20,14 +20,14 @@ import ( var _ types.MsgServer = (*Keeper)(nil) // Transfer defines an rpc handler method for MsgTransfer. 
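The Transfer handler below now decodes the sender through the keeper's injected address codec rather than calling sdk.AccAddressFromBech32 directly. A rough sketch of that injection pattern using a toy hex codec; the interface shape and all names here are illustrative assumptions, not the SDK's actual types:

package main

import (
	"encoding/hex"
	"errors"
	"fmt"
)

// addressCodec is a stand-in for the codec the keeper now receives at
// construction time instead of assuming a single address format everywhere.
type addressCodec interface {
	StringToBytes(text string) ([]byte, error)
	BytesToString(bz []byte) (string, error)
}

// hexCodec is a toy implementation; a real chain would plug in a Bech32 codec.
type hexCodec struct{}

func (hexCodec) StringToBytes(text string) ([]byte, error) {
	if text == "" {
		return nil, errors.New("empty address")
	}
	return hex.DecodeString(text)
}

func (hexCodec) BytesToString(bz []byte) (string, error) {
	return hex.EncodeToString(bz), nil
}

type keeper struct {
	addressCodec addressCodec
}

// transfer validates the sender with whichever codec was injected.
func (k keeper) transfer(sender string) error {
	if _, err := k.addressCodec.StringToBytes(sender); err != nil {
		return fmt.Errorf("invalid sender %q: %w", sender, err)
	}
	return nil
}

func main() {
	k := keeper{addressCodec: hexCodec{}}
	fmt.Println(k.transfer("deadbeef")) // <nil>
	fmt.Println(k.transfer("not-hex"))  // decoding error
}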
-func (k Keeper) Transfer(goCtx context.Context, msg *types.MsgTransfer) (*types.MsgTransferResponse, error) { +func (k *Keeper) Transfer(goCtx context.Context, msg *types.MsgTransfer) (*types.MsgTransferResponse, error) { ctx := sdk.UnwrapSDKContext(goCtx) if !k.GetParams(ctx).SendEnabled { return nil, types.ErrSendDisabled } - sender, err := sdk.AccAddressFromBech32(msg.Sender) + sender, err := k.addressCodec.StringToBytes(msg.Sender) if err != nil { return nil, err } @@ -47,26 +47,27 @@ func (k Keeper) Transfer(goCtx context.Context, msg *types.MsgTransfer) (*types. return nil, err } - packetData := types.NewFungibleTokenPacketData(token.Denom.Path(), token.Amount, sender.String(), msg.Receiver, msg.Memo) + packetData := types.NewFungibleTokenPacketData(token.Denom.Path(), token.Amount, msg.Sender, msg.Receiver, msg.Memo) if err := packetData.ValidateBasic(); err != nil { return nil, errorsmod.Wrapf(err, "failed to validate %s packet data", types.V1) } - // if a channel exists with source channel, then use IBC V1 protocol - // otherwise use IBC V2 protocol - channel, isIBCV1 := k.channelKeeper.GetChannel(ctx, msg.SourcePort, msg.SourceChannel) + // if the channel does not exist, or we are using channel aliasing then use IBC V2 protocol + // otherwise use IBC V1 protocol + channel, hasChannel := k.channelKeeper.GetChannel(ctx, msg.SourcePort, msg.SourceChannel) + isIBCV2 := !hasChannel || msg.UseAliasing var sequence uint64 - if isIBCV1 { - // if a V1 channel exists for the source channel, then use IBC V1 protocol - sequence, err = k.transferV1Packet(ctx, msg.SourceChannel, token, msg.TimeoutHeight, msg.TimeoutTimestamp, packetData) - // telemetry for transfer occurs here, in IBC V2 this is done in the onSendPacket callback - telemetry.ReportTransfer(msg.SourcePort, msg.SourceChannel, channel.Counterparty.PortId, channel.Counterparty.ChannelId, token) - } else { + if isIBCV2 { // otherwise try to send an IBC V2 packet, if the sourceChannel is not a IBC V2 client // then core IBC will return a CounterpartyNotFound error sequence, err = k.transferV2Packet(ctx, msg.Encoding, msg.SourceChannel, msg.TimeoutTimestamp, packetData) + } else { + // if a V1 channel exists for the source channel, then use IBC V1 protocol + sequence, err = k.transferV1Packet(ctx, msg.SourceChannel, token, msg.TimeoutHeight, msg.TimeoutTimestamp, sender, packetData) + // telemetry for transfer occurs here, in IBC V2 this is done in the onSendPacket callback + telemetry.ReportTransfer(msg.SourcePort, msg.SourceChannel, channel.Counterparty.PortId, channel.Counterparty.ChannelId, token) } if err != nil { return nil, err @@ -77,8 +78,8 @@ func (k Keeper) Transfer(goCtx context.Context, msg *types.MsgTransfer) (*types. 
return &types.MsgTransferResponse{Sequence: sequence}, nil } -func (k Keeper) transferV1Packet(ctx sdk.Context, sourceChannel string, token types.Token, timeoutHeight clienttypes.Height, timeoutTimestamp uint64, packetData types.FungibleTokenPacketData) (uint64, error) { - if err := k.SendTransfer(ctx, types.PortID, sourceChannel, token, sdk.MustAccAddressFromBech32(packetData.Sender)); err != nil { +func (k *Keeper) transferV1Packet(ctx sdk.Context, sourceChannel string, token types.Token, timeoutHeight clienttypes.Height, timeoutTimestamp uint64, sender sdk.AccAddress, packetData types.FungibleTokenPacketData) (uint64, error) { + if err := k.SendTransfer(ctx, types.PortID, sourceChannel, token, sender); err != nil { return 0, err } @@ -93,7 +94,7 @@ func (k Keeper) transferV1Packet(ctx sdk.Context, sourceChannel string, token ty return sequence, nil } -func (k Keeper) transferV2Packet(ctx sdk.Context, encoding, sourceChannel string, timeoutTimestamp uint64, packetData types.FungibleTokenPacketData) (uint64, error) { +func (k *Keeper) transferV2Packet(ctx sdk.Context, encoding, sourceChannel string, timeoutTimestamp uint64, packetData types.FungibleTokenPacketData) (uint64, error) { if encoding == "" { encoding = types.EncodingJSON } @@ -139,7 +140,7 @@ func (k Keeper) transferV2Packet(ctx sdk.Context, encoding, sourceChannel string } // UpdateParams defines an rpc handler method for MsgUpdateParams. Updates the ibc-transfer module's parameters. -func (k Keeper) UpdateParams(goCtx context.Context, msg *types.MsgUpdateParams) (*types.MsgUpdateParamsResponse, error) { +func (k *Keeper) UpdateParams(goCtx context.Context, msg *types.MsgUpdateParams) (*types.MsgUpdateParamsResponse, error) { if k.GetAuthority() != msg.Signer { return nil, errorsmod.Wrapf(ibcerrors.ErrUnauthorized, "expected %s, got %s", k.GetAuthority(), msg.Signer) } diff --git a/modules/apps/transfer/keeper/msg_server_test.go b/modules/apps/transfer/keeper/msg_server_test.go index a0718a78670..66785fdedf7 100644 --- a/modules/apps/transfer/keeper/msg_server_test.go +++ b/modules/apps/transfer/keeper/msg_server_test.go @@ -19,7 +19,7 @@ import ( ) // TestMsgTransfer tests Transfer rpc handler -func (suite *KeeperTestSuite) TestMsgTransfer() { +func (s *KeeperTestSuite) TestMsgTransfer() { var msg *types.MsgTransfer var path *ibctesting.Path @@ -38,7 +38,7 @@ func (suite *KeeperTestSuite) TestMsgTransfer() { { "bank send enabled for denoms", func() { - err := suite.chainA.GetSimApp().BankKeeper.SetParams(suite.chainA.GetContext(), + err := s.chainA.GetSimApp().BankKeeper.SetParams(s.chainA.GetContext(), banktypes.Params{ SendEnabled: []*banktypes.SendEnabled{ {Denom: sdk.DefaultBondDenom, Enabled: true}, @@ -46,14 +46,14 @@ func (suite *KeeperTestSuite) TestMsgTransfer() { }, }, ) - suite.Require().NoError(err) + s.Require().NoError(err) }, nil, }, { "failure: send transfers disabled", func() { - suite.chainA.GetSimApp().TransferKeeper.SetParams(suite.chainA.GetContext(), + s.chainA.GetSimApp().TransferKeeper.SetParams(s.chainA.GetContext(), types.Params{ SendEnabled: false, }, @@ -61,6 +61,13 @@ func (suite *KeeperTestSuite) TestMsgTransfer() { }, types.ErrSendDisabled, }, + { + "failure: zero amount", + func() { + msg.Token = sdk.NewInt64Coin(sdk.DefaultBondDenom, 0) + }, + types.ErrInvalidAmount, + }, { "failure: invalid sender", func() { @@ -71,19 +78,19 @@ func (suite *KeeperTestSuite) TestMsgTransfer() { { "failure: sender is a blocked address", func() { - msg.Sender = 
suite.chainA.GetSimApp().AccountKeeper.GetModuleAddress(minttypes.ModuleName).String() + msg.Sender = s.chainA.GetSimApp().AccountKeeper.GetModuleAddress(minttypes.ModuleName).String() }, ibcerrors.ErrUnauthorized, }, { "failure: bank send disabled", func() { - err := suite.chainA.GetSimApp().BankKeeper.SetParams(suite.chainA.GetContext(), + err := s.chainA.GetSimApp().BankKeeper.SetParams(s.chainA.GetContext(), banktypes.Params{ SendEnabled: []*banktypes.SendEnabled{{Denom: sdk.DefaultBondDenom, Enabled: false}}, }, ) - suite.Require().NoError(err) + s.Require().NoError(err) }, types.ErrSendDisabled, }, @@ -97,36 +104,36 @@ func (suite *KeeperTestSuite) TestMsgTransfer() { } for _, tc := range testCases { - suite.Run(tc.name, func() { - suite.SetupTest() + s.Run(tc.name, func() { + s.SetupTest() - path = ibctesting.NewTransferPath(suite.chainA, suite.chainB) + path = ibctesting.NewTransferPath(s.chainA, s.chainB) path.Setup() msg = types.NewMsgTransfer( path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, ibctesting.TestCoin, - suite.chainA.SenderAccount.GetAddress().String(), - suite.chainB.SenderAccount.GetAddress().String(), - clienttypes.Height{}, suite.chainB.GetTimeoutTimestamp(), // only use timeout height + s.chainA.SenderAccount.GetAddress().String(), + s.chainB.SenderAccount.GetAddress().String(), + clienttypes.Height{}, s.chainB.GetTimeoutTimestamp(), // only use timeout height "memo", ) // send some coins of the second denom from bank module to the sender account as well - err := suite.chainA.GetSimApp().BankKeeper.MintCoins(suite.chainA.GetContext(), types.ModuleName, sdk.NewCoins(ibctesting.SecondaryTestCoin)) - suite.Require().NoError(err) - err = suite.chainA.GetSimApp().BankKeeper.SendCoinsFromModuleToAccount(suite.chainA.GetContext(), types.ModuleName, suite.chainA.SenderAccount.GetAddress(), sdk.NewCoins(ibctesting.SecondaryTestCoin)) - suite.Require().NoError(err) + err := s.chainA.GetSimApp().BankKeeper.MintCoins(s.chainA.GetContext(), types.ModuleName, sdk.NewCoins(ibctesting.SecondaryTestCoin)) + s.Require().NoError(err) + err = s.chainA.GetSimApp().BankKeeper.SendCoinsFromModuleToAccount(s.chainA.GetContext(), types.ModuleName, s.chainA.SenderAccount.GetAddress(), sdk.NewCoins(ibctesting.SecondaryTestCoin)) + s.Require().NoError(err) tc.malleate() - ctx := suite.chainA.GetContext() + ctx := s.chainA.GetContext() - token, err := suite.chainA.GetSimApp().TransferKeeper.TokenFromCoin(ctx, msg.Token) - suite.Require().NoError(err) + token, err := s.chainA.GetSimApp().TransferKeeper.TokenFromCoin(ctx, msg.Token) + s.Require().NoError(err) - res, err := suite.chainA.GetSimApp().TransferKeeper.Transfer(ctx, msg) + res, err := s.chainA.GetSimApp().TransferKeeper.Transfer(ctx, msg) // Verify events var expEvents []abci.Event @@ -147,21 +154,21 @@ func (suite *KeeperTestSuite) TestMsgTransfer() { }.ToABCIEvents() if tc.expError == nil { - suite.Require().NoError(err) - suite.Require().NotNil(res) - suite.Require().NotEqual(res.Sequence, uint64(0)) - ibctesting.AssertEvents(&suite.Suite, expEvents, events) + s.Require().NoError(err) + s.Require().NotNil(res) + s.Require().NotEqual(res.Sequence, uint64(0)) + ibctesting.AssertEvents(&s.Suite, expEvents, events) } else { - suite.Require().Nil(res) - suite.Require().True(errors.Is(err, tc.expError) || strings.Contains(err.Error(), tc.expError.Error()), err.Error()) - suite.Require().Len(events, 0) + s.Require().Nil(res) + s.Require().True(errors.Is(err, tc.expError) || strings.Contains(err.Error(), tc.expError.Error()), 
err.Error()) + s.Require().Len(events, 0) } }) } } // TestMsgTransfer tests Transfer rpc handler with IBC V2 protocol -func (suite *KeeperTestSuite) TestMsgTransferIBCV2() { +func (s *KeeperTestSuite) TestMsgTransferIBCV2() { var msg *types.MsgTransfer var path *ibctesting.Path @@ -180,7 +187,7 @@ func (suite *KeeperTestSuite) TestMsgTransferIBCV2() { { "bank send enabled for denoms", func() { - err := suite.chainA.GetSimApp().BankKeeper.SetParams(suite.chainA.GetContext(), + err := s.chainA.GetSimApp().BankKeeper.SetParams(s.chainA.GetContext(), banktypes.Params{ SendEnabled: []*banktypes.SendEnabled{ {Denom: sdk.DefaultBondDenom, Enabled: true}, @@ -188,14 +195,14 @@ func (suite *KeeperTestSuite) TestMsgTransferIBCV2() { }, }, ) - suite.Require().NoError(err) + s.Require().NoError(err) }, nil, }, { "failure: send transfers disabled", func() { - suite.chainA.GetSimApp().TransferKeeper.SetParams(suite.chainA.GetContext(), + s.chainA.GetSimApp().TransferKeeper.SetParams(s.chainA.GetContext(), types.Params{ SendEnabled: false, }, @@ -213,19 +220,19 @@ func (suite *KeeperTestSuite) TestMsgTransferIBCV2() { { "failure: sender is a blocked address", func() { - msg.Sender = suite.chainA.GetSimApp().AccountKeeper.GetModuleAddress(minttypes.ModuleName).String() + msg.Sender = s.chainA.GetSimApp().AccountKeeper.GetModuleAddress(minttypes.ModuleName).String() }, ibcerrors.ErrUnauthorized, }, { "failure: bank send disabled", func() { - err := suite.chainA.GetSimApp().BankKeeper.SetParams(suite.chainA.GetContext(), + err := s.chainA.GetSimApp().BankKeeper.SetParams(s.chainA.GetContext(), banktypes.Params{ SendEnabled: []*banktypes.SendEnabled{{Denom: sdk.DefaultBondDenom, Enabled: false}}, }, ) - suite.Require().NoError(err) + s.Require().NoError(err) }, types.ErrSendDisabled, }, @@ -239,38 +246,38 @@ func (suite *KeeperTestSuite) TestMsgTransferIBCV2() { } for _, tc := range testCases { - suite.Run(tc.name, func() { - suite.SetupTest() + s.Run(tc.name, func() { + s.SetupTest() - path = ibctesting.NewPath(suite.chainA, suite.chainB) + path = ibctesting.NewPath(s.chainA, s.chainB) path.SetupV2() - timeoutTimestamp := uint64(suite.chainA.GetContext().BlockTime().Add(time.Hour).Unix()) + timeoutTimestamp := uint64(s.chainA.GetContext().BlockTime().Add(time.Hour).Unix()) msg = types.NewMsgTransfer( types.PortID, path.EndpointA.ClientID, // use eureka client id ibctesting.TestCoin, - suite.chainA.SenderAccount.GetAddress().String(), - suite.chainB.SenderAccount.GetAddress().String(), + s.chainA.SenderAccount.GetAddress().String(), + s.chainB.SenderAccount.GetAddress().String(), clienttypes.Height{}, timeoutTimestamp, // only use timeout timestamp "memo", ) // send some coins of the second denom from bank module to the sender account as well - err := suite.chainA.GetSimApp().BankKeeper.MintCoins(suite.chainA.GetContext(), types.ModuleName, sdk.NewCoins(ibctesting.SecondaryTestCoin)) - suite.Require().NoError(err) - err = suite.chainA.GetSimApp().BankKeeper.SendCoinsFromModuleToAccount(suite.chainA.GetContext(), types.ModuleName, suite.chainA.SenderAccount.GetAddress(), sdk.NewCoins(ibctesting.SecondaryTestCoin)) - suite.Require().NoError(err) + err := s.chainA.GetSimApp().BankKeeper.MintCoins(s.chainA.GetContext(), types.ModuleName, sdk.NewCoins(ibctesting.SecondaryTestCoin)) + s.Require().NoError(err) + err = s.chainA.GetSimApp().BankKeeper.SendCoinsFromModuleToAccount(s.chainA.GetContext(), types.ModuleName, s.chainA.SenderAccount.GetAddress(), sdk.NewCoins(ibctesting.SecondaryTestCoin)) + 
s.Require().NoError(err) tc.malleate() - ctx := suite.chainA.GetContext() + ctx := s.chainA.GetContext() - token, err := suite.chainA.GetSimApp().TransferKeeper.TokenFromCoin(ctx, msg.Token) - suite.Require().NoError(err) + token, err := s.chainA.GetSimApp().TransferKeeper.TokenFromCoin(ctx, msg.Token) + s.Require().NoError(err) - res, err := suite.chainA.GetSimApp().TransferKeeper.Transfer(ctx, msg) + res, err := s.chainA.GetSimApp().TransferKeeper.Transfer(ctx, msg) // Verify events var expEvents []abci.Event @@ -291,21 +298,21 @@ func (suite *KeeperTestSuite) TestMsgTransferIBCV2() { }.ToABCIEvents() if tc.expError == nil { - suite.Require().NoError(err) - suite.Require().NotNil(res) - suite.Require().NotEqual(res.Sequence, uint64(0)) - ibctesting.AssertEvents(&suite.Suite, expEvents, events) + s.Require().NoError(err) + s.Require().NotNil(res) + s.Require().NotEqual(res.Sequence, uint64(0)) + ibctesting.AssertEvents(&s.Suite, expEvents, events) } else { - suite.Require().Nil(res) - suite.Require().True(errors.Is(err, tc.expError) || strings.Contains(err.Error(), tc.expError.Error()), err.Error()) + s.Require().Nil(res) + s.Require().True(errors.Is(err, tc.expError) || strings.Contains(err.Error(), tc.expError.Error()), err.Error()) } }) } } // TestUpdateParams tests UpdateParams rpc handler -func (suite *KeeperTestSuite) TestUpdateParams() { - signer := suite.chainA.GetSimApp().TransferKeeper.GetAuthority() +func (s *KeeperTestSuite) TestUpdateParams() { + signer := s.chainA.GetSimApp().TransferKeeper.GetAuthority() testCases := []struct { name string msg *types.MsgUpdateParams @@ -339,13 +346,13 @@ func (suite *KeeperTestSuite) TestUpdateParams() { } for _, tc := range testCases { - suite.Run(tc.name, func() { - suite.SetupTest() - _, err := suite.chainA.GetSimApp().TransferKeeper.UpdateParams(suite.chainA.GetContext(), tc.msg) + s.Run(tc.name, func() { + s.SetupTest() + _, err := s.chainA.GetSimApp().TransferKeeper.UpdateParams(s.chainA.GetContext(), tc.msg) if tc.expErr == nil { - suite.Require().NoError(err) + s.Require().NoError(err) } else { - suite.Require().ErrorIs(err, tc.expErr) + s.Require().ErrorIs(err, tc.expErr) } }) } diff --git a/modules/apps/transfer/keeper/relay.go b/modules/apps/transfer/keeper/relay.go index 73233a45aec..308fa160144 100644 --- a/modules/apps/transfer/keeper/relay.go +++ b/modules/apps/transfer/keeper/relay.go @@ -47,7 +47,7 @@ import ( // 4. A -> C : sender chain is sink zone. Denom upon receiving: 'C/B/denom' // 5. C -> B : sender chain is sink zone. Denom upon receiving: 'B/denom' // 6. B -> A : sender chain is sink zone. Denom upon receiving: 'denom' -func (k Keeper) SendTransfer( +func (k *Keeper) SendTransfer( ctx sdk.Context, sourcePort, sourceChannel string, @@ -91,7 +91,7 @@ func (k Keeper) SendTransfer( // NOTE: should not happen as the module account was // retrieved on the step above and it has enough balance // to burn. - panic(fmt.Errorf("cannot burn coins after a successful send to a module account: %v", err)) + panic(fmt.Errorf("cannot burn coins after a successful send to a module account: %w", err)) } } else { // obtain the escrow address for the source channel end @@ -110,7 +110,7 @@ func (k Keeper) SendTransfer( // and sent to the receiving address. Otherwise if the sender chain is sending // back tokens this chain originally transferred to it, the tokens are // unescrowed and sent to the receiving address. 
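As a worked illustration of the source/sink behaviour the comments above describe, here is a string-only sketch of the receiver-side denom handling: if the incoming denom carries the packet's source port/channel prefix the token is returning home, so the prefix is stripped (tokens would be unescrowed); otherwise the receiver is a sink, prepends its own port/channel, and a voucher would be minted. This is a hedged sketch, not the module's actual Denom handling:

package main

import (
	"fmt"
	"strings"
)

// receiveDenom returns the denom as the receiving chain records it, plus
// whether the receiver is the original source of the token.
func receiveDenom(denom, srcPort, srcChan, dstPort, dstChan string) (string, bool) {
	prefix := srcPort + "/" + srcChan + "/"
	if strings.HasPrefix(denom, prefix) {
		return strings.TrimPrefix(denom, prefix), true // token is returning home
	}
	return dstPort + "/" + dstChan + "/" + denom, false // receiver is a sink
}

func main() {
	// A -> B across A:channel-0 <-> B:channel-1; B is a sink and adds its own prefix
	d, _ := receiveDenom("uatom", "transfer", "channel-0", "transfer", "channel-1")
	fmt.Println(d) // transfer/channel-1/uatom

	// B -> A with the voucher: the denom carries B's prefix, which matches the
	// packet source, so A strips it and treats the token as returning home
	d, isSource := receiveDenom("transfer/channel-1/uatom", "transfer", "channel-1", "transfer", "channel-0")
	fmt.Println(d, isSource) // uatom true
}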
-func (k Keeper) OnRecvPacket( +func (k *Keeper) OnRecvPacket( ctx sdk.Context, data types.InternalTransferRepresentation, sourcePort string, @@ -127,7 +127,7 @@ func (k Keeper) OnRecvPacket( return types.ErrReceiveDisabled } - receiver, err := sdk.AccAddressFromBech32(data.Receiver) + receiver, err := k.addressCodec.StringToBytes(data.Receiver) if err != nil { return errorsmod.Wrapf(ibcerrors.ErrInvalidAddress, "failed to decode receiver address: %s", data.Receiver) } @@ -195,9 +195,8 @@ func (k Keeper) OnRecvPacket( if err := k.BankKeeper.SendCoins( ctx, moduleAddr, receiver, sdk.NewCoins(voucher), ); err != nil { - return errorsmod.Wrapf(err, "failed to send coins to receiver %s", receiver.String()) + return errorsmod.Wrapf(err, "failed to send coins to receiver %s", data.Receiver) } - } // The ibc_module.go module will return the proper ack. @@ -209,7 +208,7 @@ func (k Keeper) OnRecvPacket( // // If the acknowledgement was a success then nothing occurs. Otherwise, // if the acknowledgement failed, then the sender is refunded their tokens. -func (k Keeper) OnAcknowledgementPacket( +func (k *Keeper) OnAcknowledgementPacket( ctx sdk.Context, sourcePort string, sourceChannel string, @@ -232,7 +231,7 @@ func (k Keeper) OnAcknowledgementPacket( } // OnTimeoutPacket processes a transfer packet timeout by refunding the tokens to the sender -func (k Keeper) OnTimeoutPacket( +func (k *Keeper) OnTimeoutPacket( ctx sdk.Context, sourcePort string, sourceChannel string, @@ -245,7 +244,7 @@ func (k Keeper) OnTimeoutPacket( // if the sending chain was the source chain. Otherwise, the sent token // were burnt in the original send so new tokens are minted and sent to // the sending address. -func (k Keeper) refundPacketTokens( +func (k *Keeper) refundPacketTokens( ctx sdk.Context, sourcePort string, sourceChannel string, @@ -253,7 +252,7 @@ func (k Keeper) refundPacketTokens( ) error { // NOTE: packet data type already checked in handler.go - sender, err := sdk.AccAddressFromBech32(data.Sender) + sender, err := k.addressCodec.StringToBytes(data.Sender) if err != nil { return err } @@ -282,7 +281,7 @@ func (k Keeper) refundPacketTokens( } if err := k.BankKeeper.SendCoins(ctx, moduleAccountAddr, sender, sdk.NewCoins(coin)); err != nil { - panic(fmt.Errorf("unable to send coins from module to account despite previously minting coins to module account: %v", err)) + panic(fmt.Errorf("unable to send coins from module to account despite previously minting coins to module account: %w", err)) } } else { if err := k.UnescrowCoin(ctx, escrowAddress, sender, coin); err != nil { @@ -295,7 +294,7 @@ func (k Keeper) refundPacketTokens( // EscrowCoin will send the given coin from the provided sender to the escrow address. It will also // update the total escrowed amount by adding the escrowed coin's amount to the current total escrow. -func (k Keeper) EscrowCoin(ctx sdk.Context, sender, escrowAddress sdk.AccAddress, coin sdk.Coin) error { +func (k *Keeper) EscrowCoin(ctx sdk.Context, sender, escrowAddress sdk.AccAddress, coin sdk.Coin) error { if err := k.BankKeeper.SendCoins(ctx, sender, escrowAddress, sdk.NewCoins(coin)); err != nil { // failure is expected for insufficient balances return err @@ -311,7 +310,7 @@ func (k Keeper) EscrowCoin(ctx sdk.Context, sender, escrowAddress sdk.AccAddress // UnescrowCoin will send the given coin from the escrow address to the provided receiver. It will also // update the total escrow by deducting the unescrowed coin's amount from the current total escrow. 
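EscrowCoin and UnescrowCoin pair the bank transfer with an update to the per-denom escrow total. A small sketch of that bookkeeping invariant, with hypothetical names: escrow adds to the total, unescrow deducts from it and must never exceed it (the keeper treats that case as a bug or malicious counterparty):

package main

import (
	"errors"
	"fmt"
)

// escrowBook tracks the total amount held in escrow per denom.
type escrowBook map[string]int64

func (b escrowBook) escrow(denom string, amount int64) {
	b[denom] += amount
}

func (b escrowBook) unescrow(denom string, amount int64) error {
	if b[denom] < amount {
		// should only happen on a bug or a malicious counterparty
		return errors.New("unescrow amount exceeds total escrowed")
	}
	b[denom] -= amount
	if b[denom] == 0 {
		delete(b, denom)
	}
	return nil
}

func main() {
	book := escrowBook{}
	book.escrow("uatom", 100)
	fmt.Println(book.unescrow("uatom", 60), book["uatom"]) // <nil> 40
	fmt.Println(book.unescrow("uatom", 100))               // error: exceeds total
}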
-func (k Keeper) UnescrowCoin(ctx sdk.Context, escrowAddress, receiver sdk.AccAddress, coin sdk.Coin) error { +func (k *Keeper) UnescrowCoin(ctx sdk.Context, escrowAddress, receiver sdk.AccAddress, coin sdk.Coin) error { if err := k.BankKeeper.SendCoins(ctx, escrowAddress, receiver, sdk.NewCoins(coin)); err != nil { // NOTE: this error is only expected to occur given an unexpected bug or a malicious // counterparty module. The bug may occur in bank or any part of the code that allows @@ -329,7 +328,7 @@ func (k Keeper) UnescrowCoin(ctx sdk.Context, escrowAddress, receiver sdk.AccAdd } // tokenFromCoin constructs an IBC token given an SDK coin. -func (k Keeper) TokenFromCoin(ctx sdk.Context, coin sdk.Coin) (types.Token, error) { +func (k *Keeper) TokenFromCoin(ctx sdk.Context, coin sdk.Coin) (types.Token, error) { // if the coin does not have an IBC denom, return as is if !strings.HasPrefix(coin.Denom, "ibc/") { return types.Token{ @@ -352,7 +351,7 @@ func (k Keeper) TokenFromCoin(ctx sdk.Context, coin sdk.Coin) (types.Token, erro // GetDenomFromIBCDenom returns the `Denom` given the IBC Denom (ibc/{hex hash}) of the denomination. // The ibcDenom is the hex hash of the denomination prefixed by "ibc/", often referred to as the IBC denom. -func (k Keeper) GetDenomFromIBCDenom(ctx sdk.Context, ibcDenom string) (types.Denom, error) { +func (k *Keeper) GetDenomFromIBCDenom(ctx sdk.Context, ibcDenom string) (types.Denom, error) { hexHash := ibcDenom[len(types.DenomPrefix+"/"):] hash, err := types.ParseHexHash(hexHash) @@ -371,7 +370,7 @@ func (k Keeper) GetDenomFromIBCDenom(ctx sdk.Context, ibcDenom string) (types.De // Deprecated: usage of this function should be replaced by `Keeper.GetDenomFromIBCDenom` // DenomPathFromHash returns the full denomination path prefix from an ibc denom with a hash // component. -func (k Keeper) DenomPathFromHash(ctx sdk.Context, ibcDenom string) (string, error) { +func (k *Keeper) DenomPathFromHash(ctx sdk.Context, ibcDenom string) (string, error) { denom, err := k.GetDenomFromIBCDenom(ctx, ibcDenom) if err != nil { return "", err diff --git a/modules/apps/transfer/keeper/relay_test.go b/modules/apps/transfer/keeper/relay_test.go index 25e31649d58..db469165358 100644 --- a/modules/apps/transfer/keeper/relay_test.go +++ b/modules/apps/transfer/keeper/relay_test.go @@ -1,6 +1,7 @@ package keeper_test import ( + "encoding/hex" "errors" "fmt" "strings" @@ -32,7 +33,7 @@ var ( // TestSendTransfer tests sending from chainA to chainB using both coin // that originate on chainA and coin that originate on chainB. 
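GetDenomFromIBCDenom and ParseHexHash above rely on the standard ICS-20 scheme in which an `ibc/...` denom carries the hex-encoded SHA-256 hash of the full trace path. A short sketch of that relationship, assuming that scheme:

package main

import (
	"crypto/sha256"
	"fmt"
)

// ibcDenom derives the on-chain voucher denom from a full trace path such as
// "transfer/channel-0/uatom": "ibc/" followed by the uppercase hex SHA-256 of
// the path.
func ibcDenom(fullPath string) string {
	sum := sha256.Sum256([]byte(fullPath))
	return "ibc/" + fmt.Sprintf("%X", sum[:])
}

func main() {
	d := ibcDenom("transfer/channel-0/uatom")
	fmt.Println(d)
	// the reverse lookup slices off the "ibc/" prefix and parses the hex hash
	fmt.Println(d[len("ibc/"):])
}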
-func (suite *KeeperTestSuite) TestSendTransfer() { +func (s *KeeperTestSuite) TestSendTransfer() { var ( coin sdk.Coin path *ibctesting.Path @@ -47,19 +48,19 @@ func (suite *KeeperTestSuite) TestSendTransfer() { expError error }{ { - "successful transfer of native token", + "success: transfer of native token", func() {}, nil, }, { - "successful transfer of native token with memo", + "success: transfer of native token with memo", func() { memo = "memo" //nolint:goconst }, nil, }, { - "successful transfer of IBC token", + "success: transfer of IBC token", func() { // send IBC token back to chainB denom := types.NewDenom(ibctesting.TestCoin.Denom, types.NewHop(path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID)) @@ -70,7 +71,7 @@ func (suite *KeeperTestSuite) TestSendTransfer() { nil, }, { - "successful transfer of IBC token with memo", + "success: transfer of IBC token with memo", func() { // send IBC token back to chainB denom := types.NewDenom(ibctesting.TestCoin.Denom, types.NewHop(path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID)) @@ -82,37 +83,37 @@ func (suite *KeeperTestSuite) TestSendTransfer() { nil, }, { - "successful transfer of entire balance", + "success: transfer of entire balance", func() { coin = sdk.NewCoin(coin.Denom, types.UnboundedSpendLimit()) var ok bool expEscrowAmount, ok = sdkmath.NewIntFromString(ibctesting.DefaultGenesisAccBalance) - suite.Require().True(ok) + s.Require().True(ok) }, nil, }, { - "successful transfer of entire spendable balance with vesting account", + "success: transfer of entire spendable balance with vesting account", func() { // create vesting account vestingAccPrivKey := secp256k1.GenPrivKey() vestingAccAddress := sdk.AccAddress(vestingAccPrivKey.PubKey().Address()) vestingCoins := sdk.NewCoins(sdk.NewCoin(coin.Denom, ibctesting.DefaultCoinAmount)) - _, err := suite.chainA.SendMsgs(vestingtypes.NewMsgCreateVestingAccount( - suite.chainA.SenderAccount.GetAddress(), + _, err := s.chainA.SendMsgs(vestingtypes.NewMsgCreateVestingAccount( + s.chainA.SenderAccount.GetAddress(), vestingAccAddress, vestingCoins, - suite.chainA.GetContext().BlockTime().Add(time.Hour).Unix(), + s.chainA.GetContext().BlockTime().Add(time.Hour).Unix(), false, )) - suite.Require().NoError(err) + s.Require().NoError(err) sender = vestingAccAddress // transfer some spendable coins to vesting account transferCoin := sdk.NewCoin(coin.Denom, sdkmath.NewInt(42)) - _, err = suite.chainA.SendMsgs(banktypes.NewMsgSend(suite.chainA.SenderAccount.GetAddress(), vestingAccAddress, sdk.NewCoins(transferCoin))) - suite.Require().NoError(err) + _, err = s.chainA.SendMsgs(banktypes.NewMsgSend(s.chainA.SenderAccount.GetAddress(), vestingAccAddress, sdk.NewCoins(transferCoin))) + s.Require().NoError(err) coin = sdk.NewCoin(coin.Denom, types.UnboundedSpendLimit()) expEscrowAmount = transferCoin.Amount @@ -127,21 +128,21 @@ func (suite *KeeperTestSuite) TestSendTransfer() { vestingAccAddress := sdk.AccAddress(vestingAccPrivKey.PubKey().Address()) vestingCoin := sdk.NewCoin(coin.Denom, ibctesting.DefaultCoinAmount) - _, err := suite.chainA.SendMsgs(vestingtypes.NewMsgCreateVestingAccount( - suite.chainA.SenderAccount.GetAddress(), + _, err := s.chainA.SendMsgs(vestingtypes.NewMsgCreateVestingAccount( + s.chainA.SenderAccount.GetAddress(), vestingAccAddress, sdk.NewCoins(vestingCoin), - suite.chainA.GetContext().BlockTime().Add(time.Hour).Unix(), + s.chainA.GetContext().BlockTime().Add(time.Hour).Unix(), false, )) - suite.Require().NoError(err) + s.Require().NoError(err) 
sender = vestingAccAddress // just to prove that the vesting account has a balance (but not spendable) - vestingAccBalance := suite.chainA.GetSimApp().BankKeeper.GetBalance(suite.chainA.GetContext(), vestingAccAddress, coin.Denom) - suite.Require().Equal(vestingCoin.Amount.Int64(), vestingAccBalance.Amount.Int64()) - vestinSpendableBalance := suite.chainA.GetSimApp().BankKeeper.SpendableCoins(suite.chainA.GetContext(), vestingAccAddress) - suite.Require().Zero(vestinSpendableBalance.AmountOf(coin.Denom).Int64()) + vestingAccBalance := s.chainA.GetSimApp().BankKeeper.GetBalance(s.chainA.GetContext(), vestingAccAddress, coin.Denom) + s.Require().Equal(vestingCoin.Amount.Int64(), vestingAccBalance.Amount.Int64()) + vestinSpendableBalance := s.chainA.GetSimApp().BankKeeper.SpendableCoins(s.chainA.GetContext(), vestingAccAddress) + s.Require().Zero(vestinSpendableBalance.AmountOf(coin.Denom).Int64()) coin = sdk.NewCoin(coin.Denom, types.UnboundedSpendLimit()) }, @@ -150,7 +151,7 @@ func (suite *KeeperTestSuite) TestSendTransfer() { { "failure: sender account is blocked", func() { - sender = suite.chainA.GetSimApp().AccountKeeper.GetModuleAddress(minttypes.ModuleName) + sender = s.chainA.GetSimApp().AccountKeeper.GetModuleAddress(minttypes.ModuleName) }, ibcerrors.ErrUnauthorized, }, @@ -180,27 +181,27 @@ func (suite *KeeperTestSuite) TestSendTransfer() { } for _, tc := range testCases { - suite.Run(tc.name, func() { - suite.SetupTest() // reset + s.Run(tc.name, func() { + s.SetupTest() // reset - path = ibctesting.NewTransferPath(suite.chainA, suite.chainB) + path = ibctesting.NewTransferPath(s.chainA, s.chainB) path.Setup() // create IBC token on chainA - transferMsg := types.NewMsgTransfer(path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, ibctesting.TestCoin, suite.chainB.SenderAccount.GetAddress().String(), suite.chainA.SenderAccount.GetAddress().String(), suite.chainA.GetTimeoutHeight(), 0, "") + transferMsg := types.NewMsgTransfer(path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, ibctesting.TestCoin, s.chainB.SenderAccount.GetAddress().String(), s.chainA.SenderAccount.GetAddress().String(), s.chainA.GetTimeoutHeight(), 0, "") - result, err := suite.chainB.SendMsgs(transferMsg) - suite.Require().NoError(err) // message committed + result, err := s.chainB.SendMsgs(transferMsg) + s.Require().NoError(err) // message committed packet, err := ibctesting.ParseV1PacketFromEvents(result.Events) - suite.Require().NoError(err) + s.Require().NoError(err) err = path.RelayPacket(packet) - suite.Require().NoError(err) + s.Require().NoError(err) // Value that can malleated for Transfer we are testing. 
coin = ibctesting.TestCoin - sender = suite.chainA.SenderAccount.GetAddress() + sender = s.chainA.SenderAccount.GetAddress() memo = "" expEscrowAmount = defaultAmount @@ -211,31 +212,31 @@ func (suite *KeeperTestSuite) TestSendTransfer() { path.EndpointA.ChannelID, coin, sender.String(), - suite.chainB.SenderAccount.GetAddress().String(), - suite.chainB.GetTimeoutHeight(), 0, // only use timeout height + s.chainB.SenderAccount.GetAddress().String(), + s.chainB.GetTimeoutHeight(), 0, // only use timeout height memo, ) - res, err := suite.chainA.GetSimApp().TransferKeeper.Transfer(suite.chainA.GetContext(), msg) + res, err := s.chainA.GetSimApp().TransferKeeper.Transfer(s.chainA.GetContext(), msg) if tc.expError == nil { - suite.Require().NoError(err) - suite.Require().NotNil(res) + s.Require().NoError(err) + s.Require().NotNil(res) } else { - suite.Require().Nil(res) - suite.Require().Error(err) - suite.Require().ErrorIs(err, tc.expError) + s.Require().Nil(res) + s.Require().Error(err) + s.Require().ErrorIs(err, tc.expError) // We do not expect escrowed amounts in error cases. expEscrowAmount = zeroAmount } // Assert amounts escrowed are as expected. - suite.assertEscrowEqual(suite.chainA, coin, expEscrowAmount) + s.assertEscrowEqual(s.chainA, coin, expEscrowAmount) }) } } -func (suite *KeeperTestSuite) TestSendTransferSetsTotalEscrowAmountForSourceIBCToken() { +func (s *KeeperTestSuite) TestSendTransferSetsTotalEscrowAmountForSourceIBCToken() { /* Given the following flow of tokens: @@ -266,10 +267,10 @@ func (suite *KeeperTestSuite) TestSendTransferSetsTotalEscrowAmountForSourceIBCT // set up // 2 transfer channels between chain A and chain B - path1 := ibctesting.NewTransferPath(suite.chainA, suite.chainB) + path1 := ibctesting.NewTransferPath(s.chainA, s.chainB) path1.Setup() - path2 := ibctesting.NewTransferPath(suite.chainA, suite.chainB) + path2 := ibctesting.NewTransferPath(s.chainA, s.chainB) path2.Setup() // create IBC token on chain B with denom trace "transfer/channel-0/stake" @@ -278,18 +279,18 @@ func (suite *KeeperTestSuite) TestSendTransferSetsTotalEscrowAmountForSourceIBCT path1.EndpointA.ChannelConfig.PortID, path1.EndpointA.ChannelID, coin, - suite.chainA.SenderAccount.GetAddress().String(), - suite.chainB.SenderAccount.GetAddress().String(), - suite.chainB.GetTimeoutHeight(), 0, "", + s.chainA.SenderAccount.GetAddress().String(), + s.chainB.SenderAccount.GetAddress().String(), + s.chainB.GetTimeoutHeight(), 0, "", ) - result, err := suite.chainA.SendMsgs(transferMsg) - suite.Require().NoError(err) // message committed + result, err := s.chainA.SendMsgs(transferMsg) + s.Require().NoError(err) // message committed packet, err := ibctesting.ParseV1PacketFromEvents(result.Events) - suite.Require().NoError(err) + s.Require().NoError(err) err = path1.RelayPacket(packet) - suite.Require().NoError(err) + s.Require().NoError(err) // execute denom := types.NewDenom(sdk.DefaultBondDenom, types.NewHop(path1.EndpointB.ChannelConfig.PortID, path1.EndpointB.ChannelID)) @@ -298,25 +299,25 @@ func (suite *KeeperTestSuite) TestSendTransferSetsTotalEscrowAmountForSourceIBCT path2.EndpointB.ChannelConfig.PortID, path2.EndpointB.ChannelID, coin, - suite.chainB.SenderAccount.GetAddress().String(), - suite.chainA.SenderAccount.GetAddress().String(), - suite.chainA.GetTimeoutHeight(), 0, "", + s.chainB.SenderAccount.GetAddress().String(), + s.chainA.SenderAccount.GetAddress().String(), + s.chainA.GetTimeoutHeight(), 0, "", ) - res, err := 
suite.chainB.GetSimApp().TransferKeeper.Transfer(suite.chainB.GetContext(), msg) - suite.Require().NoError(err) - suite.Require().NotNil(res) + res, err := s.chainB.GetSimApp().TransferKeeper.Transfer(s.chainB.GetContext(), msg) + s.Require().NoError(err) + s.Require().NotNil(res) // check total amount in escrow of sent token on sending chain - totalEscrow := suite.chainB.GetSimApp().TransferKeeper.GetTotalEscrowForDenom(suite.chainB.GetContext(), coin.GetDenom()) - suite.Require().Equal(defaultAmount, totalEscrow.Amount) + totalEscrow := s.chainB.GetSimApp().TransferKeeper.GetTotalEscrowForDenom(s.chainB.GetContext(), coin.GetDenom()) + s.Require().Equal(defaultAmount, totalEscrow.Amount) } // TestOnRecvPacket_ReceiverIsNotSource tests receiving on chainB a coin that // originates on chainA. The bulk of the testing occurs in the test case for // loop since setup is intensive for all cases. The malleate function allows // for testing invalid cases. -func (suite *KeeperTestSuite) TestOnRecvPacket_ReceiverIsNotSource() { +func (s *KeeperTestSuite) TestOnRecvPacket_ReceiverIsNotSource() { var packetData types.InternalTransferRepresentation testCases := []struct { @@ -325,17 +326,27 @@ func (suite *KeeperTestSuite) TestOnRecvPacket_ReceiverIsNotSource() { expError error }{ { - "successful receive", + "success: receive", func() {}, nil, }, { - "successful receive with memo", + "success: receive with memo", func() { packetData.Memo = "memo" }, nil, }, + { + "success: receive with hex receiver address", + func() { + s.chainB.GetSimApp().TransferKeeper.SetAddressCodec(ibcmock.TestAddressCodec{}) + + receiver := sdk.MustAccAddressFromBech32(packetData.Receiver) + packetData.Receiver = hex.EncodeToString(receiver.Bytes()) + }, + nil, + }, { "failure: mint zero coin", func() { @@ -346,7 +357,7 @@ func (suite *KeeperTestSuite) TestOnRecvPacket_ReceiverIsNotSource() { { "failure: receiver is module account", func() { - packetData.Receiver = suite.chainB.GetSimApp().AccountKeeper.GetModuleAddress(minttypes.ModuleName).String() + packetData.Receiver = s.chainB.GetSimApp().AccountKeeper.GetModuleAddress(minttypes.ModuleName).String() }, ibcerrors.ErrUnauthorized, }, @@ -360,7 +371,7 @@ func (suite *KeeperTestSuite) TestOnRecvPacket_ReceiverIsNotSource() { { "failure: receive is disabled", func() { - suite.chainB.GetSimApp().TransferKeeper.SetParams(suite.chainB.GetContext(), + s.chainB.GetSimApp().TransferKeeper.SetParams(s.chainB.GetContext(), types.Params{ ReceiveEnabled: false, }) @@ -370,21 +381,21 @@ func (suite *KeeperTestSuite) TestOnRecvPacket_ReceiverIsNotSource() { } for _, tc := range testCases { - suite.Run(fmt.Sprintf("Case %s", tc.msg), func() { - suite.SetupTest() // reset + s.Run(fmt.Sprintf("Case %s", tc.msg), func() { + s.SetupTest() // reset - path := ibctesting.NewTransferPath(suite.chainA, suite.chainB) + path := ibctesting.NewTransferPath(s.chainA, s.chainB) path.Setup() - receiver := suite.chainB.SenderAccount.GetAddress().String() // must be explicitly changed in malleate + receiver := s.chainB.SenderAccount.GetAddress().String() // must be explicitly changed in malleate // send coins from chainA to chainB - transferMsg := types.NewMsgTransfer(path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, ibctesting.TestCoin, suite.chainA.SenderAccount.GetAddress().String(), receiver, clienttypes.NewHeight(1, 110), 0, "") - _, err := suite.chainA.SendMsgs(transferMsg) - suite.Require().NoError(err) // message committed + transferMsg := 
types.NewMsgTransfer(path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, ibctesting.TestCoin, s.chainA.SenderAccount.GetAddress().String(), receiver, clienttypes.NewHeight(1, 110), 0, "") + _, err := s.chainA.SendMsgs(transferMsg) + s.Require().NoError(err) // message committed token := types.Token{Denom: types.NewDenom(transferMsg.Token.Denom), Amount: transferMsg.Token.Amount.String()} - packetData = types.NewInternalTransferRepresentation(token, suite.chainA.SenderAccount.GetAddress().String(), receiver, "") + packetData = types.NewInternalTransferRepresentation(token, s.chainA.SenderAccount.GetAddress().String(), receiver, "") sourcePort := path.EndpointA.ChannelConfig.PortID sourceChannel := path.EndpointA.ChannelID destinationPort := path.EndpointB.ChannelConfig.PortID @@ -394,8 +405,8 @@ func (suite *KeeperTestSuite) TestOnRecvPacket_ReceiverIsNotSource() { denom := types.NewDenom(token.Denom.Base, types.NewHop(path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID)) - err = suite.chainB.GetSimApp().TransferKeeper.OnRecvPacket( - suite.chainB.GetContext(), + err = s.chainB.GetSimApp().TransferKeeper.OnRecvPacket( + s.chainB.GetContext(), packetData, sourcePort, sourceChannel, @@ -404,21 +415,21 @@ func (suite *KeeperTestSuite) TestOnRecvPacket_ReceiverIsNotSource() { ) if tc.expError == nil { - suite.Require().NoError(err) + s.Require().NoError(err) // Check denom metadata for of tokens received on chain B. - actualMetadata, found := suite.chainB.GetSimApp().BankKeeper.GetDenomMetaData(suite.chainB.GetContext(), denom.IBCDenom()) + actualMetadata, found := s.chainB.GetSimApp().BankKeeper.GetDenomMetaData(s.chainB.GetContext(), denom.IBCDenom()) - suite.Require().True(found) - suite.Require().Equal(metadataFromDenom(denom), actualMetadata) + s.Require().True(found) + s.Require().Equal(metadataFromDenom(denom), actualMetadata) } else { - suite.Require().Error(err) - suite.Require().ErrorIs(err, tc.expError) + s.Require().Error(err) + s.Require().ErrorIs(err, tc.expError) // Check denom metadata absence for cases where recv fails. - _, found := suite.chainB.GetSimApp().BankKeeper.GetDenomMetaData(suite.chainB.GetContext(), denom.IBCDenom()) + _, found := s.chainB.GetSimApp().BankKeeper.GetDenomMetaData(s.chainB.GetContext(), denom.IBCDenom()) - suite.Require().False(found) + s.Require().False(found) } }) } @@ -428,7 +439,7 @@ func (suite *KeeperTestSuite) TestOnRecvPacket_ReceiverIsNotSource() { // originated on chainB, but was previously transferred to chainA. The bulk // of the testing occurs in the test case for loop since setup is intensive // for all cases. The malleate function allows for testing invalid cases. 
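The two OnRecvPacket tests here exercise the ICS-20 source/sink rule: if the first hop of the incoming token's trace matches the packet's source port and channel, the receiving chain is the origin and the coins are unescrowed; otherwise a voucher with an extended trace is minted. A rough sketch of that decision with throwaway local types (hop and receiverIsSource are names local to this sketch, not the module's):

package main

import "fmt"

// hop mirrors the idea of a single port/channel trace element.
type hop struct {
	portID, channelID string
}

// receiverIsSource reports whether the receiving chain originally sent the
// token: true when the token's first trace hop matches the packet's source
// port and channel.
func receiverIsSource(sourcePort, sourceChannel string, trace []hop) bool {
	if len(trace) == 0 {
		return false // token is native to the sending chain: receiver mints a voucher
	}
	return trace[0].portID == sourcePort && trace[0].channelID == sourceChannel
}

func main() {
	// Token that previously travelled receiver -> sender over transfer/channel-0:
	// on receive the first hop is stripped and the coin is unescrowed.
	fmt.Println(receiverIsSource("transfer", "channel-0", []hop{{"transfer", "channel-0"}})) // true

	// Token native to the sending chain: receiver prepends a destination hop and mints a voucher.
	fmt.Println(receiverIsSource("transfer", "channel-0", nil)) // false
}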
-func (suite *KeeperTestSuite) TestOnRecvPacket_ReceiverIsSource() { +func (s *KeeperTestSuite) TestOnRecvPacket_ReceiverIsSource() { var ( packetData types.InternalTransferRepresentation expEscrowAmount sdkmath.Int // total amount in escrow for denom on receiving chain @@ -491,29 +502,29 @@ func (suite *KeeperTestSuite) TestOnRecvPacket_ReceiverIsSource() { { "failure: receiver is module account", func() { - packetData.Receiver = suite.chainB.GetSimApp().AccountKeeper.GetModuleAddress(minttypes.ModuleName).String() + packetData.Receiver = s.chainB.GetSimApp().AccountKeeper.GetModuleAddress(minttypes.ModuleName).String() }, ibcerrors.ErrUnauthorized, }, } for _, tc := range testCases { - suite.Run(fmt.Sprintf("Case %s", tc.msg), func() { - suite.SetupTest() // reset + s.Run(fmt.Sprintf("Case %s", tc.msg), func() { + s.SetupTest() // reset - path := ibctesting.NewTransferPath(suite.chainA, suite.chainB) + path := ibctesting.NewTransferPath(s.chainA, s.chainB) path.Setup() - receiver := suite.chainB.SenderAccount.GetAddress().String() // must be explicitly changed in malleate - expEscrowAmount = zeroAmount // total amount in escrow of voucher denom on receiving chain + receiver := s.chainB.SenderAccount.GetAddress().String() // must be explicitly changed in malleate + expEscrowAmount = zeroAmount // total amount in escrow of voucher denom on receiving chain // send coins from chainA to chainB, receive them, acknowledge them - transferMsg := types.NewMsgTransfer(path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, ibctesting.TestCoin, suite.chainA.SenderAccount.GetAddress().String(), receiver, clienttypes.NewHeight(1, 110), 0, "") - _, err := suite.chainA.SendMsgs(transferMsg) - suite.Require().NoError(err) // message committed + transferMsg := types.NewMsgTransfer(path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, ibctesting.TestCoin, s.chainA.SenderAccount.GetAddress().String(), receiver, clienttypes.NewHeight(1, 110), 0, "") + _, err := s.chainA.SendMsgs(transferMsg) + s.Require().NoError(err) // message committed token := types.Token{Denom: types.NewDenom(transferMsg.Token.Denom, types.NewHop(path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID)), Amount: transferMsg.Token.Amount.String()} - packetData = types.NewInternalTransferRepresentation(token, suite.chainA.SenderAccount.GetAddress().String(), receiver, "") + packetData = types.NewInternalTransferRepresentation(token, s.chainA.SenderAccount.GetAddress().String(), receiver, "") sourcePort := path.EndpointB.ChannelConfig.PortID sourceChannel := path.EndpointB.ChannelID destinationPort := path.EndpointA.ChannelConfig.PortID @@ -521,8 +532,8 @@ func (suite *KeeperTestSuite) TestOnRecvPacket_ReceiverIsSource() { tc.malleate() - err = suite.chainA.GetSimApp().TransferKeeper.OnRecvPacket( - suite.chainA.GetContext(), + err = s.chainA.GetSimApp().TransferKeeper.OnRecvPacket( + s.chainA.GetContext(), packetData, sourcePort, sourceChannel, @@ -531,25 +542,25 @@ func (suite *KeeperTestSuite) TestOnRecvPacket_ReceiverIsSource() { ) if tc.expError == nil { - suite.Require().NoError(err) + s.Require().NoError(err) - _, found := suite.chainA.GetSimApp().BankKeeper.GetDenomMetaData(suite.chainA.GetContext(), sdk.DefaultBondDenom) - suite.Require().False(found) + _, found := s.chainA.GetSimApp().BankKeeper.GetDenomMetaData(s.chainA.GetContext(), sdk.DefaultBondDenom) + s.Require().False(found) } else { - suite.Require().Error(err) - suite.Require().ErrorContains(err, tc.expError.Error()) + s.Require().Error(err) + 
s.Require().ErrorContains(err, tc.expError.Error()) // Expect escrowed amount to stay same on failure. expEscrowAmount = defaultAmount } // Assert amounts escrowed are as expected, we do not malleate amount escrowed in initial transfer. - suite.assertEscrowEqual(suite.chainA, ibctesting.TestCoin, expEscrowAmount) + s.assertEscrowEqual(s.chainA, ibctesting.TestCoin, expEscrowAmount) }) } } -func (suite *KeeperTestSuite) TestOnRecvPacketSetsTotalEscrowAmountForSourceIBCToken() { +func (s *KeeperTestSuite) TestOnRecvPacketSetsTotalEscrowAmountForSourceIBCToken() { /* Given the following flow of tokens: @@ -583,10 +594,10 @@ func (suite *KeeperTestSuite) TestOnRecvPacketSetsTotalEscrowAmountForSourceIBCT // setup // 2 transfer channels between chain A and chain B - path1 := ibctesting.NewTransferPath(suite.chainA, suite.chainB) + path1 := ibctesting.NewTransferPath(s.chainA, s.chainB) path1.Setup() - path2 := ibctesting.NewTransferPath(suite.chainA, suite.chainB) + path2 := ibctesting.NewTransferPath(s.chainA, s.chainB) path2.Setup() // denom path: {transfer/channel-1/transfer/channel-0} @@ -600,7 +611,7 @@ func (suite *KeeperTestSuite) TestOnRecvPacketSetsTotalEscrowAmountForSourceIBCT types.Token{ Denom: denom, Amount: amount.String(), - }, suite.chainA.SenderAccount.GetAddress().String(), suite.chainB.SenderAccount.GetAddress().String(), "") + }, s.chainA.SenderAccount.GetAddress().String(), s.chainB.SenderAccount.GetAddress().String(), "") sourcePort := path2.EndpointA.ChannelConfig.PortID sourceChannel := path2.EndpointA.ChannelID destinationPort := path2.EndpointB.ChannelConfig.PortID @@ -615,40 +626,40 @@ func (suite *KeeperTestSuite) TestOnRecvPacketSetsTotalEscrowAmountForSourceIBCT escrowAddress := types.GetEscrowAddress(path2.EndpointB.ChannelConfig.PortID, path2.EndpointB.ChannelID) coin := sdk.NewCoin(denom.IBCDenom(), amount) - suite.Require().NoError( + s.Require().NoError( banktestutil.FundAccount( - suite.chainB.GetContext(), - suite.chainB.GetSimApp().BankKeeper, + s.chainB.GetContext(), + s.chainB.GetSimApp().BankKeeper, escrowAddress, sdk.NewCoins(coin), ), ) - suite.chainB.GetSimApp().TransferKeeper.SetTotalEscrowForDenom(suite.chainB.GetContext(), coin) - totalEscrowChainB := suite.chainB.GetSimApp().TransferKeeper.GetTotalEscrowForDenom(suite.chainB.GetContext(), coin.GetDenom()) - suite.Require().Equal(defaultAmount, totalEscrowChainB.Amount) + s.chainB.GetSimApp().TransferKeeper.SetTotalEscrowForDenom(s.chainB.GetContext(), coin) + totalEscrowChainB := s.chainB.GetSimApp().TransferKeeper.GetTotalEscrowForDenom(s.chainB.GetContext(), coin.GetDenom()) + s.Require().Equal(defaultAmount, totalEscrowChainB.Amount) // execute onRecvPacket, when chaninB receives the source token the escrow amount should decrease - err := suite.chainB.GetSimApp().TransferKeeper.OnRecvPacket( - suite.chainB.GetContext(), + err := s.chainB.GetSimApp().TransferKeeper.OnRecvPacket( + s.chainB.GetContext(), data, sourcePort, sourceChannel, destinationPort, destinationChannel, ) - suite.Require().NoError(err) + s.Require().NoError(err) // check total amount in escrow of sent token on receiving chain - totalEscrowChainB = suite.chainB.GetSimApp().TransferKeeper.GetTotalEscrowForDenom(suite.chainB.GetContext(), coin.GetDenom()) - suite.Require().Equal(zeroAmount, totalEscrowChainB.Amount) + totalEscrowChainB = s.chainB.GetSimApp().TransferKeeper.GetTotalEscrowForDenom(s.chainB.GetContext(), coin.GetDenom()) + s.Require().Equal(zeroAmount, totalEscrowChainB.Amount) } // TestOnAcknowledgementPacket 
tests that successful acknowledgement is a no-op // and failure acknowledment leads to refund when attempting to send from chainA // to chainB. If sender is source then the denomination being refunded has no // trace. -func (suite *KeeperTestSuite) TestOnAcknowledgementPacket() { +func (s *KeeperTestSuite) TestOnAcknowledgementPacket() { var ( successAck = channeltypes.NewResultAcknowledgement([]byte{byte(1)}) failedAck = channeltypes.NewErrorAcknowledgement(errors.New("failed packet transfer")) @@ -680,10 +691,10 @@ func (suite *KeeperTestSuite) TestOnAcknowledgementPacket() { denom = types.NewDenom(sdk.DefaultBondDenom) coin := sdk.NewCoin(sdk.DefaultBondDenom, amount) - suite.Require().NoError(banktestutil.FundAccount(suite.chainA.GetContext(), suite.chainA.GetSimApp().BankKeeper, escrow, sdk.NewCoins(coin))) + s.Require().NoError(banktestutil.FundAccount(s.chainA.GetContext(), s.chainA.GetSimApp().BankKeeper, escrow, sdk.NewCoins(coin))) // set escrow amount that would have been stored after successful execution of MsgTransfer - suite.chainA.GetSimApp().TransferKeeper.SetTotalEscrowForDenom(suite.chainA.GetContext(), sdk.NewCoin(sdk.DefaultBondDenom, amount)) + s.chainA.GetSimApp().TransferKeeper.SetTotalEscrowForDenom(s.chainA.GetContext(), sdk.NewCoin(sdk.DefaultBondDenom, amount)) }, nil, }, @@ -695,7 +706,7 @@ func (suite *KeeperTestSuite) TestOnAcknowledgementPacket() { denom = types.NewDenom(sdk.DefaultBondDenom, types.NewHop(path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID)) coin := sdk.NewCoin(denom.IBCDenom(), amount) - suite.Require().NoError(banktestutil.FundAccount(suite.chainA.GetContext(), suite.chainA.GetSimApp().BankKeeper, escrow, sdk.NewCoins(coin))) + s.Require().NoError(banktestutil.FundAccount(s.chainA.GetContext(), s.chainA.GetSimApp().BankKeeper, escrow, sdk.NewCoins(coin))) }, nil, }, @@ -706,7 +717,7 @@ func (suite *KeeperTestSuite) TestOnAcknowledgementPacket() { denom = types.NewDenom(sdk.DefaultBondDenom) // set escrow amount that would have been stored after successful execution of MsgTransfer - suite.chainA.GetSimApp().TransferKeeper.SetTotalEscrowForDenom(suite.chainA.GetContext(), sdk.NewCoin(sdk.DefaultBondDenom, amount)) + s.chainA.GetSimApp().TransferKeeper.SetTotalEscrowForDenom(s.chainA.GetContext(), sdk.NewCoin(sdk.DefaultBondDenom, amount)) expEscrowAmount = defaultAmount }, sdkerrors.ErrInsufficientFunds, @@ -714,10 +725,10 @@ func (suite *KeeperTestSuite) TestOnAcknowledgementPacket() { } for _, tc := range testCases { - suite.Run(fmt.Sprintf("Case %s", tc.msg), func() { - suite.SetupTest() // reset + s.Run(fmt.Sprintf("Case %s", tc.msg), func() { + s.SetupTest() // reset - path = ibctesting.NewTransferPath(suite.chainA, suite.chainB) + path = ibctesting.NewTransferPath(s.chainA, s.chainB) path.Setup() amount = defaultAmount // must be explicitly changed @@ -729,36 +740,36 @@ func (suite *KeeperTestSuite) TestOnAcknowledgementPacket() { types.Token{ Denom: denom, Amount: amount.String(), - }, suite.chainA.SenderAccount.GetAddress().String(), suite.chainB.SenderAccount.GetAddress().String(), "") + }, s.chainA.SenderAccount.GetAddress().String(), s.chainB.SenderAccount.GetAddress().String(), "") sourcePort := path.EndpointA.ChannelConfig.PortID sourceChannel := path.EndpointA.ChannelID - preAcknowledgementBalance := suite.chainA.GetSimApp().BankKeeper.GetBalance(suite.chainA.GetContext(), suite.chainA.SenderAccount.GetAddress(), denom.IBCDenom()) + preAcknowledgementBalance := 
s.chainA.GetSimApp().BankKeeper.GetBalance(s.chainA.GetContext(), s.chainA.SenderAccount.GetAddress(), denom.IBCDenom()) - err := suite.chainA.GetSimApp().TransferKeeper.OnAcknowledgementPacket(suite.chainA.GetContext(), sourcePort, sourceChannel, data, tc.ack) + err := s.chainA.GetSimApp().TransferKeeper.OnAcknowledgementPacket(s.chainA.GetContext(), sourcePort, sourceChannel, data, tc.ack) // check total amount in escrow of sent token denom on sending chain - totalEscrow := suite.chainA.GetSimApp().TransferKeeper.GetTotalEscrowForDenom(suite.chainA.GetContext(), denom.IBCDenom()) - suite.Require().Equal(expEscrowAmount, totalEscrow.Amount) + totalEscrow := s.chainA.GetSimApp().TransferKeeper.GetTotalEscrowForDenom(s.chainA.GetContext(), denom.IBCDenom()) + s.Require().Equal(expEscrowAmount, totalEscrow.Amount) if tc.expError == nil { - suite.Require().NoError(err) - postAcknowledgementBalance := suite.chainA.GetSimApp().BankKeeper.GetBalance(suite.chainA.GetContext(), suite.chainA.SenderAccount.GetAddress(), denom.IBCDenom()) + s.Require().NoError(err) + postAcknowledgementBalance := s.chainA.GetSimApp().BankKeeper.GetBalance(s.chainA.GetContext(), s.chainA.SenderAccount.GetAddress(), denom.IBCDenom()) deltaAmount := postAcknowledgementBalance.Amount.Sub(preAcknowledgementBalance.Amount) if tc.ack.Success() { - suite.Require().Equal(int64(0), deltaAmount.Int64(), "successful ack changed balance") + s.Require().Equal(int64(0), deltaAmount.Int64(), "successful ack changed balance") } else { - suite.Require().Equal(amount, deltaAmount, "failed ack did not trigger refund") + s.Require().Equal(amount, deltaAmount, "failed ack did not trigger refund") } } else { - suite.Require().Error(err) - suite.Require().ErrorIs(err, tc.expError) + s.Require().Error(err) + s.Require().ErrorIs(err, tc.expError) } }) } } -func (suite *KeeperTestSuite) TestOnAcknowledgementPacketSetsTotalEscrowAmountForSourceIBCToken() { +func (s *KeeperTestSuite) TestOnAcknowledgementPacketSetsTotalEscrowAmountForSourceIBCToken() { /* This test is testing the following scenario. 
Given tokens travelling like this: @@ -792,10 +803,10 @@ func (suite *KeeperTestSuite) TestOnAcknowledgementPacketSetsTotalEscrowAmountFo // set up // 2 transfer channels between chain A and chain B - path1 := ibctesting.NewTransferPath(suite.chainA, suite.chainB) + path1 := ibctesting.NewTransferPath(s.chainA, s.chainB) path1.Setup() - path2 := ibctesting.NewTransferPath(suite.chainA, suite.chainB) + path2 := ibctesting.NewTransferPath(s.chainA, s.chainB) path2.Setup() // fund escrow account for transfer and channel-1 on chain B @@ -804,10 +815,10 @@ func (suite *KeeperTestSuite) TestOnAcknowledgementPacketSetsTotalEscrowAmountFo escrowAddress := types.GetEscrowAddress(path2.EndpointB.ChannelConfig.PortID, path2.EndpointB.ChannelID) coin := sdk.NewCoin(denom.IBCDenom(), amount) - suite.Require().NoError( + s.Require().NoError( banktestutil.FundAccount( - suite.chainB.GetContext(), - suite.chainB.GetSimApp().BankKeeper, + s.chainB.GetContext(), + s.chainB.GetSimApp().BankKeeper, escrowAddress, sdk.NewCoins(coin), ), @@ -818,30 +829,30 @@ func (suite *KeeperTestSuite) TestOnAcknowledgementPacketSetsTotalEscrowAmountFo Denom: denom, Amount: amount.String(), }, - suite.chainB.SenderAccount.GetAddress().String(), - suite.chainA.SenderAccount.GetAddress().String(), + s.chainB.SenderAccount.GetAddress().String(), + s.chainA.SenderAccount.GetAddress().String(), "", ) sourcePort := path2.EndpointB.ChannelConfig.PortID sourceChannel := path2.EndpointB.ChannelID - suite.chainB.GetSimApp().TransferKeeper.SetTotalEscrowForDenom(suite.chainB.GetContext(), coin) - totalEscrowChainB := suite.chainB.GetSimApp().TransferKeeper.GetTotalEscrowForDenom(suite.chainB.GetContext(), coin.GetDenom()) - suite.Require().Equal(defaultAmount, totalEscrowChainB.Amount) + s.chainB.GetSimApp().TransferKeeper.SetTotalEscrowForDenom(s.chainB.GetContext(), coin) + totalEscrowChainB := s.chainB.GetSimApp().TransferKeeper.GetTotalEscrowForDenom(s.chainB.GetContext(), coin.GetDenom()) + s.Require().Equal(defaultAmount, totalEscrowChainB.Amount) - err := suite.chainB.GetSimApp().TransferKeeper.OnAcknowledgementPacket(suite.chainB.GetContext(), sourcePort, sourceChannel, data, ack) - suite.Require().NoError(err) + err := s.chainB.GetSimApp().TransferKeeper.OnAcknowledgementPacket(s.chainB.GetContext(), sourcePort, sourceChannel, data, ack) + s.Require().NoError(err) // check total amount in escrow of sent token on sending chain - totalEscrowChainB = suite.chainB.GetSimApp().TransferKeeper.GetTotalEscrowForDenom(suite.chainB.GetContext(), coin.GetDenom()) - suite.Require().Equal(zeroAmount, totalEscrowChainB.Amount) + totalEscrowChainB = s.chainB.GetSimApp().TransferKeeper.GetTotalEscrowForDenom(s.chainB.GetContext(), coin.GetDenom()) + s.Require().Equal(zeroAmount, totalEscrowChainB.Amount) } // TestOnTimeoutPacket tests private refundPacket function since it is a simple // wrapper over it. The actual timeout does not matter since IBC core logic // is not being tested. The test is timing out a send from chainA to chainB // so the refunds are occurring on chainA. 
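The keeper tests in this file all follow the same table-driven shape: SetupTest resets shared state, a per-case malleate closure mutates it, and assertions go through the suite's short s receiver. A trimmed-down sketch of that pattern with testify's suite package (ExampleTestSuite and validateAmount are stand-ins, not the real KeeperTestSuite):

package example_test

import (
	"errors"
	"testing"

	testifysuite "github.com/stretchr/testify/suite"
)

var errInvalidAmount = errors.New("amount must be positive")

func validateAmount(amount int64) error {
	if amount <= 0 {
		return errInvalidAmount
	}
	return nil
}

// ExampleTestSuite only demonstrates the SetupTest + malleate table pattern
// with the short `s` receiver.
type ExampleTestSuite struct {
	testifysuite.Suite

	amount int64 // shared state, reset per case and mutated by malleate
}

func (s *ExampleTestSuite) SetupTest() {
	s.amount = 100
}

func (s *ExampleTestSuite) TestValidateAmount() {
	testCases := []struct {
		name     string
		malleate func()
		expError error
	}{
		{"success: default amount", func() {}, nil},
		{"failure: zero amount", func() { s.amount = 0 }, errInvalidAmount},
	}

	for _, tc := range testCases {
		s.Run(tc.name, func() {
			s.SetupTest() // reset
			tc.malleate() // per-case mutation of the shared state

			err := validateAmount(s.amount)
			if tc.expError == nil {
				s.Require().NoError(err)
			} else {
				s.Require().ErrorIs(err, tc.expError)
			}
		})
	}
}

func TestExampleTestSuite(t *testing.T) {
	testifysuite.Run(t, new(ExampleTestSuite))
}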
-func (suite *KeeperTestSuite) TestOnTimeoutPacket() { +func (s *KeeperTestSuite) TestOnTimeoutPacket() { var ( path *ibctesting.Path amount string @@ -861,14 +872,14 @@ func (suite *KeeperTestSuite) TestOnTimeoutPacket() { escrow := types.GetEscrowAddress(path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID) denom = types.NewDenom(sdk.DefaultBondDenom) coinAmount, ok := sdkmath.NewIntFromString(amount) - suite.Require().True(ok) + s.Require().True(ok) coin := sdk.NewCoin(denom.IBCDenom(), coinAmount) expEscrowAmount = zeroAmount // funds the escrow account to have balance - suite.Require().NoError(banktestutil.FundAccount(suite.chainA.GetContext(), suite.chainA.GetSimApp().BankKeeper, escrow, sdk.NewCoins(coin))) + s.Require().NoError(banktestutil.FundAccount(s.chainA.GetContext(), s.chainA.GetSimApp().BankKeeper, escrow, sdk.NewCoins(coin))) // set escrow amount that would have been stored after successful execution of MsgTransfer - suite.chainA.GetSimApp().TransferKeeper.SetTotalEscrowForDenom(suite.chainA.GetContext(), coin) + s.chainA.GetSimApp().TransferKeeper.SetTotalEscrowForDenom(s.chainA.GetContext(), coin) }, nil, }, @@ -878,12 +889,12 @@ func (suite *KeeperTestSuite) TestOnTimeoutPacket() { escrow := types.GetEscrowAddress(path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID) denom = types.NewDenom(sdk.DefaultBondDenom, types.NewHop(path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID)) coinAmount, ok := sdkmath.NewIntFromString(amount) - suite.Require().True(ok) + s.Require().True(ok) coin := sdk.NewCoin(denom.IBCDenom(), coinAmount) expEscrowAmount = zeroAmount // funds the escrow account to have balance - suite.Require().NoError(banktestutil.FundAccount(suite.chainA.GetContext(), suite.chainA.GetSimApp().BankKeeper, escrow, sdk.NewCoins(coin))) + s.Require().NoError(banktestutil.FundAccount(s.chainA.GetContext(), s.chainA.GetSimApp().BankKeeper, escrow, sdk.NewCoins(coin))) }, nil, }, @@ -893,10 +904,10 @@ func (suite *KeeperTestSuite) TestOnTimeoutPacket() { denom = types.NewDenom("bitcoin") var ok bool expEscrowAmount, ok = sdkmath.NewIntFromString(amount) - suite.Require().True(ok) + s.Require().True(ok) // set escrow amount that would have been stored after successful execution of MsgTransfer - suite.chainA.GetSimApp().TransferKeeper.SetTotalEscrowForDenom(suite.chainA.GetContext(), sdk.NewCoin(denom.IBCDenom(), expEscrowAmount)) + s.chainA.GetSimApp().TransferKeeper.SetTotalEscrowForDenom(s.chainA.GetContext(), sdk.NewCoin(denom.IBCDenom(), expEscrowAmount)) }, sdkerrors.ErrInsufficientFunds, }, @@ -906,10 +917,10 @@ func (suite *KeeperTestSuite) TestOnTimeoutPacket() { denom = types.NewDenom(sdk.DefaultBondDenom) var ok bool expEscrowAmount, ok = sdkmath.NewIntFromString(amount) - suite.Require().True(ok) + s.Require().True(ok) // set escrow amount that would have been stored after successful execution of MsgTransfer - suite.chainA.GetSimApp().TransferKeeper.SetTotalEscrowForDenom(suite.chainA.GetContext(), sdk.NewCoin(denom.IBCDenom(), expEscrowAmount)) + s.chainA.GetSimApp().TransferKeeper.SetTotalEscrowForDenom(s.chainA.GetContext(), sdk.NewCoin(denom.IBCDenom(), expEscrowAmount)) }, sdkerrors.ErrInsufficientFunds, }, @@ -933,14 +944,14 @@ func (suite *KeeperTestSuite) TestOnTimeoutPacket() { } for _, tc := range testCases { - suite.Run(fmt.Sprintf("Case %s", tc.msg), func() { - suite.SetupTest() // reset + s.Run(fmt.Sprintf("Case %s", tc.msg), func() { + s.SetupTest() // reset - path = ibctesting.NewTransferPath(suite.chainA, 
suite.chainB) + path = ibctesting.NewTransferPath(s.chainA, s.chainB) path.Setup() amount = defaultAmount.String() // must be explicitly changed - sender = suite.chainA.SenderAccount.GetAddress().String() + sender = s.chainA.SenderAccount.GetAddress().String() expEscrowAmount = zeroAmount tc.malleate() @@ -949,34 +960,34 @@ func (suite *KeeperTestSuite) TestOnTimeoutPacket() { types.Token{ Denom: denom, Amount: amount, - }, sender, suite.chainB.SenderAccount.GetAddress().String(), "") + }, sender, s.chainB.SenderAccount.GetAddress().String(), "") sourcePort := path.EndpointA.ChannelConfig.PortID sourceChannel := path.EndpointA.ChannelID - preTimeoutBalance := suite.chainA.GetSimApp().BankKeeper.GetBalance(suite.chainA.GetContext(), suite.chainA.SenderAccount.GetAddress(), denom.IBCDenom()) + preTimeoutBalance := s.chainA.GetSimApp().BankKeeper.GetBalance(s.chainA.GetContext(), s.chainA.SenderAccount.GetAddress(), denom.IBCDenom()) - err := suite.chainA.GetSimApp().TransferKeeper.OnTimeoutPacket(suite.chainA.GetContext(), sourcePort, sourceChannel, data) + err := s.chainA.GetSimApp().TransferKeeper.OnTimeoutPacket(s.chainA.GetContext(), sourcePort, sourceChannel, data) - postTimeoutBalance := suite.chainA.GetSimApp().BankKeeper.GetBalance(suite.chainA.GetContext(), suite.chainA.SenderAccount.GetAddress(), denom.IBCDenom()) + postTimeoutBalance := s.chainA.GetSimApp().BankKeeper.GetBalance(s.chainA.GetContext(), s.chainA.SenderAccount.GetAddress(), denom.IBCDenom()) deltaAmount := postTimeoutBalance.Amount.Sub(preTimeoutBalance.Amount) // check total amount in escrow of sent token denom on sending chain - totalEscrow := suite.chainA.GetSimApp().TransferKeeper.GetTotalEscrowForDenom(suite.chainA.GetContext(), denom.IBCDenom()) - suite.Require().Equal(expEscrowAmount, totalEscrow.Amount) + totalEscrow := s.chainA.GetSimApp().TransferKeeper.GetTotalEscrowForDenom(s.chainA.GetContext(), denom.IBCDenom()) + s.Require().Equal(expEscrowAmount, totalEscrow.Amount) if tc.expError == nil { - suite.Require().NoError(err) + s.Require().NoError(err) amountParsed, ok := sdkmath.NewIntFromString(amount) - suite.Require().True(ok) - suite.Require().Equal(amountParsed, deltaAmount, "successful timeout did not trigger refund") + s.Require().True(ok) + s.Require().Equal(amountParsed, deltaAmount, "successful timeout did not trigger refund") } else { - suite.Require().Error(err) - suite.Require().ErrorContains(err, tc.expError.Error()) + s.Require().Error(err) + s.Require().ErrorContains(err, tc.expError.Error()) } }) } } -func (suite *KeeperTestSuite) TestOnTimeoutPacketSetsTotalEscrowAmountForSourceIBCToken() { +func (s *KeeperTestSuite) TestOnTimeoutPacketSetsTotalEscrowAmountForSourceIBCToken() { /* Given the following flow of tokens: @@ -1008,10 +1019,10 @@ func (suite *KeeperTestSuite) TestOnTimeoutPacketSetsTotalEscrowAmountForSourceI // set up // 2 transfer channels between chain A and chain B - path1 := ibctesting.NewTransferPath(suite.chainA, suite.chainB) + path1 := ibctesting.NewTransferPath(s.chainA, s.chainB) path1.Setup() - path2 := ibctesting.NewTransferPath(suite.chainA, suite.chainB) + path2 := ibctesting.NewTransferPath(s.chainA, s.chainB) path2.Setup() // fund escrow account for transfer and channel-1 on chain B @@ -1019,10 +1030,10 @@ func (suite *KeeperTestSuite) TestOnTimeoutPacketSetsTotalEscrowAmountForSourceI escrowAddress := types.GetEscrowAddress(path2.EndpointB.ChannelConfig.PortID, path2.EndpointB.ChannelID) coin := sdk.NewCoin(denom.IBCDenom(), amount) - suite.Require().NoError( + 
s.Require().NoError( banktestutil.FundAccount( - suite.chainB.GetContext(), - suite.chainB.GetSimApp().BankKeeper, + s.chainB.GetContext(), + s.chainB.GetSimApp().BankKeeper, escrowAddress, sdk.NewCoins(coin), ), @@ -1032,23 +1043,23 @@ func (suite *KeeperTestSuite) TestOnTimeoutPacketSetsTotalEscrowAmountForSourceI types.Token{ Denom: denom, Amount: amount.String(), - }, suite.chainB.SenderAccount.GetAddress().String(), suite.chainA.SenderAccount.GetAddress().String(), "") + }, s.chainB.SenderAccount.GetAddress().String(), s.chainA.SenderAccount.GetAddress().String(), "") sourcePort := path2.EndpointB.ChannelConfig.PortID sourceChannel := path2.EndpointB.ChannelID - suite.chainB.GetSimApp().TransferKeeper.SetTotalEscrowForDenom(suite.chainB.GetContext(), coin) - totalEscrowChainB := suite.chainB.GetSimApp().TransferKeeper.GetTotalEscrowForDenom(suite.chainB.GetContext(), coin.GetDenom()) - suite.Require().Equal(defaultAmount, totalEscrowChainB.Amount) + s.chainB.GetSimApp().TransferKeeper.SetTotalEscrowForDenom(s.chainB.GetContext(), coin) + totalEscrowChainB := s.chainB.GetSimApp().TransferKeeper.GetTotalEscrowForDenom(s.chainB.GetContext(), coin.GetDenom()) + s.Require().Equal(defaultAmount, totalEscrowChainB.Amount) - err := suite.chainB.GetSimApp().TransferKeeper.OnTimeoutPacket(suite.chainB.GetContext(), sourcePort, sourceChannel, data) - suite.Require().NoError(err) + err := s.chainB.GetSimApp().TransferKeeper.OnTimeoutPacket(s.chainB.GetContext(), sourcePort, sourceChannel, data) + s.Require().NoError(err) // check total amount in escrow of sent token on sending chain - totalEscrowChainB = suite.chainB.GetSimApp().TransferKeeper.GetTotalEscrowForDenom(suite.chainB.GetContext(), coin.GetDenom()) - suite.Require().Equal(zeroAmount, totalEscrowChainB.Amount) + totalEscrowChainB = s.chainB.GetSimApp().TransferKeeper.GetTotalEscrowForDenom(s.chainB.GetContext(), coin.GetDenom()) + s.Require().Equal(zeroAmount, totalEscrowChainB.Amount) } -func (suite *KeeperTestSuite) TestPacketForwardsCompatibility() { +func (s *KeeperTestSuite) TestPacketForwardsCompatibility() { // We are testing a scenario where a packet in the future has a new populated // field called "new_field". And this packet is being sent to this module which // doesn't have this field in the packet data. 
The module should be able to handle @@ -1068,7 +1079,7 @@ func (suite *KeeperTestSuite) TestPacketForwardsCompatibility() { { "success: no new field with memo", func() { - jsonString := fmt.Sprintf(`{"denom":"denom","amount":"100","sender":"%s","receiver":"%s","memo":"memo"}`, suite.chainB.SenderAccount.GetAddress().String(), suite.chainA.SenderAccount.GetAddress().String()) + jsonString := fmt.Sprintf(`{"denom":"denom","amount":"100","sender":"%s","receiver":"%s","memo":"memo"}`, s.chainB.SenderAccount.GetAddress().String(), s.chainA.SenderAccount.GetAddress().String()) packetData = []byte(jsonString) }, nil, @@ -1077,7 +1088,7 @@ func (suite *KeeperTestSuite) TestPacketForwardsCompatibility() { { "success: no new field without memo", func() { - jsonString := fmt.Sprintf(`{"denom":"denom","amount":"100","sender":"%s","receiver":"%s"}`, suite.chainB.SenderAccount.GetAddress().String(), suite.chainA.SenderAccount.GetAddress().String()) + jsonString := fmt.Sprintf(`{"denom":"denom","amount":"100","sender":"%s","receiver":"%s"}`, s.chainB.SenderAccount.GetAddress().String(), s.chainA.SenderAccount.GetAddress().String()) packetData = []byte(jsonString) }, nil, @@ -1094,7 +1105,7 @@ func (suite *KeeperTestSuite) TestPacketForwardsCompatibility() { { "failure: new field", func() { - jsonString := fmt.Sprintf(`{"denom":"denom","amount":"100","sender":"%s","receiver":"%s","memo":"memo","new_field":"value"}`, suite.chainB.SenderAccount.GetAddress().String(), suite.chainA.SenderAccount.GetAddress().String()) + jsonString := fmt.Sprintf(`{"denom":"denom","amount":"100","sender":"%s","receiver":"%s","memo":"memo","new_field":"value"}`, s.chainB.SenderAccount.GetAddress().String(), s.chainA.SenderAccount.GetAddress().String()) packetData = []byte(jsonString) }, ibcerrors.ErrInvalidType, @@ -1103,7 +1114,7 @@ func (suite *KeeperTestSuite) TestPacketForwardsCompatibility() { { "failure: missing field", func() { - jsonString := fmt.Sprintf(`{"amount":"100","sender":%s","receiver":"%s"}`, suite.chainB.SenderAccount.GetAddress().String(), suite.chainA.SenderAccount.GetAddress().String()) + jsonString := fmt.Sprintf(`{"amount":"100","sender":%s","receiver":"%s"}`, s.chainB.SenderAccount.GetAddress().String(), s.chainA.SenderAccount.GetAddress().String()) packetData = []byte(jsonString) }, ibcerrors.ErrInvalidType, @@ -1112,11 +1123,11 @@ func (suite *KeeperTestSuite) TestPacketForwardsCompatibility() { } for _, tc := range testCases { - suite.Run(tc.msg, func() { - suite.SetupTest() // reset + s.Run(tc.msg, func() { + s.SetupTest() // reset packetData = nil - path = ibctesting.NewTransferPath(suite.chainA, suite.chainB) + path = ibctesting.NewTransferPath(s.chainA, s.chainB) path.EndpointA.ChannelConfig.Version = types.V1 path.EndpointB.ChannelConfig.Version = types.V1 @@ -1124,10 +1135,10 @@ func (suite *KeeperTestSuite) TestPacketForwardsCompatibility() { path.Setup() - timeoutHeight := suite.chainB.GetTimeoutHeight() + timeoutHeight := s.chainB.GetTimeoutHeight() seq, err := path.EndpointB.SendPacket(timeoutHeight, 0, packetData) - suite.Require().NoError(err) + s.Require().NoError(err) packet := channeltypes.NewPacket(packetData, seq, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, timeoutHeight, 0) @@ -1135,22 +1146,22 @@ func (suite *KeeperTestSuite) TestPacketForwardsCompatibility() { err = path.RelayPacket(packet) if tc.expError == nil { - suite.Require().NoError(err) + s.Require().NoError(err) } else { - 
suite.Require().ErrorContains(err, tc.expError.Error()) + s.Require().ErrorContains(err, tc.expError.Error()) ackBz, ok := path.EndpointA.Chain.GetSimApp().IBCKeeper.ChannelKeeper.GetPacketAcknowledgement(path.EndpointA.Chain.GetContext(), path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, seq) - suite.Require().True(ok) + s.Require().True(ok) // an ack should be written for the malformed / bad packet data. expectedAck := channeltypes.NewErrorAcknowledgement(tc.expAckError) expBz := channeltypes.CommitAcknowledgement(expectedAck.Acknowledgement()) - suite.Require().Equal(expBz, ackBz) + s.Require().Equal(expBz, ackBz) } }) } } -func (suite *KeeperTestSuite) TestCreatePacketDataBytesFromVersion() { +func (s *KeeperTestSuite) TestCreatePacketDataBytesFromVersion() { var ( token types.Token sender, receiver string @@ -1168,8 +1179,8 @@ func (suite *KeeperTestSuite) TestCreatePacketDataBytesFromVersion() { func() {}, func(bz []byte, err error) { expPacketData := types.NewFungibleTokenPacketData(ibctesting.TestCoin.Denom, ibctesting.TestCoin.Amount.String(), sender, receiver, "") - suite.Require().Equal(bz, expPacketData.GetBytes()) - suite.Require().NoError(err) + s.Require().Equal(bz, expPacketData.GetBytes()) + s.Require().NoError(err) }, }, { @@ -1177,8 +1188,8 @@ func (suite *KeeperTestSuite) TestCreatePacketDataBytesFromVersion() { "ics20-2", func() {}, func(bz []byte, err error) { - suite.Require().Nil(bz) - suite.Require().Error(err, ibcerrors.ErrInvalidVersion) + s.Require().Nil(bz) + s.Require().Error(err, ibcerrors.ErrInvalidVersion) }, }, { @@ -1188,8 +1199,8 @@ func (suite *KeeperTestSuite) TestCreatePacketDataBytesFromVersion() { sender = "" }, func(bz []byte, err error) { - suite.Require().Nil(bz) - suite.Require().ErrorIs(err, ibcerrors.ErrInvalidAddress) + s.Require().Nil(bz) + s.Require().ErrorIs(err, ibcerrors.ErrInvalidAddress) }, }, { @@ -1197,17 +1208,17 @@ func (suite *KeeperTestSuite) TestCreatePacketDataBytesFromVersion() { ibcmock.Version, func() {}, func(bz []byte, err error) { - suite.Require().Nil(bz) - suite.Require().ErrorIs(err, types.ErrInvalidVersion) + s.Require().Nil(bz) + s.Require().ErrorIs(err, types.ErrInvalidVersion) }, }, } for _, tc := range testCases { - suite.Run(tc.name, func() { - suite.SetupTest() + s.Run(tc.name, func() { + s.SetupTest() - path := ibctesting.NewTransferPath(suite.chainA, suite.chainB) + path := ibctesting.NewTransferPath(s.chainA, s.chainB) path.Setup() token = types.Token{ @@ -1215,8 +1226,8 @@ func (suite *KeeperTestSuite) TestCreatePacketDataBytesFromVersion() { Denom: types.NewDenom(ibctesting.TestCoin.Denom), } - sender = suite.chainA.SenderAccount.GetAddress().String() - receiver = suite.chainB.SenderAccount.GetAddress().String() + sender = s.chainA.SenderAccount.GetAddress().String() + receiver = s.chainB.SenderAccount.GetAddress().String() tc.malleate() @@ -1245,7 +1256,7 @@ func metadataFromDenom(denom types.Denom) banktypes.Metadata { } // assertEscrowEqual asserts that the amounts escrowed for each of the coins on chain matches the expectedAmounts -func (suite *KeeperTestSuite) assertEscrowEqual(chain *ibctesting.TestChain, coin sdk.Coin, expectedAmount sdkmath.Int) { +func (s *KeeperTestSuite) assertEscrowEqual(chain *ibctesting.TestChain, coin sdk.Coin, expectedAmount sdkmath.Int) { amount := chain.GetSimApp().TransferKeeper.GetTotalEscrowForDenom(chain.GetContext(), coin.GetDenom()) - suite.Require().Equal(expectedAmount, amount.Amount) + s.Require().Equal(expectedAmount, amount.Amount) } diff --git 
a/modules/apps/transfer/module.go b/modules/apps/transfer/module.go index 1b33cbba776..666a86bc6a4 100644 --- a/modules/apps/transfer/module.go +++ b/modules/apps/transfer/module.go @@ -98,11 +98,11 @@ func (AppModuleBasic) GetQueryCmd() *cobra.Command { // AppModule represents the AppModule for this module type AppModule struct { AppModuleBasic - keeper keeper.Keeper + keeper *keeper.Keeper } // NewAppModule creates a new 20-transfer module -func NewAppModule(k keeper.Keeper) AppModule { +func NewAppModule(k *keeper.Keeper) AppModule { return AppModule{ keeper: k, } @@ -113,21 +113,13 @@ func (am AppModule) RegisterServices(cfg module.Configurator) { types.RegisterMsgServer(cfg.MsgServer(), am.keeper) types.RegisterQueryServer(cfg.QueryServer(), am.keeper) - m := keeper.NewMigrator(am.keeper) - if err := cfg.RegisterMigration(types.ModuleName, 2, m.MigrateTotalEscrowForDenom); err != nil { - panic(fmt.Errorf("failed to migrate transfer app from version 2 to 3 (total escrow entry migration): %v", err)) - } - - if err := cfg.RegisterMigration(types.ModuleName, 3, m.MigrateParams); err != nil { - panic(fmt.Errorf("failed to migrate transfer app version 3 to 4 (self-managed params migration): %v", err)) - } - + m := keeper.NewMigrator(*am.keeper) if err := cfg.RegisterMigration(types.ModuleName, 4, m.MigrateDenomMetadata); err != nil { - panic(fmt.Errorf("failed to migrate transfer app from version 4 to 5 (set denom metadata migration): %v", err)) + panic(fmt.Errorf("failed to migrate transfer app from version 4 to 5 (set denom metadata migration): %w", err)) } if err := cfg.RegisterMigration(types.ModuleName, 5, m.MigrateDenomTraceToDenom); err != nil { - panic(fmt.Errorf("failed to migrate transfer app from version 5 to 6 (migrate DenomTrace to Denom): %v", err)) + panic(fmt.Errorf("failed to migrate transfer app from version 5 to 6 (migrate DenomTrace to Denom): %w", err)) } } diff --git a/modules/apps/transfer/simulation/genesis_test.go b/modules/apps/transfer/simulation/genesis_test.go index a3133112436..327450817fb 100644 --- a/modules/apps/transfer/simulation/genesis_test.go +++ b/modules/apps/transfer/simulation/genesis_test.go @@ -47,7 +47,7 @@ func TestRandomizedGenState(t *testing.T) { require.Equal(t, "euzxpfgkqegqiqwixnku", ibcTransferGenesis.PortId) require.True(t, ibcTransferGenesis.Params.SendEnabled) require.True(t, ibcTransferGenesis.Params.ReceiveEnabled) - require.Len(t, ibcTransferGenesis.Denoms, 0) + require.Empty(t, ibcTransferGenesis.Denoms) } // TestRandomizedGenState tests abnormal scenarios of applying RandomizedGenState. 
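The migration registration in module.go above (and the keeper refund path earlier) now wraps the underlying error with %w instead of formatting it with %v before panicking. A minimal stdlib sketch of what that buys the caller (the sentinel error name is hypothetical):

package main

import (
	"errors"
	"fmt"
)

// errMigrationFailed stands in for whatever a migration handler returns.
var errMigrationFailed = errors.New("migration failed")

func main() {
	// %w keeps the cause in the error chain, so errors.Is / errors.As still
	// match after wrapping.
	wrapped := fmt.Errorf("failed to migrate transfer app from version 4 to 5: %w", errMigrationFailed)
	fmt.Println(errors.Is(wrapped, errMigrationFailed)) // true

	// %v only interpolates the message into a new flat error; the chain is lost.
	flattened := fmt.Errorf("failed to migrate transfer app from version 4 to 5: %v", errMigrationFailed)
	fmt.Println(errors.Is(flattened, errMigrationFailed)) // false
}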
diff --git a/modules/apps/transfer/simulation/proposals_test.go b/modules/apps/transfer/simulation/proposals_test.go index 125bc673d90..67ac1fc273a 100644 --- a/modules/apps/transfer/simulation/proposals_test.go +++ b/modules/apps/transfer/simulation/proposals_test.go @@ -26,7 +26,7 @@ func TestProposalMsgs(t *testing.T) { // execute ProposalMsgs function weightedProposalMsgs := simulation.ProposalMsgs() - require.Equal(t, len(weightedProposalMsgs), 1) + require.Len(t, weightedProposalMsgs, 1) w0 := weightedProposalMsgs[0] @@ -39,5 +39,5 @@ func TestProposalMsgs(t *testing.T) { require.True(t, ok) require.Equal(t, sdk.AccAddress(address.Module("gov")).String(), msgUpdateParams.Signer) - require.EqualValues(t, msgUpdateParams.Params.SendEnabled, false) + require.False(t, msgUpdateParams.Params.SendEnabled) } diff --git a/modules/apps/transfer/transfer_test.go b/modules/apps/transfer/transfer_test.go index a4a47911a4a..b373de3f392 100644 --- a/modules/apps/transfer/transfer_test.go +++ b/modules/apps/transfer/transfer_test.go @@ -25,18 +25,22 @@ type TransferTestSuite struct { chainC *ibctesting.TestChain } -func (suite *TransferTestSuite) SetupTest() { - suite.coordinator = ibctesting.NewCoordinator(suite.T(), 3) - suite.chainA = suite.coordinator.GetChain(ibctesting.GetChainID(1)) - suite.chainB = suite.coordinator.GetChain(ibctesting.GetChainID(2)) - suite.chainC = suite.coordinator.GetChain(ibctesting.GetChainID(3)) +func TestTransferTestSuite(t *testing.T) { + testifysuite.Run(t, new(TransferTestSuite)) +} + +func (s *TransferTestSuite) SetupTest() { + s.coordinator = ibctesting.NewCoordinator(s.T(), 3) + s.chainA = s.coordinator.GetChain(ibctesting.GetChainID(1)) + s.chainB = s.coordinator.GetChain(ibctesting.GetChainID(2)) + s.chainC = s.coordinator.GetChain(ibctesting.GetChainID(3)) } // Constructs the following sends based on the established channels/connections // 1 - from chainA to chainB // 2 - from chainB to chainC // 3 - from chainC to chainB -func (suite *TransferTestSuite) TestHandleMsgTransfer() { +func (s *TransferTestSuite) TestHandleMsgTransfer() { var ( sourceDenomToTransfer string msgAmount sdkmath.Int @@ -55,7 +59,7 @@ func (suite *TransferTestSuite) TestHandleMsgTransfer() { func() { var ok bool msgAmount, ok = sdkmath.NewIntFromString("9223372036854775808") // 2^63 (one above int64) - suite.Require().True(ok) + s.Require().True(ok) }, }, { @@ -67,14 +71,14 @@ func (suite *TransferTestSuite) TestHandleMsgTransfer() { } for _, tc := range testCases { - suite.Run(tc.name, func() { - suite.SetupTest() // reset + s.Run(tc.name, func() { + s.SetupTest() // reset // setup between chainA and chainB // NOTE: // pathAToB.EndpointA = endpoint on chainA // pathAToB.EndpointB = endpoint on chainB - pathAToB := ibctesting.NewTransferPath(suite.chainA, suite.chainB) + pathAToB := ibctesting.NewTransferPath(s.chainA, s.chainB) pathAToB.Setup() traceAToB := types.NewHop(pathAToB.EndpointB.ChannelConfig.PortID, pathAToB.EndpointB.ChannelID) @@ -83,63 +87,63 @@ func (suite *TransferTestSuite) TestHandleMsgTransfer() { tc.malleate() - originalBalance := suite.chainA.GetSimApp().BankKeeper.GetBalance(suite.chainA.GetContext(), suite.chainA.SenderAccount.GetAddress(), sourceDenomToTransfer) + originalBalance := s.chainA.GetSimApp().BankKeeper.GetBalance(s.chainA.GetContext(), s.chainA.SenderAccount.GetAddress(), sourceDenomToTransfer) timeoutHeight := clienttypes.NewHeight(1, 110) originalCoin := sdk.NewCoin(sourceDenomToTransfer, msgAmount) // send from chainA to chainB - msg := 
types.NewMsgTransfer(pathAToB.EndpointA.ChannelConfig.PortID, pathAToB.EndpointA.ChannelID, originalCoin, suite.chainA.SenderAccount.GetAddress().String(), suite.chainB.SenderAccount.GetAddress().String(), timeoutHeight, 0, "") - res, err := suite.chainA.SendMsgs(msg) - suite.Require().NoError(err) // message committed + msg := types.NewMsgTransfer(pathAToB.EndpointA.ChannelConfig.PortID, pathAToB.EndpointA.ChannelID, originalCoin, s.chainA.SenderAccount.GetAddress().String(), s.chainB.SenderAccount.GetAddress().String(), timeoutHeight, 0, "") + res, err := s.chainA.SendMsgs(msg) + s.Require().NoError(err) // message committed packet, err := ibctesting.ParseV1PacketFromEvents(res.Events) - suite.Require().NoError(err) + s.Require().NoError(err) // Get the packet data to determine the amount of tokens being transferred (needed for sending entire balance) packetData, err := types.UnmarshalPacketData(packet.GetData(), pathAToB.EndpointA.GetChannel().Version, "") - suite.Require().NoError(err) + s.Require().NoError(err) transferAmount, ok := sdkmath.NewIntFromString(packetData.Token.Amount) - suite.Require().True(ok) + s.Require().True(ok) // relay send err = pathAToB.RelayPacket(packet) - suite.Require().NoError(err) // relay committed + s.Require().NoError(err) // relay committed escrowAddress := types.GetEscrowAddress(packet.GetSourcePort(), packet.GetSourceChannel()) // check that the balance for chainA is updated - chainABalance := suite.chainA.GetSimApp().BankKeeper.GetBalance(suite.chainA.GetContext(), suite.chainA.SenderAccount.GetAddress(), originalCoin.Denom) - suite.Require().True(originalBalance.Amount.Sub(transferAmount).Equal(chainABalance.Amount)) + chainABalance := s.chainA.GetSimApp().BankKeeper.GetBalance(s.chainA.GetContext(), s.chainA.SenderAccount.GetAddress(), originalCoin.Denom) + s.Require().True(originalBalance.Amount.Sub(transferAmount).Equal(chainABalance.Amount)) // check that module account escrow address has locked the tokens - chainAEscrowBalance := suite.chainA.GetSimApp().BankKeeper.GetBalance(suite.chainA.GetContext(), escrowAddress, originalCoin.Denom) - suite.Require().True(transferAmount.Equal(chainAEscrowBalance.Amount)) + chainAEscrowBalance := s.chainA.GetSimApp().BankKeeper.GetBalance(s.chainA.GetContext(), escrowAddress, originalCoin.Denom) + s.Require().True(transferAmount.Equal(chainAEscrowBalance.Amount)) // check that voucher exists on chain B chainBDenom := types.NewDenom(originalCoin.Denom, traceAToB) - chainBBalance := suite.chainB.GetSimApp().BankKeeper.GetBalance(suite.chainB.GetContext(), suite.chainB.SenderAccount.GetAddress(), chainBDenom.IBCDenom()) + chainBBalance := s.chainB.GetSimApp().BankKeeper.GetBalance(s.chainB.GetContext(), s.chainB.SenderAccount.GetAddress(), chainBDenom.IBCDenom()) coinSentFromAToB := sdk.NewCoin(chainBDenom.IBCDenom(), transferAmount) - suite.Require().Equal(coinSentFromAToB, chainBBalance) + s.Require().Equal(coinSentFromAToB, chainBBalance) // setup between chainB to chainC // NOTE: // pathBToC.EndpointA = endpoint on chainB // pathBToC.EndpointB = endpoint on chainC - pathBToC := ibctesting.NewTransferPath(suite.chainB, suite.chainC) + pathBToC := ibctesting.NewTransferPath(s.chainB, s.chainC) pathBToC.Setup() traceBToC := types.NewHop(pathBToC.EndpointB.ChannelConfig.PortID, pathBToC.EndpointB.ChannelID) // send from chainB to chainC - msg = types.NewMsgTransfer(pathBToC.EndpointA.ChannelConfig.PortID, pathBToC.EndpointA.ChannelID, coinSentFromAToB, suite.chainB.SenderAccount.GetAddress().String(), 
suite.chainC.SenderAccount.GetAddress().String(), timeoutHeight, 0, "") - res, err = suite.chainB.SendMsgs(msg) - suite.Require().NoError(err) // message committed + msg = types.NewMsgTransfer(pathBToC.EndpointA.ChannelConfig.PortID, pathBToC.EndpointA.ChannelID, coinSentFromAToB, s.chainB.SenderAccount.GetAddress().String(), s.chainC.SenderAccount.GetAddress().String(), timeoutHeight, 0, "") + res, err = s.chainB.SendMsgs(msg) + s.Require().NoError(err) // message committed packet, err = ibctesting.ParseV1PacketFromEvents(res.Events) - suite.Require().NoError(err) + s.Require().NoError(err) err = pathBToC.RelayPacket(packet) - suite.Require().NoError(err) // relay committed + s.Require().NoError(err) // relay committed coinsSentFromBToC := sdk.NewCoins() // check balances for chainB and chainC after transfer from chainB to chainC @@ -148,54 +152,50 @@ func (suite *TransferTestSuite) TestHandleMsgTransfer() { // check that the balance is updated on chainC coinSentFromBToC := sdk.NewCoin(chainCDenom.IBCDenom(), transferAmount) - chainCBalance := suite.chainC.GetSimApp().BankKeeper.GetBalance(suite.chainC.GetContext(), suite.chainC.SenderAccount.GetAddress(), coinSentFromBToC.Denom) - suite.Require().Equal(coinSentFromBToC, chainCBalance) + chainCBalance := s.chainC.GetSimApp().BankKeeper.GetBalance(s.chainC.GetContext(), s.chainC.SenderAccount.GetAddress(), coinSentFromBToC.Denom) + s.Require().Equal(coinSentFromBToC, chainCBalance) // check that balance on chain B is empty - chainBBalance = suite.chainB.GetSimApp().BankKeeper.GetBalance(suite.chainB.GetContext(), suite.chainB.SenderAccount.GetAddress(), coinSentFromBToC.Denom) - suite.Require().Zero(chainBBalance.Amount.Int64()) + chainBBalance = s.chainB.GetSimApp().BankKeeper.GetBalance(s.chainB.GetContext(), s.chainB.SenderAccount.GetAddress(), coinSentFromBToC.Denom) + s.Require().Zero(chainBBalance.Amount.Int64()) // send from chainC back to chainB - msg = types.NewMsgTransfer(pathBToC.EndpointB.ChannelConfig.PortID, pathBToC.EndpointB.ChannelID, coinSentFromBToC, suite.chainC.SenderAccount.GetAddress().String(), suite.chainB.SenderAccount.GetAddress().String(), timeoutHeight, 0, "") - res, err = suite.chainC.SendMsgs(msg) - suite.Require().NoError(err) // message committed + msg = types.NewMsgTransfer(pathBToC.EndpointB.ChannelConfig.PortID, pathBToC.EndpointB.ChannelID, coinSentFromBToC, s.chainC.SenderAccount.GetAddress().String(), s.chainB.SenderAccount.GetAddress().String(), timeoutHeight, 0, "") + res, err = s.chainC.SendMsgs(msg) + s.Require().NoError(err) // message committed packet, err = ibctesting.ParseV1PacketFromEvents(res.Events) - suite.Require().NoError(err) + s.Require().NoError(err) err = pathBToC.RelayPacket(packet) - suite.Require().NoError(err) // relay committed + s.Require().NoError(err) // relay committed // check balances for chainC are empty after transfer from chainC to chainB for _, coin := range coinsSentFromBToC { // check that balance on chain C is empty - chainCBalance := suite.chainC.GetSimApp().BankKeeper.GetBalance(suite.chainC.GetContext(), suite.chainC.SenderAccount.GetAddress(), coin.Denom) - suite.Require().Zero(chainCBalance.Amount.Int64()) + chainCBalance := s.chainC.GetSimApp().BankKeeper.GetBalance(s.chainC.GetContext(), s.chainC.SenderAccount.GetAddress(), coin.Denom) + s.Require().Zero(chainCBalance.Amount.Int64()) } // check balances for chainB after transfer from chainC to chainB // check that balance on chain B has the transferred amount - chainBBalance = 
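The voucher balance checks in this test query the bank keeper with Denom.IBCDenom() rather than the raw trace path. A standalone sketch of the conventional ICS-20 derivation those checks rely on, assuming the usual ibc/<uppercase SHA-256 of the full trace path> scheme (ibcVoucherDenom is an invented helper for illustration, not the transfer module's API):

package main

import (
	"crypto/sha256"
	"fmt"
	"strings"
)

// ibcVoucherDenom sketches how a trace plus base denom is turned into the
// "ibc/<HASH>" denom that the bank keeper is queried with in the test above.
func ibcVoucherDenom(base string, hops ...string) string {
	fullPath := strings.Join(append(hops, base), "/")
	hash := sha256.Sum256([]byte(fullPath))
	return fmt.Sprintf("ibc/%X", hash[:])
}

func main() {
	// e.g. a token sent A -> B -> C carries two hops on chain C.
	denom := ibcVoucherDenom("stake", "transfer/channel-0", "transfer/channel-1")
	fmt.Println(denom) // "ibc/" followed by 64 uppercase hex characters
}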
suite.chainB.GetSimApp().BankKeeper.GetBalance(suite.chainB.GetContext(), suite.chainB.SenderAccount.GetAddress(), coinSentFromAToB.Denom) - suite.Require().Equal(coinSentFromAToB, chainBBalance) + chainBBalance = s.chainB.GetSimApp().BankKeeper.GetBalance(s.chainB.GetContext(), s.chainB.SenderAccount.GetAddress(), coinSentFromAToB.Denom) + s.Require().Equal(coinSentFromAToB, chainBBalance) // check that module account escrow address is empty escrowAddress = types.GetEscrowAddress(traceBToC.PortId, traceBToC.ChannelId) - chainBEscrowBalance := suite.chainB.GetSimApp().BankKeeper.GetBalance(suite.chainB.GetContext(), escrowAddress, coinSentFromAToB.Denom) - suite.Require().Zero(chainBEscrowBalance.Amount.Int64()) + chainBEscrowBalance := s.chainB.GetSimApp().BankKeeper.GetBalance(s.chainB.GetContext(), escrowAddress, coinSentFromAToB.Denom) + s.Require().Zero(chainBEscrowBalance.Amount.Int64()) // check balances for chainA after transfer from chainC to chainB // check that the balance is unchanged - chainABalance = suite.chainA.GetSimApp().BankKeeper.GetBalance(suite.chainA.GetContext(), suite.chainA.SenderAccount.GetAddress(), originalCoin.Denom) - suite.Require().True(originalBalance.Amount.Sub(transferAmount).Equal(chainABalance.Amount)) + chainABalance = s.chainA.GetSimApp().BankKeeper.GetBalance(s.chainA.GetContext(), s.chainA.SenderAccount.GetAddress(), originalCoin.Denom) + s.Require().True(originalBalance.Amount.Sub(transferAmount).Equal(chainABalance.Amount)) // check that module account escrow address is unchanged escrowAddress = types.GetEscrowAddress(pathAToB.EndpointA.ChannelConfig.PortID, pathAToB.EndpointA.ChannelID) - chainAEscrowBalance = suite.chainA.GetSimApp().BankKeeper.GetBalance(suite.chainA.GetContext(), escrowAddress, originalCoin.Denom) - suite.Require().True(transferAmount.Equal(chainAEscrowBalance.Amount)) + chainAEscrowBalance = s.chainA.GetSimApp().BankKeeper.GetBalance(s.chainA.GetContext(), escrowAddress, originalCoin.Denom) + s.Require().True(transferAmount.Equal(chainAEscrowBalance.Amount)) }) } } - -func TestTransferTestSuite(t *testing.T) { - testifysuite.Run(t, new(TransferTestSuite)) -} diff --git a/modules/apps/transfer/types/codec_test.go b/modules/apps/transfer/types/codec_test.go index 8c07532dfa3..35f50390f6f 100644 --- a/modules/apps/transfer/types/codec_test.go +++ b/modules/apps/transfer/types/codec_test.go @@ -13,22 +13,22 @@ import ( ) // TestMustMarshalProtoJSON tests that the memo field is only emitted (marshalled) if it is populated -func (suite *TypesTestSuite) TestMustMarshalProtoJSON() { +func (s *TypesTestSuite) TestMustMarshalProtoJSON() { memo := "memo" - packetData := types.NewFungibleTokenPacketData(sdk.DefaultBondDenom, "1", suite.chainA.SenderAccount.GetAddress().String(), suite.chainB.SenderAccount.GetAddress().String(), memo) + packetData := types.NewFungibleTokenPacketData(sdk.DefaultBondDenom, "1", s.chainA.SenderAccount.GetAddress().String(), s.chainB.SenderAccount.GetAddress().String(), memo) bz := packetData.GetBytes() exists := strings.Contains(string(bz), memo) - suite.Require().True(exists) + s.Require().True(exists) packetData.Memo = "" bz = packetData.GetBytes() exists = strings.Contains(string(bz), memo) - suite.Require().False(exists) + s.Require().False(exists) } -func (suite *TypesTestSuite) TestCodecTypeRegistration() { +func (s *TypesTestSuite) TestCodecTypeRegistration() { testCases := []struct { name string typeURL string @@ -57,16 +57,16 @@ func (suite *TypesTestSuite) TestCodecTypeRegistration() { } for _, tc 
:= range testCases { - suite.Run(tc.name, func() { + s.Run(tc.name, func() { encodingCfg := moduletestutil.MakeTestEncodingConfig(transfer.AppModuleBasic{}) msg, err := encodingCfg.Codec.InterfaceRegistry().Resolve(tc.typeURL) if tc.expErr == nil { - suite.Require().NotNil(msg) - suite.Require().NoError(err) + s.Require().NotNil(msg) + s.Require().NoError(err) } else { - suite.Require().Nil(msg) - ibctesting.RequireErrorIsOrContains(suite.T(), err, tc.expErr) + s.Require().Nil(msg) + ibctesting.RequireErrorIsOrContains(s.T(), err, tc.expErr) } }) } diff --git a/modules/apps/transfer/types/denom.go b/modules/apps/transfer/types/denom.go index eb601def846..fea285e1957 100644 --- a/modules/apps/transfer/types/denom.go +++ b/modules/apps/transfer/types/denom.go @@ -167,6 +167,7 @@ func ExtractDenomFromPath(fullPath string) Denom { // will be incorrectly parsed, but the token will continue to be treated correctly // as an IBC denomination. The hash used to store the token internally on our chain // will be the same value as the base denomination being correctly parsed. + // nolint:revive // Early return possible, but not needed here if i < length-1 && length > 2 && (channeltypes.IsValidChannelID(denomSplit[i+1]) || clienttypes.IsValidClientID(denomSplit[i+1])) { trace = append(trace, NewHop(denomSplit[i], denomSplit[i+1])) } else { @@ -206,6 +207,9 @@ func validateIBCDenom(denom string) error { if _, err := ParseHexHash(denomSplit[1]); err != nil { return errorsmod.Wrapf(err, "invalid denom trace hash %s", denomSplit[1]) } + + default: + // Valid base denomination or other valid format } return nil diff --git a/modules/apps/transfer/types/denom_test.go b/modules/apps/transfer/types/denom_test.go index 1b57da5b252..acd9d45c939 100644 --- a/modules/apps/transfer/types/denom_test.go +++ b/modules/apps/transfer/types/denom_test.go @@ -9,7 +9,7 @@ import ( "github.com/cosmos/ibc-go/v10/modules/apps/transfer/types" ) -func (suite *TypesTestSuite) TestDenomsValidate() { +func (s *TypesTestSuite) TestDenomsValidate() { testCases := []struct { name string denoms types.Denoms @@ -46,18 +46,18 @@ func (suite *TypesTestSuite) TestDenomsValidate() { } for _, tc := range testCases { - suite.Run(tc.name, func() { + s.Run(tc.name, func() { err := tc.denoms.Validate() if tc.expError == nil { - suite.Require().NoError(err, tc.name) + s.Require().NoError(err, tc.name) } else { - suite.Require().ErrorContains(err, tc.expError.Error()) + s.Require().ErrorContains(err, tc.expError.Error()) } }) } } -func (suite *TypesTestSuite) TestPath() { +func (s *TypesTestSuite) TestPath() { testCases := []struct { name string denom types.Denom @@ -111,13 +111,13 @@ func (suite *TypesTestSuite) TestPath() { } for _, tc := range testCases { - suite.Run(tc.name, func() { - suite.Require().Equal(tc.expPath, tc.denom.Path()) + s.Run(tc.name, func() { + s.Require().Equal(tc.expPath, tc.denom.Path()) }) } } -func (suite *TypesTestSuite) TestSort() { +func (s *TypesTestSuite) TestSort() { testCases := []struct { name string denoms types.Denoms @@ -179,13 +179,13 @@ func (suite *TypesTestSuite) TestSort() { }, } for _, tc := range testCases { - suite.Run(tc.name, func() { - suite.Require().Equal(tc.expDenoms, tc.denoms.Sort()) + s.Run(tc.name, func() { + s.Require().Equal(tc.expDenoms, tc.denoms.Sort()) }) } } -func (suite *TypesTestSuite) TestDenomChainSource() { +func (s *TypesTestSuite) TestDenomChainSource() { testCases := []struct { name string denom types.Denom @@ -262,8 +262,8 @@ func (suite *TypesTestSuite) 
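The new default branch in validateIBCDenom above simply records that denominations which are neither blank nor of the two-part ibc/<hash> form fall through as valid base denoms. A rough sketch of that shape of validation, assuming hashes are hex-encoded SHA-256 digests (parseHexHash and validateIBCDenomSketch are stand-ins, not the module's exported functions):

package main

import (
	"encoding/hex"
	"errors"
	"fmt"
	"strings"
)

// parseHexHash is a stand-in for the module's ParseHexHash: a valid hash is
// hex and decodes to the 32 bytes of a SHA-256 digest.
func parseHexHash(s string) ([]byte, error) {
	bz, err := hex.DecodeString(s)
	if err != nil {
		return nil, err
	}
	if len(bz) != 32 {
		return nil, errors.New("invalid hash length")
	}
	return bz, nil
}

// validateIBCDenomSketch mirrors the switch in denom.go: empty is invalid,
// "ibc/<hash>" must carry a parseable hash, and anything else falls through
// the default case as a valid base denomination.
func validateIBCDenomSketch(denom string) error {
	split := strings.SplitN(denom, "/", 2)
	switch {
	case strings.TrimSpace(denom) == "":
		return errors.New("denomination cannot be blank")
	case len(split) == 2 && split[0] == "ibc":
		if strings.TrimSpace(split[1]) == "" {
			return errors.New("base denomination cannot be blank")
		}
		if _, err := parseHexHash(split[1]); err != nil {
			return fmt.Errorf("invalid denom trace hash %s: %w", split[1], err)
		}
	default:
		// valid base denomination or other valid format
	}
	return nil
}

func main() {
	fmt.Println(validateIBCDenomSketch("uatom"))               // <nil>
	fmt.Println(validateIBCDenomSketch("ibc/notahash") != nil) // true
}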
TestDenomChainSource() { } for _, tc := range testCases { - suite.Run(tc.name, func() { - suite.Require().Equal(tc.expHasPrefix, tc.denom.HasPrefix(tc.sourcePort, tc.sourceChannel)) + s.Run(tc.name, func() { + s.Require().Equal(tc.expHasPrefix, tc.denom.HasPrefix(tc.sourcePort, tc.sourceChannel)) }) } } @@ -287,7 +287,6 @@ func TestValidateIBCDenom(t *testing.T) { } for _, tc := range testCases { - err := types.ValidateIBCDenom(tc.denom) if tc.expError { require.Error(t, err, tc.name) @@ -326,7 +325,6 @@ func TestExtractDenomFromPath(t *testing.T) { } for _, tc := range testCases { - denom := types.ExtractDenomFromPath(tc.fullPath) require.Equal(t, tc.expDenom, denom, tc.name) } diff --git a/modules/apps/transfer/types/deprecated.go b/modules/apps/transfer/types/deprecated.go index d3afc0ac881..ef314a38de4 100644 --- a/modules/apps/transfer/types/deprecated.go +++ b/modules/apps/transfer/types/deprecated.go @@ -1,7 +1,7 @@ package types import ( - fmt "fmt" + "fmt" "strings" sdkmath "cosmossdk.io/math" diff --git a/modules/apps/transfer/types/expected_keepers.go b/modules/apps/transfer/types/expected_keepers.go index 3c069904a3a..098be515908 100644 --- a/modules/apps/transfer/types/expected_keepers.go +++ b/modules/apps/transfer/types/expected_keepers.go @@ -6,12 +6,12 @@ import ( "github.com/cosmos/cosmos-sdk/baseapp" sdk "github.com/cosmos/cosmos-sdk/types" banktypes "github.com/cosmos/cosmos-sdk/x/bank/types" - paramtypes "github.com/cosmos/cosmos-sdk/x/params/types" clienttypesv2 "github.com/cosmos/ibc-go/v10/modules/core/02-client/v2/types" connectiontypes "github.com/cosmos/ibc-go/v10/modules/core/03-connection/types" channeltypes "github.com/cosmos/ibc-go/v10/modules/core/04-channel/types" channeltypesv2 "github.com/cosmos/ibc-go/v10/modules/core/04-channel/v2/types" + porttypes "github.com/cosmos/ibc-go/v10/modules/core/05-port/types" ) // AccountKeeper defines the contract required for account APIs. @@ -37,6 +37,7 @@ type BankKeeper interface { // ChannelKeeper defines the expected IBC channel keeper type ChannelKeeper interface { + porttypes.ICS4Wrapper GetChannel(ctx sdk.Context, srcPort, srcChan string) (channel channeltypes.Channel, found bool) GetNextSequenceSend(ctx sdk.Context, portID, channelID string) (uint64, bool) GetAllChannelsWithPortPrefix(ctx sdk.Context, portPrefix string) []channeltypes.IdentifiedChannel @@ -63,8 +64,3 @@ type ClientKeeperV2 interface { type ConnectionKeeper interface { GetConnection(ctx sdk.Context, connectionID string) (connection connectiontypes.ConnectionEnd, found bool) } - -// ParamSubspace defines the expected Subspace interface for module parameters. 
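Embedding porttypes.ICS4Wrapper in the ChannelKeeper expectation above means one injected keeper must now satisfy both the channel queries and the packet-sending surface. A reduced sketch of that interface-embedding pattern, with an invented Wrapper method set standing in for the real ICS4Wrapper definition:

package main

import "fmt"

// Wrapper stands in for porttypes.ICS4Wrapper; the method here is invented
// for illustration and is narrower than the real interface.
type Wrapper interface {
	SendPacket(sourcePort, sourceChannel string, data []byte) (sequence uint64, err error)
}

// ChannelKeeper embeds Wrapper, so one concrete keeper value must provide
// both the query methods and the packet-sending methods.
type ChannelKeeper interface {
	Wrapper
	GetChannel(portID, channelID string) (found bool)
}

// fakeKeeper satisfies ChannelKeeper; in the module, the core IBC channel
// keeper plays this role.
type fakeKeeper struct{ nextSeq uint64 }

func (k *fakeKeeper) SendPacket(port, channel string, data []byte) (uint64, error) {
	k.nextSeq++
	return k.nextSeq, nil
}

func (k *fakeKeeper) GetChannel(portID, channelID string) bool { return true }

func main() {
	var ck ChannelKeeper = &fakeKeeper{}
	seq, _ := ck.SendPacket("transfer", "channel-0", []byte("packet"))
	fmt.Println("sent with sequence", seq, "channel exists:", ck.GetChannel("transfer", "channel-0"))
}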
-type ParamSubspace interface { - GetParamSet(ctx sdk.Context, ps paramtypes.ParamSet) -} diff --git a/modules/apps/transfer/types/genesis_test.go b/modules/apps/transfer/types/genesis_test.go index 4ce38dc67d9..389a2ad3e66 100644 --- a/modules/apps/transfer/types/genesis_test.go +++ b/modules/apps/transfer/types/genesis_test.go @@ -37,7 +37,6 @@ func TestValidateGenesis(t *testing.T) { } for _, tc := range testCases { - err := tc.genState.Validate() if tc.expErr == nil { require.NoError(t, err, tc.name) diff --git a/modules/apps/transfer/types/hop.go b/modules/apps/transfer/types/hop.go index 11abf5d04ef..d75adb6b44d 100644 --- a/modules/apps/transfer/types/hop.go +++ b/modules/apps/transfer/types/hop.go @@ -1,7 +1,7 @@ package types import ( - fmt "fmt" + "fmt" errorsmod "cosmossdk.io/errors" diff --git a/modules/apps/transfer/types/keys.go b/modules/apps/transfer/types/keys.go index 5b632c9d3f8..a78aa56bd2e 100644 --- a/modules/apps/transfer/types/keys.go +++ b/modules/apps/transfer/types/keys.go @@ -52,6 +52,11 @@ var ( // SupportedVersions defines all versions that are supported by the module SupportedVersions = []string{V1} + + // KeySendEnabled is store's key for SendEnabled Params + KeySendEnabled = []byte("SendEnabled") + // KeyReceiveEnabled is store's key for ReceiveEnabled Params + KeyReceiveEnabled = []byte("ReceiveEnabled") ) // GetEscrowAddress returns the escrow address for the specified channel. diff --git a/modules/apps/transfer/types/msgs.go b/modules/apps/transfer/types/msgs.go index e4aa4ef89bf..977b9760f69 100644 --- a/modules/apps/transfer/types/msgs.go +++ b/modules/apps/transfer/types/msgs.go @@ -8,6 +8,7 @@ import ( sdk "github.com/cosmos/cosmos-sdk/types" clienttypes "github.com/cosmos/ibc-go/v10/modules/core/02-client/types" + channeltypes "github.com/cosmos/ibc-go/v10/modules/core/04-channel/types" host "github.com/cosmos/ibc-go/v10/modules/core/24-host" ibcerrors "github.com/cosmos/ibc-go/v10/modules/core/errors" ) @@ -34,9 +35,8 @@ func NewMsgUpdateParams(signer string, params Params) *MsgUpdateParams { // ValidateBasic implements sdk.Msg func (msg MsgUpdateParams) ValidateBasic() error { - _, err := sdk.AccAddressFromBech32(msg.Signer) - if err != nil { - return errorsmod.Wrapf(ibcerrors.ErrInvalidAddress, "string could not be parsed as address: %v", err) + if strings.TrimSpace(msg.Signer) == "" { + return errorsmod.Wrap(ibcerrors.ErrInvalidAddress, "missing sender address") } return nil @@ -61,13 +61,35 @@ func NewMsgTransfer( } } +// NewMsgTransferAliased creates a new MsgTransfer instance +// with isV2 set to true, indicating that it is using the V2 protocol +// with v1 channel identifiers. 
+func NewMsgTransferAliased( + sourcePort, sourceChannel string, + token sdk.Coin, sender, receiver string, + timeoutHeight clienttypes.Height, timeoutTimestamp uint64, + memo string, +) *MsgTransfer { + return &MsgTransfer{ + SourcePort: sourcePort, + SourceChannel: sourceChannel, + Token: token, + Sender: sender, + Receiver: receiver, + TimeoutHeight: timeoutHeight, + TimeoutTimestamp: timeoutTimestamp, + Memo: memo, + UseAliasing: true, // This indicates that the message is using the V2 protocol with aliased channel identifiers + } +} + // NewMsgTransferWithEncoding creates a new MsgTransfer instance // with the provided encoding func NewMsgTransferWithEncoding( sourcePort, sourceChannel string, token sdk.Coin, sender, receiver string, timeoutHeight clienttypes.Height, timeoutTimestamp uint64, - memo string, encoding string, + memo string, encoding string, useAliasing bool, ) *MsgTransfer { return &MsgTransfer{ SourcePort: sourcePort, @@ -79,6 +101,7 @@ func NewMsgTransferWithEncoding( TimeoutTimestamp: timeoutTimestamp, Memo: memo, Encoding: encoding, + UseAliasing: useAliasing, } } @@ -96,9 +119,8 @@ func (msg MsgTransfer) ValidateBasic() error { return errorsmod.Wrap(ibcerrors.ErrInvalidCoins, msg.Token.String()) } - _, err := sdk.AccAddressFromBech32(msg.Sender) - if err != nil { - return errorsmod.Wrapf(ibcerrors.ErrInvalidAddress, "string could not be parsed as address: %v", err) + if strings.TrimSpace(msg.Sender) == "" { + return errorsmod.Wrap(ibcerrors.ErrInvalidAddress, "missing sender address") } if strings.TrimSpace(msg.Receiver) == "" { return errorsmod.Wrap(ibcerrors.ErrInvalidAddress, "missing recipient address") @@ -118,8 +140,18 @@ func (msg MsgTransfer) validateIdentifiers() error { if err := host.PortIdentifierValidator(msg.SourcePort); err != nil { return errorsmod.Wrapf(err, "invalid source port ID %s", msg.SourcePort) } - if err := host.ChannelIdentifierValidator(msg.SourceChannel); err != nil { - return errorsmod.Wrapf(err, "invalid source channel ID %s", msg.SourceChannel) + // if we are using aliasing, then the source channel must be in the channel id format + // expected by ibc-go + // otherwise, it may be either a client id using v2 directly or a channel id using ibc v1 + // thus, we perform a less strict check + if msg.UseAliasing { + if _, err := channeltypes.ParseChannelSequence(msg.SourceChannel); err != nil { + return errorsmod.Wrapf(err, "invalid source channel ID %s", msg.SourceChannel) + } + } else { + if err := host.ChannelIdentifierValidator(msg.SourceChannel); err != nil { + return errorsmod.Wrapf(err, "invalid source channel ID %s", msg.SourceChannel) + } } return nil diff --git a/modules/apps/transfer/types/msgs_test.go b/modules/apps/transfer/types/msgs_test.go index ef35aab1992..33619e4ad8c 100644 --- a/modules/apps/transfer/types/msgs_test.go +++ b/modules/apps/transfer/types/msgs_test.go @@ -27,13 +27,11 @@ const ( // 195 characters invalidLongPort = "Lorem ipsum dolor sit amet, consectetur adipiscing elit. Duis eros neque, ultricies vel ligula ac, convallis porttitor elit. 
Maecenas tincidunt turpis elit, vel faucibus nisl pellentesque sodales" - validChannel = "testchannel" + validChannel = "channel-5" eurekaClient = "07-tendermint-0" invalidChannel = "(invalidchannel1)" invalidShortChannel = "invalid" invalidLongChannel = "invalidlongchannelinvalidlongchannelinvalidlongchannelinvalidlongchannel" - - invalidAddress = "invalid" ) var ( @@ -58,11 +56,13 @@ func TestMsgTransferValidation(t *testing.T) { expError error }{ {"valid msg with base denom", types.NewMsgTransfer(validPort, validChannel, coin, sender, receiver, clienttypes.ZeroHeight(), 100, ""), nil}, + {"valid aliased channel", types.NewMsgTransferAliased(validPort, validChannel, coin, sender, receiver, clienttypes.ZeroHeight(), 100, ""), nil}, + {"valid aliased channel with encoding", types.NewMsgTransferWithEncoding(validPort, validChannel, coin, sender, receiver, clienttypes.ZeroHeight(), 100, "", "application/json", true), nil}, {"valid eureka msg with base denom", types.NewMsgTransfer(validPort, eurekaClient, coin, sender, receiver, clienttypes.ZeroHeight(), 100, ""), nil}, - {"valid eureka msg with base denom and encoding", types.NewMsgTransferWithEncoding(validPort, eurekaClient, coin, sender, receiver, clienttypes.ZeroHeight(), 100, "", "application/json"), nil}, + {"valid eureka msg with base denom and encoding", types.NewMsgTransferWithEncoding(validPort, eurekaClient, coin, sender, receiver, clienttypes.ZeroHeight(), 100, "", "application/json", false), nil}, {"valid msg with trace hash", types.NewMsgTransfer(validPort, validChannel, ibcCoin, sender, receiver, clienttypes.ZeroHeight(), 100, ""), nil}, {"valid eureka msg with trace hash", types.NewMsgTransfer(validPort, eurekaClient, ibcCoin, sender, receiver, clienttypes.ZeroHeight(), 100, ""), nil}, - {"valid eureka msg with trace hash with encoding", types.NewMsgTransferWithEncoding(validPort, eurekaClient, ibcCoin, sender, receiver, clienttypes.ZeroHeight(), 100, "", "application/json"), nil}, + {"valid eureka msg with trace hash with encoding", types.NewMsgTransferWithEncoding(validPort, eurekaClient, ibcCoin, sender, receiver, clienttypes.ZeroHeight(), 100, "", "application/json", false), nil}, {"invalid ibc denom", types.NewMsgTransfer(validPort, validChannel, invalidIBCCoin, sender, receiver, clienttypes.ZeroHeight(), 100, ""), ibcerrors.ErrInvalidCoins}, {"too short port id", types.NewMsgTransfer(invalidShortPort, validChannel, coin, sender, receiver, clienttypes.ZeroHeight(), 100, ""), host.ErrInvalidID}, {"too long port id", types.NewMsgTransfer(invalidLongPort, validChannel, coin, sender, receiver, clienttypes.ZeroHeight(), 100, ""), host.ErrInvalidID}, @@ -77,6 +77,7 @@ func TestMsgTransferValidation(t *testing.T) { {"missing recipient address", types.NewMsgTransfer(validPort, validChannel, coin, sender, "", clienttypes.ZeroHeight(), 100, ""), ibcerrors.ErrInvalidAddress}, {"too long recipient address", types.NewMsgTransfer(validPort, validChannel, coin, sender, ibctesting.GenerateString(types.MaximumReceiverLength+1), clienttypes.ZeroHeight(), 100, ""), ibcerrors.ErrInvalidAddress}, {"empty coin", types.NewMsgTransfer(validPort, validChannel, sdk.Coin{}, sender, receiver, clienttypes.ZeroHeight(), 100, ""), ibcerrors.ErrInvalidCoins}, + {"invalid aliased channel", types.NewMsgTransferAliased(validPort, eurekaClient, coin, sender, receiver, clienttypes.ZeroHeight(), 100, ""), host.ErrInvalidID}, } for _, tc := range testCases { @@ -111,7 +112,6 @@ func TestMsgUpdateParamsValidateBasic(t *testing.T) { expError error }{ {"success: 
valid signer and valid params", types.NewMsgUpdateParams(ibctesting.TestAccAddress, types.DefaultParams()), nil}, - {"failure: invalid signer with valid params", types.NewMsgUpdateParams(invalidAddress, types.DefaultParams()), ibcerrors.ErrInvalidAddress}, {"failure: empty signer with valid params", types.NewMsgUpdateParams(emptyAddr, types.DefaultParams()), ibcerrors.ErrInvalidAddress}, } diff --git a/modules/apps/transfer/types/packet_test.go b/modules/apps/transfer/types/packet_test.go index 6fb542decc1..13b4a629c2d 100644 --- a/modules/apps/transfer/types/packet_test.go +++ b/modules/apps/transfer/types/packet_test.go @@ -16,10 +16,10 @@ import ( ) const ( - denom = "transfer/gaiachannel/atom" - amount = "100" - largeAmount = "18446744073709551616" // one greater than largest uint64 (^uint64(0)) - invalidLargeAmount = "115792089237316195423570985008687907853269984665640564039457584007913129639936" // 2^256 + packetDenom = "transfer/gaiachannel/atom" + packetAmount = "100" + packetLargeAmount = "18446744073709551616" // one greater than largest uint64 (^uint64(0)) + packetInvalidLargeAmount = "115792089237316195423570985008687907853269984665640564039457584007913129639936" // 2^256 ) // TestFungibleTokenPacketDataValidateBasic tests ValidateBasic for FungibleTokenPacketData @@ -29,21 +29,20 @@ func TestFungibleTokenPacketDataValidateBasic(t *testing.T) { packetData types.FungibleTokenPacketData expErr error }{ - {"valid packet", types.NewFungibleTokenPacketData(denom, amount, sender, receiver, ""), nil}, - {"valid packet with memo", types.NewFungibleTokenPacketData(denom, amount, sender, receiver, "memo"), nil}, - {"valid packet with large amount", types.NewFungibleTokenPacketData(denom, largeAmount, sender, receiver, ""), nil}, - {"invalid denom", types.NewFungibleTokenPacketData("", amount, sender, receiver, ""), types.ErrInvalidDenomForTransfer}, - {"invalid denom, invalid portID", types.NewFungibleTokenPacketData("(transfer)/channel-1/uatom", amount, sender, receiver, ""), host.ErrInvalidID}, - {"invalid empty amount", types.NewFungibleTokenPacketData(denom, "", sender, receiver, ""), types.ErrInvalidAmount}, - {"invalid zero amount", types.NewFungibleTokenPacketData(denom, "0", sender, receiver, ""), types.ErrInvalidAmount}, - {"invalid negative amount", types.NewFungibleTokenPacketData(denom, "-1", sender, receiver, ""), types.ErrInvalidAmount}, - {"invalid large amount", types.NewFungibleTokenPacketData(denom, invalidLargeAmount, sender, receiver, ""), types.ErrInvalidAmount}, - {"missing sender address", types.NewFungibleTokenPacketData(denom, amount, emptyAddr, receiver, ""), ibcerrors.ErrInvalidAddress}, - {"missing recipient address", types.NewFungibleTokenPacketData(denom, amount, sender, emptyAddr, ""), ibcerrors.ErrInvalidAddress}, + {"valid packet", types.NewFungibleTokenPacketData(packetDenom, packetAmount, sender, receiver, ""), nil}, + {"valid packet with memo", types.NewFungibleTokenPacketData(packetDenom, packetAmount, sender, receiver, "memo"), nil}, + {"valid packet with large amount", types.NewFungibleTokenPacketData(packetDenom, packetLargeAmount, sender, receiver, ""), nil}, + {"invalid denom", types.NewFungibleTokenPacketData("", packetAmount, sender, receiver, ""), types.ErrInvalidDenomForTransfer}, + {"invalid denom, invalid portID", types.NewFungibleTokenPacketData("(transfer)/channel-1/uatom", packetAmount, sender, receiver, ""), host.ErrInvalidID}, + {"invalid empty amount", types.NewFungibleTokenPacketData(packetDenom, "", sender, receiver, ""), 
types.ErrInvalidAmount}, + {"invalid zero amount", types.NewFungibleTokenPacketData(packetDenom, "0", sender, receiver, ""), types.ErrInvalidAmount}, + {"invalid negative amount", types.NewFungibleTokenPacketData(packetDenom, "-1", sender, receiver, ""), types.ErrInvalidAmount}, + {"invalid large amount", types.NewFungibleTokenPacketData(packetDenom, packetInvalidLargeAmount, sender, receiver, ""), types.ErrInvalidAmount}, + {"missing sender address", types.NewFungibleTokenPacketData(packetDenom, packetAmount, emptyAddr, receiver, ""), ibcerrors.ErrInvalidAddress}, + {"missing recipient address", types.NewFungibleTokenPacketData(packetDenom, packetAmount, sender, emptyAddr, ""), ibcerrors.ErrInvalidAddress}, } for i, tc := range testCases { - err := tc.packetData.ValidateBasic() if tc.expErr == nil { require.NoError(t, err, "valid test case %d failed: %v", i, err) @@ -53,19 +52,19 @@ func TestFungibleTokenPacketDataValidateBasic(t *testing.T) { } } -func (suite *TypesTestSuite) TestGetPacketSender() { +func (s *TypesTestSuite) TestGetPacketSender() { packetData := types.FungibleTokenPacketData{ - Denom: denom, - Amount: amount, + Denom: packetDenom, + Amount: packetAmount, Sender: sender, Receiver: receiver, Memo: "", } - suite.Require().Equal(sender, packetData.GetPacketSender(types.PortID)) + s.Require().Equal(sender, packetData.GetPacketSender(types.PortID)) } -func (suite *TypesTestSuite) TestPacketDataProvider() { +func (s *TypesTestSuite) TestPacketDataProvider() { testCases := []struct { name string packetData types.FungibleTokenPacketData @@ -74,8 +73,8 @@ func (suite *TypesTestSuite) TestPacketDataProvider() { { "success: src_callback key in memo", types.FungibleTokenPacketData{ - Denom: denom, - Amount: amount, + Denom: packetDenom, + Amount: packetAmount, Sender: sender, Receiver: receiver, Memo: fmt.Sprintf(`{"src_callback": {"address": "%s"}}`, receiver), @@ -87,8 +86,8 @@ func (suite *TypesTestSuite) TestPacketDataProvider() { { "success: src_callback key in memo with additional fields", types.FungibleTokenPacketData{ - Denom: denom, - Amount: amount, + Denom: packetDenom, + Amount: packetAmount, Sender: sender, Receiver: receiver, Memo: fmt.Sprintf(`{"src_callback": {"address": "%s", "gas_limit": "200000"}}`, receiver), @@ -101,8 +100,8 @@ func (suite *TypesTestSuite) TestPacketDataProvider() { { "success: src_callback has string value", types.FungibleTokenPacketData{ - Denom: denom, - Amount: amount, + Denom: packetDenom, + Amount: packetAmount, Sender: sender, Receiver: receiver, Memo: `{"src_callback": "string"}`, @@ -112,8 +111,8 @@ func (suite *TypesTestSuite) TestPacketDataProvider() { { "failure: empty memo", types.FungibleTokenPacketData{ - Denom: denom, - Amount: amount, + Denom: packetDenom, + Amount: packetAmount, Sender: sender, Receiver: receiver, Memo: "", @@ -123,8 +122,8 @@ func (suite *TypesTestSuite) TestPacketDataProvider() { { "failure: non-json memo", types.FungibleTokenPacketData{ - Denom: denom, - Amount: amount, + Denom: packetDenom, + Amount: packetAmount, Sender: sender, Receiver: receiver, Memo: "invalid", @@ -134,34 +133,33 @@ func (suite *TypesTestSuite) TestPacketDataProvider() { } for _, tc := range testCases { - customData := tc.packetData.GetCustomPacketData("src_callback") - suite.Require().Equal(tc.expCustomData, customData) + s.Require().Equal(tc.expCustomData, customData) } } -func (suite *TypesTestSuite) TestFungibleTokenPacketDataOmitEmpty() { +func (s *TypesTestSuite) TestFungibleTokenPacketDataOmitEmpty() { // check that omitempty is 
present for the memo field packetData := types.FungibleTokenPacketData{ - Denom: denom, - Amount: amount, + Denom: packetDenom, + Amount: packetAmount, Sender: sender, Receiver: receiver, // Default value for non-specified memo field is empty string } bz, err := json.Marshal(packetData) - suite.Require().NoError(err) + s.Require().NoError(err) // check that the memo field is not present in the marshalled bytes - suite.Require().NotContains(string(bz), "memo") + s.Require().NotContains(string(bz), "memo") packetData.Memo = "abc" bz, err = json.Marshal(packetData) - suite.Require().NoError(err) + s.Require().NoError(err) // check that the memo field is present in the marshalled bytes - suite.Require().Contains(string(bz), "memo") + s.Require().Contains(string(bz), "memo") } // TestInternalTransferRepresentationValidateBasic tests ValidateBasic for FungibleTokenPacketData @@ -175,8 +173,8 @@ func TestInternalTransferRepresentationValidateBasic(t *testing.T) { "success: valid packet", types.NewInternalTransferRepresentation( types.Token{ - Denom: types.NewDenom(denom, types.NewHop("transfer", "channel-0"), types.NewHop("transfer", "channel-1")), - Amount: amount, + Denom: types.NewDenom(packetDenom, types.NewHop("transfer", "channel-0"), types.NewHop("transfer", "channel-1")), + Amount: packetAmount, }, sender, receiver, @@ -188,8 +186,8 @@ func TestInternalTransferRepresentationValidateBasic(t *testing.T) { "success: valid packet with memo", types.NewInternalTransferRepresentation( types.Token{ - Denom: types.NewDenom(denom, types.NewHop("transfer", "channel-0"), types.NewHop("transfer", "channel-1")), - Amount: amount, + Denom: types.NewDenom(packetDenom, types.NewHop("transfer", "channel-0"), types.NewHop("transfer", "channel-1")), + Amount: packetAmount, }, sender, receiver, @@ -201,8 +199,8 @@ func TestInternalTransferRepresentationValidateBasic(t *testing.T) { "success: valid packet with large amount", types.NewInternalTransferRepresentation( types.Token{ - Denom: types.NewDenom(denom, types.NewHop("transfer", "channel-0"), types.NewHop("transfer", "channel-1")), - Amount: largeAmount, + Denom: types.NewDenom(packetDenom, types.NewHop("transfer", "channel-0"), types.NewHop("transfer", "channel-1")), + Amount: packetLargeAmount, }, sender, receiver, @@ -215,7 +213,7 @@ func TestInternalTransferRepresentationValidateBasic(t *testing.T) { types.NewInternalTransferRepresentation( types.Token{ Denom: types.NewDenom("", types.NewHop("transfer", "channel-0"), types.NewHop("transfer", "channel-1")), - Amount: amount, + Amount: packetAmount, }, sender, receiver, @@ -227,7 +225,7 @@ func TestInternalTransferRepresentationValidateBasic(t *testing.T) { "failure: invalid empty amount", types.NewInternalTransferRepresentation( types.Token{ - Denom: types.NewDenom(denom, types.NewHop("transfer", "channel-0"), types.NewHop("transfer", "channel-1")), + Denom: types.NewDenom(packetDenom, types.NewHop("transfer", "channel-0"), types.NewHop("transfer", "channel-1")), Amount: "", }, sender, @@ -240,7 +238,7 @@ func TestInternalTransferRepresentationValidateBasic(t *testing.T) { "failure: invalid zero amount", types.NewInternalTransferRepresentation( types.Token{ - Denom: types.NewDenom(denom, types.NewHop("transfer", "channel-0"), types.NewHop("transfer", "channel-1")), + Denom: types.NewDenom(packetDenom, types.NewHop("transfer", "channel-0"), types.NewHop("transfer", "channel-1")), Amount: "0", }, sender, @@ -253,7 +251,7 @@ func TestInternalTransferRepresentationValidateBasic(t *testing.T) { "failure: 
invalid negative amount", types.NewInternalTransferRepresentation( types.Token{ - Denom: types.NewDenom(denom, types.NewHop("transfer", "channel-0"), types.NewHop("transfer", "channel-1")), + Denom: types.NewDenom(packetDenom, types.NewHop("transfer", "channel-0"), types.NewHop("transfer", "channel-1")), Amount: "-100", }, sender, @@ -266,8 +264,8 @@ func TestInternalTransferRepresentationValidateBasic(t *testing.T) { "failure: invalid large amount", types.NewInternalTransferRepresentation( types.Token{ - Denom: types.NewDenom(denom, types.NewHop("transfer", "channel-0"), types.NewHop("transfer", "channel-1")), - Amount: invalidLargeAmount, + Denom: types.NewDenom(packetDenom, types.NewHop("transfer", "channel-0"), types.NewHop("transfer", "channel-1")), + Amount: packetInvalidLargeAmount, }, sender, receiver, @@ -279,8 +277,8 @@ func TestInternalTransferRepresentationValidateBasic(t *testing.T) { "failure: missing sender address", types.NewInternalTransferRepresentation( types.Token{ - Denom: types.NewDenom(denom, types.NewHop("transfer", "channel-0"), types.NewHop("transfer", "channel-1")), - Amount: amount, + Denom: types.NewDenom(packetDenom, types.NewHop("transfer", "channel-0"), types.NewHop("transfer", "channel-1")), + Amount: packetAmount, }, "", receiver, @@ -292,8 +290,8 @@ func TestInternalTransferRepresentationValidateBasic(t *testing.T) { "failure: missing recipient address", types.NewInternalTransferRepresentation( types.Token{ - Denom: types.NewDenom(denom, types.NewHop("transfer", "channel-0"), types.NewHop("transfer", "channel-1")), - Amount: amount, + Denom: types.NewDenom(packetDenom, types.NewHop("transfer", "channel-0"), types.NewHop("transfer", "channel-1")), + Amount: packetAmount, }, sender, "", @@ -305,8 +303,8 @@ func TestInternalTransferRepresentationValidateBasic(t *testing.T) { "failure: memo field too large", types.NewInternalTransferRepresentation( types.Token{ - Denom: types.NewDenom(denom, types.NewHop("transfer", "channel-0"), types.NewHop("transfer", "channel-1")), - Amount: largeAmount, + Denom: types.NewDenom(packetDenom, types.NewHop("transfer", "channel-0"), types.NewHop("transfer", "channel-1")), + Amount: packetLargeAmount, }, sender, receiver, @@ -339,8 +337,8 @@ func TestGetPacketSender(t *testing.T) { "non-empty sender field", types.NewInternalTransferRepresentation( types.Token{ - Denom: types.NewDenom(denom, types.NewHop("transfer", "channel-0"), types.NewHop("transfer", "channel-1")), - Amount: amount, + Denom: types.NewDenom(packetDenom, types.NewHop("transfer", "channel-0"), types.NewHop("transfer", "channel-1")), + Amount: packetAmount, }, sender, receiver, @@ -352,8 +350,8 @@ func TestGetPacketSender(t *testing.T) { "empty sender field", types.NewInternalTransferRepresentation( types.Token{ - Denom: types.NewDenom(denom, types.NewHop("transfer", "channel-0"), types.NewHop("transfer", "channel-1")), - Amount: amount, + Denom: types.NewDenom(packetDenom, types.NewHop("transfer", "channel-0"), types.NewHop("transfer", "channel-1")), + Amount: packetAmount, }, "", receiver, @@ -380,8 +378,8 @@ func TestPacketDataProvider(t *testing.T) { "success: src_callback key in memo", types.NewInternalTransferRepresentation( types.Token{ - Denom: types.NewDenom(denom, types.NewHop("transfer", "channel-0"), types.NewHop("transfer", "channel-1")), - Amount: amount, + Denom: types.NewDenom(packetDenom, types.NewHop("transfer", "channel-0"), types.NewHop("transfer", "channel-1")), + Amount: packetAmount, }, sender, receiver, @@ -396,8 +394,8 @@ func 
TestPacketDataProvider(t *testing.T) { "success: src_callback key in memo with additional fields", types.NewInternalTransferRepresentation( types.Token{ - Denom: types.NewDenom(denom, types.NewHop("transfer", "channel-0"), types.NewHop("transfer", "channel-1")), - Amount: amount, + Denom: types.NewDenom(packetDenom, types.NewHop("transfer", "channel-0"), types.NewHop("transfer", "channel-1")), + Amount: packetAmount, }, sender, receiver, @@ -412,8 +410,8 @@ func TestPacketDataProvider(t *testing.T) { "success: src_callback has string value", types.NewInternalTransferRepresentation( types.Token{ - Denom: types.NewDenom(denom, types.NewHop("transfer", "channel-0"), types.NewHop("transfer", "channel-1")), - Amount: amount, + Denom: types.NewDenom(packetDenom, types.NewHop("transfer", "channel-0"), types.NewHop("transfer", "channel-1")), + Amount: packetAmount, }, sender, receiver, @@ -425,8 +423,8 @@ func TestPacketDataProvider(t *testing.T) { "failure: src_callback key not found memo", types.NewInternalTransferRepresentation( types.Token{ - Denom: types.NewDenom(denom, types.NewHop("transfer", "channel-0"), types.NewHop("transfer", "channel-1")), - Amount: amount, + Denom: types.NewDenom(packetDenom, types.NewHop("transfer", "channel-0"), types.NewHop("transfer", "channel-1")), + Amount: packetAmount, }, sender, receiver, @@ -438,8 +436,8 @@ func TestPacketDataProvider(t *testing.T) { "failure: empty memo", types.NewInternalTransferRepresentation( types.Token{ - Denom: types.NewDenom(denom, types.NewHop("transfer", "channel-0"), types.NewHop("transfer", "channel-1")), - Amount: amount, + Denom: types.NewDenom(packetDenom, types.NewHop("transfer", "channel-0"), types.NewHop("transfer", "channel-1")), + Amount: packetAmount, }, sender, receiver, @@ -451,8 +449,8 @@ func TestPacketDataProvider(t *testing.T) { "failure: non-json memo", types.NewInternalTransferRepresentation( types.Token{ - Denom: types.NewDenom(denom, types.NewHop("transfer", "channel-0"), types.NewHop("transfer", "channel-1")), - Amount: amount, + Denom: types.NewDenom(packetDenom, types.NewHop("transfer", "channel-0"), types.NewHop("transfer", "channel-1")), + Amount: packetAmount, }, sender, receiver, diff --git a/modules/apps/transfer/types/params_legacy.go b/modules/apps/transfer/types/params_legacy.go deleted file mode 100644 index 1a2886c54b0..00000000000 --- a/modules/apps/transfer/types/params_legacy.go +++ /dev/null @@ -1,42 +0,0 @@ -/* -NOTE: Usage of x/params to manage parameters is deprecated in favor of x/gov -controlled execution of MsgUpdateParams messages. These types remains solely -for migration purposes and will be removed in a future release. 
-[#3621](https://github.com/cosmos/ibc-go/issues/3621) -*/ -package types - -import ( - "fmt" - - paramtypes "github.com/cosmos/cosmos-sdk/x/params/types" -) - -var ( - // KeySendEnabled is store's key for SendEnabled Params - KeySendEnabled = []byte("SendEnabled") - // KeyReceiveEnabled is store's key for ReceiveEnabled Params - KeyReceiveEnabled = []byte("ReceiveEnabled") -) - -// ParamKeyTable type declaration for parameters -func ParamKeyTable() paramtypes.KeyTable { - return paramtypes.NewKeyTable().RegisterParamSet(&Params{}) -} - -// ParamSetPairs implements params.ParamSet -func (p *Params) ParamSetPairs() paramtypes.ParamSetPairs { - return paramtypes.ParamSetPairs{ - paramtypes.NewParamSetPair(KeySendEnabled, &p.SendEnabled, validateEnabledTypeLegacy), - paramtypes.NewParamSetPair(KeyReceiveEnabled, &p.ReceiveEnabled, validateEnabledTypeLegacy), - } -} - -func validateEnabledTypeLegacy(i any) error { - _, ok := i.(bool) - if !ok { - return fmt.Errorf("invalid parameter type: %T", i) - } - - return nil -} diff --git a/modules/apps/transfer/types/query.pb.go b/modules/apps/transfer/types/query.pb.go index 5f732082602..a6b352d8787 100644 --- a/modules/apps/transfer/types/query.pb.go +++ b/modules/apps/transfer/types/query.pb.go @@ -909,6 +909,7 @@ func _Query_TotalEscrowForDenom_Handler(srv interface{}, ctx context.Context, de return interceptor(ctx, in, info, handler) } +var Query_serviceDesc = _Query_serviceDesc var _Query_serviceDesc = grpc.ServiceDesc{ ServiceName: "ibc.applications.transfer.v1.Query", HandlerType: (*QueryServer)(nil), diff --git a/modules/apps/transfer/types/solidity_abi_test.go b/modules/apps/transfer/types/solidity_abi_test.go index e684877768d..71f4df4a788 100644 --- a/modules/apps/transfer/types/solidity_abi_test.go +++ b/modules/apps/transfer/types/solidity_abi_test.go @@ -4,7 +4,7 @@ import ( "github.com/cosmos/ibc-go/v10/modules/apps/transfer/types" ) -func (suite *TypesTestSuite) TestFTPD() { +func (s *TypesTestSuite) TestFTPD() { packetData := types.FungibleTokenPacketData{ Denom: "uatom", Amount: "1000000", @@ -14,10 +14,10 @@ func (suite *TypesTestSuite) TestFTPD() { } bz, err := types.EncodeABIFungibleTokenPacketData(&packetData) - suite.Require().NoError(err) + s.Require().NoError(err) decodedPacketData, err := types.DecodeABIFungibleTokenPacketData(bz) - suite.Require().NoError(err) + s.Require().NoError(err) - suite.Require().Equal(packetData, *decodedPacketData) + s.Require().Equal(packetData, *decodedPacketData) } diff --git a/modules/apps/transfer/types/token_test.go b/modules/apps/transfer/types/token_test.go index 637aae0b096..102122c7feb 100644 --- a/modules/apps/transfer/types/token_test.go +++ b/modules/apps/transfer/types/token_test.go @@ -1,4 +1,4 @@ -package types +package types_test import ( "errors" @@ -9,133 +9,135 @@ import ( sdkmath "cosmossdk.io/math" sdk "github.com/cosmos/cosmos-sdk/types" + + "github.com/cosmos/ibc-go/v10/modules/apps/transfer/types" ) const ( - denom = "atom/pool" - amount = "100" + tokenDenom = "atom/pool" + tokenAmount = "100" ) func TestValidate(t *testing.T) { testCases := []struct { name string - token Token + token types.Token expError error }{ { "success: multiple port channel pair denom", - Token{ - Denom: Denom{ + types.Token{ + Denom: types.Denom{ Base: "atom", - Trace: []Hop{ - NewHop("transfer", "channel-0"), - NewHop("transfer", "channel-1"), + Trace: []types.Hop{ + types.NewHop("transfer", "channel-0"), + types.NewHop("transfer", "channel-1"), }, }, - Amount: amount, + Amount: tokenAmount, }, 
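With params_legacy.go removed, the send/receive toggles are managed by the module itself and updated through MsgUpdateParams signed by the governance authority, as the simulation test earlier in this patch asserts. A short sketch of constructing such a message under that assumption (import paths follow the ones used in these tests; NewMsgUpdateParams and DefaultParams are the constructors shown in the diff, and the default "cosmos" bech32 prefix is assumed):

package main

import (
	"fmt"

	sdk "github.com/cosmos/cosmos-sdk/types"
	"github.com/cosmos/cosmos-sdk/types/address"

	transfertypes "github.com/cosmos/ibc-go/v10/modules/apps/transfer/types"
)

func main() {
	// The authority for parameter updates is the gov module account address.
	govAuthority := sdk.AccAddress(address.Module("gov")).String()

	// Disable outbound transfers while leaving the other defaults untouched.
	params := transfertypes.DefaultParams()
	params.SendEnabled = false

	msg := transfertypes.NewMsgUpdateParams(govAuthority, params)
	fmt.Println(msg.Signer, msg.Params.SendEnabled)
}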
nil, }, { "success: one port channel pair denom", - Token{ - Denom: Denom{ + types.Token{ + Denom: types.Denom{ Base: "uatom", - Trace: []Hop{ - NewHop("transfer", "channel-1"), + Trace: []types.Hop{ + types.NewHop("transfer", "channel-1"), }, }, - Amount: amount, + Amount: tokenAmount, }, nil, }, { "success: non transfer port trace", - Token{ - Denom: Denom{ + types.Token{ + Denom: types.Denom{ Base: "uatom", - Trace: []Hop{ - NewHop("transfer", "channel-0"), - NewHop("transfer", "channel-1"), - NewHop("transfer-custom", "channel-2"), + Trace: []types.Hop{ + types.NewHop("transfer", "channel-0"), + types.NewHop("transfer", "channel-1"), + types.NewHop("transfer-custom", "channel-2"), }, }, - Amount: amount, + Amount: tokenAmount, }, nil, }, { "failure: empty denom", - Token{ - Denom: Denom{}, - Amount: amount, + types.Token{ + Denom: types.Denom{}, + Amount: tokenAmount, }, - ErrInvalidDenomForTransfer, + types.ErrInvalidDenomForTransfer, }, { "failure: invalid amount string", - Token{ - Denom: Denom{ + types.Token{ + Denom: types.Denom{ Base: "atom", - Trace: []Hop{ - NewHop("transfer", "channel-0"), - NewHop("transfer", "channel-1"), + Trace: []types.Hop{ + types.NewHop("transfer", "channel-0"), + types.NewHop("transfer", "channel-1"), }, }, Amount: "value", }, - ErrInvalidAmount, + types.ErrInvalidAmount, }, { "failure: amount is zero", - Token{ - Denom: Denom{ + types.Token{ + Denom: types.Denom{ Base: "atom", - Trace: []Hop{ - NewHop("transfer", "channel-0"), - NewHop("transfer", "channel-1"), + Trace: []types.Hop{ + types.NewHop("transfer", "channel-0"), + types.NewHop("transfer", "channel-1"), }, }, Amount: "0", }, - ErrInvalidAmount, + types.ErrInvalidAmount, }, { "failure: amount is negative", - Token{ - Denom: Denom{ + types.Token{ + Denom: types.Denom{ Base: "atom", - Trace: []Hop{ - NewHop("transfer", "channel-0"), - NewHop("transfer", "channel-1"), + Trace: []types.Hop{ + types.NewHop("transfer", "channel-0"), + types.NewHop("transfer", "channel-1"), }, }, Amount: "-1", }, - ErrInvalidAmount, + types.ErrInvalidAmount, }, { "failure: invalid identifier in trace", - Token{ - Denom: Denom{ + types.Token{ + Denom: types.Denom{ Base: "uatom", - Trace: []Hop{ - NewHop("transfer", "channel-1"), - NewHop("randomport", ""), + Trace: []types.Hop{ + types.NewHop("transfer", "channel-1"), + types.NewHop("randomport", ""), }, }, - Amount: amount, + Amount: tokenAmount, }, errors.New("invalid token denom: invalid trace: invalid hop source channel ID : identifier cannot be blank: invalid identifier"), }, { "failure: empty identifier in trace", - Token{ - Denom: Denom{ + types.Token{ + Denom: types.Denom{ Base: "uatom", - Trace: []Hop{{}}, + Trace: []types.Hop{{}}, }, - Amount: amount, + Amount: tokenAmount, }, errors.New("invalid token denom: invalid trace: invalid hop source port ID : identifier cannot be blank: invalid identifier"), }, @@ -156,33 +158,33 @@ func TestValidate(t *testing.T) { func TestToCoin(t *testing.T) { testCases := []struct { name string - token Token + token types.Token expCoin sdk.Coin expError error }{ { "success: convert token to coin", - Token{ - Denom: Denom{ - Base: denom, - Trace: []Hop{}, + types.Token{ + Denom: types.Denom{ + Base: tokenDenom, + Trace: []types.Hop{}, }, - Amount: amount, + Amount: tokenAmount, }, - sdk.NewCoin(denom, sdkmath.NewInt(100)), + sdk.NewCoin(tokenDenom, sdkmath.NewInt(100)), nil, }, { "failure: invalid amount string", - Token{ - Denom: Denom{ - Base: denom, - Trace: []Hop{}, + types.Token{ + Denom: types.Denom{ + Base: tokenDenom, 
+ Trace: []types.Hop{}, }, Amount: "value", }, sdk.Coin{}, - ErrInvalidAmount, + types.ErrInvalidAmount, }, } diff --git a/modules/apps/transfer/types/transfer_authorization.go b/modules/apps/transfer/types/transfer_authorization.go index 1ddc852f7c6..058e9c00d3b 100644 --- a/modules/apps/transfer/types/transfer_authorization.go +++ b/modules/apps/transfer/types/transfer_authorization.go @@ -31,12 +31,12 @@ func NewTransferAuthorization(allocations ...Allocation) *TransferAuthorization } // MsgTypeURL implements Authorization.MsgTypeURL. -func (TransferAuthorization) MsgTypeURL() string { +func (*TransferAuthorization) MsgTypeURL() string { return sdk.MsgTypeURL(&MsgTransfer{}) } // Accept implements Authorization.Accept. -func (a TransferAuthorization) Accept(goCtx context.Context, msg proto.Message) (authz.AcceptResponse, error) { +func (a *TransferAuthorization) Accept(goCtx context.Context, msg proto.Message) (authz.AcceptResponse, error) { msgTransfer, ok := msg.(*MsgTransfer) if !ok { return authz.AcceptResponse{}, errorsmod.Wrap(ibcerrors.ErrInvalidType, "type mismatch") @@ -96,7 +96,7 @@ func (a TransferAuthorization) Accept(goCtx context.Context, msg proto.Message) } // ValidateBasic implements Authorization.ValidateBasic. -func (a TransferAuthorization) ValidateBasic() error { +func (a *TransferAuthorization) ValidateBasic() error { if len(a.Allocations) == 0 { return errorsmod.Wrap(ErrInvalidAuthorization, "allocations cannot be empty") } diff --git a/modules/apps/transfer/types/transfer_authorization_test.go b/modules/apps/transfer/types/transfer_authorization_test.go index acdf5ace502..61c4eb76a89 100644 --- a/modules/apps/transfer/types/transfer_authorization_test.go +++ b/modules/apps/transfer/types/transfer_authorization_test.go @@ -21,7 +21,7 @@ const ( testMemo2 = `{"forward":{"channel":"channel-11","port":"transfer","receiver":"stars1twfv52yxcyykx2lcvgl42svw46hsm5dd4ww6xy","retries":2,"timeout":1712146014542131200}}` ) -func (suite *TypesTestSuite) TestTransferAuthorizationAccept() { +func (s *TypesTestSuite) TestTransferAuthorizationAccept() { var ( msgTransfer *types.MsgTransfer transferAuthz types.TransferAuthorization @@ -36,11 +36,11 @@ func (suite *TypesTestSuite) TestTransferAuthorizationAccept() { "success", func() {}, func(res authz.AcceptResponse, err error) { - suite.Require().NoError(err) + s.Require().NoError(err) - suite.Require().True(res.Accept) - suite.Require().True(res.Delete) - suite.Require().Nil(res.Updated) + s.Require().True(res.Accept) + s.Require().True(res.Delete) + s.Require().Nil(res.Updated) }, }, { @@ -49,16 +49,16 @@ func (suite *TypesTestSuite) TestTransferAuthorizationAccept() { msgTransfer.Token = sdk.NewCoin(sdk.DefaultBondDenom, sdkmath.NewInt(50)) }, func(res authz.AcceptResponse, err error) { - suite.Require().NoError(err) + s.Require().NoError(err) - suite.Require().True(res.Accept) - suite.Require().False(res.Delete) + s.Require().True(res.Accept) + s.Require().False(res.Delete) updatedAuthz, ok := res.Updated.(*types.TransferAuthorization) - suite.Require().True(ok) + s.Require().True(ok) isEqual := updatedAuthz.Allocations[0].SpendLimit.Equal(sdk.NewCoins(sdk.NewCoin(sdk.DefaultBondDenom, sdkmath.NewInt(50)))) - suite.Require().True(isEqual) + s.Require().True(isEqual) }, }, { @@ -67,11 +67,11 @@ func (suite *TypesTestSuite) TestTransferAuthorizationAccept() { transferAuthz.Allocations[0].AllowList = []string{} }, func(res authz.AcceptResponse, err error) { - suite.Require().NoError(err) + s.Require().NoError(err) - 
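Moving MsgTypeURL, Accept and ValidateBasic to pointer receivers in transfer_authorization.go means only *TransferAuthorization satisfies the authz interface, which matters wherever the authorization is registered or type-asserted. A generic sketch of that Go behaviour, using an invented Authorization interface rather than the real cosmos-sdk authz types:

package main

import "fmt"

// Authorization is an invented stand-in for the authz interface implemented
// by TransferAuthorization.
type Authorization interface {
	MsgTypeURL() string
}

type TransferAuth struct{ spendLimit int64 }

// With a pointer receiver, only *TransferAuth implements Authorization.
func (a *TransferAuth) MsgTypeURL() string { return "/ibc.applications.transfer.v1.MsgTransfer" }

func main() {
	var auth Authorization = &TransferAuth{spendLimit: 100}
	fmt.Println(auth.MsgTypeURL())

	// A plain value no longer satisfies the interface:
	// var bad Authorization = TransferAuth{} // compile error with pointer receivers

	// Type assertions must use the pointer type as well.
	_, ok := auth.(*TransferAuth)
	fmt.Println("asserted pointer type:", ok)
}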
suite.Require().True(res.Accept) - suite.Require().True(res.Delete) - suite.Require().Nil(res.Updated) + s.Require().True(res.Accept) + s.Require().True(res.Delete) + s.Require().Nil(res.Updated) }, }, { @@ -80,11 +80,11 @@ func (suite *TypesTestSuite) TestTransferAuthorizationAccept() { transferAuthz.Allocations[0].SpendLimit = sdk.NewCoins(sdk.NewCoin(sdk.DefaultBondDenom, types.UnboundedSpendLimit())) }, func(res authz.AcceptResponse, err error) { - suite.Require().NoError(err) + s.Require().NoError(err) - suite.Require().True(res.Accept) - suite.Require().False(res.Delete) - suite.Require().Nil(res.Updated) + s.Require().True(res.Accept) + s.Require().False(res.Delete) + s.Require().Nil(res.Updated) }, }, { @@ -94,11 +94,11 @@ func (suite *TypesTestSuite) TestTransferAuthorizationAccept() { transferAuthz.Allocations[0].AllowedPacketData = allowedList }, func(res authz.AcceptResponse, err error) { - suite.Require().NoError(err) + s.Require().NoError(err) - suite.Require().True(res.Accept) - suite.Require().True(res.Delete) - suite.Require().Nil(res.Updated) + s.Require().True(res.Accept) + s.Require().True(res.Delete) + s.Require().Nil(res.Updated) }, }, { @@ -109,11 +109,11 @@ func (suite *TypesTestSuite) TestTransferAuthorizationAccept() { msgTransfer.Memo = testMemo1 }, func(res authz.AcceptResponse, err error) { - suite.Require().NoError(err) + s.Require().NoError(err) - suite.Require().True(res.Accept) - suite.Require().True(res.Delete) - suite.Require().Nil(res.Updated) + s.Require().True(res.Accept) + s.Require().True(res.Delete) + s.Require().Nil(res.Updated) }, }, { @@ -124,11 +124,11 @@ func (suite *TypesTestSuite) TestTransferAuthorizationAccept() { msgTransfer.Memo = testMemo1 }, func(res authz.AcceptResponse, err error) { - suite.Require().NoError(err) + s.Require().NoError(err) - suite.Require().True(res.Accept) - suite.Require().True(res.Delete) - suite.Require().Nil(res.Updated) + s.Require().True(res.Accept) + s.Require().True(res.Delete) + s.Require().Nil(res.Updated) }, }, { @@ -139,7 +139,7 @@ func (suite *TypesTestSuite) TestTransferAuthorizationAccept() { msgTransfer.Memo = testMemo1 }, func(res authz.AcceptResponse, err error) { - suite.Require().Error(err) + s.Require().Error(err) }, }, { @@ -150,8 +150,8 @@ func (suite *TypesTestSuite) TestTransferAuthorizationAccept() { msgTransfer.Memo = testMemo2 }, func(res authz.AcceptResponse, err error) { - suite.Require().Error(err) - suite.Require().ErrorContains(err, fmt.Sprintf("not allowed memo: %s", testMemo2)) + s.Require().Error(err) + s.Require().ErrorContains(err, fmt.Sprintf("not allowed memo: %s", testMemo2)) }, }, { @@ -166,19 +166,19 @@ func (suite *TypesTestSuite) TestTransferAuthorizationAccept() { msgTransfer.Token = sdk.NewCoin(sdk.DefaultBondDenom, sdkmath.NewInt(50)) }, func(res authz.AcceptResponse, err error) { - suite.Require().NoError(err) + s.Require().NoError(err) updatedTransferAuthz, ok := res.Updated.(*types.TransferAuthorization) - suite.Require().True(ok) + s.Require().True(ok) remainder := updatedTransferAuthz.Allocations[0].SpendLimit.AmountOf(sdk.DefaultBondDenom) - suite.Require().True(sdkmath.NewInt(50).Equal(remainder)) + s.Require().True(sdkmath.NewInt(50).Equal(remainder)) remainder = updatedTransferAuthz.Allocations[0].SpendLimit.AmountOf("test-denom") - suite.Require().True(sdkmath.NewInt(100).Equal(remainder)) + s.Require().True(sdkmath.NewInt(100).Equal(remainder)) remainder = updatedTransferAuthz.Allocations[0].SpendLimit.AmountOf("test-denom2") - 
suite.Require().True(sdkmath.NewInt(100).Equal(remainder)) + s.Require().True(sdkmath.NewInt(100).Equal(remainder)) }, }, { @@ -188,7 +188,7 @@ func (suite *TypesTestSuite) TestTransferAuthorizationAccept() { msgTransfer.SourceChannel = "channel-9" }, func(res authz.AcceptResponse, err error) { - suite.Require().Error(err) + s.Require().Error(err) }, }, { @@ -197,25 +197,25 @@ func (suite *TypesTestSuite) TestTransferAuthorizationAccept() { msgTransfer.Token = sdk.NewCoin(sdk.DefaultBondDenom, sdkmath.NewInt(1000)) }, func(res authz.AcceptResponse, err error) { - suite.Require().Error(err) + s.Require().Error(err) }, }, { "receiver address not permitted via allow list", func() { - msgTransfer.Receiver = suite.chainB.SenderAccount.GetAddress().String() + msgTransfer.Receiver = s.chainB.SenderAccount.GetAddress().String() }, func(res authz.AcceptResponse, err error) { - suite.Require().Error(err) + s.Require().Error(err) }, }, } for _, tc := range testCases { - suite.Run(tc.name, func() { - suite.SetupTest() + s.Run(tc.name, func() { + s.SetupTest() - path := ibctesting.NewTransferPath(suite.chainA, suite.chainB) + path := ibctesting.NewTransferPath(s.chainA, s.chainB) path.Setup() transferAuthz = types.TransferAuthorization{ @@ -233,27 +233,27 @@ func (suite *TypesTestSuite) TestTransferAuthorizationAccept() { path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, ibctesting.TestCoin, - suite.chainA.SenderAccount.GetAddress().String(), + s.chainA.SenderAccount.GetAddress().String(), ibctesting.TestAccAddress, - suite.chainB.GetTimeoutHeight(), + s.chainB.GetTimeoutHeight(), 0, "", ) tc.malleate() - res, err := transferAuthz.Accept(suite.chainA.GetContext(), msgTransfer) + res, err := transferAuthz.Accept(s.chainA.GetContext(), msgTransfer) tc.assertResult(res, err) }) } } -func (suite *TypesTestSuite) TestTransferAuthorizationMsgTypeURL() { +func (s *TypesTestSuite) TestTransferAuthorizationMsgTypeURL() { var transferAuthz types.TransferAuthorization - suite.Require().Equal(sdk.MsgTypeURL(&types.MsgTransfer{}), transferAuthz.MsgTypeURL(), "invalid type url for transfer authorization") + s.Require().Equal(sdk.MsgTypeURL(&types.MsgTransfer{}), transferAuthz.MsgTypeURL(), "invalid type url for transfer authorization") } -func (suite *TypesTestSuite) TestTransferAuthorizationValidateBasic() { +func (s *TypesTestSuite) TestTransferAuthorizationValidateBasic() { var transferAuthz types.TransferAuthorization testCases := []struct { @@ -367,7 +367,7 @@ func (suite *TypesTestSuite) TestTransferAuthorizationValidateBasic() { } for _, tc := range testCases { - suite.Run(tc.name, func() { + s.Run(tc.name, func() { transferAuthz = types.TransferAuthorization{ Allocations: []types.Allocation{ { @@ -384,9 +384,9 @@ func (suite *TypesTestSuite) TestTransferAuthorizationValidateBasic() { err := transferAuthz.ValidateBasic() if tc.expErr == nil { - suite.Require().NoError(err) + s.Require().NoError(err) } else { - suite.Require().ErrorIs(err, tc.expErr) + s.Require().ErrorIs(err, tc.expErr) } }) } diff --git a/modules/apps/transfer/types/tx.pb.go b/modules/apps/transfer/types/tx.pb.go index 868539cf142..abef21f1aca 100644 --- a/modules/apps/transfer/types/tx.pb.go +++ b/modules/apps/transfer/types/tx.pb.go @@ -58,6 +58,13 @@ type MsgTransfer struct { Memo string `protobuf:"bytes,8,opt,name=memo,proto3" json:"memo,omitempty"` // optional encoding Encoding string `protobuf:"bytes,9,opt,name=encoding,proto3" json:"encoding,omitempty"` + // boolean flag to indicate if the transfer message + // is sent 
with the IBC v2 protocol but uses v1 channel identifiers. + // In this case, the v1 channel identifiers function as aliases to the + // underlying client ids. + // This only needs to be set if the channel IDs + // are V1 channel identifiers. + UseAliasing bool `protobuf:"varint,10,opt,name=use_aliasing,json=useAliasing,proto3" json:"use_aliasing,omitempty"` } func (m *MsgTransfer) Reset() { *m = MsgTransfer{} } @@ -225,47 +232,48 @@ func init() { } var fileDescriptor_7401ed9bed2f8e09 = []byte{ - // 629 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x53, 0x31, 0x6f, 0xd4, 0x4a, - 0x10, 0x3e, 0xbf, 0x5c, 0xee, 0x25, 0x7b, 0x2f, 0xc9, 0x8b, 0x41, 0x89, 0x63, 0x21, 0x5f, 0x74, - 0x22, 0x52, 0xb8, 0x28, 0xbb, 0x38, 0x08, 0x81, 0xae, 0xbc, 0x34, 0x14, 0x44, 0x8a, 0xac, 0xd0, - 0xd0, 0x44, 0xf6, 0xde, 0xe0, 0x5b, 0xe5, 0xbc, 0x6b, 0xbc, 0x7b, 0x27, 0x68, 0x10, 0xa2, 0x42, - 0x54, 0xfc, 0x00, 0x0a, 0x4a, 0xca, 0xfc, 0x8c, 0x94, 0x29, 0xa9, 0x10, 0x4a, 0x8a, 0x34, 0xfc, - 0x08, 0xb4, 0xeb, 0xf5, 0x61, 0x28, 0x02, 0x34, 0xf6, 0xce, 0xcc, 0x37, 0xdf, 0xcc, 0x7c, 0xb3, - 0x8b, 0xb6, 0x58, 0x42, 0x49, 0x9c, 0xe7, 0x63, 0x46, 0x63, 0xc5, 0x04, 0x97, 0x44, 0x15, 0x31, - 0x97, 0xcf, 0xa0, 0x20, 0xd3, 0x90, 0xa8, 0x17, 0x38, 0x2f, 0x84, 0x12, 0xee, 0x2d, 0x96, 0x50, - 0x5c, 0x87, 0xe1, 0x0a, 0x86, 0xa7, 0xa1, 0xbf, 0x1a, 0x67, 0x8c, 0x0b, 0x62, 0xbe, 0x65, 0x82, - 0x7f, 0x33, 0x15, 0xa9, 0x30, 0x47, 0xa2, 0x4f, 0xd6, 0xbb, 0x4e, 0x85, 0xcc, 0x84, 0x24, 0x99, - 0x4c, 0x35, 0x7d, 0x26, 0x53, 0x1b, 0x08, 0x6c, 0x20, 0x89, 0x25, 0x90, 0x69, 0x98, 0x80, 0x8a, - 0x43, 0x42, 0x05, 0xe3, 0x36, 0xde, 0xd1, 0x6d, 0x52, 0x51, 0x00, 0xa1, 0x63, 0x06, 0x5c, 0xe9, - 0xec, 0xf2, 0x64, 0x01, 0x3b, 0xd7, 0xcf, 0x51, 0x35, 0x6b, 0xc0, 0xdd, 0x0f, 0x73, 0xa8, 0x7d, - 0x20, 0xd3, 0x23, 0xeb, 0x75, 0x3b, 0xa8, 0x2d, 0xc5, 0xa4, 0xa0, 0x70, 0x9c, 0x8b, 0x42, 0x79, - 0xce, 0xa6, 0xb3, 0xbd, 0x18, 0xa1, 0xd2, 0x75, 0x28, 0x0a, 0xe5, 0x6e, 0xa1, 0x65, 0x0b, 0xa0, - 0xa3, 0x98, 0x73, 0x18, 0x7b, 0xff, 0x18, 0xcc, 0x52, 0xe9, 0xdd, 0x2f, 0x9d, 0x6e, 0x1f, 0xcd, - 0x2b, 0x71, 0x02, 0xdc, 0x9b, 0xdb, 0x74, 0xb6, 0xdb, 0x7b, 0x1b, 0xb8, 0x9c, 0x0a, 0xeb, 0xa9, - 0xb0, 0x9d, 0x0a, 0xef, 0x0b, 0xc6, 0x07, 0x8b, 0x67, 0x5f, 0x3a, 0x8d, 0x4f, 0x57, 0xa7, 0x3d, - 0x27, 0x2a, 0x53, 0xdc, 0x35, 0xd4, 0x92, 0xc0, 0x87, 0x50, 0x78, 0x4d, 0x43, 0x6d, 0x2d, 0xd7, - 0x47, 0x0b, 0x05, 0x50, 0x60, 0x53, 0x28, 0xbc, 0x79, 0x13, 0x99, 0xd9, 0xee, 0x63, 0xb4, 0xac, - 0x58, 0x06, 0x62, 0xa2, 0x8e, 0x47, 0xc0, 0xd2, 0x91, 0xf2, 0x5a, 0xa6, 0xb0, 0x8f, 0xf5, 0xba, - 0xb4, 0x5c, 0xd8, 0x8a, 0x34, 0x0d, 0xf1, 0x23, 0x83, 0xa8, 0x57, 0x5e, 0xb2, 0xc9, 0x65, 0xc4, - 0xdd, 0x41, 0xab, 0x15, 0x9b, 0xfe, 0x4b, 0x15, 0x67, 0xb9, 0xf7, 0xef, 0xa6, 0xb3, 0xdd, 0x8c, - 0xfe, 0xb7, 0x81, 0xa3, 0xca, 0xef, 0xba, 0xa8, 0x99, 0x41, 0x26, 0xbc, 0x05, 0xd3, 0x92, 0x39, - 0xeb, 0x56, 0x81, 0x53, 0x31, 0x64, 0x3c, 0xf5, 0x16, 0xcb, 0x56, 0x2b, 0xbb, 0xdf, 0x7b, 0xfb, - 0xb1, 0xd3, 0x78, 0x73, 0x75, 0xda, 0xb3, 0x73, 0xbd, 0xbb, 0x3a, 0xed, 0xad, 0x95, 0xf2, 0xec, - 0xca, 0xe1, 0x09, 0xa9, 0xad, 0xa3, 0xfb, 0x00, 0xdd, 0xa8, 0x99, 0x11, 0xc8, 0x5c, 0x70, 0x09, - 0x9a, 0x5e, 0xc2, 0xf3, 0x09, 0x70, 0x0a, 0x66, 0x45, 0xcd, 0x68, 0x66, 0xf7, 0x9b, 0x9a, 0xbe, - 0xfb, 0x0a, 0xad, 0x1c, 0xc8, 0xf4, 0x49, 0x3e, 0x8c, 0x15, 0x1c, 0xc6, 0x45, 0x9c, 0x49, 0x23, - 0x2b, 0x4b, 0x39, 0x14, 0x76, 0xab, 0xd6, 0x72, 0x07, 0xa8, 0x95, 0x1b, 0x84, 0xd9, 0x64, 0x7b, - 0xef, 0x36, 0xbe, 0xee, 0x86, 0xe3, 0x92, 0x6d, 0xd0, 0xd4, 0xe2, 
0x45, 0x36, 0xb3, 0xbf, 0xf2, - 0x63, 0x26, 0x43, 0xda, 0xdd, 0x40, 0xeb, 0xbf, 0xd4, 0xaf, 0x9a, 0xdf, 0xfb, 0xe6, 0xa0, 0xb9, - 0x03, 0x99, 0xba, 0x23, 0xb4, 0x30, 0xbb, 0x76, 0x77, 0xae, 0xaf, 0x59, 0xd3, 0xc0, 0x0f, 0xff, - 0x18, 0x3a, 0x93, 0x4b, 0xa1, 0xff, 0x7e, 0x52, 0x62, 0xf7, 0xb7, 0x14, 0x75, 0xb8, 0x7f, 0xff, - 0xaf, 0xe0, 0x55, 0x55, 0x7f, 0xfe, 0xb5, 0xbe, 0x5a, 0x83, 0xe8, 0xec, 0x22, 0x70, 0xce, 0x2f, - 0x02, 0xe7, 0xeb, 0x45, 0xe0, 0xbc, 0xbf, 0x0c, 0x1a, 0xe7, 0x97, 0x41, 0xe3, 0xf3, 0x65, 0xd0, - 0x78, 0xfa, 0x30, 0x65, 0x6a, 0x34, 0x49, 0x30, 0x15, 0x19, 0xb1, 0x8f, 0x9e, 0x25, 0x74, 0x37, - 0x15, 0x64, 0x1a, 0xde, 0x25, 0x99, 0x18, 0x4e, 0xc6, 0x20, 0xf5, 0x4b, 0xae, 0xbd, 0x60, 0xf5, - 0x32, 0x07, 0x99, 0xb4, 0xcc, 0xe3, 0xbd, 0xf7, 0x3d, 0x00, 0x00, 0xff, 0xff, 0xc9, 0x87, 0xc2, - 0x5c, 0xb3, 0x04, 0x00, 0x00, + // 653 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x54, 0xbd, 0x6e, 0x13, 0x41, + 0x10, 0xf6, 0x11, 0xdb, 0x38, 0xeb, 0xfc, 0x90, 0x03, 0x25, 0x17, 0x0b, 0xd9, 0x96, 0x45, 0x24, + 0xe3, 0x28, 0xbb, 0x38, 0x08, 0x81, 0xdc, 0xe1, 0x34, 0x14, 0x44, 0x8a, 0xac, 0xd0, 0xd0, 0x44, + 0x77, 0xeb, 0xe1, 0xbc, 0x8a, 0x6f, 0xf7, 0xb8, 0xdd, 0xb3, 0xa0, 0x41, 0x88, 0x0a, 0x51, 0xf1, + 0x08, 0x94, 0x94, 0x79, 0x0a, 0x94, 0x32, 0x25, 0x15, 0x42, 0x49, 0x91, 0x86, 0x87, 0x40, 0xbb, + 0xb7, 0x67, 0x0e, 0x8a, 0x00, 0x8d, 0xbd, 0x33, 0xf3, 0xcd, 0xcc, 0x37, 0xdf, 0x78, 0x8c, 0xb6, + 0x58, 0x40, 0x89, 0x1f, 0xc7, 0x53, 0x46, 0x7d, 0xc5, 0x04, 0x97, 0x44, 0x25, 0x3e, 0x97, 0x2f, + 0x20, 0x21, 0xb3, 0x3e, 0x51, 0xaf, 0x70, 0x9c, 0x08, 0x25, 0xdc, 0xdb, 0x2c, 0xa0, 0xb8, 0x08, + 0xc3, 0x39, 0x0c, 0xcf, 0xfa, 0x8d, 0x35, 0x3f, 0x62, 0x5c, 0x10, 0xf3, 0x99, 0x25, 0x34, 0x6e, + 0x85, 0x22, 0x14, 0xe6, 0x49, 0xf4, 0xcb, 0x7a, 0x37, 0xa8, 0x90, 0x91, 0x90, 0x24, 0x92, 0xa1, + 0x2e, 0x1f, 0xc9, 0xd0, 0x06, 0x9a, 0x36, 0x10, 0xf8, 0x12, 0xc8, 0xac, 0x1f, 0x80, 0xf2, 0xfb, + 0x84, 0x0a, 0xc6, 0x6d, 0xbc, 0xa5, 0x69, 0x52, 0x91, 0x00, 0xa1, 0x53, 0x06, 0x5c, 0xe9, 0xec, + 0xec, 0x65, 0x01, 0xdb, 0x57, 0xcf, 0x91, 0x93, 0x35, 0xe0, 0xce, 0x97, 0x05, 0x54, 0xdf, 0x97, + 0xe1, 0xa1, 0xf5, 0xba, 0x2d, 0x54, 0x97, 0x22, 0x4d, 0x28, 0x1c, 0xc5, 0x22, 0x51, 0x9e, 0xd3, + 0x76, 0xba, 0x8b, 0x23, 0x94, 0xb9, 0x0e, 0x44, 0xa2, 0xdc, 0x2d, 0xb4, 0x62, 0x01, 0x74, 0xe2, + 0x73, 0x0e, 0x53, 0xef, 0x9a, 0xc1, 0x2c, 0x67, 0xde, 0xbd, 0xcc, 0xe9, 0x0e, 0x50, 0x45, 0x89, + 0x63, 0xe0, 0xde, 0x42, 0xdb, 0xe9, 0xd6, 0x77, 0x37, 0x71, 0x36, 0x15, 0xd6, 0x53, 0x61, 0x3b, + 0x15, 0xde, 0x13, 0x8c, 0x0f, 0x17, 0x4f, 0xbf, 0xb5, 0x4a, 0x9f, 0x2f, 0x4f, 0x7a, 0xce, 0x28, + 0x4b, 0x71, 0xd7, 0x51, 0x55, 0x02, 0x1f, 0x43, 0xe2, 0x95, 0x4d, 0x69, 0x6b, 0xb9, 0x0d, 0x54, + 0x4b, 0x80, 0x02, 0x9b, 0x41, 0xe2, 0x55, 0x4c, 0x64, 0x6e, 0xbb, 0x4f, 0xd1, 0x8a, 0x62, 0x11, + 0x88, 0x54, 0x1d, 0x4d, 0x80, 0x85, 0x13, 0xe5, 0x55, 0x4d, 0xe3, 0x06, 0xd6, 0xeb, 0xd2, 0x72, + 0x61, 0x2b, 0xd2, 0xac, 0x8f, 0x9f, 0x18, 0x44, 0xb1, 0xf3, 0xb2, 0x4d, 0xce, 0x22, 0xee, 0x36, + 0x5a, 0xcb, 0xab, 0xe9, 0x6f, 0xa9, 0xfc, 0x28, 0xf6, 0xae, 0xb7, 0x9d, 0x6e, 0x79, 0x74, 0xc3, + 0x06, 0x0e, 0x73, 0xbf, 0xeb, 0xa2, 0x72, 0x04, 0x91, 0xf0, 0x6a, 0x86, 0x92, 0x79, 0x6b, 0xaa, + 0xc0, 0xa9, 0x18, 0x33, 0x1e, 0x7a, 0x8b, 0x19, 0xd5, 0xdc, 0x76, 0xbb, 0x68, 0x29, 0x95, 0x70, + 0xe4, 0x4f, 0x99, 0x2f, 0x75, 0x1c, 0xb5, 0x9d, 0x6e, 0x6d, 0x58, 0xc9, 0x88, 0xd4, 0x53, 0x09, + 0x8f, 0x6d, 0x64, 0xd0, 0x7b, 0xff, 0xa9, 0x55, 0x7a, 0x77, 0x79, 0xd2, 0xb3, 0x0a, 0x7c, 0xb8, + 0x3c, 0xe9, 
0xad, 0x67, 0x42, 0xee, 0xc8, 0xf1, 0x31, 0x29, 0x2c, 0xae, 0xf3, 0x10, 0xdd, 0x2c, + 0x98, 0x23, 0x90, 0xb1, 0xe0, 0x12, 0x34, 0x11, 0x09, 0x2f, 0x53, 0xe0, 0x14, 0xcc, 0x32, 0xcb, + 0xa3, 0xb9, 0x3d, 0x28, 0xeb, 0xf2, 0x9d, 0x37, 0x68, 0x75, 0x5f, 0x86, 0xcf, 0xe2, 0xb1, 0xaf, + 0xe0, 0xc0, 0x4f, 0xfc, 0x48, 0x9a, 0x05, 0xb0, 0x90, 0x43, 0x62, 0xf7, 0x6f, 0x2d, 0x77, 0x88, + 0xaa, 0xb1, 0x41, 0x98, 0x9d, 0xd7, 0x77, 0xef, 0xe0, 0xab, 0x6e, 0x01, 0x67, 0xd5, 0x86, 0x65, + 0x2d, 0xf3, 0xc8, 0x66, 0x0e, 0x56, 0x7f, 0xcd, 0x64, 0x8a, 0x76, 0x36, 0xd1, 0xc6, 0x1f, 0xfd, + 0x73, 0xf2, 0xbb, 0x3f, 0x1c, 0xb4, 0xb0, 0x2f, 0x43, 0x77, 0x82, 0x6a, 0xf3, 0x1f, 0xe8, 0xdd, + 0xab, 0x7b, 0x16, 0x34, 0x68, 0xf4, 0xff, 0x19, 0x3a, 0x97, 0x4b, 0xa1, 0xa5, 0xdf, 0x94, 0xd8, + 0xf9, 0x6b, 0x89, 0x22, 0xbc, 0xf1, 0xe0, 0xbf, 0xe0, 0x79, 0xd7, 0x46, 0xe5, 0xad, 0xde, 0xfd, + 0x70, 0x74, 0x7a, 0xde, 0x74, 0xce, 0xce, 0x9b, 0xce, 0xf7, 0xf3, 0xa6, 0xf3, 0xf1, 0xa2, 0x59, + 0x3a, 0xbb, 0x68, 0x96, 0xbe, 0x5e, 0x34, 0x4b, 0xcf, 0x1f, 0x85, 0x4c, 0x4d, 0xd2, 0x00, 0x53, + 0x11, 0x11, 0xfb, 0xf7, 0xc0, 0x02, 0xba, 0x13, 0x0a, 0x32, 0xeb, 0xdf, 0x23, 0x91, 0x18, 0xa7, + 0x53, 0x90, 0xfa, 0xe6, 0x0b, 0xb7, 0xae, 0x5e, 0xc7, 0x20, 0x83, 0xaa, 0x39, 0xf3, 0xfb, 0x3f, + 0x03, 0x00, 0x00, 0xff, 0xff, 0xd4, 0xc3, 0xca, 0xd9, 0xdd, 0x04, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. @@ -371,6 +379,7 @@ func _Msg_UpdateParams_Handler(srv interface{}, ctx context.Context, dec func(in return interceptor(ctx, in, info, handler) } +var Msg_serviceDesc = _Msg_serviceDesc var _Msg_serviceDesc = grpc.ServiceDesc{ ServiceName: "ibc.applications.transfer.v1.Msg", HandlerType: (*MsgServer)(nil), @@ -408,6 +417,16 @@ func (m *MsgTransfer) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.UseAliasing { + i-- + if m.UseAliasing { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x50 + } if len(m.Encoding) > 0 { i -= len(m.Encoding) copy(dAtA[i:], m.Encoding) @@ -617,6 +636,9 @@ func (m *MsgTransfer) Size() (n int) { if l > 0 { n += 1 + l + sovTx(uint64(l)) } + if m.UseAliasing { + n += 2 + } return n } @@ -968,6 +990,26 @@ func (m *MsgTransfer) Unmarshal(dAtA []byte) error { } m.Encoding = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 10: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field UseAliasing", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.UseAliasing = bool(v != 0) default: iNdEx = preIndex skippy, err := skipTx(dAtA[iNdEx:]) diff --git a/modules/apps/transfer/types/types_test.go b/modules/apps/transfer/types/types_test.go index 2995b72936f..d0551f99c86 100644 --- a/modules/apps/transfer/types/types_test.go +++ b/modules/apps/transfer/types/types_test.go @@ -17,13 +17,13 @@ type TypesTestSuite struct { chainB *ibctesting.TestChain } -func (suite *TypesTestSuite) SetupTest() { - suite.coordinator = ibctesting.NewCoordinator(suite.T(), 2) - - suite.chainA = suite.coordinator.GetChain(ibctesting.GetChainID(1)) - suite.chainB = suite.coordinator.GetChain(ibctesting.GetChainID(2)) -} - func TestTypesTestSuite(t *testing.T) { testifysuite.Run(t, new(TypesTestSuite)) } + +func (s *TypesTestSuite) SetupTest() { + s.coordinator = ibctesting.NewCoordinator(s.T(), 2) + + s.chainA = 
s.coordinator.GetChain(ibctesting.GetChainID(1)) + s.chainB = s.coordinator.GetChain(ibctesting.GetChainID(2)) +} diff --git a/modules/apps/transfer/v2/alias_test.go b/modules/apps/transfer/v2/alias_test.go new file mode 100644 index 00000000000..1797487a868 --- /dev/null +++ b/modules/apps/transfer/v2/alias_test.go @@ -0,0 +1,275 @@ +package v2_test + +import ( + "time" + + sdkmath "cosmossdk.io/math" + + "github.com/cosmos/cosmos-sdk/runtime" + sdk "github.com/cosmos/cosmos-sdk/types" + + "github.com/cosmos/ibc-go/v10/modules/apps/transfer/types" + clienttypes "github.com/cosmos/ibc-go/v10/modules/core/02-client/types" + "github.com/cosmos/ibc-go/v10/modules/core/04-channel/migrations/v11" + channeltypesv2 "github.com/cosmos/ibc-go/v10/modules/core/04-channel/v2/types" + hostv2 "github.com/cosmos/ibc-go/v10/modules/core/24-host/v2" + ibcexported "github.com/cosmos/ibc-go/v10/modules/core/exported" + ibctesting "github.com/cosmos/ibc-go/v10/testing" + mockv2 "github.com/cosmos/ibc-go/v10/testing/mock/v2" +) + +// This test migrates a V1 channel and then does the following: +// It will send a transfer packet using the V1 format, +// then it will send a transfer packet using the V2 format on the same channel. +// It will then send a transfer packet back using the V2 format on the same channel. +// It checks that the escrow and receiver amounts are correct after each packet is sent. +func (s *TransferTestSuite) TestAliasedTransferChannel() { + path := ibctesting.NewTransferPath(s.chainA, s.chainB) + path.Setup() + + // mock v1 format for both sides of the channel + s.mockV1Format(path.EndpointA) + s.mockV1Format(path.EndpointB) + + // migrate the store for both chains + err := v11.MigrateStore(s.chainA.GetContext(), runtime.NewKVStoreService(s.chainA.GetSimApp().GetKey(ibcexported.StoreKey)), s.chainA.App.AppCodec(), s.chainA.App.GetIBCKeeper()) + s.Require().NoError(err) + err = v11.MigrateStore(s.chainB.GetContext(), runtime.NewKVStoreService(s.chainB.GetSimApp().GetKey(ibcexported.StoreKey)), s.chainB.App.AppCodec(), s.chainB.App.GetIBCKeeper()) + s.Require().NoError(err) + + // create v2 path from the original client ids + // the path config is only used for updating + // the packet client ids will be the original channel identifiers + // but they are not validated against the client ids in the path in the tests + pathv2 := ibctesting.NewPath(s.chainA, s.chainB) + pathv2.EndpointA.ClientID = path.EndpointA.ClientID + pathv2.EndpointB.ClientID = path.EndpointB.ClientID + + // save original amount that sender has in its balance + originalAmount := s.chainA.GetSimApp().BankKeeper.GetBalance(s.chainA.GetContext(), s.chainA.SenderAccount.GetAddress(), ibctesting.TestCoin.Denom).Amount + + // send v1 packet with default values + sender := s.chainA.SenderAccount.GetAddress() + receiver := s.chainB.SenderAccount.GetAddress() + transferMsg := types.NewMsgTransfer( + path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, + ibctesting.TestCoin, sender.String(), receiver.String(), + s.chainB.GetTimeoutHeight(), 0, "", + ) + + result, err := s.chainA.SendMsgs(transferMsg) + s.Require().NoError(err) // message committed + + packet, err := ibctesting.ParseV1PacketFromEvents(result.Events) + s.Require().NoError(err) + s.Require().Equal(uint64(1), packet.Sequence, "sequence should be 1 for first packet") + + err = path.RelayPacket(packet) + s.Require().NoError(err) + + // check that the escrow and receiver amounts are correct + // after first packet + s.assertEscrowEqual(s.chainA, 
ibctesting.TestCoin, ibctesting.DefaultCoinAmount) + ibcDenom := types.NewDenom( + ibctesting.TestCoin.Denom, + types.NewHop(path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID), + ) + s.assertReceiverEqual(s.chainB, ibcDenom.IBCDenom(), receiver, ibctesting.DefaultCoinAmount) + + // v2 packets only support timeout timestamps in UNIX time. + timeoutTimestamp := uint64(s.chainB.GetContext().BlockTime().Add(time.Hour).Unix()) + + // send v2 packet on aliased channel + msgTransferAlias := types.NewMsgTransferAliased( + path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, + ibctesting.TestCoin, sender.String(), receiver.String(), + clienttypes.Height{}, timeoutTimestamp, "", + ) + res, err := path.EndpointA.Chain.SendMsgs(msgTransferAlias) + s.Require().NoError(err, "send v2 packet failed") + + packetv2, err := ibctesting.ParseV2PacketFromEvents(res.Events) + s.Require().NoError(err, "parse v2 packet from events failed") + s.Require().Equal(uint64(2), packetv2.Sequence, "sequence should be incremented across protocol versions") + + err = path.EndpointB.UpdateClient() + s.Require().NoError(err) + + // relay v2 packet + err = pathv2.EndpointA.RelayPacket(packetv2) + s.Require().NoError(err) + + // check that the escrow and receiver amounts are correct + // after the second packet + // this should be double the default amount since we sent the same amount twice + // once with IBC v1 and once with IBC v2 + newAmount := ibctesting.DefaultCoinAmount.MulRaw(2) + s.assertEscrowEqual(s.chainA, ibctesting.TestCoin, newAmount) + s.assertReceiverEqual(s.chainB, ibcDenom.IBCDenom(), receiver, newAmount) + + // send all the tokens back using IBC v2 + // NOTE: Creating a reversed path to use helper functions + // sender and receiver are swapped + revPath := ibctesting.NewPath(s.chainB, s.chainA) + revPath.EndpointA.ClientID = path.EndpointB.ClientID + revPath.EndpointB.ClientID = path.EndpointA.ClientID + + revToken := types.Token{ + Denom: types.Denom{ + Trace: []types.Hop{ + { + PortId: path.EndpointB.ChannelConfig.PortID, + ChannelId: path.EndpointB.ChannelID, + }, + }, + Base: ibctesting.TestCoin.Denom, + }, + Amount: ibctesting.TestCoin.Amount.MulRaw(2).String(), + } + revCoin, err := revToken.ToCoin() + s.Require().NoError(err, "convert token to coin failed") + + revTimeoutTimestamp := uint64(s.chainA.GetContext().BlockTime().Add(time.Hour).Unix()) + + // send v2 packet + // using encoding here just to use both message constructor functions + msgTransferRev := types.NewMsgTransferWithEncoding( + path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, + revCoin, receiver.String(), sender.String(), + clienttypes.Height{}, revTimeoutTimestamp, "", "application/json", true, + ) + res, err = revPath.EndpointA.Chain.SendMsgs(msgTransferRev) + s.Require().NoError(err, "send v2 packet failed") + + packetv2, err = ibctesting.ParseV2PacketFromEvents(res.Events) + s.Require().NoError(err, "parse v2 packet from events failed") + s.Require().Equal(uint64(1), packetv2.Sequence, "sequence should be 1 on the counterparty chain") + + err = revPath.EndpointB.UpdateClient() + s.Require().NoError(err) + + // relay v2 packet + err = revPath.EndpointA.RelayPacket(packetv2) + s.Require().NoError(err) + + // check that the balances are back to their original state + // after the reverse packet is sent with the full amount + s.assertEscrowEqual(s.chainA, ibctesting.TestCoin, sdkmath.ZeroInt()) + s.assertReceiverEqual(s.chainA, ibctesting.TestCoin.Denom, sender, originalAmount) + 
s.assertReceiverEqual(s.chainB, ibcDenom.IBCDenom(), receiver, sdkmath.ZeroInt()) +} + +// This test ensures we can send a packet from a different application on the same channel identifier +// and that the sequences are still incremented correctly as a global, application-agnostic sequence. +func (s *TransferTestSuite) TestDifferentAppPostAlias() { + path := ibctesting.NewTransferPath(s.chainA, s.chainB) + path.Setup() + + // mock v1 format for both sides of the channel + s.mockV1Format(path.EndpointA) + s.mockV1Format(path.EndpointB) + + // migrate the store for both chains + err := v11.MigrateStore(s.chainA.GetContext(), runtime.NewKVStoreService(s.chainA.GetSimApp().GetKey(ibcexported.StoreKey)), s.chainA.App.AppCodec(), s.chainA.App.GetIBCKeeper()) + s.Require().NoError(err) + err = v11.MigrateStore(s.chainB.GetContext(), runtime.NewKVStoreService(s.chainB.GetSimApp().GetKey(ibcexported.StoreKey)), s.chainB.App.AppCodec(), s.chainB.App.GetIBCKeeper()) + s.Require().NoError(err) + + // create v2 path from the original client ids + // the path config is only used for updating + // the packet client ids will be the original channel identifiers + // but they are not validated against the client ids in the path in the tests + pathv2 := ibctesting.NewPath(s.chainA, s.chainB) + pathv2.EndpointA.ClientID = path.EndpointA.ClientID + pathv2.EndpointB.ClientID = path.EndpointB.ClientID + + // create a mock payload and a timeout timestamp one hour in the future + mockPayload := mockv2.NewMockPayload(mockv2.ModuleNameA, mockv2.ModuleNameB) + + timeoutTimestamp := uint64(s.chainA.GetContext().BlockTime().Add(time.Hour).Unix()) + + // send v2 packet with mock payload + // over a v1 transfer channel's channel identifier + msgSendPacket := channeltypesv2.NewMsgSendPacket( + path.EndpointA.ChannelID, + timeoutTimestamp, + path.EndpointA.Chain.SenderAccount.GetAddress().String(), + mockPayload, + ) + res, err := path.EndpointA.Chain.SendMsgs(msgSendPacket) + s.Require().NoError(err, "send v2 packet failed") + + packetv2, err := ibctesting.ParseV2PacketFromEvents(res.Events) + s.Require().NoError(err, "parse v2 packet from events failed") + s.Require().Equal(uint64(1), packetv2.Sequence, "sequence should be 1 for first packet") + + err = path.EndpointB.UpdateClient() + s.Require().NoError(err) + + // relay v2 packet + err = pathv2.EndpointA.RelayPacket(packetv2) + s.Require().NoError(err) + + sender := s.chainA.SenderAccount.GetAddress() + receiver := s.chainB.SenderAccount.GetAddress() + + // now send a transfer v2 packet + msgTransferAlias := types.NewMsgTransferAliased( + path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, + ibctesting.TestCoin, sender.String(), receiver.String(), + clienttypes.Height{}, timeoutTimestamp, "", + ) + res, err = path.EndpointA.Chain.SendMsgs(msgTransferAlias) + s.Require().NoError(err, "send v2 packet failed") + + transferv2Packet, err := ibctesting.ParseV2PacketFromEvents(res.Events) + s.Require().NoError(err, "parse v2 packet from events failed") + s.Require().Equal(uint64(2), transferv2Packet.Sequence, "sequence should be incremented across applications") + + err = path.EndpointB.UpdateClient() + s.Require().NoError(err) + + // now send a transfer v1 packet + transferMsg := types.NewMsgTransfer( + path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, + ibctesting.TestCoin, sender.String(), receiver.String(), + s.chainB.GetTimeoutHeight(), 0, "", + ) + + result, err := s.chainA.SendMsgs(transferMsg) + s.Require().NoError(err) // message committed + + transferv1Packet, err := 
ibctesting.ParseV1PacketFromEvents(result.Events) + s.Require().NoError(err) + + err = path.RelayPacket(transferv1Packet) + s.Require().NoError(err) + s.Require().Equal(uint64(3), transferv1Packet.Sequence, "sequence should be incremented across protocol versions") +} + +// assertEscrowEqual asserts that the amounts escrowed for each of the coins on chain matches the expectedAmounts +func (s *TransferTestSuite) assertEscrowEqual(chain *ibctesting.TestChain, coin sdk.Coin, expectedAmount sdkmath.Int) { + amount := chain.GetSimApp().TransferKeeper.GetTotalEscrowForDenom(chain.GetContext(), coin.GetDenom()) + s.Require().Equal(expectedAmount, amount.Amount) +} + +// assertReceiverEqual asserts that the amounts received by the receiver account matches the expectedAmounts +func (s *TransferTestSuite) assertReceiverEqual(chain *ibctesting.TestChain, denom string, receiver sdk.AccAddress, expectedAmount sdkmath.Int) { + amount := chain.GetSimApp().BankKeeper.GetBalance(chain.GetContext(), receiver, denom) + s.Require().Equal(expectedAmount, amount.Amount, "receiver balance should match expected amount") +} + +func (s *TransferTestSuite) mockV1Format(endpoint *ibctesting.Endpoint) { + // mock v1 format by setting the sequence in the old key + seq, ok := endpoint.Chain.App.GetIBCKeeper().ChannelKeeper.GetNextSequenceSend(endpoint.Chain.GetContext(), endpoint.ChannelConfig.PortID, endpoint.ChannelID) + s.Require().True(ok, "should be able to get next sequence send for channel") + + // move the next sequence send back to the old v1 format key + // so we can migrate it in our tests + storeService := runtime.NewKVStoreService(endpoint.Chain.GetSimApp().GetKey(ibcexported.StoreKey)) + store := storeService.OpenKVStore(endpoint.Chain.GetContext()) + err := store.Set(v11.NextSequenceSendV1Key(endpoint.ChannelConfig.PortID, endpoint.ChannelID), sdk.Uint64ToBigEndian(seq)) + s.Require().NoError(err) + err = store.Delete(hostv2.NextSequenceSendKey(endpoint.ChannelID)) + s.Require().NoError(err) +} diff --git a/modules/apps/transfer/v2/ibc_module.go b/modules/apps/transfer/v2/ibc_module.go index 84a54a2c161..7220ffa1b14 100644 --- a/modules/apps/transfer/v2/ibc_module.go +++ b/modules/apps/transfer/v2/ibc_module.go @@ -23,17 +23,17 @@ import ( var _ api.IBCModule = (*IBCModule)(nil) // NewIBCModule creates a new IBCModule given the keeper -func NewIBCModule(k keeper.Keeper) *IBCModule { - return &IBCModule{ +func NewIBCModule(k *keeper.Keeper) IBCModule { + return IBCModule{ keeper: k, } } type IBCModule struct { - keeper keeper.Keeper + keeper *keeper.Keeper } -func (im *IBCModule) OnSendPacket(ctx sdk.Context, sourceChannel string, destinationChannel string, sequence uint64, payload channeltypesv2.Payload, signer sdk.AccAddress) error { +func (im IBCModule) OnSendPacket(ctx sdk.Context, sourceChannel string, destinationChannel string, sequence uint64, payload channeltypesv2.Payload, signer sdk.AccAddress) error { // Enforce that the source and destination portIDs are the same and equal to the transfer portID // Enforce that the source and destination clientIDs are also in the clientID format that transfer expects: {clientid}-{sequence} // This is necessary for IBC v2 since the portIDs (and thus the application-application connection) is not prenegotiated @@ -52,12 +52,12 @@ func (im *IBCModule) OnSendPacket(ctx sdk.Context, sourceChannel string, destina return err } - sender, err := sdk.AccAddressFromBech32(data.Sender) + sender, err := im.keeper.GetAddressCodec().StringToBytes(data.Sender) if err != nil { 
return err } - if !signer.Equals(sender) { + if !bytes.Equal(sender, signer) { return errorsmod.Wrapf(ibcerrors.ErrUnauthorized, "sender %s is different from signer %s", sender, signer) } @@ -76,14 +76,14 @@ func (im *IBCModule) OnSendPacket(ctx sdk.Context, sourceChannel string, destina return err } - events.EmitTransferEvent(ctx, sender.String(), data.Receiver, data.Token, data.Memo) + events.EmitTransferEvent(ctx, data.Sender, data.Receiver, data.Token, data.Memo) telemetry.ReportTransfer(payload.SourcePort, sourceChannel, payload.DestinationPort, destinationChannel, data.Token) return nil } -func (im *IBCModule) OnRecvPacket(ctx sdk.Context, sourceChannel string, destinationChannel string, sequence uint64, payload channeltypesv2.Payload, relayer sdk.AccAddress) channeltypesv2.RecvPacketResult { +func (im IBCModule) OnRecvPacket(ctx sdk.Context, sourceChannel string, destinationChannel string, sequence uint64, payload channeltypesv2.Payload, relayer sdk.AccAddress) channeltypesv2.RecvPacketResult { // Enforce that the source and destination portIDs are the same and equal to the transfer portID // Enforce that the source and destination clientIDs are also in the clientID format that transfer expects: {clientid}-{sequence} // This is necessary for IBC v2 since the portIDs (and thus the application-application connection) is not prenegotiated @@ -147,7 +147,7 @@ func (im *IBCModule) OnRecvPacket(ctx sdk.Context, sourceChannel string, destina return recvResult } -func (im *IBCModule) OnTimeoutPacket(ctx sdk.Context, sourceChannel string, destinationChannel string, sequence uint64, payload channeltypesv2.Payload, relayer sdk.AccAddress) error { +func (im IBCModule) OnTimeoutPacket(ctx sdk.Context, sourceChannel string, destinationChannel string, sequence uint64, payload channeltypesv2.Payload, relayer sdk.AccAddress) error { data, err := types.UnmarshalPacketData(payload.Value, payload.Version, payload.Encoding) if err != nil { return err @@ -163,7 +163,7 @@ func (im *IBCModule) OnTimeoutPacket(ctx sdk.Context, sourceChannel string, dest return nil } -func (im *IBCModule) OnAcknowledgementPacket(ctx sdk.Context, sourceChannel string, destinationChannel string, sequence uint64, acknowledgement []byte, payload channeltypesv2.Payload, relayer sdk.AccAddress) error { +func (im IBCModule) OnAcknowledgementPacket(ctx sdk.Context, sourceChannel string, destinationChannel string, sequence uint64, acknowledgement []byte, payload channeltypesv2.Payload, relayer sdk.AccAddress) error { var ack channeltypes.Acknowledgement // construct an error acknowledgement if the acknowledgement bytes are the sentinel error acknowledgement so we can use the shared transfer logic if bytes.Equal(acknowledgement, channeltypesv2.ErrorAcknowledgement[:]) { @@ -194,6 +194,6 @@ func (im *IBCModule) OnAcknowledgementPacket(ctx sdk.Context, sourceChannel stri // UnmarshalPacketData unmarshals the ICS20 packet data based on the version and encoding // it implements the PacketDataUnmarshaler interface -func (*IBCModule) UnmarshalPacketData(payload channeltypesv2.Payload) (any, error) { +func (IBCModule) UnmarshalPacketData(payload channeltypesv2.Payload) (any, error) { return types.UnmarshalPacketData(payload.Value, payload.Version, payload.Encoding) } diff --git a/modules/apps/transfer/v2/ibc_module_test.go b/modules/apps/transfer/v2/ibc_module_test.go index f3e75a85ab0..38e455a8d27 100644 --- a/modules/apps/transfer/v2/ibc_module_test.go +++ b/modules/apps/transfer/v2/ibc_module_test.go @@ -19,7 +19,10 @@ import ( ibctesting 
"github.com/cosmos/ibc-go/v10/testing" ) -const testclientid = "testclientid" +const ( + testclientid = "testclientid" + invalidPortID = "invalidportid" +) type TransferTestSuite struct { testifysuite.Suite @@ -35,35 +38,33 @@ type TransferTestSuite struct { pathBToC *ibctesting.Path } -const invalidPortID = "invalidportid" +func TestTransferTestSuite(t *testing.T) { + testifysuite.Run(t, new(TransferTestSuite)) +} -func (suite *TransferTestSuite) SetupTest() { - suite.coordinator = ibctesting.NewCoordinator(suite.T(), 3) - suite.chainA = suite.coordinator.GetChain(ibctesting.GetChainID(1)) - suite.chainB = suite.coordinator.GetChain(ibctesting.GetChainID(2)) - suite.chainC = suite.coordinator.GetChain(ibctesting.GetChainID(3)) +func (s *TransferTestSuite) SetupTest() { + s.coordinator = ibctesting.NewCoordinator(s.T(), 3) + s.chainA = s.coordinator.GetChain(ibctesting.GetChainID(1)) + s.chainB = s.coordinator.GetChain(ibctesting.GetChainID(2)) + s.chainC = s.coordinator.GetChain(ibctesting.GetChainID(3)) // setup between chainA and chainB // NOTE: // pathAToB.EndpointA = endpoint on chainA // pathAToB.EndpointB = endpoint on chainB - suite.pathAToB = ibctesting.NewPath(suite.chainA, suite.chainB) + s.pathAToB = ibctesting.NewPath(s.chainA, s.chainB) // setup between chainB and chainC // pathBToC.EndpointA = endpoint on chainB // pathBToC.EndpointB = endpoint on chainC - suite.pathBToC = ibctesting.NewPath(suite.chainB, suite.chainC) + s.pathBToC = ibctesting.NewPath(s.chainB, s.chainC) // setup IBC v2 paths between the chains - suite.pathAToB.SetupV2() - suite.pathBToC.SetupV2() + s.pathAToB.SetupV2() + s.pathBToC.SetupV2() } -func TestTransferTestSuite(t *testing.T) { - testifysuite.Run(t, new(TransferTestSuite)) -} - -func (suite *TransferTestSuite) TestOnSendPacket() { +func (s *TransferTestSuite) TestOnSendPacket() { var payload channeltypesv2.Payload testCases := []struct { name string @@ -97,7 +98,7 @@ func (suite *TransferTestSuite) TestOnSendPacket() { "transfer with invalid source client", sdk.DefaultBondDenom, func() { - suite.pathAToB.EndpointA.ClientID = testclientid + s.pathAToB.EndpointA.ClientID = testclientid }, channeltypesv2.ErrInvalidPacket, }, @@ -105,7 +106,7 @@ func (suite *TransferTestSuite) TestOnSendPacket() { "transfer with invalid destination client", sdk.DefaultBondDenom, func() { - suite.pathAToB.EndpointB.ClientID = testclientid + s.pathAToB.EndpointB.ClientID = testclientid }, channeltypesv2.ErrInvalidPacket, }, @@ -124,13 +125,13 @@ func (suite *TransferTestSuite) TestOnSendPacket() { } for _, tc := range testCases { - suite.Run(tc.name, func() { - suite.SetupTest() // reset + s.Run(tc.name, func() { + s.SetupTest() // reset - originalBalance := suite.chainA.GetSimApp().BankKeeper.GetBalance(suite.chainA.GetContext(), suite.chainA.SenderAccount.GetAddress(), tc.sourceDenomToTransfer) + originalBalance := s.chainA.GetSimApp().BankKeeper.GetBalance(s.chainA.GetContext(), s.chainA.SenderAccount.GetAddress(), tc.sourceDenomToTransfer) amount, ok := sdkmath.NewIntFromString("9223372036854775808") // 2^63 (one above int64) - suite.Require().True(ok) + s.Require().True(ok) originalCoin := sdk.NewCoin(tc.sourceDenomToTransfer, amount) token := types.Token{ @@ -138,38 +139,38 @@ func (suite *TransferTestSuite) TestOnSendPacket() { Amount: originalCoin.Amount.String(), } - transferData := types.NewFungibleTokenPacketData(token.Denom.Path(), token.Amount, suite.chainA.SenderAccount.GetAddress().String(), suite.chainB.SenderAccount.GetAddress().String(), "") - bz := 
suite.chainA.Codec.MustMarshal(&transferData) + transferData := types.NewFungibleTokenPacketData(token.Denom.Path(), token.Amount, s.chainA.SenderAccount.GetAddress().String(), s.chainB.SenderAccount.GetAddress().String(), "") + bz := s.chainA.Codec.MustMarshal(&transferData) payload = channeltypesv2.NewPayload(types.PortID, types.PortID, types.V1, types.EncodingProtobuf, bz) // malleate payload tc.malleate() - ctx := suite.chainA.GetContext() - cbs := suite.chainA.App.GetIBCKeeper().ChannelKeeperV2.Router.Route(ibctesting.TransferPort) + ctx := s.chainA.GetContext() + cbs := s.chainA.App.GetIBCKeeper().ChannelKeeperV2.Router.Route(ibctesting.TransferPort) - err := cbs.OnSendPacket(ctx, suite.pathAToB.EndpointA.ClientID, suite.pathAToB.EndpointB.ClientID, 1, payload, suite.chainA.SenderAccount.GetAddress()) + err := cbs.OnSendPacket(ctx, s.pathAToB.EndpointA.ClientID, s.pathAToB.EndpointB.ClientID, 1, payload, s.chainA.SenderAccount.GetAddress()) if tc.expError != nil { - suite.Require().Contains(err.Error(), tc.expError.Error()) + s.Require().Contains(err.Error(), tc.expError.Error()) return } - suite.Require().NoError(err) + s.Require().NoError(err) - escrowAddress := types.GetEscrowAddress(types.PortID, suite.pathAToB.EndpointA.ClientID) + escrowAddress := types.GetEscrowAddress(types.PortID, s.pathAToB.EndpointA.ClientID) // check that the balance for chainA is updated - chainABalance := suite.chainA.GetSimApp().BankKeeper.GetBalance(suite.chainA.GetContext(), suite.chainA.SenderAccount.GetAddress(), originalCoin.Denom) - suite.Require().Equal(originalBalance.Amount.Sub(amount).Int64(), chainABalance.Amount.Int64()) + chainABalance := s.chainA.GetSimApp().BankKeeper.GetBalance(s.chainA.GetContext(), s.chainA.SenderAccount.GetAddress(), originalCoin.Denom) + s.Require().Equal(originalBalance.Amount.Sub(amount).Int64(), chainABalance.Amount.Int64()) // check that module account escrow address has locked the tokens - chainAEscrowBalance := suite.chainA.GetSimApp().BankKeeper.GetBalance(suite.chainA.GetContext(), escrowAddress, originalCoin.Denom) - suite.Require().Equal(originalCoin, chainAEscrowBalance) + chainAEscrowBalance := s.chainA.GetSimApp().BankKeeper.GetBalance(s.chainA.GetContext(), escrowAddress, originalCoin.Denom) + s.Require().Equal(originalCoin, chainAEscrowBalance) }) } } -func (suite *TransferTestSuite) TestOnRecvPacket() { +func (s *TransferTestSuite) TestOnRecvPacket() { var payload channeltypesv2.Payload testCases := []struct { name string @@ -203,7 +204,7 @@ func (suite *TransferTestSuite) TestOnRecvPacket() { "transfer with invalid source client", sdk.DefaultBondDenom, func() { - suite.pathAToB.EndpointA.ClientID = testclientid + s.pathAToB.EndpointA.ClientID = testclientid }, true, }, @@ -211,72 +212,72 @@ func (suite *TransferTestSuite) TestOnRecvPacket() { "transfer with invalid destination client", sdk.DefaultBondDenom, func() { - suite.pathAToB.EndpointB.ClientID = testclientid + s.pathAToB.EndpointB.ClientID = testclientid }, true, }, } for _, tc := range testCases { - suite.Run(tc.name, func() { - suite.SetupTest() // reset + s.Run(tc.name, func() { + s.SetupTest() // reset - originalBalance := suite.chainA.GetSimApp().BankKeeper.GetBalance(suite.chainA.GetContext(), suite.chainA.SenderAccount.GetAddress(), tc.sourceDenomToTransfer) + originalBalance := s.chainA.GetSimApp().BankKeeper.GetBalance(s.chainA.GetContext(), s.chainA.SenderAccount.GetAddress(), tc.sourceDenomToTransfer) - timeoutTimestamp := 
uint64(suite.chainB.GetContext().BlockTime().Add(time.Hour).Unix()) + timeoutTimestamp := uint64(s.chainB.GetContext().BlockTime().Add(time.Hour).Unix()) amount, ok := sdkmath.NewIntFromString("9223372036854775808") // 2^63 (one above int64) - suite.Require().True(ok) + s.Require().True(ok) originalCoin := sdk.NewCoin(tc.sourceDenomToTransfer, amount) - msg := types.NewMsgTransferWithEncoding(types.PortID, suite.pathAToB.EndpointA.ClientID, originalCoin, suite.chainA.SenderAccount.GetAddress().String(), suite.chainB.SenderAccount.GetAddress().String(), clienttypes.Height{}, timeoutTimestamp, "", types.EncodingProtobuf) - resp, err := suite.chainA.SendMsgs(msg) - suite.Require().NoError(err) // message committed + msg := types.NewMsgTransferWithEncoding(types.PortID, s.pathAToB.EndpointA.ClientID, originalCoin, s.chainA.SenderAccount.GetAddress().String(), s.chainB.SenderAccount.GetAddress().String(), clienttypes.Height{}, timeoutTimestamp, "", types.EncodingProtobuf, false) + resp, err := s.chainA.SendMsgs(msg) + s.Require().NoError(err) // message committed packets, err := ibctesting.ParseIBCV2Packets(channeltypes.EventTypeSendPacket, resp.Events) - suite.Require().NoError(err) + s.Require().NoError(err) - suite.Require().Len(packets, 1) - suite.Require().Len(packets[0].Payloads, 1) + s.Require().Len(packets, 1) + s.Require().Len(packets[0].Payloads, 1) payload = packets[0].Payloads[0] - ctx := suite.chainB.GetContext() - cbs := suite.chainB.App.GetIBCKeeper().ChannelKeeperV2.Router.Route(ibctesting.TransferPort) + ctx := s.chainB.GetContext() + cbs := s.chainB.App.GetIBCKeeper().ChannelKeeperV2.Router.Route(ibctesting.TransferPort) // malleate payload after it has been sent but before OnRecvPacket callback is called tc.malleate() - recvResult := cbs.OnRecvPacket(ctx, suite.pathAToB.EndpointA.ClientID, suite.pathAToB.EndpointB.ClientID, packets[0].Sequence, payload, suite.chainB.SenderAccount.GetAddress()) + recvResult := cbs.OnRecvPacket(ctx, s.pathAToB.EndpointA.ClientID, s.pathAToB.EndpointB.ClientID, packets[0].Sequence, payload, s.chainB.SenderAccount.GetAddress()) if tc.expErr { - suite.Require().Equal(channeltypesv2.PacketStatus_Failure, recvResult.Status) + s.Require().Equal(channeltypesv2.PacketStatus_Failure, recvResult.Status) return } - suite.Require().Equal(channeltypesv2.PacketStatus_Success, recvResult.Status) - suite.Require().Equal(channeltypes.NewResultAcknowledgement([]byte{byte(1)}).Acknowledgement(), recvResult.Acknowledgement) + s.Require().Equal(channeltypesv2.PacketStatus_Success, recvResult.Status) + s.Require().Equal(channeltypes.NewResultAcknowledgement([]byte{byte(1)}).Acknowledgement(), recvResult.Acknowledgement) - escrowAddress := types.GetEscrowAddress(types.PortID, suite.pathAToB.EndpointA.ClientID) + escrowAddress := types.GetEscrowAddress(types.PortID, s.pathAToB.EndpointA.ClientID) // check that the balance for chainA is updated - chainABalance := suite.chainA.GetSimApp().BankKeeper.GetBalance(suite.chainA.GetContext(), suite.chainA.SenderAccount.GetAddress(), originalCoin.Denom) - suite.Require().Equal(originalBalance.Amount.Sub(amount).Int64(), chainABalance.Amount.Int64()) + chainABalance := s.chainA.GetSimApp().BankKeeper.GetBalance(s.chainA.GetContext(), s.chainA.SenderAccount.GetAddress(), originalCoin.Denom) + s.Require().Equal(originalBalance.Amount.Sub(amount).Int64(), chainABalance.Amount.Int64()) // check that module account escrow address has locked the tokens - chainAEscrowBalance := 
suite.chainA.GetSimApp().BankKeeper.GetBalance(suite.chainA.GetContext(), escrowAddress, originalCoin.Denom) - suite.Require().Equal(originalCoin, chainAEscrowBalance) + chainAEscrowBalance := s.chainA.GetSimApp().BankKeeper.GetBalance(s.chainA.GetContext(), escrowAddress, originalCoin.Denom) + s.Require().Equal(originalCoin, chainAEscrowBalance) - traceAToB := types.NewHop(types.PortID, suite.pathAToB.EndpointB.ClientID) + traceAToB := types.NewHop(types.PortID, s.pathAToB.EndpointB.ClientID) // check that voucher exists on chain B chainBDenom := types.NewDenom(originalCoin.Denom, traceAToB) - chainBBalance := suite.chainB.GetSimApp().BankKeeper.GetBalance(suite.chainB.GetContext(), suite.chainB.SenderAccount.GetAddress(), chainBDenom.IBCDenom()) + chainBBalance := s.chainB.GetSimApp().BankKeeper.GetBalance(s.chainB.GetContext(), s.chainB.SenderAccount.GetAddress(), chainBDenom.IBCDenom()) coinSentFromAToB := sdk.NewCoin(chainBDenom.IBCDenom(), amount) - suite.Require().Equal(coinSentFromAToB, chainBBalance) + s.Require().Equal(coinSentFromAToB, chainBBalance) }) } } -func (suite *TransferTestSuite) TestOnAckPacket() { +func (s *TransferTestSuite) TestOnAckPacket() { testCases := []struct { name string sourceDenomToTransfer string @@ -288,78 +289,78 @@ func (suite *TransferTestSuite) TestOnAckPacket() { } for _, tc := range testCases { - suite.Run(tc.name, func() { - suite.SetupTest() // reset + s.Run(tc.name, func() { + s.SetupTest() // reset - originalBalance := suite.chainA.GetSimApp().BankKeeper.GetBalance(suite.chainA.GetContext(), suite.chainA.SenderAccount.GetAddress(), tc.sourceDenomToTransfer) + originalBalance := s.chainA.GetSimApp().BankKeeper.GetBalance(s.chainA.GetContext(), s.chainA.SenderAccount.GetAddress(), tc.sourceDenomToTransfer) - timeoutTimestamp := uint64(suite.chainB.GetContext().BlockTime().Add(time.Hour).Unix()) + timeoutTimestamp := uint64(s.chainB.GetContext().BlockTime().Add(time.Hour).Unix()) amount, ok := sdkmath.NewIntFromString("9223372036854775808") // 2^63 (one above int64) - suite.Require().True(ok) + s.Require().True(ok) originalCoin := sdk.NewCoin(tc.sourceDenomToTransfer, amount) - msg := types.NewMsgTransferWithEncoding(types.PortID, suite.pathAToB.EndpointA.ClientID, originalCoin, suite.chainA.SenderAccount.GetAddress().String(), suite.chainB.SenderAccount.GetAddress().String(), clienttypes.Height{}, timeoutTimestamp, "", types.EncodingProtobuf) + msg := types.NewMsgTransferWithEncoding(types.PortID, s.pathAToB.EndpointA.ClientID, originalCoin, s.chainA.SenderAccount.GetAddress().String(), s.chainB.SenderAccount.GetAddress().String(), clienttypes.Height{}, timeoutTimestamp, "", types.EncodingProtobuf, false) - resp, err := suite.chainA.SendMsgs(msg) - suite.Require().NoError(err) // message committed + resp, err := s.chainA.SendMsgs(msg) + s.Require().NoError(err) // message committed packets, err := ibctesting.ParseIBCV2Packets(channeltypes.EventTypeSendPacket, resp.Events) - suite.Require().NoError(err) + s.Require().NoError(err) - suite.Require().Len(packets, 1) - suite.Require().Len(packets[0].Payloads, 1) + s.Require().Len(packets, 1) + s.Require().Len(packets[0].Payloads, 1) payload := packets[0].Payloads[0] - ctx := suite.chainA.GetContext() - cbs := suite.chainA.App.GetIBCKeeper().ChannelKeeperV2.Router.Route(ibctesting.TransferPort) + ctx := s.chainA.GetContext() + cbs := s.chainA.App.GetIBCKeeper().ChannelKeeperV2.Router.Route(ibctesting.TransferPort) ack := channeltypes.NewResultAcknowledgement([]byte{byte(1)}) err = 
cbs.OnAcknowledgementPacket( - ctx, suite.pathAToB.EndpointA.ClientID, suite.pathAToB.EndpointB.ClientID, - packets[0].Sequence, ack.Acknowledgement(), payload, suite.chainA.SenderAccount.GetAddress(), + ctx, s.pathAToB.EndpointA.ClientID, s.pathAToB.EndpointB.ClientID, + packets[0].Sequence, ack.Acknowledgement(), payload, s.chainA.SenderAccount.GetAddress(), ) - suite.Require().NoError(err) + s.Require().NoError(err) // on successful ack, the tokens sent in packets should still be in escrow - escrowAddress := types.GetEscrowAddress(types.PortID, suite.pathAToB.EndpointA.ClientID) + escrowAddress := types.GetEscrowAddress(types.PortID, s.pathAToB.EndpointA.ClientID) // check that the balance for chainA is updated - chainABalance := suite.chainA.GetSimApp().BankKeeper.GetBalance(suite.chainA.GetContext(), suite.chainA.SenderAccount.GetAddress(), originalCoin.Denom) - suite.Require().Equal(originalBalance.Amount.Sub(amount).Int64(), chainABalance.Amount.Int64()) + chainABalance := s.chainA.GetSimApp().BankKeeper.GetBalance(s.chainA.GetContext(), s.chainA.SenderAccount.GetAddress(), originalCoin.Denom) + s.Require().Equal(originalBalance.Amount.Sub(amount).Int64(), chainABalance.Amount.Int64()) // check that module account escrow address has locked the tokens - chainAEscrowBalance := suite.chainA.GetSimApp().BankKeeper.GetBalance(suite.chainA.GetContext(), escrowAddress, originalCoin.Denom) - suite.Require().Equal(originalCoin, chainAEscrowBalance) + chainAEscrowBalance := s.chainA.GetSimApp().BankKeeper.GetBalance(s.chainA.GetContext(), escrowAddress, originalCoin.Denom) + s.Require().Equal(originalCoin, chainAEscrowBalance) // create a custom error ack and replay the callback to ensure it fails with IBC v2 callbacks errAck := channeltypes.NewErrorAcknowledgement(types.ErrInvalidAmount) err = cbs.OnAcknowledgementPacket( - ctx, suite.pathAToB.EndpointA.ClientID, suite.pathAToB.EndpointB.ClientID, - 1, errAck.Acknowledgement(), payload, suite.chainA.SenderAccount.GetAddress(), + ctx, s.pathAToB.EndpointA.ClientID, s.pathAToB.EndpointB.ClientID, + 1, errAck.Acknowledgement(), payload, s.chainA.SenderAccount.GetAddress(), ) - suite.Require().Error(err) + s.Require().Error(err) // create the sentinel error ack and replay the callback to ensure the tokens are correctly refunded // we can replay the callback here because the replay protection is handled in the IBC handler err = cbs.OnAcknowledgementPacket( - ctx, suite.pathAToB.EndpointA.ClientID, suite.pathAToB.EndpointB.ClientID, - 1, channeltypesv2.ErrorAcknowledgement[:], payload, suite.chainA.SenderAccount.GetAddress(), + ctx, s.pathAToB.EndpointA.ClientID, s.pathAToB.EndpointB.ClientID, + 1, channeltypesv2.ErrorAcknowledgement[:], payload, s.chainA.SenderAccount.GetAddress(), ) - suite.Require().NoError(err) + s.Require().NoError(err) // on error ack, the tokens sent in packets should be returned to sender // check that the balance for chainA is refunded - chainABalance = suite.chainA.GetSimApp().BankKeeper.GetBalance(suite.chainA.GetContext(), suite.chainA.SenderAccount.GetAddress(), originalCoin.Denom) - suite.Require().Equal(originalBalance.Amount, chainABalance.Amount) + chainABalance = s.chainA.GetSimApp().BankKeeper.GetBalance(s.chainA.GetContext(), s.chainA.SenderAccount.GetAddress(), originalCoin.Denom) + s.Require().Equal(originalBalance.Amount, chainABalance.Amount) // check that module account escrow address has no tokens - chainAEscrowBalance = suite.chainA.GetSimApp().BankKeeper.GetBalance(suite.chainA.GetContext(), 
escrowAddress, originalCoin.Denom) - suite.Require().Equal(sdk.NewCoin(originalCoin.Denom, sdkmath.ZeroInt()), chainAEscrowBalance) + chainAEscrowBalance = s.chainA.GetSimApp().BankKeeper.GetBalance(s.chainA.GetContext(), escrowAddress, originalCoin.Denom) + s.Require().Equal(sdk.NewCoin(originalCoin.Denom, sdkmath.ZeroInt()), chainAEscrowBalance) }) } } -func (suite *TransferTestSuite) TestOnTimeoutPacket() { +func (s *TransferTestSuite) TestOnTimeoutPacket() { testCases := []struct { name string sourceDenomToTransfer string @@ -371,51 +372,52 @@ func (suite *TransferTestSuite) TestOnTimeoutPacket() { } for _, tc := range testCases { - suite.Run(tc.name, func() { - suite.SetupTest() // reset + s.Run(tc.name, func() { + s.SetupTest() // reset - originalBalance := suite.chainA.GetSimApp().BankKeeper.GetBalance(suite.chainA.GetContext(), suite.chainA.SenderAccount.GetAddress(), tc.sourceDenomToTransfer) + originalBalance := s.chainA.GetSimApp().BankKeeper.GetBalance(s.chainA.GetContext(), s.chainA.SenderAccount.GetAddress(), tc.sourceDenomToTransfer) - timeoutTimestamp := uint64(suite.chainB.GetContext().BlockTime().Add(time.Hour).Unix()) + timeoutTimestamp := uint64(s.chainB.GetContext().BlockTime().Add(time.Hour).Unix()) amount, ok := sdkmath.NewIntFromString("9223372036854775808") // 2^63 (one above int64) - suite.Require().True(ok) + s.Require().True(ok) originalCoin := sdk.NewCoin(tc.sourceDenomToTransfer, amount) - msg := types.NewMsgTransferWithEncoding(types.PortID, suite.pathAToB.EndpointA.ClientID, originalCoin, suite.chainA.SenderAccount.GetAddress().String(), suite.chainB.SenderAccount.GetAddress().String(), clienttypes.Height{}, timeoutTimestamp, "", types.EncodingProtobuf) - resp, err := suite.chainA.SendMsgs(msg) - suite.Require().NoError(err) // message committed + msg := types.NewMsgTransferWithEncoding(types.PortID, s.pathAToB.EndpointA.ClientID, originalCoin, s.chainA.SenderAccount.GetAddress().String(), s.chainB.SenderAccount.GetAddress().String(), clienttypes.Height{}, timeoutTimestamp, "", types.EncodingProtobuf, false) + resp, err := s.chainA.SendMsgs(msg) + s.Require().NoError(err) // message committed + packets, err := ibctesting.ParseIBCV2Packets(channeltypes.EventTypeSendPacket, resp.Events) - suite.Require().NoError(err) + s.Require().NoError(err) - suite.Require().Len(packets, 1) - suite.Require().Len(packets[0].Payloads, 1) + s.Require().Len(packets, 1) + s.Require().Len(packets[0].Payloads, 1) payload := packets[0].Payloads[0] // on successful send, the tokens sent in packets should be in escrow - escrowAddress := types.GetEscrowAddress(types.PortID, suite.pathAToB.EndpointA.ClientID) + escrowAddress := types.GetEscrowAddress(types.PortID, s.pathAToB.EndpointA.ClientID) // check that the balance for chainA is updated - chainABalance := suite.chainA.GetSimApp().BankKeeper.GetBalance(suite.chainA.GetContext(), suite.chainA.SenderAccount.GetAddress(), originalCoin.Denom) - suite.Require().Equal(originalBalance.Amount.Sub(amount).Int64(), chainABalance.Amount.Int64()) + chainABalance := s.chainA.GetSimApp().BankKeeper.GetBalance(s.chainA.GetContext(), s.chainA.SenderAccount.GetAddress(), originalCoin.Denom) + s.Require().Equal(originalBalance.Amount.Sub(amount).Int64(), chainABalance.Amount.Int64()) // check that module account escrow address has locked the tokens - chainAEscrowBalance := suite.chainA.GetSimApp().BankKeeper.GetBalance(suite.chainA.GetContext(), escrowAddress, originalCoin.Denom) - suite.Require().Equal(originalCoin, chainAEscrowBalance) + 
chainAEscrowBalance := s.chainA.GetSimApp().BankKeeper.GetBalance(s.chainA.GetContext(), escrowAddress, originalCoin.Denom) + s.Require().Equal(originalCoin, chainAEscrowBalance) - ctx := suite.chainA.GetContext() - cbs := suite.chainA.App.GetIBCKeeper().ChannelKeeperV2.Router.Route(ibctesting.TransferPort) + ctx := s.chainA.GetContext() + cbs := s.chainA.App.GetIBCKeeper().ChannelKeeperV2.Router.Route(ibctesting.TransferPort) - err = cbs.OnTimeoutPacket(ctx, suite.pathAToB.EndpointA.ClientID, suite.pathAToB.EndpointB.ClientID, packets[0].Sequence, payload, suite.chainA.SenderAccount.GetAddress()) - suite.Require().NoError(err) + err = cbs.OnTimeoutPacket(ctx, s.pathAToB.EndpointA.ClientID, s.pathAToB.EndpointB.ClientID, packets[0].Sequence, payload, s.chainA.SenderAccount.GetAddress()) + s.Require().NoError(err) // on timeout, the tokens sent in packets should be returned to sender // check that the balance for chainA is refunded - chainABalance = suite.chainA.GetSimApp().BankKeeper.GetBalance(suite.chainA.GetContext(), suite.chainA.SenderAccount.GetAddress(), originalCoin.Denom) - suite.Require().Equal(originalBalance.Amount, chainABalance.Amount) + chainABalance = s.chainA.GetSimApp().BankKeeper.GetBalance(s.chainA.GetContext(), s.chainA.SenderAccount.GetAddress(), originalCoin.Denom) + s.Require().Equal(originalBalance.Amount, chainABalance.Amount) // check that module account escrow address has no tokens - chainAEscrowBalance = suite.chainA.GetSimApp().BankKeeper.GetBalance(suite.chainA.GetContext(), escrowAddress, originalCoin.Denom) - suite.Require().Equal(sdk.NewCoin(originalCoin.Denom, sdkmath.ZeroInt()), chainAEscrowBalance) + chainAEscrowBalance = s.chainA.GetSimApp().BankKeeper.GetBalance(s.chainA.GetContext(), escrowAddress, originalCoin.Denom) + s.Require().Equal(sdk.NewCoin(originalCoin.Denom, sdkmath.ZeroInt()), chainAEscrowBalance) }) } } diff --git a/modules/apps/transfer/v2/transfer_test.go b/modules/apps/transfer/v2/transfer_test.go new file mode 100644 index 00000000000..e009cb1b8e7 --- /dev/null +++ b/modules/apps/transfer/v2/transfer_test.go @@ -0,0 +1,309 @@ +package v2_test + +import ( + "time" + + sdkmath "cosmossdk.io/math" + + sdk "github.com/cosmos/cosmos-sdk/types" + + "github.com/cosmos/ibc-go/v10/modules/apps/transfer/types" + channeltypesv2 "github.com/cosmos/ibc-go/v10/modules/core/04-channel/v2/types" + "github.com/cosmos/ibc-go/v10/testing/mock" + mockv2 "github.com/cosmos/ibc-go/v10/testing/mock/v2" +) + +func (s *TransferTestSuite) TestTransferV2Flow() { + originalBalance := s.chainA.GetSimApp().BankKeeper.GetBalance(s.chainA.GetContext(), s.chainA.SenderAccount.GetAddress(), sdk.DefaultBondDenom) + + amount, ok := sdkmath.NewIntFromString("9223372036854775808") // 2^63 (one above int64) + s.Require().True(ok) + originalCoin := sdk.NewCoin(sdk.DefaultBondDenom, amount) + + token := types.Token{ + Denom: types.Denom{Base: originalCoin.Denom}, + Amount: originalCoin.Amount.String(), + } + + transferData := types.NewFungibleTokenPacketData(token.Denom.Path(), token.Amount, s.chainA.SenderAccount.GetAddress().String(), s.chainB.SenderAccount.GetAddress().String(), "") + bz := s.chainA.Codec.MustMarshal(&transferData) + payload := channeltypesv2.NewPayload(types.PortID, types.PortID, types.V1, types.EncodingProtobuf, bz) + + // Set a timeout of 1 hour from the current block time on receiver chain + timeout := uint64(s.chainB.GetContext().BlockTime().Add(time.Hour).Unix()) + + packet, err := s.pathAToB.EndpointA.MsgSendPacket(timeout, payload) + 
s.Require().NoError(err) + + err = s.pathAToB.EndpointA.RelayPacket(packet) + s.Require().NoError(err) + + escrowAddress := types.GetEscrowAddress(types.PortID, s.pathAToB.EndpointA.ClientID) + // check that the balance for chainA is updated + chainABalance := s.chainA.GetSimApp().BankKeeper.GetBalance(s.chainA.GetContext(), s.chainA.SenderAccount.GetAddress(), originalCoin.Denom) + s.Require().Equal(originalBalance.Amount.Sub(amount).Int64(), chainABalance.Amount.Int64()) + + // check that module account escrow address has locked the tokens + chainAEscrowBalance := s.chainA.GetSimApp().BankKeeper.GetBalance(s.chainA.GetContext(), escrowAddress, originalCoin.Denom) + s.Require().Equal(originalCoin, chainAEscrowBalance) + + traceAToB := types.NewHop(types.PortID, s.pathAToB.EndpointB.ClientID) + + // check that voucher exists on chain B + chainBDenom := types.NewDenom(originalCoin.Denom, traceAToB) + chainBBalance := s.chainB.GetSimApp().BankKeeper.GetBalance(s.chainB.GetContext(), s.chainB.SenderAccount.GetAddress(), chainBDenom.IBCDenom()) + coinSentFromAToB := sdk.NewCoin(chainBDenom.IBCDenom(), amount) + s.Require().Equal(coinSentFromAToB, chainBBalance) +} + +func (s *TransferTestSuite) TestMultiPayloadTransferV2Flow() { + mockPayload := mockv2.NewMockPayload(mockv2.ModuleNameA, mockv2.ModuleNameB) + mockErrPayload := mockv2.NewErrorMockPayload(mockv2.ModuleNameA, mockv2.ModuleNameB) + + var ( + timeout uint64 + payload channeltypesv2.Payload + payloads []channeltypesv2.Payload + ) + + type expResult int + const ( + success expResult = iota + sendError + recvError + ackError + timeoutError + ) + + testCases := []struct { + name string + malleate func() + expRes expResult + }{ + { + name: "success with transfer payloads", + malleate: func() { + payloads = []channeltypesv2.Payload{payload, payload} + }, + expRes: success, + }, + { + name: "success with transfer and mock payloads", + malleate: func() { + payloads = []channeltypesv2.Payload{payload, mockPayload, mockPayload, payload} + }, + expRes: success, + }, + { + name: "send error should revert transfer", + malleate: func() { + // mock the send packet callback to return an error + s.pathAToB.EndpointA.Chain.GetSimApp().MockModuleV2A.IBCApp.OnSendPacket = func(ctx sdk.Context, sourceChannel, destinationChannel string, sequence uint64, data channeltypesv2.Payload, signer sdk.AccAddress) error { + return mock.MockApplicationCallbackError + } + payloads = []channeltypesv2.Payload{payload, mockPayload, payload} + }, + expRes: sendError, + }, + { + name: "recv error on mock should revert transfer", + malleate: func() { + payloads = []channeltypesv2.Payload{payload, mockPayload, mockErrPayload, payload} + }, + expRes: recvError, + }, + { + name: "ack error on mock should block refund on acknowledgement", + malleate: func() { + // mock the acknowledgement packet callback to return an error + s.pathAToB.EndpointA.Chain.GetSimApp().MockModuleV2A.IBCApp.OnAcknowledgementPacket = func(ctx sdk.Context, sourceChannel, destinationChannel string, sequence uint64, payload channeltypesv2.Payload, acknowledgement []byte, relayer sdk.AccAddress) error { + return mock.MockApplicationCallbackError + } + payloads = []channeltypesv2.Payload{payload, mockPayload, mockPayload, payload} + }, + expRes: ackError, + }, + { + name: "timeout error on mock should block refund on timeout", + malleate: func() { + // mock the timeout packet callback to return an error + s.pathAToB.EndpointA.Chain.GetSimApp().MockModuleV2A.IBCApp.OnTimeoutPacket = func(ctx sdk.Context, 
sourceChannel, destinationChannel string, sequence uint64, payload channeltypesv2.Payload, relayer sdk.AccAddress) error { + return mock.MockApplicationCallbackError + } + // set the timeout to be 1 second from now so that the packet will timeout + timeout = uint64(s.chainB.GetContext().BlockTime().Add(time.Second).Unix()) + payloads = []channeltypesv2.Payload{payload, mockPayload, mockPayload, payload} + }, + expRes: timeoutError, + }, + } + + for _, tc := range testCases { + s.Run(tc.name, func() { + s.SetupTest() // reset + + originalBalance := s.chainA.GetSimApp().BankKeeper.GetBalance(s.chainA.GetContext(), s.chainA.SenderAccount.GetAddress(), sdk.DefaultBondDenom) + + // total amount is the sum of all amounts in the payloads which is always 2 * amount + totalAmount, ok := sdkmath.NewIntFromString("9223372036854775808") // 2^63 (one above int64) + s.Require().True(ok) + amount := totalAmount.QuoRaw(2) // divide by 2 to account for the two payloads + originalCoin := sdk.NewCoin(sdk.DefaultBondDenom, amount) + totalCoin := sdk.NewCoin(originalCoin.Denom, totalAmount) + + token := types.Token{ + Denom: types.Denom{Base: originalCoin.Denom}, + Amount: originalCoin.Amount.String(), + } + + transferData := types.NewFungibleTokenPacketData(token.Denom.Path(), token.Amount, s.chainA.SenderAccount.GetAddress().String(), s.chainB.SenderAccount.GetAddress().String(), "") + bz := s.chainA.Codec.MustMarshal(&transferData) + + payload = channeltypesv2.NewPayload(types.PortID, types.PortID, types.V1, types.EncodingProtobuf, bz) + + escrowAddress := types.GetEscrowAddress(types.PortID, s.pathAToB.EndpointA.ClientID) + + // Set a timeout of 1 hour from the current block time on receiver chain + timeout = uint64(s.chainB.GetContext().BlockTime().Add(time.Hour).Unix()) + + // malleate the test case to set up the payloads + // and modulate test case behavior + tc.malleate() + + packet, sendErr := s.pathAToB.EndpointA.MsgSendPacket(timeout, payloads...) 
+ + if tc.expRes == sendError { + s.Require().Error(sendErr, "expected error when sending packet with send error") + } else { + s.Require().NoError(sendErr, "unexpected error when sending packet") + + // relay the packet + relayErr := s.pathAToB.EndpointA.RelayPacket(packet) + + // relayer should have error in response on ack error and timeout error + // recv error should not return an error since the error is handled as error acknowledgement + if tc.expRes == ackError || tc.expRes == timeoutError { + s.Require().Error(relayErr, "expected error when relaying packet with acknowledgement error or timeout error") + } else { + s.Require().NoError(relayErr, "unexpected error when relaying packet") + } + } + + ctxA := s.pathAToB.EndpointA.Chain.GetContext() + ctxB := s.pathAToB.EndpointB.Chain.GetContext() + + // GET TRANSFER STATE AFTER RELAY FOR TESTING CHECKS + // get account balances after relaying packet + chainABalance := s.chainA.GetSimApp().BankKeeper.GetBalance(ctxA, s.chainA.SenderAccount.GetAddress(), originalCoin.Denom) + chainAEscrowBalance := s.chainA.GetSimApp().BankKeeper.GetBalance(ctxA, escrowAddress, originalCoin.Denom) + + traceAToB := types.NewHop(types.PortID, s.pathAToB.EndpointB.ClientID) + + // get chain B balance for voucher + chainBDenom := types.NewDenom(originalCoin.Denom, traceAToB) + chainBBalance := s.chainB.GetSimApp().BankKeeper.GetBalance(ctxB, s.chainB.SenderAccount.GetAddress(), chainBDenom.IBCDenom()) + + // calculate the expected coin sent from chain A to chain B + coinSentFromAToB := sdk.NewCoin(chainBDenom.IBCDenom(), amount.MulRaw(2)) + + // GET IBC STATE AFTER RELAY FOR TESTING CHECKS + nextSequenceSend, ok := s.chainA.GetSimApp().IBCKeeper.ChannelKeeperV2.GetNextSequenceSend(s.pathAToB.EndpointA.Chain.GetContext(), s.pathAToB.EndpointA.ClientID) + s.Require().True(ok) + + packetCommitment := s.chainA.GetSimApp().IBCKeeper.ChannelKeeperV2.GetPacketCommitment(ctxA, packet.SourceClient, packet.Sequence) + hasReceipt := s.chainB.GetSimApp().IBCKeeper.ChannelKeeperV2.HasPacketReceipt(ctxB, packet.DestinationClient, packet.Sequence) + hasAck := s.chainB.GetSimApp().IBCKeeper.ChannelKeeperV2.HasPacketAcknowledgement(ctxB, packet.DestinationClient, packet.Sequence) + + switch tc.expRes { + case success: + // check transfer state after successful relay + // check that the balance for chainA is updated + s.Require().Equal(originalBalance.Amount.Sub(totalAmount), chainABalance.Amount, "chain A balance should be deducted after successful transfer") + // check that module account escrow address has locked the tokens + s.Require().Equal(totalCoin, chainAEscrowBalance, "escrow balance should be locked after successful transfer") + // check that voucher exists on chain B + s.Require().Equal(coinSentFromAToB, chainBBalance, "voucher balance should be updated after successful transfer") + + // check IBC state after successful relay + s.Require().Equal(uint64(2), nextSequenceSend, "next sequence send was not incremented correctly") + // packet commitment should be cleared + s.Require().Nil(packetCommitment) + + // packet receipt and acknowledgement should be written + s.Require().True(hasReceipt, "packet receipt should exist") + s.Require().True(hasAck, "packet acknowledgement should exist") + case sendError: + // check transfer state after send error + // check that the balance for chainA is unchanged + s.Require().Equal(originalBalance.Amount, chainABalance.Amount, "chain A balance should be unchanged after send error") + // check that module account escrow address has
not locked the tokens + s.Require().Equal(sdk.NewCoin(originalCoin.Denom, sdkmath.ZeroInt()), chainAEscrowBalance, "escrow balance should be zero after send error") + // check that voucher does not exist on chain B + s.Require().Equal(sdk.NewCoin(chainBDenom.IBCDenom(), sdkmath.ZeroInt()), chainBBalance, "voucher balance should be zero after send error") + + // check IBC state after send error + s.Require().Equal(uint64(1), nextSequenceSend, "next sequence send should not be incremented after send error") + // packet commitment should not exist + s.Require().Nil(packetCommitment, "packet commitment should not exist after send error") + // packet receipt and acknowledgement should not be written + s.Require().False(hasReceipt, "packet receipt should not exist after send error") + s.Require().False(hasAck, "packet acknowledgement should not exist after send error") + case recvError: + // check transfer state after receive error + // check that the balance for chainA is refunded after error acknowledgement is relayed + s.Require().Equal(originalBalance.Amount, chainABalance.Amount, "chain A balance should be unchanged after receive error") + // check that module account escrow address has reverted the locked tokens + s.Require().Equal(sdk.NewCoin(originalCoin.Denom, sdkmath.ZeroInt()), chainAEscrowBalance, "escrow balance should be reverted after receive error") + // check that voucher does not exist on chain B + s.Require().Equal(sdk.NewCoin(chainBDenom.IBCDenom(), sdkmath.ZeroInt()), chainBBalance, "voucher balance should be zero after receive error") + + // check IBC state after receive error + s.Require().Equal(uint64(2), nextSequenceSend, "next sequence send should be incremented after receive error") + // packet commitment should be cleared + s.Require().Nil(packetCommitment, "packet commitment should be cleared after receive error") + // packet receipt should be written + s.Require().True(hasReceipt, "packet receipt should exist after receive error") + // packet acknowledgement should be written + s.Require().True(hasAck, "packet acknowledgement should exist after receive error") + case ackError: + // check transfer state after acknowledgement error + // check that the balance for chainA is still deducted since acknowledgement failed + s.Require().Equal(originalBalance.Amount.Sub(totalAmount), chainABalance.Amount, "chain A balance should still be deducted after acknowledgement error") + // check that module account escrow address has still locked the tokens + s.Require().Equal(totalCoin, chainAEscrowBalance, "escrow balance should still be locked after acknowledgement error") + // check that voucher exists on chain B since the receive succeeded and only the acknowledgement callback failed + s.Require().Equal(sdk.NewCoin(chainBDenom.IBCDenom(), totalAmount), chainBBalance, "voucher balance should equal the total amount after acknowledgement error") + + // check IBC state after acknowledgement error + s.Require().Equal(uint64(2), nextSequenceSend, "next sequence send should be incremented after acknowledgement error") + // packet commitment should not be cleared + s.Require().NotNil(packetCommitment, "packet commitment should not be cleared after acknowledgement error") + // packet receipt should be written + s.Require().True(hasReceipt, "packet receipt should exist after acknowledgement error") + // packet acknowledgement should be written + s.Require().True(hasAck, "packet acknowledgement should exist after acknowledgement error") + case timeoutError: + // check transfer state after timeout error + // check
that the balance for chainA is still deducted since the timeout callback failed + s.Require().Equal(originalBalance.Amount.Sub(totalAmount), chainABalance.Amount, "chain A balance should still be deducted after timeout error") + // check that module account escrow address has still locked the tokens + s.Require().Equal(totalCoin, chainAEscrowBalance, "escrow balance should still be locked after timeout error") + // check that voucher does not exist on chain B since the packet timed out and was never received + s.Require().Equal(sdk.NewCoin(chainBDenom.IBCDenom(), sdkmath.ZeroInt()), chainBBalance, "voucher balance should be zero after timeout error") + + // check IBC state after timeout error + s.Require().Equal(uint64(2), nextSequenceSend, "next sequence send should be incremented after timeout error") + // packet commitment should not be cleared + s.Require().NotNil(packetCommitment, "packet commitment should not be cleared after timeout error") + // packet receipt should not be written + s.Require().False(hasReceipt, "packet receipt should not exist after timeout error") + // packet acknowledgement should not be written + s.Require().False(hasAck, "packet acknowledgement should not exist after timeout error") + + default: + s.T().Fatalf("unexpected expRes: %v", tc.expRes) + } + }) + } +} diff --git a/modules/core/02-client/abci_test.go b/modules/core/02-client/abci_test.go index fb9c8c780ff..72cf2546270 100644 --- a/modules/core/02-client/abci_test.go +++ b/modules/core/02-client/abci_test.go @@ -27,95 +27,95 @@ type ClientTestSuite struct { chainB *ibctesting.TestChain } -func (suite *ClientTestSuite) SetupTest() { - suite.coordinator = ibctesting.NewCoordinator(suite.T(), 2) - - suite.chainA = suite.coordinator.GetChain(ibctesting.GetChainID(1)) - suite.chainB = suite.coordinator.GetChain(ibctesting.GetChainID(2)) -} - func TestClientTestSuite(t *testing.T) { testifysuite.Run(t, new(ClientTestSuite)) } -func (suite *ClientTestSuite) TestBeginBlocker() { +func (s *ClientTestSuite) SetupTest() { + s.coordinator = ibctesting.NewCoordinator(s.T(), 2) + + s.chainA = s.coordinator.GetChain(ibctesting.GetChainID(1)) + s.chainB = s.coordinator.GetChain(ibctesting.GetChainID(2)) +} + +func (s *ClientTestSuite) TestBeginBlocker() { for range 10 { // increment height - suite.coordinator.CommitBlock(suite.chainA, suite.chainB) + s.coordinator.CommitBlock(s.chainA, s.chainB) - suite.Require().NotPanics(func() { - client.BeginBlocker(suite.chainA.GetContext(), suite.chainA.App.GetIBCKeeper().ClientKeeper) + s.Require().NotPanics(func() { + client.BeginBlocker(s.chainA.GetContext(), s.chainA.App.GetIBCKeeper().ClientKeeper) }, "BeginBlocker shouldn't panic") } } -func (suite *ClientTestSuite) TestBeginBlockerConsensusState() { +func (s *ClientTestSuite) TestBeginBlockerConsensusState() { plan := &upgradetypes.Plan{ Name: "test", - Height: suite.chainA.GetContext().BlockHeight() + 1, + Height: s.chainA.GetContext().BlockHeight() + 1, } // set upgrade plan in the upgrade store - store := suite.chainA.GetContext().KVStore(suite.chainA.GetSimApp().GetKey(upgradetypes.StoreKey)) - bz := suite.chainA.App.AppCodec().MustMarshal(plan) + store := s.chainA.GetContext().KVStore(s.chainA.GetSimApp().GetKey(upgradetypes.StoreKey)) + bz := s.chainA.App.AppCodec().MustMarshal(plan) store.Set(upgradetypes.PlanKey(), bz) nextValsHash := []byte("nextValsHash") - newCtx := suite.chainA.GetContext().WithBlockHeader(cmtproto.Header{ - ChainID: suite.chainA.ChainID, - Height:
suite.chainA.GetContext().BlockHeight(), + newCtx := s.chainA.GetContext().WithBlockHeader(cmtproto.Header{ + ChainID: s.chainA.ChainID, + Height: s.chainA.GetContext().BlockHeight(), NextValidatorsHash: nextValsHash, }) - err := suite.chainA.GetSimApp().UpgradeKeeper.SetUpgradedClient(newCtx, plan.Height, []byte("client state")) - suite.Require().NoError(err) + err := s.chainA.GetSimApp().UpgradeKeeper.SetUpgradedClient(newCtx, plan.Height, []byte("client state")) + s.Require().NoError(err) - client.BeginBlocker(newCtx, suite.chainA.App.GetIBCKeeper().ClientKeeper) + client.BeginBlocker(newCtx, s.chainA.App.GetIBCKeeper().ClientKeeper) // plan Height is at ctx.BlockHeight+1 - consState, err := suite.chainA.GetSimApp().UpgradeKeeper.GetUpgradedConsensusState(newCtx, plan.Height) - suite.Require().NoError(err) + consState, err := s.chainA.GetSimApp().UpgradeKeeper.GetUpgradedConsensusState(newCtx, plan.Height) + s.Require().NoError(err) - bz, err = types.MarshalConsensusState(suite.chainA.App.AppCodec(), &ibctm.ConsensusState{Timestamp: newCtx.BlockTime(), NextValidatorsHash: nextValsHash}) - suite.Require().NoError(err) - suite.Require().Equal(bz, consState) + bz, err = types.MarshalConsensusState(s.chainA.App.AppCodec(), &ibctm.ConsensusState{Timestamp: newCtx.BlockTime(), NextValidatorsHash: nextValsHash}) + s.Require().NoError(err) + s.Require().Equal(bz, consState) } -func (suite *ClientTestSuite) TestBeginBlockerUpgradeEvents() { +func (s *ClientTestSuite) TestBeginBlockerUpgradeEvents() { plan := &upgradetypes.Plan{ Name: "test", - Height: suite.chainA.GetContext().BlockHeight() + 1, + Height: s.chainA.GetContext().BlockHeight() + 1, } // set upgrade plan in the upgrade store - store := suite.chainA.GetContext().KVStore(suite.chainA.GetSimApp().GetKey(upgradetypes.StoreKey)) - bz := suite.chainA.App.AppCodec().MustMarshal(plan) + store := s.chainA.GetContext().KVStore(s.chainA.GetSimApp().GetKey(upgradetypes.StoreKey)) + bz := s.chainA.App.AppCodec().MustMarshal(plan) store.Set(upgradetypes.PlanKey(), bz) nextValsHash := []byte("nextValsHash") - newCtx := suite.chainA.GetContext().WithBlockHeader(cmtproto.Header{ - Height: suite.chainA.GetContext().BlockHeight(), + newCtx := s.chainA.GetContext().WithBlockHeader(cmtproto.Header{ + Height: s.chainA.GetContext().BlockHeight(), NextValidatorsHash: nextValsHash, }) - err := suite.chainA.GetSimApp().UpgradeKeeper.SetUpgradedClient(newCtx, plan.Height, []byte("client state")) - suite.Require().NoError(err) + err := s.chainA.GetSimApp().UpgradeKeeper.SetUpgradedClient(newCtx, plan.Height, []byte("client state")) + s.Require().NoError(err) - cacheCtx, writeCache := suite.chainA.GetContext().CacheContext() + cacheCtx, writeCache := s.chainA.GetContext().CacheContext() - client.BeginBlocker(cacheCtx, suite.chainA.App.GetIBCKeeper().ClientKeeper) + client.BeginBlocker(cacheCtx, s.chainA.App.GetIBCKeeper().ClientKeeper) writeCache() - suite.requireContainsEvent(cacheCtx.EventManager().Events(), types.EventTypeUpgradeChain, true) + s.requireContainsEvent(cacheCtx.EventManager().Events(), types.EventTypeUpgradeChain, true) } -func (suite *ClientTestSuite) TestBeginBlockerUpgradeEventsAbsence() { - cacheCtx, writeCache := suite.chainA.GetContext().CacheContext() - client.BeginBlocker(suite.chainA.GetContext(), suite.chainA.App.GetIBCKeeper().ClientKeeper) +func (s *ClientTestSuite) TestBeginBlockerUpgradeEventsAbsence() { + cacheCtx, writeCache := s.chainA.GetContext().CacheContext() + client.BeginBlocker(s.chainA.GetContext(), 
s.chainA.App.GetIBCKeeper().ClientKeeper) writeCache() - suite.requireContainsEvent(cacheCtx.EventManager().Events(), types.EventTypeUpgradeChain, false) + s.requireContainsEvent(cacheCtx.EventManager().Events(), types.EventTypeUpgradeChain, false) } // requireContainsEvent verifies if an event of a specific type was emitted. -func (suite *ClientTestSuite) requireContainsEvent(events sdk.Events, eventType string, shouldContain bool) { +func (s *ClientTestSuite) requireContainsEvent(events sdk.Events, eventType string, shouldContain bool) { found := false var eventTypes []string for _, e := range events { @@ -126,8 +126,8 @@ func (suite *ClientTestSuite) requireContainsEvent(events sdk.Events, eventType } } if shouldContain { - suite.Require().True(found, "event type %s was not found in %s", eventType, strings.Join(eventTypes, ",")) + s.Require().True(found, "event type %s was not found in %s", eventType, strings.Join(eventTypes, ",")) } else { - suite.Require().False(found, "event type %s was found in %s", eventType, strings.Join(eventTypes, ",")) + s.Require().False(found, "event type %s was found in %s", eventType, strings.Join(eventTypes, ",")) } } diff --git a/modules/core/02-client/client/cli/cli.go b/modules/core/02-client/client/cli/cli.go index f030f0fe730..2078d3c6853 100644 --- a/modules/core/02-client/client/cli/cli.go +++ b/modules/core/02-client/client/cli/cli.go @@ -51,7 +51,6 @@ func NewTxCmd() *cobra.Command { newCreateClientCmd(), newAddCounterpartyCmd(), newUpdateClientCmd(), - newSubmitMisbehaviourCmd(), // Deprecated newUpgradeClientCmd(), newSubmitRecoverClientProposalCmd(), newScheduleIBCUpgradeProposalCmd(), diff --git a/modules/core/02-client/client/cli/tx.go b/modules/core/02-client/client/cli/tx.go index 5c29fe32211..3074c3e9bba 100644 --- a/modules/core/02-client/client/cli/tx.go +++ b/modules/core/02-client/client/cli/tx.go @@ -48,7 +48,6 @@ func newCreateClientCmd() *cobra.Command { var clientState exported.ClientState clientContentOrFileName := args[0] if err := cdc.UnmarshalInterfaceJSON([]byte(clientContentOrFileName), &clientState); err != nil { - // check for file path if JSON input is not provided contents, err := os.ReadFile(clientContentOrFileName) if err != nil { @@ -64,7 +63,6 @@ func newCreateClientCmd() *cobra.Command { var consensusState exported.ConsensusState consensusContentOrFileName := args[1] if err := cdc.UnmarshalInterfaceJSON([]byte(consensusContentOrFileName), &consensusState); err != nil { - // check for file path if JSON input is not provided contents, err := os.ReadFile(consensusContentOrFileName) if err != nil { @@ -201,7 +199,6 @@ func newUpdateClientCmd() *cobra.Command { var clientMsg exported.ClientMessage clientMsgContentOrFileName := args[1] if err := cdc.UnmarshalInterfaceJSON([]byte(clientMsgContentOrFileName), &clientMsg); err != nil { - // check for file path if JSON input is not provided contents, err := os.ReadFile(clientMsgContentOrFileName) if err != nil { @@ -226,53 +223,6 @@ func newUpdateClientCmd() *cobra.Command { return cmd } -// newSubmitMisbehaviourCmd defines the command to submit a misbehaviour to prevent -// future updates. -// Deprecated: NewSubmitMisbehaviourCmd is deprecated and will be removed in a future release. -// Please use NewUpdateClientCmd instead. 
-func newSubmitMisbehaviourCmd() *cobra.Command { - cmd := &cobra.Command{ - Use: "misbehaviour [clientID] [path/to/misbehaviour.json]", - Short: "submit a client misbehaviour", - Long: "submit a client misbehaviour to prevent future updates", - Example: fmt.Sprintf("%s tx ibc %s misbehaviour [clientID] [path/to/misbehaviour.json] --from node0 --home ../node0/cli --chain-id $CID", version.AppName, types.SubModuleName), - Args: cobra.ExactArgs(2), - RunE: func(cmd *cobra.Command, args []string) error { - clientCtx, err := client.GetClientTxContext(cmd) - if err != nil { - return err - } - cdc := codec.NewProtoCodec(clientCtx.InterfaceRegistry) - - var misbehaviour exported.ClientMessage - clientID := args[0] - misbehaviourContentOrFileName := args[1] - if err := cdc.UnmarshalInterfaceJSON([]byte(misbehaviourContentOrFileName), &misbehaviour); err != nil { - - // check for file path if JSON input is not provided - contents, err := os.ReadFile(misbehaviourContentOrFileName) - if err != nil { - return fmt.Errorf("neither JSON input nor path to .json file for misbehaviour were provided: %w", err) - } - - if err := cdc.UnmarshalInterfaceJSON(contents, &misbehaviour); err != nil { - return fmt.Errorf("error unmarshalling misbehaviour file: %w", err) - } - } - - msg, err := types.NewMsgSubmitMisbehaviour(clientID, misbehaviour, clientCtx.GetFromAddress().String()) - if err != nil { - return err - } - - return tx.GenerateOrBroadcastTxCLI(clientCtx, cmd.Flags(), msg) - }, - } - - flags.AddTxFlagsToCmd(cmd) - return cmd -} - // newUpgradeClientCmd defines the command to upgrade an IBC light client. func newUpgradeClientCmd() *cobra.Command { cmd := &cobra.Command{ @@ -295,7 +245,6 @@ func newUpgradeClientCmd() *cobra.Command { var clientState exported.ClientState clientContentOrFileName := args[1] if err := cdc.UnmarshalInterfaceJSON([]byte(clientContentOrFileName), &clientState); err != nil { - // check for file path if JSON input is not provided contents, err := os.ReadFile(clientContentOrFileName) if err != nil { @@ -311,7 +260,6 @@ func newUpgradeClientCmd() *cobra.Command { var consensusState exported.ConsensusState consensusContentOrFileName := args[2] if err := cdc.UnmarshalInterfaceJSON([]byte(consensusContentOrFileName), &consensusState); err != nil { - // check for file path if JSON input is not provided contents, err := os.ReadFile(consensusContentOrFileName) if err != nil { @@ -495,7 +443,6 @@ func newScheduleIBCUpgradeProposalCmd() *cobra.Command { var clientState exported.ClientState clientContentOrFileName := args[2] if err := cdc.UnmarshalInterfaceJSON([]byte(clientContentOrFileName), &clientState); err != nil { - // check for file path if JSON input is not provided contents, err := os.ReadFile(clientContentOrFileName) if err != nil { diff --git a/modules/core/02-client/genesis.go b/modules/core/02-client/genesis.go index ce3ca7d526b..3f15db5c916 100644 --- a/modules/core/02-client/genesis.go +++ b/modules/core/02-client/genesis.go @@ -15,7 +15,7 @@ import ( // state. 
func InitGenesis(ctx sdk.Context, k *keeper.Keeper, gs types.GenesisState) { if err := gs.Params.Validate(); err != nil { - panic(fmt.Errorf("invalid ibc client genesis state parameters: %v", err)) + panic(fmt.Errorf("invalid ibc client genesis state parameters: %w", err)) } k.SetParams(ctx, gs.Params) diff --git a/modules/core/02-client/keeper/client_test.go b/modules/core/02-client/keeper/client_test.go index 9153ddb75f0..ebfd8c2d1c0 100644 --- a/modules/core/02-client/keeper/client_test.go +++ b/modules/core/02-client/keeper/client_test.go @@ -21,7 +21,7 @@ import ( ibctesting "github.com/cosmos/ibc-go/v10/testing" ) -func (suite *KeeperTestSuite) TestCreateClient() { +func (s *KeeperTestSuite) TestCreateClient() { var ( clientState []byte consensusState []byte @@ -37,8 +37,8 @@ func (suite *KeeperTestSuite) TestCreateClient() { "success: 07-tendermint client type supported", func() { tmClientState := ibctm.NewClientState(testChainID, ibctm.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, testClientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath) - clientState = suite.chainA.App.AppCodec().MustMarshal(tmClientState) - consensusState = suite.chainA.App.AppCodec().MustMarshal(suite.consensusState) + clientState = s.chainA.App.AppCodec().MustMarshal(tmClientState) + consensusState = s.chainA.App.AppCodec().MustMarshal(s.consensusState) }, exported.Tendermint, nil, @@ -48,8 +48,8 @@ func (suite *KeeperTestSuite) TestCreateClient() { func() { tmClientState := ibctm.NewClientState(testChainID, ibctm.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, testClientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath) tmClientState.FrozenHeight = ibctm.FrozenHeight - clientState = suite.chainA.App.AppCodec().MustMarshal(tmClientState) - consensusState = suite.chainA.App.AppCodec().MustMarshal(suite.consensusState) + clientState = s.chainA.App.AppCodec().MustMarshal(tmClientState) + consensusState = s.chainA.App.AppCodec().MustMarshal(s.consensusState) }, exported.Tendermint, errorsmod.Wrapf(clienttypes.ErrClientNotActive, "cannot create client (07-tendermint-0) with status Frozen"), @@ -57,10 +57,10 @@ func (suite *KeeperTestSuite) TestCreateClient() { { "success: 06-solomachine client type supported", func() { - smClientState := solomachine.NewClientState(1, &solomachine.ConsensusState{PublicKey: suite.solomachine.ConsensusState().PublicKey, Diversifier: suite.solomachine.Diversifier, Timestamp: suite.solomachine.Time}) - smConsensusState := &solomachine.ConsensusState{PublicKey: suite.solomachine.ConsensusState().PublicKey, Diversifier: suite.solomachine.Diversifier, Timestamp: suite.solomachine.Time} - clientState = suite.chainA.App.AppCodec().MustMarshal(smClientState) - consensusState = suite.chainA.App.AppCodec().MustMarshal(smConsensusState) + smClientState := solomachine.NewClientState(1, &solomachine.ConsensusState{PublicKey: s.solomachine.ConsensusState().PublicKey, Diversifier: s.solomachine.Diversifier, Timestamp: s.solomachine.Time}) + smConsensusState := &solomachine.ConsensusState{PublicKey: s.solomachine.ConsensusState().PublicKey, Diversifier: s.solomachine.Diversifier, Timestamp: s.solomachine.Time} + clientState = s.chainA.App.AppCodec().MustMarshal(smClientState) + consensusState = s.chainA.App.AppCodec().MustMarshal(smConsensusState) }, exported.Solomachine, nil, @@ -74,50 +74,50 @@ func (suite *KeeperTestSuite) TestCreateClient() { } for _, tc := range testCases { - suite.Run(fmt.Sprintf("Case %s", tc.msg), func() { - 
suite.SetupTest() // reset + s.Run(fmt.Sprintf("Case %s", tc.msg), func() { + s.SetupTest() // reset clientState, consensusState = []byte{}, []byte{} tc.malleate() - clientID, err := suite.chainA.GetSimApp().IBCKeeper.ClientKeeper.CreateClient(suite.chainA.GetContext(), tc.clientType, clientState, consensusState) + clientID, err := s.chainA.GetSimApp().IBCKeeper.ClientKeeper.CreateClient(s.chainA.GetContext(), tc.clientType, clientState, consensusState) // assert correct behaviour based on expected error - clientState, found := suite.chainA.GetSimApp().IBCKeeper.ClientKeeper.GetClientState(suite.chainA.GetContext(), clientID) + clientState, found := s.chainA.GetSimApp().IBCKeeper.ClientKeeper.GetClientState(s.chainA.GetContext(), clientID) if tc.expErr == nil { - suite.Require().NoError(err) - suite.Require().NotEmpty(clientID) - suite.Require().True(found) - suite.Require().NotEmpty(clientState) + s.Require().NoError(err) + s.Require().NotEmpty(clientID) + s.Require().True(found) + s.Require().NotEmpty(clientState) } else { - suite.Require().Error(err) - suite.Require().Empty(clientID) - suite.Require().False(found) - suite.Require().Empty(clientState) - suite.Require().ErrorIs(err, tc.expErr) + s.Require().Error(err) + s.Require().Empty(clientID) + s.Require().False(found) + s.Require().Empty(clientState) + s.Require().ErrorIs(err, tc.expErr) } }) } } -func (suite *KeeperTestSuite) TestUpdateClientTendermint() { +func (s *KeeperTestSuite) TestUpdateClientTendermint() { var ( path *ibctesting.Path updateHeader *ibctm.Header ) - // Must create header creation functions since suite.header gets recreated on each test case + // Must create header creation functions since s.header gets recreated on each test case createFutureUpdateFn := func(trustedHeight clienttypes.Height) *ibctm.Header { header, err := path.EndpointB.Chain.IBCClientHeader(path.EndpointB.Chain.LatestCommittedHeader, trustedHeight) - suite.Require().NoError(err) + s.Require().NoError(err) return header } createPastUpdateFn := func(fillHeight, trustedHeight clienttypes.Height) *ibctm.Header { - consState, found := suite.chainA.App.GetIBCKeeper().ClientKeeper.GetClientConsensusState(suite.chainA.GetContext(), path.EndpointA.ClientID, trustedHeight) - suite.Require().True(found) + consState, found := s.chainA.App.GetIBCKeeper().ClientKeeper.GetClientConsensusState(s.chainA.GetContext(), path.EndpointA.ClientID, trustedHeight) + s.Require().True(found) - return suite.chainB.CreateTMClientHeader(suite.chainB.ChainID, int64(fillHeight.RevisionHeight), trustedHeight, consState.(*ibctm.ConsensusState).Timestamp.Add(time.Second*5), - suite.chainB.Vals, suite.chainB.Vals, suite.chainB.Vals, suite.chainB.Signers) + return s.chainB.CreateTMClientHeader(s.chainB.ChainID, int64(fillHeight.RevisionHeight), trustedHeight, consState.(*ibctm.ConsensusState).Timestamp.Add(time.Second*5), + s.chainB.Vals, s.chainB.Vals, s.chainB.Vals, s.chainB.Signers) } cases := []struct { @@ -131,28 +131,28 @@ func (suite *KeeperTestSuite) TestUpdateClientTendermint() { // store intermediate consensus state to check that trustedHeight does not need to be highest consensus state before header height err := path.EndpointA.UpdateClient() - suite.Require().NoError(err) + s.Require().NoError(err) updateHeader = createFutureUpdateFn(trustedHeight.(clienttypes.Height)) }, nil, false}, {"valid past update", func() { trustedHeight := path.EndpointA.GetClientLatestHeight() - currHeight := suite.chainB.ProposedHeader.Height + currHeight := s.chainB.ProposedHeader.Height 
fillHeight := clienttypes.NewHeight(trustedHeight.GetRevisionNumber(), uint64(currHeight)) // commit a couple blocks to allow client to fill in gaps - suite.coordinator.CommitBlock(suite.chainB) // this height is not filled in yet - suite.coordinator.CommitBlock(suite.chainB) // this height is filled in by the update below + s.coordinator.CommitBlock(s.chainB) // this height is not filled in yet + s.coordinator.CommitBlock(s.chainB) // this height is filled in by the update below err := path.EndpointA.UpdateClient() - suite.Require().NoError(err) + s.Require().NoError(err) // ensure fill height not set - _, found := suite.chainA.App.GetIBCKeeper().ClientKeeper.GetClientConsensusState(suite.chainA.GetContext(), path.EndpointA.ClientID, fillHeight) - suite.Require().False(found) + _, found := s.chainA.App.GetIBCKeeper().ClientKeeper.GetClientConsensusState(s.chainA.GetContext(), path.EndpointA.ClientID, fillHeight) + s.Require().False(found) - // updateHeader will fill in consensus state between prevConsState and suite.consState + // updateHeader will fill in consensus state between prevConsState and s.consState // clientState should not be updated updateHeader = createPastUpdateFn(fillHeight, trustedHeight.(clienttypes.Height)) }, nil, false}, @@ -161,16 +161,16 @@ func (suite *KeeperTestSuite) TestUpdateClientTendermint() { // store previous consensus state prevConsState := &ibctm.ConsensusState{ - Timestamp: suite.past, - NextValidatorsHash: suite.chainB.Vals.Hash(), + Timestamp: s.past, + NextValidatorsHash: s.chainB.Vals.Hash(), } path.EndpointA.SetConsensusState(prevConsState, height1) height5 := clienttypes.NewHeight(1, 5) // store next consensus state to check that trustedHeight does not need to be highest consensus state before header height nextConsState := &ibctm.ConsensusState{ - Timestamp: suite.past.Add(time.Minute), - NextValidatorsHash: suite.chainB.Vals.Hash(), + Timestamp: s.past.Add(time.Minute), + NextValidatorsHash: s.chainB.Vals.Hash(), } path.EndpointA.SetConsensusState(nextConsState, height5) @@ -180,7 +180,7 @@ func (suite *KeeperTestSuite) TestUpdateClientTendermint() { path.EndpointA.SetClientState(clientState) height3 := clienttypes.NewHeight(1, 3) - // updateHeader will fill in consensus state between prevConsState and suite.consState + // updateHeader will fill in consensus state between prevConsState and s.consState // clientState should not be updated updateHeader = createPastUpdateFn(height3, height1) // set updateHeader's consensus state in store to create duplicate UpdateClient scenario @@ -192,55 +192,55 @@ func (suite *KeeperTestSuite) TestUpdateClientTendermint() { height1 := clienttypes.NewHeight(1, 1) // store previous consensus state prevConsState := &ibctm.ConsensusState{ - Timestamp: suite.past, - NextValidatorsHash: suite.chainB.Vals.Hash(), + Timestamp: s.past, + NextValidatorsHash: s.chainB.Vals.Hash(), } - suite.chainA.App.GetIBCKeeper().ClientKeeper.SetClientConsensusState(suite.chainA.GetContext(), clientID, height1, prevConsState) + s.chainA.App.GetIBCKeeper().ClientKeeper.SetClientConsensusState(s.chainA.GetContext(), clientID, height1, prevConsState) height5 := clienttypes.NewHeight(1, 5) // store next consensus state to check that trustedHeight does not need to be highest consensus state before header height nextConsState := &ibctm.ConsensusState{ - Timestamp: suite.past.Add(time.Minute), - NextValidatorsHash: suite.chainB.Vals.Hash(), + Timestamp: s.past.Add(time.Minute), + NextValidatorsHash: s.chainB.Vals.Hash(), } - 
suite.chainA.App.GetIBCKeeper().ClientKeeper.SetClientConsensusState(suite.chainA.GetContext(), clientID, height5, nextConsState) + s.chainA.App.GetIBCKeeper().ClientKeeper.SetClientConsensusState(s.chainA.GetContext(), clientID, height5, nextConsState) height3 := clienttypes.NewHeight(1, 3) - // updateHeader will fill in consensus state between prevConsState and suite.consState + // updateHeader will fill in consensus state between prevConsState and s.consState // clientState should not be updated updateHeader = createPastUpdateFn(height3, height1) // set conflicting consensus state in store to create misbehaviour scenario conflictConsState := updateHeader.ConsensusState() conflictConsState.Root = commitmenttypes.NewMerkleRoot([]byte("conflicting apphash")) - suite.chainA.App.GetIBCKeeper().ClientKeeper.SetClientConsensusState(suite.chainA.GetContext(), clientID, updateHeader.GetHeight(), conflictConsState) + s.chainA.App.GetIBCKeeper().ClientKeeper.SetClientConsensusState(s.chainA.GetContext(), clientID, updateHeader.GetHeight(), conflictConsState) }, nil, true}, {"misbehaviour detection: monotonic time violation", func() { clientState, ok := path.EndpointA.GetClientState().(*ibctm.ClientState) - suite.Require().True(ok) + s.Require().True(ok) clientID := path.EndpointA.ClientID trustedHeight := clientState.LatestHeight // store intermediate consensus state at a time greater than updateHeader time // this will break time monotonicity incrementedClientHeight, ok := clientState.LatestHeight.Increment().(clienttypes.Height) - suite.Require().True(ok) + s.Require().True(ok) intermediateConsState := &ibctm.ConsensusState{ - Timestamp: suite.coordinator.CurrentTime.Add(2 * time.Hour), - NextValidatorsHash: suite.chainB.Vals.Hash(), + Timestamp: s.coordinator.CurrentTime.Add(2 * time.Hour), + NextValidatorsHash: s.chainB.Vals.Hash(), } - suite.chainA.App.GetIBCKeeper().ClientKeeper.SetClientConsensusState(suite.chainA.GetContext(), clientID, incrementedClientHeight, intermediateConsState) + s.chainA.App.GetIBCKeeper().ClientKeeper.SetClientConsensusState(s.chainA.GetContext(), clientID, incrementedClientHeight, intermediateConsState) // set iteration key - clientStore := suite.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(suite.chainA.GetContext(), clientID) + clientStore := s.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(s.chainA.GetContext(), clientID) ibctm.SetIterationKey(clientStore, incrementedClientHeight) clientState.LatestHeight = incrementedClientHeight - suite.chainA.App.GetIBCKeeper().ClientKeeper.SetClientState(suite.chainA.GetContext(), clientID, clientState) + s.chainA.App.GetIBCKeeper().ClientKeeper.SetClientState(s.chainA.GetContext(), clientID, clientState) updateHeader = createFutureUpdateFn(trustedHeight) }, nil, true}, {"client state not found", func() { clientState, ok := path.EndpointA.GetClientState().(*ibctm.ClientState) - suite.Require().True(ok) + s.Require().True(ok) updateHeader = createFutureUpdateFn(clientState.LatestHeight) path.EndpointA.ClientID = ibctesting.InvalidID @@ -248,33 +248,33 @@ func (suite *KeeperTestSuite) TestUpdateClientTendermint() { {"consensus state not found", func() { clientState := path.EndpointA.GetClientState() tmClient, ok := clientState.(*ibctm.ClientState) - suite.Require().True(ok) + s.Require().True(ok) tmClient.LatestHeight, ok = tmClient.LatestHeight.Increment().(clienttypes.Height) - suite.Require().True(ok) + s.Require().True(ok) - suite.chainA.App.GetIBCKeeper().ClientKeeper.SetClientState(suite.chainA.GetContext(), 
path.EndpointA.ClientID, clientState) + s.chainA.App.GetIBCKeeper().ClientKeeper.SetClientState(s.chainA.GetContext(), path.EndpointA.ClientID, clientState) updateHeader = createFutureUpdateFn(tmClient.LatestHeight) }, errorsmod.Wrapf(clienttypes.ErrClientNotActive, "cannot update client (07-tendermint-0) with status Expired"), false}, {"client is not active", func() { clientState, ok := path.EndpointA.GetClientState().(*ibctm.ClientState) - suite.Require().True(ok) + s.Require().True(ok) clientState.FrozenHeight = clienttypes.NewHeight(1, 1) - suite.chainA.App.GetIBCKeeper().ClientKeeper.SetClientState(suite.chainA.GetContext(), path.EndpointA.ClientID, clientState) + s.chainA.App.GetIBCKeeper().ClientKeeper.SetClientState(s.chainA.GetContext(), path.EndpointA.ClientID, clientState) updateHeader = createFutureUpdateFn(clientState.LatestHeight) }, errorsmod.Wrapf(clienttypes.ErrClientNotActive, "cannot update client (07-tendermint-0) with status Frozen"), false}, {"invalid header", func() { clientState, ok := path.EndpointA.GetClientState().(*ibctm.ClientState) - suite.Require().True(ok) + s.Require().True(ok) updateHeader = createFutureUpdateFn(clientState.LatestHeight) updateHeader.TrustedHeight, ok = updateHeader.TrustedHeight.Increment().(clienttypes.Height) - suite.Require().True(ok) + s.Require().True(ok) }, errorsmod.Wrapf(clienttypes.ErrConsensusStateNotFound, "could not get trusted consensus state from clientStore for Header at TrustedHeight: 1-3"), false}, } for _, tc := range cases { - suite.Run(fmt.Sprintf("Case %s", tc.name), func() { - suite.SetupTest() - path = ibctesting.NewPath(suite.chainA, suite.chainB) + s.Run(fmt.Sprintf("Case %s", tc.name), func() { + s.SetupTest() + path = ibctesting.NewPath(s.chainA, s.chainB) path.SetupClients() tc.malleate() @@ -283,19 +283,19 @@ func (suite *KeeperTestSuite) TestUpdateClientTendermint() { var ok bool if tc.expErr == nil { clientState, ok = path.EndpointA.GetClientState().(*ibctm.ClientState) - suite.Require().True(ok) + s.Require().True(ok) } - err := suite.chainA.App.GetIBCKeeper().ClientKeeper.UpdateClient(suite.chainA.GetContext(), path.EndpointA.ClientID, updateHeader) + err := s.chainA.App.GetIBCKeeper().ClientKeeper.UpdateClient(s.chainA.GetContext(), path.EndpointA.ClientID, updateHeader) if tc.expErr == nil { - suite.Require().NoError(err, err) + s.Require().NoError(err, err) newClientState, ok := path.EndpointA.GetClientState().(*ibctm.ClientState) - suite.Require().True(ok) + s.Require().True(ok) if tc.expFreeze { - suite.Require().True(!newClientState.FrozenHeight.IsZero(), "client did not freeze after conflicting header was submitted to UpdateClient") + s.Require().True(!newClientState.FrozenHeight.IsZero(), "client did not freeze after conflicting header was submitted to UpdateClient") } else { expConsensusState := &ibctm.ConsensusState{ Timestamp: updateHeader.GetTime(), @@ -303,30 +303,30 @@ func (suite *KeeperTestSuite) TestUpdateClientTendermint() { NextValidatorsHash: updateHeader.Header.NextValidatorsHash, } - consensusState, found := suite.chainA.App.GetIBCKeeper().ClientKeeper.GetClientConsensusState(suite.chainA.GetContext(), path.EndpointA.ClientID, updateHeader.GetHeight()) - suite.Require().True(found) + consensusState, found := s.chainA.App.GetIBCKeeper().ClientKeeper.GetClientConsensusState(s.chainA.GetContext(), path.EndpointA.ClientID, updateHeader.GetHeight()) + s.Require().True(found) // Determine if clientState should be updated or not if updateHeader.GetHeight().GT(clientState.LatestHeight) { // 
Header Height is greater than clientState latest Height, clientState should be updated with header.GetHeight() - suite.Require().Equal(updateHeader.GetHeight(), newClientState.LatestHeight, "clientstate height did not update") + s.Require().Equal(updateHeader.GetHeight(), newClientState.LatestHeight, "clientstate height did not update") } else { // Update will add past consensus state, clientState should not be updated at all - suite.Require().Equal(clientState.LatestHeight, newClientState.LatestHeight, "client state height updated for past header") + s.Require().Equal(clientState.LatestHeight, newClientState.LatestHeight, "client state height updated for past header") } - suite.Require().NoError(err) - suite.Require().Equal(expConsensusState, consensusState, "consensus state should have been updated on case %s", tc.name) + s.Require().NoError(err) + s.Require().Equal(expConsensusState, consensusState, "consensus state should have been updated on case %s", tc.name) } } else { - suite.Require().Error(err) - suite.Require().ErrorIs(err, tc.expErr) + s.Require().Error(err) + s.Require().ErrorIs(err, tc.expErr) } }) } } -func (suite *KeeperTestSuite) TestUpgradeClient() { +func (s *KeeperTestSuite) TestUpgradeClient() { var ( path *ibctesting.Path upgradedClient *ibctm.ClientState @@ -345,26 +345,26 @@ func (suite *KeeperTestSuite) TestUpgradeClient() { name: "successful upgrade", setup: func() { // upgrade Height is at next block - upgradeHeight = clienttypes.NewHeight(1, uint64(suite.chainB.GetContext().BlockHeight()+1)) + upgradeHeight = clienttypes.NewHeight(1, uint64(s.chainB.GetContext().BlockHeight()+1)) // zero custom fields and store in upgrade store - err := suite.chainB.GetSimApp().UpgradeKeeper.SetUpgradedClient(suite.chainB.GetContext(), int64(upgradeHeight.GetRevisionHeight()), suite.chainB.Codec.MustMarshal(upgradedClientAny)) - suite.Require().NoError(err) - err = suite.chainB.GetSimApp().UpgradeKeeper.SetUpgradedConsensusState(suite.chainB.GetContext(), int64(upgradeHeight.GetRevisionHeight()), suite.chainB.Codec.MustMarshal(upgradedConsStateAny)) - suite.Require().NoError(err) + err := s.chainB.GetSimApp().UpgradeKeeper.SetUpgradedClient(s.chainB.GetContext(), int64(upgradeHeight.GetRevisionHeight()), s.chainB.Codec.MustMarshal(upgradedClientAny)) + s.Require().NoError(err) + err = s.chainB.GetSimApp().UpgradeKeeper.SetUpgradedConsensusState(s.chainB.GetContext(), int64(upgradeHeight.GetRevisionHeight()), s.chainB.Codec.MustMarshal(upgradedConsStateAny)) + s.Require().NoError(err) // commit upgrade store changes and update clients - suite.coordinator.CommitBlock(suite.chainB) + s.coordinator.CommitBlock(s.chainB) err = path.EndpointA.UpdateClient() - suite.Require().NoError(err) + s.Require().NoError(err) - cs, found := suite.chainA.App.GetIBCKeeper().ClientKeeper.GetClientState(suite.chainA.GetContext(), path.EndpointA.ClientID) - suite.Require().True(found) + cs, found := s.chainA.App.GetIBCKeeper().ClientKeeper.GetClientState(s.chainA.GetContext(), path.EndpointA.ClientID) + s.Require().True(found) tmCs, ok := cs.(*ibctm.ClientState) - suite.Require().True(ok) + s.Require().True(ok) - upgradedClientProof, _ = suite.chainB.QueryUpgradeProof(upgradetypes.UpgradedClientKey(int64(upgradeHeight.GetRevisionHeight())), tmCs.LatestHeight.GetRevisionHeight()) - upgradedConsensusStateProof, _ = suite.chainB.QueryUpgradeProof(upgradetypes.UpgradedConsStateKey(int64(upgradeHeight.GetRevisionHeight())), tmCs.LatestHeight.GetRevisionHeight()) + upgradedClientProof, _ = 
s.chainB.QueryUpgradeProof(upgradetypes.UpgradedClientKey(int64(upgradeHeight.GetRevisionHeight())), tmCs.LatestHeight.GetRevisionHeight()) + upgradedConsensusStateProof, _ = s.chainB.QueryUpgradeProof(upgradetypes.UpgradedConsStateKey(int64(upgradeHeight.GetRevisionHeight())), tmCs.LatestHeight.GetRevisionHeight()) }, expErr: nil, }, @@ -372,27 +372,27 @@ func (suite *KeeperTestSuite) TestUpgradeClient() { name: "client state not found", setup: func() { // upgrade height is at next block - upgradeHeight = clienttypes.NewHeight(1, uint64(suite.chainB.GetContext().BlockHeight()+1)) + upgradeHeight = clienttypes.NewHeight(1, uint64(s.chainB.GetContext().BlockHeight()+1)) // zero custom fields and store in upgrade store - err := suite.chainB.GetSimApp().UpgradeKeeper.SetUpgradedClient(suite.chainB.GetContext(), int64(upgradeHeight.GetRevisionHeight()), suite.chainB.Codec.MustMarshal(upgradedClientAny)) - suite.Require().NoError(err) - err = suite.chainB.GetSimApp().UpgradeKeeper.SetUpgradedConsensusState(suite.chainB.GetContext(), int64(upgradeHeight.GetRevisionHeight()), suite.chainB.Codec.MustMarshal(upgradedConsStateAny)) - suite.Require().NoError(err) + err := s.chainB.GetSimApp().UpgradeKeeper.SetUpgradedClient(s.chainB.GetContext(), int64(upgradeHeight.GetRevisionHeight()), s.chainB.Codec.MustMarshal(upgradedClientAny)) + s.Require().NoError(err) + err = s.chainB.GetSimApp().UpgradeKeeper.SetUpgradedConsensusState(s.chainB.GetContext(), int64(upgradeHeight.GetRevisionHeight()), s.chainB.Codec.MustMarshal(upgradedConsStateAny)) + s.Require().NoError(err) // commit upgrade store changes and update clients - suite.coordinator.CommitBlock(suite.chainB) + s.coordinator.CommitBlock(s.chainB) err = path.EndpointA.UpdateClient() - suite.Require().NoError(err) + s.Require().NoError(err) - cs, found := suite.chainA.App.GetIBCKeeper().ClientKeeper.GetClientState(suite.chainA.GetContext(), path.EndpointA.ClientID) - suite.Require().True(found) + cs, found := s.chainA.App.GetIBCKeeper().ClientKeeper.GetClientState(s.chainA.GetContext(), path.EndpointA.ClientID) + s.Require().True(found) tmCs, ok := cs.(*ibctm.ClientState) - suite.Require().True(ok) + s.Require().True(ok) - upgradedClientProof, _ = suite.chainB.QueryUpgradeProof(upgradetypes.UpgradedClientKey(int64(upgradeHeight.GetRevisionHeight())), tmCs.LatestHeight.GetRevisionHeight()) - upgradedConsensusStateProof, _ = suite.chainB.QueryUpgradeProof(upgradetypes.UpgradedConsStateKey(int64(upgradeHeight.GetRevisionHeight())), tmCs.LatestHeight.GetRevisionHeight()) + upgradedClientProof, _ = s.chainB.QueryUpgradeProof(upgradetypes.UpgradedClientKey(int64(upgradeHeight.GetRevisionHeight())), tmCs.LatestHeight.GetRevisionHeight()) + upgradedConsensusStateProof, _ = s.chainB.QueryUpgradeProof(upgradetypes.UpgradedConsStateKey(int64(upgradeHeight.GetRevisionHeight())), tmCs.LatestHeight.GetRevisionHeight()) path.EndpointA.ClientID = "wrongclientid" }, @@ -404,32 +404,32 @@ func (suite *KeeperTestSuite) TestUpgradeClient() { // client is frozen // upgrade height is at next block - upgradeHeight = clienttypes.NewHeight(1, uint64(suite.chainB.GetContext().BlockHeight()+1)) + upgradeHeight = clienttypes.NewHeight(1, uint64(s.chainB.GetContext().BlockHeight()+1)) // zero custom fields and store in upgrade store - err := suite.chainB.GetSimApp().UpgradeKeeper.SetUpgradedClient(suite.chainB.GetContext(), int64(upgradeHeight.GetRevisionHeight()), suite.chainB.Codec.MustMarshal(upgradedClientAny)) - suite.Require().NoError(err) - err = 
suite.chainB.GetSimApp().UpgradeKeeper.SetUpgradedConsensusState(suite.chainB.GetContext(), int64(upgradeHeight.GetRevisionHeight()), suite.chainB.Codec.MustMarshal(upgradedConsStateAny)) - suite.Require().NoError(err) + err := s.chainB.GetSimApp().UpgradeKeeper.SetUpgradedClient(s.chainB.GetContext(), int64(upgradeHeight.GetRevisionHeight()), s.chainB.Codec.MustMarshal(upgradedClientAny)) + s.Require().NoError(err) + err = s.chainB.GetSimApp().UpgradeKeeper.SetUpgradedConsensusState(s.chainB.GetContext(), int64(upgradeHeight.GetRevisionHeight()), s.chainB.Codec.MustMarshal(upgradedConsStateAny)) + s.Require().NoError(err) // commit upgrade store changes and update clients - suite.coordinator.CommitBlock(suite.chainB) + s.coordinator.CommitBlock(s.chainB) err = path.EndpointA.UpdateClient() - suite.Require().NoError(err) + s.Require().NoError(err) - cs, found := suite.chainA.App.GetIBCKeeper().ClientKeeper.GetClientState(suite.chainA.GetContext(), path.EndpointA.ClientID) - suite.Require().True(found) + cs, found := s.chainA.App.GetIBCKeeper().ClientKeeper.GetClientState(s.chainA.GetContext(), path.EndpointA.ClientID) + s.Require().True(found) tmCs, ok := cs.(*ibctm.ClientState) - suite.Require().True(ok) + s.Require().True(ok) - upgradedClientProof, _ = suite.chainB.QueryUpgradeProof(upgradetypes.UpgradedClientKey(int64(upgradeHeight.GetRevisionHeight())), tmCs.LatestHeight.GetRevisionHeight()) - upgradedConsensusStateProof, _ = suite.chainB.QueryUpgradeProof(upgradetypes.UpgradedConsStateKey(int64(upgradeHeight.GetRevisionHeight())), tmCs.LatestHeight.GetRevisionHeight()) + upgradedClientProof, _ = s.chainB.QueryUpgradeProof(upgradetypes.UpgradedClientKey(int64(upgradeHeight.GetRevisionHeight())), tmCs.LatestHeight.GetRevisionHeight()) + upgradedConsensusStateProof, _ = s.chainB.QueryUpgradeProof(upgradetypes.UpgradedConsStateKey(int64(upgradeHeight.GetRevisionHeight())), tmCs.LatestHeight.GetRevisionHeight()) // set frozen client in store tmClient, ok := cs.(*ibctm.ClientState) - suite.Require().True(ok) + s.Require().True(ok) tmClient.FrozenHeight = clienttypes.NewHeight(1, 1) - suite.chainA.App.GetIBCKeeper().ClientKeeper.SetClientState(suite.chainA.GetContext(), path.EndpointA.ClientID, tmClient) + s.chainA.App.GetIBCKeeper().ClientKeeper.SetClientState(s.chainA.GetContext(), path.EndpointA.ClientID, tmClient) }, expErr: errorsmod.Wrap(clienttypes.ErrClientNotActive, "cannot upgrade client (07-tendermint-2) with status Frozen"), }, @@ -437,102 +437,102 @@ func (suite *KeeperTestSuite) TestUpgradeClient() { name: "light client module VerifyUpgradeAndUpdateState fails", setup: func() { // upgrade height is at next block - upgradeHeight = clienttypes.NewHeight(1, uint64(suite.chainB.GetContext().BlockHeight()+1)) + upgradeHeight = clienttypes.NewHeight(1, uint64(s.chainB.GetContext().BlockHeight()+1)) // zero custom fields and store in upgrade store - err := suite.chainB.GetSimApp().UpgradeKeeper.SetUpgradedClient(suite.chainB.GetContext(), int64(upgradeHeight.GetRevisionHeight()), suite.chainB.Codec.MustMarshal(upgradedClientAny)) - suite.Require().NoError(err) - err = suite.chainB.GetSimApp().UpgradeKeeper.SetUpgradedConsensusState(suite.chainB.GetContext(), int64(upgradeHeight.GetRevisionHeight()), suite.chainB.Codec.MustMarshal(upgradedConsStateAny)) - suite.Require().NoError(err) + err := s.chainB.GetSimApp().UpgradeKeeper.SetUpgradedClient(s.chainB.GetContext(), int64(upgradeHeight.GetRevisionHeight()), s.chainB.Codec.MustMarshal(upgradedClientAny)) + s.Require().NoError(err) + 
err = s.chainB.GetSimApp().UpgradeKeeper.SetUpgradedConsensusState(s.chainB.GetContext(), int64(upgradeHeight.GetRevisionHeight()), s.chainB.Codec.MustMarshal(upgradedConsStateAny)) + s.Require().NoError(err) // change upgradedClient client-specified parameters upgradedClient.ChainId = "wrongchainID" upgradedClientAny, err = codectypes.NewAnyWithValue(upgradedClient) - suite.Require().NoError(err) + s.Require().NoError(err) - suite.coordinator.CommitBlock(suite.chainB) + s.coordinator.CommitBlock(s.chainB) err = path.EndpointA.UpdateClient() - suite.Require().NoError(err) + s.Require().NoError(err) - cs, found := suite.chainA.App.GetIBCKeeper().ClientKeeper.GetClientState(suite.chainA.GetContext(), path.EndpointA.ClientID) - suite.Require().True(found) + cs, found := s.chainA.App.GetIBCKeeper().ClientKeeper.GetClientState(s.chainA.GetContext(), path.EndpointA.ClientID) + s.Require().True(found) tmCs, ok := cs.(*ibctm.ClientState) - suite.Require().True(ok) + s.Require().True(ok) - upgradedClientProof, _ = suite.chainB.QueryUpgradeProof(upgradetypes.UpgradedClientKey(int64(upgradeHeight.GetRevisionHeight())), tmCs.LatestHeight.GetRevisionHeight()) - upgradedConsensusStateProof, _ = suite.chainB.QueryUpgradeProof(upgradetypes.UpgradedConsStateKey(int64(upgradeHeight.GetRevisionHeight())), tmCs.LatestHeight.GetRevisionHeight()) + upgradedClientProof, _ = s.chainB.QueryUpgradeProof(upgradetypes.UpgradedClientKey(int64(upgradeHeight.GetRevisionHeight())), tmCs.LatestHeight.GetRevisionHeight()) + upgradedConsensusStateProof, _ = s.chainB.QueryUpgradeProof(upgradetypes.UpgradedConsStateKey(int64(upgradeHeight.GetRevisionHeight())), tmCs.LatestHeight.GetRevisionHeight()) }, expErr: errorsmod.Wrap(commitmenttypes.ErrInvalidProof, "failed to verify membership proof at index 0: provided value doesn't match proof"), }, } for _, tc := range testCases { - suite.Run(tc.name, func() { - path = ibctesting.NewPath(suite.chainA, suite.chainB) + s.Run(tc.name, func() { + path = ibctesting.NewPath(s.chainA, s.chainB) path.SetupClients() clientState, ok := path.EndpointA.GetClientState().(*ibctm.ClientState) - suite.Require().True(ok) + s.Require().True(ok) revisionNumber := clienttypes.ParseChainID(clientState.ChainId) newChainID, err := clienttypes.SetRevisionNumber(clientState.ChainId, revisionNumber+1) - suite.Require().NoError(err) + s.Require().NoError(err) upgradedClient = ibctm.NewClientState(newChainID, ibctm.DefaultTrustLevel, trustingPeriod, ubdPeriod+trustingPeriod, maxClockDrift, clienttypes.NewHeight(revisionNumber+1, clientState.LatestHeight.GetRevisionHeight()+1), commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath) upgradedClient = upgradedClient.ZeroCustomFields() upgradedClientAny, err = codectypes.NewAnyWithValue(upgradedClient) - suite.Require().NoError(err) + s.Require().NoError(err) upgradedConsState = &ibctm.ConsensusState{NextValidatorsHash: []byte("nextValsHash")} upgradedConsStateAny, err = codectypes.NewAnyWithValue(upgradedConsState) - suite.Require().NoError(err) + s.Require().NoError(err) tc.setup() - err = suite.chainA.App.GetIBCKeeper().ClientKeeper.UpgradeClient(suite.chainA.GetContext(), path.EndpointA.ClientID, upgradedClientAny.Value, upgradedConsStateAny.Value, upgradedClientProof, upgradedConsensusStateProof) + err = s.chainA.App.GetIBCKeeper().ClientKeeper.UpgradeClient(s.chainA.GetContext(), path.EndpointA.ClientID, upgradedClientAny.Value, upgradedConsStateAny.Value, upgradedClientProof, upgradedConsensusStateProof) if tc.expErr == nil { - suite.Require().NoError(err, 
"verify upgrade failed on valid case: %s", tc.name) + s.Require().NoError(err, "verify upgrade failed on valid case: %s", tc.name) } else { - suite.Require().Error(err, "verify upgrade passed on invalid case: %s", tc.name) - suite.Require().ErrorIs(err, tc.expErr) + s.Require().Error(err, "verify upgrade passed on invalid case: %s", tc.name) + s.Require().ErrorIs(err, tc.expErr) } }) } } -func (suite *KeeperTestSuite) TestUpdateClientEventEmission() { - path := ibctesting.NewPath(suite.chainA, suite.chainB) +func (s *KeeperTestSuite) TestUpdateClientEventEmission() { + path := ibctesting.NewPath(s.chainA, s.chainB) path.SetupClients() tmClientState, ok := path.EndpointA.GetClientState().(*ibctm.ClientState) - suite.Require().True(ok) + s.Require().True(ok) trustedHeight := tmClientState.LatestHeight header, err := path.EndpointA.Counterparty.Chain.IBCClientHeader(path.EndpointA.Counterparty.Chain.LatestCommittedHeader, trustedHeight) - suite.Require().NoError(err) + s.Require().NoError(err) msg, err := clienttypes.NewMsgUpdateClient( path.EndpointA.ClientID, header, - suite.chainA.SenderAccount.GetAddress().String(), + s.chainA.SenderAccount.GetAddress().String(), ) - suite.Require().NoError(err) + s.Require().NoError(err) - result, err := suite.chainA.SendMsgs(msg) + result, err := s.chainA.SendMsgs(msg) // check that update client event was emitted - suite.Require().NoError(err) + s.Require().NoError(err) var event abci.Event for _, e := range result.Events { if e.Type == clienttypes.EventTypeUpdateClient { event = e } } - suite.Require().NotNil(event) + s.Require().NotNil(event) } -func (suite *KeeperTestSuite) TestRecoverClient() { +func (s *KeeperTestSuite) TestRecoverClient() { var ( subject, substitute string subjectClientState, substituteClientState exported.ClientState @@ -552,18 +552,18 @@ func (suite *KeeperTestSuite) TestRecoverClient() { "success, subject and substitute use different revision number", func() { tmClientState, ok := substituteClientState.(*ibctm.ClientState) - suite.Require().True(ok) - consState, found := suite.chainA.App.GetIBCKeeper().ClientKeeper.GetClientConsensusState(suite.chainA.GetContext(), substitute, tmClientState.LatestHeight) - suite.Require().True(found) + s.Require().True(ok) + consState, found := s.chainA.App.GetIBCKeeper().ClientKeeper.GetClientConsensusState(s.chainA.GetContext(), substitute, tmClientState.LatestHeight) + s.Require().True(found) newRevisionNumber := tmClientState.LatestHeight.GetRevisionNumber() + 1 tmClientState.LatestHeight = clienttypes.NewHeight(newRevisionNumber, tmClientState.LatestHeight.GetRevisionHeight()) - suite.chainA.App.GetIBCKeeper().ClientKeeper.SetClientConsensusState(suite.chainA.GetContext(), substitute, tmClientState.LatestHeight, consState) - clientStore := suite.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(suite.chainA.GetContext(), substitute) + s.chainA.App.GetIBCKeeper().ClientKeeper.SetClientConsensusState(s.chainA.GetContext(), substitute, tmClientState.LatestHeight, consState) + clientStore := s.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(s.chainA.GetContext(), substitute) ibctm.SetProcessedTime(clientStore, tmClientState.LatestHeight, 100) ibctm.SetProcessedHeight(clientStore, tmClientState.LatestHeight, clienttypes.NewHeight(0, 1)) - suite.chainA.App.GetIBCKeeper().ClientKeeper.SetClientState(suite.chainA.GetContext(), substitute, tmClientState) + s.chainA.App.GetIBCKeeper().ClientKeeper.SetClientState(s.chainA.GetContext(), substitute, tmClientState) }, nil, }, @@ -578,10 +578,10 @@ 
func (suite *KeeperTestSuite) TestRecoverClient() { "subject is Active", func() { tmClientState, ok := subjectClientState.(*ibctm.ClientState) - suite.Require().True(ok) + s.Require().True(ok) // Set FrozenHeight to zero to ensure client is reported as Active tmClientState.FrozenHeight = clienttypes.ZeroHeight() - suite.chainA.App.GetIBCKeeper().ClientKeeper.SetClientState(suite.chainA.GetContext(), subject, tmClientState) + s.chainA.App.GetIBCKeeper().ClientKeeper.SetClientState(s.chainA.GetContext(), subject, tmClientState) }, clienttypes.ErrInvalidRecoveryClient, }, @@ -596,9 +596,9 @@ func (suite *KeeperTestSuite) TestRecoverClient() { "subject and substitute have equal latest height", func() { tmClientState, ok := subjectClientState.(*ibctm.ClientState) - suite.Require().True(ok) + s.Require().True(ok) tmClientState.LatestHeight = substituteClientState.(*ibctm.ClientState).LatestHeight - suite.chainA.App.GetIBCKeeper().ClientKeeper.SetClientState(suite.chainA.GetContext(), subject, tmClientState) + s.chainA.App.GetIBCKeeper().ClientKeeper.SetClientState(s.chainA.GetContext(), subject, tmClientState) }, clienttypes.ErrInvalidHeight, }, @@ -606,10 +606,10 @@ func (suite *KeeperTestSuite) TestRecoverClient() { "subject height is greater than substitute height", func() { tmClientState, ok := subjectClientState.(*ibctm.ClientState) - suite.Require().True(ok) + s.Require().True(ok) tmClientState.LatestHeight, ok = substituteClientState.(*ibctm.ClientState).LatestHeight.Increment().(clienttypes.Height) - suite.Require().True(ok) - suite.chainA.App.GetIBCKeeper().ClientKeeper.SetClientState(suite.chainA.GetContext(), subject, tmClientState) + s.Require().True(ok) + s.chainA.App.GetIBCKeeper().ClientKeeper.SetClientState(s.chainA.GetContext(), subject, tmClientState) }, clienttypes.ErrInvalidHeight, }, @@ -617,9 +617,9 @@ func (suite *KeeperTestSuite) TestRecoverClient() { "substitute is frozen", func() { tmClientState, ok := substituteClientState.(*ibctm.ClientState) - suite.Require().True(ok) + s.Require().True(ok) tmClientState.FrozenHeight = clienttypes.NewHeight(0, 1) - suite.chainA.App.GetIBCKeeper().ClientKeeper.SetClientState(suite.chainA.GetContext(), substitute, tmClientState) + s.chainA.App.GetIBCKeeper().ClientKeeper.SetClientState(s.chainA.GetContext(), substitute, tmClientState) }, clienttypes.ErrClientNotActive, }, @@ -627,46 +627,46 @@ func (suite *KeeperTestSuite) TestRecoverClient() { "light client module RecoverClient fails, substitute client trust level doesn't match subject client trust level", func() { tmClientState, ok := substituteClientState.(*ibctm.ClientState) - suite.Require().True(ok) + s.Require().True(ok) tmClientState.UnbondingPeriod += time.Minute - suite.chainA.App.GetIBCKeeper().ClientKeeper.SetClientState(suite.chainA.GetContext(), substitute, tmClientState) + s.chainA.App.GetIBCKeeper().ClientKeeper.SetClientState(s.chainA.GetContext(), substitute, tmClientState) }, clienttypes.ErrInvalidSubstitute, }, } for _, tc := range testCases { - suite.Run(tc.msg, func() { - suite.SetupTest() // reset + s.Run(tc.msg, func() { + s.SetupTest() // reset - subjectPath := ibctesting.NewPath(suite.chainA, suite.chainB) + subjectPath := ibctesting.NewPath(s.chainA, s.chainB) subjectPath.SetupClients() subject = subjectPath.EndpointA.ClientID - subjectClientState = suite.chainA.GetClientState(subject) + subjectClientState = s.chainA.GetClientState(subject) - substitutePath := ibctesting.NewPath(suite.chainA, suite.chainB) + substitutePath := ibctesting.NewPath(s.chainA, 
s.chainB) substitutePath.SetupClients() substitute = substitutePath.EndpointA.ClientID // update substitute twice err := substitutePath.EndpointA.UpdateClient() - suite.Require().NoError(err) + s.Require().NoError(err) err = substitutePath.EndpointA.UpdateClient() - suite.Require().NoError(err) - substituteClientState = suite.chainA.GetClientState(substitute) + s.Require().NoError(err) + substituteClientState = s.chainA.GetClientState(substitute) tmClientState, ok := subjectClientState.(*ibctm.ClientState) - suite.Require().True(ok) + s.Require().True(ok) tmClientState.FrozenHeight = tmClientState.LatestHeight - suite.chainA.App.GetIBCKeeper().ClientKeeper.SetClientState(suite.chainA.GetContext(), subject, tmClientState) + s.chainA.App.GetIBCKeeper().ClientKeeper.SetClientState(s.chainA.GetContext(), subject, tmClientState) tc.malleate() - ctx := suite.chainA.GetContext() - err = suite.chainA.App.GetIBCKeeper().ClientKeeper.RecoverClient(ctx, subject, substitute) + ctx := s.chainA.GetContext() + err = s.chainA.App.GetIBCKeeper().ClientKeeper.RecoverClient(ctx, subject, substitute) if tc.expErr == nil { - suite.Require().NoError(err) + s.Require().NoError(err) expectedEvents := sdk.Events{ sdk.NewEvent( @@ -677,16 +677,15 @@ func (suite *KeeperTestSuite) TestRecoverClient() { }.ToABCIEvents() expectedEvents = sdk.MarkEventsToIndex(expectedEvents, map[string]struct{}{}) - ibctesting.AssertEvents(&suite.Suite, expectedEvents, ctx.EventManager().Events().ToABCIEvents()) + ibctesting.AssertEvents(&s.Suite, expectedEvents, ctx.EventManager().Events().ToABCIEvents()) // Assert that client status is now Active - lightClientModule, err := suite.chainA.App.GetIBCKeeper().ClientKeeper.Route(suite.chainA.GetContext(), subjectPath.EndpointA.ClientID) - suite.Require().NoError(err) - suite.Require().Equal(lightClientModule.Status(suite.chainA.GetContext(), subjectPath.EndpointA.ClientID), exported.Active) - + lightClientModule, err := s.chainA.App.GetIBCKeeper().ClientKeeper.Route(s.chainA.GetContext(), subjectPath.EndpointA.ClientID) + s.Require().NoError(err) + s.Require().Equal(lightClientModule.Status(s.chainA.GetContext(), subjectPath.EndpointA.ClientID), exported.Active) } else { - suite.Require().Error(err) - suite.Require().ErrorIs(err, tc.expErr) + s.Require().Error(err) + s.Require().ErrorIs(err, tc.expErr) } }) } diff --git a/modules/core/02-client/keeper/events_test.go b/modules/core/02-client/keeper/events_test.go index 8b560be86fd..1f55e672abf 100644 --- a/modules/core/02-client/keeper/events_test.go +++ b/modules/core/02-client/keeper/events_test.go @@ -9,17 +9,17 @@ import ( ibctesting "github.com/cosmos/ibc-go/v10/testing" ) -func (suite *KeeperTestSuite) TestMsgCreateClientEvents() { - suite.SetupTest() - path := ibctesting.NewPath(suite.chainA, suite.chainB) +func (s *KeeperTestSuite) TestMsgCreateClientEvents() { + s.SetupTest() + path := ibctesting.NewPath(s.chainA, s.chainB) path.EndpointA.Counterparty.Chain.NextBlock() tmConfig, ok := path.EndpointA.ClientConfig.(*ibctesting.TendermintConfig) - suite.Require().True(ok) + s.Require().True(ok) height, ok := path.EndpointA.Counterparty.Chain.LatestCommittedHeader.GetHeight().(clienttypes.Height) - suite.Require().True(ok) + s.Require().True(ok) clientState := ibctm.NewClientState( path.EndpointA.Counterparty.Chain.ChainID, tmConfig.TrustLevel, tmConfig.TrustingPeriod, tmConfig.UnbondingPeriod, tmConfig.MaxClockDrift, @@ -29,11 +29,11 @@ func (suite *KeeperTestSuite) TestMsgCreateClientEvents() { msg, err := 
clienttypes.NewMsgCreateClient( clientState, consensusState, path.EndpointA.Chain.SenderAccount.GetAddress().String(), ) - suite.Require().NoError(err) + s.Require().NoError(err) - res, err := suite.chainA.SendMsgs(msg) - suite.Require().NoError(err) - suite.Require().NotNil(res) + res, err := s.chainA.SendMsgs(msg) + s.Require().NoError(err) + s.Require().NotNil(res) events := res.Events expectedEvents := sdk.Events{ @@ -47,35 +47,35 @@ func (suite *KeeperTestSuite) TestMsgCreateClientEvents() { var indexSet map[string]struct{} expectedEvents = sdk.MarkEventsToIndex(expectedEvents, indexSet) - ibctesting.AssertEvents(&suite.Suite, expectedEvents, events) + ibctesting.AssertEvents(&s.Suite, expectedEvents, events) } -func (suite *KeeperTestSuite) TestMsgUpdateClientEvents() { - suite.SetupTest() - path := ibctesting.NewPath(suite.chainA, suite.chainB) +func (s *KeeperTestSuite) TestMsgUpdateClientEvents() { + s.SetupTest() + path := ibctesting.NewPath(s.chainA, s.chainB) - suite.Require().NoError(path.EndpointA.CreateClient()) + s.Require().NoError(path.EndpointA.CreateClient()) - suite.chainB.Coordinator.CommitBlock(suite.chainB) + s.chainB.Coordinator.CommitBlock(s.chainB) clientState, ok := path.EndpointA.GetClientState().(*ibctm.ClientState) - suite.Require().True(ok) + s.Require().True(ok) trustedHeight := clientState.LatestHeight - header, err := suite.chainB.IBCClientHeader(suite.chainB.LatestCommittedHeader, trustedHeight) - suite.Require().NoError(err) - suite.Require().NotNil(header) + header, err := s.chainB.IBCClientHeader(s.chainB.LatestCommittedHeader, trustedHeight) + s.Require().NoError(err) + s.Require().NotNil(header) msg, err := clienttypes.NewMsgUpdateClient( ibctesting.FirstClientID, header, path.EndpointA.Chain.SenderAccount.GetAddress().String(), ) - suite.Require().NoError(err) + s.Require().NoError(err) - res, err := suite.chainA.SendMsgs(msg) - suite.Require().NoError(err) - suite.Require().NotNil(res) + res, err := s.chainA.SendMsgs(msg) + s.Require().NoError(err) + s.Require().NotNil(res) events := res.Events expectedEvents := sdk.Events{ @@ -90,5 +90,5 @@ func (suite *KeeperTestSuite) TestMsgUpdateClientEvents() { var indexSet map[string]struct{} expectedEvents = sdk.MarkEventsToIndex(expectedEvents, indexSet) - ibctesting.AssertEvents(&suite.Suite, expectedEvents, events) + ibctesting.AssertEvents(&s.Suite, expectedEvents, events) } diff --git a/modules/core/02-client/keeper/grpc_query_test.go b/modules/core/02-client/keeper/grpc_query_test.go index 0254617c560..97c18d49d10 100644 --- a/modules/core/02-client/keeper/grpc_query_test.go +++ b/modules/core/02-client/keeper/grpc_query_test.go @@ -23,7 +23,7 @@ import ( "github.com/cosmos/ibc-go/v10/testing/mock" ) -func (suite *KeeperTestSuite) TestQueryClientState() { +func (s *KeeperTestSuite) TestQueryClientState() { var ( req *types.QueryClientStateRequest expClientState *codectypes.Any @@ -63,12 +63,12 @@ func (suite *KeeperTestSuite) TestQueryClientState() { { "success", func() { - path := ibctesting.NewPath(suite.chainA, suite.chainB) + path := ibctesting.NewPath(s.chainA, s.chainB) path.SetupClients() var err error expClientState, err = types.PackClientState(path.EndpointA.GetClientState()) - suite.Require().NoError(err) + s.Require().NoError(err) req = &types.QueryClientStateRequest{ ClientId: path.EndpointA.ClientID, @@ -79,32 +79,32 @@ func (suite *KeeperTestSuite) TestQueryClientState() { } for _, tc := range testCases { - suite.Run(fmt.Sprintf("Case %s", tc.msg), func() { - suite.SetupTest() // reset 
+ s.Run(fmt.Sprintf("Case %s", tc.msg), func() { + s.SetupTest() // reset tc.malleate() - ctx := suite.chainA.GetContext() + ctx := s.chainA.GetContext() - queryServer := keeper.NewQueryServer(suite.chainA.GetSimApp().IBCKeeper.ClientKeeper) + queryServer := keeper.NewQueryServer(s.chainA.GetSimApp().IBCKeeper.ClientKeeper) res, err := queryServer.ClientState(ctx, req) if tc.expErr == nil { - suite.Require().NoError(err) - suite.Require().NotNil(res) - suite.Require().Equal(expClientState, res.ClientState) + s.Require().NoError(err) + s.Require().NotNil(res) + s.Require().Equal(expClientState, res.ClientState) // ensure UnpackInterfaces is defined cachedValue := res.ClientState.GetCachedValue() - suite.Require().NotNil(cachedValue) + s.Require().NotNil(cachedValue) } else { - suite.Require().Error(err) - suite.Require().ErrorIs(err, tc.expErr) + s.Require().Error(err) + s.Require().ErrorIs(err, tc.expErr) } }) } } -func (suite *KeeperTestSuite) TestQueryClientStates() { +func (s *KeeperTestSuite) TestQueryClientStates() { var ( req *types.QueryClientStatesRequest expClientStates = types.IdentifiedClientStates{} @@ -133,10 +133,10 @@ func (suite *KeeperTestSuite) TestQueryClientStates() { { "success", func() { - path1 := ibctesting.NewPath(suite.chainA, suite.chainB) + path1 := ibctesting.NewPath(s.chainA, s.chainB) path1.SetupClients() - path2 := ibctesting.NewPath(suite.chainA, suite.chainB) + path2 := ibctesting.NewPath(s.chainA, s.chainB) path2.SetupClients() clientStateA1 := path1.EndpointA.GetClientState() @@ -159,27 +159,27 @@ func (suite *KeeperTestSuite) TestQueryClientStates() { } for _, tc := range testCases { - suite.Run(fmt.Sprintf("Case %s", tc.msg), func() { - suite.SetupTest() // reset + s.Run(fmt.Sprintf("Case %s", tc.msg), func() { + s.SetupTest() // reset tc.malleate() - ctx := suite.chainA.GetContext() - queryServer := keeper.NewQueryServer(suite.chainA.GetSimApp().IBCKeeper.ClientKeeper) + ctx := s.chainA.GetContext() + queryServer := keeper.NewQueryServer(s.chainA.GetSimApp().IBCKeeper.ClientKeeper) res, err := queryServer.ClientStates(ctx, req) if tc.expErr == nil { - suite.Require().NoError(err) - suite.Require().NotNil(res) - suite.Require().Equal(expClientStates.Sort(), res.ClientStates) - suite.Require().Equal(len(expClientStates), int(res.Pagination.Total)) + s.Require().NoError(err) + s.Require().NotNil(res) + s.Require().Equal(expClientStates.Sort(), res.ClientStates) + s.Require().Equal(len(expClientStates), int(res.Pagination.Total)) } else { - suite.Require().Error(err) - suite.Require().ErrorIs(err, tc.expErr) + s.Require().Error(err) + s.Require().ErrorIs(err, tc.expErr) } }) } } -func (suite *KeeperTestSuite) TestQueryConsensusState() { +func (s *KeeperTestSuite) TestQueryConsensusState() { var ( req *types.QueryConsensusStateRequest expConsensusState *codectypes.Any @@ -229,13 +229,13 @@ func (suite *KeeperTestSuite) TestQueryConsensusState() { { "success latest height", func() { - path := ibctesting.NewPath(suite.chainA, suite.chainB) + path := ibctesting.NewPath(s.chainA, s.chainB) path.SetupClients() cs := path.EndpointA.GetConsensusState(path.EndpointA.GetClientLatestHeight()) var err error expConsensusState, err = types.PackConsensusState(cs) - suite.Require().NoError(err) + s.Require().NoError(err) req = &types.QueryConsensusStateRequest{ ClientId: path.EndpointA.ClientID, @@ -247,18 +247,18 @@ func (suite *KeeperTestSuite) TestQueryConsensusState() { { "success with height", func() { - path := ibctesting.NewPath(suite.chainA, suite.chainB) + path 
:= ibctesting.NewPath(s.chainA, s.chainB) path.SetupClients() height := path.EndpointA.GetClientLatestHeight() cs := path.EndpointA.GetConsensusState(height) var err error expConsensusState, err = types.PackConsensusState(cs) - suite.Require().NoError(err) + s.Require().NoError(err) // update client to new height err = path.EndpointA.UpdateClient() - suite.Require().NoError(err) + s.Require().NoError(err) req = &types.QueryConsensusStateRequest{ ClientId: path.EndpointA.ClientID, @@ -271,31 +271,31 @@ func (suite *KeeperTestSuite) TestQueryConsensusState() { } for _, tc := range testCases { - suite.Run(fmt.Sprintf("Case %s", tc.msg), func() { - suite.SetupTest() // reset + s.Run(fmt.Sprintf("Case %s", tc.msg), func() { + s.SetupTest() // reset tc.malleate() - ctx := suite.chainA.GetContext() - queryServer := keeper.NewQueryServer(suite.chainA.GetSimApp().IBCKeeper.ClientKeeper) + ctx := s.chainA.GetContext() + queryServer := keeper.NewQueryServer(s.chainA.GetSimApp().IBCKeeper.ClientKeeper) res, err := queryServer.ConsensusState(ctx, req) if tc.expErr == nil { - suite.Require().NoError(err) - suite.Require().NotNil(res) - suite.Require().Equal(expConsensusState, res.ConsensusState) + s.Require().NoError(err) + s.Require().NotNil(res) + s.Require().Equal(expConsensusState, res.ConsensusState) // ensure UnpackInterfaces is defined cachedValue := res.ConsensusState.GetCachedValue() - suite.Require().NotNil(cachedValue) + s.Require().NotNil(cachedValue) } else { - suite.Require().Error(err) - suite.Require().ErrorIs(err, tc.expErr) + s.Require().Error(err) + s.Require().ErrorIs(err, tc.expErr) } }) } } -func (suite *KeeperTestSuite) TestQueryConsensusStates() { +func (s *KeeperTestSuite) TestQueryConsensusStates() { var ( req *types.QueryConsensusStatesRequest expConsensusStates []types.ConsensusStateWithHeight @@ -331,11 +331,11 @@ func (suite *KeeperTestSuite) TestQueryConsensusStates() { { "success", func() { - path := ibctesting.NewPath(suite.chainA, suite.chainB) + path := ibctesting.NewPath(s.chainA, s.chainB) path.SetupClients() height1, ok := path.EndpointA.GetClientLatestHeight().(types.Height) - suite.Require().True(ok) + s.Require().True(ok) expConsensusStates = append( expConsensusStates, types.NewConsensusStateWithHeight( @@ -344,9 +344,9 @@ func (suite *KeeperTestSuite) TestQueryConsensusStates() { )) err := path.EndpointA.UpdateClient() - suite.Require().NoError(err) + s.Require().NoError(err) height2, ok := path.EndpointA.GetClientLatestHeight().(types.Height) - suite.Require().True(ok) + s.Require().True(ok) expConsensusStates = append( expConsensusStates, types.NewConsensusStateWithHeight( @@ -374,34 +374,34 @@ func (suite *KeeperTestSuite) TestQueryConsensusStates() { } for _, tc := range testCases { - suite.Run(fmt.Sprintf("Case %s", tc.msg), func() { - suite.SetupTest() // reset + s.Run(fmt.Sprintf("Case %s", tc.msg), func() { + s.SetupTest() // reset tc.malleate() - ctx := suite.chainA.GetContext() - queryServer := keeper.NewQueryServer(suite.chainA.GetSimApp().IBCKeeper.ClientKeeper) + ctx := s.chainA.GetContext() + queryServer := keeper.NewQueryServer(s.chainA.GetSimApp().IBCKeeper.ClientKeeper) res, err := queryServer.ConsensusStates(ctx, req) if tc.expErr == nil { - suite.Require().NoError(err) - suite.Require().NotNil(res) - suite.Require().Equal(len(expConsensusStates), len(res.ConsensusStates)) + s.Require().NoError(err) + s.Require().NotNil(res) + s.Require().Equal(len(expConsensusStates), len(res.ConsensusStates)) for i := range expConsensusStates { - 
suite.Require().NotNil(res.ConsensusStates[i]) - suite.Require().Equal(expConsensusStates[i], res.ConsensusStates[i]) + s.Require().NotNil(res.ConsensusStates[i]) + s.Require().Equal(expConsensusStates[i], res.ConsensusStates[i]) // ensure UnpackInterfaces is defined cachedValue := res.ConsensusStates[i].ConsensusState.GetCachedValue() - suite.Require().NotNil(cachedValue) + s.Require().NotNil(cachedValue) } } else { - suite.Require().Error(err) - suite.Require().ErrorIs(err, tc.expErr) + s.Require().Error(err) + s.Require().ErrorIs(err, tc.expErr) } }) } } -func (suite *KeeperTestSuite) TestQueryConsensusStateHeights() { +func (s *KeeperTestSuite) TestQueryConsensusStateHeights() { var ( req *types.QueryConsensusStateHeightsRequest expConsensusStateHeights []types.Height @@ -437,13 +437,13 @@ func (suite *KeeperTestSuite) TestQueryConsensusStateHeights() { { "success: returns consensus heights", func() { - path := ibctesting.NewPath(suite.chainA, suite.chainB) + path := ibctesting.NewPath(s.chainA, s.chainB) path.SetupClients() expConsensusStateHeights = append(expConsensusStateHeights, path.EndpointA.GetClientLatestHeight().(types.Height)) err := path.EndpointA.UpdateClient() - suite.Require().NoError(err) + s.Require().NoError(err) expConsensusStateHeights = append(expConsensusStateHeights, path.EndpointA.GetClientLatestHeight().(types.Height)) @@ -467,31 +467,31 @@ func (suite *KeeperTestSuite) TestQueryConsensusStateHeights() { } for _, tc := range testCases { - suite.Run(fmt.Sprintf("Case %s", tc.msg), func() { - suite.SetupTest() // reset + s.Run(fmt.Sprintf("Case %s", tc.msg), func() { + s.SetupTest() // reset tc.malleate() - ctx := suite.chainA.GetContext() - queryServer := keeper.NewQueryServer(suite.chainA.GetSimApp().IBCKeeper.ClientKeeper) + ctx := s.chainA.GetContext() + queryServer := keeper.NewQueryServer(s.chainA.GetSimApp().IBCKeeper.ClientKeeper) res, err := queryServer.ConsensusStateHeights(ctx, req) if tc.expErr == nil { - suite.Require().NoError(err) - suite.Require().NotNil(res) - suite.Require().Equal(len(expConsensusStateHeights), len(res.ConsensusStateHeights)) + s.Require().NoError(err) + s.Require().NotNil(res) + s.Require().Equal(len(expConsensusStateHeights), len(res.ConsensusStateHeights)) for i := range expConsensusStateHeights { - suite.Require().NotNil(res.ConsensusStateHeights[i]) - suite.Require().Equal(expConsensusStateHeights[i], res.ConsensusStateHeights[i]) + s.Require().NotNil(res.ConsensusStateHeights[i]) + s.Require().Equal(expConsensusStateHeights[i], res.ConsensusStateHeights[i]) } } else { - suite.Require().Error(err) - suite.Require().ErrorIs(err, tc.expErr) + s.Require().Error(err) + s.Require().ErrorIs(err, tc.expErr) } }) } } -func (suite *KeeperTestSuite) TestQueryClientStatus() { +func (s *KeeperTestSuite) TestQueryClientStatus() { var req *types.QueryClientStatusRequest testCases := []struct { @@ -526,7 +526,7 @@ func (suite *KeeperTestSuite) TestQueryClientStatus() { { "Active client status", func() { - path := ibctesting.NewPath(suite.chainA, suite.chainB) + path := ibctesting.NewPath(s.chainA, s.chainB) path.SetupClients() req = &types.QueryClientStatusRequest{ ClientId: path.EndpointA.ClientID, @@ -537,14 +537,14 @@ func (suite *KeeperTestSuite) TestQueryClientStatus() { { "Unknown client status", func() { - path := ibctesting.NewPath(suite.chainA, suite.chainB) + path := ibctesting.NewPath(s.chainA, s.chainB) path.SetupClients() clientState, ok := path.EndpointA.GetClientState().(*ibctm.ClientState) - suite.Require().True(ok) + 
s.Require().True(ok) // increment latest height so no consensus state is stored clientState.LatestHeight, ok = clientState.LatestHeight.Increment().(types.Height) - suite.Require().True(ok) + s.Require().True(ok) path.EndpointA.SetClientState(clientState) req = &types.QueryClientStatusRequest{ @@ -556,10 +556,10 @@ func (suite *KeeperTestSuite) TestQueryClientStatus() { { "Frozen client status", func() { - path := ibctesting.NewPath(suite.chainA, suite.chainB) + path := ibctesting.NewPath(s.chainA, s.chainB) path.SetupClients() clientState, ok := path.EndpointA.GetClientState().(*ibctm.ClientState) - suite.Require().True(ok) + s.Require().True(ok) clientState.FrozenHeight = types.NewHeight(0, 1) path.EndpointA.SetClientState(clientState) @@ -573,27 +573,27 @@ func (suite *KeeperTestSuite) TestQueryClientStatus() { } for _, tc := range testCases { - suite.Run(fmt.Sprintf("Case %s", tc.msg), func() { - suite.SetupTest() // reset + s.Run(fmt.Sprintf("Case %s", tc.msg), func() { + s.SetupTest() // reset tc.malleate() - ctx := suite.chainA.GetContext() - queryServer := keeper.NewQueryServer(suite.chainA.GetSimApp().IBCKeeper.ClientKeeper) + ctx := s.chainA.GetContext() + queryServer := keeper.NewQueryServer(s.chainA.GetSimApp().IBCKeeper.ClientKeeper) res, err := queryServer.ClientStatus(ctx, req) if tc.expErr == nil { - suite.Require().NoError(err) - suite.Require().NotNil(res) - suite.Require().Equal(tc.expStatus, res.Status) + s.Require().NoError(err) + s.Require().NotNil(res) + s.Require().Equal(tc.expStatus, res.Status) } else { - suite.Require().Error(err) - suite.Require().ErrorIs(err, tc.expErr) + s.Require().Error(err) + s.Require().ErrorIs(err, tc.expErr) } }) } } -func (suite *KeeperTestSuite) TestQueryUpgradedClientState() { +func (s *KeeperTestSuite) TestQueryUpgradedClientState() { var ( req *types.QueryUpgradedClientStateRequest path *ibctesting.Path @@ -613,7 +613,7 @@ func (suite *KeeperTestSuite) TestQueryUpgradedClientState() { { "success", func() { - validAuthority := suite.chainA.App.GetIBCKeeper().GetAuthority() + validAuthority := s.chainA.App.GetIBCKeeper().GetAuthority() // update trusting period clientState := path.EndpointA.GetClientState() @@ -624,15 +624,15 @@ func (suite *KeeperTestSuite) TestQueryUpgradedClientState() { upgradePlan, clientState, ) - suite.Require().NoError(err) + s.Require().NoError(err) - resp, err := suite.chainA.App.GetIBCKeeper().IBCSoftwareUpgrade(suite.chainA.GetContext(), msg) - suite.Require().NoError(err) - suite.Require().NotNil(resp) + resp, err := s.chainA.App.GetIBCKeeper().IBCSoftwareUpgrade(s.chainA.GetContext(), msg) + s.Require().NoError(err) + s.Require().NotNil(resp) var ok bool expClientState, ok = clientState.(*ibctm.ClientState) - suite.Require().True(ok) + s.Require().True(ok) }, nil, }, @@ -653,56 +653,56 @@ func (suite *KeeperTestSuite) TestQueryUpgradedClientState() { { "no upgraded client set in store", func() { - err := suite.chainA.GetSimApp().UpgradeKeeper.ScheduleUpgrade(suite.chainA.GetContext(), upgradePlan) - suite.Require().NoError(err) + err := s.chainA.GetSimApp().UpgradeKeeper.ScheduleUpgrade(s.chainA.GetContext(), upgradePlan) + s.Require().NoError(err) }, status.Error(codes.NotFound, "upgraded client not found"), }, { "invalid upgraded client state", func() { - err := suite.chainA.GetSimApp().UpgradeKeeper.ScheduleUpgrade(suite.chainA.GetContext(), upgradePlan) - suite.Require().NoError(err) + err := s.chainA.GetSimApp().UpgradeKeeper.ScheduleUpgrade(s.chainA.GetContext(), upgradePlan) + 
s.Require().NoError(err) bz := []byte{1, 2, 3} - err = suite.chainA.GetSimApp().UpgradeKeeper.SetUpgradedClient(suite.chainA.GetContext(), upgradePlan.Height, bz) - suite.Require().NoError(err) + err = s.chainA.GetSimApp().UpgradeKeeper.SetUpgradedClient(s.chainA.GetContext(), upgradePlan.Height, bz) + s.Require().NoError(err) }, status.Error(codes.Internal, "proto: Any: illegal tag 0 (wire type 1)"), }, } for _, tc := range testCases { - suite.Run(fmt.Sprintf("Case %s", tc.msg), func() { - suite.SetupTest() // reset + s.Run(fmt.Sprintf("Case %s", tc.msg), func() { + s.SetupTest() // reset - path = ibctesting.NewPath(suite.chainA, suite.chainB) + path = ibctesting.NewPath(s.chainA, s.chainB) path.SetupClients() req = &types.QueryUpgradedClientStateRequest{} tc.malleate() - queryServer := keeper.NewQueryServer(suite.chainA.GetSimApp().IBCKeeper.ClientKeeper) - res, err := queryServer.UpgradedClientState(suite.chainA.GetContext(), req) + queryServer := keeper.NewQueryServer(s.chainA.GetSimApp().IBCKeeper.ClientKeeper) + res, err := queryServer.UpgradedClientState(s.chainA.GetContext(), req) if tc.expError == nil { - suite.Require().NoError(err) + s.Require().NoError(err) upgradedClientState, err := types.UnpackClientState(res.UpgradedClientState) - suite.Require().NoError(err) + s.Require().NoError(err) upgradedClientStateCmt, ok := upgradedClientState.(*ibctm.ClientState) - suite.Require().True(ok) + s.Require().True(ok) - suite.Require().Equal(expClientState.ZeroCustomFields(), upgradedClientStateCmt) + s.Require().Equal(expClientState.ZeroCustomFields(), upgradedClientStateCmt) } else { - suite.Require().ErrorIs(err, tc.expError) + s.Require().ErrorIs(err, tc.expError) } }) } } -func (suite *KeeperTestSuite) TestQueryUpgradedConsensusStates() { +func (s *KeeperTestSuite) TestQueryUpgradedConsensusStates() { var ( req *types.QueryUpgradedConsensusStateRequest expConsensusState *codectypes.Any @@ -733,40 +733,40 @@ func (suite *KeeperTestSuite) TestQueryUpgradedConsensusStates() { func() { req = &types.QueryUpgradedConsensusStateRequest{} - ctx := suite.chainA.GetContext() + ctx := s.chainA.GetContext() lastHeight := types.NewHeight(0, uint64(ctx.BlockHeight())) height = int64(lastHeight.GetRevisionHeight()) ctx = ctx.WithBlockHeight(height) - expConsensusState = types.MustPackConsensusState(suite.consensusState) - bz := types.MustMarshalConsensusState(suite.cdc, suite.consensusState) - err := suite.chainA.GetSimApp().GetIBCKeeper().ClientKeeper.SetUpgradedConsensusState(ctx, height, bz) - suite.Require().NoError(err) + expConsensusState = types.MustPackConsensusState(s.consensusState) + bz := types.MustMarshalConsensusState(s.cdc, s.consensusState) + err := s.chainA.GetSimApp().GetIBCKeeper().ClientKeeper.SetUpgradedConsensusState(ctx, height, bz) + s.Require().NoError(err) }, nil, }, } for _, tc := range testCases { - suite.Run(fmt.Sprintf("Case %s", tc.msg), func() { - suite.SetupTest() // reset + s.Run(fmt.Sprintf("Case %s", tc.msg), func() { + s.SetupTest() // reset tc.malleate() - queryServer := keeper.NewQueryServer(suite.chainA.GetSimApp().IBCKeeper.ClientKeeper) - res, err := queryServer.UpgradedConsensusState(suite.chainA.GetContext(), req) + queryServer := keeper.NewQueryServer(s.chainA.GetSimApp().IBCKeeper.ClientKeeper) + res, err := queryServer.UpgradedConsensusState(s.chainA.GetContext(), req) if tc.expErr == nil { - suite.Require().NoError(err) - suite.Require().True(expConsensusState.Equal(res.UpgradedConsensusState)) + s.Require().NoError(err) + 
s.Require().True(expConsensusState.Equal(res.UpgradedConsensusState)) } else { - suite.Require().Error(err) - suite.Require().ErrorIs(err, tc.expErr) + s.Require().Error(err) + s.Require().ErrorIs(err, tc.expErr) } }) } } -func (suite *KeeperTestSuite) TestQueryCreator() { +func (s *KeeperTestSuite) TestQueryCreator() { var ( req *types.QueryClientCreatorRequest expRes *types.QueryClientCreatorResponse @@ -812,7 +812,7 @@ func (suite *KeeperTestSuite) TestQueryCreator() { ClientId: path.EndpointA.ClientID, } expRes = &types.QueryClientCreatorResponse{ - Creator: suite.chainA.SenderAccount.GetAddress().String(), + Creator: s.chainA.SenderAccount.GetAddress().String(), } }, nil, @@ -820,36 +820,36 @@ func (suite *KeeperTestSuite) TestQueryCreator() { } for _, tc := range testCases { - suite.Run(fmt.Sprintf("Case %s", tc.name), func() { - suite.SetupTest() // reset - path = ibctesting.NewPath(suite.chainA, suite.chainB) + s.Run(fmt.Sprintf("Case %s", tc.name), func() { + s.SetupTest() // reset + path = ibctesting.NewPath(s.chainA, s.chainB) tc.malleate() - ctx := suite.chainA.GetContext() - queryServer := keeper.NewQueryServer(suite.chainA.GetSimApp().IBCKeeper.ClientKeeper) + ctx := s.chainA.GetContext() + queryServer := keeper.NewQueryServer(s.chainA.GetSimApp().IBCKeeper.ClientKeeper) res, err := queryServer.ClientCreator(ctx, req) if tc.expError == nil { - suite.Require().NoError(err) - suite.Require().NotNil(res) - suite.Require().Equal(expRes, res) + s.Require().NoError(err) + s.Require().NotNil(res) + s.Require().Equal(expRes, res) } else { - suite.Require().Error(err) - suite.Require().ErrorIs(err, tc.expError) + s.Require().Error(err) + s.Require().ErrorIs(err, tc.expError) } }) } } -func (suite *KeeperTestSuite) TestQueryClientParams() { - ctx := suite.chainA.GetContext() +func (s *KeeperTestSuite) TestQueryClientParams() { + ctx := s.chainA.GetContext() expParams := types.DefaultParams() - queryServer := keeper.NewQueryServer(suite.chainA.GetSimApp().IBCKeeper.ClientKeeper) + queryServer := keeper.NewQueryServer(s.chainA.GetSimApp().IBCKeeper.ClientKeeper) res, _ := queryServer.ClientParams(ctx, &types.QueryClientParamsRequest{}) - suite.Require().Equal(&expParams, res.Params) + s.Require().Equal(&expParams, res.Params) } -func (suite *KeeperTestSuite) TestQueryVerifyMembershipProof() { +func (s *KeeperTestSuite) TestQueryVerifyMembershipProof() { const wasmClientID = "08-wasm-0" var ( @@ -866,14 +866,14 @@ func (suite *KeeperTestSuite) TestQueryVerifyMembershipProof() { "success", func() { channel := path.EndpointB.GetChannel() - bz, err := suite.chainB.Codec.Marshal(&channel) - suite.Require().NoError(err) + bz, err := s.chainB.Codec.Marshal(&channel) + s.Require().NoError(err) channelProof, proofHeight := path.EndpointB.QueryProof(host.ChannelKey(path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID)) merklePath := commitmenttypes.NewMerklePath(host.ChannelKey(path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID)) - merklePath, err = commitmenttypes.ApplyPrefix(suite.chainB.GetPrefix(), merklePath) - suite.Require().NoError(err) + merklePath, err = commitmenttypes.ApplyPrefix(s.chainB.GetPrefix(), merklePath) + s.Require().NoError(err) req = &types.QueryVerifyMembershipRequest{ ClientId: path.EndpointA.ClientID, @@ -980,7 +980,7 @@ func (suite *KeeperTestSuite) TestQueryVerifyMembershipProof() { "client type not allowed", func() { params := types.NewParams("") // disable all clients - 
suite.chainA.GetSimApp().GetIBCKeeper().ClientKeeper.SetParams(suite.chainA.GetContext(), params) + s.chainA.GetSimApp().GetIBCKeeper().ClientKeeper.SetParams(s.chainA.GetContext(), params) req = &types.QueryVerifyMembershipRequest{ ClientId: path.EndpointA.ClientID, @@ -995,30 +995,30 @@ func (suite *KeeperTestSuite) TestQueryVerifyMembershipProof() { } for _, tc := range testCases { - suite.Run(tc.name, func() { - suite.SetupTest() // reset + s.Run(tc.name, func() { + s.SetupTest() // reset - path = ibctesting.NewPath(suite.chainA, suite.chainB) + path = ibctesting.NewPath(s.chainA, s.chainB) path.Setup() tc.malleate() - ctx := suite.chainA.GetContext() + ctx := s.chainA.GetContext() initialGas := ctx.GasMeter().GasConsumed() - queryServer := keeper.NewQueryServer(suite.chainA.GetSimApp().IBCKeeper.ClientKeeper) + queryServer := keeper.NewQueryServer(s.chainA.GetSimApp().IBCKeeper.ClientKeeper) res, err := queryServer.VerifyMembership(ctx, req) if tc.expError == nil { - suite.Require().NoError(err) - suite.Require().True(res.Success, "failed to verify membership proof") + s.Require().NoError(err) + s.Require().True(res.Success, "failed to verify membership proof") gasConsumed := ctx.GasMeter().GasConsumed() - suite.Require().Greater(gasConsumed, initialGas, "gas consumed should be greater than initial gas") + s.Require().Greater(gasConsumed, initialGas, "gas consumed should be greater than initial gas") } else { - suite.Require().ErrorContains(err, tc.expError.Error()) + s.Require().ErrorContains(err, tc.expError.Error()) gasConsumed := ctx.GasMeter().GasConsumed() - suite.Require().GreaterOrEqual(gasConsumed, initialGas, "gas consumed should be greater than or equal to initial gas") + s.Require().GreaterOrEqual(gasConsumed, initialGas, "gas consumed should be greater than or equal to initial gas") } }) } diff --git a/modules/core/02-client/keeper/keeper.go b/modules/core/02-client/keeper/keeper.go index d49d56b72be..d721c88c1ad 100644 --- a/modules/core/02-client/keeper/keeper.go +++ b/modules/core/02-client/keeper/keeper.go @@ -26,25 +26,23 @@ import ( // Keeper represents a type that grants read and write permissions to any client // state information type Keeper struct { - storeService corestore.KVStoreService - cdc codec.BinaryCodec - router *types.Router - legacySubspace types.ParamSubspace - upgradeKeeper types.UpgradeKeeper + storeService corestore.KVStoreService + cdc codec.BinaryCodec + router *types.Router + upgradeKeeper types.UpgradeKeeper } // NewKeeper creates a new NewKeeper instance -func NewKeeper(cdc codec.BinaryCodec, storeService corestore.KVStoreService, legacySubspace types.ParamSubspace, uk types.UpgradeKeeper) *Keeper { +func NewKeeper(cdc codec.BinaryCodec, storeService corestore.KVStoreService, uk types.UpgradeKeeper) *Keeper { router := types.NewRouter() localhostModule := localhost.NewLightClientModule(cdc, storeService) router.AddRoute(exported.Localhost, localhostModule) return &Keeper{ - storeService: storeService, - cdc: cdc, - router: router, - legacySubspace: legacySubspace, - upgradeKeeper: uk, + storeService: storeService, + cdc: cdc, + router: router, + upgradeKeeper: uk, } } @@ -54,7 +52,7 @@ func (k *Keeper) Codec() codec.BinaryCodec { } // Logger returns a module-specific logger. 
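[Editorial aside, not part of the patch] The keeper.go hunk above removes the legacySubspace field from Keeper and drops the matching parameter from NewKeeper, so any code that constructs the 02-client keeper directly has to update its wiring. Below is a minimal sketch of the new call site, assuming standard Cosmos SDK argument types; the package name, function name, and any import paths not shown in this patch are illustrative assumptions rather than part of the change.

    package app // illustrative wrapper, not ibc-go code

    import (
        corestore "cosmossdk.io/core/store" // KVStoreService interface (import path assumed)

        "github.com/cosmos/cosmos-sdk/codec"

        clientkeeper "github.com/cosmos/ibc-go/v10/modules/core/02-client/keeper"
        clienttypes "github.com/cosmos/ibc-go/v10/modules/core/02-client/types" // assumed home of UpgradeKeeper
    )

    // newClientKeeper shows the post-patch constructor call: the former
    // legacySubspace argument is gone, the remaining arguments are unchanged.
    func newClientKeeper(
        cdc codec.BinaryCodec,
        storeService corestore.KVStoreService,
        upgradeKeeper clienttypes.UpgradeKeeper,
    ) *clientkeeper.Keeper {
        // Pre-patch signature: NewKeeper(cdc, storeService, legacySubspace, uk).
        // Callers migrate by simply dropping the third argument.
        return clientkeeper.NewKeeper(cdc, storeService, upgradeKeeper)
    }
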
-func (Keeper) Logger(ctx sdk.Context) log.Logger { +func (*Keeper) Logger(ctx sdk.Context) log.Logger { return ctx.Logger().With("module", "x/"+exported.ModuleName+"/"+types.SubModuleName) } diff --git a/modules/core/02-client/keeper/keeper_test.go b/modules/core/02-client/keeper/keeper_test.go index a8272afa9ab..f6ce7a65a0c 100644 --- a/modules/core/02-client/keeper/keeper_test.go +++ b/modules/core/02-client/keeper/keeper_test.go @@ -68,90 +68,90 @@ type KeeperTestSuite struct { signers map[string]cmttypes.PrivValidator } -func (suite *KeeperTestSuite) SetupTest() { - suite.coordinator = ibctesting.NewCoordinator(suite.T(), 2) +func TestKeeperTestSuite(t *testing.T) { + testifysuite.Run(t, new(KeeperTestSuite)) +} - suite.chainA = suite.coordinator.GetChain(ibctesting.GetChainID(1)) - suite.chainB = suite.coordinator.GetChain(ibctesting.GetChainID(2)) +func (s *KeeperTestSuite) SetupTest() { + s.coordinator = ibctesting.NewCoordinator(s.T(), 2) + + s.chainA = s.coordinator.GetChain(ibctesting.GetChainID(1)) + s.chainB = s.coordinator.GetChain(ibctesting.GetChainID(2)) isCheckTx := false - suite.now = time.Date(2020, 1, 2, 0, 0, 0, 0, time.UTC) - suite.past = time.Date(2020, 1, 1, 0, 0, 0, 0, time.UTC) - app := simapp.Setup(suite.T(), isCheckTx) + s.now = time.Date(2020, 1, 2, 0, 0, 0, 0, time.UTC) + s.past = time.Date(2020, 1, 1, 0, 0, 0, 0, time.UTC) + app := simapp.Setup(s.T(), isCheckTx) - suite.cdc = app.AppCodec() - suite.ctx = app.NewContext(isCheckTx) - suite.keeper = app.IBCKeeper.ClientKeeper - suite.privVal = cmttypes.NewMockPV() - pubKey, err := suite.privVal.GetPubKey() - suite.Require().NoError(err) + s.cdc = app.AppCodec() + s.ctx = app.NewContext(isCheckTx) + s.keeper = app.IBCKeeper.ClientKeeper + s.privVal = cmttypes.NewMockPV() + pubKey, err := s.privVal.GetPubKey() + s.Require().NoError(err) validator := cmttypes.NewValidator(pubKey, 1) - suite.valSet = cmttypes.NewValidatorSet([]*cmttypes.Validator{validator}) - suite.valSetHash = suite.valSet.Hash() + s.valSet = cmttypes.NewValidatorSet([]*cmttypes.Validator{validator}) + s.valSetHash = s.valSet.Hash() - suite.signers = make(map[string]cmttypes.PrivValidator, 1) - suite.signers[validator.Address.String()] = suite.privVal + s.signers = make(map[string]cmttypes.PrivValidator, 1) + s.signers[validator.Address.String()] = s.privVal - suite.consensusState = ibctm.NewConsensusState(suite.now, commitmenttypes.NewMerkleRoot([]byte("hash")), suite.valSetHash) + s.consensusState = ibctm.NewConsensusState(s.now, commitmenttypes.NewMerkleRoot([]byte("hash")), s.valSetHash) var validators stakingtypes.Validators for i := 1; i < 11; i++ { privVal := cmttypes.NewMockPV() tmPk, err := privVal.GetPubKey() - suite.Require().NoError(err) + s.Require().NoError(err) pk, err := cryptocodec.FromCmtPubKeyInterface(tmPk) - suite.Require().NoError(err) + s.Require().NoError(err) val, err := stakingtypes.NewValidator(pk.Address().String(), pk, stakingtypes.Description{}) - suite.Require().NoError(err) + s.Require().NoError(err) val.Status = stakingtypes.Bonded val.Tokens = sdkmath.NewInt(rand.Int63()) validators.Validators = append(validators.Validators, val) - hi := stakingtypes.NewHistoricalInfo(suite.ctx.BlockHeader(), validators, sdk.DefaultPowerReduction) - err = app.StakingKeeper.SetHistoricalInfo(suite.ctx, int64(i), &hi) - suite.Require().NoError(err) + hi := stakingtypes.NewHistoricalInfo(s.ctx.BlockHeader(), validators, sdk.DefaultPowerReduction) + err = app.StakingKeeper.SetHistoricalInfo(s.ctx, int64(i), &hi) + 
s.Require().NoError(err) } - suite.solomachine = ibctesting.NewSolomachine(suite.T(), suite.chainA.Codec, "solomachinesingle", "testing", 1) + s.solomachine = ibctesting.NewSolomachine(s.T(), s.chainA.Codec, "solomachinesingle", "testing", 1) } -func TestKeeperTestSuite(t *testing.T) { - testifysuite.Run(t, new(KeeperTestSuite)) -} - -func (suite *KeeperTestSuite) TestSetClientState() { +func (s *KeeperTestSuite) TestSetClientState() { clientState := ibctm.NewClientState(testChainID, ibctm.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, types.ZeroHeight(), commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath) - suite.keeper.SetClientState(suite.ctx, testClientID, clientState) + s.keeper.SetClientState(s.ctx, testClientID, clientState) - retrievedState, found := suite.keeper.GetClientState(suite.ctx, testClientID) - suite.Require().True(found, "GetClientState failed") - suite.Require().Equal(clientState, retrievedState, "Client states are not equal") + retrievedState, found := s.keeper.GetClientState(s.ctx, testClientID) + s.Require().True(found, "GetClientState failed") + s.Require().Equal(clientState, retrievedState, "Client states are not equal") } -func (suite *KeeperTestSuite) TestSetClientCreator() { - creator := suite.chainA.SenderAccount.GetAddress() - suite.keeper.SetClientCreator(suite.ctx, testClientID, creator) - getCreator := suite.keeper.GetClientCreator(suite.ctx, testClientID) - suite.Require().Equal(creator, getCreator) - suite.keeper.DeleteClientCreator(suite.ctx, testClientID) - getCreator = suite.keeper.GetClientCreator(suite.ctx, testClientID) - suite.Require().Equal(sdk.AccAddress(nil), getCreator) +func (s *KeeperTestSuite) TestSetClientCreator() { + creator := s.chainA.SenderAccount.GetAddress() + s.keeper.SetClientCreator(s.ctx, testClientID, creator) + getCreator := s.keeper.GetClientCreator(s.ctx, testClientID) + s.Require().Equal(creator, getCreator) + s.keeper.DeleteClientCreator(s.ctx, testClientID) + getCreator = s.keeper.GetClientCreator(s.ctx, testClientID) + s.Require().Equal(sdk.AccAddress(nil), getCreator) } -func (suite *KeeperTestSuite) TestSetClientConsensusState() { - suite.keeper.SetClientConsensusState(suite.ctx, testClientID, testClientHeight, suite.consensusState) +func (s *KeeperTestSuite) TestSetClientConsensusState() { + s.keeper.SetClientConsensusState(s.ctx, testClientID, testClientHeight, s.consensusState) - retrievedConsState, found := suite.keeper.GetClientConsensusState(suite.ctx, testClientID, testClientHeight) - suite.Require().True(found, "GetConsensusState failed") + retrievedConsState, found := s.keeper.GetClientConsensusState(s.ctx, testClientID, testClientHeight) + s.Require().True(found, "GetConsensusState failed") tmConsState, ok := retrievedConsState.(*ibctm.ConsensusState) - suite.Require().True(ok) - suite.Require().Equal(suite.consensusState, tmConsState, "ConsensusState not stored correctly") + s.Require().True(ok) + s.Require().Equal(s.consensusState, tmConsState, "ConsensusState not stored correctly") } -func (suite *KeeperTestSuite) TestGetAllGenesisClients() { +func (s *KeeperTestSuite) TestGetAllGenesisClients() { clientIDs := []string{ testClientID2, testClientID3, testClientID, } @@ -164,28 +164,28 @@ func (suite *KeeperTestSuite) TestGetAllGenesisClients() { expGenClients := make(types.IdentifiedClientStates, len(expClients)) for i := range expClients { - suite.chainA.App.GetIBCKeeper().ClientKeeper.SetClientState(suite.chainA.GetContext(), clientIDs[i], expClients[i]) + 
s.chainA.App.GetIBCKeeper().ClientKeeper.SetClientState(s.chainA.GetContext(), clientIDs[i], expClients[i]) expGenClients[i] = types.NewIdentifiedClientState(clientIDs[i], expClients[i]) } - genClients := suite.chainA.App.GetIBCKeeper().ClientKeeper.GetAllGenesisClients(suite.chainA.GetContext()) + genClients := s.chainA.App.GetIBCKeeper().ClientKeeper.GetAllGenesisClients(s.chainA.GetContext()) - suite.Require().Equal(expGenClients.Sort(), genClients) + s.Require().Equal(expGenClients.Sort(), genClients) } -func (suite *KeeperTestSuite) TestGetAllGenesisMetadata() { +func (s *KeeperTestSuite) TestGetAllGenesisMetadata() { clientA, clientB := "07-tendermint-1", "clientB" // create some starting state - suite.chainA.App.GetIBCKeeper().ClientKeeper.SetClientState(suite.chainA.GetContext(), clientA, &ibctm.ClientState{}) - suite.chainA.App.GetIBCKeeper().ClientKeeper.SetClientConsensusState(suite.chainA.GetContext(), clientA, types.NewHeight(0, 1), &ibctm.ConsensusState{}) - suite.chainA.App.GetIBCKeeper().ClientKeeper.SetClientConsensusState(suite.chainA.GetContext(), clientA, types.NewHeight(0, 2), &ibctm.ConsensusState{}) - suite.chainA.App.GetIBCKeeper().ClientKeeper.SetClientConsensusState(suite.chainA.GetContext(), clientA, types.NewHeight(0, 3), &ibctm.ConsensusState{}) - suite.chainA.App.GetIBCKeeper().ClientKeeper.SetClientConsensusState(suite.chainA.GetContext(), clientA, types.NewHeight(2, 300), &ibctm.ConsensusState{}) + s.chainA.App.GetIBCKeeper().ClientKeeper.SetClientState(s.chainA.GetContext(), clientA, &ibctm.ClientState{}) + s.chainA.App.GetIBCKeeper().ClientKeeper.SetClientConsensusState(s.chainA.GetContext(), clientA, types.NewHeight(0, 1), &ibctm.ConsensusState{}) + s.chainA.App.GetIBCKeeper().ClientKeeper.SetClientConsensusState(s.chainA.GetContext(), clientA, types.NewHeight(0, 2), &ibctm.ConsensusState{}) + s.chainA.App.GetIBCKeeper().ClientKeeper.SetClientConsensusState(s.chainA.GetContext(), clientA, types.NewHeight(0, 3), &ibctm.ConsensusState{}) + s.chainA.App.GetIBCKeeper().ClientKeeper.SetClientConsensusState(s.chainA.GetContext(), clientA, types.NewHeight(2, 300), &ibctm.ConsensusState{}) - suite.chainA.App.GetIBCKeeper().ClientKeeper.SetClientState(suite.chainA.GetContext(), clientB, &ibctm.ClientState{}) - suite.chainA.App.GetIBCKeeper().ClientKeeper.SetClientConsensusState(suite.chainA.GetContext(), clientB, types.NewHeight(1, 100), &ibctm.ConsensusState{}) - suite.chainA.App.GetIBCKeeper().ClientKeeper.SetClientConsensusState(suite.chainA.GetContext(), clientB, types.NewHeight(2, 300), &ibctm.ConsensusState{}) + s.chainA.App.GetIBCKeeper().ClientKeeper.SetClientState(s.chainA.GetContext(), clientB, &ibctm.ClientState{}) + s.chainA.App.GetIBCKeeper().ClientKeeper.SetClientConsensusState(s.chainA.GetContext(), clientB, types.NewHeight(1, 100), &ibctm.ConsensusState{}) + s.chainA.App.GetIBCKeeper().ClientKeeper.SetClientConsensusState(s.chainA.GetContext(), clientB, types.NewHeight(2, 300), &ibctm.ConsensusState{}) // NOTE: correct ordering of expected value is required // Ordering is typically determined by the lexographic ordering of the height passed into each key. 
@@ -215,38 +215,38 @@ func (suite *KeeperTestSuite) TestGetAllGenesisMetadata() { types.NewIdentifiedClientState(clientA, &ibctm.ClientState{}), types.NewIdentifiedClientState(clientB, &ibctm.ClientState{}), } - suite.chainA.App.GetIBCKeeper().ClientKeeper.SetAllClientMetadata(suite.chainA.GetContext(), expectedGenMetadata) + s.chainA.App.GetIBCKeeper().ClientKeeper.SetAllClientMetadata(s.chainA.GetContext(), expectedGenMetadata) - actualGenMetadata, err := suite.chainA.App.GetIBCKeeper().ClientKeeper.GetAllClientMetadata(suite.chainA.GetContext(), genClients) - suite.Require().NoError(err, "get client metadata returned error unexpectedly") - suite.Require().Equal(expectedGenMetadata, actualGenMetadata, "retrieved metadata is unexpected") + actualGenMetadata, err := s.chainA.App.GetIBCKeeper().ClientKeeper.GetAllClientMetadata(s.chainA.GetContext(), genClients) + s.Require().NoError(err, "get client metadata returned error unexpectedly") + s.Require().Equal(expectedGenMetadata, actualGenMetadata, "retrieved metadata is unexpected") // set invalid key in client store which will cause panic during iteration - clientStore := suite.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(suite.chainA.GetContext(), "") + clientStore := s.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(s.chainA.GetContext(), "") clientStore.Set([]byte("key"), []byte("val")) - suite.Require().Panics(func() { - suite.chainA.App.GetIBCKeeper().ClientKeeper.GetAllClientMetadata(suite.chainA.GetContext(), genClients) //nolint:errcheck // we expect a panic + s.Require().Panics(func() { + s.chainA.App.GetIBCKeeper().ClientKeeper.GetAllClientMetadata(s.chainA.GetContext(), genClients) //nolint:errcheck // we expect a panic }) } // 2 clients in total are created on chainA. The first client is updated so it contains an initial consensus state // and a consensus state at the update height. 
-func (suite *KeeperTestSuite) TestGetAllConsensusStates() { - path1 := ibctesting.NewPath(suite.chainA, suite.chainB) +func (s *KeeperTestSuite) TestGetAllConsensusStates() { + path1 := ibctesting.NewPath(s.chainA, s.chainB) path1.SetupClients() expConsensusHeight0 := path1.EndpointA.GetClientLatestHeight() - consensusState0, ok := suite.chainA.GetConsensusState(path1.EndpointA.ClientID, expConsensusHeight0) - suite.Require().True(ok) + consensusState0, ok := s.chainA.GetConsensusState(path1.EndpointA.ClientID, expConsensusHeight0) + s.Require().True(ok) // update client to create a second consensus state err := path1.EndpointA.UpdateClient() - suite.Require().NoError(err) + s.Require().NoError(err) expConsensusHeight1 := path1.EndpointA.GetClientLatestHeight() - suite.Require().True(expConsensusHeight1.GT(expConsensusHeight0)) - consensusState1, ok := suite.chainA.GetConsensusState(path1.EndpointA.ClientID, expConsensusHeight1) - suite.Require().True(ok) + s.Require().True(expConsensusHeight1.GT(expConsensusHeight0)) + consensusState1, ok := s.chainA.GetConsensusState(path1.EndpointA.ClientID, expConsensusHeight1) + s.Require().True(ok) expConsensus := []exported.ConsensusState{ consensusState0, @@ -254,12 +254,12 @@ func (suite *KeeperTestSuite) TestGetAllConsensusStates() { } // create second client on chainA - path2 := ibctesting.NewPath(suite.chainA, suite.chainB) + path2 := ibctesting.NewPath(s.chainA, s.chainB) path2.SetupClients() expConsensusHeight2 := path2.EndpointA.GetClientLatestHeight() - consensusState2, ok := suite.chainA.GetConsensusState(path2.EndpointA.ClientID, expConsensusHeight2) - suite.Require().True(ok) + consensusState2, ok := s.chainA.GetConsensusState(path2.EndpointA.ClientID, expConsensusHeight2) + s.Require().True(ok) expConsensus2 := []exported.ConsensusState{consensusState2} @@ -273,20 +273,20 @@ func (suite *KeeperTestSuite) TestGetAllConsensusStates() { }), }.Sort() - consStates := suite.chainA.App.GetIBCKeeper().ClientKeeper.GetAllConsensusStates(suite.chainA.GetContext()) - suite.Require().Equal(expConsensusStates, consStates, "%s \n\n%s", expConsensusStates, consStates) + consStates := s.chainA.App.GetIBCKeeper().ClientKeeper.GetAllConsensusStates(s.chainA.GetContext()) + s.Require().Equal(expConsensusStates, consStates, "%s \n\n%s", expConsensusStates, consStates) } -func (suite *KeeperTestSuite) TestIterateClientStates() { +func (s *KeeperTestSuite) TestIterateClientStates() { paths := []*ibctesting.Path{ - ibctesting.NewPath(suite.chainA, suite.chainB), - ibctesting.NewPath(suite.chainA, suite.chainB), - ibctesting.NewPath(suite.chainA, suite.chainB), + ibctesting.NewPath(s.chainA, s.chainB), + ibctesting.NewPath(s.chainA, s.chainB), + ibctesting.NewPath(s.chainA, s.chainB), } solomachines := []*ibctesting.Solomachine{ - ibctesting.NewSolomachine(suite.T(), suite.chainA.Codec, ibctesting.DefaultSolomachineClientID, "testing", 1), - ibctesting.NewSolomachine(suite.T(), suite.chainA.Codec, "06-solomachine-1", "testing", 4), + ibctesting.NewSolomachine(s.T(), s.chainA.Codec, ibctesting.DefaultSolomachineClientID, "testing", 1), + ibctesting.NewSolomachine(s.T(), s.chainA.Codec, "06-solomachine-1", "testing", 4), } var ( @@ -302,7 +302,7 @@ func (suite *KeeperTestSuite) TestIterateClientStates() { // create solomachine clients for i, sm := range solomachines { - expSMClientIDs[i] = sm.CreateClient(suite.chainA) + expSMClientIDs[i] = sm.CreateClient(s.chainA) } testCases := []struct { @@ -334,19 +334,19 @@ func (suite *KeeperTestSuite) 
TestIterateClientStates() { } for _, tc := range testCases { - suite.Run(tc.name, func() { + s.Run(tc.name, func() { var clientIDs []string - suite.chainA.GetSimApp().IBCKeeper.ClientKeeper.IterateClientStates(suite.chainA.GetContext(), tc.prefix, func(clientID string, _ exported.ClientState) bool { + s.chainA.GetSimApp().IBCKeeper.ClientKeeper.IterateClientStates(s.chainA.GetContext(), tc.prefix, func(clientID string, _ exported.ClientState) bool { clientIDs = append(clientIDs, clientID) return false }) - suite.Require().ElementsMatch(tc.expClientIDs(), clientIDs) + s.Require().ElementsMatch(tc.expClientIDs(), clientIDs) }) } } -func (suite *KeeperTestSuite) TestGetClientLatestHeight() { +func (s *KeeperTestSuite) TestGetClientLatestHeight() { var path *ibctesting.Path cases := []struct { @@ -369,7 +369,7 @@ func (suite *KeeperTestSuite) TestGetClientLatestHeight() { { "client type is not allowed", func() { params := types.NewParams(exported.Localhost) - suite.chainA.GetSimApp().GetIBCKeeper().ClientKeeper.SetParams(suite.chainA.GetContext(), params) + s.chainA.GetSimApp().GetIBCKeeper().ClientKeeper.SetParams(s.chainA.GetContext(), params) }, false, }, @@ -382,26 +382,26 @@ func (suite *KeeperTestSuite) TestGetClientLatestHeight() { } for _, tc := range cases { - suite.Run(tc.name, func() { - suite.SetupTest() // reset + s.Run(tc.name, func() { + s.SetupTest() // reset - path = ibctesting.NewPath(suite.chainA, suite.chainB) + path = ibctesting.NewPath(s.chainA, s.chainB) path.SetupConnections() tc.malleate() - height := suite.chainA.App.GetIBCKeeper().ClientKeeper.GetClientLatestHeight(suite.chainA.GetContext(), path.EndpointA.ClientID) + height := s.chainA.App.GetIBCKeeper().ClientKeeper.GetClientLatestHeight(s.chainA.GetContext(), path.EndpointA.ClientID) if tc.expPass { - suite.Require().Equal(suite.chainB.LatestCommittedHeader.GetHeight().(types.Height), height) + s.Require().Equal(s.chainB.LatestCommittedHeader.GetHeight().(types.Height), height) } else { - suite.Require().Equal(types.ZeroHeight(), height) + s.Require().Equal(types.ZeroHeight(), height) } }) } } -func (suite *KeeperTestSuite) TestGetTimestampAtHeight() { +func (s *KeeperTestSuite) TestGetTimestampAtHeight() { var ( height exported.Height path *ibctesting.Path @@ -427,7 +427,7 @@ func (suite *KeeperTestSuite) TestGetTimestampAtHeight() { { "client type is not allowed", func() { params := types.NewParams(exported.Localhost) - suite.chainA.GetSimApp().GetIBCKeeper().ClientKeeper.SetParams(suite.chainA.GetContext(), params) + s.chainA.GetSimApp().GetIBCKeeper().ClientKeeper.SetParams(s.chainA.GetContext(), params) }, types.ErrInvalidClientType, }, @@ -445,36 +445,36 @@ func (suite *KeeperTestSuite) TestGetTimestampAtHeight() { }, { "consensus state not found", func() { - height = suite.chainB.LatestCommittedHeader.GetHeight().Increment() + height = s.chainB.LatestCommittedHeader.GetHeight().Increment() }, types.ErrConsensusStateNotFound, }, } for _, tc := range cases { - suite.Run(tc.name, func() { - suite.SetupTest() // reset + s.Run(tc.name, func() { + s.SetupTest() // reset - path = ibctesting.NewPath(suite.chainA, suite.chainB) + path = ibctesting.NewPath(s.chainA, s.chainB) path.SetupConnections() - height = suite.chainB.LatestCommittedHeader.GetHeight() + height = s.chainB.LatestCommittedHeader.GetHeight() tc.malleate() - actualTimestamp, err := suite.chainA.App.GetIBCKeeper().ClientKeeper.GetClientTimestampAtHeight(suite.chainA.GetContext(), path.EndpointA.ClientID, height) + actualTimestamp, err := 
s.chainA.App.GetIBCKeeper().ClientKeeper.GetClientTimestampAtHeight(s.chainA.GetContext(), path.EndpointA.ClientID, height) if tc.expError == nil { - suite.Require().NoError(err) - suite.Require().Equal(uint64(suite.chainB.LatestCommittedHeader.GetTime().UnixNano()), actualTimestamp) + s.Require().NoError(err) + s.Require().Equal(uint64(s.chainB.LatestCommittedHeader.GetTime().UnixNano()), actualTimestamp) } else { - suite.Require().ErrorIs(err, tc.expError) + s.Require().ErrorIs(err, tc.expError) } }) } } -func (suite *KeeperTestSuite) TestVerifyMembership() { +func (s *KeeperTestSuite) TestVerifyMembership() { var path *ibctesting.Path cases := []struct { @@ -498,7 +498,7 @@ func (suite *KeeperTestSuite) TestVerifyMembership() { "failure: client is frozen", func() { clientState, ok := path.EndpointA.GetClientState().(*ibctm.ClientState) - suite.Require().True(ok) + s.Require().True(ok) clientState.FrozenHeight = types.NewHeight(0, 1) path.EndpointA.SetClientState(clientState) }, @@ -507,40 +507,40 @@ func (suite *KeeperTestSuite) TestVerifyMembership() { } for _, tc := range cases { - suite.Run(tc.name, func() { - suite.SetupTest() // reset + s.Run(tc.name, func() { + s.SetupTest() // reset - path = ibctesting.NewPath(suite.chainA, suite.chainB) + path = ibctesting.NewPath(s.chainA, s.chainB) path.Setup() // create default proof, merklePath, and value which passes key := host.FullClientStateKey(path.EndpointB.ClientID) merklePath := commitmenttypes.NewMerklePath(key) - merklePrefixPath, err := commitmenttypes.ApplyPrefix(suite.chainB.GetPrefix(), merklePath) - suite.Require().NoError(err) + merklePrefixPath, err := commitmenttypes.ApplyPrefix(s.chainB.GetPrefix(), merklePath) + s.Require().NoError(err) - proof, proofHeight := suite.chainB.QueryProof(key) + proof, proofHeight := s.chainB.QueryProof(key) clientState, ok := path.EndpointB.GetClientState().(*ibctm.ClientState) - suite.Require().True(ok) - value, err := suite.chainB.Codec.MarshalInterface(clientState) - suite.Require().NoError(err) + s.Require().True(ok) + value, err := s.chainB.Codec.MarshalInterface(clientState) + s.Require().NoError(err) tc.malleate() - err = suite.chainA.App.GetIBCKeeper().ClientKeeper.VerifyMembership(suite.chainA.GetContext(), path.EndpointA.ClientID, proofHeight, 0, 0, proof, merklePrefixPath, value) + err = s.chainA.App.GetIBCKeeper().ClientKeeper.VerifyMembership(s.chainA.GetContext(), path.EndpointA.ClientID, proofHeight, 0, 0, proof, merklePrefixPath, value) expPass := tc.expError == nil if expPass { - suite.Require().NoError(err) + s.Require().NoError(err) } else { - suite.Require().ErrorIs(err, tc.expError) + s.Require().ErrorIs(err, tc.expError) } }) } } -func (suite *KeeperTestSuite) TestVerifyNonMembership() { +func (s *KeeperTestSuite) TestVerifyNonMembership() { var path *ibctesting.Path cases := []struct { @@ -564,7 +564,7 @@ func (suite *KeeperTestSuite) TestVerifyNonMembership() { "failure: client is frozen", func() { clientState, ok := path.EndpointA.GetClientState().(*ibctm.ClientState) - suite.Require().True(ok) + s.Require().True(ok) clientState.FrozenHeight = types.NewHeight(0, 1) path.EndpointA.SetClientState(clientState) }, @@ -573,48 +573,48 @@ func (suite *KeeperTestSuite) TestVerifyNonMembership() { } for _, tc := range cases { - suite.Run(tc.name, func() { - suite.SetupTest() // reset + s.Run(tc.name, func() { + s.SetupTest() // reset - path = ibctesting.NewPath(suite.chainA, suite.chainB) + path = ibctesting.NewPath(s.chainA, s.chainB) path.Setup() // create default proof, 
merklePath, and value which passes key := host.FullClientStateKey("invalid-client-id") merklePath := commitmenttypes.NewMerklePath(key) - merklePrefixPath, err := commitmenttypes.ApplyPrefix(suite.chainB.GetPrefix(), merklePath) - suite.Require().NoError(err) + merklePrefixPath, err := commitmenttypes.ApplyPrefix(s.chainB.GetPrefix(), merklePath) + s.Require().NoError(err) - proof, proofHeight := suite.chainB.QueryProof(key) + proof, proofHeight := s.chainB.QueryProof(key) tc.malleate() - err = suite.chainA.App.GetIBCKeeper().ClientKeeper.VerifyNonMembership(suite.chainA.GetContext(), path.EndpointA.ClientID, proofHeight, 0, 0, proof, merklePrefixPath) + err = s.chainA.App.GetIBCKeeper().ClientKeeper.VerifyNonMembership(s.chainA.GetContext(), path.EndpointA.ClientID, proofHeight, 0, 0, proof, merklePrefixPath) expPass := tc.expError == nil if expPass { - suite.Require().NoError(err) + s.Require().NoError(err) } else { - suite.Require().ErrorIs(err, tc.expError) + s.Require().ErrorIs(err, tc.expError) } }) } } // TestDefaultSetParams tests the default params set are what is expected -func (suite *KeeperTestSuite) TestDefaultSetParams() { +func (s *KeeperTestSuite) TestDefaultSetParams() { expParams := types.DefaultParams() - clientKeeper := suite.chainA.App.GetIBCKeeper().ClientKeeper - params := clientKeeper.GetParams(suite.chainA.GetContext()) + clientKeeper := s.chainA.App.GetIBCKeeper().ClientKeeper + params := clientKeeper.GetParams(s.chainA.GetContext()) - suite.Require().Equal(expParams, params) - suite.Require().Equal(expParams.AllowedClients, clientKeeper.GetParams(suite.chainA.GetContext()).AllowedClients) + s.Require().Equal(expParams, params) + s.Require().Equal(expParams.AllowedClients, clientKeeper.GetParams(s.chainA.GetContext()).AllowedClients) } // TestParams tests that Param setting and retrieval works properly -func (suite *KeeperTestSuite) TestParams() { +func (s *KeeperTestSuite) TestParams() { testCases := []struct { name string input types.Params @@ -627,38 +627,38 @@ func (suite *KeeperTestSuite) TestParams() { } for _, tc := range testCases { - suite.Run(tc.name, func() { - suite.SetupTest() // reset - ctx := suite.chainA.GetContext() + s.Run(tc.name, func() { + s.SetupTest() // reset + ctx := s.chainA.GetContext() err := tc.input.Validate() - suite.chainA.GetSimApp().IBCKeeper.ClientKeeper.SetParams(ctx, tc.input) + s.chainA.GetSimApp().IBCKeeper.ClientKeeper.SetParams(ctx, tc.input) if tc.expErr == nil { - suite.Require().NoError(err) + s.Require().NoError(err) expected := tc.input - p := suite.chainA.GetSimApp().IBCKeeper.ClientKeeper.GetParams(ctx) - suite.Require().Equal(expected, p) + p := s.chainA.GetSimApp().IBCKeeper.ClientKeeper.GetParams(ctx) + s.Require().Equal(expected, p) } else { - suite.Require().Error(err) - suite.Require().Equal(err.Error(), tc.expErr.Error()) + s.Require().Error(err) + s.Require().Equal(err.Error(), tc.expErr.Error()) } }) } } // TestUnsetParams tests that trying to get params that are not set panics. 
-func (suite *KeeperTestSuite) TestUnsetParams() { - suite.SetupTest() - ctx := suite.chainA.GetContext() - store := ctx.KVStore(suite.chainA.GetSimApp().GetKey(exported.StoreKey)) +func (s *KeeperTestSuite) TestUnsetParams() { + s.SetupTest() + ctx := s.chainA.GetContext() + store := ctx.KVStore(s.chainA.GetSimApp().GetKey(exported.StoreKey)) store.Delete([]byte(types.ParamsKey)) - suite.Require().Panics(func() { - suite.chainA.GetSimApp().IBCKeeper.ClientKeeper.GetParams(ctx) + s.Require().Panics(func() { + s.chainA.GetSimApp().IBCKeeper.ClientKeeper.GetParams(ctx) }) } // TestIBCSoftwareUpgrade tests that an IBC client upgrade has been properly scheduled -func (suite *KeeperTestSuite) TestIBCSoftwareUpgrade() { +func (s *KeeperTestSuite) TestIBCSoftwareUpgrade() { var ( upgradedClientState *ibctm.ClientState oldPlan, plan upgradetypes.Plan @@ -693,14 +693,14 @@ func (suite *KeeperTestSuite) TestIBCSoftwareUpgrade() { } for _, tc := range testCases { - suite.Run(tc.name, func() { - suite.SetupTest() // reset + s.Run(tc.name, func() { + s.SetupTest() // reset oldPlan.Height = 0 // reset - path := ibctesting.NewPath(suite.chainA, suite.chainB) + path := ibctesting.NewPath(s.chainA, s.chainB) path.SetupClients() tmClientState, ok := path.EndpointA.GetClientState().(*ibctm.ClientState) - suite.Require().True(ok) + s.Require().True(ok) upgradedClientState = tmClientState.ZeroCustomFields() // use height 1000 to distinguish from old plan @@ -714,38 +714,38 @@ func (suite *KeeperTestSuite) TestIBCSoftwareUpgrade() { // set the old plan if it is not empty if oldPlan.Height != 0 { // set upgrade plan in the upgrade store - store := suite.chainA.GetContext().KVStore(suite.chainA.GetSimApp().GetKey(upgradetypes.StoreKey)) - bz := suite.chainA.App.AppCodec().MustMarshal(&oldPlan) + store := s.chainA.GetContext().KVStore(s.chainA.GetSimApp().GetKey(upgradetypes.StoreKey)) + bz := s.chainA.App.AppCodec().MustMarshal(&oldPlan) store.Set(upgradetypes.PlanKey(), bz) - bz, err := types.MarshalClientState(suite.chainA.App.AppCodec(), upgradedClientState) - suite.Require().NoError(err) + bz, err := types.MarshalClientState(s.chainA.App.AppCodec(), upgradedClientState) + s.Require().NoError(err) - suite.Require().NoError(suite.chainA.GetSimApp().UpgradeKeeper.SetUpgradedClient(suite.chainA.GetContext(), oldPlan.Height, bz)) + s.Require().NoError(s.chainA.GetSimApp().UpgradeKeeper.SetUpgradedClient(s.chainA.GetContext(), oldPlan.Height, bz)) } - ctx := suite.chainA.GetContext() - err := suite.chainA.App.GetIBCKeeper().ClientKeeper.ScheduleIBCSoftwareUpgrade(ctx, plan, upgradedClientState) + ctx := s.chainA.GetContext() + err := s.chainA.App.GetIBCKeeper().ClientKeeper.ScheduleIBCSoftwareUpgrade(ctx, plan, upgradedClientState) if tc.expError == nil { - suite.Require().NoError(err) + s.Require().NoError(err) // check that the correct plan is returned - storedPlan, err := suite.chainA.GetSimApp().UpgradeKeeper.GetUpgradePlan(suite.chainA.GetContext()) - suite.Require().NoError(err) - suite.Require().Equal(plan, storedPlan) + storedPlan, err := s.chainA.GetSimApp().UpgradeKeeper.GetUpgradePlan(s.chainA.GetContext()) + s.Require().NoError(err) + s.Require().Equal(plan, storedPlan) // check that old upgraded client state is cleared - cs, err := suite.chainA.GetSimApp().UpgradeKeeper.GetUpgradedClient(suite.chainA.GetContext(), oldPlan.Height) - suite.Require().ErrorIs(err, upgradetypes.ErrNoUpgradedClientFound) - suite.Require().Empty(cs) + cs, err := 
s.chainA.GetSimApp().UpgradeKeeper.GetUpgradedClient(s.chainA.GetContext(), oldPlan.Height) + s.Require().ErrorIs(err, upgradetypes.ErrNoUpgradedClientFound) + s.Require().Empty(cs) // check that client state was set - storedClientState, err := suite.chainA.GetSimApp().UpgradeKeeper.GetUpgradedClient(suite.chainA.GetContext(), plan.Height) - suite.Require().NoError(err) - clientState, err := types.UnmarshalClientState(suite.chainA.App.AppCodec(), storedClientState) - suite.Require().NoError(err) - suite.Require().Equal(upgradedClientState, clientState) + storedClientState, err := s.chainA.GetSimApp().UpgradeKeeper.GetUpgradedClient(s.chainA.GetContext(), plan.Height) + s.Require().NoError(err) + clientState, err := types.UnmarshalClientState(s.chainA.App.AppCodec(), storedClientState) + s.Require().NoError(err) + s.Require().Equal(upgradedClientState, clientState) expectedEvents := sdk.Events{ sdk.NewEvent( @@ -756,25 +756,24 @@ func (suite *KeeperTestSuite) TestIBCSoftwareUpgrade() { }.ToABCIEvents() expectedEvents = sdk.MarkEventsToIndex(expectedEvents, map[string]struct{}{}) - ibctesting.AssertEvents(&suite.Suite, expectedEvents, ctx.EventManager().Events().ToABCIEvents()) - + ibctesting.AssertEvents(&s.Suite, expectedEvents, ctx.EventManager().Events().ToABCIEvents()) } else { // check that the new plan wasn't stored - storedPlan, err := suite.chainA.GetSimApp().UpgradeKeeper.GetUpgradePlan(suite.chainA.GetContext()) + storedPlan, err := s.chainA.GetSimApp().UpgradeKeeper.GetUpgradePlan(s.chainA.GetContext()) if oldPlan.Height != 0 { // NOTE: this is only true if the ScheduleUpgrade function // returns an error before clearing the old plan - suite.Require().NoError(err) - suite.Require().Equal(oldPlan, storedPlan) + s.Require().NoError(err) + s.Require().Equal(oldPlan, storedPlan) } else { - suite.Require().ErrorIs(err, upgradetypes.ErrNoUpgradePlanFound) - suite.Require().Empty(storedPlan) + s.Require().ErrorIs(err, upgradetypes.ErrNoUpgradePlanFound) + s.Require().Empty(storedPlan) } // check that client state was not set - cs, err := suite.chainA.GetSimApp().UpgradeKeeper.GetUpgradedClient(suite.chainA.GetContext(), plan.Height) - suite.Require().Empty(cs) - suite.Require().ErrorIs(err, upgradetypes.ErrNoUpgradedClientFound) + cs, err := s.chainA.GetSimApp().UpgradeKeeper.GetUpgradedClient(s.chainA.GetContext(), plan.Height) + s.Require().Empty(cs) + s.Require().ErrorIs(err, upgradetypes.ErrNoUpgradedClientFound) } }) } diff --git a/modules/core/02-client/keeper/migrations.go b/modules/core/02-client/keeper/migrations.go index 094a8ee636f..e9471e2a34f 100644 --- a/modules/core/02-client/keeper/migrations.go +++ b/modules/core/02-client/keeper/migrations.go @@ -3,8 +3,6 @@ package keeper import ( sdk "github.com/cosmos/cosmos-sdk/types" - "github.com/cosmos/ibc-go/v10/modules/core/02-client/migrations/v7" - "github.com/cosmos/ibc-go/v10/modules/core/02-client/types" host "github.com/cosmos/ibc-go/v10/modules/core/24-host" "github.com/cosmos/ibc-go/v10/modules/core/exported" ) @@ -19,31 +17,6 @@ func NewMigrator(keeper *Keeper) Migrator { return Migrator{keeper: keeper} } -// Migrate2to3 migrates from consensus version 2 to 3. 
-// This migration -// - migrates solo machine client states from v2 to v3 protobuf definition -// - prunes solo machine consensus states -// - removes the localhost client -// - asserts that existing tendermint clients are properly registered on the chain codec -func (m Migrator) Migrate2to3(ctx sdk.Context) error { - return v7.MigrateStore(ctx, m.keeper.storeService, m.keeper.cdc, m.keeper) -} - -// MigrateParams migrates from consensus version 4 to 5. -// This migration takes the parameters that are currently stored and managed by x/params -// and stores them directly in the ibc module's state. -func (m Migrator) MigrateParams(ctx sdk.Context) error { - var params types.Params - m.keeper.legacySubspace.GetParamSet(ctx, ¶ms) - if err := params.Validate(); err != nil { - return err - } - - m.keeper.SetParams(ctx, params) - m.keeper.Logger(ctx).Info("successfully migrated client to self-manage params") - return nil -} - // MigrateToStatelessLocalhost deletes the localhost client state. The localhost // implementation is now stateless. func (m Migrator) MigrateToStatelessLocalhost(ctx sdk.Context) error { diff --git a/modules/core/02-client/keeper/migrations_test.go b/modules/core/02-client/keeper/migrations_test.go index f4f64df34d5..a40995d7d87 100644 --- a/modules/core/02-client/keeper/migrations_test.go +++ b/modules/core/02-client/keeper/migrations_test.go @@ -2,58 +2,22 @@ package keeper_test import ( "github.com/cosmos/ibc-go/v10/modules/core/02-client/keeper" - "github.com/cosmos/ibc-go/v10/modules/core/02-client/types" host "github.com/cosmos/ibc-go/v10/modules/core/24-host" ibcexported "github.com/cosmos/ibc-go/v10/modules/core/exported" ) -// TestMigrateParams tests the migration for the client params -func (suite *KeeperTestSuite) TestMigrateParams() { - testCases := []struct { - name string - malleate func() - expectedParams types.Params - }{ - { - "success: default params", - func() { - params := types.DefaultParams() - subspace := suite.chainA.GetSimApp().GetSubspace(ibcexported.ModuleName) - subspace.SetParamSet(suite.chainA.GetContext(), ¶ms) - }, - types.DefaultParams(), - }, - } - - for _, tc := range testCases { - suite.Run(tc.name, func() { - suite.SetupTest() // reset - - tc.malleate() - - ctx := suite.chainA.GetContext() - migrator := keeper.NewMigrator(suite.chainA.GetSimApp().IBCKeeper.ClientKeeper) - err := migrator.MigrateParams(ctx) - suite.Require().NoError(err) - - params := suite.chainA.GetSimApp().IBCKeeper.ClientKeeper.GetParams(ctx) - suite.Require().Equal(tc.expectedParams, params) - }) - } -} - -func (suite *KeeperTestSuite) TestMigrateToStatelessLocalhost() { +func (s *KeeperTestSuite) TestMigrateToStatelessLocalhost() { // set localhost in state - clientStore := suite.chainA.GetSimApp().IBCKeeper.ClientKeeper.ClientStore(suite.chainA.GetContext(), ibcexported.LocalhostClientID) + clientStore := s.chainA.GetSimApp().IBCKeeper.ClientKeeper.ClientStore(s.chainA.GetContext(), ibcexported.LocalhostClientID) clientStore.Set(host.ClientStateKey(), []byte("clientState")) - m := keeper.NewMigrator(suite.chainA.GetSimApp().IBCKeeper.ClientKeeper) - err := m.MigrateToStatelessLocalhost(suite.chainA.GetContext()) - suite.Require().NoError(err) - suite.Require().False(clientStore.Has(host.ClientStateKey())) + m := keeper.NewMigrator(s.chainA.GetSimApp().IBCKeeper.ClientKeeper) + err := m.MigrateToStatelessLocalhost(s.chainA.GetContext()) + s.Require().NoError(err) + s.Require().False(clientStore.Has(host.ClientStateKey())) // rerun migration on no localhost set - 
err = m.MigrateToStatelessLocalhost(suite.chainA.GetContext()) - suite.Require().NoError(err) - suite.Require().False(clientStore.Has(host.ClientStateKey())) + err = m.MigrateToStatelessLocalhost(s.chainA.GetContext()) + s.Require().NoError(err) + s.Require().False(clientStore.Has(host.ClientStateKey())) } diff --git a/modules/core/02-client/migrations/v7/genesis_test.go b/modules/core/02-client/migrations/v7/genesis_test.go index bfa2130d231..7bd08b48b8b 100644 --- a/modules/core/02-client/migrations/v7/genesis_test.go +++ b/modules/core/02-client/migrations/v7/genesis_test.go @@ -15,26 +15,26 @@ import ( ibctesting "github.com/cosmos/ibc-go/v10/testing" ) -func (suite *MigrationsV7TestSuite) TestMigrateGenesisSolomachine() { +func (s *MigrationsV7TestSuite) TestMigrateGenesisSolomachine() { // create tendermint clients for range 3 { - path := ibctesting.NewPath(suite.chainA, suite.chainB) + path := ibctesting.NewPath(s.chainA, s.chainB) path.SetupClients() err := path.EndpointA.UpdateClient() - suite.Require().NoError(err) + s.Require().NoError(err) // update a second time to add more state err = path.EndpointA.UpdateClient() - suite.Require().NoError(err) + s.Require().NoError(err) } // create multiple legacy solo machine clients - solomachine := ibctesting.NewSolomachine(suite.T(), suite.chainA.Codec, ibctesting.DefaultSolomachineClientID, "testing", 1) - solomachineMulti := ibctesting.NewSolomachine(suite.T(), suite.chainA.Codec, "06-solomachine-1", "testing", 4) + solomachine := ibctesting.NewSolomachine(s.T(), s.chainA.Codec, ibctesting.DefaultSolomachineClientID, "testing", 1) + solomachineMulti := ibctesting.NewSolomachine(s.T(), s.chainA.Codec, "06-solomachine-1", "testing", 4) - clientGenState := ibcclient.ExportGenesis(suite.chainA.GetContext(), suite.chainA.App.GetIBCKeeper().ClientKeeper) + clientGenState := ibcclient.ExportGenesis(s.chainA.GetContext(), s.chainA.App.GetIBCKeeper().ClientKeeper) // manually generate old proto buf definitions and set in genesis // NOTE: we cannot use 'ExportGenesis' for the solo machines since we are @@ -57,8 +57,8 @@ func (suite *MigrationsV7TestSuite) TestMigrateGenesisSolomachine() { // set client state protoAny, err := codectypes.NewAnyWithValue(legacyClientState) - suite.Require().NoError(err) - suite.Require().NotNil(protoAny) + s.Require().NoError(err) + s.Require().NotNil(protoAny) clients = append(clients, types.IdentifiedClientState{ ClientId: sm.ClientID, @@ -66,29 +66,29 @@ func (suite *MigrationsV7TestSuite) TestMigrateGenesisSolomachine() { }) // set in store for ease of determining expected genesis - clientStore := suite.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(suite.chainA.GetContext(), sm.ClientID) + clientStore := s.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(s.chainA.GetContext(), sm.ClientID) - cdc, ok := suite.chainA.App.AppCodec().(*codec.ProtoCodec) - suite.Require().True(ok) + cdc, ok := s.chainA.App.AppCodec().(*codec.ProtoCodec) + s.Require().True(ok) v7.RegisterInterfaces(cdc.InterfaceRegistry()) bz, err := cdc.MarshalInterface(legacyClientState) - suite.Require().NoError(err) + s.Require().NoError(err) clientStore.Set(host.ClientStateKey(), bz) protoAny, err = codectypes.NewAnyWithValue(legacyClientState.ConsensusState) - suite.Require().NoError(err) - suite.Require().NotNil(protoAny) + s.Require().NoError(err) + s.Require().NotNil(protoAny) // obtain marshalled bytes to set in client store bz, err = cdc.MarshalInterface(legacyClientState.ConsensusState) - suite.Require().NoError(err) + 
s.Require().NoError(err) var consensusStates []types.ConsensusStateWithHeight // set consensus states in store and genesis - for i := uint64(0); i < numCreations; i++ { - height := types.NewHeight(1, i) + for i := range numCreations { + height := types.NewHeight(1, uint64(i)) clientStore.Set(host.ConsensusStateKey(height), bz) consensusStates = append(consensusStates, types.ConsensusStateWithHeight{ Height: height, @@ -108,34 +108,34 @@ func (suite *MigrationsV7TestSuite) TestMigrateGenesisSolomachine() { // migrate store get expected genesis // store migration and genesis migration should produce identical results // NOTE: tendermint clients are not pruned in genesis so the test should not have expired tendermint clients - err := v7.MigrateStore(suite.chainA.GetContext(), runtime.NewKVStoreService(suite.chainA.GetSimApp().GetKey(ibcexported.StoreKey)), suite.chainA.App.AppCodec(), suite.chainA.GetSimApp().IBCKeeper.ClientKeeper) - suite.Require().NoError(err) - expectedClientGenState := ibcclient.ExportGenesis(suite.chainA.GetContext(), suite.chainA.App.GetIBCKeeper().ClientKeeper) + err := v7.MigrateStore(s.chainA.GetContext(), runtime.NewKVStoreService(s.chainA.GetSimApp().GetKey(ibcexported.StoreKey)), s.chainA.App.AppCodec(), s.chainA.GetSimApp().IBCKeeper.ClientKeeper) + s.Require().NoError(err) + expectedClientGenState := ibcclient.ExportGenesis(s.chainA.GetContext(), s.chainA.App.GetIBCKeeper().ClientKeeper) - cdc, ok := suite.chainA.App.AppCodec().(codec.ProtoCodecMarshaler) - suite.Require().True(ok) + cdc, ok := s.chainA.App.AppCodec().(codec.ProtoCodecMarshaler) + s.Require().True(ok) migrated, err := v7.MigrateGenesis(&clientGenState, cdc) - suite.Require().NoError(err) + s.Require().NoError(err) bz, err := cdc.MarshalJSON(&expectedClientGenState) - suite.Require().NoError(err) + s.Require().NoError(err) // Indent the JSON bz correctly. var jsonObj map[string]any err = json.Unmarshal(bz, &jsonObj) - suite.Require().NoError(err) + s.Require().NoError(err) expectedIndentedBz, err := json.MarshalIndent(jsonObj, "", "\t") - suite.Require().NoError(err) + s.Require().NoError(err) bz, err = cdc.MarshalJSON(migrated) - suite.Require().NoError(err) + s.Require().NoError(err) // Indent the JSON bz correctly. err = json.Unmarshal(bz, &jsonObj) - suite.Require().NoError(err) + s.Require().NoError(err) indentedBz, err := json.MarshalIndent(jsonObj, "", "\t") - suite.Require().NoError(err) + s.Require().NoError(err) - suite.Require().Equal(string(expectedIndentedBz), string(indentedBz)) + s.Require().Equal(string(expectedIndentedBz), string(indentedBz)) } diff --git a/modules/core/02-client/migrations/v7/store.go b/modules/core/02-client/migrations/v7/store.go index 0b7787fc6d2..0869d1345d1 100644 --- a/modules/core/02-client/migrations/v7/store.go +++ b/modules/core/02-client/migrations/v7/store.go @@ -139,11 +139,12 @@ func handleLocalhostMigration(ctx sdk.Context, store storetypes.KVStore, clientK // avoid state corruption as modifying state during iteration is unsafe. A special case // for tendermint clients is included as only one tendermint clientID is required for // v7 migrations. 
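The collectClients comment above explains why client identifiers are gathered before any store mutation: modifying state while iterating is unsafe, so the migration snapshots the keys first and prunes in a second pass. A small illustrative sketch of that collect-then-modify pattern follows; a plain map stands in for the iterator-backed KV store, and all names are illustrative.

package example

// store is an illustrative stand-in for an iterator-backed KV store, where
// mutating entries during iteration is unsafe.
type store map[string][]byte

// collectKeys snapshots the matching keys without modifying anything,
// mirroring the role of collectClients above.
func collectKeys(s store, match func(string) bool) []string {
	var keys []string
	for k := range s {
		if match(k) {
			keys = append(keys, k)
		}
	}
	return keys
}

// pruneMatching deletes the collected keys in a second pass, after
// iteration has finished.
func pruneMatching(s store, match func(string) bool) {
	for _, k := range collectKeys(s, match) {
		delete(s, k)
	}
}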
-func collectClients(ctx sdk.Context, store storetypes.KVStore, clientType string) (clients []string, err error) { +func collectClients(ctx sdk.Context, store storetypes.KVStore, clientType string) ([]string, error) { clientPrefix := host.PrefixedClientStoreKey([]byte(clientType)) iterator := storetypes.KVStorePrefixIterator(store, clientPrefix) defer sdk.LogDeferred(ctx.Logger(), func() error { return iterator.Close() }) + var clients []string for ; iterator.Valid(); iterator.Next() { path := string(iterator.Key()) if !strings.Contains(path, host.KeyClientState) { diff --git a/modules/core/02-client/migrations/v7/store_test.go b/modules/core/02-client/migrations/v7/store_test.go index 863a81d37b3..cf8ae206a96 100644 --- a/modules/core/02-client/migrations/v7/store_test.go +++ b/modules/core/02-client/migrations/v7/store_test.go @@ -1,7 +1,7 @@ package v7_test import ( - "strconv" + "fmt" "testing" testifysuite "github.com/stretchr/testify/suite" @@ -29,24 +29,24 @@ type MigrationsV7TestSuite struct { chainB *ibctesting.TestChain } -func (suite *MigrationsV7TestSuite) SetupTest() { - suite.coordinator = ibctesting.NewCoordinator(suite.T(), 2) - - suite.chainA = suite.coordinator.GetChain(ibctesting.GetChainID(1)) - suite.chainB = suite.coordinator.GetChain(ibctesting.GetChainID(2)) -} - func TestIBCTestSuite(t *testing.T) { testifysuite.Run(t, new(MigrationsV7TestSuite)) } +func (s *MigrationsV7TestSuite) SetupTest() { + s.coordinator = ibctesting.NewCoordinator(s.T(), 2) + + s.chainA = s.coordinator.GetChain(ibctesting.GetChainID(1)) + s.chainB = s.coordinator.GetChain(ibctesting.GetChainID(2)) +} + // create multiple solo machine clients, tendermint and localhost clients // ensure that solo machine clients are migrated and their consensus states are removed // ensure the localhost is deleted entirely. 
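Several hunks in this file and in genesis_test.go above replace counted loops such as for i := uint64(0); i < numCreations; i++ with for i := range numCreations, relying on Go 1.22's range-over-integer form. A minimal example of the equivalence, using a local constant in place of the suite's numCreations:

package main

import "fmt"

func main() {
	const numCreations = 3 // illustrative; stands in for the test suite constant

	// Counted loop used before this change.
	for i := uint64(0); i < numCreations; i++ {
		fmt.Println("classic:", i)
	}

	// Go 1.22+ range-over-integer form used by the updated tests. With an
	// untyped constant the loop variable defaults to int, which is why the
	// updated call sites convert it with uint64(i).
	for i := range numCreations {
		fmt.Println("range:", uint64(i))
	}
}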
-func (suite *MigrationsV7TestSuite) TestMigrateStore() { +func (s *MigrationsV7TestSuite) TestMigrateStore() { paths := []*ibctesting.Path{ - ibctesting.NewPath(suite.chainA, suite.chainB), - ibctesting.NewPath(suite.chainA, suite.chainB), + ibctesting.NewPath(s.chainA, s.chainB), + ibctesting.NewPath(s.chainA, s.chainB), } // create tendermint clients @@ -55,43 +55,43 @@ func (suite *MigrationsV7TestSuite) TestMigrateStore() { } solomachines := []*ibctesting.Solomachine{ - ibctesting.NewSolomachine(suite.T(), suite.chainA.Codec, ibctesting.DefaultSolomachineClientID, "testing", 1), - ibctesting.NewSolomachine(suite.T(), suite.chainA.Codec, "06-solomachine-1", "testing", 4), + ibctesting.NewSolomachine(s.T(), s.chainA.Codec, ibctesting.DefaultSolomachineClientID, "testing", 1), + ibctesting.NewSolomachine(s.T(), s.chainA.Codec, "06-solomachine-1", "testing", 4), } - suite.createSolomachineClients(solomachines) - suite.createLocalhostClients() + s.createSolomachineClients(solomachines) + s.createLocalhostClients() - err := v7.MigrateStore(suite.chainA.GetContext(), runtime.NewKVStoreService(suite.chainA.GetSimApp().GetKey(ibcexported.StoreKey)), suite.chainA.App.AppCodec(), suite.chainA.GetSimApp().IBCKeeper.ClientKeeper) - suite.Require().NoError(err) + err := v7.MigrateStore(s.chainA.GetContext(), runtime.NewKVStoreService(s.chainA.GetSimApp().GetKey(ibcexported.StoreKey)), s.chainA.App.AppCodec(), s.chainA.GetSimApp().IBCKeeper.ClientKeeper) + s.Require().NoError(err) - suite.assertSolomachineClients(solomachines) - suite.assertNoLocalhostClients() + s.assertSolomachineClients(solomachines) + s.assertNoLocalhostClients() } -func (suite *MigrationsV7TestSuite) TestMigrateStoreNoTendermintClients() { +func (s *MigrationsV7TestSuite) TestMigrateStoreNoTendermintClients() { solomachines := []*ibctesting.Solomachine{ - ibctesting.NewSolomachine(suite.T(), suite.chainA.Codec, ibctesting.DefaultSolomachineClientID, "testing", 1), - ibctesting.NewSolomachine(suite.T(), suite.chainA.Codec, "06-solomachine-1", "testing", 4), + ibctesting.NewSolomachine(s.T(), s.chainA.Codec, ibctesting.DefaultSolomachineClientID, "testing", 1), + ibctesting.NewSolomachine(s.T(), s.chainA.Codec, "06-solomachine-1", "testing", 4), } - suite.createSolomachineClients(solomachines) - suite.createLocalhostClients() + s.createSolomachineClients(solomachines) + s.createLocalhostClients() - err := v7.MigrateStore(suite.chainA.GetContext(), runtime.NewKVStoreService(suite.chainA.GetSimApp().GetKey(ibcexported.StoreKey)), suite.chainA.App.AppCodec(), suite.chainA.GetSimApp().IBCKeeper.ClientKeeper) - suite.Require().NoError(err) + err := v7.MigrateStore(s.chainA.GetContext(), runtime.NewKVStoreService(s.chainA.GetSimApp().GetKey(ibcexported.StoreKey)), s.chainA.App.AppCodec(), s.chainA.GetSimApp().IBCKeeper.ClientKeeper) + s.Require().NoError(err) - suite.assertSolomachineClients(solomachines) - suite.assertNoLocalhostClients() + s.assertSolomachineClients(solomachines) + s.assertNoLocalhostClients() } -func (suite *MigrationsV7TestSuite) createSolomachineClients(solomachines []*ibctesting.Solomachine) { +func (s *MigrationsV7TestSuite) createSolomachineClients(solomachines []*ibctesting.Solomachine) { // manually generate old protobuf definitions and set in store // NOTE: we cannot use 'CreateClient' and 'UpdateClient' functions since we are // using client states and consensus states which do not implement the exported.ClientState // and exported.ConsensusState interface for _, sm := range solomachines { - clientStore := 
suite.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(suite.chainA.GetContext(), sm.ClientID) + clientStore := s.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(s.chainA.GetContext(), sm.ClientID) clientState := sm.ClientState() // generate old client state proto definition @@ -105,48 +105,47 @@ func (suite *MigrationsV7TestSuite) createSolomachineClients(solomachines []*ibc AllowUpdateAfterProposal: true, } - cdc, ok := suite.chainA.App.AppCodec().(*codec.ProtoCodec) - suite.Require().True(ok) + cdc, ok := s.chainA.App.AppCodec().(*codec.ProtoCodec) + s.Require().True(ok) v7.RegisterInterfaces(cdc.InterfaceRegistry()) bz, err := cdc.MarshalInterface(legacyClientState) - suite.Require().NoError(err) + s.Require().NoError(err) clientStore.Set(host.ClientStateKey(), bz) bz, err = cdc.MarshalInterface(legacyClientState.ConsensusState) - suite.Require().NoError(err) + s.Require().NoError(err) // set some consensus states - for i := uint64(0); i < numCreations; i++ { - height := types.NewHeight(1, i) + for i := range numCreations { + height := types.NewHeight(1, uint64(i)) clientStore.Set(host.ConsensusStateKey(height), bz) } - } } -func (suite *MigrationsV7TestSuite) assertSolomachineClients(solomachines []*ibctesting.Solomachine) { +func (s *MigrationsV7TestSuite) assertSolomachineClients(solomachines []*ibctesting.Solomachine) { // verify client state has been migrated for _, sm := range solomachines { - clientState, ok := suite.chainA.App.GetIBCKeeper().ClientKeeper.GetClientState(suite.chainA.GetContext(), sm.ClientID) - suite.Require().True(ok) - suite.Require().Equal(sm.ClientState(), clientState) + clientState, ok := s.chainA.App.GetIBCKeeper().ClientKeeper.GetClientState(s.chainA.GetContext(), sm.ClientID) + s.Require().True(ok) + s.Require().Equal(sm.ClientState(), clientState) - for i := uint64(0); i < numCreations; i++ { - height := types.NewHeight(1, i) + for i := range numCreations { + height := types.NewHeight(1, uint64(i)) - consState, ok := suite.chainA.App.GetIBCKeeper().ClientKeeper.GetClientConsensusState(suite.chainA.GetContext(), sm.ClientID, height) - suite.Require().False(ok) - suite.Require().Empty(consState) + consState, ok := s.chainA.App.GetIBCKeeper().ClientKeeper.GetClientConsensusState(s.chainA.GetContext(), sm.ClientID, height) + s.Require().False(ok) + s.Require().Empty(consState) } } } // createLocalhostClients clients creates multiple localhost clients and multiple consensus states for each -func (suite *MigrationsV7TestSuite) createLocalhostClients() { - for numClients := uint64(0); numClients < numCreations; numClients++ { - clientID := v7.Localhost + "-" + strconv.FormatUint(numClients, 10) - clientStore := suite.chainA.GetSimApp().IBCKeeper.ClientKeeper.ClientStore(suite.chainA.GetContext(), clientID) +func (s *MigrationsV7TestSuite) createLocalhostClients() { + for numClients := range numCreations { + clientID := fmt.Sprintf("%s-%d", v7.Localhost, numClients) + clientStore := s.chainA.GetSimApp().IBCKeeper.ClientKeeper.ClientStore(s.chainA.GetContext(), clientID) clientStore.Set(host.ClientStateKey(), []byte("clientState")) @@ -157,15 +156,15 @@ func (suite *MigrationsV7TestSuite) createLocalhostClients() { } // assertNoLocalhostClients asserts that all localhost information has been deleted -func (suite *MigrationsV7TestSuite) assertNoLocalhostClients() { - for numClients := uint64(0); numClients < numCreations; numClients++ { - clientID := v7.Localhost + "-" + strconv.FormatUint(numClients, 10) - clientStore := 
suite.chainA.GetSimApp().IBCKeeper.ClientKeeper.ClientStore(suite.chainA.GetContext(), clientID) +func (s *MigrationsV7TestSuite) assertNoLocalhostClients() { + for numClients := range numCreations { + clientID := fmt.Sprintf("%s-%d", v7.Localhost, numClients) + clientStore := s.chainA.GetSimApp().IBCKeeper.ClientKeeper.ClientStore(s.chainA.GetContext(), clientID) - suite.Require().False(clientStore.Has(host.ClientStateKey())) + s.Require().False(clientStore.Has(host.ClientStateKey())) - for i := uint64(0); i < numCreations; i++ { - suite.Require().False(clientStore.Has(host.ConsensusStateKey(types.NewHeight(1, i)))) + for i := range numCreations { + s.Require().False(clientStore.Has(host.ConsensusStateKey(types.NewHeight(1, uint64(i))))) } } } diff --git a/modules/core/02-client/types/client_test.go b/modules/core/02-client/types/client_test.go index 4b187b10ed1..bd3762bd57a 100644 --- a/modules/core/02-client/types/client_test.go +++ b/modules/core/02-client/types/client_test.go @@ -10,7 +10,7 @@ import ( ibctesting "github.com/cosmos/ibc-go/v10/testing" ) -func (suite *TypesTestSuite) TestMarshalConsensusStateWithHeight() { +func (s *TypesTestSuite) TestMarshalConsensusStateWithHeight() { var cswh types.ConsensusStateWithHeight testCases := []struct { @@ -19,19 +19,19 @@ func (suite *TypesTestSuite) TestMarshalConsensusStateWithHeight() { }{ { "solo machine client", func() { - soloMachine := ibctesting.NewSolomachine(suite.T(), suite.chainA.Codec, "solomachine", "", 1) + soloMachine := ibctesting.NewSolomachine(s.T(), s.chainA.Codec, "solomachine", "", 1) cswh = types.NewConsensusStateWithHeight(types.NewHeight(0, soloMachine.Sequence), soloMachine.ConsensusState()) }, }, { "tendermint client", func() { - path := ibctesting.NewPath(suite.chainA, suite.chainB) + path := ibctesting.NewPath(s.chainA, s.chainB) path.SetupClients() latestHeight, ok := path.EndpointA.GetClientLatestHeight().(types.Height) - suite.Require().True(ok) - consensusState, ok := suite.chainA.GetConsensusState(path.EndpointA.ClientID, latestHeight) - suite.Require().True(ok) + s.Require().True(ok) + consensusState, ok := s.chainA.GetConsensusState(path.EndpointA.ClientID, latestHeight) + s.Require().True(ok) cswh = types.NewConsensusStateWithHeight(latestHeight, consensusState) }, @@ -39,21 +39,21 @@ func (suite *TypesTestSuite) TestMarshalConsensusStateWithHeight() { } for _, tc := range testCases { - suite.Run(tc.name, func() { - suite.SetupTest() + s.Run(tc.name, func() { + s.SetupTest() tc.malleate() - cdc := suite.chainA.App.AppCodec() + cdc := s.chainA.App.AppCodec() // marshal message bz, err := cdc.MarshalJSON(&cswh) - suite.Require().NoError(err) + s.Require().NoError(err) // unmarshal message newCswh := &types.ConsensusStateWithHeight{} err = cdc.UnmarshalJSON(bz, newCswh) - suite.Require().NoError(err) + s.Require().NoError(err) }) } } @@ -74,7 +74,6 @@ func TestValidateClientType(t *testing.T) { } for _, tc := range testCases { - err := types.ValidateClientType(tc.clientType) if tc.expError == nil { diff --git a/modules/core/02-client/types/codec.go b/modules/core/02-client/types/codec.go index 0d44d3b7d1f..f7bbd2a4628 100644 --- a/modules/core/02-client/types/codec.go +++ b/modules/core/02-client/types/codec.go @@ -5,6 +5,8 @@ import ( errorsmod "cosmossdk.io/errors" + "github.com/cosmos/cosmos-sdk/codec" + "github.com/cosmos/cosmos-sdk/codec/legacy" codectypes "github.com/cosmos/cosmos-sdk/codec/types" sdk "github.com/cosmos/cosmos-sdk/types" "github.com/cosmos/cosmos-sdk/types/msgservice" @@ -14,6 
+16,12 @@ import ( "github.com/cosmos/ibc-go/v10/modules/core/exported" ) +// RegisterLegacyAminoCodec registers the necessary interfaces and concrete types +// on the provided LegacyAmino codec. These types are used for Amino JSON serialization. +func RegisterLegacyAminoCodec(cdc *codec.LegacyAmino) { + legacy.RegisterAminoMsg(cdc, &MsgRecoverClient{}, "cosmos-sdk/MsgRecoverClient") +} + // RegisterInterfaces registers the client interfaces to protobuf Any. func RegisterInterfaces(registry codectypes.InterfaceRegistry) { registry.RegisterInterface( @@ -42,7 +50,6 @@ func RegisterInterfaces(registry codectypes.InterfaceRegistry) { &MsgCreateClient{}, &MsgUpdateClient{}, &MsgUpgradeClient{}, - &MsgSubmitMisbehaviour{}, &MsgRecoverClient{}, &MsgIBCSoftwareUpgrade{}, &MsgUpdateParams{}, diff --git a/modules/core/02-client/types/codec_test.go b/modules/core/02-client/types/codec_test.go index 0b2049fbeab..8a09c1f25d9 100644 --- a/modules/core/02-client/types/codec_test.go +++ b/modules/core/02-client/types/codec_test.go @@ -22,7 +22,7 @@ type caseAny struct { expErr error } -func (suite *TypesTestSuite) TestPackClientState() { +func (s *TypesTestSuite) TestPackClientState() { testCases := []struct { name string clientState exported.ClientState @@ -30,12 +30,12 @@ func (suite *TypesTestSuite) TestPackClientState() { }{ { "solo machine client", - ibctesting.NewSolomachine(suite.T(), suite.chainA.Codec, "solomachine", "", 2).ClientState(), + ibctesting.NewSolomachine(s.T(), s.chainA.Codec, "solomachine", "", 2).ClientState(), nil, }, { "tendermint client", - ibctm.NewClientState(suite.chainA.ChainID, ibctesting.DefaultTrustLevel, ibctesting.TrustingPeriod, ibctesting.UnbondingPeriod, ibctesting.MaxClockDrift, clientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath), + ibctm.NewClientState(s.chainA.ChainID, ibctesting.DefaultTrustLevel, ibctesting.TrustingPeriod, ibctesting.UnbondingPeriod, ibctesting.MaxClockDrift, clientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath), nil, }, { @@ -48,12 +48,11 @@ func (suite *TypesTestSuite) TestPackClientState() { testCasesAny := []caseAny{} for _, tc := range testCases { - protoAny, err := types.PackClientState(tc.clientState) if tc.expErr == nil { - suite.Require().NoError(err, tc.name) + s.Require().NoError(err, tc.name) } else { - suite.Require().Error(err, tc.name) + s.Require().Error(err, tc.name) } testCasesAny = append(testCasesAny, caseAny{tc.name, protoAny, tc.expErr}) @@ -62,16 +61,16 @@ func (suite *TypesTestSuite) TestPackClientState() { for i, tc := range testCasesAny { cs, err := types.UnpackClientState(tc.any) if tc.expErr == nil { - suite.Require().NoError(err, tc.name) - suite.Require().Equal(testCases[i].clientState, cs, tc.name) + s.Require().NoError(err, tc.name) + s.Require().Equal(testCases[i].clientState, cs, tc.name) } else { - suite.Require().Error(err, tc.name) - suite.Require().ErrorIs(err, tc.expErr) + s.Require().Error(err, tc.name) + s.Require().ErrorIs(err, tc.expErr) } } } -func (suite *TypesTestSuite) TestPackConsensusState() { +func (s *TypesTestSuite) TestPackConsensusState() { testCases := []struct { name string consensusState exported.ConsensusState @@ -79,12 +78,12 @@ func (suite *TypesTestSuite) TestPackConsensusState() { }{ { "solo machine consensus", - ibctesting.NewSolomachine(suite.T(), suite.chainA.Codec, "solomachine", "", 2).ConsensusState(), + ibctesting.NewSolomachine(s.T(), s.chainA.Codec, "solomachine", "", 2).ConsensusState(), nil, }, { "tendermint consensus", - 
suite.chainA.LatestCommittedHeader.ConsensusState(), + s.chainA.LatestCommittedHeader.ConsensusState(), nil, }, { @@ -97,30 +96,28 @@ func (suite *TypesTestSuite) TestPackConsensusState() { testCasesAny := []caseAny{} for _, tc := range testCases { - protoAny, err := types.PackConsensusState(tc.consensusState) if tc.expErr == nil { - suite.Require().NoError(err, tc.name) + s.Require().NoError(err, tc.name) } else { - suite.Require().Error(err, tc.name) + s.Require().Error(err, tc.name) } testCasesAny = append(testCasesAny, caseAny{tc.name, protoAny, tc.expErr}) } for i, tc := range testCasesAny { - cs, err := types.UnpackConsensusState(tc.any) if tc.expErr == nil { - suite.Require().NoError(err, tc.name) - suite.Require().Equal(testCases[i].consensusState, cs, tc.name) + s.Require().NoError(err, tc.name) + s.Require().Equal(testCases[i].consensusState, cs, tc.name) } else { - suite.Require().Error(err, tc.name) - suite.Require().ErrorIs(err, tc.expErr) + s.Require().Error(err, tc.name) + s.Require().ErrorIs(err, tc.expErr) } } } -func (suite *TypesTestSuite) TestPackClientMessage() { +func (s *TypesTestSuite) TestPackClientMessage() { testCases := []struct { name string clientMessage exported.ClientMessage @@ -128,12 +125,12 @@ func (suite *TypesTestSuite) TestPackClientMessage() { }{ { "solo machine header", - ibctesting.NewSolomachine(suite.T(), suite.chainA.Codec, "solomachine", "", 2).CreateHeader("solomachine"), + ibctesting.NewSolomachine(s.T(), s.chainA.Codec, "solomachine", "", 2).CreateHeader("solomachine"), nil, }, { "tendermint header", - suite.chainA.LatestCommittedHeader, + s.chainA.LatestCommittedHeader, nil, }, { @@ -146,31 +143,29 @@ func (suite *TypesTestSuite) TestPackClientMessage() { testCasesAny := []caseAny{} for _, tc := range testCases { - protoAny, err := types.PackClientMessage(tc.clientMessage) if tc.expErr == nil { - suite.Require().NoError(err, tc.name) + s.Require().NoError(err, tc.name) } else { - suite.Require().Error(err, tc.name) + s.Require().Error(err, tc.name) } testCasesAny = append(testCasesAny, caseAny{tc.name, protoAny, tc.expErr}) } for i, tc := range testCasesAny { - cs, err := types.UnpackClientMessage(tc.any) if tc.expErr == nil { - suite.Require().NoError(err, tc.name) - suite.Require().Equal(testCases[i].clientMessage, cs, tc.name) + s.Require().NoError(err, tc.name) + s.Require().Equal(testCases[i].clientMessage, cs, tc.name) } else { - suite.Require().Error(err, tc.name) - suite.Require().ErrorIs(err, tc.expErr) + s.Require().Error(err, tc.name) + s.Require().ErrorIs(err, tc.expErr) } } } -func (suite *TypesTestSuite) TestCodecTypeRegistration() { +func (s *TypesTestSuite) TestCodecTypeRegistration() { testCases := []struct { name string typeURL string @@ -191,11 +186,6 @@ func (suite *TypesTestSuite) TestCodecTypeRegistration() { sdk.MsgTypeURL(&types.MsgUpgradeClient{}), nil, }, - { - "success: MsgSubmitMisbehaviour", - sdk.MsgTypeURL(&types.MsgSubmitMisbehaviour{}), - nil, - }, { "success: MsgRecoverClient", sdk.MsgTypeURL(&types.MsgRecoverClient{}), @@ -229,16 +219,16 @@ func (suite *TypesTestSuite) TestCodecTypeRegistration() { } for _, tc := range testCases { - suite.Run(tc.name, func() { - msg, err := suite.chainA.GetSimApp().AppCodec().InterfaceRegistry().Resolve(tc.typeURL) + s.Run(tc.name, func() { + msg, err := s.chainA.GetSimApp().AppCodec().InterfaceRegistry().Resolve(tc.typeURL) if tc.expErr == nil { - suite.Require().NotNil(msg) - suite.Require().NoError(err) + s.Require().NotNil(msg) + s.Require().NoError(err) } else { - 
suite.Require().Nil(msg) - suite.Require().Error(err) - suite.Require().Equal(err.Error(), tc.expErr.Error()) + s.Require().Nil(msg) + s.Require().Error(err) + s.Require().Equal(err.Error(), tc.expErr.Error()) } }) } diff --git a/modules/core/02-client/types/encoding_test.go b/modules/core/02-client/types/encoding_test.go index dc8319813ab..36e087ba958 100644 --- a/modules/core/02-client/types/encoding_test.go +++ b/modules/core/02-client/types/encoding_test.go @@ -5,24 +5,24 @@ import ( ibctm "github.com/cosmos/ibc-go/v10/modules/light-clients/07-tendermint" ) -func (suite *TypesTestSuite) TestMarshalHeader() { - cdc := suite.chainA.App.AppCodec() +func (s *TypesTestSuite) TestMarshalHeader() { + cdc := s.chainA.App.AppCodec() h := &ibctm.Header{ TrustedHeight: types.NewHeight(4, 100), } // marshal header bz, err := types.MarshalClientMessage(cdc, h) - suite.Require().NoError(err) + s.Require().NoError(err) // unmarshal header newHeader, err := types.UnmarshalClientMessage(cdc, bz) - suite.Require().NoError(err) + s.Require().NoError(err) - suite.Require().Equal(h, newHeader) + s.Require().Equal(h, newHeader) // use invalid bytes invalidHeader, err := types.UnmarshalClientMessage(cdc, []byte("invalid bytes")) - suite.Require().Error(err) - suite.Require().Nil(invalidHeader) + s.Require().Error(err) + s.Require().Nil(invalidHeader) } diff --git a/modules/core/02-client/types/expected_keepers.go b/modules/core/02-client/types/expected_keepers.go index 914dfd080f8..3da0f3081a7 100644 --- a/modules/core/02-client/types/expected_keepers.go +++ b/modules/core/02-client/types/expected_keepers.go @@ -4,9 +4,6 @@ import ( "context" upgradetypes "cosmossdk.io/x/upgrade/types" - - sdk "github.com/cosmos/cosmos-sdk/types" - paramtypes "github.com/cosmos/cosmos-sdk/x/params/types" ) // UpgradeKeeper expected upgrade keeper @@ -18,8 +15,3 @@ type UpgradeKeeper interface { SetUpgradedConsensusState(ctx context.Context, planHeight int64, bz []byte) error ScheduleUpgrade(ctx context.Context, plan upgradetypes.Plan) error } - -// ParamSubspace defines the expected Subspace interface for module parameters. 
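The TestMarshalHeader hunk above follows the marshal/unmarshal round-trip pattern used throughout these codec tests: encode a value, decode it back, require equality, and check that invalid bytes fail. A self-contained sketch of that pattern, with encoding/json standing in for the app codec and an illustrative header type:

package example

import (
	"encoding/json"
	"reflect"
	"testing"
)

type header struct {
	RevisionNumber uint64 `json:"revision_number"`
	RevisionHeight uint64 `json:"revision_height"`
}

func TestHeaderRoundTrip(t *testing.T) {
	h := header{RevisionNumber: 4, RevisionHeight: 100}

	bz, err := json.Marshal(h)
	if err != nil {
		t.Fatalf("marshal: %v", err)
	}

	var decoded header
	if err := json.Unmarshal(bz, &decoded); err != nil {
		t.Fatalf("unmarshal: %v", err)
	}
	if !reflect.DeepEqual(h, decoded) {
		t.Fatalf("round trip mismatch: %+v != %+v", h, decoded)
	}

	// Invalid input must fail, mirroring the "invalid bytes" case above.
	if err := json.Unmarshal([]byte("invalid bytes"), &decoded); err == nil {
		t.Fatal("expected error for invalid bytes")
	}
}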
-type ParamSubspace interface { - GetParamSet(ctx sdk.Context, ps paramtypes.ParamSet) -} diff --git a/modules/core/02-client/types/genesis.go b/modules/core/02-client/types/genesis.go index f3b4a2d992f..5837157d281 100644 --- a/modules/core/02-client/types/genesis.go +++ b/modules/core/02-client/types/genesis.go @@ -179,7 +179,6 @@ func (gs GenesisState) Validate() error { if clientType != cs.ClientType() { return fmt.Errorf("consensus state client type %s does not equal client state client type %s", cs.ClientType(), clientType) } - } } @@ -195,7 +194,6 @@ func (gs GenesisState) Validate() error { return fmt.Errorf("invalid client metadata %v clientID %s index %d: %w", gm, clientMetadata.ClientId, i, err) } } - } if maxSequence != 0 && maxSequence >= gs.NextClientSequence { diff --git a/modules/core/02-client/types/genesis_test.go b/modules/core/02-client/types/genesis_test.go index f49dd0c0f13..5b11bf165b2 100644 --- a/modules/core/02-client/types/genesis_test.go +++ b/modules/core/02-client/types/genesis_test.go @@ -26,28 +26,28 @@ const ( var clientHeight = types.NewHeight(1, 10) -func (suite *TypesTestSuite) TestMarshalGenesisState() { - cdc := suite.chainA.App.AppCodec() - path := ibctesting.NewPath(suite.chainA, suite.chainB) +func (s *TypesTestSuite) TestMarshalGenesisState() { + cdc := s.chainA.App.AppCodec() + path := ibctesting.NewPath(s.chainA, s.chainB) path.Setup() err := path.EndpointA.UpdateClient() - suite.Require().NoError(err) + s.Require().NoError(err) - genesis := client.ExportGenesis(suite.chainA.GetContext(), suite.chainA.App.GetIBCKeeper().ClientKeeper) + genesis := client.ExportGenesis(s.chainA.GetContext(), s.chainA.App.GetIBCKeeper().ClientKeeper) bz, err := cdc.MarshalJSON(&genesis) - suite.Require().NoError(err) - suite.Require().NotNil(bz) + s.Require().NoError(err) + s.Require().NotNil(bz) var gs types.GenesisState err = cdc.UnmarshalJSON(bz, &gs) - suite.Require().NoError(err) + s.Require().NoError(err) } -func (suite *TypesTestSuite) TestValidateGenesis() { +func (s *TypesTestSuite) TestValidateGenesis() { privVal := cmttypes.NewMockPV() pubKey, err := privVal.GetPubKey() - suite.Require().NoError(err) + s.Require().NoError(err) now := time.Now().UTC() @@ -58,7 +58,7 @@ func (suite *TypesTestSuite) TestValidateGenesis() { signers[val.Address.String()] = privVal heightMinus1 := types.NewHeight(1, height-1) - header := suite.chainA.CreateTMClientHeader(suite.chainA.ChainID, int64(clientHeight.RevisionHeight), heightMinus1, now, valSet, valSet, valSet, signers) + header := s.chainA.CreateTMClientHeader(s.chainA.ChainID, int64(clientHeight.RevisionHeight), heightMinus1, now, valSet, valSet, valSet, signers) testCases := []struct { name string @@ -75,7 +75,7 @@ func (suite *TypesTestSuite) TestValidateGenesis() { genState: types.NewGenesisState( []types.IdentifiedClientState{ types.NewIdentifiedClientState( - tmClientID0, ibctm.NewClientState(suite.chainA.ChainID, ibctesting.DefaultTrustLevel, ibctesting.TrustingPeriod, ibctesting.UnbondingPeriod, ibctesting.MaxClockDrift, clientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath), + tmClientID0, ibctm.NewClientState(s.chainA.ChainID, ibctesting.DefaultTrustLevel, ibctesting.TrustingPeriod, ibctesting.UnbondingPeriod, ibctesting.MaxClockDrift, clientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath), ), }, []types.ClientConsensusStates{ @@ -111,9 +111,9 @@ func (suite *TypesTestSuite) TestValidateGenesis() { genState: types.NewGenesisState( []types.IdentifiedClientState{ 
types.NewIdentifiedClientState( - ibctesting.DefaultSolomachineClientID, ibctm.NewClientState(suite.chainA.ChainID, ibctm.DefaultTrustLevel, ibctesting.TrustingPeriod, ibctesting.UnbondingPeriod, ibctesting.MaxClockDrift, clientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath), + ibctesting.DefaultSolomachineClientID, ibctm.NewClientState(s.chainA.ChainID, ibctm.DefaultTrustLevel, ibctesting.TrustingPeriod, ibctesting.UnbondingPeriod, ibctesting.MaxClockDrift, clientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath), ), - types.NewIdentifiedClientState(tmClientID0, solomachine.NewClientState(0, &solomachine.ConsensusState{PublicKey: suite.solomachine.ConsensusState().PublicKey, Diversifier: suite.solomachine.Diversifier, Timestamp: suite.solomachine.Time})), + types.NewIdentifiedClientState(tmClientID0, solomachine.NewClientState(0, &solomachine.ConsensusState{PublicKey: s.solomachine.ConsensusState().PublicKey, Diversifier: s.solomachine.Diversifier, Timestamp: s.solomachine.Time})), }, nil, nil, @@ -128,7 +128,7 @@ func (suite *TypesTestSuite) TestValidateGenesis() { genState: types.NewGenesisState( []types.IdentifiedClientState{ types.NewIdentifiedClientState( - invalidClientID, ibctm.NewClientState(suite.chainA.ChainID, ibctm.DefaultTrustLevel, ibctesting.TrustingPeriod, ibctesting.UnbondingPeriod, ibctesting.MaxClockDrift, clientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath), + invalidClientID, ibctm.NewClientState(s.chainA.ChainID, ibctm.DefaultTrustLevel, ibctesting.TrustingPeriod, ibctesting.UnbondingPeriod, ibctesting.MaxClockDrift, clientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath), ), }, []types.ClientConsensusStates{ @@ -156,7 +156,7 @@ func (suite *TypesTestSuite) TestValidateGenesis() { genState: types.NewGenesisState( []types.IdentifiedClientState{ types.NewIdentifiedClientState( - tmClientID0, ibctm.NewClientState(suite.chainA.ChainID, ibctm.DefaultTrustLevel, ibctesting.TrustingPeriod, ibctesting.UnbondingPeriod, ibctesting.MaxClockDrift, clientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath), + tmClientID0, ibctm.NewClientState(s.chainA.ChainID, ibctm.DefaultTrustLevel, ibctesting.TrustingPeriod, ibctesting.UnbondingPeriod, ibctesting.MaxClockDrift, clientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath), ), }, []types.ClientConsensusStates{ @@ -184,7 +184,7 @@ func (suite *TypesTestSuite) TestValidateGenesis() { genState: types.NewGenesisState( []types.IdentifiedClientState{ types.NewIdentifiedClientState( - tmClientID0, ibctm.NewClientState(suite.chainA.ChainID, ibctm.DefaultTrustLevel, ibctesting.TrustingPeriod, ibctesting.UnbondingPeriod, ibctesting.MaxClockDrift, clientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath), + tmClientID0, ibctm.NewClientState(s.chainA.ChainID, ibctm.DefaultTrustLevel, ibctesting.TrustingPeriod, ibctesting.UnbondingPeriod, ibctesting.MaxClockDrift, clientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath), ), }, []types.ClientConsensusStates{ @@ -212,7 +212,7 @@ func (suite *TypesTestSuite) TestValidateGenesis() { genState: types.NewGenesisState( []types.IdentifiedClientState{ types.NewIdentifiedClientState( - tmClientID0, ibctm.NewClientState(suite.chainA.ChainID, ibctm.DefaultTrustLevel, ibctesting.TrustingPeriod, ibctesting.UnbondingPeriod, ibctesting.MaxClockDrift, clientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath), + tmClientID0, ibctm.NewClientState(s.chainA.ChainID, ibctm.DefaultTrustLevel, 
ibctesting.TrustingPeriod, ibctesting.UnbondingPeriod, ibctesting.MaxClockDrift, clientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath), ), }, []types.ClientConsensusStates{ @@ -240,7 +240,7 @@ func (suite *TypesTestSuite) TestValidateGenesis() { genState: types.NewGenesisState( []types.IdentifiedClientState{ types.NewIdentifiedClientState( - tmClientID0, ibctm.NewClientState(suite.chainA.ChainID, ibctesting.DefaultTrustLevel, ibctesting.TrustingPeriod, ibctesting.UnbondingPeriod, ibctesting.MaxClockDrift, clientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath), + tmClientID0, ibctm.NewClientState(s.chainA.ChainID, ibctesting.DefaultTrustLevel, ibctesting.TrustingPeriod, ibctesting.UnbondingPeriod, ibctesting.MaxClockDrift, clientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath), ), }, []types.ClientConsensusStates{ @@ -268,7 +268,7 @@ func (suite *TypesTestSuite) TestValidateGenesis() { genState: types.NewGenesisState( []types.IdentifiedClientState{ types.NewIdentifiedClientState( - clientID, ibctm.NewClientState(suite.chainA.ChainID, ibctesting.DefaultTrustLevel, ibctesting.TrustingPeriod, ibctesting.UnbondingPeriod, ibctesting.MaxClockDrift, clientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath), + clientID, ibctm.NewClientState(s.chainA.ChainID, ibctesting.DefaultTrustLevel, ibctesting.TrustingPeriod, ibctesting.UnbondingPeriod, ibctesting.MaxClockDrift, clientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath), ), }, []types.ClientConsensusStates{ @@ -304,7 +304,7 @@ func (suite *TypesTestSuite) TestValidateGenesis() { genState: types.NewGenesisState( []types.IdentifiedClientState{ types.NewIdentifiedClientState( - clientID, ibctm.NewClientState(suite.chainA.ChainID, ibctm.DefaultTrustLevel, ibctesting.TrustingPeriod, ibctesting.UnbondingPeriod, ibctesting.MaxClockDrift, clientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath), + clientID, ibctm.NewClientState(s.chainA.ChainID, ibctm.DefaultTrustLevel, ibctesting.TrustingPeriod, ibctesting.UnbondingPeriod, ibctesting.MaxClockDrift, clientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath), ), }, []types.ClientConsensusStates{ @@ -340,7 +340,7 @@ func (suite *TypesTestSuite) TestValidateGenesis() { genState: types.NewGenesisState( []types.IdentifiedClientState{ types.NewIdentifiedClientState( - tmClientID0, ibctm.NewClientState(suite.chainA.ChainID, ibctesting.DefaultTrustLevel, ibctesting.TrustingPeriod, ibctesting.UnbondingPeriod, ibctesting.MaxClockDrift, clientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath), + tmClientID0, ibctm.NewClientState(s.chainA.ChainID, ibctesting.DefaultTrustLevel, ibctesting.TrustingPeriod, ibctesting.UnbondingPeriod, ibctesting.MaxClockDrift, clientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath), ), }, []types.ClientConsensusStates{ @@ -368,7 +368,7 @@ func (suite *TypesTestSuite) TestValidateGenesis() { genState: types.NewGenesisState( []types.IdentifiedClientState{ types.NewIdentifiedClientState( - tmClientID0, ibctm.NewClientState(suite.chainA.ChainID, ibctesting.DefaultTrustLevel, ibctesting.TrustingPeriod, ibctesting.UnbondingPeriod, ibctesting.MaxClockDrift, clientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath), + tmClientID0, ibctm.NewClientState(s.chainA.ChainID, ibctesting.DefaultTrustLevel, ibctesting.TrustingPeriod, ibctesting.UnbondingPeriod, ibctesting.MaxClockDrift, clientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath), ), }, 
[]types.ClientConsensusStates{ @@ -396,10 +396,10 @@ func (suite *TypesTestSuite) TestValidateGenesis() { genState: types.NewGenesisState( []types.IdentifiedClientState{ types.NewIdentifiedClientState( - tmClientID0, ibctm.NewClientState(suite.chainA.ChainID, ibctesting.DefaultTrustLevel, ibctesting.TrustingPeriod, ibctesting.UnbondingPeriod, ibctesting.MaxClockDrift, clientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath), + tmClientID0, ibctm.NewClientState(s.chainA.ChainID, ibctesting.DefaultTrustLevel, ibctesting.TrustingPeriod, ibctesting.UnbondingPeriod, ibctesting.MaxClockDrift, clientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath), ), types.NewIdentifiedClientState( - tmClientID1, ibctm.NewClientState(suite.chainA.ChainID, ibctesting.DefaultTrustLevel, ibctesting.TrustingPeriod, ibctesting.UnbondingPeriod, ibctesting.MaxClockDrift, clientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath), + tmClientID1, ibctm.NewClientState(s.chainA.ChainID, ibctesting.DefaultTrustLevel, ibctesting.TrustingPeriod, ibctesting.UnbondingPeriod, ibctesting.MaxClockDrift, clientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath), ), }, []types.ClientConsensusStates{ @@ -427,7 +427,7 @@ func (suite *TypesTestSuite) TestValidateGenesis() { genState: types.NewGenesisState( []types.IdentifiedClientState{ types.NewIdentifiedClientState( - "my-client", ibctm.NewClientState(suite.chainA.ChainID, ibctesting.DefaultTrustLevel, ibctesting.TrustingPeriod, ibctesting.UnbondingPeriod, ibctesting.MaxClockDrift, clientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath), + "my-client", ibctm.NewClientState(s.chainA.ChainID, ibctesting.DefaultTrustLevel, ibctesting.TrustingPeriod, ibctesting.UnbondingPeriod, ibctesting.MaxClockDrift, clientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath), ), }, []types.ClientConsensusStates{ @@ -477,12 +477,11 @@ func (suite *TypesTestSuite) TestValidateGenesis() { } for _, tc := range testCases { - err := tc.genState.Validate() if tc.expError == nil { - suite.Require().NoError(err, tc.name) + s.Require().NoError(err, tc.name) } else { - suite.Require().ErrorContains(err, tc.expError.Error()) + s.Require().ErrorContains(err, tc.expError.Error()) } } } diff --git a/modules/core/02-client/types/height.go b/modules/core/02-client/types/height.go index f08d9d9f873..d8e2a52101e 100644 --- a/modules/core/02-client/types/height.go +++ b/modules/core/02-client/types/height.go @@ -103,7 +103,7 @@ func (h Height) String() string { // Decrement will return a new height with the RevisionHeight decremented // If the RevisionHeight is already at lowest value (1), then false success flag is returned -func (h Height) Decrement() (decremented exported.Height, success bool) { +func (h Height) Decrement() (exported.Height, bool) { if h.RevisionHeight == 0 { return Height{}, false } diff --git a/modules/core/02-client/types/height_test.go b/modules/core/02-client/types/height_test.go index ee16fd05bd2..3f33422f29b 100644 --- a/modules/core/02-client/types/height_test.go +++ b/modules/core/02-client/types/height_test.go @@ -35,14 +35,16 @@ func TestCompareHeights(t *testing.T) { switch tc.compareSign { case -1: - require.True(t, compare == -1, "case %d: %s should return negative value on comparison, got: %d", + require.Equal(t, tc.compareSign, compare, "case %d: %s should return negative value on comparison, got: %d", i, tc.name, compare) case 0: - require.True(t, compare == 0, "case %d: %s should return zero on comparison, got: 
%d", + require.Equal(t, tc.compareSign, compare, "case %d: %s should return zero on comparison, got: %d", i, tc.name, compare) case 1: - require.True(t, compare == 1, "case %d: %s should return positive value on comparison, got: %d", + require.Equal(t, tc.compareSign, compare, "case %d: %s should return positive value on comparison, got: %d", i, tc.name, compare) + default: + t.Fatalf("unexpected compareSign: %d", tc.compareSign) } }) } @@ -85,16 +87,16 @@ func TestString(t *testing.T) { require.Equal(t, types.NewHeight(3, 10), parse, "parse height returns wrong height") } -func (suite *TypesTestSuite) TestMustParseHeight() { - suite.Require().Panics(func() { +func (s *TypesTestSuite) TestMustParseHeight() { + s.Require().Panics(func() { types.MustParseHeight("height") }) - suite.Require().NotPanics(func() { + s.Require().NotPanics(func() { types.MustParseHeight("111-1") }) - suite.Require().NotPanics(func() { + s.Require().NotPanics(func() { types.MustParseHeight("0-0") }) } @@ -134,7 +136,7 @@ func TestSetRevisionNumber(t *testing.T) { // Test SetRevisionNumber chainID, err := types.SetRevisionNumber("gaiamainnet", 3) require.Error(t, err, "invalid revision format passed SetRevisionNumber") - require.Equal(t, "", chainID, "invalid revision format returned non-empty string on SetRevisionNumber") + require.Empty(t, chainID, "invalid revision format returned non-empty string on SetRevisionNumber") chainID = "gaiamainnet-3" chainID, err = types.SetRevisionNumber(chainID, 4) @@ -142,18 +144,18 @@ func TestSetRevisionNumber(t *testing.T) { require.Equal(t, "gaiamainnet-4", chainID, "valid revision format returned incorrect string on SetRevisionNumber") } -func (suite *TypesTestSuite) TestSelfHeight() { - ctx := suite.chainA.GetContext() +func (s *TypesTestSuite) TestSelfHeight() { + ctx := s.chainA.GetContext() // Test default revision ctx = ctx.WithChainID("gaiamainnet") ctx = ctx.WithBlockHeight(10) height := types.GetSelfHeight(ctx) - suite.Require().Equal(types.NewHeight(0, 10), height, "default self height failed") + s.Require().Equal(types.NewHeight(0, 10), height, "default self height failed") // Test successful revision format ctx = ctx.WithChainID("gaiamainnet-3") ctx = ctx.WithBlockHeight(18) height = types.GetSelfHeight(ctx) - suite.Require().Equal(types.NewHeight(3, 18), height, "valid self height failed") + s.Require().Equal(types.NewHeight(3, 18), height, "valid self height failed") } diff --git a/modules/core/02-client/types/keys.go b/modules/core/02-client/types/keys.go index 7aa99f4b9e1..b06bfb9c8eb 100644 --- a/modules/core/02-client/types/keys.go +++ b/modules/core/02-client/types/keys.go @@ -37,6 +37,8 @@ const ( AllowAllClients = "*" ) +var KeyAllowedClients = []byte("AllowedClients") + // FormatClientIdentifier returns the client identifier with the sequence appended. // This is an SDK specific format not enforced by IBC protocol. 
func FormatClientIdentifier(clientType string, sequence uint64) string { diff --git a/modules/core/02-client/types/keys_test.go b/modules/core/02-client/types/keys_test.go index af9b63bd254..4a051c69d8a 100644 --- a/modules/core/02-client/types/keys_test.go +++ b/modules/core/02-client/types/keys_test.go @@ -59,7 +59,7 @@ func TestParseClientIdentifier(t *testing.T) { } else { require.Error(t, err, tc.name, tc.clientID) require.False(t, valid) - require.Equal(t, "", clientType) + require.Empty(t, clientType) require.ErrorContains(t, err, tc.expErr.Error()) } }) diff --git a/modules/core/02-client/types/legacy_proposal_test.go b/modules/core/02-client/types/legacy_proposal_test.go index b4e4e9ef4e0..d3d63a77a58 100644 --- a/modules/core/02-client/types/legacy_proposal_test.go +++ b/modules/core/02-client/types/legacy_proposal_test.go @@ -11,12 +11,12 @@ import ( ibctesting "github.com/cosmos/ibc-go/v10/testing" ) -func (suite *TypesTestSuite) TestValidateBasic() { - subjectPath := ibctesting.NewPath(suite.chainA, suite.chainB) +func (s *TypesTestSuite) TestValidateBasic() { + subjectPath := ibctesting.NewPath(s.chainA, s.chainB) subjectPath.SetupClients() subject := subjectPath.EndpointA.ClientID - substitutePath := ibctesting.NewPath(suite.chainA, suite.chainB) + substitutePath := ibctesting.NewPath(s.chainA, s.chainB) substitutePath.SetupClients() substitute := substitutePath.EndpointA.ClientID @@ -53,20 +53,20 @@ func (suite *TypesTestSuite) TestValidateBasic() { } for _, tc := range testCases { - suite.Run(tc.name, func() { + s.Run(tc.name, func() { err := tc.proposal.ValidateBasic() if tc.expErr == nil { - suite.Require().NoError(err, tc.name) + s.Require().NoError(err, tc.name) } else { - suite.Require().ErrorIs(err, tc.expErr, tc.name) + s.Require().ErrorIs(err, tc.expErr, tc.name) } }) } } // tests a client update proposal can be marshaled and unmarshaled -func (suite *TypesTestSuite) TestMarshalClientUpdateProposalProposal() { +func (s *TypesTestSuite) TestMarshalClientUpdateProposalProposal() { // create proposal proposal := types.NewClientUpdateProposal("update IBC client", "description", "subject", "substitute") @@ -78,12 +78,12 @@ func (suite *TypesTestSuite) TestMarshalClientUpdateProposalProposal() { // marshal message content, ok := proposal.(*types.ClientUpdateProposal) - suite.Require().True(ok) + s.Require().True(ok) bz, err := cdc.MarshalJSON(content) - suite.Require().NoError(err) + s.Require().NoError(err) // unmarshal proposal newProposal := &types.ClientUpdateProposal{} err = cdc.UnmarshalJSON(bz, newProposal) - suite.Require().NoError(err) + s.Require().NoError(err) } diff --git a/modules/core/02-client/types/msgs.go b/modules/core/02-client/types/msgs.go index a3fae97d868..bdbfe604554 100644 --- a/modules/core/02-client/types/msgs.go +++ b/modules/core/02-client/types/msgs.go @@ -15,7 +15,6 @@ import ( var ( _ sdk.Msg = (*MsgCreateClient)(nil) _ sdk.Msg = (*MsgUpdateClient)(nil) - _ sdk.Msg = (*MsgSubmitMisbehaviour)(nil) _ sdk.Msg = (*MsgUpgradeClient)(nil) _ sdk.Msg = (*MsgUpdateParams)(nil) _ sdk.Msg = (*MsgIBCSoftwareUpgrade)(nil) @@ -24,7 +23,6 @@ var ( _ sdk.HasValidateBasic = (*MsgCreateClient)(nil) _ sdk.HasValidateBasic = (*MsgUpdateClient)(nil) - _ sdk.HasValidateBasic = (*MsgSubmitMisbehaviour)(nil) _ sdk.HasValidateBasic = (*MsgUpgradeClient)(nil) _ sdk.HasValidateBasic = (*MsgUpdateParams)(nil) _ sdk.HasValidateBasic = (*MsgIBCSoftwareUpgrade)(nil) @@ -33,7 +31,6 @@ var ( _ codectypes.UnpackInterfacesMessage = (*MsgCreateClient)(nil) _ 
codectypes.UnpackInterfacesMessage = (*MsgUpdateClient)(nil) - _ codectypes.UnpackInterfacesMessage = (*MsgSubmitMisbehaviour)(nil) _ codectypes.UnpackInterfacesMessage = (*MsgUpgradeClient)(nil) _ codectypes.UnpackInterfacesMessage = (*MsgIBCSoftwareUpgrade)(nil) ) @@ -200,43 +197,6 @@ func (msg MsgUpgradeClient) UnpackInterfaces(unpacker codectypes.AnyUnpacker) er return unpacker.UnpackAny(msg.ConsensusState, &consState) } -// NewMsgSubmitMisbehaviour creates a new MsgSubmitMisbehaviour instance. -func NewMsgSubmitMisbehaviour(clientID string, misbehaviour exported.ClientMessage, signer string) (*MsgSubmitMisbehaviour, error) { - anyMisbehaviour, err := PackClientMessage(misbehaviour) - if err != nil { - return nil, err - } - - return &MsgSubmitMisbehaviour{ - ClientId: clientID, - Misbehaviour: anyMisbehaviour, - Signer: signer, - }, nil -} - -// ValidateBasic performs basic (non-state-dependant) validation on a MsgSubmitMisbehaviour. -func (msg MsgSubmitMisbehaviour) ValidateBasic() error { - _, err := sdk.AccAddressFromBech32(msg.Signer) - if err != nil { - return errorsmod.Wrapf(ibcerrors.ErrInvalidAddress, "string could not be parsed as address: %v", err) - } - misbehaviour, err := UnpackClientMessage(msg.Misbehaviour) - if err != nil { - return err - } - if err := misbehaviour.ValidateBasic(); err != nil { - return err - } - - return host.ClientIdentifierValidator(msg.ClientId) -} - -// UnpackInterfaces implements UnpackInterfacesMessage.UnpackInterfaces -func (msg MsgSubmitMisbehaviour) UnpackInterfaces(unpacker codectypes.AnyUnpacker) error { - var misbehaviour exported.ClientMessage - return unpacker.UnpackAny(msg.Misbehaviour, &misbehaviour) -} - // NewMsgRecoverClient creates a new MsgRecoverClient instance func NewMsgRecoverClient(signer, subjectClientID, substituteClientID string) *MsgRecoverClient { return &MsgRecoverClient{ diff --git a/modules/core/02-client/types/msgs_test.go b/modules/core/02-client/types/msgs_test.go index 0c5ff90ca37..bfa9414001d 100644 --- a/modules/core/02-client/types/msgs_test.go +++ b/modules/core/02-client/types/msgs_test.go @@ -3,7 +3,6 @@ package types_test import ( "errors" "testing" - "time" "github.com/golang/protobuf/proto" //nolint:staticcheck "github.com/stretchr/testify/require" @@ -38,20 +37,20 @@ type TypesTestSuite struct { solomachine *ibctesting.Solomachine } -func (suite *TypesTestSuite) SetupTest() { - suite.coordinator = ibctesting.NewCoordinator(suite.T(), 2) - suite.chainA = suite.coordinator.GetChain(ibctesting.GetChainID(1)) - suite.chainB = suite.coordinator.GetChain(ibctesting.GetChainID(2)) - suite.solomachine = ibctesting.NewSolomachine(suite.T(), suite.chainA.Codec, "solomachinesingle", "testing", 1) -} - func TestTypesTestSuite(t *testing.T) { testifysuite.Run(t, new(TypesTestSuite)) } +func (s *TypesTestSuite) SetupTest() { + s.coordinator = ibctesting.NewCoordinator(s.T(), 2) + s.chainA = s.coordinator.GetChain(ibctesting.GetChainID(1)) + s.chainB = s.coordinator.GetChain(ibctesting.GetChainID(2)) + s.solomachine = ibctesting.NewSolomachine(s.T(), s.chainA.Codec, "solomachinesingle", "testing", 1) +} + // tests that different clients within MsgCreateClient can be marshaled // and unmarshaled. 
-func (suite *TypesTestSuite) TestMarshalMsgCreateClient() { +func (s *TypesTestSuite) TestMarshalMsgCreateClient() { var ( msg *types.MsgCreateClient err error @@ -63,43 +62,43 @@ func (suite *TypesTestSuite) TestMarshalMsgCreateClient() { }{ { "solo machine client", func() { - soloMachine := ibctesting.NewSolomachine(suite.T(), suite.chainA.Codec, "solomachine", "", 2) - msg, err = types.NewMsgCreateClient(soloMachine.ClientState(), soloMachine.ConsensusState(), suite.chainA.SenderAccount.GetAddress().String()) - suite.Require().NoError(err) + soloMachine := ibctesting.NewSolomachine(s.T(), s.chainA.Codec, "solomachine", "", 2) + msg, err = types.NewMsgCreateClient(soloMachine.ClientState(), soloMachine.ConsensusState(), s.chainA.SenderAccount.GetAddress().String()) + s.Require().NoError(err) }, }, { "tendermint client", func() { - tendermintClient := ibctm.NewClientState(suite.chainA.ChainID, ibctesting.DefaultTrustLevel, ibctesting.TrustingPeriod, ibctesting.UnbondingPeriod, ibctesting.MaxClockDrift, clientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath) - msg, err = types.NewMsgCreateClient(tendermintClient, suite.chainA.CurrentTMClientHeader().ConsensusState(), suite.chainA.SenderAccount.GetAddress().String()) - suite.Require().NoError(err) + tendermintClient := ibctm.NewClientState(s.chainA.ChainID, ibctesting.DefaultTrustLevel, ibctesting.TrustingPeriod, ibctesting.UnbondingPeriod, ibctesting.MaxClockDrift, clientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath) + msg, err = types.NewMsgCreateClient(tendermintClient, s.chainA.CurrentTMClientHeader().ConsensusState(), s.chainA.SenderAccount.GetAddress().String()) + s.Require().NoError(err) }, }, } for _, tc := range testCases { - suite.Run(tc.name, func() { - suite.SetupTest() + s.Run(tc.name, func() { + s.SetupTest() tc.malleate() - cdc := suite.chainA.App.AppCodec() + cdc := s.chainA.App.AppCodec() // marshal message bz, err := cdc.MarshalJSON(msg) - suite.Require().NoError(err) + s.Require().NoError(err) // unmarshal message newMsg := &types.MsgCreateClient{} err = cdc.UnmarshalJSON(bz, newMsg) - suite.Require().NoError(err) + s.Require().NoError(err) - suite.Require().True(proto.Equal(msg, newMsg)) + s.Require().True(proto.Equal(msg, newMsg)) }) } } -func (suite *TypesTestSuite) TestMsgCreateClient_ValidateBasic() { +func (s *TypesTestSuite) TestMsgCreateClient_ValidateBasic() { var ( msg = &types.MsgCreateClient{} err error @@ -113,17 +112,17 @@ func (suite *TypesTestSuite) TestMsgCreateClient_ValidateBasic() { { "valid - tendermint client", func() { - tendermintClient := ibctm.NewClientState(suite.chainA.ChainID, ibctesting.DefaultTrustLevel, ibctesting.TrustingPeriod, ibctesting.UnbondingPeriod, ibctesting.MaxClockDrift, clientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath) - msg, err = types.NewMsgCreateClient(tendermintClient, suite.chainA.CurrentTMClientHeader().ConsensusState(), suite.chainA.SenderAccount.GetAddress().String()) - suite.Require().NoError(err) + tendermintClient := ibctm.NewClientState(s.chainA.ChainID, ibctesting.DefaultTrustLevel, ibctesting.TrustingPeriod, ibctesting.UnbondingPeriod, ibctesting.MaxClockDrift, clientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath) + msg, err = types.NewMsgCreateClient(tendermintClient, s.chainA.CurrentTMClientHeader().ConsensusState(), s.chainA.SenderAccount.GetAddress().String()) + s.Require().NoError(err) }, nil, }, { "invalid tendermint client", func() { - msg, err = types.NewMsgCreateClient(&ibctm.ClientState{}, 
suite.chainA.CurrentTMClientHeader().ConsensusState(), suite.chainA.SenderAccount.GetAddress().String()) - suite.Require().NoError(err) + msg, err = types.NewMsgCreateClient(&ibctm.ClientState{}, s.chainA.CurrentTMClientHeader().ConsensusState(), s.chainA.SenderAccount.GetAddress().String()) + s.Require().NoError(err) }, errorsmod.Wrap(ibctm.ErrInvalidChainID, "chain id cannot be empty string"), }, @@ -137,9 +136,9 @@ func (suite *TypesTestSuite) TestMsgCreateClient_ValidateBasic() { { "failed to unpack consensus state", func() { - tendermintClient := ibctm.NewClientState(suite.chainA.ChainID, ibctesting.DefaultTrustLevel, ibctesting.TrustingPeriod, ibctesting.UnbondingPeriod, ibctesting.MaxClockDrift, clientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath) - msg, err = types.NewMsgCreateClient(tendermintClient, suite.chainA.CurrentTMClientHeader().ConsensusState(), suite.chainA.SenderAccount.GetAddress().String()) - suite.Require().NoError(err) + tendermintClient := ibctm.NewClientState(s.chainA.ChainID, ibctesting.DefaultTrustLevel, ibctesting.TrustingPeriod, ibctesting.UnbondingPeriod, ibctesting.MaxClockDrift, clientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath) + msg, err = types.NewMsgCreateClient(tendermintClient, s.chainA.CurrentTMClientHeader().ConsensusState(), s.chainA.SenderAccount.GetAddress().String()) + s.Require().NoError(err) msg.ConsensusState = nil }, errorsmod.Wrap(ibcerrors.ErrUnpackAny, "protobuf Any message cannot be nil"), @@ -154,37 +153,37 @@ func (suite *TypesTestSuite) TestMsgCreateClient_ValidateBasic() { { "valid - solomachine client", func() { - soloMachine := ibctesting.NewSolomachine(suite.T(), suite.chainA.Codec, "solomachine", "", 2) - msg, err = types.NewMsgCreateClient(soloMachine.ClientState(), soloMachine.ConsensusState(), suite.chainA.SenderAccount.GetAddress().String()) - suite.Require().NoError(err) + soloMachine := ibctesting.NewSolomachine(s.T(), s.chainA.Codec, "solomachine", "", 2) + msg, err = types.NewMsgCreateClient(soloMachine.ClientState(), soloMachine.ConsensusState(), s.chainA.SenderAccount.GetAddress().String()) + s.Require().NoError(err) }, nil, }, { "invalid solomachine client", func() { - soloMachine := ibctesting.NewSolomachine(suite.T(), suite.chainA.Codec, "solomachine", "", 2) - msg, err = types.NewMsgCreateClient(&solomachine.ClientState{}, soloMachine.ConsensusState(), suite.chainA.SenderAccount.GetAddress().String()) - suite.Require().NoError(err) + soloMachine := ibctesting.NewSolomachine(s.T(), s.chainA.Codec, "solomachine", "", 2) + msg, err = types.NewMsgCreateClient(&solomachine.ClientState{}, soloMachine.ConsensusState(), s.chainA.SenderAccount.GetAddress().String()) + s.Require().NoError(err) }, errorsmod.Wrap(types.ErrInvalidClient, "sequence cannot be 0"), }, { "invalid solomachine consensus state", func() { - soloMachine := ibctesting.NewSolomachine(suite.T(), suite.chainA.Codec, "solomachine", "", 2) - msg, err = types.NewMsgCreateClient(soloMachine.ClientState(), &solomachine.ConsensusState{}, suite.chainA.SenderAccount.GetAddress().String()) - suite.Require().NoError(err) + soloMachine := ibctesting.NewSolomachine(s.T(), s.chainA.Codec, "solomachine", "", 2) + msg, err = types.NewMsgCreateClient(soloMachine.ClientState(), &solomachine.ConsensusState{}, s.chainA.SenderAccount.GetAddress().String()) + s.Require().NoError(err) }, errorsmod.Wrap(types.ErrInvalidConsensus, "timestamp cannot be 0"), }, { "invalid - client state and consensus state client types do not match", func() { - 
tendermintClient := ibctm.NewClientState(suite.chainA.ChainID, ibctesting.DefaultTrustLevel, ibctesting.TrustingPeriod, ibctesting.UnbondingPeriod, ibctesting.MaxClockDrift, clientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath) - soloMachine := ibctesting.NewSolomachine(suite.T(), suite.chainA.Codec, "solomachine", "", 2) - msg, err = types.NewMsgCreateClient(tendermintClient, soloMachine.ConsensusState(), suite.chainA.SenderAccount.GetAddress().String()) - suite.Require().NoError(err) + tendermintClient := ibctm.NewClientState(s.chainA.ChainID, ibctesting.DefaultTrustLevel, ibctesting.TrustingPeriod, ibctesting.UnbondingPeriod, ibctesting.MaxClockDrift, clientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath) + soloMachine := ibctesting.NewSolomachine(s.T(), s.chainA.Codec, "solomachine", "", 2) + msg, err = types.NewMsgCreateClient(tendermintClient, soloMachine.ConsensusState(), s.chainA.SenderAccount.GetAddress().String()) + s.Require().NoError(err) }, errorsmod.Wrap(types.ErrInvalidClientType, "client type for client state and consensus state do not match"), }, @@ -194,17 +193,17 @@ func (suite *TypesTestSuite) TestMsgCreateClient_ValidateBasic() { tc.malleate() err = msg.ValidateBasic() if tc.expErr == nil { - suite.Require().NoError(err, tc.name) + s.Require().NoError(err, tc.name) } else { - suite.Require().Error(err, tc.name) - suite.Require().ErrorIs(err, tc.expErr) + s.Require().Error(err, tc.name) + s.Require().ErrorIs(err, tc.expErr) } } } // tests that different header within MsgUpdateClient can be marshaled // and unmarshaled. -func (suite *TypesTestSuite) TestMarshalMsgUpdateClient() { +func (s *TypesTestSuite) TestMarshalMsgUpdateClient() { var ( msg *types.MsgUpdateClient err error @@ -216,42 +215,42 @@ func (suite *TypesTestSuite) TestMarshalMsgUpdateClient() { }{ { "solo machine client", func() { - soloMachine := ibctesting.NewSolomachine(suite.T(), suite.chainA.Codec, "solomachine", "", 2) - msg, err = types.NewMsgUpdateClient(soloMachine.ClientID, soloMachine.CreateHeader(soloMachine.Diversifier), suite.chainA.SenderAccount.GetAddress().String()) - suite.Require().NoError(err) + soloMachine := ibctesting.NewSolomachine(s.T(), s.chainA.Codec, "solomachine", "", 2) + msg, err = types.NewMsgUpdateClient(soloMachine.ClientID, soloMachine.CreateHeader(soloMachine.Diversifier), s.chainA.SenderAccount.GetAddress().String()) + s.Require().NoError(err) }, }, { "tendermint client", func() { - msg, err = types.NewMsgUpdateClient("tendermint", suite.chainA.CurrentTMClientHeader(), suite.chainA.SenderAccount.GetAddress().String()) - suite.Require().NoError(err) + msg, err = types.NewMsgUpdateClient("tendermint", s.chainA.CurrentTMClientHeader(), s.chainA.SenderAccount.GetAddress().String()) + s.Require().NoError(err) }, }, } for _, tc := range testCases { - suite.Run(tc.name, func() { - suite.SetupTest() + s.Run(tc.name, func() { + s.SetupTest() tc.malleate() - cdc := suite.chainA.App.AppCodec() + cdc := s.chainA.App.AppCodec() // marshal message bz, err := cdc.MarshalJSON(msg) - suite.Require().NoError(err) + s.Require().NoError(err) // unmarshal message newMsg := &types.MsgUpdateClient{} err = cdc.UnmarshalJSON(bz, newMsg) - suite.Require().NoError(err) + s.Require().NoError(err) - suite.Require().True(proto.Equal(msg, newMsg)) + s.Require().True(proto.Equal(msg, newMsg)) }) } } -func (suite *TypesTestSuite) TestMsgUpdateClient_ValidateBasic() { +func (s *TypesTestSuite) TestMsgUpdateClient_ValidateBasic() { var ( msg = &types.MsgUpdateClient{} err 
error @@ -272,16 +271,16 @@ func (suite *TypesTestSuite) TestMsgUpdateClient_ValidateBasic() { { "valid - tendermint header", func() { - msg, err = types.NewMsgUpdateClient("tendermint", suite.chainA.CurrentTMClientHeader(), suite.chainA.SenderAccount.GetAddress().String()) - suite.Require().NoError(err) + msg, err = types.NewMsgUpdateClient("tendermint", s.chainA.CurrentTMClientHeader(), s.chainA.SenderAccount.GetAddress().String()) + s.Require().NoError(err) }, nil, }, { "invalid tendermint header", func() { - msg, err = types.NewMsgUpdateClient("tendermint", &ibctm.Header{}, suite.chainA.SenderAccount.GetAddress().String()) - suite.Require().NoError(err) + msg, err = types.NewMsgUpdateClient("tendermint", &ibctm.Header{}, s.chainA.SenderAccount.GetAddress().String()) + s.Require().NoError(err) }, errorsmod.Wrap(types.ErrInvalidHeader, "tendermint signed header cannot be nil"), }, @@ -302,18 +301,18 @@ func (suite *TypesTestSuite) TestMsgUpdateClient_ValidateBasic() { { "valid - solomachine header", func() { - soloMachine := ibctesting.NewSolomachine(suite.T(), suite.chainA.Codec, "solomachine", "", 2) - msg, err = types.NewMsgUpdateClient(soloMachine.ClientID, soloMachine.CreateHeader(soloMachine.Diversifier), suite.chainA.SenderAccount.GetAddress().String()) + soloMachine := ibctesting.NewSolomachine(s.T(), s.chainA.Codec, "solomachine", "", 2) + msg, err = types.NewMsgUpdateClient(soloMachine.ClientID, soloMachine.CreateHeader(soloMachine.Diversifier), s.chainA.SenderAccount.GetAddress().String()) - suite.Require().NoError(err) + s.Require().NoError(err) }, nil, }, { "invalid solomachine header", func() { - msg, err = types.NewMsgUpdateClient("solomachine", &solomachine.Header{}, suite.chainA.SenderAccount.GetAddress().String()) - suite.Require().NoError(err) + msg, err = types.NewMsgUpdateClient("solomachine", &solomachine.Header{}, s.chainA.SenderAccount.GetAddress().String()) + s.Require().NoError(err) }, errorsmod.Wrap(types.ErrInvalidHeader, "timestamp cannot be zero"), }, @@ -323,15 +322,15 @@ func (suite *TypesTestSuite) TestMsgUpdateClient_ValidateBasic() { tc.malleate() err = msg.ValidateBasic() if tc.expErr == nil { - suite.Require().NoError(err, tc.name) + s.Require().NoError(err, tc.name) } else { - suite.Require().Error(err, tc.name) - suite.Require().ErrorIs(err, tc.expErr) + s.Require().Error(err, tc.name) + s.Require().ErrorIs(err, tc.expErr) } } } -func (suite *TypesTestSuite) TestMarshalMsgUpgradeClient() { +func (s *TypesTestSuite) TestMarshalMsgUpgradeClient() { var ( msg *types.MsgUpgradeClient err error @@ -344,43 +343,43 @@ func (suite *TypesTestSuite) TestMarshalMsgUpgradeClient() { { "client upgrades to new tendermint client", func() { - tendermintClient := ibctm.NewClientState(suite.chainA.ChainID, ibctesting.DefaultTrustLevel, ibctesting.TrustingPeriod, ibctesting.UnbondingPeriod, ibctesting.MaxClockDrift, clientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath) + tendermintClient := ibctm.NewClientState(s.chainA.ChainID, ibctesting.DefaultTrustLevel, ibctesting.TrustingPeriod, ibctesting.UnbondingPeriod, ibctesting.MaxClockDrift, clientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath) tendermintConsState := &ibctm.ConsensusState{NextValidatorsHash: []byte("nextValsHash")} - msg, err = types.NewMsgUpgradeClient("clientid", tendermintClient, tendermintConsState, []byte("proofUpgradeClient"), []byte("proofUpgradeConsState"), suite.chainA.SenderAccount.GetAddress().String()) - suite.Require().NoError(err) + msg, err = 
types.NewMsgUpgradeClient("clientid", tendermintClient, tendermintConsState, []byte("proofUpgradeClient"), []byte("proofUpgradeConsState"), s.chainA.SenderAccount.GetAddress().String()) + s.Require().NoError(err) }, }, { "client upgrades to new solomachine client", func() { - soloMachine := ibctesting.NewSolomachine(suite.T(), suite.chainA.Codec, "solomachine", "", 1) - msg, err = types.NewMsgUpgradeClient("clientid", soloMachine.ClientState(), soloMachine.ConsensusState(), []byte("proofUpgradeClient"), []byte("proofUpgradeConsState"), suite.chainA.SenderAccount.GetAddress().String()) - suite.Require().NoError(err) + soloMachine := ibctesting.NewSolomachine(s.T(), s.chainA.Codec, "solomachine", "", 1) + msg, err = types.NewMsgUpgradeClient("clientid", soloMachine.ClientState(), soloMachine.ConsensusState(), []byte("proofUpgradeClient"), []byte("proofUpgradeConsState"), s.chainA.SenderAccount.GetAddress().String()) + s.Require().NoError(err) }, }, } for _, tc := range testCases { - suite.Run(tc.name, func() { - suite.SetupTest() + s.Run(tc.name, func() { + s.SetupTest() tc.malleate() - cdc := suite.chainA.App.AppCodec() + cdc := s.chainA.App.AppCodec() // marshal message bz, err := cdc.MarshalJSON(msg) - suite.Require().NoError(err) + s.Require().NoError(err) // unmarshal message newMsg := &types.MsgUpgradeClient{} err = cdc.UnmarshalJSON(bz, newMsg) - suite.Require().NoError(err) + s.Require().NoError(err) }) } } -func (suite *TypesTestSuite) TestMsgUpgradeClient_ValidateBasic() { +func (s *TypesTestSuite) TestMsgUpgradeClient_ValidateBasic() { cases := []struct { name string malleate func(*types.MsgUpgradeClient) @@ -422,9 +421,9 @@ func (suite *TypesTestSuite) TestMsgUpgradeClient_ValidateBasic() { { name: "client and consensus type does not match", malleate: func(msg *types.MsgUpgradeClient) { - soloMachine := ibctesting.NewSolomachine(suite.T(), suite.chainA.Codec, "solomachine", "", 2) + soloMachine := ibctesting.NewSolomachine(s.T(), s.chainA.Codec, "solomachine", "", 2) soloConsensus, err := types.PackConsensusState(soloMachine.ConsensusState()) - suite.Require().NoError(err) + s.Require().NoError(err) msg.ConsensusState = soloConsensus }, expErr: errorsmod.Wrap(types.ErrInvalidUpgradeClient, "consensus state's client-type does not match client. 
expected: 07-tendermint, got: 06-solomachine"), @@ -453,174 +452,24 @@ func (suite *TypesTestSuite) TestMsgUpgradeClient_ValidateBasic() { } for _, tc := range cases { - - clientState := ibctm.NewClientState(suite.chainA.ChainID, ibctesting.DefaultTrustLevel, ibctesting.TrustingPeriod, ibctesting.UnbondingPeriod, ibctesting.MaxClockDrift, clientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath) + clientState := ibctm.NewClientState(s.chainA.ChainID, ibctesting.DefaultTrustLevel, ibctesting.TrustingPeriod, ibctesting.UnbondingPeriod, ibctesting.MaxClockDrift, clientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath) consState := &ibctm.ConsensusState{NextValidatorsHash: []byte("nextValsHash")} - msg, err := types.NewMsgUpgradeClient("testclientid", clientState, consState, []byte("proofUpgradeClient"), []byte("proofUpgradeConsState"), suite.chainA.SenderAccount.GetAddress().String()) - suite.Require().NoError(err) + msg, err := types.NewMsgUpgradeClient("testclientid", clientState, consState, []byte("proofUpgradeClient"), []byte("proofUpgradeConsState"), s.chainA.SenderAccount.GetAddress().String()) + s.Require().NoError(err) tc.malleate(msg) err = msg.ValidateBasic() if tc.expErr == nil { - suite.Require().NoError(err, "valid case %s failed", tc.name) + s.Require().NoError(err, "valid case %s failed", tc.name) } else { - suite.Require().Error(err, "invalid case %s passed", tc.name) - suite.Require().ErrorIs(err, tc.expErr) - } - } -} - -// tests that different misbehaviours within MsgSubmitMisbehaviour can be marshaled -// and unmarshaled. -func (suite *TypesTestSuite) TestMarshalMsgSubmitMisbehaviour() { - var ( - msg *types.MsgSubmitMisbehaviour - err error - ) - - testCases := []struct { - name string - malleate func() - }{ - { - "solo machine client", func() { - soloMachine := ibctesting.NewSolomachine(suite.T(), suite.chainA.Codec, "solomachine", "", 2) - msg, err = types.NewMsgSubmitMisbehaviour(soloMachine.ClientID, soloMachine.CreateMisbehaviour(), suite.chainA.SenderAccount.GetAddress().String()) - suite.Require().NoError(err) - }, - }, - { - "tendermint client", func() { - height := types.NewHeight(0, uint64(suite.chainA.ProposedHeader.Height)) - heightMinus1 := types.NewHeight(0, uint64(suite.chainA.ProposedHeader.Height)-1) - header1 := suite.chainA.CreateTMClientHeader(suite.chainA.ChainID, int64(height.RevisionHeight), heightMinus1, suite.chainA.ProposedHeader.Time, suite.chainA.Vals, suite.chainA.Vals, suite.chainA.Vals, suite.chainA.Signers) - header2 := suite.chainA.CreateTMClientHeader(suite.chainA.ChainID, int64(height.RevisionHeight), heightMinus1, suite.chainA.ProposedHeader.Time.Add(time.Minute), suite.chainA.Vals, suite.chainA.Vals, suite.chainA.Vals, suite.chainA.Signers) - - misbehaviour := ibctm.NewMisbehaviour("tendermint", header1, header2) - msg, err = types.NewMsgSubmitMisbehaviour("tendermint", misbehaviour, suite.chainA.SenderAccount.GetAddress().String()) - suite.Require().NoError(err) - }, - }, - } - - for _, tc := range testCases { - suite.Run(tc.name, func() { - suite.SetupTest() - - tc.malleate() - - cdc := suite.chainA.App.AppCodec() - - // marshal message - bz, err := cdc.MarshalJSON(msg) - suite.Require().NoError(err) - - // unmarshal message - newMsg := &types.MsgSubmitMisbehaviour{} - err = cdc.UnmarshalJSON(bz, newMsg) - suite.Require().NoError(err) - - suite.Require().True(proto.Equal(msg, newMsg)) - }) - } -} - -func (suite *TypesTestSuite) TestMsgSubmitMisbehaviour_ValidateBasic() { - var ( - msg = 
&types.MsgSubmitMisbehaviour{} - err error - ) - - cases := []struct { - name string - malleate func() - expErr error - }{ - { - "invalid client-id", - func() { - msg.ClientId = "" - }, - errorsmod.Wrapf(ibcerrors.ErrInvalidAddress, "string could not be parsed as address: empty address string is not allowed"), - }, - { - "valid - tendermint misbehaviour", - func() { - height := types.NewHeight(0, uint64(suite.chainA.ProposedHeader.Height)) - heightMinus1 := types.NewHeight(0, uint64(suite.chainA.ProposedHeader.Height)-1) - header1 := suite.chainA.CreateTMClientHeader(suite.chainA.ChainID, int64(height.RevisionHeight), heightMinus1, suite.chainA.ProposedHeader.Time, suite.chainA.Vals, suite.chainA.Vals, suite.chainA.Vals, suite.chainA.Signers) - header2 := suite.chainA.CreateTMClientHeader(suite.chainA.ChainID, int64(height.RevisionHeight), heightMinus1, suite.chainA.ProposedHeader.Time.Add(time.Minute), suite.chainA.Vals, suite.chainA.Vals, suite.chainA.Vals, suite.chainA.Signers) - - misbehaviour := ibctm.NewMisbehaviour("tendermint", header1, header2) - msg, err = types.NewMsgSubmitMisbehaviour("tendermint", misbehaviour, suite.chainA.SenderAccount.GetAddress().String()) - suite.Require().NoError(err) - }, - nil, - }, - { - "invalid tendermint misbehaviour", - func() { - msg, err = types.NewMsgSubmitMisbehaviour("tendermint", &ibctm.Misbehaviour{}, suite.chainA.SenderAccount.GetAddress().String()) - suite.Require().NoError(err) - }, - errorsmod.Wrap(ibctm.ErrInvalidHeader, "misbehaviour Header1 cannot be nil"), - }, - { - "failed to unpack misbehaviourt", - func() { - msg.Misbehaviour = nil - }, - errorsmod.Wrap(ibcerrors.ErrUnpackAny, "protobuf Any message cannot be nil"), - }, - { - "invalid signer", - func() { - msg.Signer = "" - }, - errorsmod.Wrapf(ibcerrors.ErrInvalidAddress, "string could not be parsed as address: empty address string is not allowed"), - }, - { - "valid - solomachine misbehaviour", - func() { - soloMachine := ibctesting.NewSolomachine(suite.T(), suite.chainA.Codec, "solomachine", "", 2) - msg, err = types.NewMsgSubmitMisbehaviour(soloMachine.ClientID, soloMachine.CreateMisbehaviour(), suite.chainA.SenderAccount.GetAddress().String()) - suite.Require().NoError(err) - }, - nil, - }, - { - "invalid solomachine misbehaviour", - func() { - msg, err = types.NewMsgSubmitMisbehaviour("solomachine", &solomachine.Misbehaviour{}, suite.chainA.SenderAccount.GetAddress().String()) - suite.Require().NoError(err) - }, - errorsmod.Wrapf(types.ErrInvalidMisbehaviour, "sequence cannot be 0"), - }, - { - "client-id too short", - func() { - soloMachineMisbehaviour := ibctesting.NewSolomachine(suite.T(), suite.chainA.Codec, "solomachine", "", 2).CreateMisbehaviour() - msg, err = types.NewMsgSubmitMisbehaviour("ext", soloMachineMisbehaviour, suite.chainA.SenderAccount.GetAddress().String()) - suite.Require().NoError(err) - }, - errorsmod.Wrapf(host.ErrInvalidID, "identifier external has invalid length: 3, must be between 4-64 characters"), - }, - } - - for _, tc := range cases { - tc.malleate() - err = msg.ValidateBasic() - if tc.expErr == nil { - suite.Require().NoError(err, tc.name) - } else { - suite.Require().Error(err, tc.name) - suite.Require().ErrorIs(err, tc.expErr) + s.Require().Error(err, "invalid case %s passed", tc.name) + s.Require().ErrorIs(err, tc.expErr) } } } // TestMsgRecoverClientValidateBasic tests ValidateBasic for MsgRecoverClient -func (suite *TypesTestSuite) TestMsgRecoverClientValidateBasic() { +func (s *TypesTestSuite) TestMsgRecoverClientValidateBasic() { var 
msg *types.MsgRecoverClient testCases := []struct { @@ -674,9 +523,9 @@ func (suite *TypesTestSuite) TestMsgRecoverClientValidateBasic() { err := msg.ValidateBasic() if tc.expError == nil { - suite.Require().NoError(err, "valid case %s failed", tc.name) + s.Require().NoError(err, "valid case %s failed", tc.name) } else { - suite.Require().ErrorIs(err, tc.expError, "invalid case %s passed", tc.name) + s.Require().ErrorIs(err, tc.expError, "invalid case %s passed", tc.name) } } } @@ -710,7 +559,7 @@ func TestMsgRecoverClientGetSigners(t *testing.T) { } // TestMsgIBCSoftwareUpgrade_NewMsgIBCSoftwareUpgrade tests NewMsgIBCSoftwareUpgrade -func (suite *TypesTestSuite) TestMsgIBCSoftwareUpgrade_NewMsgIBCSoftwareUpgrade() { +func (s *TypesTestSuite) TestMsgIBCSoftwareUpgrade_NewMsgIBCSoftwareUpgrade() { testCases := []struct { name string upgradedClientState exported.ClientState @@ -718,7 +567,7 @@ func (suite *TypesTestSuite) TestMsgIBCSoftwareUpgrade_NewMsgIBCSoftwareUpgrade( }{ { "success", - ibctm.NewClientState(suite.chainA.ChainID, ibctesting.DefaultTrustLevel, ibctesting.TrustingPeriod, ibctesting.UnbondingPeriod, ibctesting.MaxClockDrift, clientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath), + ibctm.NewClientState(s.chainA.ChainID, ibctesting.DefaultTrustLevel, ibctesting.TrustingPeriod, ibctesting.UnbondingPeriod, ibctesting.MaxClockDrift, clientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath), nil, }, { @@ -740,15 +589,15 @@ func (suite *TypesTestSuite) TestMsgIBCSoftwareUpgrade_NewMsgIBCSoftwareUpgrade( ) if tc.expErr == nil { - suite.Require().NoError(err) - suite.Assert().Equal(ibctesting.TestAccAddress, msg.Signer) - suite.Assert().Equal(plan, msg.Plan) + s.Require().NoError(err) + s.Equal(ibctesting.TestAccAddress, msg.Signer) + s.Equal(plan, msg.Plan) unpackedClientState, err := types.UnpackClientState(msg.UpgradedClientState) - suite.Require().NoError(err) - suite.Assert().Equal(tc.upgradedClientState, unpackedClientState) + s.Require().NoError(err) + s.Equal(tc.upgradedClientState, unpackedClientState) } else { - suite.Require().True(errors.Is(err, ibcerrors.ErrPackAny)) - suite.Require().ErrorIs(err, tc.expErr) + s.Require().ErrorIs(err, ibcerrors.ErrPackAny) + s.Require().ErrorIs(err, tc.expErr) } } } @@ -798,7 +647,7 @@ func TestMsgIBCSoftwareUpgrade_GetSigners(t *testing.T) { } // TestMsgIBCSoftwareUpgrade_ValidateBasic tests ValidateBasic for MsgIBCSoftwareUpgrade -func (suite *TypesTestSuite) TestMsgIBCSoftwareUpgrade_ValidateBasic() { +func (s *TypesTestSuite) TestMsgIBCSoftwareUpgrade_ValidateBasic() { var ( signer string plan upgradetypes.Plan @@ -843,10 +692,10 @@ func (suite *TypesTestSuite) TestMsgIBCSoftwareUpgrade_ValidateBasic() { Name: "upgrade IBC clients", Height: 1000, } - upgradedClientState := ibctm.NewClientState(suite.chainA.ChainID, ibctesting.DefaultTrustLevel, ibctesting.TrustingPeriod, ibctesting.UnbondingPeriod, ibctesting.MaxClockDrift, clientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath) + upgradedClientState := ibctm.NewClientState(s.chainA.ChainID, ibctesting.DefaultTrustLevel, ibctesting.TrustingPeriod, ibctesting.UnbondingPeriod, ibctesting.MaxClockDrift, clientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath) var err error anyClient, err = types.PackClientState(upgradedClientState) - suite.Require().NoError(err) + s.Require().NoError(err) tc.malleate() @@ -859,18 +708,18 @@ func (suite *TypesTestSuite) TestMsgIBCSoftwareUpgrade_ValidateBasic() { err = msg.ValidateBasic() if tc.expError 
== nil { - suite.Require().NoError(err) + s.Require().NoError(err) } if tc.expError != nil { - suite.Require().True(errors.Is(err, tc.expError)) + s.Require().ErrorIs(err, tc.expError) } } } // tests a MsgIBCSoftwareUpgrade can be marshaled and unmarshaled, and the // client state can be unpacked -func (suite *TypesTestSuite) TestMarshalMsgIBCSoftwareUpgrade() { - cdc := suite.chainA.App.AppCodec() +func (s *TypesTestSuite) TestMarshalMsgIBCSoftwareUpgrade() { + cdc := s.chainA.App.AppCodec() // create proposal plan := upgradetypes.Plan{ @@ -879,25 +728,25 @@ func (suite *TypesTestSuite) TestMarshalMsgIBCSoftwareUpgrade() { } msg, err := types.NewMsgIBCSoftwareUpgrade(ibctesting.TestAccAddress, plan, &ibctm.ClientState{}) - suite.Require().NoError(err) + s.Require().NoError(err) // marshal message bz, err := cdc.MarshalJSON(msg) - suite.Require().NoError(err) + s.Require().NoError(err) // unmarshal proposal newMsg := &types.MsgIBCSoftwareUpgrade{} err = cdc.UnmarshalJSON(bz, newMsg) - suite.Require().NoError(err) + s.Require().NoError(err) // unpack client state _, err = types.UnpackClientState(newMsg.UpgradedClientState) - suite.Require().NoError(err) + s.Require().NoError(err) } // TestMsgUpdateParamsValidateBasic tests ValidateBasic for MsgUpdateParams -func (suite *TypesTestSuite) TestMsgUpdateParamsValidateBasic() { - signer := suite.chainA.App.GetIBCKeeper().GetAuthority() +func (s *TypesTestSuite) TestMsgUpdateParamsValidateBasic() { + signer := s.chainA.App.GetIBCKeeper().GetAuthority() testCases := []struct { name string msg *types.MsgUpdateParams @@ -926,13 +775,13 @@ func (suite *TypesTestSuite) TestMsgUpdateParamsValidateBasic() { } for _, tc := range testCases { - suite.Run(tc.name, func() { + s.Run(tc.name, func() { err := tc.msg.ValidateBasic() if tc.expErr == nil { - suite.Require().NoError(err, "valid case %s failed", tc.name) + s.Require().NoError(err, "valid case %s failed", tc.name) } else { - suite.Require().Error(err, "invalid case %s passed", tc.name) - suite.Require().Equal(tc.expErr.Error(), err.Error()) + s.Require().Error(err, "invalid case %s passed", tc.name) + s.Require().Equal(tc.expErr.Error(), err.Error()) } }) } @@ -950,7 +799,6 @@ func TestMsgUpdateParamsGetSigners(t *testing.T) { } for _, tc := range testCases { - msg := types.MsgUpdateParams{ Signer: tc.address.String(), Params: types.DefaultParams(), diff --git a/modules/core/02-client/types/params_legacy.go b/modules/core/02-client/types/params_legacy.go deleted file mode 100644 index 0f699bf640b..00000000000 --- a/modules/core/02-client/types/params_legacy.go +++ /dev/null @@ -1,37 +0,0 @@ -/* -NOTE: Usage of x/params to manage parameters is deprecated in favor of x/gov -controlled execution of MsgUpdateParams messages. These types remains solely -for migration purposes and will be removed in a future release. 
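// Context for this params_legacy.go removal (sketch, not part of the diff): with the
// legacy x/params ParamSet plumbing gone, AllowedClients is changed through a
// governance-executed MsgUpdateParams, mirroring the message exercised in msgs_test.go
// above; "authority" is a placeholder for the x/gov module address returned by
// GetIBCKeeper().GetAuthority():
//
//	msg := types.MsgUpdateParams{
//		Signer: authority,
//		Params: types.NewParams(exported.Tendermint),
//	}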
-[#3621](https://github.com/cosmos/ibc-go/issues/3621) -*/ -package types - -import ( - "fmt" - - paramtypes "github.com/cosmos/cosmos-sdk/x/params/types" -) - -// KeyAllowedClients is store's key for AllowedClients Params -var KeyAllowedClients = []byte("AllowedClients") - -// ParamKeyTable type declaration for parameters -func ParamKeyTable() paramtypes.KeyTable { - return paramtypes.NewKeyTable().RegisterParamSet(&Params{}) -} - -// ParamSetPairs implements params.ParamSet -func (p *Params) ParamSetPairs() paramtypes.ParamSetPairs { - return paramtypes.ParamSetPairs{ - paramtypes.NewParamSetPair(KeyAllowedClients, &p.AllowedClients, validateClientsLegacy), - } -} - -func validateClientsLegacy(i any) error { - clients, ok := i.([]string) - if !ok { - return fmt.Errorf("invalid parameter type: %T", i) - } - - return validateClients(clients) -} diff --git a/modules/core/02-client/types/params_test.go b/modules/core/02-client/types/params_test.go index 79bf5f3a2a6..344c737d198 100644 --- a/modules/core/02-client/types/params_test.go +++ b/modules/core/02-client/types/params_test.go @@ -1,4 +1,4 @@ -package types +package types_test import ( "errors" @@ -6,6 +6,7 @@ import ( "github.com/stretchr/testify/require" + "github.com/cosmos/ibc-go/v10/modules/core/02-client/types" "github.com/cosmos/ibc-go/v10/modules/core/exported" ) @@ -13,15 +14,15 @@ func TestIsAllowedClient(t *testing.T) { testCases := []struct { name string clientType string - params Params + params types.Params expPass bool }{ - {"success: valid client", exported.Tendermint, DefaultParams(), true}, - {"success: valid client with custom params", exported.Tendermint, NewParams(exported.Tendermint), true}, - {"success: invalid blank client", " ", DefaultParams(), false}, - {"success: invalid client with custom params", exported.Localhost, NewParams(exported.Tendermint), false}, - {"success: wildcard allow all clients", "test-client-type", NewParams(AllowAllClients), true}, - {"success: wildcard allow all clients with blank client", " ", NewParams(AllowAllClients), false}, + {"success: valid client", exported.Tendermint, types.DefaultParams(), true}, + {"success: valid client with custom params", exported.Tendermint, types.NewParams(exported.Tendermint), true}, + {"success: invalid blank client", " ", types.DefaultParams(), false}, + {"success: invalid client with custom params", exported.Localhost, types.NewParams(exported.Tendermint), false}, + {"success: wildcard allow all clients", "test-client-type", types.NewParams(types.AllowAllClients), true}, + {"success: wildcard allow all clients with blank client", " ", types.NewParams(types.AllowAllClients), false}, } for _, tc := range testCases { @@ -32,19 +33,18 @@ func TestIsAllowedClient(t *testing.T) { func TestValidateParams(t *testing.T) { testCases := []struct { name string - params Params + params types.Params expError error }{ - {"default params", DefaultParams(), nil}, - {"custom params", NewParams(exported.Tendermint), nil}, - {"blank client", NewParams(" "), errors.New("client type 0 cannot be blank")}, - {"duplicate clients", NewParams(exported.Tendermint, exported.Tendermint), errors.New("duplicate client type: 07-tendermint")}, - {"allow all clients plus valid client", NewParams(AllowAllClients, exported.Tendermint), errors.New("allow list must have only one element because the allow all clients wildcard (*) is present")}, - {"too many allowed clients", NewParams(make([]string, MaxAllowedClientsLength+1)...), errors.New("allowed clients length must not exceed 200 
items")}, + {"default params", types.DefaultParams(), nil}, + {"custom params", types.NewParams(exported.Tendermint), nil}, + {"blank client", types.NewParams(" "), errors.New("client type 0 cannot be blank")}, + {"duplicate clients", types.NewParams(exported.Tendermint, exported.Tendermint), errors.New("duplicate client type: 07-tendermint")}, + {"allow all clients plus valid client", types.NewParams(types.AllowAllClients, exported.Tendermint), errors.New("allow list must have only one element because the allow all clients wildcard (*) is present")}, + {"too many allowed clients", types.NewParams(make([]string, types.MaxAllowedClientsLength+1)...), errors.New("allowed clients length must not exceed 200 items")}, } for _, tc := range testCases { - err := tc.params.Validate() if tc.expError == nil { require.NoError(t, err, tc.name) diff --git a/modules/core/02-client/types/query.pb.go b/modules/core/02-client/types/query.pb.go index 90e86812a88..171ca2f2b23 100644 --- a/modules/core/02-client/types/query.pb.go +++ b/modules/core/02-client/types/query.pb.go @@ -1727,6 +1727,7 @@ func _Query_VerifyMembership_Handler(srv interface{}, ctx context.Context, dec f return interceptor(ctx, in, info, handler) } +var Query_serviceDesc = _Query_serviceDesc var _Query_serviceDesc = grpc.ServiceDesc{ ServiceName: "ibc.core.client.v1.Query", HandlerType: (*QueryServer)(nil), diff --git a/modules/core/02-client/types/router_test.go b/modules/core/02-client/types/router_test.go index 9b3efcc7e2b..c11a6f7e0df 100644 --- a/modules/core/02-client/types/router_test.go +++ b/modules/core/02-client/types/router_test.go @@ -4,8 +4,6 @@ import ( "errors" "fmt" - "github.com/stretchr/testify/require" - "github.com/cosmos/cosmos-sdk/runtime" "github.com/cosmos/ibc-go/v10/modules/core/02-client/types" @@ -13,7 +11,7 @@ import ( ibctm "github.com/cosmos/ibc-go/v10/modules/light-clients/07-tendermint" ) -func (suite *TypesTestSuite) TestAddRoute() { +func (s *TypesTestSuite) TestAddRoute() { var ( clientType string router *types.Router @@ -49,11 +47,11 @@ func (suite *TypesTestSuite) TestAddRoute() { } for _, tc := range testCases { - suite.Run(tc.name, func() { - suite.SetupTest() - cdc := suite.chainA.App.AppCodec() + s.Run(tc.name, func() { + s.SetupTest() + cdc := s.chainA.App.AppCodec() - storeProvider := types.NewStoreProvider(runtime.NewKVStoreService(suite.chainA.GetSimApp().GetKey(exported.StoreKey))) + storeProvider := types.NewStoreProvider(runtime.NewKVStoreService(s.chainA.GetSimApp().GetKey(exported.StoreKey))) tmLightClientModule := ibctm.NewLightClientModule(cdc, storeProvider) router = types.NewRouter() @@ -61,9 +59,9 @@ func (suite *TypesTestSuite) TestAddRoute() { if tc.expError == nil { router.AddRoute(clientType, &tmLightClientModule) - suite.Require().True(router.HasRoute(clientType)) + s.Require().True(router.HasRoute(clientType)) } else { - require.Panics(suite.T(), func() { + s.Require().Panics(func() { router.AddRoute(clientType, &tmLightClientModule) }, tc.expError.Error()) } @@ -71,7 +69,7 @@ func (suite *TypesTestSuite) TestAddRoute() { } } -func (suite *TypesTestSuite) TestHasGetRoute() { +func (s *TypesTestSuite) TestHasGetRoute() { var clientType string testCases := []struct { @@ -103,11 +101,11 @@ func (suite *TypesTestSuite) TestHasGetRoute() { } for _, tc := range testCases { - suite.Run(tc.name, func() { - suite.SetupTest() - cdc := suite.chainA.App.AppCodec() + s.Run(tc.name, func() { + s.SetupTest() + cdc := s.chainA.App.AppCodec() - storeProvider := 
types.NewStoreProvider(runtime.NewKVStoreService(suite.chainA.GetSimApp().GetKey(exported.StoreKey))) + storeProvider := types.NewStoreProvider(runtime.NewKVStoreService(s.chainA.GetSimApp().GetKey(exported.StoreKey))) tmLightClientModule := ibctm.NewLightClientModule(cdc, storeProvider) router := types.NewRouter() router.AddRoute(exported.Tendermint, &tmLightClientModule) @@ -118,14 +116,14 @@ func (suite *TypesTestSuite) TestHasGetRoute() { route, ok := router.GetRoute(clientType) if tc.expPass { - suite.Require().True(hasRoute) - suite.Require().True(ok) - suite.Require().NotNil(route) - suite.Require().IsType(&ibctm.LightClientModule{}, route) + s.Require().True(hasRoute) + s.Require().True(ok) + s.Require().NotNil(route) + s.Require().IsType(&ibctm.LightClientModule{}, route) } else { - suite.Require().False(hasRoute) - suite.Require().False(ok) - suite.Require().Nil(route) + s.Require().False(hasRoute) + s.Require().False(ok) + s.Require().Nil(route) } }) } diff --git a/modules/core/02-client/types/tx.pb.go b/modules/core/02-client/types/tx.pb.go index bbae47636c1..abe6c22b528 100644 --- a/modules/core/02-client/types/tx.pb.go +++ b/modules/core/02-client/types/tx.pb.go @@ -9,6 +9,7 @@ import ( fmt "fmt" types "github.com/cosmos/cosmos-sdk/codec/types" _ "github.com/cosmos/cosmos-sdk/types/msgservice" + _ "github.com/cosmos/cosmos-sdk/types/tx/amino" _ "github.com/cosmos/gogoproto/gogoproto" grpc1 "github.com/cosmos/gogoproto/grpc" proto "github.com/cosmos/gogoproto/proto" @@ -282,91 +283,6 @@ func (m *MsgUpgradeClientResponse) XXX_DiscardUnknown() { var xxx_messageInfo_MsgUpgradeClientResponse proto.InternalMessageInfo -// MsgSubmitMisbehaviour defines an sdk.Msg type that submits Evidence for -// light client misbehaviour. -// This message has been deprecated. Use MsgUpdateClient instead. -// -// Deprecated: Do not use. -type MsgSubmitMisbehaviour struct { - // client unique identifier - ClientId string `protobuf:"bytes,1,opt,name=client_id,json=clientId,proto3" json:"client_id,omitempty"` - // misbehaviour used for freezing the light client - Misbehaviour *types.Any `protobuf:"bytes,2,opt,name=misbehaviour,proto3" json:"misbehaviour,omitempty"` - // signer address - Signer string `protobuf:"bytes,3,opt,name=signer,proto3" json:"signer,omitempty"` -} - -func (m *MsgSubmitMisbehaviour) Reset() { *m = MsgSubmitMisbehaviour{} } -func (m *MsgSubmitMisbehaviour) String() string { return proto.CompactTextString(m) } -func (*MsgSubmitMisbehaviour) ProtoMessage() {} -func (*MsgSubmitMisbehaviour) Descriptor() ([]byte, []int) { - return fileDescriptor_cb5dc4651eb49a04, []int{6} -} -func (m *MsgSubmitMisbehaviour) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MsgSubmitMisbehaviour) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MsgSubmitMisbehaviour.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MsgSubmitMisbehaviour) XXX_Merge(src proto.Message) { - xxx_messageInfo_MsgSubmitMisbehaviour.Merge(m, src) -} -func (m *MsgSubmitMisbehaviour) XXX_Size() int { - return m.Size() -} -func (m *MsgSubmitMisbehaviour) XXX_DiscardUnknown() { - xxx_messageInfo_MsgSubmitMisbehaviour.DiscardUnknown(m) -} - -var xxx_messageInfo_MsgSubmitMisbehaviour proto.InternalMessageInfo - -// MsgSubmitMisbehaviourResponse defines the Msg/SubmitMisbehaviour response -// type. 
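// Context for this removal (sketch, not part of the generated code): as the deleted
// doc comment notes, MsgSubmitMisbehaviour is replaced by MsgUpdateClient, which
// carries the misbehaviour as a ClientMessage. The constructors below already appear
// elsewhere in this diff; header1, header2 and signer are placeholders:
//
//	misbehaviour := ibctm.NewMisbehaviour("07-tendermint-0", header1, header2)
//	msg, err := types.NewMsgUpdateClient("07-tendermint-0", misbehaviour, signer)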
-type MsgSubmitMisbehaviourResponse struct { -} - -func (m *MsgSubmitMisbehaviourResponse) Reset() { *m = MsgSubmitMisbehaviourResponse{} } -func (m *MsgSubmitMisbehaviourResponse) String() string { return proto.CompactTextString(m) } -func (*MsgSubmitMisbehaviourResponse) ProtoMessage() {} -func (*MsgSubmitMisbehaviourResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_cb5dc4651eb49a04, []int{7} -} -func (m *MsgSubmitMisbehaviourResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MsgSubmitMisbehaviourResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MsgSubmitMisbehaviourResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MsgSubmitMisbehaviourResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_MsgSubmitMisbehaviourResponse.Merge(m, src) -} -func (m *MsgSubmitMisbehaviourResponse) XXX_Size() int { - return m.Size() -} -func (m *MsgSubmitMisbehaviourResponse) XXX_DiscardUnknown() { - xxx_messageInfo_MsgSubmitMisbehaviourResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_MsgSubmitMisbehaviourResponse proto.InternalMessageInfo - // MsgRecoverClient defines the message used to recover a frozen or expired client. type MsgRecoverClient struct { // the client identifier for the client to be updated if the proposal passes @@ -382,7 +298,7 @@ func (m *MsgRecoverClient) Reset() { *m = MsgRecoverClient{} } func (m *MsgRecoverClient) String() string { return proto.CompactTextString(m) } func (*MsgRecoverClient) ProtoMessage() {} func (*MsgRecoverClient) Descriptor() ([]byte, []int) { - return fileDescriptor_cb5dc4651eb49a04, []int{8} + return fileDescriptor_cb5dc4651eb49a04, []int{6} } func (m *MsgRecoverClient) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -419,7 +335,7 @@ func (m *MsgRecoverClientResponse) Reset() { *m = MsgRecoverClientRespon func (m *MsgRecoverClientResponse) String() string { return proto.CompactTextString(m) } func (*MsgRecoverClientResponse) ProtoMessage() {} func (*MsgRecoverClientResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_cb5dc4651eb49a04, []int{9} + return fileDescriptor_cb5dc4651eb49a04, []int{7} } func (m *MsgRecoverClientResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -468,7 +384,7 @@ func (m *MsgIBCSoftwareUpgrade) Reset() { *m = MsgIBCSoftwareUpgrade{} } func (m *MsgIBCSoftwareUpgrade) String() string { return proto.CompactTextString(m) } func (*MsgIBCSoftwareUpgrade) ProtoMessage() {} func (*MsgIBCSoftwareUpgrade) Descriptor() ([]byte, []int) { - return fileDescriptor_cb5dc4651eb49a04, []int{10} + return fileDescriptor_cb5dc4651eb49a04, []int{8} } func (m *MsgIBCSoftwareUpgrade) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -526,7 +442,7 @@ func (m *MsgIBCSoftwareUpgradeResponse) Reset() { *m = MsgIBCSoftwareUpg func (m *MsgIBCSoftwareUpgradeResponse) String() string { return proto.CompactTextString(m) } func (*MsgIBCSoftwareUpgradeResponse) ProtoMessage() {} func (*MsgIBCSoftwareUpgradeResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_cb5dc4651eb49a04, []int{11} + return fileDescriptor_cb5dc4651eb49a04, []int{9} } func (m *MsgIBCSoftwareUpgradeResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -569,7 +485,7 @@ func (m *MsgUpdateParams) Reset() { *m = MsgUpdateParams{} } func (m *MsgUpdateParams) String() string { return 
proto.CompactTextString(m) } func (*MsgUpdateParams) ProtoMessage() {} func (*MsgUpdateParams) Descriptor() ([]byte, []int) { - return fileDescriptor_cb5dc4651eb49a04, []int{12} + return fileDescriptor_cb5dc4651eb49a04, []int{10} } func (m *MsgUpdateParams) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -606,7 +522,7 @@ func (m *MsgUpdateParamsResponse) Reset() { *m = MsgUpdateParamsResponse func (m *MsgUpdateParamsResponse) String() string { return proto.CompactTextString(m) } func (*MsgUpdateParamsResponse) ProtoMessage() {} func (*MsgUpdateParamsResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_cb5dc4651eb49a04, []int{13} + return fileDescriptor_cb5dc4651eb49a04, []int{11} } func (m *MsgUpdateParamsResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -647,7 +563,7 @@ func (m *MsgDeleteClientCreator) Reset() { *m = MsgDeleteClientCreator{} func (m *MsgDeleteClientCreator) String() string { return proto.CompactTextString(m) } func (*MsgDeleteClientCreator) ProtoMessage() {} func (*MsgDeleteClientCreator) Descriptor() ([]byte, []int) { - return fileDescriptor_cb5dc4651eb49a04, []int{14} + return fileDescriptor_cb5dc4651eb49a04, []int{12} } func (m *MsgDeleteClientCreator) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -684,7 +600,7 @@ func (m *MsgDeleteClientCreatorResponse) Reset() { *m = MsgDeleteClientC func (m *MsgDeleteClientCreatorResponse) String() string { return proto.CompactTextString(m) } func (*MsgDeleteClientCreatorResponse) ProtoMessage() {} func (*MsgDeleteClientCreatorResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_cb5dc4651eb49a04, []int{15} + return fileDescriptor_cb5dc4651eb49a04, []int{13} } func (m *MsgDeleteClientCreatorResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -720,8 +636,6 @@ func init() { proto.RegisterType((*MsgUpdateClientResponse)(nil), "ibc.core.client.v1.MsgUpdateClientResponse") proto.RegisterType((*MsgUpgradeClient)(nil), "ibc.core.client.v1.MsgUpgradeClient") proto.RegisterType((*MsgUpgradeClientResponse)(nil), "ibc.core.client.v1.MsgUpgradeClientResponse") - proto.RegisterType((*MsgSubmitMisbehaviour)(nil), "ibc.core.client.v1.MsgSubmitMisbehaviour") - proto.RegisterType((*MsgSubmitMisbehaviourResponse)(nil), "ibc.core.client.v1.MsgSubmitMisbehaviourResponse") proto.RegisterType((*MsgRecoverClient)(nil), "ibc.core.client.v1.MsgRecoverClient") proto.RegisterType((*MsgRecoverClientResponse)(nil), "ibc.core.client.v1.MsgRecoverClientResponse") proto.RegisterType((*MsgIBCSoftwareUpgrade)(nil), "ibc.core.client.v1.MsgIBCSoftwareUpgrade") @@ -735,62 +649,60 @@ func init() { func init() { proto.RegisterFile("ibc/core/client/v1/tx.proto", fileDescriptor_cb5dc4651eb49a04) } var fileDescriptor_cb5dc4651eb49a04 = []byte{ - // 868 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x56, 0x3f, 0x73, 0xe3, 0x44, - 0x14, 0xb7, 0x1c, 0x9f, 0x87, 0x6c, 0x7c, 0x67, 0x6e, 0xcf, 0x77, 0xe7, 0x28, 0xc4, 0xf6, 0x98, - 0x14, 0xc1, 0x10, 0xc9, 0x36, 0x33, 0x10, 0x02, 0x14, 0x89, 0x29, 0x48, 0xe1, 0x99, 0x8c, 0x3d, - 0x34, 0x14, 0x38, 0x92, 0xbc, 0x56, 0xc4, 0x58, 0x5a, 0x8d, 0x76, 0x65, 0x48, 0xc7, 0x50, 0x51, - 0x52, 0xd0, 0xd0, 0xf1, 0x11, 0x32, 0x7c, 0x00, 0x3a, 0x66, 0x52, 0xa6, 0xa4, 0x62, 0x20, 0x29, - 0xf2, 0x35, 0x18, 0x69, 0x57, 0x8a, 0x24, 0x4b, 0x8a, 0x98, 0xeb, 0x24, 0xbd, 0xdf, 0x7b, 0xfb, - 0xfb, 0xbd, 0x7f, 0x5a, 0xb0, 0x63, 0xa8, 0x9a, 0xac, 0x61, 0x07, 0xc9, 0xda, 0xd2, 0x40, 0x16, - 0x95, 0x57, 0x03, 0x99, 0x7e, 0x2f, 
0xd9, 0x0e, 0xa6, 0x18, 0x42, 0x43, 0xd5, 0x24, 0xcf, 0x28, - 0x31, 0xa3, 0xb4, 0x1a, 0x88, 0xaf, 0x35, 0x4c, 0x4c, 0x4c, 0x64, 0x93, 0xe8, 0x1e, 0xd6, 0x24, - 0x3a, 0x03, 0x8b, 0x7b, 0xdc, 0xe0, 0xda, 0xba, 0xa3, 0xcc, 0x91, 0xbc, 0x1a, 0xa8, 0x88, 0x2a, - 0x83, 0xe0, 0x9d, 0xa3, 0x1a, 0x3a, 0xd6, 0xb1, 0xff, 0x28, 0x7b, 0x4f, 0xfc, 0xeb, 0xb6, 0x8e, - 0xb1, 0xbe, 0x44, 0xb2, 0xff, 0xa6, 0xba, 0x0b, 0x59, 0xb1, 0x2e, 0xb9, 0xa9, 0x9d, 0x42, 0x90, - 0xb3, 0xf1, 0x01, 0xdd, 0xdf, 0x05, 0x50, 0x1f, 0x13, 0x7d, 0xe4, 0x20, 0x85, 0xa2, 0x91, 0x6f, - 0x81, 0x1f, 0x83, 0x1a, 0xc3, 0xcc, 0x08, 0x55, 0x28, 0x6a, 0x0a, 0x1d, 0x61, 0x7f, 0x6b, 0xd8, - 0x90, 0xd8, 0x31, 0x52, 0x70, 0x8c, 0x74, 0x6c, 0x5d, 0x4e, 0xb6, 0x18, 0x72, 0xea, 0x01, 0xe1, - 0xe7, 0xa0, 0xae, 0x61, 0x8b, 0x20, 0x8b, 0xb8, 0x84, 0xfb, 0x96, 0x73, 0x7c, 0x9f, 0x85, 0x60, - 0xe6, 0xfe, 0x0a, 0x54, 0x89, 0xa1, 0x5b, 0xc8, 0x69, 0x6e, 0x74, 0x84, 0xfd, 0xcd, 0x09, 0x7f, - 0x3b, 0xaa, 0xff, 0xf4, 0x5b, 0xbb, 0xf4, 0xe3, 0xfd, 0x55, 0x8f, 0x7f, 0xe8, 0x7e, 0x06, 0x5e, - 0x27, 0x38, 0x4f, 0x10, 0xb1, 0xbd, 0x60, 0x70, 0x07, 0x6c, 0x72, 0xee, 0xc6, 0xdc, 0x27, 0xbe, - 0x39, 0x79, 0x8b, 0x7d, 0x38, 0x9d, 0x1f, 0x55, 0xbc, 0x40, 0xdd, 0x5f, 0x98, 0xe4, 0xaf, 0xec, - 0xf9, 0x83, 0xe4, 0x3c, 0x37, 0xf8, 0x29, 0x78, 0xc6, 0x8d, 0x26, 0x22, 0x44, 0xd1, 0xf3, 0x55, - 0x3d, 0x65, 0xd8, 0x31, 0x83, 0x16, 0x17, 0xb5, 0xed, 0x8b, 0x8a, 0xb2, 0x0a, 0x44, 0x75, 0xff, - 0x2c, 0x83, 0xb7, 0x7d, 0x9b, 0xdf, 0x0b, 0x45, 0x28, 0x27, 0x4b, 0x58, 0x7e, 0x83, 0x12, 0x6e, - 0xfc, 0x8f, 0x12, 0xf6, 0x41, 0xc3, 0x76, 0x30, 0x5e, 0xcc, 0x78, 0xdf, 0xce, 0x58, 0xec, 0x66, - 0xa5, 0x23, 0xec, 0xd7, 0x26, 0xd0, 0xb7, 0xc5, 0x65, 0x1c, 0x83, 0xdd, 0x84, 0x47, 0xe2, 0xf8, - 0x27, 0xbe, 0xab, 0x18, 0x73, 0xcd, 0xea, 0x9b, 0x6a, 0x7e, 0x8a, 0x45, 0xd0, 0x4c, 0xa6, 0x31, - 0xcc, 0xf1, 0xaf, 0x02, 0x78, 0x39, 0x26, 0xfa, 0xd4, 0x55, 0x4d, 0x83, 0x8e, 0x0d, 0xa2, 0xa2, - 0x0b, 0x65, 0x65, 0x60, 0xd7, 0xc9, 0x4f, 0xf4, 0x21, 0xa8, 0x99, 0x11, 0x70, 0x6e, 0xa2, 0x63, - 0xc8, 0xcc, 0xc6, 0x78, 0x9e, 0x60, 0xdd, 0x14, 0xba, 0x6d, 0xb0, 0x9b, 0x4a, 0x2d, 0x4a, 0xde, - 0x6b, 0x90, 0x09, 0xd2, 0xf0, 0x0a, 0x39, 0x3c, 0xb3, 0x3d, 0xf0, 0x9c, 0xb8, 0xea, 0xb7, 0x48, - 0xa3, 0xb3, 0x24, 0xff, 0x3a, 0x37, 0x8c, 0x02, 0x19, 0x7d, 0xd0, 0x20, 0xae, 0x4a, 0xa8, 0x41, - 0x5d, 0x8a, 0x22, 0xf0, 0xb2, 0x0f, 0x87, 0x0f, 0xb6, 0xd0, 0xa3, 0x70, 0x5f, 0xb3, 0xa4, 0xc7, - 0xa8, 0x85, 0xbc, 0xff, 0x60, 0x49, 0x3f, 0x3d, 0x19, 0x4d, 0xf1, 0x82, 0x7e, 0xa7, 0x38, 0x88, - 0x17, 0x07, 0x7e, 0x04, 0x2a, 0xf6, 0x52, 0xb1, 0xf8, 0xee, 0x79, 0x47, 0x62, 0xeb, 0x51, 0x0a, - 0xd6, 0x21, 0x5f, 0x8f, 0xd2, 0xd9, 0x52, 0xb1, 0x4e, 0x2a, 0xd7, 0x7f, 0xb7, 0x4b, 0x13, 0x1f, - 0x0f, 0xbf, 0x04, 0x2f, 0x39, 0x66, 0x3e, 0x2b, 0x3c, 0x01, 0x2f, 0x02, 0x97, 0x51, 0x64, 0x12, - 0xb2, 0x04, 0x6e, 0x45, 0xc5, 0xb1, 0xca, 0xac, 0xf3, 0x0f, 0x15, 0xd2, 0xc8, 0xae, 0x39, 0x53, - 0x1c, 0xc5, 0x24, 0x91, 0xc0, 0x42, 0x34, 0x30, 0x3c, 0x04, 0x55, 0xdb, 0x47, 0x70, 0xae, 0xa2, - 0xb4, 0xfe, 0x03, 0x91, 0x58, 0x0c, 0x2e, 0x99, 0xe3, 0xf3, 0x77, 0x09, 0xf3, 0x08, 0x09, 0x7d, - 0x03, 0x5e, 0x8d, 0x89, 0xfe, 0x05, 0x5a, 0xa2, 0xa0, 0x98, 0xfe, 0x1e, 0xc5, 0x8f, 0xf4, 0xf9, - 0x03, 0xe9, 0x72, 0x7e, 0xb9, 0x3b, 0xa0, 0x95, 0x1e, 0x3f, 0x60, 0x30, 0xfc, 0xb7, 0x0a, 0x36, - 0xc6, 0x44, 0x87, 0xe7, 0xa0, 0x16, 0xfb, 0xed, 0xbc, 0x9b, 0xa6, 0x37, 0xb1, 0xe7, 0xc5, 0xf7, - 0x0b, 0x80, 0xc2, 0x9f, 0xc1, 0x39, 0xa8, 0xc5, 0xb6, 0x7c, 0xd6, 0x09, 0x51, 0x50, 0xe6, 0x09, - 0x69, 0x9b, 0x19, 0x6a, 0xe0, 0x69, 0x7c, 0x9d, 0xed, 0x65, 
0x7a, 0x47, 0x50, 0xe2, 0x07, 0x45, - 0x50, 0xe1, 0x21, 0x0e, 0x80, 0x29, 0x6b, 0xe9, 0xbd, 0x8c, 0x18, 0xeb, 0x50, 0x71, 0x50, 0x18, - 0x1a, 0x15, 0x16, 0xdf, 0x26, 0x59, 0xc2, 0x62, 0xa8, 0x4c, 0x61, 0xa9, 0xe3, 0xef, 0x09, 0x4b, - 0x19, 0xfd, 0x2c, 0x61, 0xeb, 0xd0, 0x4c, 0x61, 0xd9, 0x03, 0x09, 0x17, 0x00, 0x46, 0x2b, 0xc9, - 0x67, 0x32, 0xbf, 0x33, 0x18, 0xe8, 0x91, 0xce, 0x88, 0xcf, 0x19, 0x74, 0xc1, 0x8b, 0xb4, 0x21, - 0xeb, 0x65, 0xc4, 0x48, 0xc1, 0x8a, 0xc3, 0xe2, 0xd8, 0xe0, 0x58, 0xf1, 0xc9, 0x0f, 0xf7, 0x57, - 0x3d, 0xe1, 0x64, 0x7a, 0x7d, 0xdb, 0x12, 0x6e, 0x6e, 0x5b, 0xc2, 0x3f, 0xb7, 0x2d, 0xe1, 0xe7, - 0xbb, 0x56, 0xe9, 0xe6, 0xae, 0x55, 0xfa, 0xeb, 0xae, 0x55, 0xfa, 0xfa, 0x13, 0xdd, 0xa0, 0x17, - 0xae, 0x2a, 0x69, 0xd8, 0x94, 0xf9, 0x9d, 0xd3, 0x50, 0xb5, 0x03, 0x1d, 0xcb, 0xab, 0x41, 0x5f, - 0x36, 0xf1, 0xdc, 0x5d, 0x22, 0xc2, 0xae, 0x8c, 0xfd, 0xe1, 0x01, 0xbf, 0x35, 0xd2, 0x4b, 0x1b, - 0x11, 0xb5, 0xea, 0x2f, 0xcd, 0x0f, 0xff, 0x0b, 0x00, 0x00, 0xff, 0xff, 0x85, 0x70, 0xb2, 0x01, - 0xf6, 0x0a, 0x00, 0x00, + // 841 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x56, 0x3f, 0x6f, 0xdb, 0x46, + 0x14, 0x17, 0x65, 0x59, 0xa8, 0xcf, 0xb2, 0x55, 0x9f, 0x65, 0x5b, 0xa6, 0x6b, 0x49, 0x50, 0x3d, + 0xb8, 0x6a, 0x4d, 0x4a, 0x2a, 0xd0, 0x3f, 0x6e, 0x3b, 0xd8, 0xea, 0x50, 0x0f, 0x02, 0x0c, 0x19, + 0x5d, 0x3a, 0x54, 0x26, 0xa9, 0x13, 0xcb, 0x56, 0xe4, 0x11, 0x3c, 0x52, 0xad, 0xb7, 0xa2, 0x53, + 0x91, 0x29, 0x43, 0x3e, 0x40, 0x3e, 0x82, 0x91, 0x2d, 0x4b, 0xb6, 0x00, 0x1e, 0x3d, 0x66, 0x0a, + 0x02, 0x7b, 0xf0, 0x94, 0xef, 0x10, 0xe8, 0xee, 0x48, 0x93, 0x34, 0xc9, 0x28, 0xc8, 0x22, 0x88, + 0x7c, 0xbf, 0xf7, 0xde, 0xef, 0xf7, 0xfe, 0x81, 0x60, 0xc7, 0x50, 0x35, 0x59, 0xc3, 0x0e, 0x92, + 0xb5, 0x89, 0x81, 0x2c, 0x57, 0x9e, 0x76, 0x64, 0xf7, 0x1f, 0xc9, 0x76, 0xb0, 0x8b, 0x21, 0x34, + 0x54, 0x4d, 0x9a, 0x19, 0x25, 0x66, 0x94, 0xa6, 0x1d, 0x71, 0x4d, 0x31, 0x0d, 0x0b, 0xcb, 0xf4, + 0x97, 0xc1, 0xc4, 0x2d, 0x0d, 0x13, 0x13, 0x13, 0xd9, 0x24, 0xfa, 0xcc, 0xdd, 0x24, 0x3a, 0x37, + 0xec, 0x71, 0x83, 0x67, 0xeb, 0x8e, 0x32, 0x42, 0xf2, 0xb4, 0xa3, 0x22, 0x57, 0xe9, 0xf8, 0xcf, + 0x1c, 0x55, 0xd1, 0xb1, 0x8e, 0xe9, 0x5f, 0x79, 0xf6, 0x8f, 0xbf, 0xdd, 0xd6, 0x31, 0xd6, 0x27, + 0x48, 0xa6, 0x4f, 0xaa, 0x37, 0x96, 0x15, 0xeb, 0x82, 0x9b, 0xea, 0x09, 0x9c, 0x39, 0x41, 0x0a, + 0x68, 0x3e, 0x13, 0x40, 0xb9, 0x4f, 0xf4, 0x9e, 0x83, 0x14, 0x17, 0xf5, 0xa8, 0x05, 0x7e, 0x0b, + 0x4a, 0x0c, 0x33, 0x24, 0xae, 0xe2, 0xa2, 0xaa, 0xd0, 0x10, 0xf6, 0x97, 0xbb, 0x15, 0x89, 0xa5, + 0x91, 0xfc, 0x34, 0xd2, 0x91, 0x75, 0x31, 0x58, 0x66, 0xc8, 0xb3, 0x19, 0x10, 0xfe, 0x04, 0xca, + 0x1a, 0xb6, 0x08, 0xb2, 0x88, 0x47, 0xb8, 0x6f, 0x3e, 0xc3, 0x77, 0x35, 0x00, 0x33, 0xf7, 0x4d, + 0x50, 0x24, 0x86, 0x6e, 0x21, 0xa7, 0xba, 0xd0, 0x10, 0xf6, 0x97, 0x06, 0xfc, 0xe9, 0xb0, 0xfc, + 0xff, 0xd3, 0x7a, 0xee, 0xbf, 0xbb, 0xcb, 0x16, 0x7f, 0xd1, 0xfc, 0x11, 0x6c, 0xc5, 0x38, 0x0f, + 0x10, 0xb1, 0x67, 0xc1, 0xe0, 0x0e, 0x58, 0xe2, 0xdc, 0x8d, 0x11, 0x25, 0xbe, 0x34, 0xf8, 0x84, + 0xbd, 0x38, 0x19, 0x1d, 0x16, 0x66, 0x81, 0x9a, 0x4f, 0x98, 0xe4, 0x5f, 0xed, 0xd1, 0xbd, 0xe4, + 0x2c, 0x37, 0xf8, 0x03, 0x58, 0xe5, 0x46, 0x13, 0x11, 0xa2, 0xe8, 0xd9, 0xaa, 0x56, 0x18, 0xb6, + 0xcf, 0xa0, 0xf3, 0x8b, 0xda, 0xa6, 0xa2, 0xc2, 0xac, 0x7c, 0x51, 0xcd, 0x97, 0x79, 0xf0, 0x29, + 0xb5, 0xd1, 0x59, 0x98, 0x87, 0x72, 0xbc, 0x85, 0xf9, 0x8f, 0x68, 0xe1, 0xc2, 0x07, 0xb4, 0xb0, + 0x0d, 0x2a, 0xb6, 0x83, 0xf1, 0x78, 0xc8, 0xe7, 0x76, 0xc8, 0x62, 0x57, 0x0b, 0x0d, 0x61, 0xbf, + 0x34, 0x80, 
0xd4, 0x16, 0x95, 0x71, 0x04, 0x76, 0x63, 0x1e, 0xb1, 0xf4, 0x8b, 0xd4, 0x55, 0x8c, + 0xb8, 0xa6, 0xcd, 0x4d, 0x31, 0xbb, 0xc4, 0x22, 0xa8, 0xc6, 0xcb, 0x18, 0xd4, 0xf8, 0xb9, 0x40, + 0x6b, 0x3c, 0x40, 0x1a, 0x9e, 0x22, 0x87, 0x93, 0x6b, 0x81, 0x35, 0xe2, 0xa9, 0x7f, 0x22, 0xcd, + 0x1d, 0xc6, 0x6b, 0x5d, 0xe6, 0x86, 0x9e, 0x5f, 0xf2, 0x36, 0xa8, 0x10, 0x4f, 0x25, 0xae, 0xe1, + 0x7a, 0x2e, 0x0a, 0xc1, 0xf3, 0x14, 0x0e, 0xef, 0x6d, 0x81, 0x47, 0xda, 0x68, 0xc8, 0x31, 0xde, + 0x8f, 0xee, 0x2e, 0x5b, 0x3b, 0xec, 0x3e, 0x1c, 0x90, 0xd1, 0x5f, 0x72, 0x9c, 0x26, 0xd7, 0x15, + 0x79, 0x17, 0xe8, 0x7a, 0x21, 0x80, 0x8d, 0x3e, 0xd1, 0x4f, 0x8e, 0x7b, 0x67, 0x78, 0xec, 0xfe, + 0xad, 0x38, 0x88, 0xeb, 0x87, 0xdf, 0x80, 0x82, 0x3d, 0x51, 0x2c, 0xbe, 0xde, 0x9f, 0x49, 0x2c, + 0x83, 0xe4, 0x5f, 0x1c, 0x7e, 0x81, 0xa4, 0xd3, 0x89, 0x62, 0x1d, 0x17, 0xae, 0x5e, 0xd7, 0x73, + 0x03, 0x8a, 0x87, 0xbf, 0x80, 0x0d, 0x8e, 0x19, 0x0d, 0xe7, 0x1e, 0xb2, 0x75, 0xdf, 0xa5, 0x17, + 0x1a, 0xb6, 0xb4, 0x02, 0x2c, 0x87, 0x9b, 0x56, 0x07, 0xbb, 0x89, 0xfc, 0x03, 0x85, 0x6e, 0x68, + 0x9d, 0x4f, 0x15, 0x47, 0x31, 0x49, 0x28, 0xb0, 0x10, 0x0e, 0x0c, 0xbf, 0x03, 0x45, 0x9b, 0x22, + 0x38, 0x57, 0x51, 0x7a, 0x78, 0xb6, 0x25, 0x16, 0x83, 0x4b, 0xe6, 0xf8, 0xec, 0x75, 0x65, 0x1e, + 0x01, 0xa1, 0xdf, 0xc1, 0x66, 0x9f, 0xe8, 0x3f, 0xa3, 0x09, 0xf2, 0x9b, 0x4d, 0x4f, 0x15, 0x76, + 0xb2, 0x77, 0xf6, 0x9e, 0x74, 0x3e, 0x7b, 0x8c, 0x1b, 0xa0, 0x96, 0x1c, 0xdf, 0x67, 0xd0, 0x7d, + 0xbb, 0x08, 0x16, 0xfa, 0x44, 0x87, 0xe7, 0xa0, 0x14, 0xb9, 0xec, 0x9f, 0x27, 0xe9, 0x8d, 0x9d, + 0x52, 0xf1, 0xcb, 0x39, 0x40, 0xc1, 0xbd, 0x3d, 0x07, 0xa5, 0xc8, 0x21, 0x4d, 0xcb, 0x10, 0x06, + 0xa5, 0x66, 0x48, 0x3a, 0x7e, 0x50, 0x03, 0x2b, 0xd1, 0x8b, 0xb1, 0x97, 0xea, 0x1d, 0x42, 0x89, + 0x5f, 0xcd, 0x83, 0x0a, 0x27, 0x89, 0x6e, 0x7e, 0x5a, 0x92, 0x08, 0x2a, 0x35, 0x49, 0xe2, 0x2a, + 0x42, 0x07, 0xc0, 0x84, 0x35, 0xfc, 0x22, 0x25, 0xc6, 0x43, 0xa8, 0xd8, 0x99, 0x1b, 0x1a, 0xe4, + 0x1c, 0x03, 0x18, 0xae, 0x2a, 0xdf, 0x8f, 0xec, 0x2e, 0x31, 0xd0, 0x7b, 0xba, 0x14, 0x9d, 0x79, + 0xe8, 0x81, 0xf5, 0xa4, 0x81, 0x6f, 0xa5, 0xc4, 0x48, 0xc0, 0x8a, 0xdd, 0xf9, 0xb1, 0x7e, 0x5a, + 0x71, 0xf1, 0xdf, 0xbb, 0xcb, 0x96, 0x70, 0x7c, 0x76, 0x75, 0x53, 0x13, 0xae, 0x6f, 0x6a, 0xc2, + 0x9b, 0x9b, 0x9a, 0xf0, 0xf8, 0xb6, 0x96, 0xbb, 0xbe, 0xad, 0xe5, 0x5e, 0xdd, 0xd6, 0x72, 0xbf, + 0x7d, 0xaf, 0x1b, 0xee, 0x1f, 0x9e, 0x2a, 0x69, 0xd8, 0x94, 0xf9, 0x27, 0x96, 0xa1, 0x6a, 0x07, + 0x3a, 0x96, 0xa7, 0x9d, 0xb6, 0x6c, 0xe2, 0x91, 0x37, 0x41, 0x84, 0x7d, 0x21, 0xb5, 0xbb, 0x07, + 0xfc, 0x23, 0xc9, 0xbd, 0xb0, 0x11, 0x51, 0x8b, 0xf4, 0x80, 0x7d, 0xfd, 0x2e, 0x00, 0x00, 0xff, + 0xff, 0x04, 0xd4, 0x4a, 0x73, 0xf8, 0x09, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. @@ -811,8 +723,6 @@ type MsgClient interface { UpdateClient(ctx context.Context, in *MsgUpdateClient, opts ...grpc.CallOption) (*MsgUpdateClientResponse, error) // UpgradeClient defines a rpc handler method for MsgUpgradeClient. UpgradeClient(ctx context.Context, in *MsgUpgradeClient, opts ...grpc.CallOption) (*MsgUpgradeClientResponse, error) - // SubmitMisbehaviour defines a rpc handler method for MsgSubmitMisbehaviour. - SubmitMisbehaviour(ctx context.Context, in *MsgSubmitMisbehaviour, opts ...grpc.CallOption) (*MsgSubmitMisbehaviourResponse, error) // RecoverClient defines a rpc handler method for MsgRecoverClient. 
RecoverClient(ctx context.Context, in *MsgRecoverClient, opts ...grpc.CallOption) (*MsgRecoverClientResponse, error) // IBCSoftwareUpgrade defines a rpc handler method for MsgIBCSoftwareUpgrade. @@ -858,15 +768,6 @@ func (c *msgClient) UpgradeClient(ctx context.Context, in *MsgUpgradeClient, opt return out, nil } -func (c *msgClient) SubmitMisbehaviour(ctx context.Context, in *MsgSubmitMisbehaviour, opts ...grpc.CallOption) (*MsgSubmitMisbehaviourResponse, error) { - out := new(MsgSubmitMisbehaviourResponse) - err := c.cc.Invoke(ctx, "/ibc.core.client.v1.Msg/SubmitMisbehaviour", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - func (c *msgClient) RecoverClient(ctx context.Context, in *MsgRecoverClient, opts ...grpc.CallOption) (*MsgRecoverClientResponse, error) { out := new(MsgRecoverClientResponse) err := c.cc.Invoke(ctx, "/ibc.core.client.v1.Msg/RecoverClient", in, out, opts...) @@ -911,8 +812,6 @@ type MsgServer interface { UpdateClient(context.Context, *MsgUpdateClient) (*MsgUpdateClientResponse, error) // UpgradeClient defines a rpc handler method for MsgUpgradeClient. UpgradeClient(context.Context, *MsgUpgradeClient) (*MsgUpgradeClientResponse, error) - // SubmitMisbehaviour defines a rpc handler method for MsgSubmitMisbehaviour. - SubmitMisbehaviour(context.Context, *MsgSubmitMisbehaviour) (*MsgSubmitMisbehaviourResponse, error) // RecoverClient defines a rpc handler method for MsgRecoverClient. RecoverClient(context.Context, *MsgRecoverClient) (*MsgRecoverClientResponse, error) // IBCSoftwareUpgrade defines a rpc handler method for MsgIBCSoftwareUpgrade. @@ -936,9 +835,6 @@ func (*UnimplementedMsgServer) UpdateClient(ctx context.Context, req *MsgUpdateC func (*UnimplementedMsgServer) UpgradeClient(ctx context.Context, req *MsgUpgradeClient) (*MsgUpgradeClientResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method UpgradeClient not implemented") } -func (*UnimplementedMsgServer) SubmitMisbehaviour(ctx context.Context, req *MsgSubmitMisbehaviour) (*MsgSubmitMisbehaviourResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method SubmitMisbehaviour not implemented") -} func (*UnimplementedMsgServer) RecoverClient(ctx context.Context, req *MsgRecoverClient) (*MsgRecoverClientResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method RecoverClient not implemented") } @@ -1010,24 +906,6 @@ func _Msg_UpgradeClient_Handler(srv interface{}, ctx context.Context, dec func(i return interceptor(ctx, in, info, handler) } -func _Msg_SubmitMisbehaviour_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(MsgSubmitMisbehaviour) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(MsgServer).SubmitMisbehaviour(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/ibc.core.client.v1.Msg/SubmitMisbehaviour", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(MsgServer).SubmitMisbehaviour(ctx, req.(*MsgSubmitMisbehaviour)) - } - return interceptor(ctx, in, info, handler) -} - func _Msg_RecoverClient_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(MsgRecoverClient) if err := dec(in); err != nil { @@ -1100,6 +978,7 @@ func _Msg_DeleteClientCreator_Handler(srv interface{}, ctx context.Context, dec return interceptor(ctx, 
in, info, handler) } +var Msg_serviceDesc = _Msg_serviceDesc var _Msg_serviceDesc = grpc.ServiceDesc{ ServiceName: "ibc.core.client.v1.Msg", HandlerType: (*MsgServer)(nil), @@ -1116,10 +995,6 @@ var _Msg_serviceDesc = grpc.ServiceDesc{ MethodName: "UpgradeClient", Handler: _Msg_UpgradeClient_Handler, }, - { - MethodName: "SubmitMisbehaviour", - Handler: _Msg_SubmitMisbehaviour_Handler, - }, { MethodName: "RecoverClient", Handler: _Msg_RecoverClient_Handler, @@ -1395,78 +1270,6 @@ func (m *MsgUpgradeClientResponse) MarshalToSizedBuffer(dAtA []byte) (int, error return len(dAtA) - i, nil } -func (m *MsgSubmitMisbehaviour) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MsgSubmitMisbehaviour) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MsgSubmitMisbehaviour) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Signer) > 0 { - i -= len(m.Signer) - copy(dAtA[i:], m.Signer) - i = encodeVarintTx(dAtA, i, uint64(len(m.Signer))) - i-- - dAtA[i] = 0x1a - } - if m.Misbehaviour != nil { - { - size, err := m.Misbehaviour.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTx(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if len(m.ClientId) > 0 { - i -= len(m.ClientId) - copy(dAtA[i:], m.ClientId) - i = encodeVarintTx(dAtA, i, uint64(len(m.ClientId))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *MsgSubmitMisbehaviourResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MsgSubmitMisbehaviourResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MsgSubmitMisbehaviourResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - return len(dAtA) - i, nil -} - func (m *MsgRecoverClient) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -1849,36 +1652,6 @@ func (m *MsgUpgradeClientResponse) Size() (n int) { return n } -func (m *MsgSubmitMisbehaviour) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ClientId) - if l > 0 { - n += 1 + l + sovTx(uint64(l)) - } - if m.Misbehaviour != nil { - l = m.Misbehaviour.Size() - n += 1 + l + sovTx(uint64(l)) - } - l = len(m.Signer) - if l > 0 { - n += 1 + l + sovTx(uint64(l)) - } - return n -} - -func (m *MsgSubmitMisbehaviourResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - return n -} - func (m *MsgRecoverClient) Size() (n int) { if m == nil { return 0 @@ -2733,206 +2506,6 @@ func (m *MsgUpgradeClientResponse) Unmarshal(dAtA []byte) error { } return nil } -func (m *MsgSubmitMisbehaviour) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTx - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: 
MsgSubmitMisbehaviour: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MsgSubmitMisbehaviour: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ClientId", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTx - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTx - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTx - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ClientId = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Misbehaviour", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTx - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTx - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTx - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Misbehaviour == nil { - m.Misbehaviour = &types.Any{} - } - if err := m.Misbehaviour.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Signer", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTx - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTx - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTx - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Signer = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipTx(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTx - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MsgSubmitMisbehaviourResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTx - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MsgSubmitMisbehaviourResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MsgSubmitMisbehaviourResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipTx(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTx - } - if (iNdEx + skippy) > l { - return 
io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} func (m *MsgRecoverClient) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 diff --git a/modules/core/02-client/v2/genesis_test.go b/modules/core/02-client/v2/genesis_test.go index ee88e8c1255..dbf6cbd28a2 100644 --- a/modules/core/02-client/v2/genesis_test.go +++ b/modules/core/02-client/v2/genesis_test.go @@ -7,22 +7,22 @@ import ( ) // TestInitExportGenesis tests the import and export flow for the channel v2 keeper. -func (suite *ModuleTestSuite) TestInitExportGenesis() { - path := ibctesting.NewPath(suite.chainA, suite.chainB) +func (s *ModuleTestSuite) TestInitExportGenesis() { + path := ibctesting.NewPath(s.chainA, s.chainB) path.SetupV2() - path2 := ibctesting.NewPath(suite.chainA, suite.chainC) + path2 := ibctesting.NewPath(s.chainA, s.chainC) path2.SetupV2() - path3 := ibctesting.NewPath(suite.chainB, suite.chainC) + path3 := ibctesting.NewPath(s.chainB, s.chainC) path3.SetupV2() - app := suite.chainA.App + app := s.chainA.App emptyGenesis := types.DefaultGenesisState() // create a valid genesis state that uses the counterparty info set during setup - existingGS := clientv2.ExportGenesis(suite.chainA.GetContext(), app.GetIBCKeeper().ClientV2Keeper) + existingGS := clientv2.ExportGenesis(s.chainA.GetContext(), app.GetIBCKeeper().ClientV2Keeper) tests := []struct { name string @@ -42,13 +42,13 @@ func (suite *ModuleTestSuite) TestInitExportGenesis() { } for _, tt := range tests { - suite.Run(tt.name, func() { + s.Run(tt.name, func() { clientV2Keeper := app.GetIBCKeeper().ClientV2Keeper - clientv2.InitGenesis(suite.chainA.GetContext(), clientV2Keeper, tt.genState) + clientv2.InitGenesis(s.chainA.GetContext(), clientV2Keeper, tt.genState) - exported := clientv2.ExportGenesis(suite.chainA.GetContext(), clientV2Keeper) - suite.Require().Equal(tt.expectedState, exported) + exported := clientv2.ExportGenesis(s.chainA.GetContext(), clientV2Keeper) + s.Require().Equal(tt.expectedState, exported) }) } } diff --git a/modules/core/02-client/v2/keeper/grpc_query_test.go b/modules/core/02-client/v2/keeper/grpc_query_test.go index 42ff13c7c97..085153d6e6e 100644 --- a/modules/core/02-client/v2/keeper/grpc_query_test.go +++ b/modules/core/02-client/v2/keeper/grpc_query_test.go @@ -11,7 +11,7 @@ import ( ibctesting "github.com/cosmos/ibc-go/v10/testing" ) -func (suite *KeeperTestSuite) TestQueryCounterPartyInfo() { +func (s *KeeperTestSuite) TestQueryCounterPartyInfo() { var ( req *types.QueryCounterpartyInfoRequest expInfo = types.CounterpartyInfo{} @@ -39,7 +39,7 @@ func (suite *KeeperTestSuite) TestQueryCounterPartyInfo() { { "counterparty not found", func() { - path1 := ibctesting.NewPath(suite.chainA, suite.chainB) + path1 := ibctesting.NewPath(s.chainA, s.chainB) path1.SetupClients() // counter party not set up @@ -53,7 +53,7 @@ func (suite *KeeperTestSuite) TestQueryCounterPartyInfo() { { "success", func() { - path1 := ibctesting.NewPath(suite.chainA, suite.chainB) + path1 := ibctesting.NewPath(s.chainA, s.chainB) path1.SetupClients() path1.SetupCounterparties() @@ -67,26 +67,26 @@ func (suite *KeeperTestSuite) TestQueryCounterPartyInfo() { } for _, tc := range testCases { - suite.Run(fmt.Sprintf("Case %s", tc.msg), func() { - suite.SetupTest() // reset + s.Run(fmt.Sprintf("Case %s", tc.msg), func() { + s.SetupTest() // reset tc.malleate() - ctx := suite.chainA.GetContext() - queryServer := keeper.NewQueryServer(suite.chainA.GetSimApp().IBCKeeper.ClientV2Keeper) + ctx 
:= s.chainA.GetContext() + queryServer := keeper.NewQueryServer(s.chainA.GetSimApp().IBCKeeper.ClientV2Keeper) res, err := queryServer.CounterpartyInfo(ctx, req) if tc.expErr == nil { - suite.Require().NoError(err) - suite.Require().NotNil(res) - suite.Require().Equal(expInfo, *res.CounterpartyInfo) + s.Require().NoError(err) + s.Require().NotNil(res) + s.Require().Equal(expInfo, *res.CounterpartyInfo) } else { - suite.Require().Error(err) - suite.Require().ErrorIs(err, tc.expErr) + s.Require().Error(err) + s.Require().ErrorIs(err, tc.expErr) } }) } } -func (suite *KeeperTestSuite) TestQueryConfig() { +func (s *KeeperTestSuite) TestQueryConfig() { var ( req *types.QueryConfigRequest expConfig = types.Config{} @@ -114,7 +114,7 @@ func (suite *KeeperTestSuite) TestQueryConfig() { { "success with default config", func() { - path1 := ibctesting.NewPath(suite.chainA, suite.chainB) + path1 := ibctesting.NewPath(s.chainA, s.chainB) path1.SetupClients() expConfig = types.DefaultConfig() @@ -127,11 +127,11 @@ func (suite *KeeperTestSuite) TestQueryConfig() { { "success with custom config", func() { - path1 := ibctesting.NewPath(suite.chainA, suite.chainB) + path1 := ibctesting.NewPath(s.chainA, s.chainB) path1.SetupClients() expConfig = types.NewConfig(ibctesting.TestAccAddress) - suite.chainA.App.GetIBCKeeper().ClientV2Keeper.SetConfig(suite.chainA.GetContext(), path1.EndpointA.ClientID, expConfig) + s.chainA.App.GetIBCKeeper().ClientV2Keeper.SetConfig(s.chainA.GetContext(), path1.EndpointA.ClientID, expConfig) req = &types.QueryConfigRequest{ ClientId: path1.EndpointA.ClientID, } @@ -141,20 +141,20 @@ func (suite *KeeperTestSuite) TestQueryConfig() { } for _, tc := range testCases { - suite.Run(fmt.Sprintf("Case %s", tc.msg), func() { - suite.SetupTest() // reset + s.Run(fmt.Sprintf("Case %s", tc.msg), func() { + s.SetupTest() // reset tc.malleate() - ctx := suite.chainA.GetContext() - queryServer := keeper.NewQueryServer(suite.chainA.GetSimApp().IBCKeeper.ClientV2Keeper) + ctx := s.chainA.GetContext() + queryServer := keeper.NewQueryServer(s.chainA.GetSimApp().IBCKeeper.ClientV2Keeper) res, err := queryServer.Config(ctx, req) if tc.expErr == nil { - suite.Require().NoError(err) - suite.Require().NotNil(res) - suite.Require().Equal(expConfig, *res.Config) + s.Require().NoError(err) + s.Require().NotNil(res) + s.Require().Equal(expConfig, *res.Config) } else { - suite.Require().Error(err) - suite.Require().ErrorIs(err, tc.expErr) + s.Require().Error(err) + s.Require().ErrorIs(err, tc.expErr) } }) } diff --git a/modules/core/02-client/v2/keeper/keeper_test.go b/modules/core/02-client/v2/keeper/keeper_test.go index cc26b2a79e2..6be2b8db28c 100644 --- a/modules/core/02-client/v2/keeper/keeper_test.go +++ b/modules/core/02-client/v2/keeper/keeper_test.go @@ -36,51 +36,51 @@ func TestKeeperTestSuite(t *testing.T) { testifysuite.Run(t, new(KeeperTestSuite)) } -func (suite *KeeperTestSuite) SetupTest() { - suite.coordinator = ibctesting.NewCoordinator(suite.T(), 2) +func (s *KeeperTestSuite) SetupTest() { + s.coordinator = ibctesting.NewCoordinator(s.T(), 2) - suite.chainA = suite.coordinator.GetChain(ibctesting.GetChainID(1)) - suite.chainB = suite.coordinator.GetChain(ibctesting.GetChainID(2)) + s.chainA = s.coordinator.GetChain(ibctesting.GetChainID(1)) + s.chainB = s.coordinator.GetChain(ibctesting.GetChainID(2)) isCheckTx := false - app := simapp.Setup(suite.T(), isCheckTx) + app := simapp.Setup(s.T(), isCheckTx) - suite.cdc = app.AppCodec() - suite.ctx = app.NewContext(isCheckTx) - suite.keeper = 
app.IBCKeeper.ClientV2Keeper + s.cdc = app.AppCodec() + s.ctx = app.NewContext(isCheckTx) + s.keeper = app.IBCKeeper.ClientV2Keeper } -func (suite *KeeperTestSuite) TestSetClientCounterparty() { +func (s *KeeperTestSuite) TestSetClientCounterparty() { counterparty := types.NewCounterpartyInfo([][]byte{[]byte("ibc"), []byte("channel-7")}, testClientID2) - suite.keeper.SetClientCounterparty(suite.ctx, testClientID, counterparty) + s.keeper.SetClientCounterparty(s.ctx, testClientID, counterparty) - retrievedCounterparty, found := suite.keeper.GetClientCounterparty(suite.ctx, testClientID) - suite.Require().True(found, "GetCounterparty failed") - suite.Require().Equal(counterparty, retrievedCounterparty, "Counterparties are not equal") + retrievedCounterparty, found := s.keeper.GetClientCounterparty(s.ctx, testClientID) + s.Require().True(found, "GetCounterparty failed") + s.Require().Equal(counterparty, retrievedCounterparty, "Counterparties are not equal") } -func (suite *KeeperTestSuite) TestSetConfig() { - config := suite.keeper.GetConfig(suite.ctx, testClientID) - suite.Require().Equal(config, types.DefaultConfig(), "did not return default config on initialization") +func (s *KeeperTestSuite) TestSetConfig() { + config := s.keeper.GetConfig(s.ctx, testClientID) + s.Require().Equal(config, types.DefaultConfig(), "did not return default config on initialization") newConfig := types.NewConfig(ibctesting.TestAccAddress) - suite.keeper.SetConfig(suite.ctx, testClientID, newConfig) + s.keeper.SetConfig(s.ctx, testClientID, newConfig) - config = suite.keeper.GetConfig(suite.ctx, testClientID) - suite.Require().Equal(newConfig, config, "config not set correctly") + config = s.keeper.GetConfig(s.ctx, testClientID) + s.Require().Equal(newConfig, config, "config not set correctly") // config should be empty for a different clientID - config = suite.keeper.GetConfig(suite.ctx, testClientID2) - suite.Require().Equal(types.DefaultConfig(), config, "config should be empty for different clientID") + config = s.keeper.GetConfig(s.ctx, testClientID2) + s.Require().Equal(types.DefaultConfig(), config, "config should be empty for different clientID") // set config for a different clientID - newConfig2 := types.NewConfig(ibctesting.TestAccAddress, suite.chainA.SenderAccount.GetAddress().String()) - suite.keeper.SetConfig(suite.ctx, testClientID2, newConfig2) + newConfig2 := types.NewConfig(ibctesting.TestAccAddress, s.chainA.SenderAccount.GetAddress().String()) + s.keeper.SetConfig(s.ctx, testClientID2, newConfig2) - config = suite.keeper.GetConfig(suite.ctx, testClientID2) - suite.Require().Equal(newConfig2, config, "config not set correctly for different clientID") + config = s.keeper.GetConfig(s.ctx, testClientID2) + s.Require().Equal(newConfig2, config, "config not set correctly for different clientID") // config for original client unaffected - config = suite.keeper.GetConfig(suite.ctx, testClientID) - suite.Require().Equal(newConfig, config, "config not set correctly for original clientID") + config = s.keeper.GetConfig(s.ctx, testClientID) + s.Require().Equal(newConfig, config, "config not set correctly for original clientID") } diff --git a/modules/core/02-client/v2/module_test.go b/modules/core/02-client/v2/module_test.go index d119b2b2942..7d88637837e 100644 --- a/modules/core/02-client/v2/module_test.go +++ b/modules/core/02-client/v2/module_test.go @@ -8,10 +8,6 @@ import ( ibctesting "github.com/cosmos/ibc-go/v10/testing" ) -func TestModuleTestSuite(t *testing.T) { - testifysuite.Run(t, 
new(ModuleTestSuite)) -} - type ModuleTestSuite struct { testifysuite.Suite @@ -23,9 +19,13 @@ type ModuleTestSuite struct { chainC *ibctesting.TestChain } -func (suite *ModuleTestSuite) SetupTest() { - suite.coordinator = ibctesting.NewCoordinator(suite.T(), 3) - suite.chainA = suite.coordinator.GetChain(ibctesting.GetChainID(1)) - suite.chainB = suite.coordinator.GetChain(ibctesting.GetChainID(2)) - suite.chainC = suite.coordinator.GetChain(ibctesting.GetChainID(3)) +func TestModuleTestSuite(t *testing.T) { + testifysuite.Run(t, new(ModuleTestSuite)) +} + +func (s *ModuleTestSuite) SetupTest() { + s.coordinator = ibctesting.NewCoordinator(s.T(), 3) + s.chainA = s.coordinator.GetChain(ibctesting.GetChainID(1)) + s.chainB = s.coordinator.GetChain(ibctesting.GetChainID(2)) + s.chainC = s.coordinator.GetChain(ibctesting.GetChainID(3)) } diff --git a/modules/core/02-client/v2/types/config.go b/modules/core/02-client/v2/types/config.go index b472f36abdc..8e1e9dc5b84 100644 --- a/modules/core/02-client/v2/types/config.go +++ b/modules/core/02-client/v2/types/config.go @@ -1,7 +1,7 @@ package types import ( - fmt "fmt" + "fmt" sdk "github.com/cosmos/cosmos-sdk/types" ) diff --git a/modules/core/02-client/v2/types/msgs_test.go b/modules/core/02-client/v2/types/msgs_test.go index 7f77db443e9..d81ed9ddb9d 100644 --- a/modules/core/02-client/v2/types/msgs_test.go +++ b/modules/core/02-client/v2/types/msgs_test.go @@ -1,7 +1,7 @@ package types_test import ( - fmt "fmt" + "fmt" "testing" "github.com/stretchr/testify/require" diff --git a/modules/core/02-client/v2/types/query.pb.go b/modules/core/02-client/v2/types/query.pb.go index 23b0777d963..4c5dac396ca 100644 --- a/modules/core/02-client/v2/types/query.pb.go +++ b/modules/core/02-client/v2/types/query.pb.go @@ -353,6 +353,7 @@ func _Query_Config_Handler(srv interface{}, ctx context.Context, dec func(interf return interceptor(ctx, in, info, handler) } +var Query_serviceDesc = _Query_serviceDesc var _Query_serviceDesc = grpc.ServiceDesc{ ServiceName: "ibc.core.client.v2.Query", HandlerType: (*QueryServer)(nil), diff --git a/modules/core/02-client/v2/types/tx.pb.go b/modules/core/02-client/v2/types/tx.pb.go index 0711b01ef5d..44027022a2c 100644 --- a/modules/core/02-client/v2/types/tx.pb.go +++ b/modules/core/02-client/v2/types/tx.pb.go @@ -338,6 +338,7 @@ func _Msg_UpdateClientConfig_Handler(srv interface{}, ctx context.Context, dec f return interceptor(ctx, in, info, handler) } +var Msg_serviceDesc = _Msg_serviceDesc var _Msg_serviceDesc = grpc.ServiceDesc{ ServiceName: "ibc.core.client.v2.Msg", HandlerType: (*MsgServer)(nil), diff --git a/modules/core/03-connection/keeper/events_test.go b/modules/core/03-connection/keeper/events_test.go index 741f4501753..e1260946d76 100644 --- a/modules/core/03-connection/keeper/events_test.go +++ b/modules/core/03-connection/keeper/events_test.go @@ -8,9 +8,9 @@ import ( ibctesting "github.com/cosmos/ibc-go/v10/testing" ) -func (suite *KeeperTestSuite) TestMsgConnectionOpenInitEvents() { - suite.SetupTest() - path := ibctesting.NewPath(suite.chainA, suite.chainB) +func (s *KeeperTestSuite) TestMsgConnectionOpenInitEvents() { + s.SetupTest() + path := ibctesting.NewPath(s.chainA, s.chainB) path.SetupClients() msg := types.NewMsgConnectionOpenInit( @@ -20,9 +20,9 @@ func (suite *KeeperTestSuite) TestMsgConnectionOpenInitEvents() { path.EndpointA.Chain.SenderAccount.GetAddress().String(), ) - res, err := suite.chainA.SendMsgs(msg) - suite.Require().NoError(err) - suite.Require().NotNil(res) + res, err := 
s.chainA.SendMsgs(msg) + s.Require().NoError(err) + s.Require().NotNil(res) events := res.Events expectedEvents := sdk.Events{ @@ -36,17 +36,17 @@ func (suite *KeeperTestSuite) TestMsgConnectionOpenInitEvents() { var indexSet map[string]struct{} expectedEvents = sdk.MarkEventsToIndex(expectedEvents, indexSet) - ibctesting.AssertEvents(&suite.Suite, expectedEvents, events) + ibctesting.AssertEvents(&s.Suite, expectedEvents, events) } -func (suite *KeeperTestSuite) TestMsgConnectionOpenTryEvents() { - suite.SetupTest() - path := ibctesting.NewPath(suite.chainA, suite.chainB) +func (s *KeeperTestSuite) TestMsgConnectionOpenTryEvents() { + s.SetupTest() + path := ibctesting.NewPath(s.chainA, s.chainB) path.SetupClients() - suite.Require().NoError(path.EndpointA.ConnOpenInit()) + s.Require().NoError(path.EndpointA.ConnOpenInit()) - suite.Require().NoError(path.EndpointB.UpdateClient()) + s.Require().NoError(path.EndpointB.UpdateClient()) initProof, proofHeight := path.EndpointB.QueryConnectionHandshakeProof() @@ -58,8 +58,8 @@ func (suite *KeeperTestSuite) TestMsgConnectionOpenTryEvents() { ) res, err := path.EndpointB.Chain.SendMsgs(msg) - suite.Require().NoError(err) - suite.Require().NotNil(res) + s.Require().NoError(err) + s.Require().NotNil(res) events := res.Events expectedEvents := sdk.Events{ @@ -74,18 +74,18 @@ func (suite *KeeperTestSuite) TestMsgConnectionOpenTryEvents() { var indexSet map[string]struct{} expectedEvents = sdk.MarkEventsToIndex(expectedEvents, indexSet) - ibctesting.AssertEvents(&suite.Suite, expectedEvents, events) + ibctesting.AssertEvents(&s.Suite, expectedEvents, events) } -func (suite *KeeperTestSuite) TestMsgConnectionOpenAckEvents() { - suite.SetupTest() - path := ibctesting.NewPath(suite.chainA, suite.chainB) +func (s *KeeperTestSuite) TestMsgConnectionOpenAckEvents() { + s.SetupTest() + path := ibctesting.NewPath(s.chainA, s.chainB) path.SetupClients() - suite.Require().NoError(path.EndpointA.ConnOpenInit()) - suite.Require().NoError(path.EndpointB.ConnOpenTry()) + s.Require().NoError(path.EndpointA.ConnOpenInit()) + s.Require().NoError(path.EndpointB.ConnOpenTry()) - suite.Require().NoError(path.EndpointA.UpdateClient()) + s.Require().NoError(path.EndpointA.UpdateClient()) tryProof, proofHeight := path.EndpointA.QueryConnectionHandshakeProof() @@ -96,8 +96,8 @@ func (suite *KeeperTestSuite) TestMsgConnectionOpenAckEvents() { ) res, err := path.EndpointA.Chain.SendMsgs(msg) - suite.Require().NoError(err) - suite.Require().NotNil(res) + s.Require().NoError(err) + s.Require().NotNil(res) events := res.Events expectedEvents := sdk.Events{ @@ -112,19 +112,19 @@ func (suite *KeeperTestSuite) TestMsgConnectionOpenAckEvents() { var indexSet map[string]struct{} expectedEvents = sdk.MarkEventsToIndex(expectedEvents, indexSet) - ibctesting.AssertEvents(&suite.Suite, expectedEvents, events) + ibctesting.AssertEvents(&s.Suite, expectedEvents, events) } -func (suite *KeeperTestSuite) TestMsgConnectionOpenConfirmEvents() { - suite.SetupTest() - path := ibctesting.NewPath(suite.chainA, suite.chainB) +func (s *KeeperTestSuite) TestMsgConnectionOpenConfirmEvents() { + s.SetupTest() + path := ibctesting.NewPath(s.chainA, s.chainB) path.SetupClients() - suite.Require().NoError(path.EndpointA.ConnOpenInit()) - suite.Require().NoError(path.EndpointB.ConnOpenTry()) - suite.Require().NoError(path.EndpointA.ConnOpenAck()) + s.Require().NoError(path.EndpointA.ConnOpenInit()) + s.Require().NoError(path.EndpointB.ConnOpenTry()) + s.Require().NoError(path.EndpointA.ConnOpenAck()) - 
suite.Require().NoError(path.EndpointB.UpdateClient()) + s.Require().NoError(path.EndpointB.UpdateClient()) connectionKey := host.ConnectionKey(path.EndpointB.Counterparty.ConnectionID) proof, height := path.EndpointB.Counterparty.Chain.QueryProof(connectionKey) @@ -136,8 +136,8 @@ func (suite *KeeperTestSuite) TestMsgConnectionOpenConfirmEvents() { ) res, err := path.EndpointB.Chain.SendMsgs(msg) - suite.Require().NoError(err) - suite.Require().NotNil(res) + s.Require().NoError(err) + s.Require().NotNil(res) events := res.Events expectedEvents := sdk.Events{ @@ -152,5 +152,5 @@ func (suite *KeeperTestSuite) TestMsgConnectionOpenConfirmEvents() { var indexSet map[string]struct{} expectedEvents = sdk.MarkEventsToIndex(expectedEvents, indexSet) - ibctesting.AssertEvents(&suite.Suite, expectedEvents, events) + ibctesting.AssertEvents(&s.Suite, expectedEvents, events) } diff --git a/modules/core/03-connection/keeper/grpc_query_test.go b/modules/core/03-connection/keeper/grpc_query_test.go index 5221df91e44..ef0112fdbe3 100644 --- a/modules/core/03-connection/keeper/grpc_query_test.go +++ b/modules/core/03-connection/keeper/grpc_query_test.go @@ -18,7 +18,7 @@ import ( ibctesting "github.com/cosmos/ibc-go/v10/testing" ) -func (suite *KeeperTestSuite) TestQueryConnection() { +func (s *KeeperTestSuite) TestQueryConnection() { var ( req *types.QueryConnectionRequest expConnection types.ConnectionEnd @@ -58,14 +58,14 @@ func (suite *KeeperTestSuite) TestQueryConnection() { { "success", func() { - path := ibctesting.NewPath(suite.chainA, suite.chainB) + path := ibctesting.NewPath(s.chainA, s.chainB) path.SetupClients() err := path.EndpointA.ConnOpenInit() - suite.Require().NoError(err) + s.Require().NoError(err) - counterparty := types.NewCounterparty(path.EndpointB.ClientID, "", suite.chainB.GetPrefix()) + counterparty := types.NewCounterparty(path.EndpointB.ClientID, "", s.chainB.GetPrefix()) expConnection = types.NewConnectionEnd(types.INIT, path.EndpointA.ClientID, counterparty, types.GetCompatibleVersions(), 500) - suite.chainA.App.GetIBCKeeper().ConnectionKeeper.SetConnection(suite.chainA.GetContext(), path.EndpointA.ConnectionID, expConnection) + s.chainA.App.GetIBCKeeper().ConnectionKeeper.SetConnection(s.chainA.GetContext(), path.EndpointA.ConnectionID, expConnection) req = &types.QueryConnectionRequest{ ConnectionId: path.EndpointA.ConnectionID, @@ -76,31 +76,31 @@ func (suite *KeeperTestSuite) TestQueryConnection() { } for _, tc := range testCases { - suite.Run(fmt.Sprintf("Case %s", tc.msg), func() { - suite.SetupTest() // reset + s.Run(fmt.Sprintf("Case %s", tc.msg), func() { + s.SetupTest() // reset tc.malleate() - ctx := suite.chainA.GetContext() + ctx := s.chainA.GetContext() - queryServer := keeper.NewQueryServer(suite.chainA.App.GetIBCKeeper().ConnectionKeeper) + queryServer := keeper.NewQueryServer(s.chainA.App.GetIBCKeeper().ConnectionKeeper) res, err := queryServer.Connection(ctx, req) if tc.expErr == nil { - suite.Require().NoError(err) - suite.Require().NotNil(res) - suite.Require().Equal(&expConnection, res.Connection) + s.Require().NoError(err) + s.Require().NotNil(res) + s.Require().Equal(&expConnection, res.Connection) } else { - suite.Require().Error(err) - suite.Require().ErrorIs(err, tc.expErr) + s.Require().Error(err) + s.Require().ErrorIs(err, tc.expErr) } }) } } -func (suite *KeeperTestSuite) TestQueryConnections() { - suite.chainA.App.GetIBCKeeper().ConnectionKeeper.CreateSentinelLocalhostConnection(suite.chainA.GetContext()) - localhostConn, found := 
suite.chainA.App.GetIBCKeeper().ConnectionKeeper.GetConnection(suite.chainA.GetContext(), exported.LocalhostConnectionID) - suite.Require().True(found) +func (s *KeeperTestSuite) TestQueryConnections() { + s.chainA.App.GetIBCKeeper().ConnectionKeeper.CreateSentinelLocalhostConnection(s.chainA.GetContext()) + localhostConn, found := s.chainA.App.GetIBCKeeper().ConnectionKeeper.GetConnection(s.chainA.GetContext(), exported.LocalhostConnectionID) + s.Require().True(found) identifiedConn := types.NewIdentifiedConnection(exported.LocalhostConnectionID, localhostConn) @@ -131,20 +131,20 @@ func (suite *KeeperTestSuite) TestQueryConnections() { { "success", func() { - path1 := ibctesting.NewPath(suite.chainA, suite.chainB) - path2 := ibctesting.NewPath(suite.chainA, suite.chainB) - path3 := ibctesting.NewPath(suite.chainA, suite.chainB) + path1 := ibctesting.NewPath(s.chainA, s.chainB) + path2 := ibctesting.NewPath(s.chainA, s.chainB) + path3 := ibctesting.NewPath(s.chainA, s.chainB) path1.SetupConnections() path2.SetupConnections() path3.SetupClients() err := path3.EndpointA.ConnOpenInit() - suite.Require().NoError(err) + s.Require().NoError(err) - counterparty1 := types.NewCounterparty(path1.EndpointB.ClientID, path1.EndpointB.ConnectionID, suite.chainB.GetPrefix()) - counterparty2 := types.NewCounterparty(path2.EndpointB.ClientID, path2.EndpointB.ConnectionID, suite.chainB.GetPrefix()) + counterparty1 := types.NewCounterparty(path1.EndpointB.ClientID, path1.EndpointB.ConnectionID, s.chainB.GetPrefix()) + counterparty2 := types.NewCounterparty(path2.EndpointB.ClientID, path2.EndpointB.ConnectionID, s.chainB.GetPrefix()) // counterparty connection id is blank after open init - counterparty3 := types.NewCounterparty(path3.EndpointB.ClientID, "", suite.chainB.GetPrefix()) + counterparty3 := types.NewCounterparty(path3.EndpointB.ClientID, "", s.chainB.GetPrefix()) conn1 := types.NewConnectionEnd(types.OPEN, path1.EndpointA.ClientID, counterparty1, types.GetCompatibleVersions(), 0) conn2 := types.NewConnectionEnd(types.OPEN, path2.EndpointA.ClientID, counterparty2, types.GetCompatibleVersions(), 0) @@ -168,28 +168,28 @@ func (suite *KeeperTestSuite) TestQueryConnections() { } for _, tc := range testCases { - suite.Run(fmt.Sprintf("Case %s", tc.msg), func() { - suite.SetupTest() // reset + s.Run(fmt.Sprintf("Case %s", tc.msg), func() { + s.SetupTest() // reset tc.malleate() - ctx := suite.chainA.GetContext() + ctx := s.chainA.GetContext() - queryServer := keeper.NewQueryServer(suite.chainA.App.GetIBCKeeper().ConnectionKeeper) + queryServer := keeper.NewQueryServer(s.chainA.App.GetIBCKeeper().ConnectionKeeper) res, err := queryServer.Connections(ctx, req) if tc.expErr == nil { - suite.Require().NoError(err) - suite.Require().NotNil(res) - suite.Require().Equal(expConnections, res.Connections) + s.Require().NoError(err) + s.Require().NotNil(res) + s.Require().Equal(expConnections, res.Connections) } else { - suite.Require().Error(err) - suite.Require().ErrorIs(err, tc.expErr) + s.Require().Error(err) + s.Require().ErrorIs(err, tc.expErr) } }) } } -func (suite *KeeperTestSuite) TestQueryClientConnections() { +func (s *KeeperTestSuite) TestQueryClientConnections() { var ( req *types.QueryClientConnectionsRequest expPaths []string @@ -229,18 +229,18 @@ func (suite *KeeperTestSuite) TestQueryClientConnections() { { "success", func() { - path1 := ibctesting.NewPath(suite.chainA, suite.chainB) + path1 := ibctesting.NewPath(s.chainA, s.chainB) path1.SetupConnections() // create another connection using same 
underlying clients - path2 := ibctesting.NewPath(suite.chainA, suite.chainB) + path2 := ibctesting.NewPath(s.chainA, s.chainB) path2.EndpointA.ClientID = path1.EndpointA.ClientID path2.EndpointB.ClientID = path1.EndpointB.ClientID path2.CreateConnections() expPaths = []string{path1.EndpointA.ConnectionID, path2.EndpointA.ConnectionID} - suite.chainA.App.GetIBCKeeper().ConnectionKeeper.SetClientConnectionPaths(suite.chainA.GetContext(), path1.EndpointA.ClientID, expPaths) + s.chainA.App.GetIBCKeeper().ConnectionKeeper.SetClientConnectionPaths(s.chainA.GetContext(), path1.EndpointA.ClientID, expPaths) req = &types.QueryClientConnectionsRequest{ ClientId: path1.EndpointA.ClientID, @@ -251,28 +251,28 @@ func (suite *KeeperTestSuite) TestQueryClientConnections() { } for _, tc := range testCases { - suite.Run(fmt.Sprintf("Case %s", tc.msg), func() { - suite.SetupTest() // reset + s.Run(fmt.Sprintf("Case %s", tc.msg), func() { + s.SetupTest() // reset tc.malleate() - ctx := suite.chainA.GetContext() + ctx := s.chainA.GetContext() - queryServer := keeper.NewQueryServer(suite.chainA.App.GetIBCKeeper().ConnectionKeeper) + queryServer := keeper.NewQueryServer(s.chainA.App.GetIBCKeeper().ConnectionKeeper) res, err := queryServer.ClientConnections(ctx, req) if tc.expErr == nil { - suite.Require().NoError(err) - suite.Require().NotNil(res) - suite.Require().Equal(expPaths, res.ConnectionPaths) + s.Require().NoError(err) + s.Require().NotNil(res) + s.Require().Equal(expPaths, res.ConnectionPaths) } else { - suite.Require().Error(err) - suite.Require().ErrorIs(err, tc.expErr) + s.Require().Error(err) + s.Require().ErrorIs(err, tc.expErr) } }) } } -func (suite *KeeperTestSuite) TestQueryConnectionClientState() { +func (s *KeeperTestSuite) TestQueryConnectionClientState() { var ( req *types.QueryConnectionClientStateRequest expIdentifiedClientState clienttypes.IdentifiedClientState @@ -314,11 +314,11 @@ func (suite *KeeperTestSuite) TestQueryConnectionClientState() { { "client state not found", func() { - path := ibctesting.NewPath(suite.chainA, suite.chainB) + path := ibctesting.NewPath(s.chainA, s.chainB) path.Setup() // set connection to empty so clientID is empty - suite.chainA.App.GetIBCKeeper().ConnectionKeeper.SetConnection(suite.chainA.GetContext(), path.EndpointA.ConnectionID, types.ConnectionEnd{}) + s.chainA.App.GetIBCKeeper().ConnectionKeeper.SetConnection(s.chainA.GetContext(), path.EndpointA.ConnectionID, types.ConnectionEnd{}) req = &types.QueryConnectionClientStateRequest{ ConnectionId: path.EndpointA.ConnectionID, @@ -331,10 +331,10 @@ func (suite *KeeperTestSuite) TestQueryConnectionClientState() { { "success", func() { - path := ibctesting.NewPath(suite.chainA, suite.chainB) + path := ibctesting.NewPath(s.chainA, s.chainB) path.SetupConnections() - expClientState := suite.chainA.GetClientState(path.EndpointA.ClientID) + expClientState := s.chainA.GetClientState(path.EndpointA.ClientID) expIdentifiedClientState = clienttypes.NewIdentifiedClientState(path.EndpointA.ClientID, expClientState) req = &types.QueryConnectionClientStateRequest{ @@ -346,32 +346,32 @@ func (suite *KeeperTestSuite) TestQueryConnectionClientState() { } for _, tc := range testCases { - suite.Run(fmt.Sprintf("Case %s", tc.msg), func() { - suite.SetupTest() // reset + s.Run(fmt.Sprintf("Case %s", tc.msg), func() { + s.SetupTest() // reset tc.malleate() - ctx := suite.chainA.GetContext() + ctx := s.chainA.GetContext() - queryServer := keeper.NewQueryServer(suite.chainA.App.GetIBCKeeper().ConnectionKeeper) + queryServer := 
keeper.NewQueryServer(s.chainA.App.GetIBCKeeper().ConnectionKeeper) res, err := queryServer.ConnectionClientState(ctx, req) if tc.expErr == nil { - suite.Require().NoError(err) - suite.Require().NotNil(res) - suite.Require().Equal(&expIdentifiedClientState, res.IdentifiedClientState) + s.Require().NoError(err) + s.Require().NotNil(res) + s.Require().Equal(&expIdentifiedClientState, res.IdentifiedClientState) // ensure UnpackInterfaces is defined cachedValue := res.IdentifiedClientState.ClientState.GetCachedValue() - suite.Require().NotNil(cachedValue) + s.Require().NotNil(cachedValue) } else { - suite.Require().Error(err) - suite.Require().ErrorIs(err, tc.expErr) + s.Require().Error(err) + s.Require().ErrorIs(err, tc.expErr) } }) } } -func (suite *KeeperTestSuite) TestQueryConnectionConsensusState() { +func (s *KeeperTestSuite) TestQueryConnectionConsensusState() { var ( req *types.QueryConnectionConsensusStateRequest expConsensusState exported.ConsensusState @@ -418,13 +418,13 @@ func (suite *KeeperTestSuite) TestQueryConnectionConsensusState() { { "consensus state not found", func() { - path := ibctesting.NewPath(suite.chainA, suite.chainB) + path := ibctesting.NewPath(s.chainA, s.chainB) path.Setup() req = &types.QueryConnectionConsensusStateRequest{ ConnectionId: path.EndpointA.ConnectionID, RevisionNumber: 0, - RevisionHeight: uint64(suite.chainA.GetContext().BlockHeight()), // use current height + RevisionHeight: uint64(s.chainA.GetContext().BlockHeight()), // use current height } }, status.Error( codes.NotFound, @@ -434,13 +434,13 @@ func (suite *KeeperTestSuite) TestQueryConnectionConsensusState() { { "success", func() { - path := ibctesting.NewPath(suite.chainA, suite.chainB) + path := ibctesting.NewPath(s.chainA, s.chainB) path.SetupConnections() clientHeight, ok := path.EndpointA.GetClientLatestHeight().(clienttypes.Height) - suite.Require().True(ok) - expConsensusState, _ = suite.chainA.GetConsensusState(path.EndpointA.ClientID, clientHeight) - suite.Require().NotNil(expConsensusState) + s.Require().True(ok) + expConsensusState, _ = s.chainA.GetConsensusState(path.EndpointA.ClientID, clientHeight) + s.Require().NotNil(expConsensusState) expClientID = path.EndpointA.ClientID req = &types.QueryConnectionConsensusStateRequest{ @@ -454,39 +454,39 @@ func (suite *KeeperTestSuite) TestQueryConnectionConsensusState() { } for _, tc := range testCases { - suite.Run(fmt.Sprintf("Case %s", tc.msg), func() { - suite.SetupTest() // reset + s.Run(fmt.Sprintf("Case %s", tc.msg), func() { + s.SetupTest() // reset tc.malleate() - ctx := suite.chainA.GetContext() + ctx := s.chainA.GetContext() - queryServer := keeper.NewQueryServer(suite.chainA.App.GetIBCKeeper().ConnectionKeeper) + queryServer := keeper.NewQueryServer(s.chainA.App.GetIBCKeeper().ConnectionKeeper) res, err := queryServer.ConnectionConsensusState(ctx, req) if tc.expErr == nil { - suite.Require().NoError(err) - suite.Require().NotNil(res) + s.Require().NoError(err) + s.Require().NotNil(res) consensusState, err := clienttypes.UnpackConsensusState(res.ConsensusState) - suite.Require().NoError(err) - suite.Require().Equal(expConsensusState, consensusState) - suite.Require().Equal(expClientID, res.ClientId) + s.Require().NoError(err) + s.Require().Equal(expConsensusState, consensusState) + s.Require().Equal(expClientID, res.ClientId) // ensure UnpackInterfaces is defined cachedValue := res.ConsensusState.GetCachedValue() - suite.Require().NotNil(cachedValue) + s.Require().NotNil(cachedValue) } else { - suite.Require().Error(err) - 
suite.Require().ErrorIs(err, tc.expErr) + s.Require().Error(err) + s.Require().ErrorIs(err, tc.expErr) } }) } } -func (suite *KeeperTestSuite) TestQueryConnectionParams() { +func (s *KeeperTestSuite) TestQueryConnectionParams() { expParams := types.DefaultParams() - queryServer := keeper.NewQueryServer(suite.chainA.App.GetIBCKeeper().ConnectionKeeper) - res, err := queryServer.ConnectionParams(suite.chainA.GetContext(), &types.QueryConnectionParamsRequest{}) - suite.Require().NoError(err) - suite.Require().Equal(&expParams, res.Params) + queryServer := keeper.NewQueryServer(s.chainA.App.GetIBCKeeper().ConnectionKeeper) + res, err := queryServer.ConnectionParams(s.chainA.GetContext(), &types.QueryConnectionParamsRequest{}) + s.Require().NoError(err) + s.Require().Equal(&expParams, res.Params) } diff --git a/modules/core/03-connection/keeper/handshake_test.go b/modules/core/03-connection/keeper/handshake_test.go index d461784ccf0..ffb34469c12 100644 --- a/modules/core/03-connection/keeper/handshake_test.go +++ b/modules/core/03-connection/keeper/handshake_test.go @@ -14,7 +14,7 @@ import ( // TestConnOpenInit - chainA initializes (INIT state) a connection with // chainB which is yet UNINITIALIZED -func (suite *KeeperTestSuite) TestConnOpenInit() { +func (s *KeeperTestSuite) TestConnOpenInit() { var ( path *ibctesting.Path version *types.Version @@ -53,20 +53,20 @@ func (suite *KeeperTestSuite) TestConnOpenInit() { malleate: func() { expErrorMsgSubstring = "status is Unauthorized" // remove client from allowed list - params := suite.chainA.App.GetIBCKeeper().ClientKeeper.GetParams(suite.chainA.GetContext()) + params := s.chainA.App.GetIBCKeeper().ClientKeeper.GetParams(s.chainA.GetContext()) params.AllowedClients = []string{} - suite.chainA.App.GetIBCKeeper().ClientKeeper.SetParams(suite.chainA.GetContext(), params) + s.chainA.App.GetIBCKeeper().ClientKeeper.SetParams(s.chainA.GetContext(), params) }, }, } for _, tc := range testCases { - suite.Run(tc.msg, func() { - suite.SetupTest() // reset + s.Run(tc.msg, func() { + s.SetupTest() // reset emptyConnBID = false // must be explicitly changed version = nil // must be explicitly changed expErrorMsgSubstring = "" - path = ibctesting.NewPath(suite.chainA, suite.chainB) + path = ibctesting.NewPath(s.chainA, s.chainB) path.SetupClients() tc.malleate() @@ -74,18 +74,18 @@ func (suite *KeeperTestSuite) TestConnOpenInit() { if emptyConnBID { path.EndpointB.ConnectionID = "" } - counterparty := types.NewCounterparty(path.EndpointB.ClientID, path.EndpointB.ConnectionID, suite.chainB.GetPrefix()) + counterparty := types.NewCounterparty(path.EndpointB.ClientID, path.EndpointB.ConnectionID, s.chainB.GetPrefix()) - connectionID, err := suite.chainA.App.GetIBCKeeper().ConnectionKeeper.ConnOpenInit(suite.chainA.GetContext(), path.EndpointA.ClientID, counterparty, version, delayPeriod) + connectionID, err := s.chainA.App.GetIBCKeeper().ConnectionKeeper.ConnOpenInit(s.chainA.GetContext(), path.EndpointA.ClientID, counterparty, version, delayPeriod) if tc.expErr == nil { - suite.Require().NoError(err) - suite.Require().Equal(types.FormatConnectionIdentifier(0), connectionID) + s.Require().NoError(err) + s.Require().Equal(types.FormatConnectionIdentifier(0), connectionID) } else { - suite.Require().Error(err) - suite.Contains(err.Error(), expErrorMsgSubstring) - suite.Require().Equal("", connectionID) - suite.Require().ErrorIs(err, tc.expErr) + s.Require().Error(err) + s.Contains(err.Error(), expErrorMsgSubstring) + s.Require().Equal("", connectionID) + 
s.Require().ErrorIs(err, tc.expErr) } }) } @@ -93,7 +93,7 @@ func (suite *KeeperTestSuite) TestConnOpenInit() { // TestConnOpenTry - chainB calls ConnOpenTry to verify the state of // connection on chainA is INIT -func (suite *KeeperTestSuite) TestConnOpenTry() { +func (s *KeeperTestSuite) TestConnOpenTry() { var ( path *ibctesting.Path delayPeriod uint64 @@ -107,11 +107,11 @@ func (suite *KeeperTestSuite) TestConnOpenTry() { }{ {"success", func() { err := path.EndpointA.ConnOpenInit() - suite.Require().NoError(err) + s.Require().NoError(err) }, nil}, {"success with delay period", func() { err := path.EndpointA.ConnOpenInit() - suite.Require().NoError(err) + s.Require().NoError(err) delayPeriod = uint64(time.Hour.Nanoseconds()) @@ -119,19 +119,19 @@ func (suite *KeeperTestSuite) TestConnOpenTry() { path.EndpointA.UpdateConnection(func(connection *types.ConnectionEnd) { connection.DelayPeriod = delayPeriod }) // commit in order for proof to return correct value - suite.coordinator.CommitBlock(suite.chainA) + s.coordinator.CommitBlock(s.chainA) err = path.EndpointB.UpdateClient() - suite.Require().NoError(err) + s.Require().NoError(err) }, nil}, {"counterparty versions is empty", func() { err := path.EndpointA.ConnOpenInit() - suite.Require().NoError(err) + s.Require().NoError(err) versions = nil }, errorsmod.Wrap(types.ErrVersionNegotiationFailed, "failed to find a matching counterparty version ([]) from the supported version list ([identifier:\"1\" features:\"ORDER_ORDERED\" features:\"ORDER_UNORDERED\" ])")}, {"counterparty versions don't have a match", func() { err := path.EndpointA.ConnOpenInit() - suite.Require().NoError(err) + s.Require().NoError(err) version := types.NewVersion("0.0", nil) versions = []*types.Version{version} @@ -142,36 +142,36 @@ func (suite *KeeperTestSuite) TestConnOpenTry() { } for _, tc := range testCases { - suite.Run(tc.msg, func() { - suite.SetupTest() // reset + s.Run(tc.msg, func() { + s.SetupTest() // reset versions = types.GetCompatibleVersions() // may be changed in malleate delayPeriod = 0 // may be changed in malleate - path = ibctesting.NewPath(suite.chainA, suite.chainB) + path = ibctesting.NewPath(s.chainA, s.chainB) path.SetupClients() tc.malleate() - counterparty := types.NewCounterparty(path.EndpointA.ClientID, path.EndpointA.ConnectionID, suite.chainA.GetPrefix()) + counterparty := types.NewCounterparty(path.EndpointA.ClientID, path.EndpointA.ConnectionID, s.chainA.GetPrefix()) // ensure client is up to date to receive proof err := path.EndpointB.UpdateClient() - suite.Require().NoError(err) + s.Require().NoError(err) connectionKey := host.ConnectionKey(path.EndpointA.ConnectionID) - initProof, proofHeight := suite.chainA.QueryProof(connectionKey) + initProof, proofHeight := s.chainA.QueryProof(connectionKey) - connectionID, err := suite.chainB.App.GetIBCKeeper().ConnectionKeeper.ConnOpenTry( - suite.chainB.GetContext(), counterparty, delayPeriod, path.EndpointB.ClientID, + connectionID, err := s.chainB.App.GetIBCKeeper().ConnectionKeeper.ConnOpenTry( + s.chainB.GetContext(), counterparty, delayPeriod, path.EndpointB.ClientID, versions, initProof, proofHeight, ) if tc.expErr == nil { - suite.Require().NoError(err) - suite.Require().Equal(types.FormatConnectionIdentifier(0), connectionID) + s.Require().NoError(err) + s.Require().Equal(types.FormatConnectionIdentifier(0), connectionID) } else { - suite.Require().Error(err) - suite.Require().Equal("", connectionID) - suite.Require().ErrorIs(err, tc.expErr) + s.Require().Error(err) + 
s.Require().Equal("", connectionID) + s.Require().ErrorIs(err, tc.expErr) } }) } @@ -179,7 +179,7 @@ func (suite *KeeperTestSuite) TestConnOpenTry() { // TestConnOpenAck - Chain A (ID #1) calls TestConnOpenAck to acknowledge (ACK state) // the initialization (TRYINIT) of the connection on Chain B (ID #2). -func (suite *KeeperTestSuite) TestConnOpenAck() { +func (s *KeeperTestSuite) TestConnOpenAck() { var ( path *ibctesting.Path version *types.Version @@ -192,113 +192,113 @@ func (suite *KeeperTestSuite) TestConnOpenAck() { }{ {"success", func() { err := path.EndpointA.ConnOpenInit() - suite.Require().NoError(err) + s.Require().NoError(err) err = path.EndpointB.ConnOpenTry() - suite.Require().NoError(err) + s.Require().NoError(err) }, nil}, {"connection not found", func() { // connections are never created }, errorsmod.Wrap(types.ErrConnectionNotFound, "")}, {"invalid counterparty connection ID", func() { err := path.EndpointA.ConnOpenInit() - suite.Require().NoError(err) + s.Require().NoError(err) err = path.EndpointB.ConnOpenTry() - suite.Require().NoError(err) + s.Require().NoError(err) // modify connB to set counterparty connection identifier to wrong identifier path.EndpointA.UpdateConnection(func(c *types.ConnectionEnd) { c.Counterparty.ConnectionId = ibctesting.InvalidID }) path.EndpointB.ConnectionID = ibctesting.InvalidID err = path.EndpointA.UpdateClient() - suite.Require().NoError(err) + s.Require().NoError(err) err = path.EndpointB.UpdateClient() - suite.Require().NoError(err) + s.Require().NoError(err) }, errorsmod.Wrap(commitmenttypes.ErrInvalidProof, "failed connection state verification for client (07-tendermint-0): commitment proof must be existence proof. got: int at index &{1374412614704}")}, {"connection state is not INIT", func() { // connection state is already OPEN on chainA err := path.EndpointA.ConnOpenInit() - suite.Require().NoError(err) + s.Require().NoError(err) err = path.EndpointB.ConnOpenTry() - suite.Require().NoError(err) + s.Require().NoError(err) err = path.EndpointA.ConnOpenAck() - suite.Require().NoError(err) + s.Require().NoError(err) }, errorsmod.Wrap(types.ErrInvalidConnectionState, "connection state is not INIT (got STATE_OPEN)")}, {"connection is in INIT but the proposed version is invalid", func() { // chainA is in INIT, chainB is in TRYOPEN err := path.EndpointA.ConnOpenInit() - suite.Require().NoError(err) + s.Require().NoError(err) err = path.EndpointB.ConnOpenTry() - suite.Require().NoError(err) + s.Require().NoError(err) version = types.NewVersion("2.0", nil) }, errorsmod.Wrap(types.ErrInvalidConnectionState, "the counterparty selected version identifier:\"2.0\" is not supported by versions selected on INIT")}, {"incompatible IBC versions", func() { err := path.EndpointA.ConnOpenInit() - suite.Require().NoError(err) + s.Require().NoError(err) err = path.EndpointB.ConnOpenTry() - suite.Require().NoError(err) + s.Require().NoError(err) // set version to a non-compatible version version = types.NewVersion("2.0", nil) }, errorsmod.Wrap(types.ErrInvalidConnectionState, "the counterparty selected version identifier:\"2.0\" is not supported by versions selected on INIT")}, {"empty version", func() { err := path.EndpointA.ConnOpenInit() - suite.Require().NoError(err) + s.Require().NoError(err) err = path.EndpointB.ConnOpenTry() - suite.Require().NoError(err) + s.Require().NoError(err) version = &types.Version{} }, errorsmod.Wrap(types.ErrInvalidConnectionState, "the counterparty selected version is not supported by versions selected on INIT")}, 
{"feature set verification failed - unsupported feature", func() { err := path.EndpointA.ConnOpenInit() - suite.Require().NoError(err) + s.Require().NoError(err) err = path.EndpointB.ConnOpenTry() - suite.Require().NoError(err) + s.Require().NoError(err) version = types.NewVersion(types.DefaultIBCVersionIdentifier, []string{"ORDER_ORDERED", "ORDER_UNORDERED", "ORDER_DAG"}) }, errorsmod.Wrap(types.ErrInvalidConnectionState, "the counterparty selected version identifier:\"1\" features:\"ORDER_ORDERED\" features:\"ORDER_UNORDERED\" features:\"ORDER_DAG\" is not supported by versions selected on INIT")}, {"connection state verification failed", func() { // chainB connection is not in INIT err := path.EndpointA.ConnOpenInit() - suite.Require().NoError(err) + s.Require().NoError(err) }, errorsmod.Wrap(commitmenttypes.ErrInvalidProof, "failed connection state verification for client (07-tendermint-0): commitment proof must be existence proof. got: int at index &{1374414228888}")}, } for _, tc := range testCases { - suite.Run(tc.msg, func() { - suite.SetupTest() // reset + s.Run(tc.msg, func() { + s.SetupTest() // reset version = types.GetCompatibleVersions()[0] // must be explicitly changed in malleate - path = ibctesting.NewPath(suite.chainA, suite.chainB) + path = ibctesting.NewPath(s.chainA, s.chainB) path.SetupClients() tc.malleate() // ensure client is up to date to receive proof err := path.EndpointA.UpdateClient() - suite.Require().NoError(err) + s.Require().NoError(err) connectionKey := host.ConnectionKey(path.EndpointB.ConnectionID) - tryProof, proofHeight := suite.chainB.QueryProof(connectionKey) + tryProof, proofHeight := s.chainB.QueryProof(connectionKey) - err = suite.chainA.App.GetIBCKeeper().ConnectionKeeper.ConnOpenAck( - suite.chainA.GetContext(), path.EndpointA.ConnectionID, version, + err = s.chainA.App.GetIBCKeeper().ConnectionKeeper.ConnOpenAck( + s.chainA.GetContext(), path.EndpointA.ConnectionID, version, path.EndpointB.ConnectionID, tryProof, proofHeight, ) if tc.expErr == nil { - suite.Require().NoError(err) + s.Require().NoError(err) } else { - suite.Require().Error(err) - suite.Require().ErrorIs(err, tc.expErr) + s.Require().Error(err) + s.Require().ErrorIs(err, tc.expErr) } }) } @@ -306,7 +306,7 @@ func (suite *KeeperTestSuite) TestConnOpenAck() { // TestConnOpenConfirm - chainB calls ConnOpenConfirm to confirm that // chainA state is now OPEN. 
-func (suite *KeeperTestSuite) TestConnOpenConfirm() { +func (s *KeeperTestSuite) TestConnOpenConfirm() { var path *ibctesting.Path testCases := []struct { msg string @@ -315,13 +315,13 @@ func (suite *KeeperTestSuite) TestConnOpenConfirm() { }{ {"success", func() { err := path.EndpointA.ConnOpenInit() - suite.Require().NoError(err) + s.Require().NoError(err) err = path.EndpointB.ConnOpenTry() - suite.Require().NoError(err) + s.Require().NoError(err) err = path.EndpointA.ConnOpenAck() - suite.Require().NoError(err) + s.Require().NoError(err) }, nil}, {"connection not found", func() { // connections are never created @@ -333,37 +333,37 @@ func (suite *KeeperTestSuite) TestConnOpenConfirm() { {"connection state verification failed", func() { // chainA is in INIT err := path.EndpointA.ConnOpenInit() - suite.Require().NoError(err) + s.Require().NoError(err) err = path.EndpointB.ConnOpenTry() - suite.Require().NoError(err) + s.Require().NoError(err) }, errorsmod.Wrap(commitmenttypes.ErrInvalidProof, "failed connection state verification for client (07-tendermint-0): failed to verify membership proof at index 0: provided value doesn't match proof")}, } for _, tc := range testCases { - suite.Run(tc.msg, func() { - suite.SetupTest() // reset - path = ibctesting.NewPath(suite.chainA, suite.chainB) + s.Run(tc.msg, func() { + s.SetupTest() // reset + path = ibctesting.NewPath(s.chainA, s.chainB) path.SetupClients() tc.malleate() // ensure client is up to date to receive proof err := path.EndpointB.UpdateClient() - suite.Require().NoError(err) + s.Require().NoError(err) connectionKey := host.ConnectionKey(path.EndpointA.ConnectionID) - ackProof, proofHeight := suite.chainA.QueryProof(connectionKey) + ackProof, proofHeight := s.chainA.QueryProof(connectionKey) - err = suite.chainB.App.GetIBCKeeper().ConnectionKeeper.ConnOpenConfirm( - suite.chainB.GetContext(), path.EndpointB.ConnectionID, ackProof, proofHeight, + err = s.chainB.App.GetIBCKeeper().ConnectionKeeper.ConnOpenConfirm( + s.chainB.GetContext(), path.EndpointB.ConnectionID, ackProof, proofHeight, ) if tc.expErr == nil { - suite.Require().NoError(err) + s.Require().NoError(err) } else { - suite.Require().Error(err) - suite.Require().ErrorIs(err, tc.expErr) + s.Require().Error(err) + s.Require().ErrorIs(err, tc.expErr) } }) } diff --git a/modules/core/03-connection/keeper/keeper.go b/modules/core/03-connection/keeper/keeper.go index 06931371b1b..e389ab691c1 100644 --- a/modules/core/03-connection/keeper/keeper.go +++ b/modules/core/03-connection/keeper/keeper.go @@ -24,24 +24,22 @@ type Keeper struct { // implements gRPC QueryServer interface types.QueryServer - storeService corestore.KVStoreService - legacySubspace types.ParamSubspace - cdc codec.BinaryCodec - clientKeeper types.ClientKeeper + storeService corestore.KVStoreService + cdc codec.BinaryCodec + clientKeeper types.ClientKeeper } // NewKeeper creates a new IBC connection Keeper instance -func NewKeeper(cdc codec.BinaryCodec, storeService corestore.KVStoreService, legacySubspace types.ParamSubspace, ck types.ClientKeeper) *Keeper { +func NewKeeper(cdc codec.BinaryCodec, storeService corestore.KVStoreService, ck types.ClientKeeper) *Keeper { return &Keeper{ - storeService: storeService, - cdc: cdc, - legacySubspace: legacySubspace, - clientKeeper: ck, + storeService: storeService, + cdc: cdc, + clientKeeper: ck, } } // Logger returns a module-specific logger. 
-func (Keeper) Logger(ctx sdk.Context) log.Logger { +func (*Keeper) Logger(ctx sdk.Context) log.Logger { return ctx.Logger().With("module", "x/"+exported.ModuleName+"/"+types.SubModuleName) } @@ -192,7 +190,8 @@ func (k *Keeper) IterateConnections(ctx sdk.Context, cb func(types.IdentifiedCon } // GetAllConnections returns all stored ConnectionEnd objects. -func (k *Keeper) GetAllConnections(ctx sdk.Context) (connections []types.IdentifiedConnection) { +func (k *Keeper) GetAllConnections(ctx sdk.Context) []types.IdentifiedConnection { + var connections []types.IdentifiedConnection k.IterateConnections(ctx, func(connection types.IdentifiedConnection) bool { connections = append(connections, connection) return false diff --git a/modules/core/03-connection/keeper/keeper_test.go b/modules/core/03-connection/keeper/keeper_test.go index 6d18de9c18d..f7108372f99 100644 --- a/modules/core/03-connection/keeper/keeper_test.go +++ b/modules/core/03-connection/keeper/keeper_test.go @@ -22,57 +22,57 @@ type KeeperTestSuite struct { chainB *ibctesting.TestChain } -func (suite *KeeperTestSuite) SetupTest() { - suite.coordinator = ibctesting.NewCoordinator(suite.T(), 2) - suite.chainA = suite.coordinator.GetChain(ibctesting.GetChainID(1)) - suite.chainB = suite.coordinator.GetChain(ibctesting.GetChainID(2)) -} - func TestKeeperTestSuite(t *testing.T) { testifysuite.Run(t, new(KeeperTestSuite)) } -func (suite *KeeperTestSuite) TestSetAndGetConnection() { - path := ibctesting.NewPath(suite.chainA, suite.chainB) +func (s *KeeperTestSuite) SetupTest() { + s.coordinator = ibctesting.NewCoordinator(s.T(), 2) + s.chainA = s.coordinator.GetChain(ibctesting.GetChainID(1)) + s.chainB = s.coordinator.GetChain(ibctesting.GetChainID(2)) +} + +func (s *KeeperTestSuite) TestSetAndGetConnection() { + path := ibctesting.NewPath(s.chainA, s.chainB) path.SetupClients() firstConnection := "connection-0" // check first connection does not exist - _, existed := suite.chainA.App.GetIBCKeeper().ConnectionKeeper.GetConnection(suite.chainA.GetContext(), firstConnection) - suite.Require().False(existed) + _, existed := s.chainA.App.GetIBCKeeper().ConnectionKeeper.GetConnection(s.chainA.GetContext(), firstConnection) + s.Require().False(existed) path.CreateConnections() - _, existed = suite.chainA.App.GetIBCKeeper().ConnectionKeeper.GetConnection(suite.chainA.GetContext(), firstConnection) - suite.Require().True(existed) + _, existed = s.chainA.App.GetIBCKeeper().ConnectionKeeper.GetConnection(s.chainA.GetContext(), firstConnection) + s.Require().True(existed) } -func (suite *KeeperTestSuite) TestSetAndGetClientConnectionPaths() { - path := ibctesting.NewPath(suite.chainA, suite.chainB) +func (s *KeeperTestSuite) TestSetAndGetClientConnectionPaths() { + path := ibctesting.NewPath(s.chainA, s.chainB) path.SetupClients() - _, existed := suite.chainA.App.GetIBCKeeper().ConnectionKeeper.GetClientConnectionPaths(suite.chainA.GetContext(), path.EndpointA.ClientID) - suite.False(existed) + _, existed := s.chainA.App.GetIBCKeeper().ConnectionKeeper.GetClientConnectionPaths(s.chainA.GetContext(), path.EndpointA.ClientID) + s.False(existed) connections := []string{"connectionA", "connectionB"} - suite.chainA.App.GetIBCKeeper().ConnectionKeeper.SetClientConnectionPaths(suite.chainA.GetContext(), path.EndpointA.ClientID, connections) - paths, existed := suite.chainA.App.GetIBCKeeper().ConnectionKeeper.GetClientConnectionPaths(suite.chainA.GetContext(), path.EndpointA.ClientID) - suite.True(existed) - suite.EqualValues(connections, paths) + 
s.chainA.App.GetIBCKeeper().ConnectionKeeper.SetClientConnectionPaths(s.chainA.GetContext(), path.EndpointA.ClientID, connections) + paths, existed := s.chainA.App.GetIBCKeeper().ConnectionKeeper.GetClientConnectionPaths(s.chainA.GetContext(), path.EndpointA.ClientID) + s.True(existed) + s.Equal(connections, paths) } // create 2 connections: A0 - B0, A1 - B1 -func (suite *KeeperTestSuite) TestGetAllConnections() { - path1 := ibctesting.NewPath(suite.chainA, suite.chainB) +func (s *KeeperTestSuite) TestGetAllConnections() { + path1 := ibctesting.NewPath(s.chainA, s.chainB) path1.SetupConnections() - path2 := ibctesting.NewPath(suite.chainA, suite.chainB) + path2 := ibctesting.NewPath(s.chainA, s.chainB) path2.EndpointA.ClientID = path1.EndpointA.ClientID path2.EndpointB.ClientID = path1.EndpointB.ClientID path2.CreateConnections() - counterpartyB0 := types.NewCounterparty(path1.EndpointB.ClientID, path1.EndpointB.ConnectionID, suite.chainB.GetPrefix()) // connection B0 - counterpartyB1 := types.NewCounterparty(path2.EndpointB.ClientID, path2.EndpointB.ConnectionID, suite.chainB.GetPrefix()) // connection B1 + counterpartyB0 := types.NewCounterparty(path1.EndpointB.ClientID, path1.EndpointB.ConnectionID, s.chainB.GetPrefix()) // connection B0 + counterpartyB1 := types.NewCounterparty(path2.EndpointB.ClientID, path2.EndpointB.ConnectionID, s.chainB.GetPrefix()) // connection B1 conn1 := types.NewConnectionEnd(types.OPEN, path1.EndpointA.ClientID, counterpartyB0, types.GetCompatibleVersions(), 0) // A0 - B0 conn2 := types.NewConnectionEnd(types.OPEN, path2.EndpointA.ClientID, counterpartyB1, types.GetCompatibleVersions(), 0) // A1 - B1 @@ -80,26 +80,26 @@ func (suite *KeeperTestSuite) TestGetAllConnections() { iconn1 := types.NewIdentifiedConnection(path1.EndpointA.ConnectionID, conn1) iconn2 := types.NewIdentifiedConnection(path2.EndpointA.ConnectionID, conn2) - suite.chainA.App.GetIBCKeeper().ConnectionKeeper.CreateSentinelLocalhostConnection(suite.chainA.GetContext()) - localhostConn, found := suite.chainA.App.GetIBCKeeper().ConnectionKeeper.GetConnection(suite.chainA.GetContext(), exported.LocalhostConnectionID) - suite.Require().True(found) + s.chainA.App.GetIBCKeeper().ConnectionKeeper.CreateSentinelLocalhostConnection(s.chainA.GetContext()) + localhostConn, found := s.chainA.App.GetIBCKeeper().ConnectionKeeper.GetConnection(s.chainA.GetContext(), exported.LocalhostConnectionID) + s.Require().True(found) expConnections := []types.IdentifiedConnection{iconn1, iconn2, types.NewIdentifiedConnection(exported.LocalhostConnectionID, localhostConn)} - connections := suite.chainA.App.GetIBCKeeper().ConnectionKeeper.GetAllConnections(suite.chainA.GetContext()) - suite.Require().Len(connections, len(expConnections)) - suite.Require().Equal(expConnections, connections) + connections := s.chainA.App.GetIBCKeeper().ConnectionKeeper.GetAllConnections(s.chainA.GetContext()) + s.Require().Len(connections, len(expConnections)) + s.Require().Equal(expConnections, connections) } // the test creates 2 clients path.EndpointA.ClientID0 and path.EndpointA.ClientID1. path.EndpointA.ClientID0 has a single // connection and path.EndpointA.ClientID1 has 2 connections. 
-func (suite *KeeperTestSuite) TestGetAllClientConnectionPaths() { - path1 := ibctesting.NewPath(suite.chainA, suite.chainB) - path2 := ibctesting.NewPath(suite.chainA, suite.chainB) +func (s *KeeperTestSuite) TestGetAllClientConnectionPaths() { + path1 := ibctesting.NewPath(s.chainA, s.chainB) + path2 := ibctesting.NewPath(s.chainA, s.chainB) path1.SetupConnections() path2.SetupConnections() - path3 := ibctesting.NewPath(suite.chainA, suite.chainB) + path3 := ibctesting.NewPath(s.chainA, s.chainB) path3.EndpointA.ClientID = path2.EndpointA.ClientID path3.EndpointB.ClientID = path2.EndpointB.ClientID path3.CreateConnections() @@ -109,36 +109,36 @@ func (suite *KeeperTestSuite) TestGetAllClientConnectionPaths() { types.NewConnectionPaths(path2.EndpointA.ClientID, []string{path2.EndpointA.ConnectionID, path3.EndpointA.ConnectionID}), } - connPaths := suite.chainA.App.GetIBCKeeper().ConnectionKeeper.GetAllClientConnectionPaths(suite.chainA.GetContext()) - suite.Require().Len(connPaths, 2) - suite.Require().Equal(expPaths, connPaths) + connPaths := s.chainA.App.GetIBCKeeper().ConnectionKeeper.GetAllClientConnectionPaths(s.chainA.GetContext()) + s.Require().Len(connPaths, 2) + s.Require().Equal(expPaths, connPaths) } -func (suite *KeeperTestSuite) TestLocalhostConnectionEndCreation() { - ctx := suite.chainA.GetContext() - connectionKeeper := suite.chainA.App.GetIBCKeeper().ConnectionKeeper +func (s *KeeperTestSuite) TestLocalhostConnectionEndCreation() { + ctx := s.chainA.GetContext() + connectionKeeper := s.chainA.App.GetIBCKeeper().ConnectionKeeper connectionKeeper.CreateSentinelLocalhostConnection(ctx) connectionEnd, found := connectionKeeper.GetConnection(ctx, exported.LocalhostConnectionID) - suite.Require().True(found) - suite.Require().Equal(types.OPEN, connectionEnd.State) - suite.Require().Equal(exported.LocalhostClientID, connectionEnd.ClientId) - suite.Require().Equal(types.GetCompatibleVersions(), connectionEnd.Versions) + s.Require().True(found) + s.Require().Equal(types.OPEN, connectionEnd.State) + s.Require().Equal(exported.LocalhostClientID, connectionEnd.ClientId) + s.Require().Equal(types.GetCompatibleVersions(), connectionEnd.Versions) expectedCounterParty := types.NewCounterparty(exported.LocalhostClientID, exported.LocalhostConnectionID, commitmenttypes.NewMerklePrefix(connectionKeeper.GetCommitmentPrefix().Bytes())) - suite.Require().Equal(expectedCounterParty, connectionEnd.Counterparty) + s.Require().Equal(expectedCounterParty, connectionEnd.Counterparty) } // TestDefaultSetParams tests the default params set are what is expected -func (suite *KeeperTestSuite) TestDefaultSetParams() { +func (s *KeeperTestSuite) TestDefaultSetParams() { expParams := types.DefaultParams() - params := suite.chainA.App.GetIBCKeeper().ConnectionKeeper.GetParams(suite.chainA.GetContext()) - suite.Require().Equal(expParams, params) + params := s.chainA.App.GetIBCKeeper().ConnectionKeeper.GetParams(s.chainA.GetContext()) + s.Require().Equal(expParams, params) } // TestSetAndGetParams tests that param setting and retrieval works properly -func (suite *KeeperTestSuite) TestSetAndGetParams() { +func (s *KeeperTestSuite) TestSetAndGetParams() { testCases := []struct { name string input types.Params @@ -150,32 +150,32 @@ func (suite *KeeperTestSuite) TestSetAndGetParams() { } for _, tc := range testCases { - suite.Run(tc.name, func() { - suite.SetupTest() // reset - ctx := suite.chainA.GetContext() + s.Run(tc.name, func() { + s.SetupTest() // reset + ctx := s.chainA.GetContext() err := 
tc.input.Validate() - suite.chainA.GetSimApp().IBCKeeper.ConnectionKeeper.SetParams(ctx, tc.input) + s.chainA.GetSimApp().IBCKeeper.ConnectionKeeper.SetParams(ctx, tc.input) if tc.expErr == nil { - suite.Require().NoError(err) + s.Require().NoError(err) expected := tc.input - p := suite.chainA.GetSimApp().IBCKeeper.ConnectionKeeper.GetParams(ctx) - suite.Require().Equal(expected, p) + p := s.chainA.GetSimApp().IBCKeeper.ConnectionKeeper.GetParams(ctx) + s.Require().Equal(expected, p) } else { - suite.Require().Error(err) - suite.Require().Equal(err.Error(), tc.expErr.Error()) + s.Require().Error(err) + s.Require().Equal(err.Error(), tc.expErr.Error()) } }) } } // TestUnsetParams tests that trying to get params that are not set panics. -func (suite *KeeperTestSuite) TestUnsetParams() { - suite.SetupTest() - ctx := suite.chainA.GetContext() - store := ctx.KVStore(suite.chainA.GetSimApp().GetKey(exported.StoreKey)) +func (s *KeeperTestSuite) TestUnsetParams() { + s.SetupTest() + ctx := s.chainA.GetContext() + store := ctx.KVStore(s.chainA.GetSimApp().GetKey(exported.StoreKey)) store.Delete([]byte(types.ParamsKey)) - suite.Require().Panics(func() { - suite.chainA.GetSimApp().IBCKeeper.ConnectionKeeper.GetParams(ctx) + s.Require().Panics(func() { + s.chainA.GetSimApp().IBCKeeper.ConnectionKeeper.GetParams(ctx) }) } diff --git a/modules/core/03-connection/keeper/migrations.go b/modules/core/03-connection/keeper/migrations.go deleted file mode 100644 index 0220f49e85c..00000000000 --- a/modules/core/03-connection/keeper/migrations.go +++ /dev/null @@ -1,40 +0,0 @@ -package keeper - -import ( - sdk "github.com/cosmos/cosmos-sdk/types" - - connectionv7 "github.com/cosmos/ibc-go/v10/modules/core/03-connection/migrations/v7" - "github.com/cosmos/ibc-go/v10/modules/core/03-connection/types" -) - -// Migrator is a struct for handling in-place store migrations. -type Migrator struct { - keeper *Keeper -} - -// NewMigrator returns a new Migrator. -func NewMigrator(keeper *Keeper) Migrator { - return Migrator{keeper: keeper} -} - -// Migrate3to4 migrates from version 3 to 4. -// This migration writes the sentinel localhost connection end to state. -func (m Migrator) Migrate3to4(ctx sdk.Context) error { - connectionv7.MigrateLocalhostConnection(ctx, m.keeper) - return nil -} - -// MigrateParams migrates from consensus version 4 to 5. -// This migration takes the parameters that are currently stored and managed by x/params -// and stores them directly in the ibc module's state. 
-func (m Migrator) MigrateParams(ctx sdk.Context) error { - var params types.Params - m.keeper.legacySubspace.GetParamSet(ctx, &params) - if err := params.Validate(); err != nil { - return err - } - - m.keeper.SetParams(ctx, params) - m.keeper.Logger(ctx).Info("successfully migrated connection to self-manage params") - return nil -} diff --git a/modules/core/03-connection/keeper/migrations_test.go b/modules/core/03-connection/keeper/migrations_test.go deleted file mode 100644 index 3192453e4f4..00000000000 --- a/modules/core/03-connection/keeper/migrations_test.go +++ /dev/null @@ -1,42 +0,0 @@ -package keeper_test - -import ( - "github.com/cosmos/ibc-go/v10/modules/core/03-connection/keeper" - "github.com/cosmos/ibc-go/v10/modules/core/03-connection/types" - ibcexported "github.com/cosmos/ibc-go/v10/modules/core/exported" -) - -// TestMigrateParams tests that the params for the connection are properly migrated -func (suite *KeeperTestSuite) TestMigrateParams() { - testCases := []struct { - name string - malleate func() - expectedParams types.Params - }{ - { - "success: default params", - func() { - params := types.DefaultParams() - subspace := suite.chainA.GetSimApp().GetSubspace(ibcexported.ModuleName) - subspace.SetParamSet(suite.chainA.GetContext(), &params) - }, - types.DefaultParams(), - }, - } - - for _, tc := range testCases { - suite.Run(tc.name, func() { - suite.SetupTest() // reset - - tc.malleate() - - ctx := suite.chainA.GetContext() - migrator := keeper.NewMigrator(suite.chainA.GetSimApp().IBCKeeper.ConnectionKeeper) - err := migrator.MigrateParams(ctx) - suite.Require().NoError(err) - - params := suite.chainA.GetSimApp().IBCKeeper.ConnectionKeeper.GetParams(ctx) - suite.Require().Equal(tc.expectedParams, params) - }) - } -} diff --git a/modules/core/03-connection/keeper/verify_test.go b/modules/core/03-connection/keeper/verify_test.go index dd902140407..65a7d8e3413 100644 --- a/modules/core/03-connection/keeper/verify_test.go +++ b/modules/core/03-connection/keeper/verify_test.go @@ -22,7 +22,7 @@ var defaultTimeoutHeight = clienttypes.NewHeight(1, 100000) // TestVerifyConnectionState verifies the connection state of the connection // on chainB. The connections on chainA and chainB are fully opened. 
-func (suite *KeeperTestSuite) TestVerifyConnectionState() { +func (s *KeeperTestSuite) TestVerifyConnectionState() { var ( path *ibctesting.Path heightDiff uint64 @@ -41,21 +41,21 @@ func (suite *KeeperTestSuite) TestVerifyConnectionState() { }, errorsmod.Wrap(ibcerrors.ErrInvalidHeight, "failed connection state verification for client (07-tendermint-0): client state height < proof height ({1 9} < {1 14}), please ensure the client has been updated")}, {"client status is not active - client is expired", func() { clientState, ok := path.EndpointA.GetClientState().(*ibctm.ClientState) - suite.Require().True(ok) + s.Require().True(ok) clientState.FrozenHeight = clienttypes.NewHeight(0, 1) path.EndpointA.SetClientState(clientState) }, errorsmod.Wrap(clienttypes.ErrClientNotActive, "client (07-tendermint-0) status is Frozen")}, } for _, tc := range cases { - suite.Run(tc.name, func() { - suite.SetupTest() // reset + s.Run(tc.name, func() { + s.SetupTest() // reset - path = ibctesting.NewPath(suite.chainA, suite.chainB) + path = ibctesting.NewPath(s.chainA, s.chainB) path.SetupConnections() connectionKey := host.ConnectionKey(path.EndpointB.ConnectionID) - proof, proofHeight := suite.chainB.QueryProof(connectionKey) + proof, proofHeight := s.chainB.QueryProof(connectionKey) tc.malleate() @@ -63,16 +63,16 @@ func (suite *KeeperTestSuite) TestVerifyConnectionState() { expectedConnection := path.EndpointB.GetConnection() - err := suite.chainA.App.GetIBCKeeper().ConnectionKeeper.VerifyConnectionState( - suite.chainA.GetContext(), connection, + err := s.chainA.App.GetIBCKeeper().ConnectionKeeper.VerifyConnectionState( + s.chainA.GetContext(), connection, malleateHeight(proofHeight, heightDiff), proof, path.EndpointB.ConnectionID, expectedConnection, ) if tc.expErr == nil { - suite.Require().NoError(err) + s.Require().NoError(err) } else { - suite.Require().Error(err) - suite.Require().ErrorIs(err, tc.expErr) + s.Require().Error(err) + s.Require().ErrorIs(err, tc.expErr) } }) } @@ -80,7 +80,7 @@ func (suite *KeeperTestSuite) TestVerifyConnectionState() { // TestVerifyChannelState verifies the channel state of the channel on // chainB. The channels on chainA and chainB are fully opened. 
-func (suite *KeeperTestSuite) TestVerifyChannelState() { +func (s *KeeperTestSuite) TestVerifyChannelState() { var ( path *ibctesting.Path heightDiff uint64 @@ -99,37 +99,37 @@ func (suite *KeeperTestSuite) TestVerifyChannelState() { }, errorsmod.Wrap(ibcerrors.ErrInvalidHeight, "failed channel state verification for client (07-tendermint-0): client state height < proof height ({1 15} < {1 20}), please ensure the client has been updated")}, {"client status is not active - client is expired", func() { clientState, ok := path.EndpointA.GetClientState().(*ibctm.ClientState) - suite.Require().True(ok) + s.Require().True(ok) clientState.FrozenHeight = clienttypes.NewHeight(0, 1) path.EndpointA.SetClientState(clientState) }, errorsmod.Wrap(clienttypes.ErrClientNotActive, "client (07-tendermint-0) status is Frozen")}, } for _, tc := range cases { - suite.Run(fmt.Sprintf("Case %s", tc.name), func() { - suite.SetupTest() // reset + s.Run(fmt.Sprintf("Case %s", tc.name), func() { + s.SetupTest() // reset - path = ibctesting.NewPath(suite.chainA, suite.chainB) + path = ibctesting.NewPath(s.chainA, s.chainB) path.Setup() channelKey := host.ChannelKey(path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID) - proof, proofHeight := suite.chainB.QueryProof(channelKey) + proof, proofHeight := s.chainB.QueryProof(channelKey) tc.malleate() connection := path.EndpointA.GetConnection() channel := path.EndpointB.GetChannel() - err := suite.chainA.App.GetIBCKeeper().ConnectionKeeper.VerifyChannelState( - suite.chainA.GetContext(), connection, malleateHeight(proofHeight, heightDiff), proof, + err := s.chainA.App.GetIBCKeeper().ConnectionKeeper.VerifyChannelState( + s.chainA.GetContext(), connection, malleateHeight(proofHeight, heightDiff), proof, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, channel, ) if tc.expErr == nil { - suite.Require().NoError(err) + s.Require().NoError(err) } else { - suite.Require().Error(err) - suite.Require().ErrorIs(err, tc.expErr) + s.Require().Error(err) + s.Require().ErrorIs(err, tc.expErr) } }) } @@ -138,7 +138,7 @@ func (suite *KeeperTestSuite) TestVerifyChannelState() { // TestVerifyPacketCommitment has chainB verify the packet commitment // on channelA. The channels on chainA and chainB are fully opened and a // packet is sent from chainA to chainB, but has not been received. 
-func (suite *KeeperTestSuite) TestVerifyPacketCommitment() { +func (s *KeeperTestSuite) TestVerifyPacketCommitment() { var ( path *ibctesting.Path packet channeltypes.Packet @@ -172,21 +172,21 @@ func (suite *KeeperTestSuite) TestVerifyPacketCommitment() { }, errorsmod.Wrap(commitmenttypes.ErrInvalidProof, "failed packet commitment verification for client (07-tendermint-0): failed to verify membership proof at index 0: provided value doesn't match proof")}, {"client status is not active - client is expired", func() { clientState, ok := path.EndpointB.GetClientState().(*ibctm.ClientState) - suite.Require().True(ok) + s.Require().True(ok) clientState.FrozenHeight = clienttypes.NewHeight(0, 1) path.EndpointB.SetClientState(clientState) }, errorsmod.Wrap(clienttypes.ErrClientNotActive, "client (07-tendermint-0) status is Frozen")}, } for _, tc := range cases { - suite.Run(tc.name, func() { - suite.SetupTest() // reset + s.Run(tc.name, func() { + s.SetupTest() // reset - path = ibctesting.NewPath(suite.chainA, suite.chainB) + path = ibctesting.NewPath(s.chainA, s.chainB) path.Setup() sequence, err := path.EndpointA.SendPacket(defaultTimeoutHeight, 0, ibctesting.MockPacketData) - suite.Require().NoError(err) + s.Require().NoError(err) packet = channeltypes.NewPacket(ibctesting.MockPacketData, sequence, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, defaultTimeoutHeight, 0) // reset variables @@ -198,24 +198,24 @@ func (suite *KeeperTestSuite) TestVerifyPacketCommitment() { connection := path.EndpointB.GetConnection() connection.DelayPeriod = delayTimePeriod commitmentKey := host.PacketCommitmentKey(packet.GetSourcePort(), packet.GetSourceChannel(), packet.GetSequence()) - proof, proofHeight := suite.chainA.QueryProof(commitmentKey) + proof, proofHeight := s.chainA.QueryProof(commitmentKey) // set time per block param if timePerBlock != 0 { - suite.chainB.App.GetIBCKeeper().ConnectionKeeper.SetParams(suite.chainB.GetContext(), types.NewParams(timePerBlock)) + s.chainB.App.GetIBCKeeper().ConnectionKeeper.SetParams(s.chainB.GetContext(), types.NewParams(timePerBlock)) } commitment := channeltypes.CommitPacket(packet) - err = suite.chainB.App.GetIBCKeeper().ConnectionKeeper.VerifyPacketCommitment( - suite.chainB.GetContext(), connection, malleateHeight(proofHeight, heightDiff), proof, + err = s.chainB.App.GetIBCKeeper().ConnectionKeeper.VerifyPacketCommitment( + s.chainB.GetContext(), connection, malleateHeight(proofHeight, heightDiff), proof, packet.GetSourcePort(), packet.GetSourceChannel(), packet.GetSequence(), commitment, ) if tc.expErr == nil { - suite.Require().NoError(err) + s.Require().NoError(err) } else { - suite.Require().Error(err) - suite.Require().ErrorIs(err, tc.expErr) + s.Require().Error(err) + s.Require().ErrorIs(err, tc.expErr) } }) } @@ -224,7 +224,7 @@ func (suite *KeeperTestSuite) TestVerifyPacketCommitment() { // TestVerifyPacketAcknowledgement has chainA verify the acknowledgement on // channelB. The channels on chainA and chainB are fully opened and a packet // is sent from chainA to chainB and received. 
-func (suite *KeeperTestSuite) TestVerifyPacketAcknowledgement() { +func (s *KeeperTestSuite) TestVerifyPacketAcknowledgement() { var ( path *ibctesting.Path ack exported.Acknowledgement @@ -259,34 +259,34 @@ func (suite *KeeperTestSuite) TestVerifyPacketAcknowledgement() { }, errorsmod.Wrap(commitmenttypes.ErrInvalidProof, "failed packet acknowledgement verification for client (07-tendermint-0): failed to verify membership proof at index 0: provided value doesn't match proof")}, {"client status is not active - client is expired", func() { clientState, ok := path.EndpointA.GetClientState().(*ibctm.ClientState) - suite.Require().True(ok) + s.Require().True(ok) clientState.FrozenHeight = clienttypes.NewHeight(0, 1) path.EndpointA.SetClientState(clientState) }, errorsmod.Wrap(clienttypes.ErrClientNotActive, "client (07-tendermint-0) status is Frozen")}, } for _, tc := range cases { - suite.Run(tc.name, func() { - suite.SetupTest() // reset + s.Run(tc.name, func() { + s.SetupTest() // reset ack = ibcmock.MockAcknowledgement // must be explicitly changed - path = ibctesting.NewPath(suite.chainA, suite.chainB) + path = ibctesting.NewPath(s.chainA, s.chainB) path.Setup() // send and receive packet sequence, err := path.EndpointA.SendPacket(defaultTimeoutHeight, 0, ibctesting.MockPacketData) - suite.Require().NoError(err) + s.Require().NoError(err) // increment receiving chain's (chainB) time by 2 hour to always pass receive - suite.coordinator.IncrementTimeBy(time.Hour * 2) - suite.coordinator.CommitBlock(suite.chainB) + s.coordinator.IncrementTimeBy(time.Hour * 2) + s.coordinator.CommitBlock(s.chainB) packet := channeltypes.NewPacket(ibctesting.MockPacketData, sequence, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, defaultTimeoutHeight, 0) err = path.EndpointB.RecvPacket(packet) - suite.Require().NoError(err) + s.Require().NoError(err) packetAckKey := host.PacketAcknowledgementKey(packet.GetDestPort(), packet.GetDestChannel(), packet.GetSequence()) - proof, proofHeight := suite.chainB.QueryProof(packetAckKey) + proof, proofHeight := s.chainB.QueryProof(packetAckKey) // reset variables heightDiff = 0 @@ -299,19 +299,19 @@ func (suite *KeeperTestSuite) TestVerifyPacketAcknowledgement() { // set time per block param if timePerBlock != 0 { - suite.chainA.App.GetIBCKeeper().ConnectionKeeper.SetParams(suite.chainA.GetContext(), types.NewParams(timePerBlock)) + s.chainA.App.GetIBCKeeper().ConnectionKeeper.SetParams(s.chainA.GetContext(), types.NewParams(timePerBlock)) } - err = suite.chainA.App.GetIBCKeeper().ConnectionKeeper.VerifyPacketAcknowledgement( - suite.chainA.GetContext(), connection, malleateHeight(proofHeight, heightDiff), proof, + err = s.chainA.App.GetIBCKeeper().ConnectionKeeper.VerifyPacketAcknowledgement( + s.chainA.GetContext(), connection, malleateHeight(proofHeight, heightDiff), proof, packet.GetDestPort(), packet.GetDestChannel(), packet.GetSequence(), ack.Acknowledgement(), ) if tc.expErr == nil { - suite.Require().NoError(err) + s.Require().NoError(err) } else { - suite.Require().Error(err) - suite.ErrorIs(err, tc.expErr) + s.Require().Error(err) + s.Require().ErrorIs(err, tc.expErr) } }) } @@ -320,7 +320,7 @@ func (suite *KeeperTestSuite) TestVerifyPacketAcknowledgement() { // TestVerifyPacketReceiptAbsence has chainA verify the receipt // absence on channelB. The channels on chainA and chainB are fully opened and // a packet is sent from chainA to chainB and not received. 
-func (suite *KeeperTestSuite) TestVerifyPacketReceiptAbsence() { +func (s *KeeperTestSuite) TestVerifyPacketReceiptAbsence() { var ( path *ibctesting.Path packet channeltypes.Packet @@ -352,30 +352,30 @@ func (suite *KeeperTestSuite) TestVerifyPacketReceiptAbsence() { }, errorsmod.Wrap(ibcerrors.ErrInvalidHeight, "failed packet commitment verification for client (07-tendermint-0): client state height < proof height ({1 17} < {1 22}), please ensure the client has been updated")}, {"verification failed - acknowledgement was received", func() { // increment receiving chain's (chainB) time by 2 hour to always pass receive - suite.coordinator.IncrementTimeBy(time.Hour * 2) - suite.coordinator.CommitBlock(suite.chainB) + s.coordinator.IncrementTimeBy(time.Hour * 2) + s.coordinator.CommitBlock(s.chainB) err := path.EndpointB.RecvPacket(packet) - suite.Require().NoError(err) + s.Require().NoError(err) }, errorsmod.Wrap(commitmenttypes.ErrInvalidProof, "failed packet commitment verification for client (07-tendermint-0): failed to verify membership proof at index 0: provided value doesn't match proof")}, {"client status is not active - client is expired", func() { clientState, ok := path.EndpointA.GetClientState().(*ibctm.ClientState) - suite.Require().True(ok) + s.Require().True(ok) clientState.FrozenHeight = clienttypes.NewHeight(0, 1) path.EndpointA.SetClientState(clientState) }, errorsmod.Wrap(clienttypes.ErrClientNotActive, "client (07-tendermint-0) status is Frozen")}, } for _, tc := range cases { - suite.Run(tc.name, func() { - suite.SetupTest() // reset + s.Run(tc.name, func() { + s.SetupTest() // reset - path = ibctesting.NewPath(suite.chainA, suite.chainB) + path = ibctesting.NewPath(s.chainA, s.chainB) path.Setup() // send, only receive in malleate if applicable sequence, err := path.EndpointA.SendPacket(defaultTimeoutHeight, 0, ibctesting.MockPacketData) - suite.Require().NoError(err) + s.Require().NoError(err) packet = channeltypes.NewPacket(ibctesting.MockPacketData, sequence, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, defaultTimeoutHeight, 0) // reset variables @@ -388,32 +388,32 @@ func (suite *KeeperTestSuite) TestVerifyPacketReceiptAbsence() { connection.DelayPeriod = delayTimePeriod clientState, ok := path.EndpointA.GetClientState().(*ibctm.ClientState) - suite.Require().True(ok) + s.Require().True(ok) if clientState.FrozenHeight.IsZero() { // need to update height to prove absence or receipt - suite.coordinator.CommitBlock(suite.chainA, suite.chainB) + s.coordinator.CommitBlock(s.chainA, s.chainB) err = path.EndpointA.UpdateClient() - suite.Require().NoError(err) + s.Require().NoError(err) } packetReceiptKey := host.PacketReceiptKey(packet.GetDestPort(), packet.GetDestChannel(), packet.GetSequence()) - proof, proofHeight := suite.chainB.QueryProof(packetReceiptKey) + proof, proofHeight := s.chainB.QueryProof(packetReceiptKey) // set time per block param if timePerBlock != 0 { - suite.chainA.App.GetIBCKeeper().ConnectionKeeper.SetParams(suite.chainA.GetContext(), types.NewParams(timePerBlock)) + s.chainA.App.GetIBCKeeper().ConnectionKeeper.SetParams(s.chainA.GetContext(), types.NewParams(timePerBlock)) } - err = suite.chainA.App.GetIBCKeeper().ConnectionKeeper.VerifyPacketReceiptAbsence( - suite.chainA.GetContext(), connection, malleateHeight(proofHeight, heightDiff), proof, + err = s.chainA.App.GetIBCKeeper().ConnectionKeeper.VerifyPacketReceiptAbsence( + s.chainA.GetContext(), connection, 
malleateHeight(proofHeight, heightDiff), proof, packet.GetDestPort(), packet.GetDestChannel(), packet.GetSequence(), ) if tc.expErr == nil { - suite.Require().NoError(err) + s.Require().NoError(err) } else { - suite.Require().Error(err) - suite.Require().ErrorIs(err, tc.expErr) + s.Require().Error(err) + s.Require().ErrorIs(err, tc.expErr) } }) } @@ -422,7 +422,7 @@ func (suite *KeeperTestSuite) TestVerifyPacketReceiptAbsence() { // TestVerifyNextSequenceRecv has chainA verify the next sequence receive on // channelB. The channels on chainA and chainB are fully opened and a packet // is sent from chainA to chainB and received. -func (suite *KeeperTestSuite) TestVerifyNextSequenceRecv() { +func (s *KeeperTestSuite) TestVerifyNextSequenceRecv() { var ( path *ibctesting.Path heightDiff uint64 @@ -457,33 +457,33 @@ func (suite *KeeperTestSuite) TestVerifyNextSequenceRecv() { }, errorsmod.Wrap(commitmenttypes.ErrInvalidProof, "failed packet commitment verification for client (07-tendermint-0): failed to verify membership proof at index 0: provided value doesn't match proof")}, {"client status is not active - client is expired", func() { clientState, ok := path.EndpointA.GetClientState().(*ibctm.ClientState) - suite.Require().True(ok) + s.Require().True(ok) clientState.FrozenHeight = clienttypes.NewHeight(0, 1) path.EndpointA.SetClientState(clientState) }, errorsmod.Wrap(clienttypes.ErrClientNotActive, "client (07-tendermint-0) status is Frozen")}, } for _, tc := range cases { - suite.Run(tc.name, func() { - suite.SetupTest() // reset + s.Run(tc.name, func() { + s.SetupTest() // reset - path = ibctesting.NewPath(suite.chainA, suite.chainB) + path = ibctesting.NewPath(s.chainA, s.chainB) path.Setup() // send and receive packet sequence, err := path.EndpointA.SendPacket(defaultTimeoutHeight, 0, ibctesting.MockPacketData) - suite.Require().NoError(err) + s.Require().NoError(err) // increment receiving chain's (chainB) time by 2 hour to always pass receive - suite.coordinator.IncrementTimeBy(time.Hour * 2) - suite.coordinator.CommitBlock(suite.chainB) + s.coordinator.IncrementTimeBy(time.Hour * 2) + s.coordinator.CommitBlock(s.chainB) packet := channeltypes.NewPacket(ibctesting.MockPacketData, sequence, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, defaultTimeoutHeight, 0) err = path.EndpointB.RecvPacket(packet) - suite.Require().NoError(err) + s.Require().NoError(err) nextSeqRecvKey := host.NextSequenceRecvKey(packet.GetDestPort(), packet.GetDestChannel()) - proof, proofHeight := suite.chainB.QueryProof(nextSeqRecvKey) + proof, proofHeight := s.chainB.QueryProof(nextSeqRecvKey) // reset variables heightDiff = 0 @@ -493,21 +493,21 @@ func (suite *KeeperTestSuite) TestVerifyNextSequenceRecv() { // set time per block param if timePerBlock != 0 { - suite.chainA.App.GetIBCKeeper().ConnectionKeeper.SetParams(suite.chainA.GetContext(), types.NewParams(timePerBlock)) + s.chainA.App.GetIBCKeeper().ConnectionKeeper.SetParams(s.chainA.GetContext(), types.NewParams(timePerBlock)) } connection := path.EndpointA.GetConnection() connection.DelayPeriod = delayTimePeriod - err = suite.chainA.App.GetIBCKeeper().ConnectionKeeper.VerifyNextSequenceRecv( - suite.chainA.GetContext(), connection, malleateHeight(proofHeight, heightDiff), proof, + err = s.chainA.App.GetIBCKeeper().ConnectionKeeper.VerifyNextSequenceRecv( + s.chainA.GetContext(), connection, malleateHeight(proofHeight, heightDiff), proof, packet.GetDestPort(), 
packet.GetDestChannel(), packet.GetSequence()+offsetSeq, ) if tc.expErr == nil { - suite.Require().NoError(err) + s.Require().NoError(err) } else { - suite.Require().Error(err) - suite.Require().ErrorIs(err, tc.expErr) + s.Require().Error(err) + s.Require().ErrorIs(err, tc.expErr) } }) } diff --git a/modules/core/03-connection/types/connection_test.go b/modules/core/03-connection/types/connection_test.go index 9066e0b62b5..cad83f33771 100644 --- a/modules/core/03-connection/types/connection_test.go +++ b/modules/core/03-connection/types/connection_test.go @@ -56,7 +56,6 @@ func TestConnectionValidateBasic(t *testing.T) { } for i, tc := range testCases { - err := tc.connection.ValidateBasic() if tc.expError == nil { require.NoError(t, err, "valid test case %d failed: %s", i, tc.name) @@ -79,7 +78,6 @@ func TestCounterpartyValidateBasic(t *testing.T) { } for i, tc := range testCases { - err := tc.counterparty.ValidateBasic() if tc.expError == nil { require.NoError(t, err, "valid test case %d failed: %s", i, tc.name) @@ -108,7 +106,6 @@ func TestIdentifiedConnectionValidateBasic(t *testing.T) { } for i, tc := range testCases { - err := tc.connection.ValidateBasic() if tc.expError == nil { require.NoError(t, err, "valid test case %d failed: %s", i, tc.name) diff --git a/modules/core/03-connection/types/expected_keepers.go b/modules/core/03-connection/types/expected_keepers.go index d4ddc3166e7..e576e869f66 100644 --- a/modules/core/03-connection/types/expected_keepers.go +++ b/modules/core/03-connection/types/expected_keepers.go @@ -2,7 +2,6 @@ package types import ( sdk "github.com/cosmos/cosmos-sdk/types" - paramtypes "github.com/cosmos/cosmos-sdk/x/params/types" "github.com/cosmos/ibc-go/v10/modules/core/exported" ) @@ -16,8 +15,3 @@ type ClientKeeper interface { VerifyNonMembership(ctx sdk.Context, clientID string, height exported.Height, delayTimePeriod uint64, delayBlockPeriod uint64, proof []byte, path exported.Path) error IterateClientStates(ctx sdk.Context, prefix []byte, cb func(string, exported.ClientState) bool) } - -// ParamSubspace defines the expected Subspace interface for module parameters. -type ParamSubspace interface { - GetParamSet(ctx sdk.Context, ps paramtypes.ParamSet) -} diff --git a/modules/core/03-connection/types/keys.go b/modules/core/03-connection/types/keys.go index 68b9651d4f2..0170838202e 100644 --- a/modules/core/03-connection/types/keys.go +++ b/modules/core/03-connection/types/keys.go @@ -33,6 +33,9 @@ const ( ParamsKey = "connectionParams" ) +// KeyMaxExpectedTimePerBlock is store's key for MaxExpectedTimePerBlock parameter +var KeyMaxExpectedTimePerBlock = []byte("MaxExpectedTimePerBlock") + // FormatConnectionIdentifier returns the connection identifier with the sequence appended. // This is an SDK specific format not enforced by IBC protocol. 
func FormatConnectionIdentifier(sequence uint64) string { diff --git a/modules/core/03-connection/types/msgs_test.go b/modules/core/03-connection/types/msgs_test.go index e9c7f743357..72738f8e038 100644 --- a/modules/core/03-connection/types/msgs_test.go +++ b/modules/core/03-connection/types/msgs_test.go @@ -47,22 +47,26 @@ type MsgTestSuite struct { proof []byte } -func (suite *MsgTestSuite) SetupTest() { - suite.coordinator = ibctesting.NewCoordinator(suite.T(), 2) +func TestMsgTestSuite(t *testing.T) { + testifysuite.Run(t, new(MsgTestSuite)) +} + +func (s *MsgTestSuite) SetupTest() { + s.coordinator = ibctesting.NewCoordinator(s.T(), 2) - suite.chainA = suite.coordinator.GetChain(ibctesting.GetChainID(1)) - suite.chainB = suite.coordinator.GetChain(ibctesting.GetChainID(2)) + s.chainA = s.coordinator.GetChain(ibctesting.GetChainID(1)) + s.chainB = s.coordinator.GetChain(ibctesting.GetChainID(2)) - app := simapp.Setup(suite.T(), false) + app := simapp.Setup(s.T(), false) db := dbm.NewMemDB() store := rootmulti.NewStore(db, log.NewNopLogger(), metrics.NewNoOpMetrics()) storeKey := storetypes.NewKVStoreKey("iavlStoreKey") store.MountStoreWithDB(storeKey, storetypes.StoreTypeIAVL, nil) err := store.LoadVersion(0) - suite.Require().NoError(err) + s.Require().NoError(err) iavlStore, ok := store.GetCommitStore(storeKey).(*iavl.Store) - suite.Require().True(ok) + s.Require().True(ok) iavlStore.Set([]byte("KEY"), []byte("VALUE")) _ = store.Commit() @@ -72,21 +76,17 @@ func (suite *MsgTestSuite) SetupTest() { Path: fmt.Sprintf("/%s/key", storeKey.Name()), // required path to get key/value+proof Prove: true, }) - suite.Require().NoError(err) + s.Require().NoError(err) merkleProof, err := commitmenttypes.ConvertProofs(res.ProofOps) - suite.Require().NoError(err) + s.Require().NoError(err) proof, err := app.AppCodec().Marshal(&merkleProof) - suite.Require().NoError(err) + s.Require().NoError(err) - suite.proof = proof -} - -func TestMsgTestSuite(t *testing.T) { - testifysuite.Run(t, new(MsgTestSuite)) + s.proof = proof } -func (suite *MsgTestSuite) TestNewMsgConnectionOpenInit() { +func (s *MsgTestSuite) TestNewMsgConnectionOpenInit() { prefix := commitmenttypes.NewMerklePrefix([]byte("storePrefixKey")) // empty versions are considered valid, the default compatible versions // will be used in protocol. 
@@ -108,18 +108,17 @@ func (suite *MsgTestSuite) TestNewMsgConnectionOpenInit() { } for _, tc := range testCases { - err := tc.msg.ValidateBasic() if tc.expError == nil { - suite.Require().NoError(err, tc.name) + s.Require().NoError(err, tc.name) } else { - suite.Require().ErrorIs(err, tc.expError) + s.Require().ErrorIs(err, tc.expError) } } } -func (suite *MsgTestSuite) TestNewMsgConnectionOpenTry() { +func (s *MsgTestSuite) TestNewMsgConnectionOpenTry() { prefix := commitmenttypes.NewMerklePrefix([]byte("storePrefixKey")) testCases := []struct { @@ -127,85 +126,82 @@ func (suite *MsgTestSuite) TestNewMsgConnectionOpenTry() { msg *types.MsgConnectionOpenTry expError error }{ - {"success", types.NewMsgConnectionOpenTry("clienttotesta", "connectiontotest", "clienttotest", prefix, []*types.Version{ibctesting.ConnectionVersion}, 500, suite.proof, clientHeight, signer), nil}, - {"localhost client ID", types.NewMsgConnectionOpenTry(exported.LocalhostClientID, "connectiontotest", "clienttotest", prefix, []*types.Version{ibctesting.ConnectionVersion}, 500, suite.proof, clientHeight, signer), clienttypes.ErrInvalidClientType}, - {"invalid client ID", types.NewMsgConnectionOpenTry("test/iris", "connectiontotest", "clienttotest", prefix, []*types.Version{ibctesting.ConnectionVersion}, 500, suite.proof, clientHeight, signer), host.ErrInvalidID}, - {"invalid counterparty connection ID", types.NewMsgConnectionOpenTry("clienttotesta", "ibc/test", "clienttotest", prefix, []*types.Version{ibctesting.ConnectionVersion}, 500, suite.proof, clientHeight, signer), host.ErrInvalidID}, - {"invalid counterparty client ID", types.NewMsgConnectionOpenTry("clienttotesta", "connectiontotest", "test/conn1", prefix, []*types.Version{ibctesting.ConnectionVersion}, 500, suite.proof, clientHeight, signer), host.ErrInvalidID}, - {"empty counterparty prefix", types.NewMsgConnectionOpenTry("clienttotesta", "connectiontotest", "clienttotest", emptyPrefix, []*types.Version{ibctesting.ConnectionVersion}, 500, suite.proof, clientHeight, signer), types.ErrInvalidCounterparty}, - {"empty counterpartyVersions", types.NewMsgConnectionOpenTry("clienttotesta", "connectiontotest", "clienttotest", prefix, []*types.Version{}, 500, suite.proof, clientHeight, signer), ibcerrors.ErrInvalidVersion}, + {"success", types.NewMsgConnectionOpenTry("clienttotesta", "connectiontotest", "clienttotest", prefix, []*types.Version{ibctesting.ConnectionVersion}, 500, s.proof, clientHeight, signer), nil}, + {"localhost client ID", types.NewMsgConnectionOpenTry(exported.LocalhostClientID, "connectiontotest", "clienttotest", prefix, []*types.Version{ibctesting.ConnectionVersion}, 500, s.proof, clientHeight, signer), clienttypes.ErrInvalidClientType}, + {"invalid client ID", types.NewMsgConnectionOpenTry("test/iris", "connectiontotest", "clienttotest", prefix, []*types.Version{ibctesting.ConnectionVersion}, 500, s.proof, clientHeight, signer), host.ErrInvalidID}, + {"invalid counterparty connection ID", types.NewMsgConnectionOpenTry("clienttotesta", "ibc/test", "clienttotest", prefix, []*types.Version{ibctesting.ConnectionVersion}, 500, s.proof, clientHeight, signer), host.ErrInvalidID}, + {"invalid counterparty client ID", types.NewMsgConnectionOpenTry("clienttotesta", "connectiontotest", "test/conn1", prefix, []*types.Version{ibctesting.ConnectionVersion}, 500, s.proof, clientHeight, signer), host.ErrInvalidID}, + {"empty counterparty prefix", types.NewMsgConnectionOpenTry("clienttotesta", "connectiontotest", "clienttotest", emptyPrefix, 
[]*types.Version{ibctesting.ConnectionVersion}, 500, s.proof, clientHeight, signer), types.ErrInvalidCounterparty}, + {"empty counterpartyVersions", types.NewMsgConnectionOpenTry("clienttotesta", "connectiontotest", "clienttotest", prefix, []*types.Version{}, 500, s.proof, clientHeight, signer), ibcerrors.ErrInvalidVersion}, {"empty proofInit", types.NewMsgConnectionOpenTry("clienttotesta", "connectiontotest", "clienttotest", prefix, []*types.Version{ibctesting.ConnectionVersion}, 500, emptyProof, clientHeight, signer), commitmenttypes.ErrInvalidProof}, - {"empty singer", types.NewMsgConnectionOpenTry("clienttotesta", "connectiontotest", "clienttotest", prefix, []*types.Version{ibctesting.ConnectionVersion}, 500, suite.proof, clientHeight, ""), ibcerrors.ErrInvalidAddress}, - {"invalid version", types.NewMsgConnectionOpenTry("clienttotesta", "connectiontotest", "clienttotest", prefix, []*types.Version{{}}, 500, suite.proof, clientHeight, signer), types.ErrInvalidVersion}, - {"too many counterparty versions", types.NewMsgConnectionOpenTry("clienttotesta", "connectiontotest", "clienttotest", prefix, make([]*types.Version, types.MaxCounterpartyVersionsLength+1), 500, suite.proof, clientHeight, signer), ibcerrors.ErrInvalidVersion}, - {"too many features in counterparty version", types.NewMsgConnectionOpenTry("clienttotesta", "connectiontotest", "clienttotest", prefix, []*types.Version{{"v1", make([]string, types.MaxFeaturesLength+1)}}, 500, suite.proof, clientHeight, signer), types.ErrInvalidVersion}, + {"empty singer", types.NewMsgConnectionOpenTry("clienttotesta", "connectiontotest", "clienttotest", prefix, []*types.Version{ibctesting.ConnectionVersion}, 500, s.proof, clientHeight, ""), ibcerrors.ErrInvalidAddress}, + {"invalid version", types.NewMsgConnectionOpenTry("clienttotesta", "connectiontotest", "clienttotest", prefix, []*types.Version{{}}, 500, s.proof, clientHeight, signer), types.ErrInvalidVersion}, + {"too many counterparty versions", types.NewMsgConnectionOpenTry("clienttotesta", "connectiontotest", "clienttotest", prefix, make([]*types.Version, types.MaxCounterpartyVersionsLength+1), 500, s.proof, clientHeight, signer), ibcerrors.ErrInvalidVersion}, + {"too many features in counterparty version", types.NewMsgConnectionOpenTry("clienttotesta", "connectiontotest", "clienttotest", prefix, []*types.Version{{"v1", make([]string, types.MaxFeaturesLength+1)}}, 500, s.proof, clientHeight, signer), types.ErrInvalidVersion}, } for _, tc := range testCases { - err := tc.msg.ValidateBasic() if tc.expError == nil { - suite.Require().NoError(err, tc.name) + s.Require().NoError(err, tc.name) } else { - suite.Require().ErrorIs(err, tc.expError) + s.Require().ErrorIs(err, tc.expError) } } } -func (suite *MsgTestSuite) TestNewMsgConnectionOpenAck() { +func (s *MsgTestSuite) TestNewMsgConnectionOpenAck() { testCases := []struct { name string msg *types.MsgConnectionOpenAck expError error }{ - {"success", types.NewMsgConnectionOpenAck(connectionID, connectionID, suite.proof, clientHeight, ibctesting.ConnectionVersion, signer), nil}, - {"invalid connection ID", types.NewMsgConnectionOpenAck("test/conn1", connectionID, suite.proof, clientHeight, ibctesting.ConnectionVersion, signer), types.ErrInvalidConnectionIdentifier}, - {"invalid counterparty connection ID", types.NewMsgConnectionOpenAck(connectionID, "test/conn1", suite.proof, clientHeight, ibctesting.ConnectionVersion, signer), host.ErrInvalidID}, + {"success", types.NewMsgConnectionOpenAck(connectionID, connectionID, s.proof, clientHeight, 
ibctesting.ConnectionVersion, signer), nil}, + {"invalid connection ID", types.NewMsgConnectionOpenAck("test/conn1", connectionID, s.proof, clientHeight, ibctesting.ConnectionVersion, signer), types.ErrInvalidConnectionIdentifier}, + {"invalid counterparty connection ID", types.NewMsgConnectionOpenAck(connectionID, "test/conn1", s.proof, clientHeight, ibctesting.ConnectionVersion, signer), host.ErrInvalidID}, {"empty proofTry", types.NewMsgConnectionOpenAck(connectionID, connectionID, emptyProof, clientHeight, ibctesting.ConnectionVersion, signer), commitmenttypes.ErrInvalidProof}, - {"invalid version", types.NewMsgConnectionOpenAck(connectionID, connectionID, suite.proof, clientHeight, &types.Version{}, signer), types.ErrInvalidVersion}, - {"empty signer", types.NewMsgConnectionOpenAck(connectionID, connectionID, suite.proof, clientHeight, ibctesting.ConnectionVersion, ""), ibcerrors.ErrInvalidAddress}, + {"invalid version", types.NewMsgConnectionOpenAck(connectionID, connectionID, s.proof, clientHeight, &types.Version{}, signer), types.ErrInvalidVersion}, + {"empty signer", types.NewMsgConnectionOpenAck(connectionID, connectionID, s.proof, clientHeight, ibctesting.ConnectionVersion, ""), ibcerrors.ErrInvalidAddress}, } for _, tc := range testCases { - err := tc.msg.ValidateBasic() if tc.expError == nil { - suite.Require().NoError(err, tc.name) + s.Require().NoError(err, tc.name) } else { - suite.Require().ErrorIs(err, tc.expError) + s.Require().ErrorIs(err, tc.expError) } } } -func (suite *MsgTestSuite) TestNewMsgConnectionOpenConfirm() { +func (s *MsgTestSuite) TestNewMsgConnectionOpenConfirm() { testCases := []struct { name string msg *types.MsgConnectionOpenConfirm expError error }{ - {"invalid connection ID", types.NewMsgConnectionOpenConfirm("test/conn1", suite.proof, clientHeight, signer), types.ErrInvalidConnectionIdentifier}, + {"invalid connection ID", types.NewMsgConnectionOpenConfirm("test/conn1", s.proof, clientHeight, signer), types.ErrInvalidConnectionIdentifier}, {"empty proofTry", types.NewMsgConnectionOpenConfirm(connectionID, emptyProof, clientHeight, signer), commitmenttypes.ErrInvalidProof}, - {"empty signer", types.NewMsgConnectionOpenConfirm(connectionID, suite.proof, clientHeight, ""), ibcerrors.ErrInvalidAddress}, - {"success", types.NewMsgConnectionOpenConfirm(connectionID, suite.proof, clientHeight, signer), nil}, + {"empty signer", types.NewMsgConnectionOpenConfirm(connectionID, s.proof, clientHeight, ""), ibcerrors.ErrInvalidAddress}, + {"success", types.NewMsgConnectionOpenConfirm(connectionID, s.proof, clientHeight, signer), nil}, } for _, tc := range testCases { - err := tc.msg.ValidateBasic() if tc.expError == nil { - suite.Require().NoError(err, tc.name) + s.Require().NoError(err, tc.name) } else { - suite.Require().ErrorIs(err, tc.expError) + s.Require().ErrorIs(err, tc.expError) } } } // TestMsgUpdateParamsValidateBasic tests ValidateBasic for MsgUpdateParams -func (suite *MsgTestSuite) TestMsgUpdateParamsValidateBasic() { - signer := suite.chainA.App.GetIBCKeeper().GetAuthority() +func (s *MsgTestSuite) TestMsgUpdateParamsValidateBasic() { + signer := s.chainA.App.GetIBCKeeper().GetAuthority() testCases := []struct { name string msg *types.MsgUpdateParams @@ -229,12 +225,11 @@ func (suite *MsgTestSuite) TestMsgUpdateParamsValidateBasic() { } for _, tc := range testCases { - err := tc.msg.ValidateBasic() if tc.expError == nil { - suite.Require().NoError(err, tc.name) + s.Require().NoError(err, tc.name) } else { - suite.Require().ErrorContains(err, 
tc.expError.Error()) + s.Require().ErrorContains(err, tc.expError.Error()) } } } @@ -251,7 +246,6 @@ func TestMsgUpdateParamsGetSigners(t *testing.T) { } for _, tc := range testCases { - msg := types.MsgUpdateParams{ Signer: tc.address.String(), Params: types.DefaultParams(), diff --git a/modules/core/03-connection/types/params_legacy.go b/modules/core/03-connection/types/params_legacy.go deleted file mode 100644 index ac022dd1978..00000000000 --- a/modules/core/03-connection/types/params_legacy.go +++ /dev/null @@ -1,36 +0,0 @@ -/* -NOTE: Usage of x/params to manage parameters is deprecated in favor of x/gov -controlled execution of MsgUpdateParams messages. These types remains solely -for migration purposes and will be removed in a future release. -[#3621](https://github.com/cosmos/ibc-go/issues/3621) -*/ -package types - -import ( - "fmt" - - paramtypes "github.com/cosmos/cosmos-sdk/x/params/types" -) - -// KeyMaxExpectedTimePerBlock is store's key for MaxExpectedTimePerBlock parameter -var KeyMaxExpectedTimePerBlock = []byte("MaxExpectedTimePerBlock") - -// ParamKeyTable type declaration for parameters -func ParamKeyTable() paramtypes.KeyTable { - return paramtypes.NewKeyTable().RegisterParamSet(&Params{}) -} - -// ParamSetPairs implements params.ParamSet -func (p *Params) ParamSetPairs() paramtypes.ParamSetPairs { - return paramtypes.ParamSetPairs{ - paramtypes.NewParamSetPair(KeyMaxExpectedTimePerBlock, &p.MaxExpectedTimePerBlock, validateParams), - } -} - -func validateParams(i any) error { - _, ok := i.(uint64) - if !ok { - return fmt.Errorf("invalid parameter. expected %T, got type: %T", uint64(1), i) - } - return nil -} diff --git a/modules/core/03-connection/types/params_test.go b/modules/core/03-connection/types/params_test.go index 56b61c3aa97..73465f83179 100644 --- a/modules/core/03-connection/types/params_test.go +++ b/modules/core/03-connection/types/params_test.go @@ -21,7 +21,6 @@ func TestValidateParams(t *testing.T) { } for _, tc := range testCases { - err := tc.params.Validate() if tc.expError == nil { require.NoError(t, err, tc.name) diff --git a/modules/core/03-connection/types/query.go b/modules/core/03-connection/types/query.go index 3249da030f9..487da41a948 100644 --- a/modules/core/03-connection/types/query.go +++ b/modules/core/03-connection/types/query.go @@ -51,8 +51,8 @@ func NewQueryConnectionClientStateResponse(identifiedClientState clienttypes.Ide } // UnpackInterfaces implements UnpackInterfacesMessage.UnpackInterfaces -func (qccsr QueryConnectionClientStateResponse) UnpackInterfaces(unpacker codectypes.AnyUnpacker) error { - return qccsr.IdentifiedClientState.UnpackInterfaces(unpacker) +func (resp QueryConnectionClientStateResponse) UnpackInterfaces(unpacker codectypes.AnyUnpacker) error { + return resp.IdentifiedClientState.UnpackInterfaces(unpacker) } // NewQueryConnectionConsensusStateResponse creates a newQueryConnectionConsensusStateResponse instance @@ -66,6 +66,6 @@ func NewQueryConnectionConsensusStateResponse(clientID string, anyConsensusState } // UnpackInterfaces implements UnpackInterfacesMessage.UnpackInterfaces -func (qccsr QueryConnectionConsensusStateResponse) UnpackInterfaces(unpacker codectypes.AnyUnpacker) error { - return unpacker.UnpackAny(qccsr.ConsensusState, new(exported.ConsensusState)) +func (resp QueryConnectionConsensusStateResponse) UnpackInterfaces(unpacker codectypes.AnyUnpacker) error { + return unpacker.UnpackAny(resp.ConsensusState, new(exported.ConsensusState)) } diff --git 
a/modules/core/03-connection/types/query.pb.go b/modules/core/03-connection/types/query.pb.go index c7d5a74496b..ade90fc458c 100644 --- a/modules/core/03-connection/types/query.pb.go +++ b/modules/core/03-connection/types/query.pb.go @@ -1027,6 +1027,7 @@ func _Query_ConnectionParams_Handler(srv interface{}, ctx context.Context, dec f return interceptor(ctx, in, info, handler) } +var Query_serviceDesc = _Query_serviceDesc var _Query_serviceDesc = grpc.ServiceDesc{ ServiceName: "ibc.core.connection.v1.Query", HandlerType: (*QueryServer)(nil), diff --git a/modules/core/03-connection/types/tx.pb.go b/modules/core/03-connection/types/tx.pb.go index 3ce43b4af74..1e01560ff5e 100644 --- a/modules/core/03-connection/types/tx.pb.go +++ b/modules/core/03-connection/types/tx.pb.go @@ -749,6 +749,7 @@ func _Msg_UpdateConnectionParams_Handler(srv interface{}, ctx context.Context, d return interceptor(ctx, in, info, handler) } +var Msg_serviceDesc = _Msg_serviceDesc var _Msg_serviceDesc = grpc.ServiceDesc{ ServiceName: "ibc.core.connection.v1.Msg", HandlerType: (*MsgServer)(nil), diff --git a/modules/core/03-connection/types/version.go b/modules/core/03-connection/types/version.go index c30c709277e..e51f15021e0 100644 --- a/modules/core/03-connection/types/version.go +++ b/modules/core/03-connection/types/version.go @@ -43,13 +43,13 @@ func NewVersion(identifier string, features []string) *Version { } // GetIdentifier implements the VersionI interface -func (version Version) GetIdentifier() string { - return version.Identifier +func (v Version) GetIdentifier() string { + return v.Identifier } // GetFeatures implements the VersionI interface -func (version Version) GetFeatures() []string { - return version.Features +func (v Version) GetFeatures() []string { + return v.Features } // ValidateVersion does basic validation of the version identifier and @@ -77,11 +77,11 @@ func ValidateVersion(version *Version) error { // proposed version is supported by this chain. If the feature set is // empty it verifies that this is allowed for the specified version // identifier. -func (version Version) VerifyProposedVersion(proposedVersion *Version) error { - if proposedVersion.GetIdentifier() != version.GetIdentifier() { +func (v Version) VerifyProposedVersion(proposedVersion *Version) error { + if proposedVersion.GetIdentifier() != v.GetIdentifier() { return errorsmod.Wrapf( ErrVersionNegotiationFailed, - "proposed version identifier does not equal supported version identifier (%s != %s)", proposedVersion.GetIdentifier(), version.GetIdentifier(), + "proposed version identifier does not equal supported version identifier (%s != %s)", proposedVersion.GetIdentifier(), v.GetIdentifier(), ) } @@ -93,10 +93,10 @@ func (version Version) VerifyProposedVersion(proposedVersion *Version) error { } for _, proposedFeature := range proposedVersion.GetFeatures() { - if !slices.Contains(version.GetFeatures(), proposedFeature) { + if !slices.Contains(v.GetFeatures(), proposedFeature) { return errorsmod.Wrapf( ErrVersionNegotiationFailed, - "proposed feature (%s) is not a supported feature set (%s)", proposedFeature, version.GetFeatures(), + "proposed feature (%s) is not a supported feature set (%s)", proposedFeature, v.GetFeatures(), ) } } @@ -181,7 +181,8 @@ func PickVersion(supportedVersions, counterpartyVersions []*Version) (*Version, // and the counterparty feature set. This is done by iterating over all the // features in the source version and seeing if they exist in the feature // set for the counterparty version. 
-func GetFeatureSetIntersection(sourceFeatureSet, counterpartyFeatureSet []string) (featureSet []string) { +func GetFeatureSetIntersection(sourceFeatureSet, counterpartyFeatureSet []string) []string { + var featureSet []string for _, feature := range sourceFeatureSet { if slices.Contains(counterpartyFeatureSet, feature) { featureSet = append(featureSet, feature) diff --git a/modules/core/03-connection/types/version_test.go b/modules/core/03-connection/types/version_test.go index 21c58cea276..96e08dd1a21 100644 --- a/modules/core/03-connection/types/version_test.go +++ b/modules/core/03-connection/types/version_test.go @@ -77,7 +77,6 @@ func TestFindSupportedVersion(t *testing.T) { } for i, tc := range testCases { - version, found := types.FindSupportedVersion(tc.version, tc.supportedVersions) if tc.expFound { require.Equal(t, tc.expVersion.GetIdentifier(), version.GetIdentifier(), "test case %d: %s", i, tc.name) diff --git a/modules/core/04-channel/client/utils/utils.go b/modules/core/04-channel/client/utils/utils.go index 12403d92e7d..37d7ff3b38f 100644 --- a/modules/core/04-channel/client/utils/utils.go +++ b/modules/core/04-channel/client/utils/utils.go @@ -13,6 +13,7 @@ import ( clienttypes "github.com/cosmos/ibc-go/v10/modules/core/02-client/types" "github.com/cosmos/ibc-go/v10/modules/core/04-channel/types" host "github.com/cosmos/ibc-go/v10/modules/core/24-host" + hostv2 "github.com/cosmos/ibc-go/v10/modules/core/24-host/v2" ibcclient "github.com/cosmos/ibc-go/v10/modules/core/client" ) @@ -179,7 +180,7 @@ func QueryNextSequenceSend( } func queryNextSequenceSendABCI(clientCtx client.Context, portID, channelID string) (*types.QueryNextSequenceSendResponse, error) { - key := host.NextSequenceSendKey(portID, channelID) + key := hostv2.NextSequenceSendKey(channelID) value, proofBz, proofHeight, err := ibcclient.QueryTendermintProof(clientCtx, key) if err != nil { diff --git a/modules/core/04-channel/keeper/ante.go b/modules/core/04-channel/keeper/ante.go index a2edcef9bec..d8c20c908d7 100644 --- a/modules/core/04-channel/keeper/ante.go +++ b/modules/core/04-channel/keeper/ante.go @@ -16,9 +16,5 @@ func (k *Keeper) RecvPacketReCheckTx(ctx sdk.Context, packet types.Packet) error return errorsmod.Wrap(types.ErrChannelNotFound, packet.GetDestChannel()) } - if err := k.applyReplayProtection(ctx, packet, channel); err != nil { - return err - } - - return nil + return k.applyReplayProtection(ctx, packet, channel) } diff --git a/modules/core/04-channel/keeper/ante_test.go b/modules/core/04-channel/keeper/ante_test.go index 18f5de6cbcf..01fc7ac55eb 100644 --- a/modules/core/04-channel/keeper/ante_test.go +++ b/modules/core/04-channel/keeper/ante_test.go @@ -5,7 +5,7 @@ import ( ibctesting "github.com/cosmos/ibc-go/v10/testing" ) -func (suite *KeeperTestSuite) TestRecvPacketReCheckTx() { +func (s *KeeperTestSuite) TestRecvPacketReCheckTx() { var ( path *ibctesting.Path packet types.Packet @@ -31,31 +31,31 @@ func (suite *KeeperTestSuite) TestRecvPacketReCheckTx() { { "redundant relay", func() { - err := suite.chainB.App.GetIBCKeeper().ChannelKeeper.RecvPacketReCheckTx(suite.chainB.GetContext(), packet) - suite.Require().NoError(err) + err := s.chainB.App.GetIBCKeeper().ChannelKeeper.RecvPacketReCheckTx(s.chainB.GetContext(), packet) + s.Require().NoError(err) }, types.ErrNoOpMsg, }, } for _, tc := range testCases { - suite.Run(tc.name, func() { - suite.SetupTest() // reset - path = ibctesting.NewPath(suite.chainA, suite.chainB) + s.Run(tc.name, func() { + s.SetupTest() // reset + path = 
ibctesting.NewPath(s.chainA, s.chainB) path.Setup() sequence, err := path.EndpointA.SendPacket(defaultTimeoutHeight, disabledTimeoutTimestamp, ibctesting.MockPacketData) - suite.Require().NoError(err) + s.Require().NoError(err) packet = types.NewPacket(ibctesting.MockPacketData, sequence, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, defaultTimeoutHeight, disabledTimeoutTimestamp) tc.malleate() - err = suite.chainB.App.GetIBCKeeper().ChannelKeeper.RecvPacketReCheckTx(suite.chainB.GetContext(), packet) + err = s.chainB.App.GetIBCKeeper().ChannelKeeper.RecvPacketReCheckTx(s.chainB.GetContext(), packet) if tc.expError == nil { - suite.Require().NoError(err) + s.Require().NoError(err) } else { - suite.Require().ErrorIs(err, tc.expError) + s.Require().ErrorIs(err, tc.expError) } }) } diff --git a/modules/core/04-channel/keeper/grpc_query.go b/modules/core/04-channel/keeper/grpc_query.go index 7be35383b5a..e3c6c3bd119 100644 --- a/modules/core/04-channel/keeper/grpc_query.go +++ b/modules/core/04-channel/keeper/grpc_query.go @@ -372,7 +372,7 @@ func (q *queryServer) PacketAcknowledgements(goCtx context.Context, req *types.Q errorsmod.Wrapf(types.ErrChannelNotFound, "port ID (%s) channel ID (%s)", req.PortId, req.ChannelId).Error(), ) } - var acks []*types.PacketState + var acks []*types.PacketState // nolint: prealloc store := prefix.NewStore(runtime.KVStoreAdapter(q.storeService.OpenKVStore(ctx)), host.PacketAcknowledgementPrefixKey(req.PortId, req.ChannelId)) // if a list of packet sequences is provided then query for each specific ack and return a list <= len(req.PacketCommitmentSequences) @@ -552,7 +552,6 @@ func (q *queryServer) UnreceivedAcks(goCtx context.Context, req *types.QueryUnre if commitment := q.GetPacketCommitment(ctx, req.PortId, req.ChannelId, seq); len(commitment) != 0 { unreceivedSequences = append(unreceivedSequences, seq) } - } selfHeight := clienttypes.GetSelfHeight(ctx) diff --git a/modules/core/04-channel/keeper/grpc_query_test.go b/modules/core/04-channel/keeper/grpc_query_test.go index 899dfdf7b88..de47aa944fe 100644 --- a/modules/core/04-channel/keeper/grpc_query_test.go +++ b/modules/core/04-channel/keeper/grpc_query_test.go @@ -22,7 +22,7 @@ import ( const doesnotexist = "doesnotexist" -func (suite *KeeperTestSuite) TestQueryChannel() { +func (s *KeeperTestSuite) TestQueryChannel() { var ( req *types.QueryChannelRequest expChannel types.Channel @@ -82,13 +82,13 @@ func (suite *KeeperTestSuite) TestQueryChannel() { { "success", func() { - path := ibctesting.NewPath(suite.chainA, suite.chainB) + path := ibctesting.NewPath(s.chainA, s.chainB) path.SetupConnections() path.SetChannelOrdered() // init channel err := path.EndpointA.ChanOpenInit() - suite.Require().NoError(err) + s.Require().NoError(err) expChannel = path.EndpointA.GetChannel() @@ -102,28 +102,28 @@ func (suite *KeeperTestSuite) TestQueryChannel() { } for _, tc := range testCases { - suite.Run(fmt.Sprintf("Case %s", tc.msg), func() { - suite.SetupTest() // reset + s.Run(fmt.Sprintf("Case %s", tc.msg), func() { + s.SetupTest() // reset tc.malleate() - ctx := suite.chainA.GetContext() + ctx := s.chainA.GetContext() - queryServer := keeper.NewQueryServer(suite.chainA.App.GetIBCKeeper().ChannelKeeper) + queryServer := keeper.NewQueryServer(s.chainA.App.GetIBCKeeper().ChannelKeeper) res, err := queryServer.Channel(ctx, req) if tc.expErr == nil { - suite.Require().NoError(err) - suite.Require().NotNil(res) - 
suite.Require().Equal(&expChannel, res.Channel) + s.Require().NoError(err) + s.Require().NotNil(res) + s.Require().Equal(&expChannel, res.Channel) } else { - suite.Require().Error(err) - suite.Require().ErrorIs(err, tc.expErr) + s.Require().Error(err) + s.Require().ErrorIs(err, tc.expErr) } }) } } -func (suite *KeeperTestSuite) TestQueryChannels() { +func (s *KeeperTestSuite) TestQueryChannels() { var ( req *types.QueryChannelsRequest expChannels = []*types.IdentifiedChannel(nil) @@ -151,7 +151,7 @@ func (suite *KeeperTestSuite) TestQueryChannels() { { "success", func() { - path := ibctesting.NewPath(suite.chainA, suite.chainB) + path := ibctesting.NewPath(s.chainA, s.chainB) path.Setup() // channel0 on first connection on chainA counterparty0 := types.Counterparty{ @@ -160,14 +160,14 @@ func (suite *KeeperTestSuite) TestQueryChannels() { } // path1 creates a second channel on first connection on chainA - path1 := ibctesting.NewPath(suite.chainA, suite.chainB) + path1 := ibctesting.NewPath(s.chainA, s.chainB) path1.SetChannelOrdered() path1.EndpointA.ClientID = path.EndpointA.ClientID path1.EndpointB.ClientID = path.EndpointB.ClientID path1.EndpointA.ConnectionID = path.EndpointA.ConnectionID path1.EndpointB.ConnectionID = path.EndpointB.ConnectionID - suite.coordinator.CreateMockChannels(path1) + s.coordinator.CreateMockChannels(path1) counterparty1 := types.Counterparty{ PortId: path1.EndpointB.ChannelConfig.PortID, ChannelId: path1.EndpointB.ChannelID, @@ -200,29 +200,29 @@ func (suite *KeeperTestSuite) TestQueryChannels() { } for _, tc := range testCases { - suite.Run(fmt.Sprintf("Case %s", tc.msg), func() { - suite.SetupTest() // reset + s.Run(fmt.Sprintf("Case %s", tc.msg), func() { + s.SetupTest() // reset tc.malleate() - ctx := suite.chainA.GetContext() + ctx := s.chainA.GetContext() - queryServer := keeper.NewQueryServer(suite.chainA.App.GetIBCKeeper().ChannelKeeper) + queryServer := keeper.NewQueryServer(s.chainA.App.GetIBCKeeper().ChannelKeeper) res, err := queryServer.Channels(ctx, req) if tc.expErr == nil { - suite.Require().NoError(err) - suite.Require().NotNil(res) - suite.Require().Equal(len(expChannels), int(res.Pagination.Total)) - suite.Require().ElementsMatch(expChannels, res.Channels) // order of channels is not guaranteed, due to lexicographical ordering + s.Require().NoError(err) + s.Require().NotNil(res) + s.Require().Equal(len(expChannels), int(res.Pagination.Total)) + s.Require().ElementsMatch(expChannels, res.Channels) // order of channels is not guaranteed, due to lexicographical ordering } else { - suite.Require().Error(err) - suite.Require().ErrorIs(err, tc.expErr) + s.Require().Error(err) + s.Require().ErrorIs(err, tc.expErr) } }) } } -func (suite *KeeperTestSuite) TestQueryConnectionChannels() { +func (s *KeeperTestSuite) TestQueryConnectionChannels() { var ( req *types.QueryConnectionChannelsRequest expChannels = []*types.IdentifiedChannel{} @@ -255,7 +255,7 @@ func (suite *KeeperTestSuite) TestQueryConnectionChannels() { { "success", func() { - path := ibctesting.NewPath(suite.chainA, suite.chainB) + path := ibctesting.NewPath(s.chainA, s.chainB) path.Setup() // channel0 on first connection on chainA counterparty0 := types.Counterparty{ @@ -264,14 +264,14 @@ func (suite *KeeperTestSuite) TestQueryConnectionChannels() { } // path1 creates a second channel on first connection on chainA - path1 := ibctesting.NewPath(suite.chainA, suite.chainB) + path1 := ibctesting.NewPath(s.chainA, s.chainB) path1.SetChannelOrdered() path1.EndpointA.ClientID = 
path.EndpointA.ClientID path1.EndpointB.ClientID = path.EndpointB.ClientID path1.EndpointA.ConnectionID = path.EndpointA.ConnectionID path1.EndpointB.ConnectionID = path.EndpointB.ConnectionID - suite.coordinator.CreateMockChannels(path1) + s.coordinator.CreateMockChannels(path1) counterparty1 := types.Counterparty{ PortId: path1.EndpointB.ChannelConfig.PortID, ChannelId: path1.EndpointB.ChannelID, @@ -305,7 +305,7 @@ func (suite *KeeperTestSuite) TestQueryConnectionChannels() { { "success, empty response", func() { - path := ibctesting.NewPath(suite.chainA, suite.chainB) + path := ibctesting.NewPath(s.chainA, s.chainB) path.Setup() expChannels = []*types.IdentifiedChannel(nil) req = &types.QueryConnectionChannelsRequest{ @@ -322,28 +322,28 @@ func (suite *KeeperTestSuite) TestQueryConnectionChannels() { } for _, tc := range testCases { - suite.Run(fmt.Sprintf("Case %s", tc.msg), func() { - suite.SetupTest() // reset + s.Run(fmt.Sprintf("Case %s", tc.msg), func() { + s.SetupTest() // reset tc.malleate() - ctx := suite.chainA.GetContext() + ctx := s.chainA.GetContext() - queryServer := keeper.NewQueryServer(suite.chainA.App.GetIBCKeeper().ChannelKeeper) + queryServer := keeper.NewQueryServer(s.chainA.App.GetIBCKeeper().ChannelKeeper) res, err := queryServer.ConnectionChannels(ctx, req) if tc.expErr == nil { - suite.Require().NoError(err) - suite.Require().NotNil(res) - suite.Require().Equal(expChannels, res.Channels) + s.Require().NoError(err) + s.Require().NotNil(res) + s.Require().Equal(expChannels, res.Channels) } else { - suite.Require().Error(err) - suite.Require().ErrorIs(err, tc.expErr) + s.Require().Error(err) + s.Require().ErrorIs(err, tc.expErr) } }) } } -func (suite *KeeperTestSuite) TestQueryChannelClientState() { +func (s *KeeperTestSuite) TestQueryChannelClientState() { var ( req *types.QueryChannelClientStateRequest expIdentifiedClientState clienttypes.IdentifiedClientState @@ -403,7 +403,7 @@ func (suite *KeeperTestSuite) TestQueryChannelClientState() { { "connection not found", func() { - path := ibctesting.NewPath(suite.chainA, suite.chainB) + path := ibctesting.NewPath(s.chainA, s.chainB) path.Setup() channel := path.EndpointA.GetChannel() @@ -411,7 +411,7 @@ func (suite *KeeperTestSuite) TestQueryChannelClientState() { channel.ConnectionHops[0] = doesnotexist // set connection hops to wrong connection ID - suite.chainA.App.GetIBCKeeper().ChannelKeeper.SetChannel(suite.chainA.GetContext(), path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, channel) + s.chainA.App.GetIBCKeeper().ChannelKeeper.SetChannel(s.chainA.GetContext(), path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, channel) req = &types.QueryChannelClientStateRequest{ PortId: path.EndpointA.ChannelConfig.PortID, @@ -425,11 +425,11 @@ func (suite *KeeperTestSuite) TestQueryChannelClientState() { { "client state for channel's connection not found", func() { - path := ibctesting.NewPath(suite.chainA, suite.chainB) + path := ibctesting.NewPath(s.chainA, s.chainB) path.Setup() // set connection to empty so clientID is empty - suite.chainA.App.GetIBCKeeper().ConnectionKeeper.SetConnection(suite.chainA.GetContext(), path.EndpointA.ConnectionID, connectiontypes.ConnectionEnd{}) + s.chainA.App.GetIBCKeeper().ConnectionKeeper.SetConnection(s.chainA.GetContext(), path.EndpointA.ConnectionID, connectiontypes.ConnectionEnd{}) req = &types.QueryChannelClientStateRequest{ PortId: path.EndpointA.ChannelConfig.PortID, @@ -443,15 +443,15 @@ func (suite *KeeperTestSuite) TestQueryChannelClientState() { { 
"success", func() { - path := ibctesting.NewPath(suite.chainA, suite.chainB) + path := ibctesting.NewPath(s.chainA, s.chainB) path.SetupConnections() path.SetChannelOrdered() // init channel err := path.EndpointA.ChanOpenInit() - suite.Require().NoError(err) + s.Require().NoError(err) - expClientState := suite.chainA.GetClientState(path.EndpointA.ClientID) + expClientState := s.chainA.GetClientState(path.EndpointA.ClientID) expIdentifiedClientState = clienttypes.NewIdentifiedClientState(path.EndpointA.ClientID, expClientState) req = &types.QueryChannelClientStateRequest{ @@ -464,32 +464,32 @@ func (suite *KeeperTestSuite) TestQueryChannelClientState() { } for _, tc := range testCases { - suite.Run(fmt.Sprintf("Case %s", tc.msg), func() { - suite.SetupTest() // reset + s.Run(fmt.Sprintf("Case %s", tc.msg), func() { + s.SetupTest() // reset tc.malleate() - ctx := suite.chainA.GetContext() + ctx := s.chainA.GetContext() - queryServer := keeper.NewQueryServer(suite.chainA.App.GetIBCKeeper().ChannelKeeper) + queryServer := keeper.NewQueryServer(s.chainA.App.GetIBCKeeper().ChannelKeeper) res, err := queryServer.ChannelClientState(ctx, req) if tc.expErr == nil { - suite.Require().NoError(err) - suite.Require().NotNil(res) - suite.Require().Equal(&expIdentifiedClientState, res.IdentifiedClientState) + s.Require().NoError(err) + s.Require().NotNil(res) + s.Require().Equal(&expIdentifiedClientState, res.IdentifiedClientState) // ensure UnpackInterfaces is defined cachedValue := res.IdentifiedClientState.ClientState.GetCachedValue() - suite.Require().NotNil(cachedValue) + s.Require().NotNil(cachedValue) } else { - suite.Require().Error(err) - suite.Require().ErrorIs(err, tc.expErr) + s.Require().Error(err) + s.Require().ErrorIs(err, tc.expErr) } }) } } -func (suite *KeeperTestSuite) TestQueryChannelConsensusState() { +func (s *KeeperTestSuite) TestQueryChannelConsensusState() { var ( req *types.QueryChannelConsensusStateRequest expConsensusState exported.ConsensusState @@ -556,7 +556,7 @@ func (suite *KeeperTestSuite) TestQueryChannelConsensusState() { { "connection not found", func() { - path := ibctesting.NewPath(suite.chainA, suite.chainB) + path := ibctesting.NewPath(s.chainA, s.chainB) path.Setup() channel := path.EndpointA.GetChannel() @@ -564,7 +564,7 @@ func (suite *KeeperTestSuite) TestQueryChannelConsensusState() { channel.ConnectionHops[0] = doesnotexist // set connection hops to wrong connection ID - suite.chainA.App.GetIBCKeeper().ChannelKeeper.SetChannel(suite.chainA.GetContext(), path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, channel) + s.chainA.App.GetIBCKeeper().ChannelKeeper.SetChannel(s.chainA.GetContext(), path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, channel) req = &types.QueryChannelConsensusStateRequest{ PortId: path.EndpointA.ChannelConfig.PortID, @@ -580,14 +580,14 @@ func (suite *KeeperTestSuite) TestQueryChannelConsensusState() { { "consensus state for channel's connection not found", func() { - path := ibctesting.NewPath(suite.chainA, suite.chainB) + path := ibctesting.NewPath(s.chainA, s.chainB) path.Setup() req = &types.QueryChannelConsensusStateRequest{ PortId: path.EndpointA.ChannelConfig.PortID, ChannelId: path.EndpointA.ChannelID, RevisionNumber: 0, - RevisionHeight: uint64(suite.chainA.GetContext().BlockHeight()), // use current height + RevisionHeight: uint64(s.chainA.GetContext().BlockHeight()), // use current height } }, status.Error( codes.NotFound, @@ -597,16 +597,16 @@ func (suite *KeeperTestSuite) 
TestQueryChannelConsensusState() { { "success", func() { - path := ibctesting.NewPath(suite.chainA, suite.chainB) + path := ibctesting.NewPath(s.chainA, s.chainB) path.SetupConnections() path.SetChannelOrdered() // init channel err := path.EndpointA.ChanOpenInit() - suite.Require().NoError(err) + s.Require().NoError(err) - expConsensusState, _ = suite.chainA.GetConsensusState(path.EndpointA.ClientID, path.EndpointA.GetClientLatestHeight()) - suite.Require().NotNil(expConsensusState) + expConsensusState, _ = s.chainA.GetConsensusState(path.EndpointA.ClientID, path.EndpointA.GetClientLatestHeight()) + s.Require().NotNil(expConsensusState) expClientID = path.EndpointA.ClientID req = &types.QueryChannelConsensusStateRequest{ @@ -621,35 +621,35 @@ func (suite *KeeperTestSuite) TestQueryChannelConsensusState() { } for _, tc := range testCases { - suite.Run(fmt.Sprintf("Case %s", tc.msg), func() { - suite.SetupTest() // reset + s.Run(fmt.Sprintf("Case %s", tc.msg), func() { + s.SetupTest() // reset tc.malleate() - ctx := suite.chainA.GetContext() + ctx := s.chainA.GetContext() - queryServer := keeper.NewQueryServer(suite.chainA.App.GetIBCKeeper().ChannelKeeper) + queryServer := keeper.NewQueryServer(s.chainA.App.GetIBCKeeper().ChannelKeeper) res, err := queryServer.ChannelConsensusState(ctx, req) if tc.expErr == nil { - suite.Require().NoError(err) - suite.Require().NotNil(res) + s.Require().NoError(err) + s.Require().NotNil(res) consensusState, err := clienttypes.UnpackConsensusState(res.ConsensusState) - suite.Require().NoError(err) - suite.Require().Equal(expConsensusState, consensusState) - suite.Require().Equal(expClientID, res.ClientId) + s.Require().NoError(err) + s.Require().Equal(expConsensusState, consensusState) + s.Require().Equal(expClientID, res.ClientId) // ensure UnpackInterfaces is defined cachedValue := res.ConsensusState.GetCachedValue() - suite.Require().NotNil(cachedValue) + s.Require().NotNil(cachedValue) } else { - suite.Require().Error(err) - suite.Require().ErrorIs(err, tc.expErr) + s.Require().Error(err) + s.Require().ErrorIs(err, tc.expErr) } }) } } -func (suite *KeeperTestSuite) TestQueryPacketCommitment() { +func (s *KeeperTestSuite) TestQueryPacketCommitment() { var ( req *types.QueryPacketCommitmentRequest expCommitment []byte @@ -726,10 +726,10 @@ func (suite *KeeperTestSuite) TestQueryPacketCommitment() { { "commitment not found", func() { - path := ibctesting.NewPath(suite.chainA, suite.chainB) + path := ibctesting.NewPath(s.chainA, s.chainB) path.Setup() expCommitment = []byte("hash") - suite.chainA.App.GetIBCKeeper().ChannelKeeper.SetPacketCommitment(suite.chainA.GetContext(), path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, 1, expCommitment) + s.chainA.App.GetIBCKeeper().ChannelKeeper.SetPacketCommitment(s.chainA.GetContext(), path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, 1, expCommitment) req = &types.QueryPacketCommitmentRequest{ PortId: path.EndpointA.ChannelConfig.PortID, ChannelId: path.EndpointA.ChannelID, @@ -757,10 +757,10 @@ func (suite *KeeperTestSuite) TestQueryPacketCommitment() { { "success", func() { - path := ibctesting.NewPath(suite.chainA, suite.chainB) + path := ibctesting.NewPath(s.chainA, s.chainB) path.Setup() expCommitment = []byte("hash") - suite.chainA.App.GetIBCKeeper().ChannelKeeper.SetPacketCommitment(suite.chainA.GetContext(), path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, 1, expCommitment) + s.chainA.App.GetIBCKeeper().ChannelKeeper.SetPacketCommitment(s.chainA.GetContext(), 
path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, 1, expCommitment) req = &types.QueryPacketCommitmentRequest{ PortId: path.EndpointA.ChannelConfig.PortID, @@ -773,28 +773,28 @@ func (suite *KeeperTestSuite) TestQueryPacketCommitment() { } for _, tc := range testCases { - suite.Run(fmt.Sprintf("Case %s", tc.msg), func() { - suite.SetupTest() // reset + s.Run(fmt.Sprintf("Case %s", tc.msg), func() { + s.SetupTest() // reset tc.malleate() - ctx := suite.chainA.GetContext() + ctx := s.chainA.GetContext() - queryServer := keeper.NewQueryServer(suite.chainA.App.GetIBCKeeper().ChannelKeeper) + queryServer := keeper.NewQueryServer(s.chainA.App.GetIBCKeeper().ChannelKeeper) res, err := queryServer.PacketCommitment(ctx, req) if tc.expErr == nil { - suite.Require().NoError(err) - suite.Require().NotNil(res) - suite.Require().Equal(expCommitment, res.Commitment) + s.Require().NoError(err) + s.Require().NotNil(res) + s.Require().Equal(expCommitment, res.Commitment) } else { - suite.Require().Error(err) - suite.Require().ErrorIs(err, tc.expErr) + s.Require().Error(err) + s.Require().ErrorIs(err, tc.expErr) } }) } } -func (suite *KeeperTestSuite) TestQueryPacketCommitments() { +func (s *KeeperTestSuite) TestQueryPacketCommitments() { var ( req *types.QueryPacketCommitmentsRequest expCommitments = []*types.PacketState{} @@ -841,14 +841,14 @@ func (suite *KeeperTestSuite) TestQueryPacketCommitments() { { "success", func() { - path := ibctesting.NewPath(suite.chainA, suite.chainB) + path := ibctesting.NewPath(s.chainA, s.chainB) path.Setup() expCommitments = make([]*types.PacketState, 9) - for i := uint64(0); i < 9; i++ { + for i := range uint64(9) { commitment := types.NewPacketState(path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, i, fmt.Appendf(nil, "hash_%d", i)) - suite.chainA.App.GetIBCKeeper().ChannelKeeper.SetPacketCommitment(suite.chainA.GetContext(), commitment.PortId, commitment.ChannelId, commitment.Sequence, commitment.Data) + s.chainA.App.GetIBCKeeper().ChannelKeeper.SetPacketCommitment(s.chainA.GetContext(), commitment.PortId, commitment.ChannelId, commitment.Sequence, commitment.Data) expCommitments[i] = &commitment } @@ -867,28 +867,28 @@ func (suite *KeeperTestSuite) TestQueryPacketCommitments() { } for _, tc := range testCases { - suite.Run(fmt.Sprintf("Case %s", tc.msg), func() { - suite.SetupTest() // reset + s.Run(fmt.Sprintf("Case %s", tc.msg), func() { + s.SetupTest() // reset tc.malleate() - ctx := suite.chainA.GetContext() + ctx := s.chainA.GetContext() - queryServer := keeper.NewQueryServer(suite.chainA.App.GetIBCKeeper().ChannelKeeper) + queryServer := keeper.NewQueryServer(s.chainA.App.GetIBCKeeper().ChannelKeeper) res, err := queryServer.PacketCommitments(ctx, req) if tc.expErr == nil { - suite.Require().NoError(err) - suite.Require().NotNil(res) - suite.Require().Equal(expCommitments, res.Commitments) + s.Require().NoError(err) + s.Require().NotNil(res) + s.Require().Equal(expCommitments, res.Commitments) } else { - suite.Require().Error(err) - suite.Require().ErrorIs(err, tc.expErr) + s.Require().Error(err) + s.Require().ErrorIs(err, tc.expErr) } }) } } -func (suite *KeeperTestSuite) TestQueryPacketReceipt() { +func (s *KeeperTestSuite) TestQueryPacketReceipt() { var ( req *types.QueryPacketReceiptRequest expReceived bool @@ -965,9 +965,9 @@ func (suite *KeeperTestSuite) TestQueryPacketReceipt() { { "success: receipt not found", func() { - path := ibctesting.NewPath(suite.chainA, suite.chainB) + path := ibctesting.NewPath(s.chainA, s.chainB) 
path.Setup() - suite.chainA.App.GetIBCKeeper().ChannelKeeper.SetPacketReceipt(suite.chainA.GetContext(), path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, 1) + s.chainA.App.GetIBCKeeper().ChannelKeeper.SetPacketReceipt(s.chainA.GetContext(), path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, 1) req = &types.QueryPacketReceiptRequest{ PortId: path.EndpointA.ChannelConfig.PortID, @@ -981,9 +981,9 @@ func (suite *KeeperTestSuite) TestQueryPacketReceipt() { { "success: receipt found", func() { - path := ibctesting.NewPath(suite.chainA, suite.chainB) + path := ibctesting.NewPath(s.chainA, s.chainB) path.Setup() - suite.chainA.App.GetIBCKeeper().ChannelKeeper.SetPacketReceipt(suite.chainA.GetContext(), path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, 1) + s.chainA.App.GetIBCKeeper().ChannelKeeper.SetPacketReceipt(s.chainA.GetContext(), path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, 1) req = &types.QueryPacketReceiptRequest{ PortId: path.EndpointA.ChannelConfig.PortID, @@ -997,28 +997,28 @@ func (suite *KeeperTestSuite) TestQueryPacketReceipt() { } for _, tc := range testCases { - suite.Run(fmt.Sprintf("Case %s", tc.msg), func() { - suite.SetupTest() // reset + s.Run(fmt.Sprintf("Case %s", tc.msg), func() { + s.SetupTest() // reset tc.malleate() - ctx := suite.chainA.GetContext() + ctx := s.chainA.GetContext() - queryServer := keeper.NewQueryServer(suite.chainA.App.GetIBCKeeper().ChannelKeeper) + queryServer := keeper.NewQueryServer(s.chainA.App.GetIBCKeeper().ChannelKeeper) res, err := queryServer.PacketReceipt(ctx, req) if tc.expErr == nil { - suite.Require().NoError(err) - suite.Require().NotNil(res) - suite.Require().Equal(expReceived, res.Received) + s.Require().NoError(err) + s.Require().NotNil(res) + s.Require().Equal(expReceived, res.Received) } else { - suite.Require().Error(err) - suite.Require().ErrorIs(err, tc.expErr) + s.Require().Error(err) + s.Require().ErrorIs(err, tc.expErr) } }) } } -func (suite *KeeperTestSuite) TestQueryPacketAcknowledgement() { +func (s *KeeperTestSuite) TestQueryPacketAcknowledgement() { var ( req *types.QueryPacketAcknowledgementRequest expAck []byte @@ -1081,10 +1081,10 @@ func (suite *KeeperTestSuite) TestQueryPacketAcknowledgement() { { "ack not found", func() { - path := ibctesting.NewPath(suite.chainA, suite.chainB) + path := ibctesting.NewPath(s.chainA, s.chainB) path.Setup() expAck = []byte("hash") - suite.chainA.App.GetIBCKeeper().ChannelKeeper.SetPacketAcknowledgement(suite.chainA.GetContext(), path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, 1, expAck) + s.chainA.App.GetIBCKeeper().ChannelKeeper.SetPacketAcknowledgement(s.chainA.GetContext(), path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, 1, expAck) req = &types.QueryPacketAcknowledgementRequest{ PortId: path.EndpointA.ChannelConfig.PortID, @@ -1114,10 +1114,10 @@ func (suite *KeeperTestSuite) TestQueryPacketAcknowledgement() { { "success", func() { - path := ibctesting.NewPath(suite.chainA, suite.chainB) + path := ibctesting.NewPath(s.chainA, s.chainB) path.Setup() expAck = []byte("hash") - suite.chainA.App.GetIBCKeeper().ChannelKeeper.SetPacketAcknowledgement(suite.chainA.GetContext(), path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, 1, expAck) + s.chainA.App.GetIBCKeeper().ChannelKeeper.SetPacketAcknowledgement(s.chainA.GetContext(), path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, 1, expAck) req = &types.QueryPacketAcknowledgementRequest{ PortId: path.EndpointA.ChannelConfig.PortID, 
@@ -1130,28 +1130,28 @@ func (suite *KeeperTestSuite) TestQueryPacketAcknowledgement() { } for _, tc := range testCases { - suite.Run(fmt.Sprintf("Case %s", tc.msg), func() { - suite.SetupTest() // reset + s.Run(fmt.Sprintf("Case %s", tc.msg), func() { + s.SetupTest() // reset tc.malleate() - ctx := suite.chainA.GetContext() + ctx := s.chainA.GetContext() - queryServer := keeper.NewQueryServer(suite.chainA.App.GetIBCKeeper().ChannelKeeper) + queryServer := keeper.NewQueryServer(s.chainA.App.GetIBCKeeper().ChannelKeeper) res, err := queryServer.PacketAcknowledgement(ctx, req) if tc.expErr == nil { - suite.Require().NoError(err) - suite.Require().NotNil(res) - suite.Require().Equal(expAck, res.Acknowledgement) + s.Require().NoError(err) + s.Require().NotNil(res) + s.Require().Equal(expAck, res.Acknowledgement) } else { - suite.Require().Error(err) - suite.Require().ErrorIs(err, tc.expErr) + s.Require().Error(err) + s.Require().ErrorIs(err, tc.expErr) } }) } } -func (suite *KeeperTestSuite) TestQueryPacketAcknowledgements() { +func (s *KeeperTestSuite) TestQueryPacketAcknowledgements() { var ( req *types.QueryPacketAcknowledgementsRequest expAcknowledgements = []*types.PacketState{} @@ -1198,14 +1198,14 @@ func (suite *KeeperTestSuite) TestQueryPacketAcknowledgements() { { "success, filtered res", func() { - path := ibctesting.NewPath(suite.chainA, suite.chainB) + path := ibctesting.NewPath(s.chainA, s.chainB) path.Setup() var commitments []uint64 - for i := uint64(0); i < 100; i++ { + for i := range uint64(100) { ack := types.NewPacketState(path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, i, fmt.Appendf(nil, "hash_%d", i)) - suite.chainA.App.GetIBCKeeper().ChannelKeeper.SetPacketAcknowledgement(suite.chainA.GetContext(), ack.PortId, ack.ChannelId, ack.Sequence, ack.Data) + s.chainA.App.GetIBCKeeper().ChannelKeeper.SetPacketAcknowledgement(s.chainA.GetContext(), ack.PortId, ack.ChannelId, ack.Sequence, ack.Data) if i < 10 { // populate the store with 100 and query for 10 specific acks expAcknowledgements = append(expAcknowledgements, &ack) @@ -1225,14 +1225,14 @@ func (suite *KeeperTestSuite) TestQueryPacketAcknowledgements() { { "success", func() { - path := ibctesting.NewPath(suite.chainA, suite.chainB) + path := ibctesting.NewPath(s.chainA, s.chainB) path.Setup() expAcknowledgements = make([]*types.PacketState, 9) - for i := uint64(0); i < 9; i++ { + for i := range uint64(9) { ack := types.NewPacketState(path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, i, fmt.Appendf(nil, "hash_%d", i)) - suite.chainA.App.GetIBCKeeper().ChannelKeeper.SetPacketAcknowledgement(suite.chainA.GetContext(), ack.PortId, ack.ChannelId, ack.Sequence, ack.Data) + s.chainA.App.GetIBCKeeper().ChannelKeeper.SetPacketAcknowledgement(s.chainA.GetContext(), ack.PortId, ack.ChannelId, ack.Sequence, ack.Data) expAcknowledgements[i] = &ack } @@ -1251,28 +1251,28 @@ func (suite *KeeperTestSuite) TestQueryPacketAcknowledgements() { } for _, tc := range testCases { - suite.Run(fmt.Sprintf("Case %s", tc.msg), func() { - suite.SetupTest() // reset + s.Run(fmt.Sprintf("Case %s", tc.msg), func() { + s.SetupTest() // reset tc.malleate() - ctx := suite.chainA.GetContext() + ctx := s.chainA.GetContext() - queryServer := keeper.NewQueryServer(suite.chainA.App.GetIBCKeeper().ChannelKeeper) + queryServer := keeper.NewQueryServer(s.chainA.App.GetIBCKeeper().ChannelKeeper) res, err := queryServer.PacketAcknowledgements(ctx, req) if tc.expErr == nil { - suite.Require().NoError(err) - suite.Require().NotNil(res) - 
suite.Require().Equal(expAcknowledgements, res.Acknowledgements) + s.Require().NoError(err) + s.Require().NotNil(res) + s.Require().Equal(expAcknowledgements, res.Acknowledgements) } else { - suite.Require().Error(err) - suite.Require().ErrorIs(err, tc.expErr) + s.Require().Error(err) + s.Require().ErrorIs(err, tc.expErr) } }) } } -func (suite *KeeperTestSuite) TestQueryUnreceivedPackets() { +func (s *KeeperTestSuite) TestQueryUnreceivedPackets() { var ( req *types.QueryUnreceivedPacketsRequest expSeq = []uint64(nil) @@ -1319,7 +1319,7 @@ func (suite *KeeperTestSuite) TestQueryUnreceivedPackets() { { "invalid seq", func() { - path := ibctesting.NewPath(suite.chainA, suite.chainB) + path := ibctesting.NewPath(s.chainA, s.chainB) path.Setup() req = &types.QueryUnreceivedPacketsRequest{ @@ -1336,7 +1336,7 @@ func (suite *KeeperTestSuite) TestQueryUnreceivedPackets() { { "invalid seq, ordered channel", func() { - path := ibctesting.NewPath(suite.chainA, suite.chainB) + path := ibctesting.NewPath(s.chainA, s.chainB) path.SetChannelOrdered() path.Setup() @@ -1367,7 +1367,7 @@ func (suite *KeeperTestSuite) TestQueryUnreceivedPackets() { { "basic success empty packet commitments", func() { - path := ibctesting.NewPath(suite.chainA, suite.chainB) + path := ibctesting.NewPath(s.chainA, s.chainB) path.Setup() expSeq = []uint64(nil) @@ -1382,7 +1382,7 @@ func (suite *KeeperTestSuite) TestQueryUnreceivedPackets() { { "basic success unreceived packet commitments", func() { - path := ibctesting.NewPath(suite.chainA, suite.chainB) + path := ibctesting.NewPath(s.chainA, s.chainB) path.Setup() // no ack exists @@ -1399,10 +1399,10 @@ func (suite *KeeperTestSuite) TestQueryUnreceivedPackets() { { "basic success unreceived packet commitments, nothing to relay", func() { - path := ibctesting.NewPath(suite.chainA, suite.chainB) + path := ibctesting.NewPath(s.chainA, s.chainB) path.Setup() - suite.chainA.App.GetIBCKeeper().ChannelKeeper.SetPacketReceipt(suite.chainA.GetContext(), path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, 1) + s.chainA.App.GetIBCKeeper().ChannelKeeper.SetPacketReceipt(s.chainA.GetContext(), path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, 1) expSeq = []uint64(nil) req = &types.QueryUnreceivedPacketsRequest{ @@ -1416,7 +1416,7 @@ func (suite *KeeperTestSuite) TestQueryUnreceivedPackets() { { "success multiple unreceived packet commitments", func() { - path := ibctesting.NewPath(suite.chainA, suite.chainB) + path := ibctesting.NewPath(s.chainA, s.chainB) path.Setup() expSeq = []uint64(nil) // reset packetCommitments := []uint64{} @@ -1426,7 +1426,7 @@ func (suite *KeeperTestSuite) TestQueryUnreceivedPackets() { packetCommitments = append(packetCommitments, seq) if seq%2 == 0 { - suite.chainA.App.GetIBCKeeper().ChannelKeeper.SetPacketReceipt(suite.chainA.GetContext(), path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, seq) + s.chainA.App.GetIBCKeeper().ChannelKeeper.SetPacketReceipt(s.chainA.GetContext(), path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, seq) } else { expSeq = append(expSeq, seq) } @@ -1443,7 +1443,7 @@ func (suite *KeeperTestSuite) TestQueryUnreceivedPackets() { { "basic success empty packet commitments, ordered channel", func() { - path := ibctesting.NewPath(suite.chainA, suite.chainB) + path := ibctesting.NewPath(s.chainA, s.chainB) path.SetChannelOrdered() path.Setup() @@ -1459,7 +1459,7 @@ func (suite *KeeperTestSuite) TestQueryUnreceivedPackets() { { "basic success unreceived packet commitments, ordered channel", 
func() { - path := ibctesting.NewPath(suite.chainA, suite.chainB) + path := ibctesting.NewPath(s.chainA, s.chainB) path.SetChannelOrdered() path.Setup() @@ -1476,7 +1476,7 @@ func (suite *KeeperTestSuite) TestQueryUnreceivedPackets() { { "basic success multiple unreceived packet commitments, ordered channel", func() { - path := ibctesting.NewPath(suite.chainA, suite.chainB) + path := ibctesting.NewPath(s.chainA, s.chainB) path.SetChannelOrdered() path.Setup() @@ -1484,7 +1484,7 @@ func (suite *KeeperTestSuite) TestQueryUnreceivedPackets() { // Packet sequence 2 is already received so only sequences 7, 9, 10 should be considered unreceived. expSeq = []uint64{7, 9, 10} packetCommitments := []uint64{2, 7, 9, 10} - suite.chainA.App.GetIBCKeeper().ChannelKeeper.SetNextSequenceRecv(suite.chainA.GetContext(), path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, 5) + s.chainA.App.GetIBCKeeper().ChannelKeeper.SetNextSequenceRecv(s.chainA.GetContext(), path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, 5) req = &types.QueryUnreceivedPacketsRequest{ PortId: path.EndpointA.ChannelConfig.PortID, @@ -1497,28 +1497,28 @@ func (suite *KeeperTestSuite) TestQueryUnreceivedPackets() { } for _, tc := range testCases { - suite.Run(fmt.Sprintf("Case %s", tc.msg), func() { - suite.SetupTest() // reset + s.Run(fmt.Sprintf("Case %s", tc.msg), func() { + s.SetupTest() // reset tc.malleate() - ctx := suite.chainA.GetContext() + ctx := s.chainA.GetContext() - queryServer := keeper.NewQueryServer(suite.chainA.App.GetIBCKeeper().ChannelKeeper) + queryServer := keeper.NewQueryServer(s.chainA.App.GetIBCKeeper().ChannelKeeper) res, err := queryServer.UnreceivedPackets(ctx, req) if tc.expErr == nil { - suite.Require().NoError(err) - suite.Require().NotNil(res) - suite.Require().Equal(expSeq, res.Sequences) + s.Require().NoError(err) + s.Require().NotNil(res) + s.Require().Equal(expSeq, res.Sequences) } else { - suite.Require().Error(err) - suite.Require().ErrorIs(err, tc.expErr) + s.Require().Error(err) + s.Require().ErrorIs(err, tc.expErr) } }) } } -func (suite *KeeperTestSuite) TestQueryUnreceivedAcks() { +func (s *KeeperTestSuite) TestQueryUnreceivedAcks() { var ( req *types.QueryUnreceivedAcksRequest expSeq = []uint64{} @@ -1578,7 +1578,7 @@ func (suite *KeeperTestSuite) TestQueryUnreceivedAcks() { { "invalid seq", func() { - path := ibctesting.NewPath(suite.chainA, suite.chainB) + path := ibctesting.NewPath(s.chainA, s.chainB) path.Setup() req = &types.QueryUnreceivedAcksRequest{ @@ -1595,10 +1595,10 @@ func (suite *KeeperTestSuite) TestQueryUnreceivedAcks() { { "basic success unreceived packet acks", func() { - path := ibctesting.NewPath(suite.chainA, suite.chainB) + path := ibctesting.NewPath(s.chainA, s.chainB) path.Setup() - suite.chainA.App.GetIBCKeeper().ChannelKeeper.SetPacketCommitment(suite.chainA.GetContext(), path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, 1, []byte("commitment")) + s.chainA.App.GetIBCKeeper().ChannelKeeper.SetPacketCommitment(s.chainA.GetContext(), path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, 1, []byte("commitment")) expSeq = []uint64{1} req = &types.QueryUnreceivedAcksRequest{ @@ -1612,7 +1612,7 @@ func (suite *KeeperTestSuite) TestQueryUnreceivedAcks() { { "basic success unreceived packet acknowledgements, nothing to relay", func() { - path := ibctesting.NewPath(suite.chainA, suite.chainB) + path := ibctesting.NewPath(s.chainA, s.chainB) path.Setup() expSeq = []uint64(nil) @@ -1627,7 +1627,7 @@ func (suite *KeeperTestSuite) 
TestQueryUnreceivedAcks() { { "success multiple unreceived packet acknowledgements", func() { - path := ibctesting.NewPath(suite.chainA, suite.chainB) + path := ibctesting.NewPath(s.chainA, s.chainB) path.Setup() expSeq = []uint64{} // reset packetAcks := []uint64{} @@ -1637,7 +1637,7 @@ func (suite *KeeperTestSuite) TestQueryUnreceivedAcks() { packetAcks = append(packetAcks, seq) if seq%2 == 0 { - suite.chainA.App.GetIBCKeeper().ChannelKeeper.SetPacketCommitment(suite.chainA.GetContext(), path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, seq, []byte("commitement")) + s.chainA.App.GetIBCKeeper().ChannelKeeper.SetPacketCommitment(s.chainA.GetContext(), path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, seq, []byte("commitement")) expSeq = append(expSeq, seq) } } @@ -1653,28 +1653,28 @@ func (suite *KeeperTestSuite) TestQueryUnreceivedAcks() { } for _, tc := range testCases { - suite.Run(fmt.Sprintf("Case %s", tc.msg), func() { - suite.SetupTest() // reset + s.Run(fmt.Sprintf("Case %s", tc.msg), func() { + s.SetupTest() // reset tc.malleate() - ctx := suite.chainA.GetContext() + ctx := s.chainA.GetContext() - queryServer := keeper.NewQueryServer(suite.chainA.App.GetIBCKeeper().ChannelKeeper) + queryServer := keeper.NewQueryServer(s.chainA.App.GetIBCKeeper().ChannelKeeper) res, err := queryServer.UnreceivedAcks(ctx, req) if tc.expErr == nil { - suite.Require().NoError(err) - suite.Require().NotNil(res) - suite.Require().Equal(expSeq, res.Sequences) + s.Require().NoError(err) + s.Require().NotNil(res) + s.Require().Equal(expSeq, res.Sequences) } else { - suite.Require().Error(err) - suite.Require().ErrorIs(err, tc.expErr) + s.Require().Error(err) + s.Require().ErrorIs(err, tc.expErr) } }) } } -func (suite *KeeperTestSuite) TestQueryNextSequenceReceive() { +func (s *KeeperTestSuite) TestQueryNextSequenceReceive() { var ( req *types.QueryNextSequenceReceiveRequest expSeq uint64 @@ -1734,7 +1734,7 @@ func (suite *KeeperTestSuite) TestQueryNextSequenceReceive() { { "basic success on unordered channel returns zero", func() { - path := ibctesting.NewPath(suite.chainA, suite.chainB) + path := ibctesting.NewPath(s.chainA, s.chainB) path.Setup() expSeq = 0 @@ -1748,13 +1748,13 @@ func (suite *KeeperTestSuite) TestQueryNextSequenceReceive() { { "basic success on ordered channel returns the set receive sequence", func() { - path := ibctesting.NewPath(suite.chainA, suite.chainB) + path := ibctesting.NewPath(s.chainA, s.chainB) path.SetChannelOrdered() path.Setup() expSeq = 3 seq := uint64(3) - suite.chainA.App.GetIBCKeeper().ChannelKeeper.SetNextSequenceRecv(suite.chainA.GetContext(), path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, seq) + s.chainA.App.GetIBCKeeper().ChannelKeeper.SetNextSequenceRecv(s.chainA.GetContext(), path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, seq) req = &types.QueryNextSequenceReceiveRequest{ PortId: path.EndpointA.ChannelConfig.PortID, @@ -1766,28 +1766,28 @@ func (suite *KeeperTestSuite) TestQueryNextSequenceReceive() { } for _, tc := range testCases { - suite.Run(fmt.Sprintf("Case %s", tc.msg), func() { - suite.SetupTest() // reset + s.Run(fmt.Sprintf("Case %s", tc.msg), func() { + s.SetupTest() // reset tc.malleate() - ctx := suite.chainA.GetContext() + ctx := s.chainA.GetContext() - queryServer := keeper.NewQueryServer(suite.chainA.App.GetIBCKeeper().ChannelKeeper) + queryServer := keeper.NewQueryServer(s.chainA.App.GetIBCKeeper().ChannelKeeper) res, err := queryServer.NextSequenceReceive(ctx, req) if tc.expErr == nil { 
- suite.Require().NoError(err) - suite.Require().NotNil(res) - suite.Require().Equal(expSeq, res.NextSequenceReceive) + s.Require().NoError(err) + s.Require().NotNil(res) + s.Require().Equal(expSeq, res.NextSequenceReceive) } else { - suite.Require().Error(err) - suite.Require().ErrorIs(err, tc.expErr) + s.Require().Error(err) + s.Require().ErrorIs(err, tc.expErr) } }) } } -func (suite *KeeperTestSuite) TestQueryNextSequenceSend() { +func (s *KeeperTestSuite) TestQueryNextSequenceSend() { var ( req *types.QueryNextSequenceSendRequest expSeq uint64 @@ -1847,12 +1847,12 @@ func (suite *KeeperTestSuite) TestQueryNextSequenceSend() { { "basic success on unordered channel returns the set send sequence", func() { - path := ibctesting.NewPath(suite.chainA, suite.chainB) + path := ibctesting.NewPath(s.chainA, s.chainB) path.Setup() expSeq = 42 seq := uint64(42) - suite.chainA.App.GetIBCKeeper().ChannelKeeper.SetNextSequenceSend(suite.chainA.GetContext(), path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, seq) + s.chainA.App.GetIBCKeeper().ChannelKeeper.SetNextSequenceSend(s.chainA.GetContext(), path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, seq) req = &types.QueryNextSequenceSendRequest{ PortId: path.EndpointA.ChannelConfig.PortID, ChannelId: path.EndpointA.ChannelID, @@ -1863,13 +1863,13 @@ func (suite *KeeperTestSuite) TestQueryNextSequenceSend() { { "basic success on ordered channel returns the set send sequence", func() { - path := ibctesting.NewPath(suite.chainA, suite.chainB) + path := ibctesting.NewPath(s.chainA, s.chainB) path.SetChannelOrdered() path.Setup() expSeq = 3 seq := uint64(3) - suite.chainA.App.GetIBCKeeper().ChannelKeeper.SetNextSequenceSend(suite.chainA.GetContext(), path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, seq) + s.chainA.App.GetIBCKeeper().ChannelKeeper.SetNextSequenceSend(s.chainA.GetContext(), path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, seq) req = &types.QueryNextSequenceSendRequest{ PortId: path.EndpointA.ChannelConfig.PortID, @@ -1881,22 +1881,22 @@ func (suite *KeeperTestSuite) TestQueryNextSequenceSend() { } for _, tc := range testCases { - suite.Run(fmt.Sprintf("Case %s", tc.msg), func() { - suite.SetupTest() // reset + s.Run(fmt.Sprintf("Case %s", tc.msg), func() { + s.SetupTest() // reset tc.malleate() - ctx := suite.chainA.GetContext() + ctx := s.chainA.GetContext() - queryServer := keeper.NewQueryServer(suite.chainA.App.GetIBCKeeper().ChannelKeeper) + queryServer := keeper.NewQueryServer(s.chainA.App.GetIBCKeeper().ChannelKeeper) res, err := queryServer.NextSequenceSend(ctx, req) if tc.expErr == nil { - suite.Require().NoError(err) - suite.Require().NotNil(res) - suite.Require().Equal(expSeq, res.NextSequenceSend) + s.Require().NoError(err) + s.Require().NotNil(res) + s.Require().Equal(expSeq, res.NextSequenceSend) } else { - suite.Require().Error(err) - suite.Require().ErrorIs(err, tc.expErr) + s.Require().Error(err) + s.Require().ErrorIs(err, tc.expErr) } }) } diff --git a/modules/core/04-channel/keeper/handshake.go b/modules/core/04-channel/keeper/handshake.go index 22de3c5748c..886223557b5 100644 --- a/modules/core/04-channel/keeper/handshake.go +++ b/modules/core/04-channel/keeper/handshake.go @@ -239,6 +239,22 @@ func (k *Keeper) WriteOpenAckChannel( channel.Counterparty.ChannelId = counterpartyChannelID k.SetChannel(ctx, portID, channelID, channel) + if channel.Ordering == types.UNORDERED { + // get the counterparty and set it in the client keeper v2 to support IBC v2 on this + // channel 
ID through aliasing + // NOTE: This should never error as the channel is set in the line above + counterparty, ok := k.GetV2Counterparty(ctx, portID, channelID) + if !ok { + panic("could not convert channel to v2 counterparty") + } + k.clientKeeperV2.SetClientCounterparty(ctx, channelID, counterparty) + connection, ok := k.connectionKeeper.GetConnection(ctx, channel.ConnectionHops[0]) + if !ok { + panic("connection not set") + } + k.channelKeeperV2.SetClientForAlias(ctx, channelID, connection.ClientId) + } + k.Logger(ctx).Info("channel state updated", "port-id", portID, "channel-id", channelID, "previous-state", types.INIT, "new-state", types.OPEN) defer telemetry.IncrCounter(1, "ibc", "channel", "open-ack") @@ -308,6 +324,22 @@ func (k *Keeper) WriteOpenConfirmChannel( k.SetChannel(ctx, portID, channelID, channel) k.Logger(ctx).Info("channel state updated", "port-id", portID, "channel-id", channelID, "previous-state", types.TRYOPEN, "new-state", types.OPEN) + if channel.Ordering == types.UNORDERED { + // get the counterparty and set it in the client keeper v2 to support IBC v2 on this + // channel ID through aliasing + // NOTE: This should never error as the channel is set in the line above + counterparty, ok := k.GetV2Counterparty(ctx, portID, channelID) + if !ok { + panic("could not convert channel to v2 counterparty") + } + k.clientKeeperV2.SetClientCounterparty(ctx, channelID, counterparty) + connection, ok := k.connectionKeeper.GetConnection(ctx, channel.ConnectionHops[0]) + if !ok { + panic("connection not set") + } + k.channelKeeperV2.SetClientForAlias(ctx, channelID, connection.ClientId) + } + defer telemetry.IncrCounter(1, "ibc", "channel", "open-confirm") emitChannelOpenConfirmEvent(ctx, portID, channelID, channel) diff --git a/modules/core/04-channel/keeper/handshake_test.go b/modules/core/04-channel/keeper/handshake_test.go index 460a1c47447..0b95b272e77 100644 --- a/modules/core/04-channel/keeper/handshake_test.go +++ b/modules/core/04-channel/keeper/handshake_test.go @@ -24,7 +24,7 @@ type testCase = struct { // TestChanOpenInit tests the OpenInit handshake call for channels. It uses message passing // to enter into the appropriate state and then calls ChanOpenInit directly. The channel is // being created on chainA. 
-func (suite *KeeperTestSuite) TestChanOpenInit() { +func (s *KeeperTestSuite) TestChanOpenInit() { var ( path *ibctesting.Path features []string @@ -70,19 +70,19 @@ func (suite *KeeperTestSuite) TestChanOpenInit() { path.SetupConnections() // remove client from allowed list - params := suite.chainA.App.GetIBCKeeper().ClientKeeper.GetParams(suite.chainA.GetContext()) + params := s.chainA.App.GetIBCKeeper().ClientKeeper.GetParams(s.chainA.GetContext()) params.AllowedClients = []string{} - suite.chainA.App.GetIBCKeeper().ClientKeeper.SetParams(suite.chainA.GetContext(), params) + s.chainA.App.GetIBCKeeper().ClientKeeper.SetParams(s.chainA.GetContext(), params) }, }, } for _, tc := range testCases { - suite.Run(fmt.Sprintf("Case %s", tc.msg), func() { + s.Run(fmt.Sprintf("Case %s", tc.msg), func() { // run test for all types of ordering for _, order := range []types.Order{types.UNORDERED, types.ORDERED} { - suite.SetupTest() // reset - path = ibctesting.NewPath(suite.chainA, suite.chainB) + s.SetupTest() // reset + path = ibctesting.NewPath(s.chainA, s.chainB) path.EndpointA.ChannelConfig.Order = order path.EndpointB.ChannelConfig.Order = order expErrorMsgSubstring = "" @@ -91,8 +91,8 @@ func (suite *KeeperTestSuite) TestChanOpenInit() { counterparty := types.NewCounterparty(ibctesting.MockPort, ibctesting.FirstChannelID) - channelID, err := suite.chainA.App.GetIBCKeeper().ChannelKeeper.ChanOpenInit( - suite.chainA.GetContext(), path.EndpointA.ChannelConfig.Order, []string{path.EndpointA.ConnectionID}, + channelID, err := s.chainA.App.GetIBCKeeper().ChannelKeeper.ChanOpenInit( + s.chainA.GetContext(), path.EndpointA.ChannelConfig.Order, []string{path.EndpointA.ConnectionID}, path.EndpointA.ChannelConfig.PortID, counterparty, path.EndpointA.ChannelConfig.Version, ) @@ -107,12 +107,12 @@ func (suite *KeeperTestSuite) TestChanOpenInit() { // Testcase must have expectedPass = true AND channel order supported before // asserting the channel handshake initiation succeeded if (tc.expErr == nil) && orderSupported { - suite.Require().NoError(err) - suite.Require().Equal(types.FormatChannelIdentifier(0), channelID) + s.Require().NoError(err) + s.Require().Equal(types.FormatChannelIdentifier(0), channelID) } else { - suite.Require().Error(err) - suite.Require().Contains(err.Error(), expErrorMsgSubstring) - suite.Require().Equal("", channelID) + s.Require().Error(err) + s.Require().Contains(err.Error(), expErrorMsgSubstring) + s.Require().Equal("", channelID) } } }) @@ -122,7 +122,7 @@ func (suite *KeeperTestSuite) TestChanOpenInit() { // TestChanOpenTry tests the OpenTry handshake call for channels. It uses message passing // to enter into the appropriate state and then calls ChanOpenTry directly. The channel // is being created on chainB. 
-func (suite *KeeperTestSuite) TestChanOpenTry() { +func (s *KeeperTestSuite) TestChanOpenTry() { var ( path *ibctesting.Path heightDiff uint64 @@ -133,7 +133,7 @@ func (suite *KeeperTestSuite) TestChanOpenTry() { path.SetupConnections() path.SetChannelOrdered() err := path.EndpointA.ChanOpenInit() - suite.Require().NoError(err) + s.Require().NoError(err) }, nil}, {"connection doesn't exist", func() { path.EndpointA.ConnectionID = ibctesting.FirstConnectionID @@ -143,13 +143,13 @@ func (suite *KeeperTestSuite) TestChanOpenTry() { path.SetupClients() err := path.EndpointB.ConnOpenInit() - suite.Require().NoError(err) + s.Require().NoError(err) }, connectiontypes.ErrInvalidConnectionState}, {"consensus state not found", func() { path.SetupConnections() path.SetChannelOrdered() err := path.EndpointA.ChanOpenInit() - suite.Require().NoError(err) + s.Require().NoError(err) heightDiff = 3 // consensus state doesn't exist at this height }, errorsmod.Wrap(ibcerrors.ErrInvalidHeight, "")}, @@ -161,7 +161,7 @@ func (suite *KeeperTestSuite) TestChanOpenTry() { path.SetupConnections() path.SetChannelOrdered() err := path.EndpointA.ChanOpenInit() - suite.Require().NoError(err) + s.Require().NoError(err) // modify connB versions path.EndpointB.UpdateConnection(func(c *connectiontypes.ConnectionEnd) { @@ -172,7 +172,7 @@ func (suite *KeeperTestSuite) TestChanOpenTry() { path.SetupConnections() path.SetChannelOrdered() err := path.EndpointA.ChanOpenInit() - suite.Require().NoError(err) + s.Require().NoError(err) // modify connB versions to only support UNORDERED channels path.EndpointB.UpdateConnection(func(c *connectiontypes.ConnectionEnd) { @@ -182,36 +182,36 @@ func (suite *KeeperTestSuite) TestChanOpenTry() { } for _, tc := range testCases { - suite.Run(fmt.Sprintf("Case %s", tc.msg), func() { - suite.SetupTest() // reset - heightDiff = 0 // must be explicitly changed in malleate - path = ibctesting.NewPath(suite.chainA, suite.chainB) + s.Run(fmt.Sprintf("Case %s", tc.msg), func() { + s.SetupTest() // reset + heightDiff = 0 // must be explicitly changed in malleate + path = ibctesting.NewPath(s.chainA, s.chainB) tc.malleate() if path.EndpointB.ClientID != "" { // ensure client is up to date err := path.EndpointB.UpdateClient() - suite.Require().NoError(err) + s.Require().NoError(err) } counterparty := types.NewCounterparty(path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID) channelKey := host.ChannelKey(counterparty.PortId, counterparty.ChannelId) - proof, proofHeight := suite.chainA.QueryProof(channelKey) + proof, proofHeight := s.chainA.QueryProof(channelKey) - channelID, err := suite.chainB.App.GetIBCKeeper().ChannelKeeper.ChanOpenTry( - suite.chainB.GetContext(), types.ORDERED, []string{path.EndpointB.ConnectionID}, + channelID, err := s.chainB.App.GetIBCKeeper().ChannelKeeper.ChanOpenTry( + s.chainB.GetContext(), types.ORDERED, []string{path.EndpointB.ConnectionID}, path.EndpointB.ChannelConfig.PortID, counterparty, path.EndpointA.ChannelConfig.Version, proof, malleateHeight(proofHeight, heightDiff), ) if tc.expErr == nil { - suite.Require().NoError(err) - suite.Require().NotEmpty(channelID) + s.Require().NoError(err) + s.Require().NotEmpty(channelID) } else { - suite.Require().Error(err) - suite.Require().ErrorIs(err, tc.expErr) + s.Require().Error(err) + s.Require().ErrorIs(err, tc.expErr) } }) } @@ -220,7 +220,7 @@ func (suite *KeeperTestSuite) TestChanOpenTry() { // TestChanOpenAck tests the OpenAck handshake call for channels. 
It uses message passing // to enter into the appropriate state and then calls ChanOpenAck directly. The handshake // call is occurring on chainA. -func (suite *KeeperTestSuite) TestChanOpenAck() { +func (s *KeeperTestSuite) TestChanOpenAck() { var ( path *ibctesting.Path counterpartyChannelID string @@ -232,20 +232,20 @@ func (suite *KeeperTestSuite) TestChanOpenAck() { path.SetupConnections() path.SetChannelOrdered() err := path.EndpointA.ChanOpenInit() - suite.Require().NoError(err) + s.Require().NoError(err) err = path.EndpointB.ChanOpenTry() - suite.Require().NoError(err) + s.Require().NoError(err) }, nil}, {"success with empty stored counterparty channel ID", func() { path.SetupConnections() path.SetChannelOrdered() err := path.EndpointA.ChanOpenInit() - suite.Require().NoError(err) + s.Require().NoError(err) err = path.EndpointB.ChanOpenTry() - suite.Require().NoError(err) + s.Require().NoError(err) // set the channel's counterparty channel identifier to empty string channel := path.EndpointA.GetChannel() @@ -254,7 +254,7 @@ func (suite *KeeperTestSuite) TestChanOpenAck() { // use a different channel identifier counterpartyChannelID = path.EndpointB.ChannelID - suite.chainA.App.GetIBCKeeper().ChannelKeeper.SetChannel(suite.chainA.GetContext(), path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, channel) + s.chainA.App.GetIBCKeeper().ChannelKeeper.SetChannel(s.chainA.GetContext(), path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, channel) }, nil}, {"channel doesn't exist", func() {}, errorsmod.Wrap(types.ErrChannelNotFound, "")}, {"channel state is not INIT", func() { @@ -265,37 +265,37 @@ func (suite *KeeperTestSuite) TestChanOpenAck() { path.SetupConnections() path.SetChannelOrdered() err := path.EndpointA.ChanOpenInit() - suite.Require().NoError(err) + s.Require().NoError(err) err = path.EndpointB.ChanOpenTry() - suite.Require().NoError(err) + s.Require().NoError(err) // set the channel's connection hops to wrong connection ID channel := path.EndpointA.GetChannel() channel.ConnectionHops[0] = doesnotexist - suite.chainA.App.GetIBCKeeper().ChannelKeeper.SetChannel(suite.chainA.GetContext(), path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, channel) + s.chainA.App.GetIBCKeeper().ChannelKeeper.SetChannel(s.chainA.GetContext(), path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, channel) }, errorsmod.Wrap(connectiontypes.ErrConnectionNotFound, "")}, {"connection is not OPEN", func() { path.SetupClients() err := path.EndpointA.ConnOpenInit() - suite.Require().NoError(err) + s.Require().NoError(err) // create channel in init path.SetChannelOrdered() err = path.EndpointA.ChanOpenInit() - suite.Require().NoError(err) + s.Require().NoError(err) }, connectiontypes.ErrInvalidConnectionState}, {"consensus state not found", func() { path.SetupConnections() path.SetChannelOrdered() err := path.EndpointA.ChanOpenInit() - suite.Require().NoError(err) + s.Require().NoError(err) err = path.EndpointB.ChanOpenTry() - suite.Require().NoError(err) + s.Require().NoError(err) heightDiff = 3 // consensus state doesn't exist at this height }, ibcerrors.ErrInvalidHeight}, @@ -304,10 +304,10 @@ func (suite *KeeperTestSuite) TestChanOpenAck() { path.SetChannelOrdered() err := path.EndpointA.ChanOpenInit() - suite.Require().NoError(err) + s.Require().NoError(err) err = path.EndpointB.ChanOpenTry() - suite.Require().NoError(err) + s.Require().NoError(err) counterpartyChannelID = "otheridentifier" }, commitmenttypes.ErrInvalidProof}, @@ -317,19 +317,19 @@ func 
(suite *KeeperTestSuite) TestChanOpenAck() { path.SetChannelOrdered() err := path.EndpointB.ChanOpenInit() - suite.Require().NoError(err) + s.Require().NoError(err) err = path.EndpointA.ChanOpenTry() - suite.Require().NoError(err) + s.Require().NoError(err) }, types.ErrInvalidChannelState}, } for _, tc := range testCases { - suite.Run(fmt.Sprintf("Case %s", tc.msg), func() { - suite.SetupTest() // reset + s.Run(fmt.Sprintf("Case %s", tc.msg), func() { + s.SetupTest() // reset counterpartyChannelID = "" // must be explicitly changed in malleate heightDiff = 0 // must be explicitly changed - path = ibctesting.NewPath(suite.chainA, suite.chainB) + path = ibctesting.NewPath(s.chainA, s.chainB) tc.malleate() @@ -340,22 +340,22 @@ func (suite *KeeperTestSuite) TestChanOpenAck() { if path.EndpointA.ClientID != "" { // ensure client is up to date err := path.EndpointA.UpdateClient() - suite.Require().NoError(err) + s.Require().NoError(err) } channelKey := host.ChannelKey(path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID) - proof, proofHeight := suite.chainB.QueryProof(channelKey) + proof, proofHeight := s.chainB.QueryProof(channelKey) - err := suite.chainA.App.GetIBCKeeper().ChannelKeeper.ChanOpenAck( - suite.chainA.GetContext(), path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.Version, counterpartyChannelID, + err := s.chainA.App.GetIBCKeeper().ChannelKeeper.ChanOpenAck( + s.chainA.GetContext(), path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.Version, counterpartyChannelID, proof, malleateHeight(proofHeight, heightDiff), ) if tc.expErr == nil { - suite.Require().NoError(err) + s.Require().NoError(err) } else { - suite.Require().Error(err) - suite.Require().ErrorIs(err, tc.expErr) + s.Require().Error(err) + s.Require().ErrorIs(err, tc.expErr) } }) } @@ -364,7 +364,7 @@ func (suite *KeeperTestSuite) TestChanOpenAck() { // TestChanOpenConfirm tests the OpenAck handshake call for channels. It uses message passing // to enter into the appropriate state and then calls ChanOpenConfirm directly. The handshake // call is occurring on chainB. 
-func (suite *KeeperTestSuite) TestChanOpenConfirm() { +func (s *KeeperTestSuite) TestChanOpenConfirm() { var ( path *ibctesting.Path heightDiff uint64 @@ -375,13 +375,13 @@ func (suite *KeeperTestSuite) TestChanOpenConfirm() { path.SetChannelOrdered() err := path.EndpointA.ChanOpenInit() - suite.Require().NoError(err) + s.Require().NoError(err) err = path.EndpointB.ChanOpenTry() - suite.Require().NoError(err) + s.Require().NoError(err) err = path.EndpointA.ChanOpenAck() - suite.Require().NoError(err) + s.Require().NoError(err) }, nil}, {"channel doesn't exist", func() {}, types.ErrChannelNotFound}, {"channel state is not TRYOPEN", func() { @@ -393,37 +393,37 @@ func (suite *KeeperTestSuite) TestChanOpenConfirm() { path.SetChannelOrdered() err := path.EndpointA.ChanOpenInit() - suite.Require().NoError(err) + s.Require().NoError(err) err = path.EndpointB.ChanOpenTry() - suite.Require().NoError(err) + s.Require().NoError(err) err = path.EndpointA.ChanOpenAck() - suite.Require().NoError(err) + s.Require().NoError(err) // set the channel's connection hops to wrong connection ID channel := path.EndpointB.GetChannel() channel.ConnectionHops[0] = doesnotexist - suite.chainB.App.GetIBCKeeper().ChannelKeeper.SetChannel(suite.chainB.GetContext(), path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, channel) + s.chainB.App.GetIBCKeeper().ChannelKeeper.SetChannel(s.chainB.GetContext(), path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, channel) }, errorsmod.Wrap(connectiontypes.ErrConnectionNotFound, "")}, {"connection is not OPEN", func() { path.SetupClients() err := path.EndpointB.ConnOpenInit() - suite.Require().NoError(err) + s.Require().NoError(err) }, types.ErrChannelNotFound}, {"consensus state not found", func() { path.SetupConnections() path.SetChannelOrdered() err := path.EndpointA.ChanOpenInit() - suite.Require().NoError(err) + s.Require().NoError(err) err = path.EndpointB.ChanOpenTry() - suite.Require().NoError(err) + s.Require().NoError(err) err = path.EndpointA.ChanOpenAck() - suite.Require().NoError(err) + s.Require().NoError(err) heightDiff = 3 }, ibcerrors.ErrInvalidHeight}, @@ -433,41 +433,40 @@ func (suite *KeeperTestSuite) TestChanOpenConfirm() { path.SetChannelOrdered() err := path.EndpointA.ChanOpenInit() - suite.Require().NoError(err) + s.Require().NoError(err) err = path.EndpointB.ChanOpenTry() - suite.Require().NoError(err) + s.Require().NoError(err) }, commitmenttypes.ErrInvalidProof}, } for _, tc := range testCases { - suite.Run(fmt.Sprintf("Case %s", tc.msg), func() { - suite.SetupTest() // reset - heightDiff = 0 // must be explicitly changed - path = ibctesting.NewPath(suite.chainA, suite.chainB) + s.Run(fmt.Sprintf("Case %s", tc.msg), func() { + s.SetupTest() // reset + heightDiff = 0 // must be explicitly changed + path = ibctesting.NewPath(s.chainA, s.chainB) tc.malleate() if path.EndpointB.ClientID != "" { // ensure client is up to date err := path.EndpointB.UpdateClient() - suite.Require().NoError(err) - + s.Require().NoError(err) } channelKey := host.ChannelKey(path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID) - proof, proofHeight := suite.chainA.QueryProof(channelKey) + proof, proofHeight := s.chainA.QueryProof(channelKey) - err := suite.chainB.App.GetIBCKeeper().ChannelKeeper.ChanOpenConfirm( - suite.chainB.GetContext(), path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, + err := s.chainB.App.GetIBCKeeper().ChannelKeeper.ChanOpenConfirm( + s.chainB.GetContext(), path.EndpointB.ChannelConfig.PortID, 
path.EndpointB.ChannelID, proof, malleateHeight(proofHeight, heightDiff), ) if tc.expErr == nil { - suite.Require().NoError(err) + s.Require().NoError(err) } else { - suite.Require().Error(err) - suite.Require().ErrorIs(err, tc.expErr) + s.Require().Error(err) + s.Require().ErrorIs(err, tc.expErr) } }) } @@ -475,7 +474,7 @@ func (suite *KeeperTestSuite) TestChanOpenConfirm() { // TestChanCloseInit tests the initial closing of a handshake on chainA by calling // ChanCloseInit. Both chains will use message passing to setup OPEN channels. -func (suite *KeeperTestSuite) TestChanCloseInit() { +func (s *KeeperTestSuite) TestChanCloseInit() { var ( path *ibctesting.Path expErrorMsgSubstring string @@ -505,18 +504,18 @@ func (suite *KeeperTestSuite) TestChanCloseInit() { // set the channel's connection hops to wrong connection ID channel := path.EndpointA.GetChannel() channel.ConnectionHops[0] = doesnotexist - suite.chainA.App.GetIBCKeeper().ChannelKeeper.SetChannel(suite.chainA.GetContext(), path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, channel) + s.chainA.App.GetIBCKeeper().ChannelKeeper.SetChannel(s.chainA.GetContext(), path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, channel) }, errorsmod.Wrap(connectiontypes.ErrConnectionNotFound, "")}, {"connection is not OPEN", func() { path.SetupClients() err := path.EndpointA.ConnOpenInit() - suite.Require().NoError(err) + s.Require().NoError(err) // create channel in init path.SetChannelOrdered() err = path.EndpointA.ChanOpenInit() - suite.Require().NoError(err) + s.Require().NoError(err) }, connectiontypes.ErrInvalidConnectionState}, { msg: "unauthorized client", @@ -525,32 +524,32 @@ func (suite *KeeperTestSuite) TestChanCloseInit() { path.Setup() // remove client from allowed list - params := suite.chainA.App.GetIBCKeeper().ClientKeeper.GetParams(suite.chainA.GetContext()) + params := s.chainA.App.GetIBCKeeper().ClientKeeper.GetParams(s.chainA.GetContext()) params.AllowedClients = []string{} - suite.chainA.App.GetIBCKeeper().ClientKeeper.SetParams(suite.chainA.GetContext(), params) + s.chainA.App.GetIBCKeeper().ClientKeeper.SetParams(s.chainA.GetContext(), params) expErrorMsgSubstring = "status is Unauthorized" }, }, } for _, tc := range testCases { - suite.Run(fmt.Sprintf("Case %s", tc.msg), func() { - suite.SetupTest() // reset - path = ibctesting.NewPath(suite.chainA, suite.chainB) + s.Run(fmt.Sprintf("Case %s", tc.msg), func() { + s.SetupTest() // reset + path = ibctesting.NewPath(s.chainA, s.chainB) expErrorMsgSubstring = "" tc.malleate() - err := suite.chainA.App.GetIBCKeeper().ChannelKeeper.ChanCloseInit( - suite.chainA.GetContext(), path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, + err := s.chainA.App.GetIBCKeeper().ChannelKeeper.ChanCloseInit( + s.chainA.GetContext(), path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, ) if tc.expErr == nil { - suite.Require().NoError(err) + s.Require().NoError(err) } else { - suite.Require().Error(err) - suite.Require().Contains(err.Error(), expErrorMsgSubstring) - suite.Require().ErrorIs(err, tc.expErr) + s.Require().Error(err) + s.Require().Contains(err.Error(), expErrorMsgSubstring) + s.Require().ErrorIs(err, tc.expErr) } }) } @@ -559,7 +558,7 @@ func (suite *KeeperTestSuite) TestChanCloseInit() { // TestChanCloseConfirm tests the confirming closing channel ends by calling ChanCloseConfirm // on chainB. Both chains will use message passing to setup OPEN channels. 
ChanCloseInit is // bypassed on chainA by setting the channel state in the ChannelKeeper. -func (suite *KeeperTestSuite) TestChanCloseConfirm() { +func (s *KeeperTestSuite) TestChanCloseConfirm() { var ( path *ibctesting.Path heightDiff uint64 @@ -586,18 +585,18 @@ func (suite *KeeperTestSuite) TestChanCloseConfirm() { // set the channel's connection hops to wrong connection ID channel := path.EndpointB.GetChannel() channel.ConnectionHops[0] = doesnotexist - suite.chainB.App.GetIBCKeeper().ChannelKeeper.SetChannel(suite.chainB.GetContext(), path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, channel) + s.chainB.App.GetIBCKeeper().ChannelKeeper.SetChannel(s.chainB.GetContext(), path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, channel) }, errorsmod.Wrap(connectiontypes.ErrConnectionNotFound, "")}, {"connection is not OPEN", func() { path.SetupClients() err := path.EndpointB.ConnOpenInit() - suite.Require().NoError(err) + s.Require().NoError(err) // create channel in init path.SetChannelOrdered() err = path.EndpointB.ChanOpenInit() - suite.Require().NoError(err) + s.Require().NoError(err) }, connectiontypes.ErrInvalidConnectionState}, {"consensus state not found", func() { path.Setup() @@ -613,27 +612,27 @@ func (suite *KeeperTestSuite) TestChanCloseConfirm() { } for _, tc := range testCases { - suite.Run(fmt.Sprintf("Case %s", tc.msg), func() { - suite.SetupTest() // reset - heightDiff = 0 // must explicitly be changed - path = ibctesting.NewPath(suite.chainA, suite.chainB) + s.Run(fmt.Sprintf("Case %s", tc.msg), func() { + s.SetupTest() // reset + heightDiff = 0 // must explicitly be changed + path = ibctesting.NewPath(s.chainA, s.chainB) tc.malleate() channelKey := host.ChannelKey(path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID) - proof, proofHeight := suite.chainA.QueryProof(channelKey) + proof, proofHeight := s.chainA.QueryProof(channelKey) - ctx := suite.chainB.GetContext() - err := suite.chainB.App.GetIBCKeeper().ChannelKeeper.ChanCloseConfirm( + ctx := s.chainB.GetContext() + err := s.chainB.App.GetIBCKeeper().ChannelKeeper.ChanCloseConfirm( ctx, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, proof, malleateHeight(proofHeight, heightDiff), ) if tc.expErr == nil { - suite.Require().NoError(err) + s.Require().NoError(err) } else { - suite.Require().Error(err) - suite.Require().ErrorIs(err, tc.expErr) + s.Require().Error(err) + s.Require().ErrorIs(err, tc.expErr) } }) } diff --git a/modules/core/04-channel/keeper/keeper.go b/modules/core/04-channel/keeper/keeper.go index b565f8151a6..f3212939cc9 100644 --- a/modules/core/04-channel/keeper/keeper.go +++ b/modules/core/04-channel/keeper/keeper.go @@ -17,10 +17,12 @@ import ( sdk "github.com/cosmos/cosmos-sdk/types" clienttypes "github.com/cosmos/ibc-go/v10/modules/core/02-client/types" + clientv2types "github.com/cosmos/ibc-go/v10/modules/core/02-client/v2/types" connectiontypes "github.com/cosmos/ibc-go/v10/modules/core/03-connection/types" "github.com/cosmos/ibc-go/v10/modules/core/04-channel/types" porttypes "github.com/cosmos/ibc-go/v10/modules/core/05-port/types" host "github.com/cosmos/ibc-go/v10/modules/core/24-host" + hostv2 "github.com/cosmos/ibc-go/v10/modules/core/24-host/v2" "github.com/cosmos/ibc-go/v10/modules/core/exported" ) @@ -35,6 +37,10 @@ type Keeper struct { cdc codec.BinaryCodec clientKeeper types.ClientKeeper connectionKeeper types.ConnectionKeeper + + // V2 Keepers are only used for channel aliasing + clientKeeperV2 types.ClientKeeperV2 + channelKeeperV2 
types.ChannelKeeperV2 } // NewKeeper creates a new IBC channel Keeper instance @@ -43,17 +49,21 @@ func NewKeeper( storeService corestore.KVStoreService, clientKeeper types.ClientKeeper, connectionKeeper types.ConnectionKeeper, + clientKeeperV2 types.ClientKeeperV2, + channelKeeperV2 types.ChannelKeeperV2, ) *Keeper { return &Keeper{ storeService: storeService, cdc: cdc, clientKeeper: clientKeeper, connectionKeeper: connectionKeeper, + clientKeeperV2: clientKeeperV2, + channelKeeperV2: channelKeeperV2, } } // Logger returns a module-specific logger. -func (Keeper) Logger(ctx sdk.Context) log.Logger { +func (*Keeper) Logger(ctx sdk.Context) log.Logger { return ctx.Logger().With("module", "x/"+exported.ModuleName+"/"+types.SubModuleName) } @@ -136,9 +146,15 @@ func (k *Keeper) SetNextChannelSequence(ctx sdk.Context, sequence uint64) { } // GetNextSequenceSend gets a channel's next send sequence from the store +// NOTE: Even though we are using IBCv1 protocol, we are using the v2 NextSequenceSendKey +// this allows us to use the same identifiers for both v1 and v2 packets without having the sequences +// collide. +// The v2 NextSequenceSendKey does not include the port ID, and only uses the client ID. +// It is safe for us to only use the channel ID as the client ID since channel ids are unique chain identifiers +// in ibc-go. func (k *Keeper) GetNextSequenceSend(ctx sdk.Context, portID, channelID string) (uint64, bool) { store := k.storeService.OpenKVStore(ctx) - bz, err := store.Get(host.NextSequenceSendKey(portID, channelID)) + bz, err := store.Get(hostv2.NextSequenceSendKey(channelID)) if err != nil { panic(err) } @@ -150,10 +166,16 @@ func (k *Keeper) GetNextSequenceSend(ctx sdk.Context, portID, channelID string) } // SetNextSequenceSend sets a channel's next send sequence to the store +// NOTE: Even though we are using IBCv1 protocol, we are using the v2 NextSequenceSendKey +// this allows us to use the same identifiers for both v1 and v2 packets without having the sequences +// collide. +// The v2 NextSequenceSendKey does not include the port ID, and only uses the client ID. +// It is safe for us to only use the channel ID as the client ID since channel ids are unique chain identifiers +// in ibc-go. func (k *Keeper) SetNextSequenceSend(ctx sdk.Context, portID, channelID string, sequence uint64) { store := k.storeService.OpenKVStore(ctx) bz := sdk.Uint64ToBigEndian(sequence) - if err := store.Set(host.NextSequenceSendKey(portID, channelID), bz); err != nil { + if err := store.Set(hostv2.NextSequenceSendKey(channelID), bz); err != nil { panic(err) } } @@ -300,6 +322,7 @@ func (k *Keeper) HasPacketAcknowledgement(ctx sdk.Context, portID, channelID str // IteratePacketSequence provides an iterator over all send, receive or ack sequences. // For each sequence, cb will be called. If the cb returns true, the iterator // will close and stop. +// NOTE: This function will no longer work for NextSequenceSend func (k *Keeper) IteratePacketSequence(ctx sdk.Context, iterator db.Iterator, cb func(portID, channelID string, sequence uint64) bool) { defer sdk.LogDeferred(k.Logger(ctx), func() error { return iterator.Close() }) for ; iterator.Valid(); iterator.Next() { @@ -318,11 +341,16 @@ func (k *Keeper) IteratePacketSequence(ctx sdk.Context, iterator db.Iterator, cb } // GetAllPacketSendSeqs returns all stored next send sequences. 
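// Sketch (not part of the patch), illustrating the NOTE above on sequence-key aliasing:
// the v1 channel keeper now stores NextSequenceSend under the v2 key, which is derived
// from the channel identifier alone (no port ID). Because ibc-go channel IDs are unique
// per chain, an IBC classic channel and an IBC v2 client that share the same identifier
// resolve to the same counter, so their send sequences cannot collide. The package/main
// wrapper below is illustrative only; hostv2.NextSequenceSendKey is the helper used in
// the diff.
package main

import (
	"fmt"

	hostv2 "github.com/cosmos/ibc-go/v10/modules/core/24-host/v2"
)

func main() {
	// "channel-0" doubles as the v1 channel ID and the v2 client ID; both packet flows
	// read and write the next send sequence under this single store key.
	fmt.Printf("next-sequence-send key for channel-0: %x\n", hostv2.NextSequenceSendKey("channel-0"))
}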
-func (k *Keeper) GetAllPacketSendSeqs(ctx sdk.Context) (seqs []types.PacketSequence) { - store := runtime.KVStoreAdapter(k.storeService.OpenKVStore(ctx)) - iterator := storetypes.KVStorePrefixIterator(store, []byte(host.KeyNextSeqSendPrefix)) - k.IteratePacketSequence(ctx, iterator, func(portID, channelID string, nextSendSeq uint64) bool { - ps := types.NewPacketSequence(portID, channelID, nextSendSeq) +// NOTE: Implemented differently from NextSequenceRecv/Ack since the key format is different +func (k *Keeper) GetAllPacketSendSeqs(ctx sdk.Context) []types.PacketSequence { + var seqs []types.PacketSequence + k.IterateChannels(ctx, func(ic types.IdentifiedChannel) bool { + nextSeqSend, ok := k.GetNextSequenceSend(ctx, ic.PortId, ic.ChannelId) + if !ok { + panic("next sequence send not found for channel " + ic.ChannelId) + } + + ps := types.NewPacketSequence(ic.PortId, ic.ChannelId, nextSeqSend) seqs = append(seqs, ps) return false }) @@ -330,9 +358,10 @@ func (k *Keeper) GetAllPacketSendSeqs(ctx sdk.Context) (seqs []types.PacketSeque } // GetAllPacketRecvSeqs returns all stored next recv sequences. -func (k *Keeper) GetAllPacketRecvSeqs(ctx sdk.Context) (seqs []types.PacketSequence) { +func (k *Keeper) GetAllPacketRecvSeqs(ctx sdk.Context) []types.PacketSequence { store := runtime.KVStoreAdapter(k.storeService.OpenKVStore(ctx)) iterator := storetypes.KVStorePrefixIterator(store, []byte(host.KeyNextSeqRecvPrefix)) + var seqs []types.PacketSequence k.IteratePacketSequence(ctx, iterator, func(portID, channelID string, nextRecvSeq uint64) bool { ps := types.NewPacketSequence(portID, channelID, nextRecvSeq) seqs = append(seqs, ps) @@ -342,9 +371,10 @@ func (k *Keeper) GetAllPacketRecvSeqs(ctx sdk.Context) (seqs []types.PacketSeque } // GetAllPacketAckSeqs returns all stored next acknowledgements sequences. -func (k *Keeper) GetAllPacketAckSeqs(ctx sdk.Context) (seqs []types.PacketSequence) { +func (k *Keeper) GetAllPacketAckSeqs(ctx sdk.Context) []types.PacketSequence { store := runtime.KVStoreAdapter(k.storeService.OpenKVStore(ctx)) iterator := storetypes.KVStorePrefixIterator(store, []byte(host.KeyNextSeqAckPrefix)) + var seqs []types.PacketSequence k.IteratePacketSequence(ctx, iterator, func(portID, channelID string, nextAckSeq uint64) bool { ps := types.NewPacketSequence(portID, channelID, nextAckSeq) seqs = append(seqs, ps) @@ -363,7 +393,8 @@ func (k *Keeper) IteratePacketCommitment(ctx sdk.Context, cb func(portID, channe } // GetAllPacketCommitments returns all stored PacketCommitments objects. -func (k *Keeper) GetAllPacketCommitments(ctx sdk.Context) (commitments []types.PacketState) { +func (k *Keeper) GetAllPacketCommitments(ctx sdk.Context) []types.PacketState { + var commitments []types.PacketState k.IteratePacketCommitment(ctx, func(portID, channelID string, sequence uint64, hash []byte) bool { pc := types.NewPacketState(portID, channelID, sequence, hash) commitments = append(commitments, pc) @@ -383,7 +414,8 @@ func (k *Keeper) IteratePacketCommitmentAtChannel(ctx sdk.Context, portID, chann // GetAllPacketCommitmentsAtChannel returns all stored PacketCommitments objects for a specified // port ID and channel ID. 
-func (k *Keeper) GetAllPacketCommitmentsAtChannel(ctx sdk.Context, portID, channelID string) (commitments []types.PacketState) { +func (k *Keeper) GetAllPacketCommitmentsAtChannel(ctx sdk.Context, portID, channelID string) []types.PacketState { + var commitments []types.PacketState k.IteratePacketCommitmentAtChannel(ctx, portID, channelID, func(_, _ string, sequence uint64, hash []byte) bool { pc := types.NewPacketState(portID, channelID, sequence, hash) commitments = append(commitments, pc) @@ -402,7 +434,8 @@ func (k *Keeper) IteratePacketReceipt(ctx sdk.Context, cb func(portID, channelID } // GetAllPacketReceipts returns all stored PacketReceipt objects. -func (k *Keeper) GetAllPacketReceipts(ctx sdk.Context) (receipts []types.PacketState) { +func (k *Keeper) GetAllPacketReceipts(ctx sdk.Context) []types.PacketState { + var receipts []types.PacketState k.IteratePacketReceipt(ctx, func(portID, channelID string, sequence uint64, receipt []byte) bool { packetReceipt := types.NewPacketState(portID, channelID, sequence, receipt) receipts = append(receipts, packetReceipt) @@ -421,7 +454,8 @@ func (k *Keeper) IteratePacketAcknowledgement(ctx sdk.Context, cb func(portID, c } // GetAllPacketAcks returns all stored PacketAcknowledgements objects. -func (k *Keeper) GetAllPacketAcks(ctx sdk.Context) (acks []types.PacketState) { +func (k *Keeper) GetAllPacketAcks(ctx sdk.Context) []types.PacketState { + var acks []types.PacketState k.IteratePacketAcknowledgement(ctx, func(portID, channelID string, sequence uint64, ack []byte) bool { packetAck := types.NewPacketState(portID, channelID, sequence, ack) acks = append(acks, packetAck) @@ -473,7 +507,8 @@ func (k *Keeper) GetAllChannelsWithPortPrefix(ctx sdk.Context, portPrefix string } // GetAllChannels returns all stored Channel objects. 
-func (k *Keeper) GetAllChannels(ctx sdk.Context) (channels []types.IdentifiedChannel) { +func (k *Keeper) GetAllChannels(ctx sdk.Context) []types.IdentifiedChannel { + var channels []types.IdentifiedChannel k.IterateChannels(ctx, func(channel types.IdentifiedChannel) bool { channels = append(channels, channel) return false @@ -584,3 +619,23 @@ func (k *Keeper) GetRecvStartSequence(ctx sdk.Context, portID, channelID string) return sdk.BigEndianToUint64(bz), true } + +func (k *Keeper) GetV2Counterparty(ctx sdk.Context, portID string, channelID string) (clientv2types.CounterpartyInfo, bool) { + channel, found := k.GetChannel(ctx, portID, channelID) + if !found { + return clientv2types.CounterpartyInfo{}, false + } + + // Do not allow channel to be converted into a version 2 counterparty + // if the channel is not OPEN or if it is ORDERED + if channel.State != types.OPEN || channel.Ordering == types.ORDERED { + return clientv2types.CounterpartyInfo{}, false + } + connection, ok := k.connectionKeeper.GetConnection(ctx, channel.ConnectionHops[0]) + if !ok { + return clientv2types.CounterpartyInfo{}, false + } + merklePathPrefix := [][]byte{connection.Counterparty.Prefix.KeyPrefix, []byte("")} + + return clientv2types.NewCounterpartyInfo(merklePathPrefix, channel.Counterparty.ChannelId), true +} diff --git a/modules/core/04-channel/keeper/keeper_test.go b/modules/core/04-channel/keeper/keeper_test.go index 45d571cd99a..41ec71027bc 100644 --- a/modules/core/04-channel/keeper/keeper_test.go +++ b/modules/core/04-channel/keeper/keeper_test.go @@ -7,6 +7,7 @@ import ( testifysuite "github.com/stretchr/testify/suite" transfertypes "github.com/cosmos/ibc-go/v10/modules/apps/transfer/types" + clientv2types "github.com/cosmos/ibc-go/v10/modules/core/02-client/v2/types" "github.com/cosmos/ibc-go/v10/modules/core/04-channel/types" ibctesting "github.com/cosmos/ibc-go/v10/testing" ibcmock "github.com/cosmos/ibc-go/v10/testing/mock" @@ -30,64 +31,64 @@ func TestKeeperTestSuite(t *testing.T) { } // SetupTest creates a coordinator with 2 test chains. -func (suite *KeeperTestSuite) SetupTest() { - suite.coordinator = ibctesting.NewCoordinator(suite.T(), 3) - suite.chainA = suite.coordinator.GetChain(ibctesting.GetChainID(1)) - suite.chainB = suite.coordinator.GetChain(ibctesting.GetChainID(2)) - suite.chainC = suite.coordinator.GetChain(ibctesting.GetChainID(3)) +func (s *KeeperTestSuite) SetupTest() { + s.coordinator = ibctesting.NewCoordinator(s.T(), 3) + s.chainA = s.coordinator.GetChain(ibctesting.GetChainID(1)) + s.chainB = s.coordinator.GetChain(ibctesting.GetChainID(2)) + s.chainC = s.coordinator.GetChain(ibctesting.GetChainID(3)) // commit some blocks so that QueryProof returns valid proof (cannot return valid query if height <= 1) - suite.coordinator.CommitNBlocks(suite.chainA, 2) - suite.coordinator.CommitNBlocks(suite.chainB, 2) - suite.coordinator.CommitNBlocks(suite.chainC, 2) + s.coordinator.CommitNBlocks(s.chainA, 2) + s.coordinator.CommitNBlocks(s.chainB, 2) + s.coordinator.CommitNBlocks(s.chainC, 2) } // TestSetChannel create clients and connections on both chains. It tests for the non-existence // and existence of a channel in INIT on chainA. 
-func (suite *KeeperTestSuite) TestSetChannel() { +func (s *KeeperTestSuite) TestSetChannel() { // create client and connections on both chains - path := ibctesting.NewPath(suite.chainA, suite.chainB) + path := ibctesting.NewPath(s.chainA, s.chainB) path.SetupConnections() // check for channel to be created on chainA - found := suite.chainA.App.GetIBCKeeper().ChannelKeeper.HasChannel(suite.chainA.GetContext(), path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID) - suite.False(found) + found := s.chainA.App.GetIBCKeeper().ChannelKeeper.HasChannel(s.chainA.GetContext(), path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID) + s.False(found) path.SetChannelOrdered() // init channel err := path.EndpointA.ChanOpenInit() - suite.NoError(err) + s.Require().NoError(err) - storedChannel, found := suite.chainA.App.GetIBCKeeper().ChannelKeeper.GetChannel(suite.chainA.GetContext(), path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID) + storedChannel, found := s.chainA.App.GetIBCKeeper().ChannelKeeper.GetChannel(s.chainA.GetContext(), path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID) // counterparty channel id is empty after open init expectedCounterparty := types.NewCounterparty(path.EndpointB.ChannelConfig.PortID, "") - suite.True(found) - suite.Equal(types.INIT, storedChannel.State) - suite.Equal(types.ORDERED, storedChannel.Ordering) - suite.Equal(expectedCounterparty, storedChannel.Counterparty) + s.True(found) + s.Equal(types.INIT, storedChannel.State) + s.Equal(types.ORDERED, storedChannel.Ordering) + s.Equal(expectedCounterparty, storedChannel.Counterparty) } -func (suite *KeeperTestSuite) TestGetAppVersion() { +func (s *KeeperTestSuite) TestGetAppVersion() { // create client and connections on both chains - path := ibctesting.NewPath(suite.chainA, suite.chainB) + path := ibctesting.NewPath(s.chainA, s.chainB) path.SetupConnections() - version, found := suite.chainA.App.GetIBCKeeper().ChannelKeeper.GetAppVersion(suite.chainA.GetContext(), path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID) - suite.Require().False(found) - suite.Require().Empty(version) + version, found := s.chainA.App.GetIBCKeeper().ChannelKeeper.GetAppVersion(s.chainA.GetContext(), path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID) + s.Require().False(found) + s.Require().Empty(version) // init channel err := path.EndpointA.ChanOpenInit() - suite.NoError(err) + s.Require().NoError(err) - channelVersion, found := suite.chainA.App.GetIBCKeeper().ChannelKeeper.GetAppVersion(suite.chainA.GetContext(), path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID) - suite.Require().True(found) - suite.Require().Equal(ibcmock.Version, channelVersion) + channelVersion, found := s.chainA.App.GetIBCKeeper().ChannelKeeper.GetAppVersion(s.chainA.GetContext(), path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID) + s.Require().True(found) + s.Require().Equal(ibcmock.Version, channelVersion) } // TestGetAllChannelsWithPortPrefix verifies ports are filtered correctly using a port prefix. 
-func (suite *KeeperTestSuite) TestGetAllChannelsWithPortPrefix() { +func (s *KeeperTestSuite) TestGetAllChannelsWithPortPrefix() { const ( secondChannelID = "channel-1" differentChannelPortID = "different-portid" @@ -131,18 +132,18 @@ func (suite *KeeperTestSuite) TestGetAllChannelsWithPortPrefix() { } for _, tc := range tests { - suite.Run(tc.name, func() { - suite.SetupTest() + s.Run(tc.name, func() { + s.SetupTest() for _, ch := range tc.allChannels { - suite.chainA.GetSimApp().GetIBCKeeper().ChannelKeeper.SetChannel(suite.chainA.GetContext(), ch.PortId, ch.ChannelId, types.Channel{}) + s.chainA.GetSimApp().GetIBCKeeper().ChannelKeeper.SetChannel(s.chainA.GetContext(), ch.PortId, ch.ChannelId, types.Channel{}) } - ctxA := suite.chainA.GetContext() + ctxA := s.chainA.GetContext() - actualChannels := suite.chainA.GetSimApp().GetIBCKeeper().ChannelKeeper.GetAllChannelsWithPortPrefix(ctxA, tc.prefix) + actualChannels := s.chainA.GetSimApp().GetIBCKeeper().ChannelKeeper.GetAllChannelsWithPortPrefix(ctxA, tc.prefix) - suite.Require().True(containsAll(tc.expectedChannels, actualChannels)) + s.Require().True(containsAll(tc.expectedChannels, actualChannels)) }) } } @@ -167,8 +168,8 @@ func containsAll(expected, actual []types.IdentifiedChannel) bool { // TestGetAllChannels creates multiple channels on chain A through various connections // and tests their retrieval. 2 channels are on connA0 and 1 channel is on connA1 -func (suite *KeeperTestSuite) TestGetAllChannels() { - path := ibctesting.NewPath(suite.chainA, suite.chainB) +func (s *KeeperTestSuite) TestGetAllChannels() { + path := ibctesting.NewPath(s.chainA, s.chainB) path.Setup() // channel0 on first connection on chainA counterparty0 := types.Counterparty{ @@ -177,25 +178,25 @@ func (suite *KeeperTestSuite) TestGetAllChannels() { } // path1 creates a second channel on first connection on chainA - path1 := ibctesting.NewPath(suite.chainA, suite.chainB) + path1 := ibctesting.NewPath(s.chainA, s.chainB) path1.SetChannelOrdered() path1.EndpointA.ClientID = path.EndpointA.ClientID path1.EndpointB.ClientID = path.EndpointB.ClientID path1.EndpointA.ConnectionID = path.EndpointA.ConnectionID path1.EndpointB.ConnectionID = path.EndpointB.ConnectionID - suite.coordinator.CreateMockChannels(path1) + s.coordinator.CreateMockChannels(path1) counterparty1 := types.Counterparty{ PortId: path1.EndpointB.ChannelConfig.PortID, ChannelId: path1.EndpointB.ChannelID, } - path2 := ibctesting.NewPath(suite.chainA, suite.chainB) + path2 := ibctesting.NewPath(s.chainA, s.chainB) path2.SetupConnections() // path2 creates a second channel on chainA err := path2.EndpointA.ChanOpenInit() - suite.Require().NoError(err) + s.Require().NoError(err) // counterparty channel id is empty after open init counterparty2 := types.Counterparty{ @@ -222,27 +223,27 @@ func (suite *KeeperTestSuite) TestGetAllChannels() { types.NewIdentifiedChannel(path2.EndpointA.ChannelConfig.PortID, path2.EndpointA.ChannelID, channel2), } - ctxA := suite.chainA.GetContext() + ctxA := s.chainA.GetContext() - channels := suite.chainA.App.GetIBCKeeper().ChannelKeeper.GetAllChannels(ctxA) - suite.Require().Len(channels, len(expChannels)) - suite.Require().Equal(expChannels, channels) + channels := s.chainA.App.GetIBCKeeper().ChannelKeeper.GetAllChannels(ctxA) + s.Require().Len(channels, len(expChannels)) + s.Require().Equal(expChannels, channels) } // TestGetAllSequences sets all packet sequences for two different channels on chain A and // tests their retrieval. 
-func (suite *KeeperTestSuite) TestGetAllSequences() { - path := ibctesting.NewPath(suite.chainA, suite.chainB) +func (s *KeeperTestSuite) TestGetAllSequences() { + path := ibctesting.NewPath(s.chainA, s.chainB) path.Setup() - path1 := ibctesting.NewPath(suite.chainA, suite.chainB) + path1 := ibctesting.NewPath(s.chainA, s.chainB) path1.SetChannelOrdered() path1.EndpointA.ClientID = path.EndpointA.ClientID path1.EndpointB.ClientID = path.EndpointB.ClientID path1.EndpointA.ConnectionID = path.EndpointA.ConnectionID path1.EndpointB.ConnectionID = path.EndpointB.ConnectionID - suite.coordinator.CreateMockChannels(path1) + s.coordinator.CreateMockChannels(path1) seq1 := types.NewPacketSequence(path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, 1) seq2 := types.NewPacketSequence(path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, 2) @@ -251,39 +252,39 @@ func (suite *KeeperTestSuite) TestGetAllSequences() { // seq1 should be overwritten by seq2 expSeqs := []types.PacketSequence{seq2, seq3} - ctxA := suite.chainA.GetContext() + ctxA := s.chainA.GetContext() for _, seq := range []types.PacketSequence{seq1, seq2, seq3} { - suite.chainA.App.GetIBCKeeper().ChannelKeeper.SetNextSequenceSend(ctxA, seq.PortId, seq.ChannelId, seq.Sequence) - suite.chainA.App.GetIBCKeeper().ChannelKeeper.SetNextSequenceRecv(ctxA, seq.PortId, seq.ChannelId, seq.Sequence) - suite.chainA.App.GetIBCKeeper().ChannelKeeper.SetNextSequenceAck(ctxA, seq.PortId, seq.ChannelId, seq.Sequence) + s.chainA.App.GetIBCKeeper().ChannelKeeper.SetNextSequenceSend(ctxA, seq.PortId, seq.ChannelId, seq.Sequence) + s.chainA.App.GetIBCKeeper().ChannelKeeper.SetNextSequenceRecv(ctxA, seq.PortId, seq.ChannelId, seq.Sequence) + s.chainA.App.GetIBCKeeper().ChannelKeeper.SetNextSequenceAck(ctxA, seq.PortId, seq.ChannelId, seq.Sequence) } - sendSeqs := suite.chainA.App.GetIBCKeeper().ChannelKeeper.GetAllPacketSendSeqs(ctxA) - recvSeqs := suite.chainA.App.GetIBCKeeper().ChannelKeeper.GetAllPacketRecvSeqs(ctxA) - ackSeqs := suite.chainA.App.GetIBCKeeper().ChannelKeeper.GetAllPacketAckSeqs(ctxA) - suite.Len(sendSeqs, 2) - suite.Len(recvSeqs, 2) - suite.Len(ackSeqs, 2) + sendSeqs := s.chainA.App.GetIBCKeeper().ChannelKeeper.GetAllPacketSendSeqs(ctxA) + recvSeqs := s.chainA.App.GetIBCKeeper().ChannelKeeper.GetAllPacketRecvSeqs(ctxA) + ackSeqs := s.chainA.App.GetIBCKeeper().ChannelKeeper.GetAllPacketAckSeqs(ctxA) + s.Len(sendSeqs, 2) + s.Len(recvSeqs, 2) + s.Len(ackSeqs, 2) - suite.Equal(expSeqs, sendSeqs) - suite.Equal(expSeqs, recvSeqs) - suite.Equal(expSeqs, ackSeqs) + s.Equal(expSeqs, sendSeqs) + s.Equal(expSeqs, recvSeqs) + s.Equal(expSeqs, ackSeqs) } // TestGetAllPacketState creates a set of acks, packet commitments, and receipts on two different // channels on chain A and tests their retrieval. 
-func (suite *KeeperTestSuite) TestGetAllPacketState() { - path := ibctesting.NewPath(suite.chainA, suite.chainB) +func (s *KeeperTestSuite) TestGetAllPacketState() { + path := ibctesting.NewPath(s.chainA, s.chainB) path.Setup() - path1 := ibctesting.NewPath(suite.chainA, suite.chainB) + path1 := ibctesting.NewPath(s.chainA, s.chainB) path1.EndpointA.ClientID = path.EndpointA.ClientID path1.EndpointB.ClientID = path.EndpointB.ClientID path1.EndpointA.ConnectionID = path.EndpointA.ConnectionID path1.EndpointB.ConnectionID = path.EndpointB.ConnectionID - suite.coordinator.CreateMockChannels(path1) + s.coordinator.CreateMockChannels(path1) // channel 0 acks ack1 := types.NewPacketState(path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, 1, []byte("ack")) @@ -316,130 +317,130 @@ func (suite *KeeperTestSuite) TestGetAllPacketState() { expReceipts := []types.PacketState{rec1, rec2, rec3, rec4} expCommitments := []types.PacketState{comm1, comm2, comm3, comm4} - ctxA := suite.chainA.GetContext() + ctxA := s.chainA.GetContext() // set acknowledgements for _, ack := range []types.PacketState{ack1, ack2, ack2dup, ack3} { - suite.chainA.App.GetIBCKeeper().ChannelKeeper.SetPacketAcknowledgement(ctxA, ack.PortId, ack.ChannelId, ack.Sequence, ack.Data) + s.chainA.App.GetIBCKeeper().ChannelKeeper.SetPacketAcknowledgement(ctxA, ack.PortId, ack.ChannelId, ack.Sequence, ack.Data) } // set packet receipts for _, rec := range expReceipts { - suite.chainA.App.GetIBCKeeper().ChannelKeeper.SetPacketReceipt(ctxA, rec.PortId, rec.ChannelId, rec.Sequence) + s.chainA.App.GetIBCKeeper().ChannelKeeper.SetPacketReceipt(ctxA, rec.PortId, rec.ChannelId, rec.Sequence) } // set packet commitments for _, comm := range expCommitments { - suite.chainA.App.GetIBCKeeper().ChannelKeeper.SetPacketCommitment(ctxA, comm.PortId, comm.ChannelId, comm.Sequence, comm.Data) + s.chainA.App.GetIBCKeeper().ChannelKeeper.SetPacketCommitment(ctxA, comm.PortId, comm.ChannelId, comm.Sequence, comm.Data) } - acks := suite.chainA.App.GetIBCKeeper().ChannelKeeper.GetAllPacketAcks(ctxA) - receipts := suite.chainA.App.GetIBCKeeper().ChannelKeeper.GetAllPacketReceipts(ctxA) - commitments := suite.chainA.App.GetIBCKeeper().ChannelKeeper.GetAllPacketCommitments(ctxA) + acks := s.chainA.App.GetIBCKeeper().ChannelKeeper.GetAllPacketAcks(ctxA) + receipts := s.chainA.App.GetIBCKeeper().ChannelKeeper.GetAllPacketReceipts(ctxA) + commitments := s.chainA.App.GetIBCKeeper().ChannelKeeper.GetAllPacketCommitments(ctxA) - suite.Require().Len(acks, len(expAcks)) - suite.Require().Len(commitments, len(expCommitments)) - suite.Require().Len(receipts, len(expReceipts)) + s.Require().Len(acks, len(expAcks)) + s.Require().Len(commitments, len(expCommitments)) + s.Require().Len(receipts, len(expReceipts)) - suite.Require().Equal(expAcks, acks) - suite.Require().Equal(expReceipts, receipts) - suite.Require().Equal(expCommitments, commitments) + s.Require().Equal(expAcks, acks) + s.Require().Equal(expReceipts, receipts) + s.Require().Equal(expCommitments, commitments) } // TestSetSequence verifies that the keeper correctly sets the sequence counters. 
-func (suite *KeeperTestSuite) TestSetSequence() { - path := ibctesting.NewPath(suite.chainA, suite.chainB) +func (s *KeeperTestSuite) TestSetSequence() { + path := ibctesting.NewPath(s.chainA, s.chainB) path.Setup() - ctxA := suite.chainA.GetContext() + ctxA := s.chainA.GetContext() one := uint64(1) // initialized channel has next send seq of 1 - seq, found := suite.chainA.App.GetIBCKeeper().ChannelKeeper.GetNextSequenceSend(ctxA, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID) - suite.True(found) - suite.Equal(one, seq) + seq, found := s.chainA.App.GetIBCKeeper().ChannelKeeper.GetNextSequenceSend(ctxA, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID) + s.True(found) + s.Equal(one, seq) // initialized channel has next seq recv of 1 - seq, found = suite.chainA.App.GetIBCKeeper().ChannelKeeper.GetNextSequenceRecv(ctxA, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID) - suite.True(found) - suite.Equal(one, seq) + seq, found = s.chainA.App.GetIBCKeeper().ChannelKeeper.GetNextSequenceRecv(ctxA, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID) + s.True(found) + s.Equal(one, seq) // initialized channel has next seq ack of - seq, found = suite.chainA.App.GetIBCKeeper().ChannelKeeper.GetNextSequenceAck(ctxA, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID) - suite.True(found) - suite.Equal(one, seq) + seq, found = s.chainA.App.GetIBCKeeper().ChannelKeeper.GetNextSequenceAck(ctxA, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID) + s.True(found) + s.Equal(one, seq) nextSeqSend, nextSeqRecv, nextSeqAck := uint64(10), uint64(10), uint64(10) - suite.chainA.App.GetIBCKeeper().ChannelKeeper.SetNextSequenceSend(ctxA, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, nextSeqSend) - suite.chainA.App.GetIBCKeeper().ChannelKeeper.SetNextSequenceRecv(ctxA, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, nextSeqRecv) - suite.chainA.App.GetIBCKeeper().ChannelKeeper.SetNextSequenceAck(ctxA, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, nextSeqAck) + s.chainA.App.GetIBCKeeper().ChannelKeeper.SetNextSequenceSend(ctxA, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, nextSeqSend) + s.chainA.App.GetIBCKeeper().ChannelKeeper.SetNextSequenceRecv(ctxA, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, nextSeqRecv) + s.chainA.App.GetIBCKeeper().ChannelKeeper.SetNextSequenceAck(ctxA, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, nextSeqAck) - storedNextSeqSend, found := suite.chainA.App.GetIBCKeeper().ChannelKeeper.GetNextSequenceSend(ctxA, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID) - suite.True(found) - suite.Equal(nextSeqSend, storedNextSeqSend) + storedNextSeqSend, found := s.chainA.App.GetIBCKeeper().ChannelKeeper.GetNextSequenceSend(ctxA, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID) + s.True(found) + s.Equal(nextSeqSend, storedNextSeqSend) - storedNextSeqRecv, found := suite.chainA.App.GetIBCKeeper().ChannelKeeper.GetNextSequenceSend(ctxA, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID) - suite.True(found) - suite.Equal(nextSeqRecv, storedNextSeqRecv) + storedNextSeqRecv, found := s.chainA.App.GetIBCKeeper().ChannelKeeper.GetNextSequenceSend(ctxA, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID) + s.True(found) + s.Equal(nextSeqRecv, storedNextSeqRecv) - storedNextSeqAck, found := suite.chainA.App.GetIBCKeeper().ChannelKeeper.GetNextSequenceAck(ctxA, 
path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID) - suite.True(found) - suite.Equal(nextSeqAck, storedNextSeqAck) + storedNextSeqAck, found := s.chainA.App.GetIBCKeeper().ChannelKeeper.GetNextSequenceAck(ctxA, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID) + s.True(found) + s.Equal(nextSeqAck, storedNextSeqAck) } // TestGetAllPacketCommitmentsAtChannel verifies that the keeper returns all stored packet // commitments for a specific channel. The test will store consecutive commitments up to the // value of "seq" and then add non-consecutive up to the value of "maxSeq". A final commitment // with the value maxSeq + 1 is set on a different channel. -func (suite *KeeperTestSuite) TestGetAllPacketCommitmentsAtChannel() { - path := ibctesting.NewPath(suite.chainA, suite.chainB) +func (s *KeeperTestSuite) TestGetAllPacketCommitmentsAtChannel() { + path := ibctesting.NewPath(s.chainA, s.chainB) path.Setup() // create second channel - path1 := ibctesting.NewPath(suite.chainA, suite.chainB) + path1 := ibctesting.NewPath(s.chainA, s.chainB) path1.SetChannelOrdered() path1.EndpointA.ClientID = path.EndpointA.ClientID path1.EndpointB.ClientID = path.EndpointB.ClientID path1.EndpointA.ConnectionID = path.EndpointA.ConnectionID path1.EndpointB.ConnectionID = path.EndpointB.ConnectionID - suite.coordinator.CreateMockChannels(path1) + s.coordinator.CreateMockChannels(path1) - ctxA := suite.chainA.GetContext() + ctxA := s.chainA.GetContext() expectedSeqs := make(map[uint64]bool) hash := []byte("commitment") seq := uint64(15) maxSeq := uint64(25) - suite.Require().Greater(maxSeq, seq) + s.Require().Greater(maxSeq, seq) // create consecutive commitments for i := uint64(1); i < seq; i++ { - suite.chainA.App.GetIBCKeeper().ChannelKeeper.SetPacketCommitment(ctxA, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, i, hash) + s.chainA.App.GetIBCKeeper().ChannelKeeper.SetPacketCommitment(ctxA, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, i, hash) expectedSeqs[i] = true } // add non-consecutive commitments for i := seq; i < maxSeq; i += 2 { - suite.chainA.App.GetIBCKeeper().ChannelKeeper.SetPacketCommitment(ctxA, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, i, hash) + s.chainA.App.GetIBCKeeper().ChannelKeeper.SetPacketCommitment(ctxA, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, i, hash) expectedSeqs[i] = true } // add sequence on different channel/port - suite.chainA.App.GetIBCKeeper().ChannelKeeper.SetPacketCommitment(ctxA, path1.EndpointA.ChannelConfig.PortID, path1.EndpointA.ChannelID, maxSeq+1, hash) + s.chainA.App.GetIBCKeeper().ChannelKeeper.SetPacketCommitment(ctxA, path1.EndpointA.ChannelConfig.PortID, path1.EndpointA.ChannelID, maxSeq+1, hash) - commitments := suite.chainA.App.GetIBCKeeper().ChannelKeeper.GetAllPacketCommitmentsAtChannel(ctxA, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID) + commitments := s.chainA.App.GetIBCKeeper().ChannelKeeper.GetAllPacketCommitmentsAtChannel(ctxA, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID) - suite.Equal(len(expectedSeqs), len(commitments)) + s.Len(commitments, len(expectedSeqs)) // ensure above for loops occurred - suite.NotEqual(0, len(commitments)) + s.Require().NotEmpty(commitments) // verify that all the packet commitments were stored for _, packet := range commitments { - suite.True(expectedSeqs[packet.Sequence]) - suite.Equal(path.EndpointA.ChannelConfig.PortID, packet.PortId) - suite.Equal(path.EndpointA.ChannelID, packet.ChannelId) 
- suite.Equal(hash, packet.Data) + s.True(expectedSeqs[packet.Sequence]) + s.Equal(path.EndpointA.ChannelConfig.PortID, packet.PortId) + s.Equal(path.EndpointA.ChannelID, packet.ChannelId) + s.Equal(hash, packet.Data) // prevent duplicates from passing checks expectedSeqs[packet.Sequence] = false @@ -448,22 +449,95 @@ func (suite *KeeperTestSuite) TestGetAllPacketCommitmentsAtChannel() { // TestSetPacketAcknowledgement verifies that packet acknowledgements are correctly // set in the keeper. -func (suite *KeeperTestSuite) TestSetPacketAcknowledgement() { - path := ibctesting.NewPath(suite.chainA, suite.chainB) +func (s *KeeperTestSuite) TestSetPacketAcknowledgement() { + path := ibctesting.NewPath(s.chainA, s.chainB) path.Setup() - ctxA := suite.chainA.GetContext() + ctxA := s.chainA.GetContext() seq := uint64(10) - storedAckHash, found := suite.chainA.App.GetIBCKeeper().ChannelKeeper.GetPacketAcknowledgement(ctxA, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, seq) - suite.Require().False(found) - suite.Require().Nil(storedAckHash) + storedAckHash, found := s.chainA.App.GetIBCKeeper().ChannelKeeper.GetPacketAcknowledgement(ctxA, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, seq) + s.Require().False(found) + s.Require().Nil(storedAckHash) ackHash := []byte("ackhash") - suite.chainA.App.GetIBCKeeper().ChannelKeeper.SetPacketAcknowledgement(ctxA, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, seq, ackHash) + s.chainA.App.GetIBCKeeper().ChannelKeeper.SetPacketAcknowledgement(ctxA, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, seq, ackHash) - storedAckHash, found = suite.chainA.App.GetIBCKeeper().ChannelKeeper.GetPacketAcknowledgement(ctxA, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, seq) - suite.Require().True(found) - suite.Require().Equal(ackHash, storedAckHash) - suite.Require().True(suite.chainA.App.GetIBCKeeper().ChannelKeeper.HasPacketAcknowledgement(ctxA, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, seq)) + storedAckHash, found = s.chainA.App.GetIBCKeeper().ChannelKeeper.GetPacketAcknowledgement(ctxA, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, seq) + s.Require().True(found) + s.Require().Equal(ackHash, storedAckHash) + s.Require().True(s.chainA.App.GetIBCKeeper().ChannelKeeper.HasPacketAcknowledgement(ctxA, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, seq)) +} + +// TestGetV2Counterparty verifies that the v2 counterparty is correctly retrieved from v1 channel. 
+func (s *KeeperTestSuite) TestGetV2Counterparty() { + var ( + path *ibctesting.Path + expCounterparty clientv2types.CounterpartyInfo + ) + testCases := []struct { + name string + malleate func() + }{ + { + name: "success", + malleate: func() {}, + }, + { + name: "channel not found", + malleate: func() { + path.EndpointA.ChannelID = "fake-channel" + expCounterparty = clientv2types.CounterpartyInfo{} + }, + }, + { + name: "channel not OPEN", + malleate: func() { + channel, ok := s.chainA.App.GetIBCKeeper().ChannelKeeper.GetChannel(s.chainA.GetContext(), path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID) + s.Require().True(ok) + channel.State = types.CLOSED + s.chainA.App.GetIBCKeeper().ChannelKeeper.SetChannel(s.chainA.GetContext(), path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, channel) + expCounterparty = clientv2types.CounterpartyInfo{} + }, + }, + { + name: "channel not UNORDERED", + malleate: func() { + channel, ok := s.chainA.App.GetIBCKeeper().ChannelKeeper.GetChannel(s.chainA.GetContext(), path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID) + s.Require().True(ok) + channel.Ordering = types.ORDERED + s.chainA.App.GetIBCKeeper().ChannelKeeper.SetChannel(s.chainA.GetContext(), path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, channel) + expCounterparty = clientv2types.CounterpartyInfo{} + }, + }, + { + name: "connection not found", + malleate: func() { + channel, ok := s.chainA.App.GetIBCKeeper().ChannelKeeper.GetChannel(s.chainA.GetContext(), path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID) + s.Require().True(ok) + channel.ConnectionHops = []string{"fake-connection"} + s.chainA.App.GetIBCKeeper().ChannelKeeper.SetChannel(s.chainA.GetContext(), path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, channel) + expCounterparty = clientv2types.CounterpartyInfo{} + }, + }, + } + + for _, tc := range testCases { + s.Run(tc.name, func() { + s.SetupTest() + path = ibctesting.NewPath(s.chainA, s.chainB) + path.Setup() + + expCounterparty = clientv2types.CounterpartyInfo{ + ClientId: path.EndpointB.ChannelID, + MerklePrefix: [][]byte{[]byte("ibc"), []byte("")}, + } + + tc.malleate() + + counterparty, ok := s.chainA.App.GetIBCKeeper().ChannelKeeper.GetV2Counterparty(s.chainA.GetContext(), path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID) + s.Require().Equal(expCounterparty, counterparty) + s.Require().Equal(ok, !reflect.DeepEqual(expCounterparty, clientv2types.CounterpartyInfo{})) + }) + } } diff --git a/modules/core/04-channel/keeper/migrations.go b/modules/core/04-channel/keeper/migrations.go index 09b7a9ec2f4..76380d5fc8c 100644 --- a/modules/core/04-channel/keeper/migrations.go +++ b/modules/core/04-channel/keeper/migrations.go @@ -3,7 +3,7 @@ package keeper import ( sdk "github.com/cosmos/cosmos-sdk/types" - v10 "github.com/cosmos/ibc-go/v10/modules/core/04-channel/migrations/v10" + "github.com/cosmos/ibc-go/v10/modules/core/04-channel/migrations/v10" ) // Migrator is a struct for handling in-place store migrations. 
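// Sketch (not part of the patch): deriving an IBC v2 counterparty from an existing v1
// channel via the GetV2Counterparty helper added to the channel keeper above. As the
// TestGetV2Counterparty cases show, aliasing only succeeds when the channel exists, is
// OPEN, is UNORDERED, and its connection is stored; the result reuses the counterparty
// channel ID as the v2 client ID and the connection counterparty's key prefix as the
// merkle path prefix. The package and function names below are illustrative only.
package aliasing

import (
	"fmt"

	sdk "github.com/cosmos/cosmos-sdk/types"

	clientv2types "github.com/cosmos/ibc-go/v10/modules/core/02-client/v2/types"
	channelkeeper "github.com/cosmos/ibc-go/v10/modules/core/04-channel/keeper"
)

// CounterpartyForChannel wraps GetV2Counterparty and reports why aliasing can fail.
func CounterpartyForChannel(ctx sdk.Context, k *channelkeeper.Keeper, portID, channelID string) (clientv2types.CounterpartyInfo, error) {
	counterparty, ok := k.GetV2Counterparty(ctx, portID, channelID)
	if !ok {
		return clientv2types.CounterpartyInfo{}, fmt.Errorf("channel %s on port %s cannot be aliased: it must exist, be OPEN, be UNORDERED, and sit on a stored connection", channelID, portID)
	}
	return counterparty, nil
}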
diff --git a/modules/core/04-channel/keeper/packet.go b/modules/core/04-channel/keeper/packet.go index 909bb4b5404..4be6c2b966b 100644 --- a/modules/core/04-channel/keeper/packet.go +++ b/modules/core/04-channel/keeper/packet.go @@ -243,6 +243,9 @@ func (k *Keeper) applyReplayProtection(ctx sdk.Context, packet types.Packet, cha // incrementing nextSequenceRecv and storing under this chain's channelEnd identifiers // Since this is the receiving chain, our channelEnd is packet's destination port and channel k.SetNextSequenceRecv(ctx, packet.GetDestPort(), packet.GetDestChannel(), nextSequenceRecv) + + default: + return errorsmod.Wrapf(types.ErrInvalidChannelOrdering, "invalid channel ordering: %s", channel.Ordering) } return nil @@ -426,7 +429,6 @@ func (k *Keeper) AcknowledgePacket( // incrementing NextSequenceAck and storing under this chain's channelEnd identifiers // Since this is the original sending chain, our channelEnd is packet's source port and channel k.SetNextSequenceAck(ctx, packet.GetSourcePort(), packet.GetSourceChannel(), nextSequenceAck) - } // Delete packet commitment, since the packet has been acknowledged, the commitement is no longer necessary diff --git a/modules/core/04-channel/keeper/packet_test.go b/modules/core/04-channel/keeper/packet_test.go index 85b992d7215..0ee0d487e15 100644 --- a/modules/core/04-channel/keeper/packet_test.go +++ b/modules/core/04-channel/keeper/packet_test.go @@ -29,7 +29,7 @@ var ( ) // TestSendPacket tests SendPacket from chainA to chainB -func (suite *KeeperTestSuite) TestSendPacket() { +func (s *KeeperTestSuite) TestSendPacket() { var ( path *ibctesting.Path sourcePort string @@ -54,7 +54,7 @@ func (suite *KeeperTestSuite) TestSendPacket() { sourceChannel = path.EndpointA.ChannelID // swap client with solo machine - solomachine := ibctesting.NewSolomachine(suite.T(), suite.chainA.Codec, "solomachinesingle", "testing", 1) + solomachine := ibctesting.NewSolomachine(s.T(), s.chainA.Codec, "solomachinesingle", "testing", 1) path.EndpointA.ClientID = clienttypes.FormatClientIdentifier(exported.Solomachine, 10) path.EndpointA.SetClientState(solomachine.ClientState()) path.EndpointA.UpdateConnection(func(c *connectiontypes.ConnectionEnd) { c.ClientId = path.EndpointA.ClientID }) @@ -65,7 +65,7 @@ func (suite *KeeperTestSuite) TestSendPacket() { sourceChannel = path.EndpointA.ChannelID // swap client with solomachine - solomachine := ibctesting.NewSolomachine(suite.T(), suite.chainA.Codec, "solomachinesingle", "testing", 1) + solomachine := ibctesting.NewSolomachine(s.T(), s.chainA.Codec, "solomachinesingle", "testing", 1) path.EndpointA.ClientID = clienttypes.FormatClientIdentifier(exported.Solomachine, 10) path.EndpointA.SetClientState(solomachine.ClientState()) @@ -121,11 +121,11 @@ func (suite *KeeperTestSuite) TestSendPacket() { connection := path.EndpointA.GetConnection() clientState := path.EndpointA.GetClientState() cs, ok := clientState.(*ibctm.ClientState) - suite.Require().True(ok) + s.Require().True(ok) // freeze client cs.FrozenHeight = clienttypes.NewHeight(0, 1) - suite.chainA.App.GetIBCKeeper().ClientKeeper.SetClientState(suite.chainA.GetContext(), connection.ClientId, cs) + s.chainA.App.GetIBCKeeper().ClientKeeper.SetClientState(s.chainA.GetContext(), connection.ClientId, cs) }, clienttypes.ErrClientNotActive}, {"client state zero height", func() { path.Setup() @@ -134,14 +134,14 @@ func (suite *KeeperTestSuite) TestSendPacket() { connection := path.EndpointA.GetConnection() clientState := path.EndpointA.GetClientState() cs, ok 
:= clientState.(*ibctm.ClientState) - suite.Require().True(ok) + s.Require().True(ok) // force a consensus state into the store at height zero to allow client status check to pass. consensusState := path.EndpointA.GetConsensusState(cs.LatestHeight) path.EndpointA.SetConsensusState(consensusState, clienttypes.ZeroHeight()) cs.LatestHeight = clienttypes.ZeroHeight() - suite.chainA.App.GetIBCKeeper().ClientKeeper.SetClientState(suite.chainA.GetContext(), connection.ClientId, cs) + s.chainA.App.GetIBCKeeper().ClientKeeper.SetClientState(s.chainA.GetContext(), connection.ClientId, cs) }, clienttypes.ErrInvalidHeight}, {"timeout height passed", func() { path.Setup() @@ -149,15 +149,15 @@ func (suite *KeeperTestSuite) TestSendPacket() { var ok bool timeoutHeight, ok = path.EndpointA.GetClientLatestHeight().(clienttypes.Height) - suite.Require().True(ok) + s.Require().True(ok) }, types.ErrTimeoutElapsed}, {"timeout timestamp passed", func() { path.Setup() sourceChannel = path.EndpointA.ChannelID connection := path.EndpointA.GetConnection() - timestamp, err := suite.chainA.App.GetIBCKeeper().ClientKeeper.GetClientTimestampAtHeight(suite.chainA.GetContext(), connection.ClientId, path.EndpointA.GetClientLatestHeight()) - suite.Require().NoError(err) + timestamp, err := s.chainA.App.GetIBCKeeper().ClientKeeper.GetClientTimestampAtHeight(s.chainA.GetContext(), connection.ClientId, path.EndpointA.GetClientLatestHeight()) + s.Require().NoError(err) timeoutHeight = disabledTimeoutHeight timeoutTimestamp = timestamp @@ -165,28 +165,28 @@ func (suite *KeeperTestSuite) TestSendPacket() { {"timeout timestamp passed with solomachine", func() { path.Setup() // swap client with solomachine - solomachine := ibctesting.NewSolomachine(suite.T(), suite.chainA.Codec, "solomachinesingle", "testing", 1) + solomachine := ibctesting.NewSolomachine(s.T(), s.chainA.Codec, "solomachinesingle", "testing", 1) path.EndpointA.ClientID = clienttypes.FormatClientIdentifier(exported.Solomachine, 10) path.EndpointA.SetClientState(solomachine.ClientState()) path.EndpointA.UpdateConnection(func(c *connectiontypes.ConnectionEnd) { c.ClientId = path.EndpointA.ClientID }) connection := path.EndpointA.GetConnection() - timestamp, err := suite.chainA.App.GetIBCKeeper().ClientKeeper.GetClientTimestampAtHeight(suite.chainA.GetContext(), connection.ClientId, path.EndpointA.GetClientLatestHeight()) - suite.Require().NoError(err) + timestamp, err := s.chainA.App.GetIBCKeeper().ClientKeeper.GetClientTimestampAtHeight(s.chainA.GetContext(), connection.ClientId, path.EndpointA.GetClientLatestHeight()) + s.Require().NoError(err) sourceChannel = path.EndpointA.ChannelID timeoutHeight = disabledTimeoutHeight timeoutTimestamp = timestamp }, types.ErrTimeoutElapsed}, {"next sequence send not found", func() { - path := ibctesting.NewPath(suite.chainA, suite.chainB) + path := ibctesting.NewPath(s.chainA, s.chainB) sourceChannel = path.EndpointA.ChannelID path.SetupConnections() // manually creating channel prevents next sequence from being set - suite.chainA.App.GetIBCKeeper().ChannelKeeper.SetChannel( - suite.chainA.GetContext(), + s.chainA.App.GetIBCKeeper().ChannelKeeper.SetChannel( + s.chainA.GetContext(), path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, types.NewChannel(types.OPEN, types.ORDERED, types.NewCounterparty(path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID), []string{path.EndpointA.ConnectionID}, path.EndpointA.ChannelConfig.Version), ) @@ -194,9 +194,9 @@ func (suite *KeeperTestSuite) TestSendPacket() { } for 
i, tc := range testCases { - suite.Run(fmt.Sprintf("Case %s, %d/%d tests", tc.msg, i, len(testCases)), func() { - suite.SetupTest() // reset - path = ibctesting.NewPath(suite.chainA, suite.chainB) + s.Run(fmt.Sprintf("Case %s, %d/%d tests", tc.msg, i, len(testCases)), func() { + s.SetupTest() // reset + path = ibctesting.NewPath(s.chainA, s.chainB) // set default send packet arguments // sourceChannel is set after path is setup @@ -209,19 +209,19 @@ func (suite *KeeperTestSuite) TestSendPacket() { tc.malleate() // only check if nextSequenceSend exists in no error case since it is a tested error case above. - expectedSequence, ok := suite.chainA.App.GetIBCKeeper().ChannelKeeper.GetNextSequenceSend(suite.chainA.GetContext(), sourcePort, sourceChannel) + expectedSequence, ok := s.chainA.App.GetIBCKeeper().ChannelKeeper.GetNextSequenceSend(s.chainA.GetContext(), sourcePort, sourceChannel) - sequence, err := suite.chainA.App.GetIBCKeeper().ChannelKeeper.SendPacket(suite.chainA.GetContext(), + sequence, err := s.chainA.App.GetIBCKeeper().ChannelKeeper.SendPacket(s.chainA.GetContext(), sourcePort, sourceChannel, timeoutHeight, timeoutTimestamp, packetData) if tc.expErr == nil { - suite.Require().NoError(err) + s.Require().NoError(err) // verify that the returned sequence matches expected value - suite.Require().True(ok) - suite.Require().Equal(expectedSequence, sequence, "send packet did not return the expected sequence of the outgoing packet") + s.Require().True(ok) + s.Require().Equal(expectedSequence, sequence, "send packet did not return the expected sequence of the outgoing packet") } else { - suite.Require().Error(err) - suite.Require().ErrorIs(err, tc.expErr) + s.Require().Error(err) + s.Require().ErrorIs(err, tc.expErr) } }) } @@ -230,7 +230,7 @@ func (suite *KeeperTestSuite) TestSendPacket() { // TestRecvPacket test RecvPacket on chainB. Since packet commitment verification will always // occur last (resource instensive), only tests expected to succeed and packet commitment // verification tests need to simulate sending a packet from chainA to chainB. 
-func (suite *KeeperTestSuite) TestRecvPacket() { +func (s *KeeperTestSuite) TestRecvPacket() { var ( path *ibctesting.Path packet types.Packet @@ -248,7 +248,7 @@ func (suite *KeeperTestSuite) TestRecvPacket() { path.Setup() sequence, err := path.EndpointA.SendPacket(defaultTimeoutHeight, disabledTimeoutTimestamp, ibctesting.MockPacketData) - suite.Require().NoError(err) + s.Require().NoError(err) packet = types.NewPacket(ibctesting.MockPacketData, sequence, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, defaultTimeoutHeight, disabledTimeoutTimestamp) }, nil, @@ -259,7 +259,7 @@ func (suite *KeeperTestSuite) TestRecvPacket() { // setup uses an UNORDERED channel path.Setup() sequence, err := path.EndpointA.SendPacket(defaultTimeoutHeight, disabledTimeoutTimestamp, ibctesting.MockPacketData) - suite.Require().NoError(err) + s.Require().NoError(err) packet = types.NewPacket(ibctesting.MockPacketData, sequence, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, defaultTimeoutHeight, disabledTimeoutTimestamp) }, nil, @@ -271,9 +271,9 @@ func (suite *KeeperTestSuite) TestRecvPacket() { path.Setup() // send 2 packets _, err := path.EndpointA.SendPacket(defaultTimeoutHeight, disabledTimeoutTimestamp, ibctesting.MockPacketData) - suite.Require().NoError(err) + s.Require().NoError(err) sequence, err := path.EndpointA.SendPacket(defaultTimeoutHeight, disabledTimeoutTimestamp, ibctesting.MockPacketData) - suite.Require().NoError(err) + s.Require().NoError(err) packet = types.NewPacket(ibctesting.MockPacketData, sequence, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, defaultTimeoutHeight, disabledTimeoutTimestamp) // attempts to receive packet 2 without receiving packet 1 }, @@ -286,11 +286,11 @@ func (suite *KeeperTestSuite) TestRecvPacket() { path.Setup() sequence, err := path.EndpointA.SendPacket(defaultTimeoutHeight, disabledTimeoutTimestamp, ibctesting.MockPacketData) - suite.Require().NoError(err) + s.Require().NoError(err) packet = types.NewPacket(ibctesting.MockPacketData, sequence, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, defaultTimeoutHeight, disabledTimeoutTimestamp) err = path.EndpointB.RecvPacket(packet) - suite.Require().NoError(err) + s.Require().NoError(err) }, types.ErrNoOpMsg, }, @@ -300,11 +300,11 @@ func (suite *KeeperTestSuite) TestRecvPacket() { // setup uses an UNORDERED channel path.Setup() sequence, err := path.EndpointA.SendPacket(defaultTimeoutHeight, disabledTimeoutTimestamp, ibctesting.MockPacketData) - suite.Require().NoError(err) + s.Require().NoError(err) packet = types.NewPacket(ibctesting.MockPacketData, sequence, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, defaultTimeoutHeight, disabledTimeoutTimestamp) err = path.EndpointB.RecvPacket(packet) - suite.Require().NoError(err) + s.Require().NoError(err) }, types.ErrNoOpMsg, }, @@ -317,9 +317,9 @@ func (suite *KeeperTestSuite) TestRecvPacket() { // send 2 packets _, err := path.EndpointA.SendPacket(defaultTimeoutHeight, disabledTimeoutTimestamp, ibctesting.MockPacketData) - suite.Require().NoError(err) + s.Require().NoError(err) sequence, err := path.EndpointA.SendPacket(defaultTimeoutHeight, disabledTimeoutTimestamp, 
ibctesting.MockPacketData) - suite.Require().NoError(err) + s.Require().NoError(err) packet = types.NewPacket(ibctesting.MockPacketData, sequence, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, defaultTimeoutHeight, disabledTimeoutTimestamp) // attempts to receive packet 2 without receiving packet 1 }, @@ -370,8 +370,8 @@ func (suite *KeeperTestSuite) TestRecvPacket() { path.Setup() // pass channel check - suite.chainB.App.GetIBCKeeper().ChannelKeeper.SetChannel( - suite.chainB.GetContext(), + s.chainB.App.GetIBCKeeper().ChannelKeeper.SetChannel( + s.chainB.GetContext(), path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, types.NewChannel(types.OPEN, types.ORDERED, types.NewCounterparty(path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID), []string{connIDB}, path.EndpointB.ChannelConfig.Version), ) @@ -386,11 +386,11 @@ func (suite *KeeperTestSuite) TestRecvPacket() { // connection on chainB is in INIT err := path.EndpointB.ConnOpenInit() - suite.Require().NoError(err) + s.Require().NoError(err) // pass channel check - suite.chainB.App.GetIBCKeeper().ChannelKeeper.SetChannel( - suite.chainB.GetContext(), + s.chainB.App.GetIBCKeeper().ChannelKeeper.SetChannel( + s.chainB.GetContext(), path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, types.NewChannel(types.OPEN, types.ORDERED, types.NewCounterparty(path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID), []string{path.EndpointB.ConnectionID}, path.EndpointB.ChannelConfig.Version), ) @@ -403,7 +403,7 @@ func (suite *KeeperTestSuite) TestRecvPacket() { func() { path.Setup() - packet = types.NewPacket(ibctesting.MockPacketData, 1, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, clienttypes.GetSelfHeight(suite.chainB.GetContext()), disabledTimeoutTimestamp) + packet = types.NewPacket(ibctesting.MockPacketData, 1, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, clienttypes.GetSelfHeight(s.chainB.GetContext()), disabledTimeoutTimestamp) }, types.ErrTimeoutElapsed, }, @@ -412,7 +412,7 @@ func (suite *KeeperTestSuite) TestRecvPacket() { func() { path.Setup() - packet = types.NewPacket(ibctesting.MockPacketData, 1, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, disabledTimeoutHeight, uint64(suite.chainB.GetContext().BlockTime().UnixNano())) + packet = types.NewPacket(ibctesting.MockPacketData, 1, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, disabledTimeoutHeight, uint64(s.chainB.GetContext().BlockTime().UnixNano())) }, types.ErrTimeoutElapsed, }, @@ -425,8 +425,8 @@ func (suite *KeeperTestSuite) TestRecvPacket() { path.EndpointB.ChannelID = ibctesting.FirstChannelID // manually creating channel prevents next recv sequence from being set - suite.chainB.App.GetIBCKeeper().ChannelKeeper.SetChannel( - suite.chainB.GetContext(), + s.chainB.App.GetIBCKeeper().ChannelKeeper.SetChannel( + s.chainB.GetContext(), path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, types.NewChannel(types.OPEN, types.ORDERED, types.NewCounterparty(path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID), []string{path.EndpointB.ConnectionID}, path.EndpointB.ChannelConfig.Version), ) @@ -434,12 +434,12 @@ func (suite *KeeperTestSuite) 
TestRecvPacket() { packet = types.NewPacket(ibctesting.MockPacketData, 1, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, defaultTimeoutHeight, disabledTimeoutTimestamp) // manually set packet commitment - suite.chainA.App.GetIBCKeeper().ChannelKeeper.SetPacketCommitment(suite.chainA.GetContext(), path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, packet.GetSequence(), types.CommitPacket(packet)) + s.chainA.App.GetIBCKeeper().ChannelKeeper.SetPacketCommitment(s.chainA.GetContext(), path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, packet.GetSequence(), types.CommitPacket(packet)) err := path.EndpointA.UpdateClient() - suite.Require().NoError(err) + s.Require().NoError(err) err = path.EndpointB.UpdateClient() - suite.Require().NoError(err) + s.Require().NoError(err) }, types.ErrSequenceReceiveNotFound, }, @@ -449,11 +449,11 @@ func (suite *KeeperTestSuite) TestRecvPacket() { path.Setup() sequence, err := path.EndpointA.SendPacket(defaultTimeoutHeight, disabledTimeoutTimestamp, ibctesting.MockPacketData) - suite.Require().NoError(err) + s.Require().NoError(err) packet = types.NewPacket(ibctesting.MockPacketData, sequence, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, defaultTimeoutHeight, disabledTimeoutTimestamp) // set recv seq start to indicate packet was processed in previous upgrade - suite.chainB.App.GetIBCKeeper().ChannelKeeper.SetRecvStartSequence(suite.chainB.GetContext(), path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, sequence+1) + s.chainB.App.GetIBCKeeper().ChannelKeeper.SetRecvStartSequence(s.chainB.GetContext(), path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, sequence+1) }, types.ErrPacketReceived, }, @@ -463,8 +463,8 @@ func (suite *KeeperTestSuite) TestRecvPacket() { path.Setup() sequence, err := path.EndpointA.SendPacket(defaultTimeoutHeight, disabledTimeoutTimestamp, ibctesting.MockPacketData) - suite.Require().NoError(err) - suite.chainB.App.GetIBCKeeper().ChannelKeeper.SetPacketReceipt(suite.chainB.GetContext(), path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, sequence) + s.Require().NoError(err) + s.chainB.App.GetIBCKeeper().ChannelKeeper.SetPacketReceipt(s.chainB.GetContext(), path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, sequence) packet = types.NewPacket(ibctesting.MockPacketData, sequence, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, defaultTimeoutHeight, disabledTimeoutTimestamp) }, types.ErrNoOpMsg, @@ -481,9 +481,9 @@ func (suite *KeeperTestSuite) TestRecvPacket() { } for _, tc := range testCases { - suite.Run(tc.name, func() { - suite.SetupTest() // reset - path = ibctesting.NewPath(suite.chainA, suite.chainB) + s.Run(tc.name, func() { + s.SetupTest() // reset + path = ibctesting.NewPath(s.chainA, s.chainB) tc.malleate() @@ -491,35 +491,35 @@ func (suite *KeeperTestSuite) TestRecvPacket() { packetKey := host.PacketCommitmentKey(packet.GetSourcePort(), packet.GetSourceChannel(), packet.GetSequence()) proof, proofHeight := path.EndpointA.QueryProof(packetKey) - channelVersion, err := suite.chainB.App.GetIBCKeeper().ChannelKeeper.RecvPacket(suite.chainB.GetContext(), packet, proof, proofHeight) + channelVersion, err := s.chainB.App.GetIBCKeeper().ChannelKeeper.RecvPacket(s.chainB.GetContext(), packet, proof, proofHeight) if tc.expError == nil { - 
suite.Require().NoError(err) - suite.Require().Equal(path.EndpointA.GetChannel().Version, channelVersion, "channel version is incorrect") + s.Require().NoError(err) + s.Require().Equal(path.EndpointA.GetChannel().Version, channelVersion, "channel version is incorrect") - channelB, _ := suite.chainB.App.GetIBCKeeper().ChannelKeeper.GetChannel(suite.chainB.GetContext(), packet.GetDestPort(), packet.GetDestChannel()) - nextSeqRecv, found := suite.chainB.App.GetIBCKeeper().ChannelKeeper.GetNextSequenceRecv(suite.chainB.GetContext(), packet.GetDestPort(), packet.GetDestChannel()) - suite.Require().True(found) - receipt, receiptStored := suite.chainB.App.GetIBCKeeper().ChannelKeeper.GetPacketReceipt(suite.chainB.GetContext(), packet.GetDestPort(), packet.GetDestChannel(), packet.GetSequence()) + channelB, _ := s.chainB.App.GetIBCKeeper().ChannelKeeper.GetChannel(s.chainB.GetContext(), packet.GetDestPort(), packet.GetDestChannel()) + nextSeqRecv, found := s.chainB.App.GetIBCKeeper().ChannelKeeper.GetNextSequenceRecv(s.chainB.GetContext(), packet.GetDestPort(), packet.GetDestChannel()) + s.Require().True(found) + receipt, receiptStored := s.chainB.App.GetIBCKeeper().ChannelKeeper.GetPacketReceipt(s.chainB.GetContext(), packet.GetDestPort(), packet.GetDestChannel(), packet.GetSequence()) if channelB.Ordering == types.ORDERED { - suite.Require().Equal(packet.GetSequence()+1, nextSeqRecv, "sequence not incremented in ordered channel") - suite.Require().False(receiptStored, "packet receipt stored on ORDERED channel") + s.Require().Equal(packet.GetSequence()+1, nextSeqRecv, "sequence not incremented in ordered channel") + s.Require().False(receiptStored, "packet receipt stored on ORDERED channel") } else { - suite.Require().Equal(uint64(1), nextSeqRecv, "sequence incremented for UNORDERED channel") - suite.Require().True(receiptStored, "packet receipt not stored after RecvPacket in UNORDERED channel") - suite.Require().Equal(string([]byte{byte(1)}), receipt, "packet receipt is not empty string") + s.Require().Equal(uint64(1), nextSeqRecv, "sequence incremented for UNORDERED channel") + s.Require().True(receiptStored, "packet receipt not stored after RecvPacket in UNORDERED channel") + s.Require().Equal(string([]byte{byte(1)}), receipt, "packet receipt is not empty string") } } else { - suite.Require().Error(err) - suite.Require().ErrorIs(err, tc.expError) - suite.Require().Equal("", channelVersion) + s.Require().Error(err) + s.Require().ErrorIs(err, tc.expError) + s.Require().Equal("", channelVersion) } }) } } -func (suite *KeeperTestSuite) TestWriteAcknowledgement() { +func (s *KeeperTestSuite) TestWriteAcknowledgement() { var ( path *ibctesting.Path ack exported.Acknowledgement @@ -555,7 +555,7 @@ func (suite *KeeperTestSuite) TestWriteAcknowledgement() { path.Setup() packet = types.NewPacket(ibctesting.MockPacketData, 1, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, defaultTimeoutHeight, disabledTimeoutTimestamp) ack = ibcmock.MockAcknowledgement - suite.chainB.App.GetIBCKeeper().ChannelKeeper.SetPacketAcknowledgement(suite.chainB.GetContext(), packet.GetDestPort(), packet.GetDestChannel(), packet.GetSequence(), ack.Acknowledgement()) + s.chainB.App.GetIBCKeeper().ChannelKeeper.SetPacketAcknowledgement(s.chainB.GetContext(), packet.GetDestPort(), packet.GetDestChannel(), packet.GetSequence(), ack.Acknowledgement()) }, types.ErrAcknowledgementExists, }, @@ -583,37 +583,37 @@ func (suite *KeeperTestSuite) 
TestWriteAcknowledgement() { path.Setup() sequence, err := path.EndpointA.SendPacket(defaultTimeoutHeight, disabledTimeoutTimestamp, ibctesting.MockPacketData) - suite.Require().NoError(err) + s.Require().NoError(err) packet = types.NewPacket(ibctesting.MockPacketData, sequence, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, defaultTimeoutHeight, disabledTimeoutTimestamp) ack = ibcmock.MockAcknowledgement // set recv seq start to indicate packet was processed in previous upgrade - suite.chainB.App.GetIBCKeeper().ChannelKeeper.SetRecvStartSequence(suite.chainB.GetContext(), path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, sequence+1) + s.chainB.App.GetIBCKeeper().ChannelKeeper.SetRecvStartSequence(s.chainB.GetContext(), path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, sequence+1) }, errorsmod.Wrap(types.ErrPacketReceived, ""), }, } for i, tc := range testCases { - suite.Run(fmt.Sprintf("Case %s, %d/%d tests", tc.msg, i, len(testCases)), func() { - suite.SetupTest() // reset - path = ibctesting.NewPath(suite.chainA, suite.chainB) + s.Run(fmt.Sprintf("Case %s, %d/%d tests", tc.msg, i, len(testCases)), func() { + s.SetupTest() // reset + path = ibctesting.NewPath(s.chainA, s.chainB) tc.malleate() - err := suite.chainB.App.GetIBCKeeper().ChannelKeeper.WriteAcknowledgement(suite.chainB.GetContext(), packet, ack) + err := s.chainB.App.GetIBCKeeper().ChannelKeeper.WriteAcknowledgement(s.chainB.GetContext(), packet, ack) if tc.expErr == nil { - suite.Require().NoError(err) + s.Require().NoError(err) } else { - suite.Require().Error(err) - suite.Require().ErrorIs(err, tc.expErr) + s.Require().Error(err) + s.Require().ErrorIs(err, tc.expErr) } }) } } // TestAcknowledgePacket tests the call AcknowledgePacket on chainA. 
-func (suite *KeeperTestSuite) TestAcknowledgePacket() { +func (s *KeeperTestSuite) TestAcknowledgePacket() { var ( path *ibctesting.Path packet types.Packet @@ -622,30 +622,30 @@ func (suite *KeeperTestSuite) TestAcknowledgePacket() { assertErr := func(errType *errorsmod.Error) func(commitment []byte, channelVersion string, err error) { return func(commitment []byte, channelVersion string, err error) { - suite.Require().Error(err) - suite.Require().ErrorIs(err, errType) - suite.Require().NotNil(commitment) - suite.Require().Equal("", channelVersion) + s.Require().Error(err) + s.Require().ErrorIs(err, errType) + s.Require().NotNil(commitment) + s.Require().Equal("", channelVersion) } } assertNoOp := func(commitment []byte, channelVersion string, err error) { - suite.Require().Error(err) - suite.Require().ErrorIs(err, types.ErrNoOpMsg) - suite.Require().Nil(commitment) - suite.Require().Equal("", channelVersion) + s.Require().Error(err) + s.Require().ErrorIs(err, types.ErrNoOpMsg) + s.Require().Nil(commitment) + s.Require().Equal("", channelVersion) } assertSuccess := func(seq func() uint64, msg string) func(commitment []byte, channelVersion string, err error) { return func(commitment []byte, channelVersion string, err error) { - suite.Require().NoError(err) - suite.Require().Nil(commitment) - suite.Require().Equal(path.EndpointA.GetChannel().Version, channelVersion) + s.Require().NoError(err) + s.Require().Nil(commitment) + s.Require().Equal(path.EndpointA.GetChannel().Version, channelVersion) - nextSequenceAck, found := suite.chainA.App.GetIBCKeeper().ChannelKeeper.GetNextSequenceAck(suite.chainA.GetContext(), packet.GetSourcePort(), packet.GetSourceChannel()) + nextSequenceAck, found := s.chainA.App.GetIBCKeeper().ChannelKeeper.GetNextSequenceAck(s.chainA.GetContext(), packet.GetSourcePort(), packet.GetSourceChannel()) - suite.Require().True(found) - suite.Require().Equal(seq(), nextSequenceAck, msg) + s.Require().True(found) + s.Require().Equal(seq(), nextSequenceAck, msg) } } @@ -663,12 +663,12 @@ func (suite *KeeperTestSuite) TestAcknowledgePacket() { // create packet commitment sequence, err := path.EndpointA.SendPacket(defaultTimeoutHeight, disabledTimeoutTimestamp, ibctesting.MockPacketData) - suite.Require().NoError(err) + s.Require().NoError(err) // create packet receipt and acknowledgement packet = types.NewPacket(ibctesting.MockPacketData, sequence, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, defaultTimeoutHeight, disabledTimeoutTimestamp) err = path.EndpointB.RecvPacket(packet) - suite.Require().NoError(err) + s.Require().NoError(err) }, expResult: assertSuccess(func() uint64 { return packet.GetSequence() + 1 }, "sequence not incremented in ordered channel"), }, @@ -680,12 +680,12 @@ func (suite *KeeperTestSuite) TestAcknowledgePacket() { // create packet commitment sequence, err := path.EndpointA.SendPacket(defaultTimeoutHeight, disabledTimeoutTimestamp, ibctesting.MockPacketData) - suite.Require().NoError(err) + s.Require().NoError(err) // create packet receipt and acknowledgement packet = types.NewPacket(ibctesting.MockPacketData, sequence, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, defaultTimeoutHeight, disabledTimeoutTimestamp) err = path.EndpointB.RecvPacket(packet) - suite.Require().NoError(err) + s.Require().NoError(err) }, expResult: assertSuccess(func() uint64 { return uint64(1) }, "sequence incremented 
for UNORDERED channel"), }, @@ -697,15 +697,15 @@ func (suite *KeeperTestSuite) TestAcknowledgePacket() { // create packet commitment sequence, err := path.EndpointA.SendPacket(defaultTimeoutHeight, disabledTimeoutTimestamp, ibctesting.MockPacketData) - suite.Require().NoError(err) + s.Require().NoError(err) // create packet receipt and acknowledgement packet = types.NewPacket(ibctesting.MockPacketData, sequence, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, defaultTimeoutHeight, disabledTimeoutTimestamp) err = path.EndpointB.RecvPacket(packet) - suite.Require().NoError(err) + s.Require().NoError(err) err = path.EndpointA.AcknowledgePacket(packet, ack) - suite.Require().NoError(err) + s.Require().NoError(err) }, expResult: assertNoOp, }, @@ -717,15 +717,15 @@ func (suite *KeeperTestSuite) TestAcknowledgePacket() { // create packet commitment sequence, err := path.EndpointA.SendPacket(defaultTimeoutHeight, disabledTimeoutTimestamp, ibctesting.MockPacketData) - suite.Require().NoError(err) + s.Require().NoError(err) // create packet receipt and acknowledgement packet = types.NewPacket(ibctesting.MockPacketData, sequence, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, defaultTimeoutHeight, disabledTimeoutTimestamp) err = path.EndpointB.RecvPacket(packet) - suite.Require().NoError(err) + s.Require().NoError(err) err = path.EndpointA.AcknowledgePacket(packet, ack) - suite.Require().NoError(err) + s.Require().NoError(err) }, expResult: assertNoOp, }, @@ -736,7 +736,7 @@ func (suite *KeeperTestSuite) TestAcknowledgePacket() { // create packet commitment sequence, err := path.EndpointA.SendPacket(defaultTimeoutHeight, disabledTimeoutTimestamp, ibctesting.MockPacketData) - suite.Require().NoError(err) + s.Require().NoError(err) // create packet packet = types.NewPacket(ibctesting.MockPacketData, sequence, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, defaultTimeoutHeight, disabledTimeoutTimestamp) @@ -754,24 +754,24 @@ func (suite *KeeperTestSuite) TestAcknowledgePacket() { path.EndpointB.Chain.App.GetIBCKeeper().ChannelKeeper.SetPacketAcknowledgement(path.EndpointB.Chain.GetContext(), path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, sequence, types.CommitAcknowledgement(ack)) path.EndpointB.Chain.NextBlock() - suite.Require().NoError(path.EndpointA.UpdateClient()) + s.Require().NoError(path.EndpointA.UpdateClient()) }, expResult: func(commitment []byte, channelVersion string, err error) { - suite.Require().Error(err) - suite.Require().ErrorIs(err, types.ErrInvalidAcknowledgement) - suite.Require().Equal("", channelVersion) - suite.Require().NotNil(commitment) + s.Require().Error(err) + s.Require().ErrorIs(err, types.ErrInvalidAcknowledgement) + s.Require().Equal("", channelVersion) + s.Require().NotNil(commitment) }, }, { name: "non-standard acknowledgement", malleate: func() { // setup uses an UNORDERED channel - suite.coordinator.Setup(path) + s.coordinator.Setup(path) // create packet commitment sequence, err := path.EndpointA.SendPacket(defaultTimeoutHeight, disabledTimeoutTimestamp, ibctesting.MockPacketData) - suite.Require().NoError(err) + s.Require().NoError(err) // create packet packet = types.NewPacket(ibctesting.MockPacketData, sequence, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, 
path.EndpointB.ChannelID, defaultTimeoutHeight, disabledTimeoutTimestamp) @@ -783,13 +783,13 @@ func (suite *KeeperTestSuite) TestAcknowledgePacket() { path.EndpointB.Chain.App.GetIBCKeeper().ChannelKeeper.SetPacketAcknowledgement(path.EndpointB.Chain.GetContext(), path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, sequence, types.CommitAcknowledgement(ack)) path.EndpointB.Chain.NextBlock() - suite.Require().NoError(path.EndpointA.UpdateClient()) + s.Require().NoError(path.EndpointA.UpdateClient()) }, expResult: func(commitment []byte, channelVersion string, err error) { - suite.Require().NoError(err) + s.Require().NoError(err) channel := path.EndpointA.GetChannel() - suite.Require().Equal(channel.Version, channelVersion) - suite.Require().Nil(commitment) + s.Require().Equal(channel.Version, channelVersion) + s.Require().Nil(commitment) }, }, { @@ -800,7 +800,7 @@ func (suite *KeeperTestSuite) TestAcknowledgePacket() { // create packet commitment sequence, err := path.EndpointA.SendPacket(defaultTimeoutHeight, disabledTimeoutTimestamp, ibctesting.MockPacketData) - suite.Require().NoError(err) + s.Require().NoError(err) packet = types.NewPacket(ibctesting.MockPacketData, sequence, ibctesting.InvalidID, ibctesting.InvalidID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, defaultTimeoutHeight, disabledTimeoutTimestamp) }, @@ -813,7 +813,7 @@ func (suite *KeeperTestSuite) TestAcknowledgePacket() { // create packet commitment sequence, err := path.EndpointA.SendPacket(defaultTimeoutHeight, disabledTimeoutTimestamp, ibctesting.MockPacketData) - suite.Require().NoError(err) + s.Require().NoError(err) packet = types.NewPacket(ibctesting.MockPacketData, sequence, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, defaultTimeoutHeight, disabledTimeoutTimestamp) @@ -828,7 +828,7 @@ func (suite *KeeperTestSuite) TestAcknowledgePacket() { // create packet commitment sequence, err := path.EndpointA.SendPacket(defaultTimeoutHeight, disabledTimeoutTimestamp, ibctesting.MockPacketData) - suite.Require().NoError(err) + s.Require().NoError(err) // use wrong port for dest packet = types.NewPacket(ibctesting.MockPacketData, sequence, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, ibctesting.InvalidID, path.EndpointB.ChannelID, defaultTimeoutHeight, disabledTimeoutTimestamp) @@ -842,7 +842,7 @@ func (suite *KeeperTestSuite) TestAcknowledgePacket() { // create packet commitment sequence, err := path.EndpointA.SendPacket(defaultTimeoutHeight, disabledTimeoutTimestamp, ibctesting.MockPacketData) - suite.Require().NoError(err) + s.Require().NoError(err) // use wrong channel for dest packet = types.NewPacket(ibctesting.MockPacketData, sequence, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, ibctesting.InvalidID, defaultTimeoutHeight, disabledTimeoutTimestamp) @@ -856,13 +856,13 @@ func (suite *KeeperTestSuite) TestAcknowledgePacket() { // create packet commitment sequence, err := path.EndpointA.SendPacket(defaultTimeoutHeight, disabledTimeoutTimestamp, ibctesting.MockPacketData) - suite.Require().NoError(err) + s.Require().NoError(err) packet = types.NewPacket(ibctesting.MockPacketData, sequence, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, defaultTimeoutHeight, disabledTimeoutTimestamp) // pass channel check - suite.chainA.App.GetIBCKeeper().ChannelKeeper.SetChannel( 
- suite.chainA.GetContext(), + s.chainA.App.GetIBCKeeper().ChannelKeeper.SetChannel( + s.chainA.GetContext(), path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, types.NewChannel(types.OPEN, types.ORDERED, types.NewCounterparty(path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID), []string{"connection-1000"}, path.EndpointA.GetChannel().Version), ) @@ -876,17 +876,17 @@ func (suite *KeeperTestSuite) TestAcknowledgePacket() { // create packet commitment sequence, err := path.EndpointA.SendPacket(defaultTimeoutHeight, disabledTimeoutTimestamp, ibctesting.MockPacketData) - suite.Require().NoError(err) + s.Require().NoError(err) packet = types.NewPacket(ibctesting.MockPacketData, sequence, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, defaultTimeoutHeight, disabledTimeoutTimestamp) // connection on chainA is in INIT err = path.EndpointA.ConnOpenInit() - suite.Require().NoError(err) + s.Require().NoError(err) // pass channel check - suite.chainA.App.GetIBCKeeper().ChannelKeeper.SetChannel( - suite.chainA.GetContext(), + s.chainA.App.GetIBCKeeper().ChannelKeeper.SetChannel( + s.chainA.GetContext(), path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, types.NewChannel(types.OPEN, types.ORDERED, types.NewCounterparty(path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID), []string{path.EndpointA.ConnectionID}, path.EndpointA.GetChannel().Version), ) @@ -912,7 +912,7 @@ func (suite *KeeperTestSuite) TestAcknowledgePacket() { // create packet commitment sequence, err := path.EndpointA.SendPacket(defaultTimeoutHeight, disabledTimeoutTimestamp, ibctesting.MockPacketData) - suite.Require().NoError(err) + s.Require().NoError(err) packet = types.NewPacket(ibctesting.MockPacketData, sequence, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, defaultTimeoutHeight, disabledTimeoutTimestamp) }, expResult: assertErr(commitmenttypes.ErrInvalidProof), @@ -925,12 +925,12 @@ func (suite *KeeperTestSuite) TestAcknowledgePacket() { // create packet commitment sequence, err := path.EndpointA.SendPacket(defaultTimeoutHeight, disabledTimeoutTimestamp, ibctesting.MockPacketData) - suite.Require().NoError(err) + s.Require().NoError(err) // create packet receipt and acknowledgement packet = types.NewPacket(ibctesting.MockPacketData, sequence, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, defaultTimeoutHeight, disabledTimeoutTimestamp) err = path.EndpointB.RecvPacket(packet) - suite.Require().NoError(err) + s.Require().NoError(err) packet.Data = []byte("invalid packet commitment") }, @@ -944,15 +944,15 @@ func (suite *KeeperTestSuite) TestAcknowledgePacket() { // create packet commitment sequence, err := path.EndpointA.SendPacket(defaultTimeoutHeight, disabledTimeoutTimestamp, ibctesting.MockPacketData) - suite.Require().NoError(err) + s.Require().NoError(err) packet = types.NewPacket(ibctesting.MockPacketData, sequence, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, defaultTimeoutHeight, disabledTimeoutTimestamp) err = path.EndpointB.RecvPacket(packet) - suite.Require().NoError(err) + s.Require().NoError(err) // manually delete the next sequence ack in the ibc store - storeKey := suite.chainA.GetSimApp().GetKey(exported.ModuleName) - ibcStore := 
suite.chainA.GetContext().KVStore(storeKey) + storeKey := s.chainA.GetSimApp().GetKey(exported.ModuleName) + ibcStore := s.chainA.GetContext().KVStore(storeKey) ibcStore.Delete(host.NextSequenceAckKey(path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID)) }, @@ -966,44 +966,44 @@ func (suite *KeeperTestSuite) TestAcknowledgePacket() { // create packet commitment sequence, err := path.EndpointA.SendPacket(defaultTimeoutHeight, disabledTimeoutTimestamp, ibctesting.MockPacketData) - suite.Require().NoError(err) + s.Require().NoError(err) // create packet acknowledgement packet = types.NewPacket(ibctesting.MockPacketData, sequence, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, defaultTimeoutHeight, disabledTimeoutTimestamp) err = path.EndpointB.RecvPacket(packet) - suite.Require().NoError(err) + s.Require().NoError(err) // set next sequence ack wrong - suite.chainA.App.GetIBCKeeper().ChannelKeeper.SetNextSequenceAck(suite.chainA.GetContext(), path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, 10) + s.chainA.App.GetIBCKeeper().ChannelKeeper.SetNextSequenceAck(s.chainA.GetContext(), path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, 10) }, expResult: assertErr(types.ErrPacketSequenceOutOfOrder), }, } for _, tc := range testCases { - suite.Run(tc.name, func() { - suite.SetupTest() // reset + s.Run(tc.name, func() { + s.SetupTest() // reset // reset ack ack = ibcmock.MockAcknowledgement.Acknowledgement() - path = ibctesting.NewPath(suite.chainA, suite.chainB) - ctx := suite.chainA.GetContext() + path = ibctesting.NewPath(s.chainA, s.chainB) + ctx := s.chainA.GetContext() tc.malleate() packetKey := host.PacketAcknowledgementKey(packet.GetDestPort(), packet.GetDestChannel(), packet.GetSequence()) proof, proofHeight := path.EndpointB.QueryProof(packetKey) - channelVersion, err := suite.chainA.App.GetIBCKeeper().ChannelKeeper.AcknowledgePacket(ctx, packet, ack, proof, proofHeight) + channelVersion, err := s.chainA.App.GetIBCKeeper().ChannelKeeper.AcknowledgePacket(ctx, packet, ack, proof, proofHeight) - commitment := suite.chainA.App.GetIBCKeeper().ChannelKeeper.GetPacketCommitment(ctx, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, packet.GetSequence()) + commitment := s.chainA.App.GetIBCKeeper().ChannelKeeper.GetPacketCommitment(ctx, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, packet.GetSequence()) tc.expResult(commitment, channelVersion, err) if tc.expEvents != nil { events := ctx.EventManager().ABCIEvents() expEvents := tc.expEvents(path) - ibctesting.AssertEvents(&suite.Suite, expEvents, events) + ibctesting.AssertEvents(&s.Suite, expEvents, events) } }) } diff --git a/modules/core/04-channel/keeper/timeout_test.go b/modules/core/04-channel/keeper/timeout_test.go index 0580e21252c..0a643172f35 100644 --- a/modules/core/04-channel/keeper/timeout_test.go +++ b/modules/core/04-channel/keeper/timeout_test.go @@ -21,7 +21,7 @@ import ( // TestTimeoutPacket test the TimeoutPacket call on chainA by ensuring the timeout has passed // on chainB, but that no ack has been written yet. Test cases expected to reach proof // verification must specify which proof to use using the ordered bool. 
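The ordered bool mentioned in the comment above decides which absence proof TimeoutPacket is given: ORDERED channels prove the counterparty's nextSequenceRecv, while UNORDERED channels prove that no packet receipt exists for the sequence. A minimal sketch reusing the imports from the previous sketch; timeoutProof is an illustrative helper, not an ibctesting function.

// timeoutProof queries chainB for the proof that TimeoutPacket verifies on chainA.
func timeoutProof(path *ibctesting.Path, packet types.Packet, ordered bool) ([]byte, clienttypes.Height) {
	if ordered {
		// ORDERED: prove the value of the counterparty's nextSequenceRecv,
		// showing the timed-out sequence was never received
		return path.EndpointB.QueryProof(host.NextSequenceRecvKey(packet.GetDestPort(), packet.GetDestChannel()))
	}
	// UNORDERED: prove the absence of a packet receipt for this sequence
	return path.EndpointB.QueryProof(host.PacketReceiptKey(packet.GetDestPort(), packet.GetDestChannel(), packet.GetSequence()))
}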
-func (suite *KeeperTestSuite) TestTimeoutPacket() { +func (s *KeeperTestSuite) TestTimeoutPacket() { var ( path *ibctesting.Path packet types.Packet @@ -36,28 +36,28 @@ func (suite *KeeperTestSuite) TestTimeoutPacket() { path.SetChannelOrdered() path.Setup() - timeoutHeight := clienttypes.GetSelfHeight(suite.chainB.GetContext()) - timeoutTimestamp := uint64(suite.chainB.GetContext().BlockTime().UnixNano()) + timeoutHeight := clienttypes.GetSelfHeight(s.chainB.GetContext()) + timeoutTimestamp := uint64(s.chainB.GetContext().BlockTime().UnixNano()) sequence, err := path.EndpointA.SendPacket(timeoutHeight, timeoutTimestamp, ibctesting.MockPacketData) - suite.Require().NoError(err) + s.Require().NoError(err) packet = types.NewPacket(ibctesting.MockPacketData, sequence, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, timeoutHeight, timeoutTimestamp) // need to update chainA's client representing chainB to prove missing ack err = path.EndpointA.UpdateClient() - suite.Require().NoError(err) + s.Require().NoError(err) }, nil}, {"success: UNORDERED", func() { ordered = false path.Setup() - timeoutHeight := clienttypes.GetSelfHeight(suite.chainB.GetContext()) + timeoutHeight := clienttypes.GetSelfHeight(s.chainB.GetContext()) sequence, err := path.EndpointA.SendPacket(timeoutHeight, disabledTimeoutTimestamp, ibctesting.MockPacketData) - suite.Require().NoError(err) + s.Require().NoError(err) packet = types.NewPacket(ibctesting.MockPacketData, sequence, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, timeoutHeight, disabledTimeoutTimestamp) // need to update chainA's client representing chainB to prove missing ack err = path.EndpointA.UpdateClient() - suite.Require().NoError(err) + s.Require().NoError(err) }, nil}, {"packet already timed out: ORDERED", func() { expError = types.ErrNoOpMsg @@ -65,35 +65,35 @@ func (suite *KeeperTestSuite) TestTimeoutPacket() { path.SetChannelOrdered() path.Setup() - timeoutHeight := clienttypes.GetSelfHeight(suite.chainB.GetContext()) - timeoutTimestamp := uint64(suite.chainB.GetContext().BlockTime().UnixNano()) + timeoutHeight := clienttypes.GetSelfHeight(s.chainB.GetContext()) + timeoutTimestamp := uint64(s.chainB.GetContext().BlockTime().UnixNano()) sequence, err := path.EndpointA.SendPacket(timeoutHeight, timeoutTimestamp, ibctesting.MockPacketData) - suite.Require().NoError(err) + s.Require().NoError(err) // need to update chainA's client representing chainB to prove missing ack err = path.EndpointA.UpdateClient() - suite.Require().NoError(err) + s.Require().NoError(err) packet = types.NewPacket(ibctesting.MockPacketData, sequence, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, timeoutHeight, timeoutTimestamp) err = path.EndpointA.TimeoutPacket(packet) - suite.Require().NoError(err) + s.Require().NoError(err) }, errorsmod.Wrap(types.ErrNoOpMsg, "")}, {"packet already timed out: UNORDERED", func() { expError = types.ErrNoOpMsg ordered = false path.Setup() - timeoutHeight := clienttypes.GetSelfHeight(suite.chainB.GetContext()) + timeoutHeight := clienttypes.GetSelfHeight(s.chainB.GetContext()) sequence, err := path.EndpointA.SendPacket(timeoutHeight, disabledTimeoutTimestamp, ibctesting.MockPacketData) - suite.Require().NoError(err) + s.Require().NoError(err) // need to update chainA's client representing chainB to prove missing ack 
err = path.EndpointA.UpdateClient() - suite.Require().NoError(err) + s.Require().NoError(err) packet = types.NewPacket(ibctesting.MockPacketData, sequence, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, timeoutHeight, disabledTimeoutTimestamp) err = path.EndpointA.TimeoutPacket(packet) - suite.Require().NoError(err) + s.Require().NoError(err) }, errorsmod.Wrap(types.ErrNoOpMsg, "")}, {"channel not found", func() { expError = types.ErrChannelNotFound @@ -116,8 +116,8 @@ func (suite *KeeperTestSuite) TestTimeoutPacket() { {"connection not found", func() { expError = connectiontypes.ErrConnectionNotFound // pass channel check - suite.chainA.App.GetIBCKeeper().ChannelKeeper.SetChannel( - suite.chainA.GetContext(), + s.chainA.App.GetIBCKeeper().ChannelKeeper.SetChannel( + s.chainA.GetContext(), path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, types.NewChannel(types.OPEN, types.ORDERED, types.NewCounterparty(path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID), []string{connIDA}, path.EndpointA.ChannelConfig.Version), ) @@ -127,10 +127,10 @@ func (suite *KeeperTestSuite) TestTimeoutPacket() { expError = types.ErrTimeoutNotReached path.Setup() sequence, err := path.EndpointA.SendPacket(defaultTimeoutHeight, disabledTimeoutTimestamp, ibctesting.MockPacketData) - suite.Require().NoError(err) + s.Require().NoError(err) packet = types.NewPacket(ibctesting.MockPacketData, sequence, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, defaultTimeoutHeight, disabledTimeoutTimestamp) err = path.EndpointA.UpdateClient() - suite.Require().NoError(err) + s.Require().NoError(err) }, errorsmod.Wrap(types.ErrTimeoutNotReached, "")}, {"packet already received ", func() { expError = types.ErrPacketReceived @@ -139,14 +139,14 @@ func (suite *KeeperTestSuite) TestTimeoutPacket() { path.Setup() nextSeqRecv = 2 - timeoutTimestamp := uint64(suite.chainB.GetContext().BlockTime().UnixNano()) + timeoutTimestamp := uint64(s.chainB.GetContext().BlockTime().UnixNano()) sequence, err := path.EndpointA.SendPacket(defaultTimeoutHeight, timeoutTimestamp, ibctesting.MockPacketData) - suite.Require().NoError(err) + s.Require().NoError(err) packet = types.NewPacket(ibctesting.MockPacketData, sequence, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, defaultTimeoutHeight, timeoutTimestamp) err = path.EndpointA.UpdateClient() - suite.Require().NoError(err) + s.Require().NoError(err) }, errorsmod.Wrap(types.ErrPacketReceived, "")}, {"packet hasn't been sent", func() { expError = types.ErrNoOpMsg @@ -154,9 +154,9 @@ func (suite *KeeperTestSuite) TestTimeoutPacket() { path.SetChannelOrdered() path.Setup() - packet = types.NewPacket(ibctesting.MockPacketData, 1, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, defaultTimeoutHeight, uint64(suite.chainB.GetContext().BlockTime().UnixNano())) + packet = types.NewPacket(ibctesting.MockPacketData, 1, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, defaultTimeoutHeight, uint64(s.chainB.GetContext().BlockTime().UnixNano())) err := path.EndpointA.UpdateClient() - suite.Require().NoError(err) + s.Require().NoError(err) }, errorsmod.Wrap(types.ErrNoOpMsg, "")}, {"next seq receive verification 
failed", func() { // skip error check, error occurs in light-clients @@ -167,13 +167,13 @@ func (suite *KeeperTestSuite) TestTimeoutPacket() { path.SetChannelOrdered() path.Setup() - timeoutHeight := clienttypes.GetSelfHeight(suite.chainB.GetContext()) + timeoutHeight := clienttypes.GetSelfHeight(s.chainB.GetContext()) sequence, err := path.EndpointA.SendPacket(timeoutHeight, disabledTimeoutTimestamp, ibctesting.MockPacketData) - suite.Require().NoError(err) + s.Require().NoError(err) packet = types.NewPacket(ibctesting.MockPacketData, sequence, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, timeoutHeight, disabledTimeoutTimestamp) err = path.EndpointA.UpdateClient() - suite.Require().NoError(err) + s.Require().NoError(err) }, errorsmod.Wrap(commitmenttypes.ErrInvalidProof, "")}, {"packet ack verification failed", func() { // skip error check, error occurs in light-clients @@ -183,27 +183,27 @@ func (suite *KeeperTestSuite) TestTimeoutPacket() { path.Setup() - timeoutHeight := clienttypes.GetSelfHeight(suite.chainB.GetContext()) + timeoutHeight := clienttypes.GetSelfHeight(s.chainB.GetContext()) sequence, err := path.EndpointA.SendPacket(timeoutHeight, disabledTimeoutTimestamp, ibctesting.MockPacketData) - suite.Require().NoError(err) + s.Require().NoError(err) packet = types.NewPacket(ibctesting.MockPacketData, sequence, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, timeoutHeight, disabledTimeoutTimestamp) err = path.EndpointA.UpdateClient() - suite.Require().NoError(err) + s.Require().NoError(err) }, errorsmod.Wrap(commitmenttypes.ErrInvalidProof, "")}, } for _, tc := range testCases { - suite.Run(tc.msg, func() { + s.Run(tc.msg, func() { var ( proof []byte proofHeight exported.Height ) - suite.SetupTest() // reset - expError = nil // must be expliticly changed by failed cases - nextSeqRecv = 1 // must be explicitly changed - path = ibctesting.NewPath(suite.chainA, suite.chainB) + s.SetupTest() // reset + expError = nil // must be expliticly changed by failed cases + nextSeqRecv = 1 // must be explicitly changed + path = ibctesting.NewPath(s.chainA, s.chainB) tc.malleate() @@ -218,18 +218,18 @@ func (suite *KeeperTestSuite) TestTimeoutPacket() { } } - channelVersion, err := suite.chainA.App.GetIBCKeeper().ChannelKeeper.TimeoutPacket(suite.chainA.GetContext(), packet, proof, proofHeight, nextSeqRecv) + channelVersion, err := s.chainA.App.GetIBCKeeper().ChannelKeeper.TimeoutPacket(s.chainA.GetContext(), packet, proof, proofHeight, nextSeqRecv) if tc.expErr == nil { - suite.Require().NoError(err) - suite.Require().Equal(path.EndpointA.GetChannel().Version, channelVersion) + s.Require().NoError(err) + s.Require().Equal(path.EndpointA.GetChannel().Version, channelVersion) } else { - suite.Require().Error(err) - suite.Require().ErrorIs(err, tc.expErr) - suite.Require().Equal("", channelVersion) + s.Require().Error(err) + s.Require().ErrorIs(err, tc.expErr) + s.Require().Equal("", channelVersion) // only check if expError is set, since not all error codes can be known if expError != nil { - suite.Require().True(errors.Is(err, expError)) + s.Require().True(errors.Is(err, expError)) } } }) @@ -239,7 +239,7 @@ func (suite *KeeperTestSuite) TestTimeoutPacket() { // TestTimeoutExecuted verifies that packet commitments are deleted. // In addition, the test verifies that the channel state // after a timeout is updated accordingly. 
-func (suite *KeeperTestSuite) TestTimeoutExecuted() { +func (s *KeeperTestSuite) TestTimeoutExecuted() { var ( path *ibctesting.Path packet types.Packet @@ -257,36 +257,36 @@ func (suite *KeeperTestSuite) TestTimeoutExecuted() { path.SetChannelOrdered() path.Setup() - timeoutHeight := clienttypes.GetSelfHeight(suite.chainB.GetContext()) - timeoutTimestamp := uint64(suite.chainB.GetContext().BlockTime().UnixNano()) + timeoutHeight := clienttypes.GetSelfHeight(s.chainB.GetContext()) + timeoutTimestamp := uint64(s.chainB.GetContext().BlockTime().UnixNano()) sequence, err := path.EndpointA.SendPacket(timeoutHeight, timeoutTimestamp, ibctesting.MockPacketData) - suite.Require().NoError(err) + s.Require().NoError(err) packet = types.NewPacket(ibctesting.MockPacketData, sequence, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, timeoutHeight, timeoutTimestamp) }, func(packetCommitment []byte, err error) { - suite.Require().NoError(err) - suite.Require().Nil(packetCommitment) + s.Require().NoError(err) + s.Require().Nil(packetCommitment) // Check channel has been closed channel := path.EndpointA.GetChannel() - suite.Require().Equal(channel.State, types.CLOSED) + s.Require().Equal(channel.State, types.CLOSED) }, nil, }, } for i, tc := range testCases { - suite.Run(fmt.Sprintf("Case %s, %d/%d tests", tc.msg, i, len(testCases)), func() { - suite.SetupTest() // reset - path = ibctesting.NewPath(suite.chainA, suite.chainB) - ctx := suite.chainA.GetContext() + s.Run(fmt.Sprintf("Case %s, %d/%d tests", tc.msg, i, len(testCases)), func() { + s.SetupTest() // reset + path = ibctesting.NewPath(s.chainA, s.chainB) + ctx := s.chainA.GetContext() tc.malleate() - err := suite.chainA.App.GetIBCKeeper().ChannelKeeper.TimeoutExecuted(ctx, path.EndpointA.GetChannel(), packet) - pc := suite.chainA.App.GetIBCKeeper().ChannelKeeper.GetPacketCommitment(ctx, packet.GetSourcePort(), packet.GetSourceChannel(), packet.GetSequence()) + err := s.chainA.App.GetIBCKeeper().ChannelKeeper.TimeoutExecuted(ctx, path.EndpointA.GetChannel(), packet) + pc := s.chainA.App.GetIBCKeeper().ChannelKeeper.GetPacketCommitment(ctx, packet.GetSourcePort(), packet.GetSourceChannel(), packet.GetSequence()) tc.expResult(pc, err) if tc.expEvents != nil { @@ -294,7 +294,7 @@ func (suite *KeeperTestSuite) TestTimeoutExecuted() { expEvents := tc.expEvents(path) - ibctesting.AssertEvents(&suite.Suite, expEvents, events) + ibctesting.AssertEvents(&s.Suite, expEvents, events) } }) } @@ -302,7 +302,7 @@ func (suite *KeeperTestSuite) TestTimeoutExecuted() { // TestTimeoutOnClose tests the call TimeoutOnClose on chainA by closing the corresponding // channel on chainB after the packet commitment has been created. 
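Compared with TimeoutPacket, TimeoutOnClose verifies one extra proof: that the counterparty channel end is CLOSED. A minimal sketch of how the proofs are gathered from chainB, reusing the imports above; host.ChannelKey is assumed here as the 24-host key for the counterparty channel end, and timeoutOnCloseProofs is an illustrative helper.

// timeoutOnCloseProofs returns the unreceived-packet proof and the closed-channel proof.
func timeoutOnCloseProofs(chainB *ibctesting.TestChain, packet types.Packet, ordered bool) (proof, closedProof []byte, proofHeight clienttypes.Height) {
	// proof that the counterparty channel end has been set to CLOSED
	closedProof, proofHeight = chainB.QueryProof(host.ChannelKey(packet.GetDestPort(), packet.GetDestChannel()))

	if ordered {
		// ORDERED: prove the counterparty's nextSequenceRecv
		proof, _ = chainB.QueryProof(host.NextSequenceRecvKey(packet.GetDestPort(), packet.GetDestChannel()))
	} else {
		// UNORDERED: prove the absence of a packet receipt
		proof, _ = chainB.QueryProof(host.PacketReceiptKey(packet.GetDestPort(), packet.GetDestChannel(), packet.GetSequence()))
	}
	return proof, closedProof, proofHeight
}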
-func (suite *KeeperTestSuite) TestTimeoutOnClose() { +func (s *KeeperTestSuite) TestTimeoutOnClose() { var ( path *ibctesting.Path packet types.Packet @@ -316,15 +316,15 @@ func (suite *KeeperTestSuite) TestTimeoutOnClose() { path.SetChannelOrdered() path.Setup() - timeoutHeight := clienttypes.GetSelfHeight(suite.chainB.GetContext()) - timeoutTimestamp := uint64(suite.chainB.GetContext().BlockTime().UnixNano()) + timeoutHeight := clienttypes.GetSelfHeight(s.chainB.GetContext()) + timeoutTimestamp := uint64(s.chainB.GetContext().BlockTime().UnixNano()) sequence, err := path.EndpointA.SendPacket(timeoutHeight, timeoutTimestamp, ibctesting.MockPacketData) - suite.Require().NoError(err) + s.Require().NoError(err) path.EndpointB.UpdateChannel(func(channel *types.Channel) { channel.State = types.CLOSED }) // need to update chainA's client representing chainB to prove missing ack err = path.EndpointA.UpdateClient() - suite.Require().NoError(err) + s.Require().NoError(err) packet = types.NewPacket(ibctesting.MockPacketData, sequence, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, timeoutHeight, timeoutTimestamp) }, nil}, @@ -332,14 +332,14 @@ func (suite *KeeperTestSuite) TestTimeoutOnClose() { ordered = false path.Setup() - timeoutHeight := clienttypes.GetSelfHeight(suite.chainB.GetContext()) + timeoutHeight := clienttypes.GetSelfHeight(s.chainB.GetContext()) sequence, err := path.EndpointA.SendPacket(timeoutHeight, disabledTimeoutTimestamp, ibctesting.MockPacketData) - suite.Require().NoError(err) + s.Require().NoError(err) path.EndpointB.UpdateChannel(func(channel *types.Channel) { channel.State = types.CLOSED }) // need to update chainA's client representing chainB to prove missing ack err = path.EndpointA.UpdateClient() - suite.Require().NoError(err) + s.Require().NoError(err) packet = types.NewPacket(ibctesting.MockPacketData, sequence, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, timeoutHeight, disabledTimeoutTimestamp) }, nil}, @@ -360,8 +360,8 @@ func (suite *KeeperTestSuite) TestTimeoutOnClose() { }, errorsmod.Wrap(types.ErrInvalidPacket, "")}, {"connection not found", func() { // pass channel check - suite.chainA.App.GetIBCKeeper().ChannelKeeper.SetChannel( - suite.chainA.GetContext(), + s.chainA.App.GetIBCKeeper().ChannelKeeper.SetChannel( + s.chainA.GetContext(), path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, types.NewChannel(types.OPEN, types.ORDERED, types.NewCounterparty(path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID), []string{connIDA}, path.EndpointA.ChannelConfig.Version), ) @@ -371,7 +371,7 @@ func (suite *KeeperTestSuite) TestTimeoutOnClose() { path.SetChannelOrdered() path.Setup() - packet = types.NewPacket(ibctesting.MockPacketData, 1, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, clienttypes.GetSelfHeight(suite.chainB.GetContext()), uint64(suite.chainB.GetContext().BlockTime().UnixNano())) + packet = types.NewPacket(ibctesting.MockPacketData, 1, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, clienttypes.GetSelfHeight(s.chainB.GetContext()), uint64(s.chainB.GetContext().BlockTime().UnixNano())) }, errorsmod.Wrap(types.ErrNoOpMsg, "")}, {"packet already received ORDERED", func() { path.SetChannelOrdered() @@ -379,15 +379,15 @@ func 
(suite *KeeperTestSuite) TestTimeoutOnClose() { ordered = true path.Setup() - timeoutHeight := clienttypes.GetSelfHeight(suite.chainB.GetContext()) - timeoutTimestamp := uint64(suite.chainB.GetContext().BlockTime().UnixNano()) + timeoutHeight := clienttypes.GetSelfHeight(s.chainB.GetContext()) + timeoutTimestamp := uint64(s.chainB.GetContext().BlockTime().UnixNano()) sequence, err := path.EndpointA.SendPacket(timeoutHeight, timeoutTimestamp, ibctesting.MockPacketData) - suite.Require().NoError(err) + s.Require().NoError(err) path.EndpointB.UpdateChannel(func(channel *types.Channel) { channel.State = types.CLOSED }) // need to update chainA's client representing chainB to prove missing ack err = path.EndpointA.UpdateClient() - suite.Require().NoError(err) + s.Require().NoError(err) packet = types.NewPacket(ibctesting.MockPacketData, sequence, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, timeoutHeight, timeoutTimestamp) }, errorsmod.Wrap(types.ErrInvalidPacket, "")}, @@ -396,11 +396,11 @@ func (suite *KeeperTestSuite) TestTimeoutOnClose() { path.SetChannelOrdered() path.Setup() - timeoutHeight := clienttypes.GetSelfHeight(suite.chainB.GetContext()) - timeoutTimestamp := uint64(suite.chainB.GetContext().BlockTime().UnixNano()) + timeoutHeight := clienttypes.GetSelfHeight(s.chainB.GetContext()) + timeoutTimestamp := uint64(s.chainB.GetContext().BlockTime().UnixNano()) sequence, err := path.EndpointA.SendPacket(timeoutHeight, timeoutTimestamp, ibctesting.MockPacketData) - suite.Require().NoError(err) + s.Require().NoError(err) packet = types.NewPacket(ibctesting.MockPacketData, sequence, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, timeoutHeight, timeoutTimestamp) }, ibcerrors.ErrInvalidHeight}, {"next seq receive verification failed ORDERED", func() { @@ -409,41 +409,41 @@ func (suite *KeeperTestSuite) TestTimeoutOnClose() { path.SetChannelOrdered() path.Setup() - timeoutHeight := clienttypes.GetSelfHeight(suite.chainB.GetContext()) - timeoutTimestamp := uint64(suite.chainB.GetContext().BlockTime().UnixNano()) + timeoutHeight := clienttypes.GetSelfHeight(s.chainB.GetContext()) + timeoutTimestamp := uint64(s.chainB.GetContext().BlockTime().UnixNano()) sequence, err := path.EndpointA.SendPacket(timeoutHeight, timeoutTimestamp, ibctesting.MockPacketData) - suite.Require().NoError(err) + s.Require().NoError(err) path.EndpointB.UpdateChannel(func(channel *types.Channel) { channel.State = types.CLOSED }) err = path.EndpointA.UpdateClient() - suite.Require().NoError(err) + s.Require().NoError(err) - packet = types.NewPacket(ibctesting.MockPacketData, sequence, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, clienttypes.GetSelfHeight(suite.chainB.GetContext()), uint64(suite.chainB.GetContext().BlockTime().UnixNano())) + packet = types.NewPacket(ibctesting.MockPacketData, sequence, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, clienttypes.GetSelfHeight(s.chainB.GetContext()), uint64(s.chainB.GetContext().BlockTime().UnixNano())) }, errorsmod.Wrap(types.ErrInvalidPacket, "")}, {"packet ack verification failed", func() { // set ordered to true providing the wrong proof for UNORDERED case ordered = true path.Setup() - timeoutHeight := clienttypes.GetSelfHeight(suite.chainB.GetContext()) + 
timeoutHeight := clienttypes.GetSelfHeight(s.chainB.GetContext()) sequence, err := path.EndpointA.SendPacket(timeoutHeight, disabledTimeoutTimestamp, ibctesting.MockPacketData) - suite.Require().NoError(err) + s.Require().NoError(err) path.EndpointB.UpdateChannel(func(channel *types.Channel) { channel.State = types.CLOSED }) err = path.EndpointA.UpdateClient() - suite.Require().NoError(err) + s.Require().NoError(err) packet = types.NewPacket(ibctesting.MockPacketData, sequence, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, timeoutHeight, disabledTimeoutTimestamp) }, errorsmod.Wrap(commitmenttypes.ErrInvalidProof, "")}, } for i, tc := range testCases { - suite.Run(fmt.Sprintf("Case %s, %d/%d tests", tc.msg, i, len(testCases)), func() { + s.Run(fmt.Sprintf("Case %s, %d/%d tests", tc.msg, i, len(testCases)), func() { var proof []byte - suite.SetupTest() // reset - nextSeqRecv = 1 // must be explicitly changed - path = ibctesting.NewPath(suite.chainA, suite.chainB) + s.SetupTest() // reset + nextSeqRecv = 1 // must be explicitly changed + path = ibctesting.NewPath(s.chainA, s.chainB) tc.malleate() @@ -451,16 +451,16 @@ func (suite *KeeperTestSuite) TestTimeoutOnClose() { unorderedPacketKey := host.PacketReceiptKey(packet.GetDestPort(), packet.GetDestChannel(), packet.GetSequence()) orderedPacketKey := host.NextSequenceRecvKey(packet.GetDestPort(), packet.GetDestChannel()) - closedProof, proofHeight := suite.chainB.QueryProof(channelKey) + closedProof, proofHeight := s.chainB.QueryProof(channelKey) if ordered { - proof, _ = suite.chainB.QueryProof(orderedPacketKey) + proof, _ = s.chainB.QueryProof(orderedPacketKey) } else { - proof, _ = suite.chainB.QueryProof(unorderedPacketKey) + proof, _ = s.chainB.QueryProof(unorderedPacketKey) } - channelVersion, err := suite.chainA.App.GetIBCKeeper().ChannelKeeper.TimeoutOnClose( - suite.chainA.GetContext(), + channelVersion, err := s.chainA.App.GetIBCKeeper().ChannelKeeper.TimeoutOnClose( + s.chainA.GetContext(), packet, proof, closedProof, @@ -469,12 +469,12 @@ func (suite *KeeperTestSuite) TestTimeoutOnClose() { ) if tc.expErr == nil { - suite.Require().NoError(err) - suite.Require().Equal(path.EndpointA.GetChannel().Version, channelVersion) + s.Require().NoError(err) + s.Require().Equal(path.EndpointA.GetChannel().Version, channelVersion) } else { - suite.Require().Error(err) - suite.Require().Equal("", channelVersion) - suite.Require().ErrorIs(err, tc.expErr) + s.Require().Error(err) + s.Require().Equal("", channelVersion) + s.Require().ErrorIs(err, tc.expErr) } }) } diff --git a/modules/core/04-channel/migrations/v10/store.go b/modules/core/04-channel/migrations/v10/store.go index 6a39fa46d39..4be1fc45f16 100644 --- a/modules/core/04-channel/migrations/v10/store.go +++ b/modules/core/04-channel/migrations/v10/store.go @@ -2,7 +2,7 @@ package v10 import ( "errors" - fmt "fmt" + "fmt" corestore "cosmossdk.io/core/store" storetypes "cosmossdk.io/store/types" @@ -28,23 +28,19 @@ const ( // PruningSequenceStartKey returns the store key for the pruning sequence start of a particular channel func PruningSequenceStartKey(portID, channelID string) []byte { - return fmt.Appendf(nil, "%s/%s", KeyPruningSequenceStart, channelPath(portID, channelID)) + return fmt.Appendf(nil, "%s/%s", KeyPruningSequenceStart, host.ChannelPath(portID, channelID)) } func ChannelUpgradeKey(portID, channelID string) []byte { - return fmt.Appendf(nil, "%s/%s/%s", KeyChannelUpgradePrefix, KeyUpgradePrefix, 
channelPath(portID, channelID)) + return fmt.Appendf(nil, "%s/%s/%s", KeyChannelUpgradePrefix, KeyUpgradePrefix, host.ChannelPath(portID, channelID)) } func ChannelUpgradeErrorKey(portID, channelID string) []byte { - return fmt.Appendf(nil, "%s/%s/%s", KeyChannelUpgradePrefix, KeyUpgradeErrorPrefix, channelPath(portID, channelID)) + return fmt.Appendf(nil, "%s/%s/%s", KeyChannelUpgradePrefix, KeyUpgradeErrorPrefix, host.ChannelPath(portID, channelID)) } func ChannelCounterpartyUpgradeKey(portID, channelID string) []byte { - return fmt.Appendf(nil, "%s/%s/%s", KeyChannelUpgradePrefix, KeyCounterpartyUpgrade, channelPath(portID, channelID)) -} - -func channelPath(portID, channelID string) string { - return fmt.Sprintf("%s/%s/%s/%s", host.KeyPortPrefix, portID, host.KeyChannelPrefix, channelID) + return fmt.Appendf(nil, "%s/%s/%s", KeyChannelUpgradePrefix, KeyCounterpartyUpgrade, host.ChannelPath(portID, channelID)) } // MigrateStore migrates the channel store to the ibc-go v10 store by: diff --git a/modules/core/04-channel/migrations/v10/store_test.go b/modules/core/04-channel/migrations/v10/store_test.go index 0a60a08ad14..beec472e234 100644 --- a/modules/core/04-channel/migrations/v10/store_test.go +++ b/modules/core/04-channel/migrations/v10/store_test.go @@ -13,7 +13,7 @@ import ( sdk "github.com/cosmos/cosmos-sdk/types" "github.com/cosmos/ibc-go/v10/modules/core/04-channel/keeper" - v10 "github.com/cosmos/ibc-go/v10/modules/core/04-channel/migrations/v10" + "github.com/cosmos/ibc-go/v10/modules/core/04-channel/migrations/v10" host "github.com/cosmos/ibc-go/v10/modules/core/24-host" ibcexported "github.com/cosmos/ibc-go/v10/modules/core/exported" ibctesting "github.com/cosmos/ibc-go/v10/testing" @@ -28,63 +28,63 @@ type MigrationsV10TestSuite struct { chainB *ibctesting.TestChain } -func (suite *MigrationsV10TestSuite) SetupTest() { - suite.coordinator = ibctesting.NewCoordinator(suite.T(), 2) - - suite.chainA = suite.coordinator.GetChain(ibctesting.GetChainID(1)) - suite.chainB = suite.coordinator.GetChain(ibctesting.GetChainID(2)) -} - func TestMigrationsV10TestSuite(t *testing.T) { testifysuite.Run(t, new(MigrationsV10TestSuite)) } +func (s *MigrationsV10TestSuite) SetupTest() { + s.coordinator = ibctesting.NewCoordinator(s.T(), 2) + + s.chainA = s.coordinator.GetChain(ibctesting.GetChainID(1)) + s.chainB = s.coordinator.GetChain(ibctesting.GetChainID(2)) +} + // set up channels that are still in upgrade state, and assert that the upgrade fails. 
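+// The v10 migration is expected to return an error while any channel is still in FLUSHING or
+// FLUSHCOMPLETE state; the happy path is covered by TestMigrateStore below, which will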
// migrate the store, and assert that the channels have been upgraded and state removed as expected -func (suite *MigrationsV10TestSuite) TestMigrateStoreWithUpgradingChannels() { - ctx := suite.chainA.GetContext() - cdc := suite.chainA.App.AppCodec() - channelKeeper := suite.chainA.GetSimApp().IBCKeeper.ChannelKeeper - storeService := runtime.NewKVStoreService(suite.chainA.GetSimApp().GetKey(ibcexported.StoreKey)) +func (s *MigrationsV10TestSuite) TestMigrateStoreWithUpgradingChannels() { + ctx := s.chainA.GetContext() + cdc := s.chainA.App.AppCodec() + channelKeeper := s.chainA.GetSimApp().IBCKeeper.ChannelKeeper + storeService := runtime.NewKVStoreService(s.chainA.GetSimApp().GetKey(ibcexported.StoreKey)) - path := ibctesting.NewPath(suite.chainA, suite.chainB) + path := ibctesting.NewPath(s.chainA, s.chainB) path.Setup() - path = ibctesting.NewPath(suite.chainA, suite.chainB) + path = ibctesting.NewPath(s.chainA, s.chainB) path.Setup() - preMigrationChannels := suite.getPreMigrationTypeChannels(ctx, cdc, storeService) - suite.Require().Len(preMigrationChannels, 2) + preMigrationChannels := s.getPreMigrationTypeChannels(ctx, cdc, storeService) + s.Require().Len(preMigrationChannels, 2) // Set up some channels with old state flushingChannel := preMigrationChannels[0] flushingChannel.State = v10.FLUSHING - suite.setPreMigrationChannel(ctx, cdc, storeService, flushingChannel) + s.setPreMigrationChannel(ctx, cdc, storeService, flushingChannel) flushCompleteChannel := preMigrationChannels[1] flushCompleteChannel.State = v10.FLUSHCOMPLETE - suite.setPreMigrationChannel(ctx, cdc, storeService, flushCompleteChannel) + s.setPreMigrationChannel(ctx, cdc, storeService, flushCompleteChannel) err := v10.MigrateStore(ctx, storeService, cdc, channelKeeper) - suite.Require().Errorf(err, "channel in state FLUSHING or FLUSHCOMPLETE found, to proceed with migration, please ensure no channels are currently upgrading") + s.Require().Errorf(err, "channel in state FLUSHING or FLUSHCOMPLETE found, to proceed with migration, please ensure no channels are currently upgrading") } // set up channels, upgrades, params, and prune sequences in the store, // migrate the store, and assert that the channels have been upgraded and state removed as expected -func (suite *MigrationsV10TestSuite) TestMigrateStore() { - ctx := suite.chainA.GetContext() - cdc := suite.chainA.App.AppCodec() - channelKeeper := suite.chainA.GetSimApp().IBCKeeper.ChannelKeeper - storeService := runtime.NewKVStoreService(suite.chainA.GetSimApp().GetKey(ibcexported.StoreKey)) +func (s *MigrationsV10TestSuite) TestMigrateStore() { + ctx := s.chainA.GetContext() + cdc := s.chainA.App.AppCodec() + channelKeeper := s.chainA.GetSimApp().IBCKeeper.ChannelKeeper + storeService := runtime.NewKVStoreService(s.chainA.GetSimApp().GetKey(ibcexported.StoreKey)) store := storeService.OpenKVStore(ctx) numberOfChannels := 100 for range numberOfChannels { - path := ibctesting.NewPath(suite.chainA, suite.chainB) + path := ibctesting.NewPath(s.chainA, s.chainB) path.Setup() } - preMigrationChannels := suite.getPreMigrationTypeChannels(ctx, cdc, storeService) - suite.Require().Len(preMigrationChannels, numberOfChannels) + preMigrationChannels := s.getPreMigrationTypeChannels(ctx, cdc, storeService) + s.Require().Len(preMigrationChannels, numberOfChannels) // Set up some channels with old state testChannel1 := preMigrationChannels[0] @@ -101,7 +101,7 @@ func (suite *MigrationsV10TestSuite) TestMigrateStore() { NextSequenceSend: 2, } err := 
store.Set(v10.ChannelUpgradeKey(testChannel1.PortId, testChannel1.ChannelId), cdc.MustMarshal(&upgrade)) - suite.Require().NoError(err) + s.Require().NoError(err) upgrade = v10.Upgrade{ Fields: v10.UpgradeFields{ Ordering: v10.ORDERED, @@ -112,7 +112,7 @@ func (suite *MigrationsV10TestSuite) TestMigrateStore() { NextSequenceSend: 20, } err = store.Set(v10.ChannelUpgradeKey(testChannel2.PortId, testChannel2.ChannelId), cdc.MustMarshal(&upgrade)) - suite.Require().NoError(err) + s.Require().NoError(err) counterpartyUpgrade := v10.Upgrade{ Fields: v10.UpgradeFields{ @@ -124,37 +124,37 @@ func (suite *MigrationsV10TestSuite) TestMigrateStore() { NextSequenceSend: 20, } err = store.Set(v10.ChannelCounterpartyUpgradeKey(testChannel2.PortId, testChannel2.ChannelId), cdc.MustMarshal(&counterpartyUpgrade)) - suite.Require().NoError(err) + s.Require().NoError(err) errorReceipt := v10.ErrorReceipt{ Sequence: 3, Message: "🤷", } err = store.Set(v10.ChannelUpgradeErrorKey(testChannel1.PortId, testChannel1.ChannelId), cdc.MustMarshal(&errorReceipt)) - suite.Require().NoError(err) + s.Require().NoError(err) // Set some params err = store.Set([]byte(v10.ParamsKey), cdc.MustMarshal(&v10.Params{UpgradeTimeout: v10.Timeout{ Timestamp: 1000, }})) - suite.Require().NoError(err) + s.Require().NoError(err) // Set some prune sequences err = store.Set(v10.PruningSequenceStartKey(testChannel1.PortId, testChannel1.ChannelId), sdk.Uint64ToBigEndian(0)) - suite.Require().NoError(err) + s.Require().NoError(err) err = store.Set(v10.PruningSequenceStartKey(testChannel2.PortId, testChannel2.ChannelId), sdk.Uint64ToBigEndian(42)) - suite.Require().NoError(err) + s.Require().NoError(err) err = v10.MigrateStore(ctx, storeService, cdc, channelKeeper) - suite.Require().NoError(err) + s.Require().NoError(err) - suite.assertChannelsUpgraded(ctx, suite.chainA.App.AppCodec(), storeService, channelKeeper, preMigrationChannels) - suite.assertNoUpgrades(ctx, storeService) - suite.assertNoParms(ctx, storeService) - suite.assertNoPruneSequences(ctx, storeService) + s.assertChannelsUpgraded(ctx, s.chainA.App.AppCodec(), storeService, channelKeeper, preMigrationChannels) + s.assertNoUpgrades(ctx, storeService) + s.assertNoParms(ctx, storeService) + s.assertNoPruneSequences(ctx, storeService) } -func (suite *MigrationsV10TestSuite) setPreMigrationChannel(ctx sdk.Context, cdc codec.Codec, storeService corestore.KVStoreService, channel v10.IdentifiedChannel) { +func (s *MigrationsV10TestSuite) setPreMigrationChannel(ctx sdk.Context, cdc codec.Codec, storeService corestore.KVStoreService, channel v10.IdentifiedChannel) { store := storeService.OpenKVStore(ctx) channelKey := host.ChannelKey(channel.PortId, channel.ChannelId) err := store.Set(channelKey, cdc.MustMarshal(&v10.Channel{ @@ -165,17 +165,17 @@ func (suite *MigrationsV10TestSuite) setPreMigrationChannel(ctx sdk.Context, cdc Version: channel.Version, UpgradeSequence: channel.UpgradeSequence, })) - suite.Require().NoError(err) + s.Require().NoError(err) } -func (suite *MigrationsV10TestSuite) getPreMigrationTypeChannels(ctx sdk.Context, cdc codec.Codec, storeService corestore.KVStoreService) []v10.IdentifiedChannel { +func (s *MigrationsV10TestSuite) getPreMigrationTypeChannels(ctx sdk.Context, cdc codec.Codec, storeService corestore.KVStoreService) []v10.IdentifiedChannel { var channels []v10.IdentifiedChannel iterator := storetypes.KVStorePrefixIterator(runtime.KVStoreAdapter(storeService.OpenKVStore(ctx)), []byte(host.KeyChannelEndPrefix)) for ; iterator.Valid(); iterator.Next() { 
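+		// each value stored under the channelEnds prefix is a pre-migration (v10) Channel;
+		// decode it and rebuild an IdentifiedChannel from the port/channel identifiers parsed out of the store key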
var channel v10.Channel err := cdc.Unmarshal(iterator.Value(), &channel) - suite.Require().NoError(err) + s.Require().NoError(err) portID, channelID, err := host.ParseChannelPath(string(iterator.Key())) identifiedChannel := v10.IdentifiedChannel{ @@ -188,45 +188,44 @@ func (suite *MigrationsV10TestSuite) getPreMigrationTypeChannels(ctx sdk.Context ChannelId: channelID, UpgradeSequence: channel.UpgradeSequence, } - suite.Require().NoError(err) + s.Require().NoError(err) channels = append(channels, identifiedChannel) - } iterator.Close() return channels } -func (suite *MigrationsV10TestSuite) assertChannelsUpgraded(ctx sdk.Context, cdc codec.Codec, storeService corestore.KVStoreService, channelKeeper *keeper.Keeper, preMigrationChannels []v10.IdentifiedChannel) { +func (s *MigrationsV10TestSuite) assertChannelsUpgraded(ctx sdk.Context, cdc codec.Codec, storeService corestore.KVStoreService, channelKeeper *keeper.Keeper, preMigrationChannels []v10.IdentifiedChannel) { // First check that all channels have gotten the old state pruned - newChannelsWithPreMigrationType := suite.getPreMigrationTypeChannels(ctx, cdc, storeService) + newChannelsWithPreMigrationType := s.getPreMigrationTypeChannels(ctx, cdc, storeService) for _, channel := range newChannelsWithPreMigrationType { - suite.Require().NotEqual(v10.FLUSHING, channel.State) - suite.Require().NotEqual(v10.FLUSHCOMPLETE, channel.State) - suite.Require().Equal(uint64(0), channel.UpgradeSequence) + s.Require().NotEqual(v10.FLUSHING, channel.State) + s.Require().NotEqual(v10.FLUSHCOMPLETE, channel.State) + s.Require().Equal(uint64(0), channel.UpgradeSequence) } // Then check that we can still receive all the channels newChannelsWithPostMigrationType := channelKeeper.GetAllChannels(ctx) for _, channel := range newChannelsWithPostMigrationType { - suite.Require().NoError(channel.ValidateBasic()) + s.Require().NoError(channel.ValidateBasic()) } - suite.Require().Equal(len(newChannelsWithPreMigrationType), len(newChannelsWithPostMigrationType)) - suite.Require().Equal(len(newChannelsWithPostMigrationType), len(preMigrationChannels)) + s.Require().Len(newChannelsWithPostMigrationType, len(newChannelsWithPreMigrationType)) + s.Require().Len(preMigrationChannels, len(newChannelsWithPostMigrationType)) } -func (suite *MigrationsV10TestSuite) assertNoUpgrades(ctx sdk.Context, storeService corestore.KVStoreService) { +func (s *MigrationsV10TestSuite) assertNoUpgrades(ctx sdk.Context, storeService corestore.KVStoreService) { store := storeService.OpenKVStore(ctx) - suite.Require().False(store.Has([]byte(v10.KeyChannelUpgradePrefix))) + s.Require().False(store.Has([]byte(v10.KeyChannelUpgradePrefix))) } -func (suite *MigrationsV10TestSuite) assertNoParms(ctx sdk.Context, storeService corestore.KVStoreService) { +func (s *MigrationsV10TestSuite) assertNoParms(ctx sdk.Context, storeService corestore.KVStoreService) { store := storeService.OpenKVStore(ctx) - suite.Require().False(store.Has([]byte(v10.ParamsKey))) + s.Require().False(store.Has([]byte(v10.ParamsKey))) } -func (suite *MigrationsV10TestSuite) assertNoPruneSequences(ctx sdk.Context, storeService corestore.KVStoreService) { +func (s *MigrationsV10TestSuite) assertNoPruneSequences(ctx sdk.Context, storeService corestore.KVStoreService) { store := storeService.OpenKVStore(ctx) - suite.Require().False(store.Has([]byte(v10.KeyPruningSequenceStart))) + s.Require().False(store.Has([]byte(v10.KeyPruningSequenceStart))) } diff --git a/modules/core/04-channel/migrations/v11/store.go 
b/modules/core/04-channel/migrations/v11/store.go
new file mode 100644
index 00000000000..095b6a866f1
--- /dev/null
+++ b/modules/core/04-channel/migrations/v11/store.go
@@ -0,0 +1,68 @@
+package v11
+
+import (
+	"fmt"
+
+	corestore "cosmossdk.io/core/store"
+
+	"github.com/cosmos/cosmos-sdk/codec"
+	sdk "github.com/cosmos/cosmos-sdk/types"
+
+	"github.com/cosmos/ibc-go/v10/modules/core/04-channel/types"
+	host "github.com/cosmos/ibc-go/v10/modules/core/24-host"
+	"github.com/cosmos/ibc-go/v10/modules/core/keeper"
+)
+
+const (
+	KeyNextSeqSendPrefix = "nextSequenceSend"
+	KeyChannelEndPrefix  = "channelEnds"
+	KeyChannelPrefix     = "channels"
+	KeyPortPrefix        = "ports"
+)
+
+// NextSequenceSendV1Key returns the store key for the send sequence of a particular
+// channel bound to a specific port.
+func NextSequenceSendV1Key(portID, channelID string) []byte {
+	return fmt.Appendf(nil, "%s/%s", KeyNextSeqSendPrefix, host.ChannelPath(portID, channelID))
+}
+
+// MigrateStore migrates the channel store to add support for IBC v2
+// for all OPEN UNORDERED channels by:
+// - Adding client counterparty information keyed to the channel ID
+// - Migrating the NextSequenceSend path to use the v2 format
+// - Storing an alias key mapping the v1 channel ID to the underlying client ID
+func MigrateStore(ctx sdk.Context, storeService corestore.KVStoreService, cdc codec.BinaryCodec,
+	ibcKeeper *keeper.Keeper,
+) error {
+	store := storeService.OpenKVStore(ctx)
+
+	ibcKeeper.ChannelKeeper.IterateChannels(ctx, func(ic types.IdentifiedChannel) bool {
+		// only add counterparty for channels that are OPEN and UNORDERED
+		// set a base client mapping from the channelId to the underlying base client
+		counterparty, ok := ibcKeeper.ChannelKeeper.GetV2Counterparty(ctx, ic.PortId, ic.ChannelId)
+		if ok {
+			ibcKeeper.ClientV2Keeper.SetClientCounterparty(ctx, ic.ChannelId, counterparty)
+			connection, ok := ibcKeeper.ConnectionKeeper.GetConnection(ctx, ic.ConnectionHops[0])
+			if !ok {
+				panic("connection not set")
+			}
+			ibcKeeper.ChannelKeeperV2.SetClientForAlias(ctx, ic.ChannelId, connection.ClientId)
+		}
+
+		// migrate the NextSequenceSend key to the v2 format for every channel
+		seqbz, err := store.Get(NextSequenceSendV1Key(ic.PortId, ic.ChannelId))
+		if err != nil {
+			panic("NextSequenceSend not found for channel " + ic.ChannelId)
+		}
+		seq := sdk.BigEndianToUint64(seqbz)
+		// set the NextSequenceSend in the v2 keeper
+		ibcKeeper.ChannelKeeperV2.SetNextSequenceSend(ctx, ic.ChannelId, seq)
+		// remove the old NextSequenceSend key
+		if err := store.Delete(NextSequenceSendV1Key(ic.PortId, ic.ChannelId)); err != nil {
+			panic("failed to delete NextSequenceSend key for channel " + ic.ChannelId)
+		}
+
+		return false
+	})
+	return nil
+}
diff --git a/modules/core/04-channel/migrations/v11/store_test.go b/modules/core/04-channel/migrations/v11/store_test.go
new file mode 100644
index 00000000000..6b80a06c6d6
--- /dev/null
+++ b/modules/core/04-channel/migrations/v11/store_test.go
@@ -0,0 +1,129 @@
+package v11_test
+
+import (
+	"testing"
+
+	testifysuite "github.com/stretchr/testify/suite"
+
+	"github.com/cosmos/cosmos-sdk/runtime"
+	sdk "github.com/cosmos/cosmos-sdk/types"
+
+	clientv2types "github.com/cosmos/ibc-go/v10/modules/core/02-client/v2/types"
+	"github.com/cosmos/ibc-go/v10/modules/core/04-channel/migrations/v11"
+	"github.com/cosmos/ibc-go/v10/modules/core/04-channel/types"
+	channelv2types "github.com/cosmos/ibc-go/v10/modules/core/04-channel/v2/types"
+	hostv2
"github.com/cosmos/ibc-go/v10/modules/core/24-host/v2" + ibcexported "github.com/cosmos/ibc-go/v10/modules/core/exported" + ibctesting "github.com/cosmos/ibc-go/v10/testing" + "github.com/cosmos/ibc-go/v10/testing/mock" +) + +type MigrationsV11TestSuite struct { + testifysuite.Suite + + coordinator *ibctesting.Coordinator + + chainA *ibctesting.TestChain + chainB *ibctesting.TestChain +} + +func (s *MigrationsV11TestSuite) SetupTest() { + s.coordinator = ibctesting.NewCoordinator(s.T(), 2) + + s.chainA = s.coordinator.GetChain(ibctesting.GetChainID(1)) + s.chainB = s.coordinator.GetChain(ibctesting.GetChainID(2)) +} + +func TestMigrationsV11TestSuite(t *testing.T) { + testifysuite.Run(t, new(MigrationsV11TestSuite)) +} + +func (s *MigrationsV11TestSuite) TestMigrateStore() { + ctx := s.chainA.GetContext() + cdc := s.chainA.App.AppCodec() + ibcKeeper := s.chainA.App.GetIBCKeeper() + storeService := runtime.NewKVStoreService(s.chainA.GetSimApp().GetKey(ibcexported.StoreKey)) + store := storeService.OpenKVStore(ctx) + numberOfChannels := 100 + + for i := range numberOfChannels { + path := ibctesting.NewPath(s.chainA, s.chainB) + // needed to add this line to have channel ids increment correctly + // without this line, the channel ids skip a number in the sequence + path = path.DisableUniqueChannelIDs() + if i%2 == 0 { + path.SetChannelOrdered() + } + path.Setup() + + // Move sequence back to its old v1 format key + // to mock channels that were created before the new changes + seq, ok := ibcKeeper.ChannelKeeper.GetNextSequenceSend(ctx, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID) + s.Require().True(ok) + err := store.Delete(hostv2.NextSequenceSendKey(path.EndpointA.ChannelID)) + s.Require().NoError(err) + err = store.Set(v11.NextSequenceSendV1Key(path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID), sdk.Uint64ToBigEndian(seq)) + s.Require().NoError(err) + + // Remove counterparty to mock pre migration channels + clientStore := ibcKeeper.ClientKeeper.ClientStore(ctx, path.EndpointA.ChannelID) + clientStore.Delete(clientv2types.CounterpartyKey()) + + // Remove alias to mock pre migration channels + err = store.Delete(channelv2types.AliasKey(path.EndpointA.ChannelID)) + s.Require().NoError(err) + + if i%5 == 0 { + channel, ok := ibcKeeper.ChannelKeeper.GetChannel(ctx, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID) + s.Require().True(ok) + if i%2 == 0 { + channel.State = types.INIT + } else { + channel.State = types.CLOSED + } + ibcKeeper.ChannelKeeper.SetChannel(ctx, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, channel) + } + } + + err := v11.MigrateStore(ctx, storeService, cdc, ibcKeeper) + s.Require().NoError(err) + + for i := range numberOfChannels { + channelID := types.FormatChannelIdentifier(uint64(i)) + channel, ok := ibcKeeper.ChannelKeeper.GetChannel(ctx, mock.PortID, channelID) + s.Require().True(ok, i) + + if channel.Ordering == types.UNORDERED && channel.State == types.OPEN { + // ensure counterparty set + expCounterparty, ok := ibcKeeper.ChannelKeeper.GetV2Counterparty(ctx, mock.PortID, channelID) + s.Require().True(ok) + counterparty, ok := ibcKeeper.ClientV2Keeper.GetClientCounterparty(ctx, channelID) + s.Require().True(ok) + s.Require().Equal(expCounterparty, counterparty, "counterparty not set correctly") + + // ensure base client mapping set + baseClientID, ok := ibcKeeper.ChannelKeeperV2.GetClientForAlias(ctx, channelID) + s.Require().True(ok) + s.Require().NotEqual(channelID, baseClientID) + connection, ok := 
ibcKeeper.ConnectionKeeper.GetConnection(ctx, channel.ConnectionHops[0]) + s.Require().True(ok) + s.Require().Equal(connection.ClientId, baseClientID, "base client mapping not set correctly") + } else { + // ensure counterparty not set for closed channels + _, ok := ibcKeeper.ClientV2Keeper.GetClientCounterparty(ctx, channelID) + s.Require().False(ok, "counterparty should not be set for closed channels") + + // ensure base client mapping not set for closed channels + baseClientID, ok := ibcKeeper.ChannelKeeperV2.GetClientForAlias(ctx, channelID) + s.Require().False(ok) + s.Require().Empty(baseClientID, "base client mapping should not be set for closed channels") + } + + // ensure that sequence migrated correctly + bz, _ := store.Get(v11.NextSequenceSendV1Key(mock.PortID, channelID)) + s.Require().Nil(bz) + seq, ok := ibcKeeper.ChannelKeeper.GetNextSequenceSend(ctx, mock.PortID, channelID) + s.Require().True(ok) + s.Require().Equal(uint64(1), seq) + } +} diff --git a/modules/core/04-channel/simulation/decoder_test.go b/modules/core/04-channel/simulation/decoder_test.go index 1208072c360..7f53481ea2c 100644 --- a/modules/core/04-channel/simulation/decoder_test.go +++ b/modules/core/04-channel/simulation/decoder_test.go @@ -12,6 +12,7 @@ import ( "github.com/cosmos/ibc-go/v10/modules/core/04-channel/simulation" "github.com/cosmos/ibc-go/v10/modules/core/04-channel/types" host "github.com/cosmos/ibc-go/v10/modules/core/24-host" + hostv2 "github.com/cosmos/ibc-go/v10/modules/core/24-host/v2" "github.com/cosmos/ibc-go/v10/testing/simapp" ) @@ -36,7 +37,7 @@ func TestDecodeStore(t *testing.T) { Value: cdc.MustMarshal(&channel), }, { - Key: host.NextSequenceSendKey(portID, channelID), + Key: hostv2.NextSequenceSendKey(channelID), Value: sdk.Uint64ToBigEndian(1), }, { diff --git a/modules/core/04-channel/types/acknowledgement_test.go b/modules/core/04-channel/types/acknowledgement_test.go index 97e7a3d917a..690bd32162e 100644 --- a/modules/core/04-channel/types/acknowledgement_test.go +++ b/modules/core/04-channel/types/acknowledgement_test.go @@ -20,7 +20,7 @@ const ( ) // tests acknowledgement.ValidateBasic and acknowledgement.Acknowledgement -func (suite TypesTestSuite) TestAcknowledgement() { //nolint:govet // this is a test, we are okay with copying locks +func (s *TypesTestSuite) TestAcknowledgement() { //nolint:govet // this is a test, we are okay with copying locks testCases := []struct { name string ack types.Acknowledgement @@ -68,25 +68,25 @@ func (suite TypesTestSuite) TestAcknowledgement() { //nolint:govet // this is a } for _, tc := range testCases { - suite.Run(tc.name, func() { - suite.SetupTest() + s.Run(tc.name, func() { + s.SetupTest() err := tc.ack.ValidateBasic() if tc.expValidates { - suite.Require().NoError(err) + s.Require().NoError(err) // expect all valid acks to be able to be marshaled - suite.NotPanics(func() { + s.Require().NotPanics(func() { bz := tc.ack.Acknowledgement() - suite.Require().NotNil(bz) - suite.Require().Equal(tc.expBytes, bz) + s.Require().NotNil(bz) + s.Require().Equal(tc.expBytes, bz) }) } else { - suite.Require().Error(err) + s.Require().Error(err) } - suite.Require().Equal(tc.expSuccess, tc.ack.Success()) + s.Require().Equal(tc.expSuccess, tc.ack.Success()) }) } } @@ -97,7 +97,7 @@ func (suite TypesTestSuite) TestAcknowledgement() { //nolint:govet // this is a // in the packet acknowledgement. // // This test acts as an indicator that the ABCI error codes may no longer be deterministic. 
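+//
+// As a rough illustration (mirroring the assertions in TestAcknowledgementError below, with ack1/ack2 as
+// illustrative names): wrapping the same registered error with different messages is expected to produce
+// identical acknowledgement bytes, while a different ABCI error code changes them.
+//
+//	ack1 := types.NewErrorAcknowledgement(errorsmod.Wrap(ibcerrors.ErrOutOfGas, "error string 1"))
+//	ack2 := types.NewErrorAcknowledgement(errorsmod.Wrap(ibcerrors.ErrOutOfGas, "error string 2"))
+//	// ack1 and ack2 should be equal; wrapping a different registered error would not be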
-func (suite *TypesTestSuite) TestABCICodeDeterminism() { +func (s *TypesTestSuite) TestABCICodeDeterminism() { // same ABCI error code used err := errorsmod.Wrap(ibcerrors.ErrOutOfGas, "error string 1") errSameABCICode := errorsmod.Wrap(ibcerrors.ErrOutOfGas, "error string 2") @@ -118,13 +118,13 @@ func (suite *TypesTestSuite) TestABCICodeDeterminism() { hashSameABCICode := cmtstate.TxResultsHash(resultsSameABCICode) hashDifferentABCICode := cmtstate.TxResultsHash(resultsDifferentABCICode) - suite.Require().Equal(hash, hashSameABCICode) - suite.Require().NotEqual(hash, hashDifferentABCICode) + s.Require().Equal(hash, hashSameABCICode) + s.Require().NotEqual(hash, hashDifferentABCICode) } // TestAcknowledgementError will verify that only a constant string and // ABCI error code are used in constructing the acknowledgement error string -func (suite *TypesTestSuite) TestAcknowledgementError() { +func (s *TypesTestSuite) TestAcknowledgementError() { // same ABCI error code used err := errorsmod.Wrap(ibcerrors.ErrOutOfGas, "error string 1") errSameABCICode := errorsmod.Wrap(ibcerrors.ErrOutOfGas, "error string 2") @@ -136,11 +136,11 @@ func (suite *TypesTestSuite) TestAcknowledgementError() { ackSameABCICode := types.NewErrorAcknowledgement(errSameABCICode) ackDifferentABCICode := types.NewErrorAcknowledgement(errDifferentABCICode) - suite.Require().Equal(ack, ackSameABCICode) - suite.Require().NotEqual(ack, ackDifferentABCICode) + s.Require().Equal(ack, ackSameABCICode) + s.Require().NotEqual(ack, ackDifferentABCICode) } -func (suite TypesTestSuite) TestAcknowledgementWithCodespace() { //nolint:govet // this is a test, we are okay with copying locks +func (s *TypesTestSuite) TestAcknowledgementWithCodespace() { //nolint:govet // this is a test, we are okay with copying locks testCases := []struct { name string ack types.Acknowledgement @@ -164,8 +164,8 @@ func (suite TypesTestSuite) TestAcknowledgementWithCodespace() { //nolint:govet } for _, tc := range testCases { - suite.Run(tc.name, func() { - suite.Require().Equal(tc.expBytes, tc.ack.Acknowledgement()) + s.Run(tc.name, func() { + s.Require().Equal(tc.expBytes, tc.ack.Acknowledgement()) }) } } diff --git a/modules/core/04-channel/types/channel_test.go b/modules/core/04-channel/types/channel_test.go index 6136b2746dd..8cdbcc26ed0 100644 --- a/modules/core/04-channel/types/channel_test.go +++ b/modules/core/04-channel/types/channel_test.go @@ -25,7 +25,6 @@ func TestChannelValidateBasic(t *testing.T) { } for i, tc := range testCases { - err := tc.channel.ValidateBasic() if tc.expErr == nil { require.NoError(t, err, "valid test case %d failed: %s", i, tc.name) @@ -48,7 +47,6 @@ func TestCounterpartyValidateBasic(t *testing.T) { } for i, tc := range testCases { - err := tc.counterparty.ValidateBasic() if tc.expErr == nil { require.NoError(t, err, "valid test case %d failed: %s", i, tc.name) @@ -86,7 +84,6 @@ func TestIdentifiedChannelValidateBasic(t *testing.T) { } for _, tc := range testCases { - err := tc.identifiedChannel.ValidateBasic() require.ErrorIs(t, err, tc.expErr) } diff --git a/modules/core/04-channel/types/errors.go b/modules/core/04-channel/types/errors.go index 9e13edef03c..add5f35f2a5 100644 --- a/modules/core/04-channel/types/errors.go +++ b/modules/core/04-channel/types/errors.go @@ -59,4 +59,5 @@ var ( ErrPruningSequenceStartNotFound = errorsmod.Register(SubModuleName, 41, "pruning sequence start not found") ErrRecvStartSequenceNotFound = errorsmod.Register(SubModuleName, 42, "recv start sequence not found") 
ErrInvalidCommitment = errorsmod.Register(SubModuleName, 43, "invalid commitment") + ErrKeeperNotSet = errorsmod.Register(SubModuleName, 44, "keeper not set") ) diff --git a/modules/core/04-channel/types/expected_keepers.go b/modules/core/04-channel/types/expected_keepers.go index 9b6f12d6e95..1d6a765bfcf 100644 --- a/modules/core/04-channel/types/expected_keepers.go +++ b/modules/core/04-channel/types/expected_keepers.go @@ -4,6 +4,7 @@ import ( sdk "github.com/cosmos/cosmos-sdk/types" clienttypes "github.com/cosmos/ibc-go/v10/modules/core/02-client/types" + clientv2types "github.com/cosmos/ibc-go/v10/modules/core/02-client/v2/types" connectiontypes "github.com/cosmos/ibc-go/v10/modules/core/03-connection/types" "github.com/cosmos/ibc-go/v10/modules/core/exported" ) @@ -68,3 +69,11 @@ type ConnectionKeeper interface { nextSequenceRecv uint64, ) error } + +type ClientKeeperV2 interface { + SetClientCounterparty(ctx sdk.Context, channelID string, counterparty clientv2types.CounterpartyInfo) +} + +type ChannelKeeperV2 interface { + SetClientForAlias(ctx sdk.Context, channelID, clientID string) +} diff --git a/modules/core/04-channel/types/genesis_test.go b/modules/core/04-channel/types/genesis_test.go index 682af38ba94..2a561a04465 100644 --- a/modules/core/04-channel/types/genesis_test.go +++ b/modules/core/04-channel/types/genesis_test.go @@ -216,7 +216,6 @@ func TestValidateGenesis(t *testing.T) { } for _, tc := range testCases { - err := tc.genState.Validate() if tc.expErr == nil { require.NoError(t, err, tc.name) diff --git a/modules/core/04-channel/types/keys_test.go b/modules/core/04-channel/types/keys_test.go index 982f56174c8..3c5e63636a7 100644 --- a/modules/core/04-channel/types/keys_test.go +++ b/modules/core/04-channel/types/keys_test.go @@ -33,7 +33,6 @@ func TestParseChannelSequence(t *testing.T) { } for _, tc := range testCases { - seq, err := types.ParseChannelSequence(tc.channelID) valid := types.IsValidChannelID(tc.channelID) require.Equal(t, tc.expSeq, seq) diff --git a/modules/core/04-channel/types/msgs_test.go b/modules/core/04-channel/types/msgs_test.go index b10c85b8563..e23a182619c 100644 --- a/modules/core/04-channel/types/msgs_test.go +++ b/modules/core/04-channel/types/msgs_test.go @@ -80,17 +80,17 @@ type TypesTestSuite struct { proof []byte } -func (suite *TypesTestSuite) SetupTest() { - app := simapp.Setup(suite.T(), false) +func (s *TypesTestSuite) SetupTest() { + app := simapp.Setup(s.T(), false) db := dbm.NewMemDB() store := rootmulti.NewStore(db, log.NewNopLogger(), metrics.NewNoOpMetrics()) storeKey := storetypes.NewKVStoreKey("iavlStoreKey") store.MountStoreWithDB(storeKey, storetypes.StoreTypeIAVL, nil) err := store.LoadVersion(0) - suite.Require().NoError(err) + s.Require().NoError(err) iavlStore, ok := store.GetCommitStore(storeKey).(*iavl.Store) - suite.Require().True(ok) + s.Require().True(ok) iavlStore.Set([]byte("KEY"), []byte("VALUE")) _ = store.Commit() @@ -100,21 +100,21 @@ func (suite *TypesTestSuite) SetupTest() { Path: fmt.Sprintf("/%s/key", storeKey.Name()), // required path to get key/value+proof Prove: true, }) - suite.Require().NoError(err) + s.Require().NoError(err) merkleProof, err := commitmenttypes.ConvertProofs(res.ProofOps) - suite.Require().NoError(err) + s.Require().NoError(err) proof, err := app.AppCodec().Marshal(&merkleProof) - suite.Require().NoError(err) + s.Require().NoError(err) - suite.proof = proof + s.proof = proof } func TestTypesTestSuite(t *testing.T) { testifysuite.Run(t, new(TypesTestSuite)) } -func (suite 
*TypesTestSuite) TestMsgChannelOpenInitValidateBasic() { +func (s *TypesTestSuite) TestMsgChannelOpenInitValidateBasic() { counterparty := types.NewCounterparty(cpportid, cpchanid) tryOpenChannel := types.NewChannel(types.TRYOPEN, types.ORDERED, counterparty, connHops, version) @@ -238,32 +238,32 @@ func (suite *TypesTestSuite) TestMsgChannelOpenInitValidateBasic() { } for _, tc := range testCases { - suite.Run(tc.name, func() { + s.Run(tc.name, func() { err := tc.msg.ValidateBasic() if tc.expErr == nil { - suite.Require().NoError(err) + s.Require().NoError(err) } else { - suite.Require().Error(err) - suite.Require().Equal(err.Error(), tc.expErr.Error()) + s.Require().Error(err) + s.Require().Equal(err.Error(), tc.expErr.Error()) } }) } } -func (suite *TypesTestSuite) TestMsgChannelOpenInitGetSigners() { +func (s *TypesTestSuite) TestMsgChannelOpenInitGetSigners() { expSigner, err := sdk.AccAddressFromBech32(addr) - suite.Require().NoError(err) + s.Require().NoError(err) msg := types.NewMsgChannelOpenInit(portid, version, types.ORDERED, connHops, cpportid, addr) encodingCfg := moduletestutil.MakeTestEncodingConfig(ibc.AppModuleBasic{}) signers, _, err := encodingCfg.Codec.GetMsgV1Signers(msg) - suite.Require().NoError(err) - suite.Require().Equal(expSigner.Bytes(), signers[0]) + s.Require().NoError(err) + s.Require().Equal(expSigner.Bytes(), signers[0]) } -func (suite *TypesTestSuite) TestMsgChannelOpenTryValidateBasic() { +func (s *TypesTestSuite) TestMsgChannelOpenTryValidateBasic() { counterparty := types.NewCounterparty(cpportid, cpchanid) initChannel := types.NewChannel(types.INIT, types.ORDERED, counterparty, connHops, version) @@ -274,22 +274,22 @@ func (suite *TypesTestSuite) TestMsgChannelOpenTryValidateBasic() { }{ { "success", - types.NewMsgChannelOpenTry(portid, version, types.ORDERED, connHops, cpportid, cpchanid, version, suite.proof, height, addr), + types.NewMsgChannelOpenTry(portid, version, types.ORDERED, connHops, cpportid, cpchanid, version, s.proof, height, addr), nil, }, { "success with empty channel version", - types.NewMsgChannelOpenTry(portid, "", types.UNORDERED, connHops, cpportid, cpchanid, version, suite.proof, height, addr), + types.NewMsgChannelOpenTry(portid, "", types.UNORDERED, connHops, cpportid, cpchanid, version, s.proof, height, addr), nil, }, { "success with empty counterparty version", - types.NewMsgChannelOpenTry(portid, version, types.ORDERED, connHops, cpportid, cpchanid, "", suite.proof, height, addr), + types.NewMsgChannelOpenTry(portid, version, types.ORDERED, connHops, cpportid, cpchanid, "", s.proof, height, addr), nil, }, { "too short port id", - types.NewMsgChannelOpenTry(invalidShortPort, version, types.ORDERED, connHops, cpportid, cpchanid, version, suite.proof, height, addr), + types.NewMsgChannelOpenTry(invalidShortPort, version, types.ORDERED, connHops, cpportid, cpchanid, version, s.proof, height, addr), errorsmod.Wrap( errorsmod.Wrapf( host.ErrInvalidID, @@ -300,7 +300,7 @@ func (suite *TypesTestSuite) TestMsgChannelOpenTryValidateBasic() { }, { "too long port id", - types.NewMsgChannelOpenTry(invalidLongPort, version, types.ORDERED, connHops, cpportid, cpchanid, version, suite.proof, height, addr), + types.NewMsgChannelOpenTry(invalidLongPort, version, types.ORDERED, connHops, cpportid, cpchanid, version, s.proof, height, addr), errorsmod.Wrap( errorsmod.Wrapf( host.ErrInvalidID, @@ -311,7 +311,7 @@ func (suite *TypesTestSuite) TestMsgChannelOpenTryValidateBasic() { }, { "port id contains non-alpha", - 
types.NewMsgChannelOpenTry(invalidPort, version, types.ORDERED, connHops, cpportid, cpchanid, version, suite.proof, height, addr), + types.NewMsgChannelOpenTry(invalidPort, version, types.ORDERED, connHops, cpportid, cpchanid, version, s.proof, height, addr), errorsmod.Wrap( errorsmod.Wrapf( host.ErrInvalidID, @@ -322,12 +322,12 @@ func (suite *TypesTestSuite) TestMsgChannelOpenTryValidateBasic() { }, { "invalid channel order", - types.NewMsgChannelOpenTry(portid, version, types.Order(4), connHops, cpportid, cpchanid, version, suite.proof, height, addr), + types.NewMsgChannelOpenTry(portid, version, types.Order(4), connHops, cpportid, cpchanid, version, s.proof, height, addr), errorsmod.Wrap(types.ErrInvalidChannelOrdering, types.Order(4).String()), }, { "connection hops more than 1 ", - types.NewMsgChannelOpenTry(portid, version, types.UNORDERED, invalidConnHops, cpportid, cpchanid, version, suite.proof, height, addr), + types.NewMsgChannelOpenTry(portid, version, types.UNORDERED, invalidConnHops, cpportid, cpchanid, version, s.proof, height, addr), errorsmod.Wrap( types.ErrTooManyConnectionHops, "current IBC version only supports one connection hop", @@ -335,7 +335,7 @@ func (suite *TypesTestSuite) TestMsgChannelOpenTryValidateBasic() { }, { "too short connection id", - types.NewMsgChannelOpenTry(portid, version, types.UNORDERED, invalidShortConnHops, cpportid, cpchanid, version, suite.proof, height, addr), + types.NewMsgChannelOpenTry(portid, version, types.UNORDERED, invalidShortConnHops, cpportid, cpchanid, version, s.proof, height, addr), errorsmod.Wrap( errorsmod.Wrapf( host.ErrInvalidID, @@ -345,7 +345,7 @@ func (suite *TypesTestSuite) TestMsgChannelOpenTryValidateBasic() { }, { "too long connection id", - types.NewMsgChannelOpenTry(portid, version, types.UNORDERED, invalidLongConnHops, cpportid, cpchanid, version, suite.proof, height, addr), + types.NewMsgChannelOpenTry(portid, version, types.UNORDERED, invalidLongConnHops, cpportid, cpchanid, version, s.proof, height, addr), errorsmod.Wrap( errorsmod.Wrapf( host.ErrInvalidID, @@ -355,7 +355,7 @@ func (suite *TypesTestSuite) TestMsgChannelOpenTryValidateBasic() { }, { "connection id contains non-alpha", - types.NewMsgChannelOpenTry(portid, version, types.UNORDERED, []string{invalidConnection}, cpportid, cpchanid, version, suite.proof, height, addr), + types.NewMsgChannelOpenTry(portid, version, types.UNORDERED, []string{invalidConnection}, cpportid, cpchanid, version, s.proof, height, addr), errorsmod.Wrap( errorsmod.Wrapf( host.ErrInvalidID, @@ -366,7 +366,7 @@ func (suite *TypesTestSuite) TestMsgChannelOpenTryValidateBasic() { }, { "invalid counterparty port id", - types.NewMsgChannelOpenTry(portid, version, types.UNORDERED, connHops, invalidPort, cpchanid, version, suite.proof, height, addr), + types.NewMsgChannelOpenTry(portid, version, types.UNORDERED, connHops, invalidPort, cpchanid, version, s.proof, height, addr), errorsmod.Wrap( errorsmod.Wrapf( host.ErrInvalidID, @@ -377,7 +377,7 @@ func (suite *TypesTestSuite) TestMsgChannelOpenTryValidateBasic() { }, { "invalid counterparty channel id", - types.NewMsgChannelOpenTry(portid, version, types.UNORDERED, connHops, cpportid, invalidChannel, version, suite.proof, height, addr), + types.NewMsgChannelOpenTry(portid, version, types.UNORDERED, connHops, cpportid, invalidChannel, version, s.proof, height, addr), errorsmod.Wrap( errorsmod.Wrapf( host.ErrInvalidID, @@ -393,7 +393,7 @@ func (suite *TypesTestSuite) TestMsgChannelOpenTryValidateBasic() { }, { "channel not in TRYOPEN 
state", - &types.MsgChannelOpenTry{portid, "", initChannel, version, suite.proof, height, addr}, + &types.MsgChannelOpenTry{portid, "", initChannel, version, s.proof, height, addr}, errorsmod.Wrapf(types.ErrInvalidChannelState, "channel state must be TRYOPEN in MsgChannelOpenTry. expected: %s, got: %s", types.TRYOPEN, initChannel.State, @@ -401,38 +401,38 @@ func (suite *TypesTestSuite) TestMsgChannelOpenTryValidateBasic() { }, { "previous channel id is not empty", - &types.MsgChannelOpenTry{portid, chanid, initChannel, version, suite.proof, height, addr}, + &types.MsgChannelOpenTry{portid, chanid, initChannel, version, s.proof, height, addr}, errorsmod.Wrap(types.ErrInvalidChannelIdentifier, "previous channel identifier must be empty, this field has been deprecated as crossing hellos are no longer supported"), }, } for _, tc := range testCases { - suite.Run(tc.name, func() { + s.Run(tc.name, func() { err := tc.msg.ValidateBasic() if tc.expErr == nil { - suite.Require().NoError(err) + s.Require().NoError(err) } else { - suite.Require().Error(err) - suite.Require().Equal(err.Error(), tc.expErr.Error()) + s.Require().Error(err) + s.Require().Equal(err.Error(), tc.expErr.Error()) } }) } } -func (suite *TypesTestSuite) TestMsgChannelOpenTryGetSigners() { +func (s *TypesTestSuite) TestMsgChannelOpenTryGetSigners() { expSigner, err := sdk.AccAddressFromBech32(addr) - suite.Require().NoError(err) - msg := types.NewMsgChannelOpenTry(portid, version, types.ORDERED, connHops, cpportid, cpchanid, version, suite.proof, height, addr) + s.Require().NoError(err) + msg := types.NewMsgChannelOpenTry(portid, version, types.ORDERED, connHops, cpportid, cpchanid, version, s.proof, height, addr) encodingCfg := moduletestutil.MakeTestEncodingConfig(ibc.AppModuleBasic{}) signers, _, err := encodingCfg.Codec.GetMsgV1Signers(msg) - suite.Require().NoError(err) - suite.Require().Equal(expSigner.Bytes(), signers[0]) + s.Require().NoError(err) + s.Require().Equal(expSigner.Bytes(), signers[0]) } -func (suite *TypesTestSuite) TestMsgChannelOpenAckValidateBasic() { +func (s *TypesTestSuite) TestMsgChannelOpenAckValidateBasic() { testCases := []struct { name string msg *types.MsgChannelOpenAck @@ -440,17 +440,17 @@ func (suite *TypesTestSuite) TestMsgChannelOpenAckValidateBasic() { }{ { "success", - types.NewMsgChannelOpenAck(portid, chanid, chanid, version, suite.proof, height, addr), + types.NewMsgChannelOpenAck(portid, chanid, chanid, version, s.proof, height, addr), nil, }, { "success empty cpv", - types.NewMsgChannelOpenAck(portid, chanid, chanid, "", suite.proof, height, addr), + types.NewMsgChannelOpenAck(portid, chanid, chanid, "", s.proof, height, addr), nil, }, { "too short port id", - types.NewMsgChannelOpenAck(invalidShortPort, chanid, chanid, version, suite.proof, height, addr), + types.NewMsgChannelOpenAck(invalidShortPort, chanid, chanid, version, s.proof, height, addr), errorsmod.Wrap( errorsmod.Wrapf( host.ErrInvalidID, @@ -461,7 +461,7 @@ func (suite *TypesTestSuite) TestMsgChannelOpenAckValidateBasic() { }, { "too long port id", - types.NewMsgChannelOpenAck(invalidLongPort, chanid, chanid, version, suite.proof, height, addr), + types.NewMsgChannelOpenAck(invalidLongPort, chanid, chanid, version, s.proof, height, addr), errorsmod.Wrap( errorsmod.Wrapf( host.ErrInvalidID, @@ -472,7 +472,7 @@ func (suite *TypesTestSuite) TestMsgChannelOpenAckValidateBasic() { }, { "port id contains non-alpha", - types.NewMsgChannelOpenAck(invalidPort, chanid, chanid, version, suite.proof, height, addr), + 
types.NewMsgChannelOpenAck(invalidPort, chanid, chanid, version, s.proof, height, addr), errorsmod.Wrap( errorsmod.Wrapf( host.ErrInvalidID, @@ -483,17 +483,17 @@ func (suite *TypesTestSuite) TestMsgChannelOpenAckValidateBasic() { }, { "too short channel id", - types.NewMsgChannelOpenAck(portid, invalidShortChannel, chanid, version, suite.proof, height, addr), + types.NewMsgChannelOpenAck(portid, invalidShortChannel, chanid, version, s.proof, height, addr), types.ErrInvalidChannelIdentifier, }, { "too long channel id", - types.NewMsgChannelOpenAck(portid, invalidLongChannel, chanid, version, suite.proof, height, addr), + types.NewMsgChannelOpenAck(portid, invalidLongChannel, chanid, version, s.proof, height, addr), types.ErrInvalidChannelIdentifier, }, { "channel id contains non-alpha", - types.NewMsgChannelOpenAck(portid, invalidChannel, chanid, version, suite.proof, height, addr), + types.NewMsgChannelOpenAck(portid, invalidChannel, chanid, version, s.proof, height, addr), types.ErrInvalidChannelIdentifier, }, { @@ -503,7 +503,7 @@ func (suite *TypesTestSuite) TestMsgChannelOpenAckValidateBasic() { }, { "invalid counterparty channel id", - types.NewMsgChannelOpenAck(portid, chanid, invalidShortChannel, version, suite.proof, height, addr), + types.NewMsgChannelOpenAck(portid, chanid, invalidShortChannel, version, s.proof, height, addr), errorsmod.Wrap( errorsmod.Wrapf( host.ErrInvalidID, @@ -515,32 +515,32 @@ func (suite *TypesTestSuite) TestMsgChannelOpenAckValidateBasic() { } for _, tc := range testCases { - suite.Run(tc.name, func() { + s.Run(tc.name, func() { err := tc.msg.ValidateBasic() if tc.expErr == nil { - suite.Require().NoError(err) + s.Require().NoError(err) } else { - suite.Require().Error(err) - suite.Require().Equal(err.Error(), tc.expErr.Error()) + s.Require().Error(err) + s.Require().Equal(err.Error(), tc.expErr.Error()) } }) } } -func (suite *TypesTestSuite) TestMsgChannelOpenAckGetSigners() { +func (s *TypesTestSuite) TestMsgChannelOpenAckGetSigners() { expSigner, err := sdk.AccAddressFromBech32(addr) - suite.Require().NoError(err) - msg := types.NewMsgChannelOpenAck(portid, chanid, chanid, version, suite.proof, height, addr) + s.Require().NoError(err) + msg := types.NewMsgChannelOpenAck(portid, chanid, chanid, version, s.proof, height, addr) encodingCfg := moduletestutil.MakeTestEncodingConfig(ibc.AppModuleBasic{}) signers, _, err := encodingCfg.Codec.GetMsgV1Signers(msg) - suite.Require().NoError(err) - suite.Require().Equal(expSigner.Bytes(), signers[0]) + s.Require().NoError(err) + s.Require().Equal(expSigner.Bytes(), signers[0]) } -func (suite *TypesTestSuite) TestMsgChannelOpenConfirmValidateBasic() { +func (s *TypesTestSuite) TestMsgChannelOpenConfirmValidateBasic() { testCases := []struct { name string msg *types.MsgChannelOpenConfirm @@ -548,12 +548,12 @@ func (suite *TypesTestSuite) TestMsgChannelOpenConfirmValidateBasic() { }{ { "success", - types.NewMsgChannelOpenConfirm(portid, chanid, suite.proof, height, addr), + types.NewMsgChannelOpenConfirm(portid, chanid, s.proof, height, addr), nil, }, { "too short port id", - types.NewMsgChannelOpenConfirm(invalidShortPort, chanid, suite.proof, height, addr), + types.NewMsgChannelOpenConfirm(invalidShortPort, chanid, s.proof, height, addr), errorsmod.Wrap( errorsmod.Wrapf( host.ErrInvalidID, @@ -564,7 +564,7 @@ func (suite *TypesTestSuite) TestMsgChannelOpenConfirmValidateBasic() { }, { "too long port id", - types.NewMsgChannelOpenConfirm(invalidLongPort, chanid, suite.proof, height, addr), + 
types.NewMsgChannelOpenConfirm(invalidLongPort, chanid, s.proof, height, addr), errorsmod.Wrap( errorsmod.Wrapf( host.ErrInvalidID, @@ -575,7 +575,7 @@ func (suite *TypesTestSuite) TestMsgChannelOpenConfirmValidateBasic() { }, { "port id contains non-alpha", - types.NewMsgChannelOpenConfirm(invalidPort, chanid, suite.proof, height, addr), + types.NewMsgChannelOpenConfirm(invalidPort, chanid, s.proof, height, addr), errorsmod.Wrap( errorsmod.Wrapf( host.ErrInvalidID, @@ -586,17 +586,17 @@ func (suite *TypesTestSuite) TestMsgChannelOpenConfirmValidateBasic() { }, { "too short channel id", - types.NewMsgChannelOpenConfirm(portid, invalidShortChannel, suite.proof, height, addr), + types.NewMsgChannelOpenConfirm(portid, invalidShortChannel, s.proof, height, addr), types.ErrInvalidChannelIdentifier, }, { "too long channel id", - types.NewMsgChannelOpenConfirm(portid, invalidLongChannel, suite.proof, height, addr), + types.NewMsgChannelOpenConfirm(portid, invalidLongChannel, s.proof, height, addr), types.ErrInvalidChannelIdentifier, }, { "channel id contains non-alpha", - types.NewMsgChannelOpenConfirm(portid, invalidChannel, suite.proof, height, addr), + types.NewMsgChannelOpenConfirm(portid, invalidChannel, s.proof, height, addr), types.ErrInvalidChannelIdentifier, }, { @@ -607,32 +607,32 @@ func (suite *TypesTestSuite) TestMsgChannelOpenConfirmValidateBasic() { } for _, tc := range testCases { - suite.Run(tc.name, func() { + s.Run(tc.name, func() { err := tc.msg.ValidateBasic() if tc.expErr == nil { - suite.Require().NoError(err) + s.Require().NoError(err) } else { - suite.Require().Error(err) - suite.Require().Equal(err.Error(), tc.expErr.Error()) + s.Require().Error(err) + s.Require().Equal(err.Error(), tc.expErr.Error()) } }) } } -func (suite *TypesTestSuite) TestMsgChannelOpenConfirmGetSigners() { +func (s *TypesTestSuite) TestMsgChannelOpenConfirmGetSigners() { expSigner, err := sdk.AccAddressFromBech32(addr) - suite.Require().NoError(err) - msg := types.NewMsgChannelOpenConfirm(portid, chanid, suite.proof, height, addr) + s.Require().NoError(err) + msg := types.NewMsgChannelOpenConfirm(portid, chanid, s.proof, height, addr) encodingCfg := moduletestutil.MakeTestEncodingConfig(ibc.AppModuleBasic{}) signers, _, err := encodingCfg.Codec.GetMsgV1Signers(msg) - suite.Require().NoError(err) - suite.Require().Equal(expSigner.Bytes(), signers[0]) + s.Require().NoError(err) + s.Require().Equal(expSigner.Bytes(), signers[0]) } -func (suite *TypesTestSuite) TestMsgChannelCloseInitValidateBasic() { +func (s *TypesTestSuite) TestMsgChannelCloseInitValidateBasic() { testCases := []struct { name string msg *types.MsgChannelCloseInit @@ -694,32 +694,32 @@ func (suite *TypesTestSuite) TestMsgChannelCloseInitValidateBasic() { } for _, tc := range testCases { - suite.Run(tc.name, func() { + s.Run(tc.name, func() { err := tc.msg.ValidateBasic() if tc.expErr == nil { - suite.Require().NoError(err) + s.Require().NoError(err) } else { - suite.Require().Error(err) - suite.Require().Equal(err.Error(), tc.expErr.Error()) + s.Require().Error(err) + s.Require().Equal(err.Error(), tc.expErr.Error()) } }) } } -func (suite *TypesTestSuite) TestMsgChannelCloseInitGetSigners() { +func (s *TypesTestSuite) TestMsgChannelCloseInitGetSigners() { expSigner, err := sdk.AccAddressFromBech32(addr) - suite.Require().NoError(err) + s.Require().NoError(err) msg := types.NewMsgChannelCloseInit(portid, chanid, addr) encodingCfg := moduletestutil.MakeTestEncodingConfig(ibc.AppModuleBasic{}) signers, _, err := 
encodingCfg.Codec.GetMsgV1Signers(msg) - suite.Require().NoError(err) - suite.Require().Equal(expSigner.Bytes(), signers[0]) + s.Require().NoError(err) + s.Require().Equal(expSigner.Bytes(), signers[0]) } -func (suite *TypesTestSuite) TestMsgChannelCloseConfirmValidateBasic() { +func (s *TypesTestSuite) TestMsgChannelCloseConfirmValidateBasic() { testCases := []struct { name string msg *types.MsgChannelCloseConfirm @@ -727,17 +727,17 @@ func (suite *TypesTestSuite) TestMsgChannelCloseConfirmValidateBasic() { }{ { "success", - types.NewMsgChannelCloseConfirm(portid, chanid, suite.proof, height, addr), + types.NewMsgChannelCloseConfirm(portid, chanid, s.proof, height, addr), nil, }, { "success, positive counterparty upgrade sequence", - types.NewMsgChannelCloseConfirm(portid, chanid, suite.proof, height, addr), + types.NewMsgChannelCloseConfirm(portid, chanid, s.proof, height, addr), nil, }, { "too short port id", - types.NewMsgChannelCloseConfirm(invalidShortPort, chanid, suite.proof, height, addr), + types.NewMsgChannelCloseConfirm(invalidShortPort, chanid, s.proof, height, addr), errorsmod.Wrap( errorsmod.Wrapf( host.ErrInvalidID, @@ -748,7 +748,7 @@ func (suite *TypesTestSuite) TestMsgChannelCloseConfirmValidateBasic() { }, { "too long port id", - types.NewMsgChannelCloseConfirm(invalidLongPort, chanid, suite.proof, height, addr), + types.NewMsgChannelCloseConfirm(invalidLongPort, chanid, s.proof, height, addr), errorsmod.Wrap( errorsmod.Wrapf( host.ErrInvalidID, @@ -759,7 +759,7 @@ func (suite *TypesTestSuite) TestMsgChannelCloseConfirmValidateBasic() { }, { "port id contains non-alpha", - types.NewMsgChannelCloseConfirm(invalidPort, chanid, suite.proof, height, addr), + types.NewMsgChannelCloseConfirm(invalidPort, chanid, s.proof, height, addr), errorsmod.Wrap( errorsmod.Wrapf( host.ErrInvalidID, @@ -770,17 +770,17 @@ func (suite *TypesTestSuite) TestMsgChannelCloseConfirmValidateBasic() { }, { "too short channel id", - types.NewMsgChannelCloseConfirm(portid, invalidShortChannel, suite.proof, height, addr), + types.NewMsgChannelCloseConfirm(portid, invalidShortChannel, s.proof, height, addr), types.ErrInvalidChannelIdentifier, }, { "too long channel id", - types.NewMsgChannelCloseConfirm(portid, invalidLongChannel, suite.proof, height, addr), + types.NewMsgChannelCloseConfirm(portid, invalidLongChannel, s.proof, height, addr), types.ErrInvalidChannelIdentifier, }, { "channel id contains non-alpha", - types.NewMsgChannelCloseConfirm(portid, invalidChannel, suite.proof, height, addr), + types.NewMsgChannelCloseConfirm(portid, invalidChannel, s.proof, height, addr), types.ErrInvalidChannelIdentifier, }, { @@ -791,32 +791,32 @@ func (suite *TypesTestSuite) TestMsgChannelCloseConfirmValidateBasic() { } for _, tc := range testCases { - suite.Run(tc.name, func() { + s.Run(tc.name, func() { err := tc.msg.ValidateBasic() if tc.expErr == nil { - suite.Require().NoError(err) + s.Require().NoError(err) } else { - suite.Require().Error(err) - suite.Require().Equal(err.Error(), tc.expErr.Error()) + s.Require().Error(err) + s.Require().Equal(err.Error(), tc.expErr.Error()) } }) } } -func (suite *TypesTestSuite) TestMsgChannelCloseConfirmGetSigners() { +func (s *TypesTestSuite) TestMsgChannelCloseConfirmGetSigners() { expSigner, err := sdk.AccAddressFromBech32(addr) - suite.Require().NoError(err) - msg := types.NewMsgChannelCloseConfirm(portid, chanid, suite.proof, height, addr) + s.Require().NoError(err) + msg := types.NewMsgChannelCloseConfirm(portid, chanid, s.proof, height, addr) encodingCfg := 
moduletestutil.MakeTestEncodingConfig(ibc.AppModuleBasic{}) signers, _, err := encodingCfg.Codec.GetMsgV1Signers(msg) - suite.Require().NoError(err) - suite.Require().Equal(expSigner.Bytes(), signers[0]) + s.Require().NoError(err) + s.Require().Equal(expSigner.Bytes(), signers[0]) } -func (suite *TypesTestSuite) TestMsgRecvPacketValidateBasic() { +func (s *TypesTestSuite) TestMsgRecvPacketValidateBasic() { testCases := []struct { name string msg *types.MsgRecvPacket @@ -824,12 +824,12 @@ func (suite *TypesTestSuite) TestMsgRecvPacketValidateBasic() { }{ { "success", - types.NewMsgRecvPacket(packet, suite.proof, height, addr), + types.NewMsgRecvPacket(packet, s.proof, height, addr), nil, }, { "missing signer address", - types.NewMsgRecvPacket(packet, suite.proof, height, emptyAddr), + types.NewMsgRecvPacket(packet, s.proof, height, emptyAddr), errorsmod.Wrapf(ibcerrors.ErrInvalidAddress, "string could not be parsed as address: %v", errors.New("empty address string is not allowed")), }, { @@ -839,38 +839,38 @@ func (suite *TypesTestSuite) TestMsgRecvPacketValidateBasic() { }, { "invalid packet", - types.NewMsgRecvPacket(invalidPacket, suite.proof, height, addr), + types.NewMsgRecvPacket(invalidPacket, s.proof, height, addr), errorsmod.Wrap(types.ErrInvalidPacket, "packet sequence cannot be 0"), }, } for _, tc := range testCases { - suite.Run(tc.name, func() { + s.Run(tc.name, func() { err := tc.msg.ValidateBasic() if tc.expErr == nil { - suite.NoError(err) + s.Require().NoError(err) } else { - suite.Error(err) - suite.Require().Equal(err.Error(), tc.expErr.Error()) + s.Require().Error(err) + s.Require().Equal(err.Error(), tc.expErr.Error()) } }) } } -func (suite *TypesTestSuite) TestMsgRecvPacketGetSigners() { +func (s *TypesTestSuite) TestMsgRecvPacketGetSigners() { expSigner, err := sdk.AccAddressFromBech32(addr) - suite.Require().NoError(err) - msg := types.NewMsgRecvPacket(packet, suite.proof, height, addr) + s.Require().NoError(err) + msg := types.NewMsgRecvPacket(packet, s.proof, height, addr) encodingCfg := moduletestutil.MakeTestEncodingConfig(ibc.AppModuleBasic{}) signers, _, err := encodingCfg.Codec.GetMsgV1Signers(msg) - suite.Require().NoError(err) - suite.Require().Equal(expSigner.Bytes(), signers[0]) + s.Require().NoError(err) + s.Require().Equal(expSigner.Bytes(), signers[0]) } -func (suite *TypesTestSuite) TestMsgTimeoutValidateBasic() { +func (s *TypesTestSuite) TestMsgTimeoutValidateBasic() { testCases := []struct { name string msg *types.MsgTimeout @@ -878,17 +878,17 @@ func (suite *TypesTestSuite) TestMsgTimeoutValidateBasic() { }{ { "success", - types.NewMsgTimeout(packet, 1, suite.proof, height, addr), + types.NewMsgTimeout(packet, 1, s.proof, height, addr), nil, }, { "seq 0", - types.NewMsgTimeout(packet, 0, suite.proof, height, addr), + types.NewMsgTimeout(packet, 0, s.proof, height, addr), errorsmod.Wrap(ibcerrors.ErrInvalidSequence, "next sequence receive cannot be 0"), }, { "missing signer address", - types.NewMsgTimeout(packet, 1, suite.proof, height, emptyAddr), + types.NewMsgTimeout(packet, 1, s.proof, height, emptyAddr), errorsmod.Wrapf(ibcerrors.ErrInvalidAddress, "string could not be parsed as address: %v", errors.New("empty address string is not allowed")), }, { @@ -898,38 +898,38 @@ func (suite *TypesTestSuite) TestMsgTimeoutValidateBasic() { }, { "invalid packet", - types.NewMsgTimeout(invalidPacket, 1, suite.proof, height, addr), + types.NewMsgTimeout(invalidPacket, 1, s.proof, height, addr), errorsmod.Wrap(types.ErrInvalidPacket, "packet sequence cannot 
be 0"), }, } for _, tc := range testCases { - suite.Run(tc.name, func() { + s.Run(tc.name, func() { err := tc.msg.ValidateBasic() if tc.expErr == nil { - suite.Require().NoError(err) + s.Require().NoError(err) } else { - suite.Require().Error(err) - suite.Require().Equal(err.Error(), tc.expErr.Error()) + s.Require().Error(err) + s.Require().Equal(err.Error(), tc.expErr.Error()) } }) } } -func (suite *TypesTestSuite) TestMsgTimeoutGetSigners() { +func (s *TypesTestSuite) TestMsgTimeoutGetSigners() { expSigner, err := sdk.AccAddressFromBech32(addr) - suite.Require().NoError(err) - msg := types.NewMsgTimeout(packet, 1, suite.proof, height, addr) + s.Require().NoError(err) + msg := types.NewMsgTimeout(packet, 1, s.proof, height, addr) encodingCfg := moduletestutil.MakeTestEncodingConfig(ibc.AppModuleBasic{}) signers, _, err := encodingCfg.Codec.GetMsgV1Signers(msg) - suite.Require().NoError(err) - suite.Require().Equal(expSigner.Bytes(), signers[0]) + s.Require().NoError(err) + s.Require().Equal(expSigner.Bytes(), signers[0]) } -func (suite *TypesTestSuite) TestMsgTimeoutOnCloseValidateBasic() { +func (s *TypesTestSuite) TestMsgTimeoutOnCloseValidateBasic() { testCases := []struct { name string msg *types.MsgTimeoutOnClose @@ -937,68 +937,68 @@ func (suite *TypesTestSuite) TestMsgTimeoutOnCloseValidateBasic() { }{ { "success", - types.NewMsgTimeoutOnClose(packet, 1, suite.proof, suite.proof, height, addr), + types.NewMsgTimeoutOnClose(packet, 1, s.proof, s.proof, height, addr), nil, }, { "success, positive counterparty upgrade sequence", - types.NewMsgTimeoutOnClose(packet, 1, suite.proof, suite.proof, height, addr), + types.NewMsgTimeoutOnClose(packet, 1, s.proof, s.proof, height, addr), nil, }, { "seq 0", - types.NewMsgTimeoutOnClose(packet, 0, suite.proof, suite.proof, height, addr), + types.NewMsgTimeoutOnClose(packet, 0, s.proof, s.proof, height, addr), errorsmod.Wrap(ibcerrors.ErrInvalidSequence, "next sequence receive cannot be 0"), }, { "signer address is empty", - types.NewMsgTimeoutOnClose(packet, 1, suite.proof, suite.proof, height, emptyAddr), + types.NewMsgTimeoutOnClose(packet, 1, s.proof, s.proof, height, emptyAddr), errorsmod.Wrapf(ibcerrors.ErrInvalidAddress, "string could not be parsed as address: %v", errors.New("empty address string is not allowed")), }, { "empty proof", - types.NewMsgTimeoutOnClose(packet, 1, emptyProof, suite.proof, height, addr), + types.NewMsgTimeoutOnClose(packet, 1, emptyProof, s.proof, height, addr), errorsmod.Wrap(commitmenttypes.ErrInvalidProof, "cannot submit an empty unreceived proof"), }, { "empty proof close", - types.NewMsgTimeoutOnClose(packet, 1, suite.proof, emptyProof, height, addr), + types.NewMsgTimeoutOnClose(packet, 1, s.proof, emptyProof, height, addr), errorsmod.Wrap(commitmenttypes.ErrInvalidProof, "cannot submit an empty proof of closed counterparty channel end"), }, { "invalid packet", - types.NewMsgTimeoutOnClose(invalidPacket, 1, suite.proof, suite.proof, height, addr), + types.NewMsgTimeoutOnClose(invalidPacket, 1, s.proof, s.proof, height, addr), errorsmod.Wrap(types.ErrInvalidPacket, "packet sequence cannot be 0"), }, } for _, tc := range testCases { - suite.Run(tc.name, func() { + s.Run(tc.name, func() { err := tc.msg.ValidateBasic() if tc.expErr == nil { - suite.Require().NoError(err) + s.Require().NoError(err) } else { - suite.Require().Error(err) - suite.Require().Equal(err.Error(), tc.expErr.Error()) + s.Require().Error(err) + s.Require().Equal(err.Error(), tc.expErr.Error()) } }) } } -func (suite *TypesTestSuite) 
TestMsgTimeoutOnCloseGetSigners() { +func (s *TypesTestSuite) TestMsgTimeoutOnCloseGetSigners() { expSigner, err := sdk.AccAddressFromBech32(addr) - suite.Require().NoError(err) - msg := types.NewMsgTimeoutOnClose(packet, 1, suite.proof, suite.proof, height, addr) + s.Require().NoError(err) + msg := types.NewMsgTimeoutOnClose(packet, 1, s.proof, s.proof, height, addr) encodingCfg := moduletestutil.MakeTestEncodingConfig(ibc.AppModuleBasic{}) signers, _, err := encodingCfg.Codec.GetMsgV1Signers(msg) - suite.Require().NoError(err) - suite.Require().Equal(expSigner.Bytes(), signers[0]) + s.Require().NoError(err) + s.Require().Equal(expSigner.Bytes(), signers[0]) } -func (suite *TypesTestSuite) TestMsgAcknowledgementValidateBasic() { +func (s *TypesTestSuite) TestMsgAcknowledgementValidateBasic() { testCases := []struct { name string msg *types.MsgAcknowledgement @@ -1006,17 +1006,17 @@ func (suite *TypesTestSuite) TestMsgAcknowledgementValidateBasic() { }{ { "success", - types.NewMsgAcknowledgement(packet, packet.GetData(), suite.proof, height, addr), + types.NewMsgAcknowledgement(packet, packet.GetData(), s.proof, height, addr), nil, }, { "empty ack", - types.NewMsgAcknowledgement(packet, nil, suite.proof, height, addr), + types.NewMsgAcknowledgement(packet, nil, s.proof, height, addr), errorsmod.Wrap(types.ErrInvalidAcknowledgement, "ack bytes cannot be empty"), }, { "missing signer address", - types.NewMsgAcknowledgement(packet, packet.GetData(), suite.proof, height, emptyAddr), + types.NewMsgAcknowledgement(packet, packet.GetData(), s.proof, height, emptyAddr), errorsmod.Wrapf(ibcerrors.ErrInvalidAddress, "string could not be parsed as address: %v", errors.New("empty address string is not allowed")), }, { @@ -1026,33 +1026,33 @@ func (suite *TypesTestSuite) TestMsgAcknowledgementValidateBasic() { }, { "invalid packet", - types.NewMsgAcknowledgement(invalidPacket, packet.GetData(), suite.proof, height, addr), + types.NewMsgAcknowledgement(invalidPacket, packet.GetData(), s.proof, height, addr), errorsmod.Wrap(types.ErrInvalidPacket, "packet sequence cannot be 0"), }, } for _, tc := range testCases { - suite.Run(tc.name, func() { + s.Run(tc.name, func() { err := tc.msg.ValidateBasic() if tc.expErr == nil { - suite.Require().NoError(err) + s.Require().NoError(err) } else { - suite.Require().Error(err) - suite.Require().Equal(err.Error(), tc.expErr.Error()) + s.Require().Error(err) + s.Require().Equal(err.Error(), tc.expErr.Error()) } }) } } -func (suite *TypesTestSuite) TestMsgAcknowledgementGetSigners() { +func (s *TypesTestSuite) TestMsgAcknowledgementGetSigners() { expSigner, err := sdk.AccAddressFromBech32(addr) - suite.Require().NoError(err) - msg := types.NewMsgAcknowledgement(packet, packet.GetData(), suite.proof, height, addr) + s.Require().NoError(err) + msg := types.NewMsgAcknowledgement(packet, packet.GetData(), s.proof, height, addr) encodingCfg := moduletestutil.MakeTestEncodingConfig(ibc.AppModuleBasic{}) signers, _, err := encodingCfg.Codec.GetMsgV1Signers(msg) - suite.Require().NoError(err) - suite.Require().Equal(expSigner.Bytes(), signers[0]) + s.Require().NoError(err) + s.Require().Equal(expSigner.Bytes(), signers[0]) } diff --git a/modules/core/04-channel/types/query.go b/modules/core/04-channel/types/query.go index b6c7d7c2f15..a73ddb4821e 100644 --- a/modules/core/04-channel/types/query.go +++ b/modules/core/04-channel/types/query.go @@ -31,8 +31,8 @@ func NewQueryChannelClientStateResponse(identifiedClientState clienttypes.Identi } // UnpackInterfaces implements 
UnpackInterfacesMessage.UnpackInterfaces -func (qccsr QueryChannelClientStateResponse) UnpackInterfaces(unpacker codectypes.AnyUnpacker) error { - return qccsr.IdentifiedClientState.UnpackInterfaces(unpacker) +func (resp QueryChannelClientStateResponse) UnpackInterfaces(unpacker codectypes.AnyUnpacker) error { + return resp.IdentifiedClientState.UnpackInterfaces(unpacker) } // NewQueryChannelConsensusStateResponse creates a newQueryChannelConsensusStateResponse instance @@ -46,8 +46,8 @@ func NewQueryChannelConsensusStateResponse(clientID string, anyConsensusState *c } // UnpackInterfaces implements UnpackInterfacesMessage.UnpackInterfaces -func (qccsr QueryChannelConsensusStateResponse) UnpackInterfaces(unpacker codectypes.AnyUnpacker) error { - return unpacker.UnpackAny(qccsr.ConsensusState, new(exported.ConsensusState)) +func (resp QueryChannelConsensusStateResponse) UnpackInterfaces(unpacker codectypes.AnyUnpacker) error { + return unpacker.UnpackAny(resp.ConsensusState, new(exported.ConsensusState)) } // NewQueryPacketCommitmentResponse creates a new QueryPacketCommitmentResponse instance diff --git a/modules/core/04-channel/types/query.pb.go b/modules/core/04-channel/types/query.pb.go index ea141f84ad9..133e4923a91 100644 --- a/modules/core/04-channel/types/query.pb.go +++ b/modules/core/04-channel/types/query.pb.go @@ -2458,6 +2458,7 @@ func _Query_NextSequenceSend_Handler(srv interface{}, ctx context.Context, dec f return interceptor(ctx, in, info, handler) } +var Query_serviceDesc = _Query_serviceDesc var _Query_serviceDesc = grpc.ServiceDesc{ ServiceName: "ibc.core.channel.v1.Query", HandlerType: (*QueryServer)(nil), diff --git a/modules/core/04-channel/types/timeout_test.go b/modules/core/04-channel/types/timeout_test.go index 4a4d0190c76..462499a37f9 100644 --- a/modules/core/04-channel/types/timeout_test.go +++ b/modules/core/04-channel/types/timeout_test.go @@ -7,7 +7,7 @@ import ( "github.com/cosmos/ibc-go/v10/modules/core/04-channel/types" ) -func (suite *TypesTestSuite) TestIsValid() { +func (s *TypesTestSuite) TestIsValid() { var timeout types.Timeout testCases := []struct { @@ -46,16 +46,16 @@ func (suite *TypesTestSuite) TestIsValid() { } for _, tc := range testCases { - suite.Run(tc.name, func() { + s.Run(tc.name, func() { tc.malleate() isValid := timeout.IsValid() - suite.Require().Equal(tc.isValid, isValid) + s.Require().Equal(tc.isValid, isValid) }) } } -func (suite *TypesTestSuite) TestElapsed() { +func (s *TypesTestSuite) TestElapsed() { // elapsed is expected to be true when either timeout height or timestamp // is greater than or equal to 2 var ( @@ -126,14 +126,14 @@ func (suite *TypesTestSuite) TestElapsed() { } for _, tc := range testCases { - suite.Run(tc.name, func() { + s.Run(tc.name, func() { elapsed := tc.timeout.Elapsed(height, timestamp) - suite.Require().Equal(tc.expElapsed, elapsed) + s.Require().Equal(tc.expElapsed, elapsed) }) } } -func (suite *TypesTestSuite) TestErrTimeoutElapsed() { +func (s *TypesTestSuite) TestErrTimeoutElapsed() { // elapsed is expected to be true when either timeout height or timestamp // is greater than or equal to 2 var ( @@ -184,14 +184,14 @@ func (suite *TypesTestSuite) TestErrTimeoutElapsed() { } for _, tc := range testCases { - suite.Run(tc.name, func() { + s.Run(tc.name, func() { err := tc.timeout.ErrTimeoutElapsed(height, timestamp) - suite.Require().Equal(tc.expError.Error(), err.Error()) + s.Require().Equal(tc.expError.Error(), err.Error()) }) } } -func (suite *TypesTestSuite) TestErrTimeoutNotReached() { +func (s 
*TypesTestSuite) TestErrTimeoutNotReached() { // elapsed is expected to be true when either timeout height or timestamp // is greater than or equal to 2 var ( @@ -222,9 +222,9 @@ func (suite *TypesTestSuite) TestErrTimeoutNotReached() { } for _, tc := range testCases { - suite.Run(tc.name, func() { + s.Run(tc.name, func() { err := tc.timeout.ErrTimeoutNotReached(height, timestamp) - suite.Require().Equal(tc.expError.Error(), err.Error()) + s.Require().Equal(tc.expError.Error(), err.Error()) }) } } diff --git a/modules/core/04-channel/types/tx.pb.go b/modules/core/04-channel/types/tx.pb.go index 99cf551455f..77bc653ce38 100644 --- a/modules/core/04-channel/types/tx.pb.go +++ b/modules/core/04-channel/types/tx.pb.go @@ -1358,6 +1358,7 @@ func _Msg_Acknowledgement_Handler(srv interface{}, ctx context.Context, dec func return interceptor(ctx, in, info, handler) } +var Msg_serviceDesc = _Msg_serviceDesc var _Msg_serviceDesc = grpc.ServiceDesc{ ServiceName: "ibc.core.channel.v1.Msg", HandlerType: (*MsgServer)(nil), diff --git a/modules/core/04-channel/v2/genesis_test.go b/modules/core/04-channel/v2/genesis_test.go index 85488493946..8d58979d099 100644 --- a/modules/core/04-channel/v2/genesis_test.go +++ b/modules/core/04-channel/v2/genesis_test.go @@ -1,7 +1,7 @@ package channelv2_test import ( - proto "github.com/cosmos/gogoproto/proto" + "github.com/cosmos/gogoproto/proto" channelv2 "github.com/cosmos/ibc-go/v10/modules/core/04-channel/v2" "github.com/cosmos/ibc-go/v10/modules/core/04-channel/v2/types" @@ -10,19 +10,19 @@ import ( ) // TestInitExportGenesis tests the import and export flow for the channel v2 keeper. -func (suite *ModuleTestSuite) TestInitExportGenesis() { - path := ibctesting.NewPath(suite.chainA, suite.chainB) +func (s *ModuleTestSuite) TestInitExportGenesis() { + path := ibctesting.NewPath(s.chainA, s.chainB) path.SetupV2() - path2 := ibctesting.NewPath(suite.chainA, suite.chainC) + path2 := ibctesting.NewPath(s.chainA, s.chainC) path2.SetupV2() - app := suite.chainA.App + app := s.chainA.App emptyGenesis := types.DefaultGenesisState() // create a valid genesis state that uses the client keepers existing client IDs - clientStates := app.GetIBCKeeper().ClientKeeper.GetAllGenesisClients(suite.chainA.GetContext()) + clientStates := app.GetIBCKeeper().ClientKeeper.GetAllGenesisClients(s.chainA.GetContext()) validGs := types.DefaultGenesisState() for i, clientState := range clientStates { ack := types.NewPacketState(clientState.ClientId, uint64(i+1), []byte("ack")) @@ -34,11 +34,11 @@ func (suite *ModuleTestSuite) TestInitExportGenesis() { uint64(i+1), clientState.ClientId, clientState.ClientId, - uint64(suite.chainA.GetContext().BlockTime().Unix()), + uint64(s.chainA.GetContext().BlockTime().Unix()), mockv2.NewMockPayload("src", "dst"), ) bz, err := proto.Marshal(&packet) - suite.Require().NoError(err) + s.Require().NoError(err) asyncPacket := types.NewPacketState(clientState.ClientId, uint64(i+1), bz) validGs.Acknowledgements = append(validGs.Acknowledgements, ack) @@ -64,13 +64,13 @@ func (suite *ModuleTestSuite) TestInitExportGenesis() { } for _, tt := range tests { - suite.Run(tt.name, func() { + s.Run(tt.name, func() { channelV2Keeper := app.GetIBCKeeper().ChannelKeeperV2 - channelv2.InitGenesis(suite.chainA.GetContext(), channelV2Keeper, tt.genState) + channelv2.InitGenesis(s.chainA.GetContext(), channelV2Keeper, tt.genState) - exported := channelv2.ExportGenesis(suite.chainA.GetContext(), channelV2Keeper) - suite.Require().Equal(tt.genState, exported) + exported := 
channelv2.ExportGenesis(s.chainA.GetContext(), channelV2Keeper) + s.Require().Equal(tt.genState, exported) }) } } diff --git a/modules/core/04-channel/v2/keeper/grpc_query.go b/modules/core/04-channel/v2/keeper/grpc_query.go index 9c695e6709d..304545e957e 100644 --- a/modules/core/04-channel/v2/keeper/grpc_query.go +++ b/modules/core/04-channel/v2/keeper/grpc_query.go @@ -155,7 +155,7 @@ func (q *queryServer) PacketAcknowledgements(goCtx context.Context, req *types.Q return nil, status.Error(codes.InvalidArgument, err.Error()) } - var acks []*types.PacketState + var acks []*types.PacketState // nolint: prealloc store := prefix.NewStore(runtime.KVStoreAdapter(q.storeService.OpenKVStore(goCtx)), hostv2.PacketAcknowledgementPrefixKey(req.ClientId)) // if a list of packet sequences is provided then query for each specific ack and return a list <= len(req.PacketCommitmentSequences) @@ -311,7 +311,6 @@ func (q *queryServer) UnreceivedAcks(goCtx context.Context, req *types.QueryUnre if commitment := q.GetPacketCommitment(ctx, req.ClientId, seq); len(commitment) != 0 { unreceivedSequences = append(unreceivedSequences, seq) } - } selfHeight := clienttypes.GetSelfHeight(ctx) diff --git a/modules/core/04-channel/v2/keeper/grpc_query_test.go b/modules/core/04-channel/v2/keeper/grpc_query_test.go index 5b9c7426f55..0023a905237 100644 --- a/modules/core/04-channel/v2/keeper/grpc_query_test.go +++ b/modules/core/04-channel/v2/keeper/grpc_query_test.go @@ -13,7 +13,7 @@ import ( ibctesting "github.com/cosmos/ibc-go/v10/testing" ) -func (suite *KeeperTestSuite) TestQueryPacketCommitment() { +func (s *KeeperTestSuite) TestQueryPacketCommitment() { var ( expCommitment []byte path *ibctesting.Path @@ -28,11 +28,11 @@ func (suite *KeeperTestSuite) TestQueryPacketCommitment() { { "success", func() { - path = ibctesting.NewPath(suite.chainA, suite.chainB) + path = ibctesting.NewPath(s.chainA, s.chainB) path.SetupV2() expCommitment = []byte("commitmentHash") - suite.chainA.App.GetIBCKeeper().ChannelKeeperV2.SetPacketCommitment(suite.chainA.GetContext(), path.EndpointA.ClientID, 1, expCommitment) + s.chainA.App.GetIBCKeeper().ChannelKeeperV2.SetPacketCommitment(s.chainA.GetContext(), path.EndpointA.ClientID, 1, expCommitment) req = &types.QueryPacketCommitmentRequest{ ClientId: path.EndpointA.ClientID, @@ -71,7 +71,7 @@ func (suite *KeeperTestSuite) TestQueryPacketCommitment() { { "commitment not found", func() { - path := ibctesting.NewPath(suite.chainA, suite.chainB) + path := ibctesting.NewPath(s.chainA, s.chainB) path.SetupV2() req = &types.QueryPacketCommitmentRequest{ @@ -84,28 +84,28 @@ func (suite *KeeperTestSuite) TestQueryPacketCommitment() { } for _, tc := range testCases { - suite.Run(fmt.Sprintf("Case %s", tc.msg), func() { - suite.SetupTest() // reset + s.Run(fmt.Sprintf("Case %s", tc.msg), func() { + s.SetupTest() // reset tc.malleate() - queryServer := keeper.NewQueryServer(suite.chainA.GetSimApp().IBCKeeper.ChannelKeeperV2) - res, err := queryServer.PacketCommitment(suite.chainA.GetContext(), req) + queryServer := keeper.NewQueryServer(s.chainA.GetSimApp().IBCKeeper.ChannelKeeperV2) + res, err := queryServer.PacketCommitment(s.chainA.GetContext(), req) expPass := tc.expError == nil if expPass { - suite.Require().NoError(err) - suite.Require().NotNil(res) - suite.Require().Equal(expCommitment, res.Commitment) + s.Require().NoError(err) + s.Require().NotNil(res) + s.Require().Equal(expCommitment, res.Commitment) } else { - suite.Require().ErrorIs(err, tc.expError) - suite.Require().Nil(res) + 
s.Require().ErrorIs(err, tc.expError) + s.Require().Nil(res) } }) } } -func (suite *KeeperTestSuite) TestQueryPacketCommitments() { +func (s *KeeperTestSuite) TestQueryPacketCommitments() { var ( req *types.QueryPacketCommitmentsRequest expCommitments = []*types.PacketState{} @@ -119,13 +119,13 @@ func (suite *KeeperTestSuite) TestQueryPacketCommitments() { { "success", func() { - path := ibctesting.NewPath(suite.chainA, suite.chainB) + path := ibctesting.NewPath(s.chainA, s.chainB) path.SetupV2() expCommitments = make([]*types.PacketState, 0, 10) // reset expected commitments for i := uint64(1); i <= 10; i++ { pktStateCommitment := types.NewPacketState(path.EndpointA.ClientID, i, fmt.Appendf(nil, "hash_%d", i)) - suite.chainA.App.GetIBCKeeper().ChannelKeeperV2.SetPacketCommitment(suite.chainA.GetContext(), pktStateCommitment.ClientId, pktStateCommitment.Sequence, pktStateCommitment.Data) + s.chainA.App.GetIBCKeeper().ChannelKeeperV2.SetPacketCommitment(s.chainA.GetContext(), pktStateCommitment.ClientId, pktStateCommitment.Sequence, pktStateCommitment.Data) expCommitments = append(expCommitments, &pktStateCommitment) } @@ -143,13 +143,13 @@ func (suite *KeeperTestSuite) TestQueryPacketCommitments() { { "success: with pagination", func() { - path := ibctesting.NewPath(suite.chainA, suite.chainB) + path := ibctesting.NewPath(s.chainA, s.chainB) path.SetupV2() expCommitments = make([]*types.PacketState, 0, 10) // reset expected commitments for i := uint64(1); i <= 10; i++ { pktStateCommitment := types.NewPacketState(path.EndpointA.ClientID, i, fmt.Appendf(nil, "hash_%d", i)) - suite.chainA.App.GetIBCKeeper().ChannelKeeperV2.SetPacketCommitment(suite.chainA.GetContext(), pktStateCommitment.ClientId, pktStateCommitment.Sequence, pktStateCommitment.Data) + s.chainA.App.GetIBCKeeper().ChannelKeeperV2.SetPacketCommitment(s.chainA.GetContext(), pktStateCommitment.ClientId, pktStateCommitment.Sequence, pktStateCommitment.Data) expCommitments = append(expCommitments, &pktStateCommitment) } @@ -186,28 +186,28 @@ func (suite *KeeperTestSuite) TestQueryPacketCommitments() { } for _, tc := range testCases { - suite.Run(fmt.Sprintf("Case %s", tc.msg), func() { - suite.SetupTest() // reset + s.Run(fmt.Sprintf("Case %s", tc.msg), func() { + s.SetupTest() // reset tc.malleate() - ctx := suite.chainA.GetContext() + ctx := s.chainA.GetContext() - queryServer := keeper.NewQueryServer(suite.chainA.GetSimApp().IBCKeeper.ChannelKeeperV2) + queryServer := keeper.NewQueryServer(s.chainA.GetSimApp().IBCKeeper.ChannelKeeperV2) res, err := queryServer.PacketCommitments(ctx, req) expPass := tc.expError == nil if expPass { - suite.Require().NoError(err) - suite.Require().NotNil(res) - suite.Require().Equal(expCommitments, res.Commitments) + s.Require().NoError(err) + s.Require().NotNil(res) + s.Require().Equal(expCommitments, res.Commitments) } else { - suite.Require().Error(err) + s.Require().Error(err) } }) } } -func (suite *KeeperTestSuite) TestQueryPacketAcknowledgement() { +func (s *KeeperTestSuite) TestQueryPacketAcknowledgement() { var ( expAcknowledgement []byte path *ibctesting.Path @@ -222,11 +222,11 @@ func (suite *KeeperTestSuite) TestQueryPacketAcknowledgement() { { "success", func() { - path = ibctesting.NewPath(suite.chainA, suite.chainB) + path = ibctesting.NewPath(s.chainA, s.chainB) path.SetupV2() expAcknowledgement = []byte("acknowledgementHash") - suite.chainA.App.GetIBCKeeper().ChannelKeeperV2.SetPacketAcknowledgement(suite.chainA.GetContext(), path.EndpointA.ClientID, 1, expAcknowledgement) + 
s.chainA.App.GetIBCKeeper().ChannelKeeperV2.SetPacketAcknowledgement(s.chainA.GetContext(), path.EndpointA.ClientID, 1, expAcknowledgement) req = &types.QueryPacketAcknowledgementRequest{ ClientId: path.EndpointA.ClientID, @@ -265,7 +265,7 @@ func (suite *KeeperTestSuite) TestQueryPacketAcknowledgement() { { "acknowledgement not found", func() { - path := ibctesting.NewPath(suite.chainA, suite.chainB) + path := ibctesting.NewPath(s.chainA, s.chainB) path.SetupV2() req = &types.QueryPacketAcknowledgementRequest{ @@ -278,28 +278,28 @@ func (suite *KeeperTestSuite) TestQueryPacketAcknowledgement() { } for _, tc := range testCases { - suite.Run(fmt.Sprintf("Case %s", tc.msg), func() { - suite.SetupTest() // reset + s.Run(fmt.Sprintf("Case %s", tc.msg), func() { + s.SetupTest() // reset tc.malleate() - queryServer := keeper.NewQueryServer(suite.chainA.GetSimApp().IBCKeeper.ChannelKeeperV2) - res, err := queryServer.PacketAcknowledgement(suite.chainA.GetContext(), req) + queryServer := keeper.NewQueryServer(s.chainA.GetSimApp().IBCKeeper.ChannelKeeperV2) + res, err := queryServer.PacketAcknowledgement(s.chainA.GetContext(), req) expPass := tc.expError == nil if expPass { - suite.Require().NoError(err) - suite.Require().NotNil(res) - suite.Require().Equal(expAcknowledgement, res.Acknowledgement) + s.Require().NoError(err) + s.Require().NotNil(res) + s.Require().Equal(expAcknowledgement, res.Acknowledgement) } else { - suite.Require().ErrorIs(err, tc.expError) - suite.Require().Nil(res) + s.Require().ErrorIs(err, tc.expError) + s.Require().Nil(res) } }) } } -func (suite *KeeperTestSuite) TestQueryPacketAcknowledgements() { +func (s *KeeperTestSuite) TestQueryPacketAcknowledgements() { var ( req *types.QueryPacketAcknowledgementsRequest expAcknowledgements = []*types.PacketState{} @@ -313,14 +313,14 @@ func (suite *KeeperTestSuite) TestQueryPacketAcknowledgements() { { "success: with PacketCommitmentSequences", func() { - path := ibctesting.NewPath(suite.chainA, suite.chainB) + path := ibctesting.NewPath(s.chainA, s.chainB) path.SetupV2() var commitments []uint64 - for i := uint64(0); i < 100; i++ { + for i := range uint64(100) { ack := types.NewPacketState(path.EndpointA.ClientID, i, fmt.Appendf(nil, "hash_%d", i)) - suite.chainA.App.GetIBCKeeper().ChannelKeeperV2.SetPacketAcknowledgement(suite.chainA.GetContext(), ack.ClientId, ack.Sequence, ack.Data) + s.chainA.App.GetIBCKeeper().ChannelKeeperV2.SetPacketAcknowledgement(s.chainA.GetContext(), ack.ClientId, ack.Sequence, ack.Data) if i < 10 { // populate the store with 100 and query for 10 specific acks expAcknowledgements = append(expAcknowledgements, &ack) @@ -339,14 +339,14 @@ func (suite *KeeperTestSuite) TestQueryPacketAcknowledgements() { { "success: with pagination", func() { - path := ibctesting.NewPath(suite.chainA, suite.chainB) + path := ibctesting.NewPath(s.chainA, s.chainB) path.SetupV2() expAcknowledgements = make([]*types.PacketState, 0, 10) for i := uint64(1); i <= 10; i++ { ack := types.NewPacketState(path.EndpointA.ClientID, i, fmt.Appendf(nil, "hash_%d", i)) - suite.chainA.App.GetIBCKeeper().ChannelKeeperV2.SetPacketAcknowledgement(suite.chainA.GetContext(), ack.ClientId, ack.Sequence, ack.Data) + s.chainA.App.GetIBCKeeper().ChannelKeeperV2.SetPacketAcknowledgement(s.chainA.GetContext(), ack.ClientId, ack.Sequence, ack.Data) expAcknowledgements = append(expAcknowledgements, &ack) } @@ -380,28 +380,28 @@ func (suite *KeeperTestSuite) TestQueryPacketAcknowledgements() { } for _, tc := range testCases { - 
suite.Run(fmt.Sprintf("Case %s", tc.msg), func() { - suite.SetupTest() // reset + s.Run(fmt.Sprintf("Case %s", tc.msg), func() { + s.SetupTest() // reset tc.malleate() - ctx := suite.chainA.GetContext() + ctx := s.chainA.GetContext() - queryServer := keeper.NewQueryServer(suite.chainA.App.GetIBCKeeper().ChannelKeeperV2) + queryServer := keeper.NewQueryServer(s.chainA.App.GetIBCKeeper().ChannelKeeperV2) res, err := queryServer.PacketAcknowledgements(ctx, req) expPass := tc.expError == nil if expPass { - suite.Require().NoError(err) - suite.Require().NotNil(res) - suite.Require().Equal(expAcknowledgements, res.Acknowledgements) + s.Require().NoError(err) + s.Require().NotNil(res) + s.Require().Equal(expAcknowledgements, res.Acknowledgements) } else { - suite.Require().ErrorIs(err, tc.expError) + s.Require().ErrorIs(err, tc.expError) } }) } } -func (suite *KeeperTestSuite) TestQueryPacketReceipt() { +func (s *KeeperTestSuite) TestQueryPacketReceipt() { var ( expReceipt bool path *ibctesting.Path @@ -416,10 +416,10 @@ func (suite *KeeperTestSuite) TestQueryPacketReceipt() { { "success with receipt", func() { - path = ibctesting.NewPath(suite.chainA, suite.chainB) + path = ibctesting.NewPath(s.chainA, s.chainB) path.SetupV2() - suite.chainA.App.GetIBCKeeper().ChannelKeeperV2.SetPacketReceipt(suite.chainA.GetContext(), path.EndpointA.ClientID, 1) + s.chainA.App.GetIBCKeeper().ChannelKeeperV2.SetPacketReceipt(s.chainA.GetContext(), path.EndpointA.ClientID, 1) expReceipt = true req = &types.QueryPacketReceiptRequest{ @@ -432,7 +432,7 @@ func (suite *KeeperTestSuite) TestQueryPacketReceipt() { { "success with no receipt", func() { - path = ibctesting.NewPath(suite.chainA, suite.chainB) + path = ibctesting.NewPath(s.chainA, s.chainB) path.SetupV2() expReceipt = false @@ -473,28 +473,28 @@ func (suite *KeeperTestSuite) TestQueryPacketReceipt() { } for _, tc := range testCases { - suite.Run(fmt.Sprintf("Case %s", tc.msg), func() { - suite.SetupTest() // reset + s.Run(fmt.Sprintf("Case %s", tc.msg), func() { + s.SetupTest() // reset tc.malleate() - queryServer := keeper.NewQueryServer(suite.chainA.GetSimApp().IBCKeeper.ChannelKeeperV2) - res, err := queryServer.PacketReceipt(suite.chainA.GetContext(), req) + queryServer := keeper.NewQueryServer(s.chainA.GetSimApp().IBCKeeper.ChannelKeeperV2) + res, err := queryServer.PacketReceipt(s.chainA.GetContext(), req) expPass := tc.expError == nil if expPass { - suite.Require().NoError(err) - suite.Require().NotNil(res) - suite.Require().Equal(expReceipt, res.Received) + s.Require().NoError(err) + s.Require().NotNil(res) + s.Require().Equal(expReceipt, res.Received) } else { - suite.Require().ErrorIs(err, tc.expError) - suite.Require().Nil(res) + s.Require().ErrorIs(err, tc.expError) + s.Require().Nil(res) } }) } } -func (suite *KeeperTestSuite) TestQueryNextSequenceSend() { +func (s *KeeperTestSuite) TestQueryNextSequenceSend() { var ( req *types.QueryNextSequenceSendRequest expSeq uint64 @@ -508,12 +508,12 @@ func (suite *KeeperTestSuite) TestQueryNextSequenceSend() { { "success", func() { - path := ibctesting.NewPath(suite.chainA, suite.chainB) + path := ibctesting.NewPath(s.chainA, s.chainB) path.Setup() expSeq = 42 seq := uint64(42) - suite.chainA.App.GetIBCKeeper().ChannelKeeperV2.SetNextSequenceSend(suite.chainA.GetContext(), path.EndpointA.ClientID, seq) + s.chainA.App.GetIBCKeeper().ChannelKeeperV2.SetNextSequenceSend(s.chainA.GetContext(), path.EndpointA.ClientID, seq) req = types.NewQueryNextSequenceSendRequest(path.EndpointA.ClientID) }, nil, @@ 
-542,29 +542,29 @@ func (suite *KeeperTestSuite) TestQueryNextSequenceSend() { } for _, tc := range testCases { - suite.Run(fmt.Sprintf("Case %s", tc.msg), func() { - suite.SetupTest() // reset + s.Run(fmt.Sprintf("Case %s", tc.msg), func() { + s.SetupTest() // reset tc.malleate() - ctx := suite.chainA.GetContext() + ctx := s.chainA.GetContext() - queryServer := keeper.NewQueryServer(suite.chainA.App.GetIBCKeeper().ChannelKeeperV2) + queryServer := keeper.NewQueryServer(s.chainA.App.GetIBCKeeper().ChannelKeeperV2) res, err := queryServer.NextSequenceSend(ctx, req) expPass := tc.expError == nil if expPass { - suite.Require().NoError(err) - suite.Require().NotNil(res) - suite.Require().Equal(expSeq, res.NextSequenceSend) + s.Require().NoError(err) + s.Require().NotNil(res) + s.Require().Equal(expSeq, res.NextSequenceSend) } else { - suite.Require().ErrorIs(err, tc.expError) - suite.Require().Nil(res) + s.Require().ErrorIs(err, tc.expError) + s.Require().Nil(res) } }) } } -func (suite *KeeperTestSuite) TestQueryUnreceivedPackets() { +func (s *KeeperTestSuite) TestQueryUnreceivedPackets() { var ( expSeq []uint64 path *ibctesting.Path @@ -595,7 +595,7 @@ func (suite *KeeperTestSuite) TestQueryUnreceivedPackets() { { "invalid seq", func() { - path := ibctesting.NewPath(suite.chainA, suite.chainB) + path := ibctesting.NewPath(s.chainA, s.chainB) path.SetupV2() req = &types.QueryUnreceivedPacketsRequest{ @@ -608,7 +608,7 @@ func (suite *KeeperTestSuite) TestQueryUnreceivedPackets() { { "basic success empty packet commitments", func() { - path = ibctesting.NewPath(suite.chainA, suite.chainB) + path = ibctesting.NewPath(s.chainA, s.chainB) path.SetupV2() expSeq = []uint64(nil) @@ -622,7 +622,7 @@ func (suite *KeeperTestSuite) TestQueryUnreceivedPackets() { { "basic success unreceived packet commitments", func() { - path = ibctesting.NewPath(suite.chainA, suite.chainB) + path = ibctesting.NewPath(s.chainA, s.chainB) path.SetupV2() // no ack exists @@ -638,10 +638,10 @@ func (suite *KeeperTestSuite) TestQueryUnreceivedPackets() { { "basic success unreceived packet commitments, nothing to relay", func() { - path = ibctesting.NewPath(suite.chainA, suite.chainB) + path = ibctesting.NewPath(s.chainA, s.chainB) path.SetupV2() - suite.chainA.App.GetIBCKeeper().ChannelKeeperV2.SetPacketReceipt(suite.chainA.GetContext(), path.EndpointA.ClientID, 1) + s.chainA.App.GetIBCKeeper().ChannelKeeperV2.SetPacketReceipt(s.chainA.GetContext(), path.EndpointA.ClientID, 1) expSeq = []uint64(nil) req = &types.QueryUnreceivedPacketsRequest{ @@ -654,7 +654,7 @@ func (suite *KeeperTestSuite) TestQueryUnreceivedPackets() { { "success multiple unreceived packet commitments", func() { - path = ibctesting.NewPath(suite.chainA, suite.chainB) + path = ibctesting.NewPath(s.chainA, s.chainB) path.SetupV2() expSeq = []uint64(nil) // reset packetCommitments := []uint64{} @@ -664,7 +664,7 @@ func (suite *KeeperTestSuite) TestQueryUnreceivedPackets() { packetCommitments = append(packetCommitments, seq) if seq%2 == 0 { - suite.chainA.App.GetIBCKeeper().ChannelKeeperV2.SetPacketReceipt(suite.chainA.GetContext(), path.EndpointA.ClientID, seq) + s.chainA.App.GetIBCKeeper().ChannelKeeperV2.SetPacketReceipt(s.chainA.GetContext(), path.EndpointA.ClientID, seq) } else { expSeq = append(expSeq, seq) } @@ -680,29 +680,29 @@ func (suite *KeeperTestSuite) TestQueryUnreceivedPackets() { } for _, tc := range testCases { - suite.Run(fmt.Sprintf("Case %s", tc.msg), func() { - suite.SetupTest() // reset + s.Run(fmt.Sprintf("Case %s", tc.msg), func() { + 
s.SetupTest() // reset tc.malleate() - ctx := suite.chainA.GetContext() + ctx := s.chainA.GetContext() - queryServer := keeper.NewQueryServer(suite.chainA.App.GetIBCKeeper().ChannelKeeperV2) + queryServer := keeper.NewQueryServer(s.chainA.App.GetIBCKeeper().ChannelKeeperV2) res, err := queryServer.UnreceivedPackets(ctx, req) expPass := tc.expError == nil if expPass { - suite.Require().NoError(err) - suite.Require().NotNil(res) - suite.Require().Equal(expSeq, res.Sequences) + s.Require().NoError(err) + s.Require().NotNil(res) + s.Require().Equal(expSeq, res.Sequences) } else { - suite.Require().ErrorIs(err, tc.expError) - suite.Require().Error(err) + s.Require().ErrorIs(err, tc.expError) + s.Require().Error(err) } }) } } -func (suite *KeeperTestSuite) TestQueryUnreceivedAcks() { +func (s *KeeperTestSuite) TestQueryUnreceivedAcks() { var ( path *ibctesting.Path req *types.QueryUnreceivedAcksRequest @@ -728,7 +728,7 @@ func (suite *KeeperTestSuite) TestQueryUnreceivedAcks() { { "success: single unreceived packet ack", func() { - suite.chainA.App.GetIBCKeeper().ChannelKeeperV2.SetPacketCommitment(suite.chainA.GetContext(), path.EndpointA.ClientID, 1, []byte("commitment")) + s.chainA.App.GetIBCKeeper().ChannelKeeperV2.SetPacketCommitment(s.chainA.GetContext(), path.EndpointA.ClientID, 1, []byte("commitment")) expSeq = []uint64{1} req = &types.QueryUnreceivedAcksRequest{ @@ -749,7 +749,7 @@ func (suite *KeeperTestSuite) TestQueryUnreceivedAcks() { packetAcks = append(packetAcks, seq) if seq%2 == 0 { - suite.chainA.App.GetIBCKeeper().ChannelKeeperV2.SetPacketCommitment(suite.chainA.GetContext(), path.EndpointA.ClientID, seq, []byte("commitement")) + s.chainA.App.GetIBCKeeper().ChannelKeeperV2.SetPacketCommitment(s.chainA.GetContext(), path.EndpointA.ClientID, seq, []byte("commitement")) expSeq = append(expSeq, seq) } } @@ -790,25 +790,25 @@ func (suite *KeeperTestSuite) TestQueryUnreceivedAcks() { } for _, tc := range testCases { - suite.Run(fmt.Sprintf("Case %s", tc.msg), func() { - suite.SetupTest() // reset - path = ibctesting.NewPath(suite.chainA, suite.chainB) + s.Run(fmt.Sprintf("Case %s", tc.msg), func() { + s.SetupTest() // reset + path = ibctesting.NewPath(s.chainA, s.chainB) path.SetupV2() tc.malleate() - ctx := suite.chainA.GetContext() + ctx := s.chainA.GetContext() - queryServer := keeper.NewQueryServer(suite.chainA.App.GetIBCKeeper().ChannelKeeperV2) + queryServer := keeper.NewQueryServer(s.chainA.App.GetIBCKeeper().ChannelKeeperV2) res, err := queryServer.UnreceivedAcks(ctx, req) expPass := tc.expError == nil if expPass { - suite.Require().NoError(err) - suite.Require().NotNil(res) - suite.Require().Equal(expSeq, res.Sequences) + s.Require().NoError(err) + s.Require().NotNil(res) + s.Require().Equal(expSeq, res.Sequences) } else { - suite.Require().ErrorIs(err, tc.expError) - suite.Require().Nil(res) + s.Require().ErrorIs(err, tc.expError) + s.Require().Nil(res) } }) } diff --git a/modules/core/04-channel/v2/keeper/keeper.go b/modules/core/04-channel/v2/keeper/keeper.go index ada78fd62a2..b00767f5d80 100644 --- a/modules/core/04-channel/v2/keeper/keeper.go +++ b/modules/core/04-channel/v2/keeper/keeper.go @@ -13,7 +13,6 @@ import ( clientv2keeper "github.com/cosmos/ibc-go/v10/modules/core/02-client/v2/keeper" connectionkeeper "github.com/cosmos/ibc-go/v10/modules/core/03-connection/keeper" - channelkeeperv1 "github.com/cosmos/ibc-go/v10/modules/core/04-channel/keeper" "github.com/cosmos/ibc-go/v10/modules/core/04-channel/v2/types" hostv2 
"github.com/cosmos/ibc-go/v10/modules/core/24-host/v2" "github.com/cosmos/ibc-go/v10/modules/core/api" @@ -28,8 +27,6 @@ type Keeper struct { // clientV2Keeper is used for counterparty access. clientV2Keeper *clientv2keeper.Keeper - // channelKeeperV1 is used for channel aliasing only. - channelKeeperV1 *channelkeeperv1.Keeper connectionKeeper *connectionkeeper.Keeper // Router is used to route messages to the appropriate module callbacks @@ -43,13 +40,11 @@ func NewKeeper( storeService corestore.KVStoreService, clientKeeper types.ClientKeeper, clientV2Keeper *clientv2keeper.Keeper, - channelKeeperV1 *channelkeeperv1.Keeper, connectionKeeper *connectionkeeper.Keeper, ) *Keeper { return &Keeper{ storeService: storeService, cdc: cdc, - channelKeeperV1: channelKeeperV1, clientV2Keeper: clientV2Keeper, connectionKeeper: connectionKeeper, ClientKeeper: clientKeeper, @@ -57,7 +52,7 @@ func NewKeeper( } // Logger returns a module-specific logger. -func (Keeper) Logger(ctx sdk.Context) log.Logger { +func (*Keeper) Logger(ctx sdk.Context) log.Logger { return ctx.Logger().With("module", "x/"+exported.ModuleName+"/"+types.SubModuleName) } @@ -261,3 +256,24 @@ func (k *Keeper) getAllPacketStateForClient(ctx sdk.Context, clientID string, pr } return packets } + +// SetClientForAlias sets the base client ID under the alias key for an aliased channelID. +func (k *Keeper) SetClientForAlias(ctx sdk.Context, alias string, baseClientID string) { + store := k.storeService.OpenKVStore(ctx) + if err := store.Set(types.AliasKey(alias), []byte(baseClientID)); err != nil { + panic(err) + } +} + +// GetClientForAlias get the base client ID under the alias key for an aliased channelID. +func (k *Keeper) GetClientForAlias(ctx sdk.Context, alias string) (string, bool) { + store := k.storeService.OpenKVStore(ctx) + bz, err := store.Get(types.AliasKey(alias)) + if err != nil { + panic(err) + } + if len(bz) == 0 { + return "", false + } + return string(bz), true +} diff --git a/modules/core/04-channel/v2/keeper/keeper_test.go b/modules/core/04-channel/v2/keeper/keeper_test.go index 999cd73d117..9f5b327c044 100644 --- a/modules/core/04-channel/v2/keeper/keeper_test.go +++ b/modules/core/04-channel/v2/keeper/keeper_test.go @@ -23,9 +23,9 @@ type KeeperTestSuite struct { chainC *ibctesting.TestChain } -func (suite *KeeperTestSuite) SetupTest() { - suite.coordinator = ibctesting.NewCoordinator(suite.T(), 3) - suite.chainA = suite.coordinator.GetChain(ibctesting.GetChainID(1)) - suite.chainB = suite.coordinator.GetChain(ibctesting.GetChainID(2)) - suite.chainC = suite.coordinator.GetChain(ibctesting.GetChainID(3)) +func (s *KeeperTestSuite) SetupTest() { + s.coordinator = ibctesting.NewCoordinator(s.T(), 3) + s.chainA = s.coordinator.GetChain(ibctesting.GetChainID(1)) + s.chainB = s.coordinator.GetChain(ibctesting.GetChainID(2)) + s.chainC = s.coordinator.GetChain(ibctesting.GetChainID(3)) } diff --git a/modules/core/04-channel/v2/keeper/msg_server.go b/modules/core/04-channel/v2/keeper/msg_server.go index 38a765fbdd9..4659a264e7a 100644 --- a/modules/core/04-channel/v2/keeper/msg_server.go +++ b/modules/core/04-channel/v2/keeper/msg_server.go @@ -3,6 +3,7 @@ package keeper import ( "bytes" "context" + "errors" errorsmod "cosmossdk.io/errors" @@ -66,11 +67,10 @@ func (k *Keeper) RecvPacket(goCtx context.Context, msg *types.MsgRecvPacket) (*t cacheCtx, writeFn := ctx.CacheContext() err = k.recvPacket(cacheCtx, msg.Packet, msg.ProofCommitment, msg.ProofHeight) - switch err { - case nil: + switch { + case err == nil: writeFn() - 
case types.ErrNoOpMsg: - // no-ops do not need event emission as they will be ignored + case errors.Is(err, types.ErrNoOpMsg): ctx.Logger().Debug("no-op on redundant relay", "source-client", msg.Packet.SourceClient) return &types.MsgRecvPacketResponse{Result: types.NOOP}, nil default: @@ -86,21 +86,10 @@ func (k *Keeper) RecvPacket(goCtx context.Context, msg *types.MsgRecvPacket) (*t var isAsync bool isSuccess := true for _, pd := range msg.Packet.Payloads { - // Cache context so that we may discard state changes from callback if the acknowledgement is unsuccessful. - cacheCtx, writeFn = ctx.CacheContext() cb := k.Router.Route(pd.DestinationPort) res := cb.OnRecvPacket(cacheCtx, msg.Packet.SourceClient, msg.Packet.DestinationClient, msg.Packet.Sequence, pd, signer) - if res.Status != types.PacketStatus_Failure { - // successful app acknowledgement cannot equal sentinel error acknowledgement - if bytes.Equal(res.GetAcknowledgement(), types.ErrorAcknowledgement[:]) { - return nil, errorsmod.Wrapf(types.ErrInvalidAcknowledgement, "application acknowledgement cannot be sentinel error acknowledgement") - } - // write application state changes for asynchronous and successful acknowledgements - writeFn() - // append app acknowledgement to the overall acknowledgement - ack.AppAcknowledgements = append(ack.AppAcknowledgements, res.Acknowledgement) - } else { + if res.Status == types.PacketStatus_Failure { isSuccess = false // construct acknowledgement with single app acknowledgement that is the sentinel error acknowledgement ack = types.Acknowledgement{ @@ -111,6 +100,13 @@ func (k *Keeper) RecvPacket(goCtx context.Context, msg *types.MsgRecvPacket) (*t break } + // successful app acknowledgement cannot equal sentinel error acknowledgement + if bytes.Equal(res.GetAcknowledgement(), types.ErrorAcknowledgement[:]) { + return nil, errorsmod.Wrapf(types.ErrInvalidAcknowledgement, "application acknowledgement cannot be sentinel error acknowledgement") + } + // append app acknowledgement to the overall acknowledgement + ack.AppAcknowledgements = append(ack.AppAcknowledgements, res.Acknowledgement) + if res.Status == types.PacketStatus_Async { // Set packet acknowledgement to async if any of the acknowledgements are async. isAsync = true @@ -122,18 +118,19 @@ func (k *Keeper) RecvPacket(goCtx context.Context, msg *types.MsgRecvPacket) (*t } } + // write application state changes for asynchronous and successful acknowledgements + // if any application returns a failure, then we discard all state changes + // to ensure an atomic execution of all payloads + if isSuccess { + writeFn() + } + if !isAsync { - // If the application callback was successful, the acknowledgement must have the same number of app acknowledgements as the packet payloads. - if isSuccess { - if len(ack.AppAcknowledgements) != len(msg.Packet.Payloads) { - return nil, errorsmod.Wrapf(types.ErrInvalidAcknowledgement, "length of app acknowledgement %d does not match length of app payload %d", len(ack.AppAcknowledgements), len(msg.Packet.Payloads)) - } + // sanity check to ensure returned acknowledgement and calculated isSuccess boolean matches + if ack.Success() != isSuccess { + panic("acknowledgement success does not match isSuccess") } - // Validate ack before forwarding to WriteAcknowledgement. - if err := ack.Validate(); err != nil { - return nil, err - } // Set packet acknowledgement only if the acknowledgement is not async. 
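The refactored RecvPacket flow above runs every payload callback against a single cached context, collects the per-app acknowledgements, and commits the cached writes only when all payloads succeed; a single failure collapses the acknowledgement to the sentinel error acknowledgement and discards all state changes so execution stays atomic across payloads. A minimal sketch of that control flow, using toy stand-ins rather than the real ibc-go types, could look like this:

package main

import (
	"bytes"
	"errors"
	"fmt"
)

// Toy stand-ins for the real ibc-go types: a callback result and the
// sentinel error acknowledgement (types.ErrorAcknowledgement in ibc-go).
type recvResult struct {
	success bool
	ack     []byte
}

var sentinelErrorAck = []byte("SENTINEL_ERROR_ACK")

// recvAll mirrors the control flow of the refactored handler: every payload
// callback runs against the same cached store, acknowledgements are collected,
// and the cached writes are committed only if all payloads succeed.
func recvAll(payloads []string, onRecv func(string) recvResult, commit func()) ([][]byte, error) {
	var appAcks [][]byte
	isSuccess := true

	for _, pd := range payloads {
		res := onRecv(pd)
		if !res.success {
			isSuccess = false
			// a single failure yields one sentinel error acknowledgement
			appAcks = [][]byte{sentinelErrorAck}
			break
		}
		// a successful app ack must never equal the sentinel error ack
		if bytes.Equal(res.ack, sentinelErrorAck) {
			return nil, errors.New("application acknowledgement cannot be sentinel error acknowledgement")
		}
		appAcks = append(appAcks, res.ack)
	}

	if isSuccess {
		commit() // write the cached state changes for all payloads at once
	}
	return appAcks, nil
}

func main() {
	acks, err := recvAll(
		[]string{"transfer", "fee"},
		func(pd string) recvResult {
			// the second payload fails, so nothing is committed
			return recvResult{success: pd != "fee", ack: []byte("ok:" + pd)}
		},
		func() { fmt.Println("committed") },
	)
	fmt.Println(err, len(acks)) // <nil> 1 -> only the sentinel error acknowledgement remains
}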
// NOTE: IBC applications modules may call the WriteAcknowledgement asynchronously if the // acknowledgement is async. @@ -170,10 +167,10 @@ func (k *Keeper) Acknowledgement(goCtx context.Context, msg *types.MsgAcknowledg cacheCtx, writeFn := ctx.CacheContext() err = k.acknowledgePacket(cacheCtx, msg.Packet, msg.Acknowledgement, msg.ProofAcked, msg.ProofHeight) - switch err { - case nil: + switch { + case err == nil: writeFn() - case types.ErrNoOpMsg: + case errors.Is(err, types.ErrNoOpMsg): ctx.Logger().Debug("no-op on redundant relay", "source-client", msg.Packet.SourceClient) return &types.MsgAcknowledgementResponse{Result: types.NOOP}, nil default: @@ -224,10 +221,10 @@ func (k *Keeper) Timeout(goCtx context.Context, timeout *types.MsgTimeout) (*typ cacheCtx, writeFn := ctx.CacheContext() err = k.timeoutPacket(cacheCtx, timeout.Packet, timeout.ProofUnreceived, timeout.ProofHeight) - switch err { - case nil: + switch { + case err == nil: writeFn() - case types.ErrNoOpMsg: + case errors.Is(err, types.ErrNoOpMsg): ctx.Logger().Debug("no-op on redundant relay", "source-client", timeout.Packet.SourceClient) return &types.MsgTimeoutResponse{Result: types.NOOP}, nil default: diff --git a/modules/core/04-channel/v2/keeper/msg_server_test.go b/modules/core/04-channel/v2/keeper/msg_server_test.go index f853e96ec49..7df4d9aebc8 100644 --- a/modules/core/04-channel/v2/keeper/msg_server_test.go +++ b/modules/core/04-channel/v2/keeper/msg_server_test.go @@ -1,6 +1,7 @@ package keeper_test import ( + "bytes" "errors" "time" @@ -12,16 +13,16 @@ import ( commitmenttypes "github.com/cosmos/ibc-go/v10/modules/core/23-commitment/types" ibcerrors "github.com/cosmos/ibc-go/v10/modules/core/errors" ibctesting "github.com/cosmos/ibc-go/v10/testing" - "github.com/cosmos/ibc-go/v10/testing/mock" + mockv1 "github.com/cosmos/ibc-go/v10/testing/mock" mockv2 "github.com/cosmos/ibc-go/v10/testing/mock/v2" ) -func (suite *KeeperTestSuite) TestMsgSendPacket() { +func (s *KeeperTestSuite) TestMsgSendPacket() { var ( path *ibctesting.Path expectedPacket types.Packet timeoutTimestamp uint64 - payload types.Payload + payloads []types.Payload ) testCases := []struct { @@ -34,12 +35,19 @@ func (suite *KeeperTestSuite) TestMsgSendPacket() { malleate: func() {}, expError: nil, }, + { + name: "success multiple payloads", + malleate: func() { + payloads = append(payloads, payloads[0]) + }, + expError: nil, + }, { name: "success: valid timeout timestamp", malleate: func() { // ensure a message timeout. - timeoutTimestamp = uint64(suite.chainA.GetContext().BlockTime().Add(types.MaxTimeoutDelta - 10*time.Second).Unix()) - expectedPacket = types.NewPacket(1, path.EndpointA.ClientID, path.EndpointB.ClientID, timeoutTimestamp, payload) + timeoutTimestamp = uint64(s.chainA.GetContext().BlockTime().Add(types.MaxTimeoutDelta - 10*time.Second).Unix()) + expectedPacket = types.NewPacket(1, path.EndpointA.ClientID, path.EndpointB.ClientID, timeoutTimestamp, payloads...) }, expError: nil, }, @@ -55,7 +63,7 @@ func (suite *KeeperTestSuite) TestMsgSendPacket() { name: "failure: timeout timestamp exceeds max allowed input", malleate: func() { // ensure message timeout exceeds max allowed input. 
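The no-op branches in RecvPacket, Acknowledgement and Timeout above now switch on errors.Is rather than comparing the error value directly, so a wrapped types.ErrNoOpMsg still short-circuits to a NOOP result. A small self-contained sketch of the difference, using a stand-in sentinel error rather than the ibc-go one:

package main

import (
	"errors"
	"fmt"
)

// Stand-in for the types.ErrNoOpMsg sentinel.
var errNoOpMsg = errors.New("no-op message")

// handle mirrors the rewritten switch: `switch err { case errNoOpMsg: }` only
// matches the exact sentinel value, while errors.Is also matches when the
// sentinel comes back wrapped (e.g. via errorsmod.Wrap or fmt.Errorf with %w).
func handle(err error) string {
	switch {
	case err == nil:
		return "committed"
	case errors.Is(err, errNoOpMsg):
		return "no-op on redundant relay"
	default:
		return "failed: " + err.Error()
	}
}

func main() {
	wrapped := fmt.Errorf("packet receipt already written: %w", errNoOpMsg)
	fmt.Println(handle(nil))     // committed
	fmt.Println(handle(wrapped)) // no-op on redundant relay
}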
- timeoutTimestamp = uint64(suite.chainA.GetContext().BlockTime().Add(types.MaxTimeoutDelta + 10*time.Second).Unix()) + timeoutTimestamp = uint64(s.chainA.GetContext().BlockTime().Add(types.MaxTimeoutDelta + 10*time.Second).Unix()) }, expError: types.ErrInvalidTimeout, }, @@ -63,7 +71,7 @@ func (suite *KeeperTestSuite) TestMsgSendPacket() { name: "failure: timeout timestamp less than current block timestamp", malleate: func() { // ensure message timeout exceeds max allowed input. - timeoutTimestamp = uint64(suite.chainA.GetContext().BlockTime().Unix()) - 1 + timeoutTimestamp = uint64(s.chainA.GetContext().BlockTime().Unix()) - 1 }, expError: types.ErrTimeoutElapsed, }, @@ -78,10 +86,23 @@ func (suite *KeeperTestSuite) TestMsgSendPacket() { name: "failure: application callback error", malleate: func() { path.EndpointA.Chain.GetSimApp().MockModuleV2A.IBCApp.OnSendPacket = func(ctx sdk.Context, sourceID string, destinationID string, sequence uint64, data types.Payload, signer sdk.AccAddress) error { - return mock.MockApplicationCallbackError + return mockv1.MockApplicationCallbackError + } + }, + expError: mockv1.MockApplicationCallbackError, + }, + { + name: "failure: multiple payload application callback error", + malleate: func() { + path.EndpointA.Chain.GetSimApp().MockModuleV2A.IBCApp.OnSendPacket = func(ctx sdk.Context, sourceID string, destinationID string, sequence uint64, data types.Payload, signer sdk.AccAddress) error { + if bytes.Equal(mockv1.MockFailPacketData, data.Value) { + return mockv1.MockApplicationCallbackError + } + return nil } + payloads = append(payloads, mockv2.NewErrorMockPayload(mockv2.ModuleNameA, mockv2.ModuleNameB)) }, - expError: mock.MockApplicationCallbackError, + expError: mockv1.MockApplicationCallbackError, }, { name: "failure: client not found", @@ -93,123 +114,160 @@ func (suite *KeeperTestSuite) TestMsgSendPacket() { { name: "failure: route to non existing app", malleate: func() { - payload.SourcePort = "foo" + payloads[0].SourcePort = "foo" }, expError: errors.New("no route for foo"), }, } for _, tc := range testCases { - suite.Run(tc.name, func() { - suite.SetupTest() // reset + s.Run(tc.name, func() { + s.SetupTest() // reset - path = ibctesting.NewPath(suite.chainA, suite.chainB) + path = ibctesting.NewPath(s.chainA, s.chainB) path.SetupV2() - timeoutTimestamp = suite.chainA.GetTimeoutTimestampSecs() - payload = mockv2.NewMockPayload(mockv2.ModuleNameA, mockv2.ModuleNameB) - - expectedPacket = types.NewPacket(1, path.EndpointA.ClientID, path.EndpointB.ClientID, timeoutTimestamp, payload) + timeoutTimestamp = s.chainA.GetTimeoutTimestampSecs() + payloads = []types.Payload{mockv2.NewMockPayload(mockv2.ModuleNameA, mockv2.ModuleNameB)} tc.malleate() - packet, err := path.EndpointA.MsgSendPacket(timeoutTimestamp, payload) + expectedPacket = types.NewPacket(1, path.EndpointA.ClientID, path.EndpointB.ClientID, timeoutTimestamp, payloads...) + packet, err := path.EndpointA.MsgSendPacket(timeoutTimestamp, payloads...) 
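TestMsgSendPacket now drives a []types.Payload slice through the variadic MsgSendPacket helper, and a single packet commitment covers every payload. A usage sketch, assuming the KeeperTestSuite fixtures and the ibctesting, mockv2 and types imports already used in this file (the test name is illustrative only):

// Illustrative only: a hypothetical test method built from the calls shown in this diff.
func (s *KeeperTestSuite) TestSendPacketWithMultiplePayloadsExample() {
	path := ibctesting.NewPath(s.chainA, s.chainB)
	path.SetupV2()

	timeoutTimestamp := s.chainA.GetTimeoutTimestampSecs()
	payloads := []types.Payload{
		mockv2.NewMockPayload(mockv2.ModuleNameA, mockv2.ModuleNameB),
		mockv2.NewMockPayload(mockv2.ModuleNameA, mockv2.ModuleNameB),
	}

	// a single packet carries every payload
	packet, err := path.EndpointA.MsgSendPacket(timeoutTimestamp, payloads...)
	s.Require().NoError(err)
	s.Require().Equal(types.NewPacket(1, path.EndpointA.ClientID, path.EndpointB.ClientID, timeoutTimestamp, payloads...), packet)

	// receiving on chain B runs each payload callback and writes one acknowledgement
	s.Require().NoError(path.EndpointB.MsgRecvPacket(packet))
}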
expPass := tc.expError == nil if expPass { - suite.Require().NoError(err) - suite.Require().NotEmpty(packet) + s.Require().NoError(err) + s.Require().NotEmpty(packet) ck := path.EndpointA.Chain.GetSimApp().IBCKeeper.ChannelKeeperV2 packetCommitment := ck.GetPacketCommitment(path.EndpointA.Chain.GetContext(), path.EndpointA.ClientID, 1) - suite.Require().NotNil(packetCommitment) - suite.Require().Equal(types.CommitPacket(expectedPacket), packetCommitment, "packet commitment is not stored correctly") + s.Require().NotNil(packetCommitment) + s.Require().Equal(types.CommitPacket(expectedPacket), packetCommitment, "packet commitment is not stored correctly") nextSequenceSend, ok := ck.GetNextSequenceSend(path.EndpointA.Chain.GetContext(), path.EndpointA.ClientID) - suite.Require().True(ok) - suite.Require().Equal(uint64(2), nextSequenceSend, "next sequence send was not incremented correctly") - - suite.Require().Equal(expectedPacket, packet) + s.Require().True(ok) + s.Require().Equal(uint64(2), nextSequenceSend, "next sequence send was not incremented correctly") + s.Require().Equal(expectedPacket, packet) } else { - suite.Require().Error(err) - ibctesting.RequireErrorIsOrContains(suite.T(), err, tc.expError) + s.Require().Error(err) + ibctesting.RequireErrorIsOrContains(s.T(), err, tc.expError) } }) } } -func (suite *KeeperTestSuite) TestMsgRecvPacket() { +func (s *KeeperTestSuite) TestMsgRecvPacket() { var ( - path *ibctesting.Path - packet types.Packet - expRecvRes types.RecvPacketResult + path *ibctesting.Path + packet types.Packet + expAck types.Acknowledgement ) testCases := []struct { name string + payloads []types.Payload malleate func() expError error expAckWritten bool }{ { name: "success", + payloads: []types.Payload{mockv2.NewMockPayload(mockv2.ModuleNameA, mockv2.ModuleNameB)}, malleate: func() {}, expError: nil, expAckWritten: true, }, { - name: "success: failed recv result", + name: "success: error ack", + payloads: []types.Payload{mockv2.NewErrorMockPayload(mockv2.ModuleNameA, mockv2.ModuleNameB)}, malleate: func() { - expRecvRes = types.RecvPacketResult{ - Status: types.PacketStatus_Failure, + expAck = types.Acknowledgement{ + AppAcknowledgements: [][]byte{types.ErrorAcknowledgement[:]}, } }, expError: nil, expAckWritten: true, }, { - name: "success: async recv result", + name: "success: async recv result", + payloads: []types.Payload{mockv2.NewAsyncMockPayload(mockv2.ModuleNameA, mockv2.ModuleNameB)}, + malleate: func() {}, + expError: nil, + expAckWritten: false, + }, + { + name: "success: NoOp", + payloads: []types.Payload{mockv2.NewMockPayload(mockv2.ModuleNameA, mockv2.ModuleNameB)}, malleate: func() { - expRecvRes = types.RecvPacketResult{ - Status: types.PacketStatus_Async, - } + s.chainB.App.GetIBCKeeper().ChannelKeeperV2.SetPacketReceipt(s.chainB.GetContext(), packet.DestinationClient, packet.Sequence) }, expError: nil, expAckWritten: false, }, { - name: "success: NoOp", + name: "success: multiple payloads", + payloads: []types.Payload{ + mockv2.NewMockPayload(mockv2.ModuleNameA, mockv2.ModuleNameB), + mockv2.NewMockPayload(mockv2.ModuleNameA, mockv2.ModuleNameB), + }, malleate: func() { - suite.chainB.App.GetIBCKeeper().ChannelKeeperV2.SetPacketReceipt(suite.chainB.GetContext(), packet.DestinationClient, packet.Sequence) + expAck = types.Acknowledgement{ + AppAcknowledgements: [][]byte{ + mockv2.MockRecvPacketResult.Acknowledgement, + mockv2.MockRecvPacketResult.Acknowledgement, + }, + } }, expError: nil, - expAckWritten: false, + expAckWritten: true, }, { - name: 
"success: receive permissioned with msg sender", + name: "success: multiple payloads with error ack", + payloads: []types.Payload{ + mockv2.NewMockPayload(mockv2.ModuleNameA, mockv2.ModuleNameB), + mockv2.NewErrorMockPayload(mockv2.ModuleNameA, mockv2.ModuleNameB), + mockv2.NewMockPayload(mockv2.ModuleNameA, mockv2.ModuleNameB), + }, malleate: func() { - creator := suite.chainB.SenderAccount.GetAddress() - msg := clientv2types.NewMsgUpdateClientConfig(path.EndpointB.ClientID, creator.String(), clientv2types.NewConfig(suite.chainA.SenderAccount.GetAddress().String(), creator.String())) - _, err := suite.chainB.App.GetIBCKeeper().UpdateClientConfig(suite.chainB.GetContext(), msg) - suite.Require().NoError(err) + expAck = types.Acknowledgement{ + AppAcknowledgements: [][]byte{ + types.ErrorAcknowledgement[:], + }, + } }, expError: nil, expAckWritten: true, }, { - name: "failure: relayer not permissioned", + name: "success: receive permissioned with msg sender", + payloads: []types.Payload{mockv2.NewMockPayload(mockv2.ModuleNameA, mockv2.ModuleNameB)}, + + malleate: func() { + creator := s.chainB.SenderAccount.GetAddress() + msg := clientv2types.NewMsgUpdateClientConfig(path.EndpointB.ClientID, creator.String(), clientv2types.NewConfig(s.chainA.SenderAccount.GetAddress().String(), creator.String())) + _, err := s.chainB.App.GetIBCKeeper().UpdateClientConfig(s.chainB.GetContext(), msg) + s.Require().NoError(err) + }, + expError: nil, + expAckWritten: true, + }, + { + name: "failure: relayer not permissioned", + payloads: []types.Payload{mockv2.NewMockPayload(mockv2.ModuleNameA, mockv2.ModuleNameB)}, malleate: func() { - creator := suite.chainB.SenderAccount.GetAddress() - msg := clientv2types.NewMsgUpdateClientConfig(path.EndpointB.ClientID, creator.String(), clientv2types.NewConfig(suite.chainA.SenderAccount.GetAddress().String())) - _, err := suite.chainB.App.GetIBCKeeper().UpdateClientConfig(suite.chainB.GetContext(), msg) - suite.Require().NoError(err) + creator := s.chainB.SenderAccount.GetAddress() + msg := clientv2types.NewMsgUpdateClientConfig(path.EndpointB.ClientID, creator.String(), clientv2types.NewConfig(s.chainA.SenderAccount.GetAddress().String())) + _, err := s.chainB.App.GetIBCKeeper().UpdateClientConfig(s.chainB.GetContext(), msg) + s.Require().NoError(err) }, expError: ibcerrors.ErrUnauthorized, }, { - name: "failure: counterparty not found", + name: "failure: counterparty not found", + payloads: []types.Payload{mockv2.NewMockPayload(mockv2.ModuleNameA, mockv2.ModuleNameB)}, malleate: func() { // change the destination id to a non-existent channel. packet.DestinationClient = ibctesting.InvalidID @@ -217,7 +275,8 @@ func (suite *KeeperTestSuite) TestMsgRecvPacket() { expError: clientv2types.ErrCounterpartyNotFound, }, { - name: "failure: invalid proof", + name: "failure: invalid proof", + payloads: []types.Payload{mockv2.NewMockPayload(mockv2.ModuleNameA, mockv2.ModuleNameB)}, malleate: func() { // proof verification fails because the packet commitment is different due to a different sequence. 
packet.Sequence = 10 @@ -225,47 +284,50 @@ func (suite *KeeperTestSuite) TestMsgRecvPacket() { expError: commitmenttypes.ErrInvalidProof, }, { - name: "failure: invalid acknowledgement", + name: "failure: invalid acknowledgement", + payloads: []types.Payload{mockv2.NewMockPayload(mockv2.ModuleNameA, mockv2.ModuleNameB)}, malleate: func() { - expRecvRes = types.RecvPacketResult{ - Status: types.PacketStatus_Success, - Acknowledgement: []byte(""), + // modify the callback to return the expected recv result. + path.EndpointB.Chain.GetSimApp().MockModuleV2B.IBCApp.OnRecvPacket = func(ctx sdk.Context, sourceChannel string, destinationChannel string, sequence uint64, data types.Payload, relayer sdk.AccAddress) types.RecvPacketResult { + return types.RecvPacketResult{ + Status: types.PacketStatus_Success, + Acknowledgement: []byte(""), + } } }, expError: types.ErrInvalidAcknowledgement, }, + { + name: "failure: async payload with other payloads", + payloads: []types.Payload{ + mockv2.NewMockPayload(mockv2.ModuleNameA, mockv2.ModuleNameB), + mockv2.NewAsyncMockPayload(mockv2.ModuleNameA, mockv2.ModuleNameB), + }, + malleate: func() {}, + expError: types.ErrInvalidPacket, + expAckWritten: false, + }, } for _, tc := range testCases { - suite.Run(tc.name, func() { - suite.SetupTest() // reset + s.Run(tc.name, func() { + s.SetupTest() // reset - path = ibctesting.NewPath(suite.chainA, suite.chainB) + path = ibctesting.NewPath(s.chainA, s.chainB) path.SetupV2() - timeoutTimestamp := suite.chainA.GetTimeoutTimestampSecs() + timeoutTimestamp := s.chainA.GetTimeoutTimestampSecs() var err error - packet, err = path.EndpointA.MsgSendPacket(timeoutTimestamp, mockv2.NewMockPayload(mockv2.ModuleNameA, mockv2.ModuleNameB)) - suite.Require().NoError(err) - - // default expected receive result is a single successful recv result for moduleB. - expRecvRes = mockv2.MockRecvPacketResult - - tc.malleate() + packet, err = path.EndpointA.MsgSendPacket(timeoutTimestamp, tc.payloads...) + s.Require().NoError(err) - // expectedAck is derived from the expected recv result. - var expectedAck types.Acknowledgement - if expRecvRes.Status == types.PacketStatus_Success { - expectedAck = types.Acknowledgement{AppAcknowledgements: [][]byte{expRecvRes.Acknowledgement}} - } else { - expectedAck = types.Acknowledgement{AppAcknowledgements: [][]byte{types.ErrorAcknowledgement[:]}} + // default expected acknowledgement is a single successful acknowledgement for moduleB. + expAck = types.Acknowledgement{ + AppAcknowledgements: [][]byte{mockv2.MockRecvPacketResult.Acknowledgement}, } - // modify the callback to return the expected recv result. 
- path.EndpointB.Chain.GetSimApp().MockModuleV2B.IBCApp.OnRecvPacket = func(ctx sdk.Context, sourceChannel string, destinationChannel string, sequence uint64, data types.Payload, relayer sdk.AccAddress) types.RecvPacketResult { - return expRecvRes - } + tc.malleate() // err is checking under expPass err = path.EndpointB.MsgRecvPacket(packet) @@ -273,36 +335,35 @@ func (suite *KeeperTestSuite) TestMsgRecvPacket() { expPass := tc.expError == nil if expPass { - suite.Require().NoError(err) + s.Require().NoError(err) // packet receipt should be written _, ok := ck.GetPacketReceipt(path.EndpointB.Chain.GetContext(), packet.DestinationClient, packet.Sequence) - suite.Require().True(ok) + s.Require().True(ok) ackWritten := ck.HasPacketAcknowledgement(path.EndpointB.Chain.GetContext(), packet.DestinationClient, packet.Sequence) if !tc.expAckWritten { // ack should not be written for async app or if the packet receipt was already present. - suite.Require().False(ackWritten) + s.Require().False(ackWritten) } else { // successful or failed acknowledgement // ack should be written for synchronous app (default mock application behaviour). - suite.Require().True(ackWritten) - expectedBz := types.CommitAcknowledgement(expectedAck) + s.Require().True(ackWritten) + expectedBz := types.CommitAcknowledgement(expAck) actualAckBz := ck.GetPacketAcknowledgement(path.EndpointB.Chain.GetContext(), packet.DestinationClient, packet.Sequence) - suite.Require().Equal(expectedBz, actualAckBz) + s.Require().Equal(expectedBz, actualAckBz) } - } else { - ibctesting.RequireErrorIsOrContains(suite.T(), err, tc.expError) + ibctesting.RequireErrorIsOrContains(s.T(), err, tc.expError) _, ok := ck.GetPacketReceipt(path.EndpointB.Chain.GetContext(), packet.SourceClient, packet.Sequence) - suite.Require().False(ok) + s.Require().False(ok) } }) } } -func (suite *KeeperTestSuite) TestMsgAcknowledgement() { +func (s *KeeperTestSuite) TestMsgAcknowledgement() { var ( path *ibctesting.Path packet types.Packet @@ -311,64 +372,119 @@ func (suite *KeeperTestSuite) TestMsgAcknowledgement() { testCases := []struct { name string malleate func() - payload types.Payload + payloads []types.Payload expError error }{ { name: "success", malleate: func() {}, - payload: mockv2.NewMockPayload(mockv2.ModuleNameA, mockv2.ModuleNameB), + payloads: []types.Payload{mockv2.NewMockPayload(mockv2.ModuleNameA, mockv2.ModuleNameB)}, }, { name: "success: NoOp", malleate: func() { - suite.chainA.App.GetIBCKeeper().ChannelKeeperV2.DeletePacketCommitment(suite.chainA.GetContext(), packet.SourceClient, packet.Sequence) + s.chainA.App.GetIBCKeeper().ChannelKeeperV2.DeletePacketCommitment(s.chainA.GetContext(), packet.SourceClient, packet.Sequence) // Modify the callback to return an error. // This way, we can verify that the callback is not executed in a No-op case. 
path.EndpointA.Chain.GetSimApp().MockModuleV2A.IBCApp.OnAcknowledgementPacket = func(sdk.Context, string, string, uint64, types.Payload, []byte, sdk.AccAddress) error { - return mock.MockApplicationCallbackError + return mockv1.MockApplicationCallbackError } }, - payload: mockv2.NewMockPayload(mockv2.ModuleNameA, mockv2.ModuleNameB), + payloads: []types.Payload{mockv2.NewMockPayload(mockv2.ModuleNameA, mockv2.ModuleNameB)}, }, { name: "success: failed ack result", malleate: func() { ack.AppAcknowledgements[0] = types.ErrorAcknowledgement[:] }, - payload: mockv2.NewErrorMockPayload(mockv2.ModuleNameA, mockv2.ModuleNameB), + payloads: []types.Payload{mockv2.NewErrorMockPayload(mockv2.ModuleNameA, mockv2.ModuleNameB)}, + }, + { + name: "success: multiple payloads", + malleate: func() { + ack = types.Acknowledgement{ + AppAcknowledgements: [][]byte{ + mockv2.MockRecvPacketResult.Acknowledgement, + mockv2.MockRecvPacketResult.Acknowledgement, + }, + } + }, + payloads: []types.Payload{ + mockv2.NewMockPayload(mockv2.ModuleNameA, mockv2.ModuleNameB), + mockv2.NewMockPayload(mockv2.ModuleNameA, mockv2.ModuleNameB), + }, + }, + { + name: "success: multiple payloads with error ack", + malleate: func() { + ack = types.Acknowledgement{ + AppAcknowledgements: [][]byte{ + types.ErrorAcknowledgement[:], + }, + } + }, + payloads: []types.Payload{ + mockv2.NewMockPayload(mockv2.ModuleNameA, mockv2.ModuleNameB), + mockv2.NewErrorMockPayload(mockv2.ModuleNameA, mockv2.ModuleNameB), + mockv2.NewMockPayload(mockv2.ModuleNameA, mockv2.ModuleNameB), + }, }, { name: "success: relayer permissioned with msg sender", malleate: func() { - creator := suite.chainA.SenderAccount.GetAddress() - msg := clientv2types.NewMsgUpdateClientConfig(path.EndpointA.ClientID, creator.String(), clientv2types.NewConfig(suite.chainB.SenderAccount.GetAddress().String(), creator.String())) - _, err := suite.chainA.App.GetIBCKeeper().UpdateClientConfig(suite.chainA.GetContext(), msg) - suite.Require().NoError(err) + creator := s.chainA.SenderAccount.GetAddress() + msg := clientv2types.NewMsgUpdateClientConfig(path.EndpointA.ClientID, creator.String(), clientv2types.NewConfig(s.chainB.SenderAccount.GetAddress().String(), creator.String())) + _, err := s.chainA.App.GetIBCKeeper().UpdateClientConfig(s.chainA.GetContext(), msg) + s.Require().NoError(err) }, - payload: mockv2.NewMockPayload(mockv2.ModuleNameA, mockv2.ModuleNameB), + payloads: []types.Payload{mockv2.NewMockPayload(mockv2.ModuleNameA, mockv2.ModuleNameB)}, }, { name: "failure: relayer not permissioned", malleate: func() { - creator := suite.chainA.SenderAccount.GetAddress() - msg := clientv2types.NewMsgUpdateClientConfig(path.EndpointA.ClientID, creator.String(), clientv2types.NewConfig(suite.chainB.SenderAccount.GetAddress().String())) - _, err := suite.chainA.App.GetIBCKeeper().UpdateClientConfig(suite.chainA.GetContext(), msg) - suite.Require().NoError(err) + creator := s.chainA.SenderAccount.GetAddress() + msg := clientv2types.NewMsgUpdateClientConfig(path.EndpointA.ClientID, creator.String(), clientv2types.NewConfig(s.chainB.SenderAccount.GetAddress().String())) + _, err := s.chainA.App.GetIBCKeeper().UpdateClientConfig(s.chainA.GetContext(), msg) + s.Require().NoError(err) }, - payload: mockv2.NewMockPayload(mockv2.ModuleNameA, mockv2.ModuleNameB), + payloads: []types.Payload{mockv2.NewMockPayload(mockv2.ModuleNameA, mockv2.ModuleNameB)}, expError: ibcerrors.ErrUnauthorized, }, { name: "failure: callback fails", malleate: func() { 
path.EndpointA.Chain.GetSimApp().MockModuleV2A.IBCApp.OnAcknowledgementPacket = func(sdk.Context, string, string, uint64, types.Payload, []byte, sdk.AccAddress) error { - return mock.MockApplicationCallbackError + return mockv1.MockApplicationCallbackError } }, - payload: mockv2.NewMockPayload(mockv2.ModuleNameA, mockv2.ModuleNameB), - expError: mock.MockApplicationCallbackError, + payloads: []types.Payload{mockv2.NewMockPayload(mockv2.ModuleNameA, mockv2.ModuleNameB)}, + expError: mockv1.MockApplicationCallbackError, + }, + { + name: "failure: callback fails on one of the multiple payloads", + malleate: func() { + // create custom callback that fails on one of the payloads in the test case. + path.EndpointA.Chain.GetSimApp().MockModuleV2A.IBCApp.OnAcknowledgementPacket = func(ctx sdk.Context, sourceClient string, destinationClient string, sequence uint64, data types.Payload, acknowledgement []byte, relayer sdk.AccAddress) error { + if data.DestinationPort == mockv2.ModuleNameB { + return mockv1.MockApplicationCallbackError + } + return nil + } + ack = types.Acknowledgement{ + AppAcknowledgements: [][]byte{ + mockv2.MockRecvPacketResult.Acknowledgement, + mockv2.MockRecvPacketResult.Acknowledgement, + mockv2.MockRecvPacketResult.Acknowledgement, // this one will not be processed + }, + } + }, + payloads: []types.Payload{ + mockv2.NewMockPayload(mockv2.ModuleNameA, mockv2.ModuleNameA), + mockv2.NewMockPayload(mockv2.ModuleNameA, mockv2.ModuleNameB), + mockv2.NewMockPayload(mockv2.ModuleNameA, mockv2.ModuleNameA), + }, + expError: mockv1.MockApplicationCallbackError, }, { name: "failure: counterparty not found", @@ -376,42 +492,43 @@ func (suite *KeeperTestSuite) TestMsgAcknowledgement() { // change the source id to a non-existent channel. packet.SourceClient = "not-existent-channel" }, - payload: mockv2.NewMockPayload(mockv2.ModuleNameA, mockv2.ModuleNameB), + payloads: []types.Payload{mockv2.NewMockPayload(mockv2.ModuleNameA, mockv2.ModuleNameB)}, expError: clientv2types.ErrCounterpartyNotFound, }, { name: "failure: invalid commitment", malleate: func() { - suite.chainA.App.GetIBCKeeper().ChannelKeeperV2.SetPacketCommitment(suite.chainA.GetContext(), packet.SourceClient, packet.Sequence, []byte("foo")) + s.chainA.App.GetIBCKeeper().ChannelKeeperV2.SetPacketCommitment(s.chainA.GetContext(), packet.SourceClient, packet.Sequence, []byte("foo")) }, - payload: mockv2.NewMockPayload(mockv2.ModuleNameA, mockv2.ModuleNameB), + payloads: []types.Payload{mockv2.NewMockPayload(mockv2.ModuleNameA, mockv2.ModuleNameB)}, expError: types.ErrInvalidPacket, }, { name: "failure: failed membership verification", malleate: func() { - ack.AppAcknowledgements[0] = mock.MockFailPacketData + ack.AppAcknowledgements[0] = mockv1.MockFailPacketData }, - payload: mockv2.NewMockPayload(mockv2.ModuleNameA, mockv2.ModuleNameB), + payloads: []types.Payload{mockv2.NewMockPayload(mockv2.ModuleNameA, mockv2.ModuleNameB)}, expError: errors.New("failed packet acknowledgement verification"), }, } + for _, tc := range testCases { - suite.Run(tc.name, func() { - suite.SetupTest() + s.Run(tc.name, func() { + s.SetupTest() - path = ibctesting.NewPath(suite.chainA, suite.chainB) + path = ibctesting.NewPath(s.chainA, s.chainB) path.SetupV2() - timeoutTimestamp := suite.chainA.GetTimeoutTimestampSecs() + timeoutTimestamp := s.chainA.GetTimeoutTimestampSecs() var err error // Send packet from A to B - packet, err = path.EndpointA.MsgSendPacket(timeoutTimestamp, tc.payload) - suite.Require().NoError(err) + packet, err = 
path.EndpointA.MsgSendPacket(timeoutTimestamp, tc.payloads...) + s.Require().NoError(err) err = path.EndpointB.MsgRecvPacket(packet) - suite.Require().NoError(err) + s.Require().NoError(err) // Construct expected acknowledgement ack = types.Acknowledgement{AppAcknowledgements: [][]byte{mockv2.MockRecvPacketResult.Acknowledgement}} @@ -423,15 +540,15 @@ func (suite *KeeperTestSuite) TestMsgAcknowledgement() { expPass := tc.expError == nil if expPass { - suite.Require().NoError(err) + s.Require().NoError(err) } else { - ibctesting.RequireErrorIsOrContains(suite.T(), err, tc.expError, "expected error %q, got %q instead", tc.expError, err) + ibctesting.RequireErrorIsOrContains(s.T(), err, tc.expError, "expected error %q, got %q instead", tc.expError, err) } }) } } -func (suite *KeeperTestSuite) TestMsgTimeout() { +func (s *KeeperTestSuite) TestMsgTimeout() { var ( path *ibctesting.Path packet types.Packet @@ -440,103 +557,142 @@ func (suite *KeeperTestSuite) TestMsgTimeout() { testCases := []struct { name string malleate func() + payloads []types.Payload expError error }{ { name: "success", malleate: func() { - suite.Require().NoError(path.EndpointA.UpdateClient()) + s.Require().NoError(path.EndpointA.UpdateClient()) }, + payloads: []types.Payload{mockv2.NewMockPayload(mockv2.ModuleNameA, mockv2.ModuleNameB)}, }, { name: "success: no-op", malleate: func() { - suite.chainA.App.GetIBCKeeper().ChannelKeeperV2.DeletePacketCommitment(suite.chainA.GetContext(), packet.SourceClient, packet.Sequence) + s.chainA.App.GetIBCKeeper().ChannelKeeperV2.DeletePacketCommitment(s.chainA.GetContext(), packet.SourceClient, packet.Sequence) // Modify the callback to return a different error. // This way, we can verify that the callback is not executed in a No-op case. path.EndpointA.Chain.GetSimApp().MockModuleV2A.IBCApp.OnTimeoutPacket = func(sdk.Context, string, string, uint64, types.Payload, sdk.AccAddress) error { - return mock.MockApplicationCallbackError + return mockv1.MockApplicationCallbackError } - suite.Require().NoError(path.EndpointA.UpdateClient()) + s.Require().NoError(path.EndpointA.UpdateClient()) + }, + payloads: []types.Payload{mockv2.NewMockPayload(mockv2.ModuleNameA, mockv2.ModuleNameB)}, + }, + { + name: "success: multiple payloads", + malleate: func() { + s.Require().NoError(path.EndpointA.UpdateClient()) + }, + payloads: []types.Payload{ + mockv2.NewMockPayload(mockv2.ModuleNameA, mockv2.ModuleNameB), + mockv2.NewMockPayload(mockv2.ModuleNameA, mockv2.ModuleNameB), }, + expError: nil, }, { name: "success: relayer permissioned with msg sender", malleate: func() { - creator := suite.chainA.SenderAccount.GetAddress() - msg := clientv2types.NewMsgUpdateClientConfig(path.EndpointA.ClientID, creator.String(), clientv2types.NewConfig(suite.chainB.SenderAccount.GetAddress().String(), creator.String())) - _, err := suite.chainA.App.GetIBCKeeper().UpdateClientConfig(suite.chainA.GetContext(), msg) - suite.Require().NoError(err) - suite.Require().NoError(path.EndpointA.UpdateClient()) + creator := s.chainA.SenderAccount.GetAddress() + msg := clientv2types.NewMsgUpdateClientConfig(path.EndpointA.ClientID, creator.String(), clientv2types.NewConfig(s.chainB.SenderAccount.GetAddress().String(), creator.String())) + _, err := s.chainA.App.GetIBCKeeper().UpdateClientConfig(s.chainA.GetContext(), msg) + s.Require().NoError(err) + s.Require().NoError(path.EndpointA.UpdateClient()) }, + payloads: []types.Payload{mockv2.NewMockPayload(mockv2.ModuleNameA, mockv2.ModuleNameB)}, }, { name: "failure: relayer not 
permissioned", malleate: func() { // update first before permissioning the relayer in this case - suite.Require().NoError(path.EndpointA.UpdateClient()) - creator := suite.chainA.SenderAccount.GetAddress() - msg := clientv2types.NewMsgUpdateClientConfig(path.EndpointA.ClientID, creator.String(), clientv2types.NewConfig(suite.chainB.SenderAccount.GetAddress().String())) - _, err := suite.chainA.App.GetIBCKeeper().UpdateClientConfig(suite.chainA.GetContext(), msg) - suite.Require().NoError(err) + s.Require().NoError(path.EndpointA.UpdateClient()) + creator := s.chainA.SenderAccount.GetAddress() + msg := clientv2types.NewMsgUpdateClientConfig(path.EndpointA.ClientID, creator.String(), clientv2types.NewConfig(s.chainB.SenderAccount.GetAddress().String())) + _, err := s.chainA.App.GetIBCKeeper().UpdateClientConfig(s.chainA.GetContext(), msg) + s.Require().NoError(err) }, + payloads: []types.Payload{mockv2.NewMockPayload(mockv2.ModuleNameA, mockv2.ModuleNameB)}, expError: ibcerrors.ErrUnauthorized, }, { name: "failure: callback fails", malleate: func() { path.EndpointA.Chain.GetSimApp().MockModuleV2A.IBCApp.OnTimeoutPacket = func(sdk.Context, string, string, uint64, types.Payload, sdk.AccAddress) error { - return mock.MockApplicationCallbackError + return mockv1.MockApplicationCallbackError + } + s.Require().NoError(path.EndpointA.UpdateClient()) + }, + payloads: []types.Payload{mockv2.NewMockPayload(mockv2.ModuleNameA, mockv2.ModuleNameB)}, + expError: mockv1.MockApplicationCallbackError, + }, + { + name: "failure: callback fails on one of the multiple payloads", + malleate: func() { + // create custom callback that fails on one of the payloads in the test case. + path.EndpointA.Chain.GetSimApp().MockModuleV2A.IBCApp.OnTimeoutPacket = func(ctx sdk.Context, sourceChannel string, destinationChannel string, sequence uint64, data types.Payload, relayer sdk.AccAddress) error { + if bytes.Equal(mockv1.MockFailPacketData, data.Value) { + return mockv1.MockApplicationCallbackError + } + return nil } - suite.Require().NoError(path.EndpointA.UpdateClient()) + s.Require().NoError(path.EndpointA.UpdateClient()) }, - expError: mock.MockApplicationCallbackError, + payloads: []types.Payload{ + mockv2.NewMockPayload(mockv2.ModuleNameA, mockv2.ModuleNameB), + mockv2.NewErrorMockPayload(mockv2.ModuleNameA, mockv2.ModuleNameB), + mockv2.NewMockPayload(mockv2.ModuleNameA, mockv2.ModuleNameB), + }, + expError: mockv1.MockApplicationCallbackError, }, { name: "failure: client not found", malleate: func() { // change the source id to a non-existent client. 
packet.SourceClient = "not-existent-client" - suite.Require().NoError(path.EndpointA.UpdateClient()) + s.Require().NoError(path.EndpointA.UpdateClient()) }, + payloads: []types.Payload{mockv2.NewMockPayload(mockv2.ModuleNameA, mockv2.ModuleNameB)}, expError: clientv2types.ErrCounterpartyNotFound, }, { name: "failure: invalid commitment", malleate: func() { - suite.chainA.App.GetIBCKeeper().ChannelKeeperV2.SetPacketCommitment(suite.chainA.GetContext(), packet.SourceClient, packet.Sequence, []byte("foo")) - suite.Require().NoError(path.EndpointA.UpdateClient()) + s.chainA.App.GetIBCKeeper().ChannelKeeperV2.SetPacketCommitment(s.chainA.GetContext(), packet.SourceClient, packet.Sequence, []byte("foo")) + s.Require().NoError(path.EndpointA.UpdateClient()) }, + payloads: []types.Payload{mockv2.NewMockPayload(mockv2.ModuleNameA, mockv2.ModuleNameB)}, expError: types.ErrInvalidPacket, }, { name: "failure: unable to timeout if packet has been received", malleate: func() { - suite.chainB.App.GetIBCKeeper().ChannelKeeperV2.SetPacketReceipt(suite.chainB.GetContext(), packet.DestinationClient, packet.Sequence) - suite.Require().NoError(path.EndpointB.UpdateClient()) - suite.Require().NoError(path.EndpointA.UpdateClient()) + s.chainB.App.GetIBCKeeper().ChannelKeeperV2.SetPacketReceipt(s.chainB.GetContext(), packet.DestinationClient, packet.Sequence) + s.Require().NoError(path.EndpointB.UpdateClient()) + s.Require().NoError(path.EndpointA.UpdateClient()) }, + payloads: []types.Payload{mockv2.NewMockPayload(mockv2.ModuleNameA, mockv2.ModuleNameB)}, expError: commitmenttypes.ErrInvalidProof, }, } + for _, tc := range testCases { - suite.Run(tc.name, func() { - suite.SetupTest() + s.Run(tc.name, func() { + s.SetupTest() - path = ibctesting.NewPath(suite.chainA, suite.chainB) + path = ibctesting.NewPath(s.chainA, s.chainB) path.SetupV2() // Send packet from A to B // make timeoutTimestamp 1 second more than sending chain time to ensure it passes SendPacket // and times out successfully after update - timeoutTimestamp := uint64(suite.chainA.GetContext().BlockTime().Add(time.Second).Unix()) - mockData := mockv2.NewMockPayload(mockv2.ModuleNameA, mockv2.ModuleNameB) + timeoutTimestamp := uint64(s.chainA.GetContext().BlockTime().Add(time.Second).Unix()) var err error - packet, err = path.EndpointA.MsgSendPacket(timeoutTimestamp, mockData) - suite.Require().NoError(err) - suite.Require().NotEmpty(packet) + packet, err = path.EndpointA.MsgSendPacket(timeoutTimestamp, tc.payloads...) 
+ s.Require().NoError(err) + s.Require().NotEmpty(packet) tc.malleate() @@ -544,9 +700,9 @@ func (suite *KeeperTestSuite) TestMsgTimeout() { expPass := tc.expError == nil if expPass { - suite.Require().NoError(err) + s.Require().NoError(err) } else { - ibctesting.RequireErrorIsOrContains(suite.T(), err, tc.expError, "expected error %q, got %q instead", tc.expError, err) + ibctesting.RequireErrorIsOrContains(s.T(), err, tc.expError, "expected error %q, got %q instead", tc.expError, err) } }) } diff --git a/modules/core/04-channel/v2/keeper/packet.go b/modules/core/04-channel/v2/keeper/packet.go index ddf14733472..06a39cdfd52 100644 --- a/modules/core/04-channel/v2/keeper/packet.go +++ b/modules/core/04-channel/v2/keeper/packet.go @@ -24,7 +24,9 @@ func (k *Keeper) sendPacket( timeoutTimestamp uint64, payloads []types.Payload, ) (uint64, string, error) { - // lookup counterparty from client identifiers + // lookup counterparty from packet identifiers + // note this will be either the client identifier for IBC V2 paths + // or an aliased channel identifier for IBC V1 paths counterparty, ok := k.clientV2Keeper.GetClientCounterparty(ctx, sourceClient) if !ok { return 0, "", errorsmod.Wrapf(clientv2types.ErrCounterpartyNotFound, "counterparty not found for client: %s", sourceClient) @@ -54,21 +56,27 @@ func (k *Keeper) sendPacket( return 0, "", errorsmod.Wrapf(types.ErrInvalidPacket, "constructed packet failed basic validation: %v", err) } + // Before we do client keeper level checks, we first get underlying base clientID + clientID := packet.SourceClient + if underlyingClientID, isAlias := k.GetClientForAlias(ctx, packet.SourceClient); isAlias { + clientID = underlyingClientID + } + // check that the client of counterparty chain is still active - if status := k.ClientKeeper.GetClientStatus(ctx, sourceClient); status != exported.Active { - return 0, "", errorsmod.Wrapf(clienttypes.ErrClientNotActive, "client (%s) status is %s", sourceClient, status) + if status := k.ClientKeeper.GetClientStatus(ctx, clientID); status != exported.Active { + return 0, "", errorsmod.Wrapf(clienttypes.ErrClientNotActive, "client (%s) status is %s", clientID, status) } // retrieve latest height and timestamp of the client of counterparty chain - latestHeight := k.ClientKeeper.GetClientLatestHeight(ctx, sourceClient) + latestHeight := k.ClientKeeper.GetClientLatestHeight(ctx, clientID) if latestHeight.IsZero() { - return 0, "", errorsmod.Wrapf(clienttypes.ErrInvalidHeight, "cannot send packet using client (%s) with zero height", sourceClient) + return 0, "", errorsmod.Wrapf(clienttypes.ErrInvalidHeight, "cannot send packet using client (%s) with zero height", clientID) } // client timestamps are in nanoseconds while packet timeouts are in seconds // thus to compare them, we convert the client timestamp to seconds in uint64 // to be consistent with IBC V2 specified timeout behaviour - latestTimestampNano, err := k.ClientKeeper.GetClientTimestampAtHeight(ctx, sourceClient, latestHeight) + latestTimestampNano, err := k.ClientKeeper.GetClientTimestampAtHeight(ctx, clientID, latestHeight) if err != nil { return 0, "", err } @@ -105,7 +113,9 @@ func (k *Keeper) recvPacket( proof []byte, proofHeight exported.Height, ) error { - // lookup counterparty from client identifiers + // lookup counterparty from packet identifiers + // note this will be either the client identifier for IBC V2 paths + // or an aliased channel identifier for IBC V1 paths counterparty, ok := k.clientV2Keeper.GetClientCounterparty(ctx, 
packet.DestinationClient) if !ok { return errorsmod.Wrapf(clientv2types.ErrCounterpartyNotFound, "counterparty not found for client: %s", packet.DestinationClient) @@ -135,16 +145,22 @@ func (k *Keeper) recvPacket( commitment := types.CommitPacket(packet) + // Before we do client keeper level checks, we first get underlying base clientID + clientID := packet.DestinationClient + if underlyingClientID, isAlias := k.GetClientForAlias(ctx, packet.DestinationClient); isAlias { + clientID = underlyingClientID + } + if err := k.ClientKeeper.VerifyMembership( ctx, - packet.DestinationClient, + clientID, proofHeight, 0, 0, proof, merklePath, commitment, ); err != nil { - return errorsmod.Wrapf(err, "failed packet commitment verification for client (%s)", packet.DestinationClient) + return errorsmod.Wrapf(err, "failed packet commitment verification for client (%s)", clientID) } // Set Packet Receipt to prevent timeout from occurring on counterparty @@ -159,12 +175,27 @@ func (k *Keeper) recvPacket( // writeAcknowledgement writes the acknowledgement to the store and emits the packet and acknowledgement // for relayers to relay the acknowledgement to the counterparty chain. -func (k Keeper) writeAcknowledgement( +func (k *Keeper) writeAcknowledgement( ctx sdk.Context, packet types.Packet, ack types.Acknowledgement, ) error { - // lookup counterparty from client identifiers + // Validate the acknowledgement + if err := ack.Validate(); err != nil { + ctx.Logger().Error("write acknowledgement failed", "error", errorsmod.Wrap(err, "invalid acknowledgement")) + return errorsmod.Wrap(err, "invalid acknowledgement") + } + + // Validate the acknowledgement against the payload length + if ack.Success() { + if len(ack.AppAcknowledgements) != len(packet.Payloads) { + return errorsmod.Wrapf(types.ErrInvalidAcknowledgement, "length of app acknowledgement %d does not match length of app payload %d", len(ack.AppAcknowledgements), len(packet.Payloads)) + } + } + + // lookup counterparty from packet identifiers + // note this will be either the client identifier for IBC V2 paths + // or an aliased channel identifier for IBC V1 paths counterparty, ok := k.clientV2Keeper.GetClientCounterparty(ctx, packet.DestinationClient) if !ok { return errorsmod.Wrapf(clientv2types.ErrCounterpartyNotFound, "counterparty not found for client: %s", packet.DestinationClient) @@ -201,12 +232,7 @@ func (k Keeper) writeAcknowledgement( // WriteAcknowledgement writes the acknowledgement and emits events for asynchronous acknowledgements // this is the method to be called by external apps when they want to write an acknowledgement asyncrhonously func (k *Keeper) WriteAcknowledgement(ctx sdk.Context, clientID string, sequence uint64, ack types.Acknowledgement) error { - // Validate the acknowledgement - if err := ack.Validate(); err != nil { - ctx.Logger().Error("write acknowledgement failed", "error", errorsmod.Wrap(err, "invalid acknowledgement")) - return errorsmod.Wrap(err, "invalid acknowledgement") - } - + // get saved async packet from store packet, ok := k.GetAsyncPacket(ctx, clientID, sequence) if !ok { return errorsmod.Wrapf(types.ErrInvalidAcknowledgement, "packet with clientID (%s) and sequence (%d) not found for async acknowledgement", clientID, sequence) @@ -225,7 +251,9 @@ func (k *Keeper) WriteAcknowledgement(ctx sdk.Context, clientID string, sequence } func (k *Keeper) acknowledgePacket(ctx sdk.Context, packet types.Packet, acknowledgement types.Acknowledgement, proof []byte, proofHeight exported.Height) error { - // lookup 
counterparty from client identifiers + // lookup counterparty from packet identifiers + // note this will be either the client identifier for IBC V2 paths + // or an aliased channel identifier for IBC V1 paths counterparty, ok := k.clientV2Keeper.GetClientCounterparty(ctx, packet.SourceClient) if !ok { return errorsmod.Wrapf(clientv2types.ErrCounterpartyNotFound, "counterparty not found for client: %s", packet.SourceClient) @@ -254,16 +282,22 @@ func (k *Keeper) acknowledgePacket(ctx sdk.Context, packet types.Packet, acknowl path := hostv2.PacketAcknowledgementKey(packet.DestinationClient, packet.Sequence) merklePath := types.BuildMerklePath(counterparty.MerklePrefix, path) + // Before we do client keeper level checks, we first get underlying base clientID + clientID := packet.SourceClient + if underlyingClientID, isAlias := k.GetClientForAlias(ctx, packet.SourceClient); isAlias { + clientID = underlyingClientID + } + if err := k.ClientKeeper.VerifyMembership( ctx, - packet.SourceClient, + clientID, proofHeight, 0, 0, proof, merklePath, types.CommitAcknowledgement(acknowledgement), ); err != nil { - return errorsmod.Wrapf(err, "failed packet acknowledgement verification for client (%s)", packet.SourceClient) + return errorsmod.Wrapf(err, "failed packet acknowledgement verification for client (%s)", clientID) } k.DeletePacketCommitment(ctx, packet.SourceClient, packet.Sequence) @@ -288,7 +322,9 @@ func (k *Keeper) timeoutPacket( proof []byte, proofHeight exported.Height, ) error { - // lookup counterparty from client identifiers + // lookup counterparty from packet identifiers + // note this will be either the client identifier for IBC V2 paths + // or an aliased channel identifier for IBC V1 paths counterparty, ok := k.clientV2Keeper.GetClientCounterparty(ctx, packet.SourceClient) if !ok { return errorsmod.Wrapf(clientv2types.ErrCounterpartyNotFound, "counterparty not found for client: %s", packet.SourceClient) @@ -298,11 +334,17 @@ func (k *Keeper) timeoutPacket( return errorsmod.Wrapf(clientv2types.ErrInvalidCounterparty, "counterparty id (%s) does not match packet destination id (%s)", counterparty.ClientId, packet.DestinationClient) } + // Before we do client keeper level checks, we first get underlying base clientID + clientID := packet.SourceClient + if underlyingClientID, isAlias := k.GetClientForAlias(ctx, packet.SourceClient); isAlias { + clientID = underlyingClientID + } + // check that timeout timestamp has passed on the other end // client timestamps are in nanoseconds while packet timeouts are in seconds // so we convert client timestamp to seconds in uint64 to be consistent // with IBC V2 timeout behaviour - proofTimestampNano, err := k.ClientKeeper.GetClientTimestampAtHeight(ctx, packet.SourceClient, proofHeight) + proofTimestampNano, err := k.ClientKeeper.GetClientTimestampAtHeight(ctx, clientID, proofHeight) if err != nil { return err } @@ -334,13 +376,13 @@ func (k *Keeper) timeoutPacket( if err := k.ClientKeeper.VerifyNonMembership( ctx, - packet.SourceClient, + clientID, proofHeight, 0, 0, proof, merklePath, ); err != nil { - return errorsmod.Wrapf(err, "failed packet receipt absence verification for client (%s)", packet.SourceClient) + return errorsmod.Wrapf(err, "failed packet receipt absence verification for client (%s)", clientID) } // delete packet commitment to prevent replay diff --git a/modules/core/04-channel/v2/keeper/packet_test.go b/modules/core/04-channel/v2/keeper/packet_test.go index aad5fd9c200..293b3c8491b 100644 --- 
a/modules/core/04-channel/v2/keeper/packet_test.go +++ b/modules/core/04-channel/v2/keeper/packet_test.go @@ -4,11 +4,17 @@ import ( "fmt" "time" + "github.com/cosmos/cosmos-sdk/runtime" + sdk "github.com/cosmos/cosmos-sdk/types" + clienttypes "github.com/cosmos/ibc-go/v10/modules/core/02-client/types" clientv2types "github.com/cosmos/ibc-go/v10/modules/core/02-client/v2/types" + "github.com/cosmos/ibc-go/v10/modules/core/04-channel/migrations/v11" + channeltypes "github.com/cosmos/ibc-go/v10/modules/core/04-channel/types" "github.com/cosmos/ibc-go/v10/modules/core/04-channel/v2/types" commitmenttypes "github.com/cosmos/ibc-go/v10/modules/core/23-commitment/types" hostv2 "github.com/cosmos/ibc-go/v10/modules/core/24-host/v2" + ibcexported "github.com/cosmos/ibc-go/v10/modules/core/exported" ibctm "github.com/cosmos/ibc-go/v10/modules/light-clients/07-tendermint" ibctesting "github.com/cosmos/ibc-go/v10/testing" mockv2 "github.com/cosmos/ibc-go/v10/testing/mock/v2" @@ -16,7 +22,7 @@ import ( var unusedChannel = "channel-5" -func (suite *KeeperTestSuite) TestSendPacket() { +func (s *KeeperTestSuite) TestSendPacket() { var ( path *ibctesting.Path packet types.Packet @@ -33,12 +39,19 @@ func (suite *KeeperTestSuite) TestSendPacket() { func() {}, nil, }, + { + "success multiple payloads", + func() { + packet.Payloads = append(packet.Payloads, packet.Payloads...) + }, + nil, + }, { "success with later packet", func() { // send the same packet earlier so next packet send should be sequence 2 - _, _, err := suite.chainA.App.GetIBCKeeper().ChannelKeeperV2.SendPacketTest(suite.chainA.GetContext(), packet.SourceClient, packet.TimeoutTimestamp, packet.Payloads) - suite.Require().NoError(err) + _, _, err := s.chainA.App.GetIBCKeeper().ChannelKeeperV2.SendPacketTest(s.chainA.GetContext(), packet.SourceClient, packet.TimeoutTimestamp, packet.Payloads) + s.Require().NoError(err) expSequence = 2 }, nil, @@ -58,6 +71,13 @@ func (suite *KeeperTestSuite) TestSendPacket() { }, types.ErrInvalidPacket, }, + { + "multiple payload failed packet validation", + func() { + packet.Payloads = append(packet.Payloads, types.Payload{}) + }, + types.ErrInvalidPacket, + }, { "client status invalid", func() { @@ -69,20 +89,20 @@ func (suite *KeeperTestSuite) TestSendPacket() { "client state zero height", func() { clientState := path.EndpointA.GetClientState() cs, ok := clientState.(*ibctm.ClientState) - suite.Require().True(ok) + s.Require().True(ok) // force a consensus state into the store at height zero to allow client status check to pass. 
consensusState := path.EndpointA.GetConsensusState(cs.LatestHeight) path.EndpointA.SetConsensusState(consensusState, clienttypes.ZeroHeight()) cs.LatestHeight = clienttypes.ZeroHeight() - suite.chainA.App.GetIBCKeeper().ClientKeeper.SetClientState(suite.chainA.GetContext(), path.EndpointA.ClientID, cs) + s.chainA.App.GetIBCKeeper().ClientKeeper.SetClientState(s.chainA.GetContext(), path.EndpointA.ClientID, cs) }, clienttypes.ErrInvalidHeight, }, { "timeout equal to sending chain blocktime", func() { - packet.TimeoutTimestamp = uint64(suite.chainA.GetContext().BlockTime().Unix()) + packet.TimeoutTimestamp = uint64(s.chainA.GetContext().BlockTime().Unix()) }, types.ErrTimeoutElapsed, }, @@ -95,16 +115,16 @@ func (suite *KeeperTestSuite) TestSendPacket() { } for i, tc := range testCases { - suite.Run(fmt.Sprintf("Case %s, %d/%d tests", tc.name, i, len(testCases)), func() { - suite.SetupTest() // reset + s.Run(fmt.Sprintf("Case %s, %d/%d tests", tc.name, i, len(testCases)), func() { + s.SetupTest() // reset // create clients and set counterparties on both chains - path = ibctesting.NewPath(suite.chainA, suite.chainB) + path = ibctesting.NewPath(s.chainA, s.chainB) path.SetupV2() payload := mockv2.NewMockPayload(mockv2.ModuleNameA, mockv2.ModuleNameB) - timeoutTimestamp := uint64(suite.chainB.GetContext().BlockTime().Add(time.Hour).Unix()) + timeoutTimestamp := uint64(s.chainB.GetContext().BlockTime().Add(time.Hour).Unix()) // create standard packet that can be malleated packet = types.NewPacket(1, path.EndpointA.ClientID, path.EndpointB.ClientID, @@ -115,29 +135,28 @@ func (suite *KeeperTestSuite) TestSendPacket() { tc.malleate() // send packet - seq, destClient, err := suite.chainA.App.GetIBCKeeper().ChannelKeeperV2.SendPacketTest(suite.chainA.GetContext(), packet.SourceClient, packet.TimeoutTimestamp, packet.Payloads) + seq, destClient, err := s.chainA.App.GetIBCKeeper().ChannelKeeperV2.SendPacketTest(s.chainA.GetContext(), packet.SourceClient, packet.TimeoutTimestamp, packet.Payloads) expPass := tc.expError == nil if expPass { - suite.Require().NoError(err) + s.Require().NoError(err) // verify send packet method instantiated packet with correct sequence and destination channel - suite.Require().Equal(expSequence, seq) - suite.Require().Equal(path.EndpointB.ClientID, destClient) + s.Require().Equal(expSequence, seq) + s.Require().Equal(path.EndpointB.ClientID, destClient) // verify send packet stored the packet commitment correctly expCommitment := types.CommitPacket(packet) - suite.Require().Equal(expCommitment, suite.chainA.App.GetIBCKeeper().ChannelKeeperV2.GetPacketCommitment(suite.chainA.GetContext(), packet.SourceClient, seq)) + s.Require().Equal(expCommitment, s.chainA.App.GetIBCKeeper().ChannelKeeperV2.GetPacketCommitment(s.chainA.GetContext(), packet.SourceClient, seq)) } else { - suite.Require().Error(err) - suite.Require().ErrorIs(err, tc.expError) - suite.Require().Equal(uint64(0), seq) - suite.Require().Nil(suite.chainA.App.GetIBCKeeper().ChannelKeeperV2.GetPacketCommitment(suite.chainA.GetContext(), packet.SourceClient, seq)) - + s.Require().Error(err) + s.Require().ErrorIs(err, tc.expError) + s.Require().Equal(uint64(0), seq) + s.Require().Nil(s.chainA.App.GetIBCKeeper().ChannelKeeperV2.GetPacketCommitment(s.chainA.GetContext(), packet.SourceClient, seq)) } }) } } -func (suite *KeeperTestSuite) TestRecvPacket() { +func (s *KeeperTestSuite) TestRecvPacket() { var ( path *ibctesting.Path err error @@ -178,42 +197,42 @@ func (suite *KeeperTestSuite) TestRecvPacket() { { "failure: 
packet has timed out", func() { - suite.coordinator.IncrementTimeBy(time.Hour * 20) + s.coordinator.IncrementTimeBy(time.Hour * 20) }, types.ErrTimeoutElapsed, }, { "failure: packet already received", func() { - suite.chainB.App.GetIBCKeeper().ChannelKeeperV2.SetPacketReceipt(suite.chainB.GetContext(), packet.DestinationClient, packet.Sequence) + s.chainB.App.GetIBCKeeper().ChannelKeeperV2.SetPacketReceipt(s.chainB.GetContext(), packet.DestinationClient, packet.Sequence) }, types.ErrNoOpMsg, }, { "failure: verify membership failed", func() { - suite.chainA.App.GetIBCKeeper().ChannelKeeperV2.SetPacketCommitment(suite.chainA.GetContext(), packet.SourceClient, packet.Sequence, []byte("")) - suite.coordinator.CommitBlock(path.EndpointA.Chain) - suite.Require().NoError(path.EndpointB.UpdateClient()) + s.chainA.App.GetIBCKeeper().ChannelKeeperV2.SetPacketCommitment(s.chainA.GetContext(), packet.SourceClient, packet.Sequence, []byte("")) + s.coordinator.CommitBlock(path.EndpointA.Chain) + s.Require().NoError(path.EndpointB.UpdateClient()) }, commitmenttypes.ErrInvalidProof, }, } for _, tc := range testCases { - suite.Run(tc.name, func() { - suite.SetupTest() + s.Run(tc.name, func() { + s.SetupTest() - path = ibctesting.NewPath(suite.chainA, suite.chainB) + path = ibctesting.NewPath(s.chainA, s.chainB) path.SetupV2() payload := mockv2.NewMockPayload(mockv2.ModuleNameA, mockv2.ModuleNameB) - timeoutTimestamp := uint64(suite.chainB.GetContext().BlockTime().Add(time.Hour).Unix()) + timeoutTimestamp := uint64(s.chainB.GetContext().BlockTime().Add(time.Hour).Unix()) - // send packet - packet, err = path.EndpointA.MsgSendPacket(timeoutTimestamp, payload) - suite.Require().NoError(err) + // send packet with multiple payloads + packet, err = path.EndpointA.MsgSendPacket(timeoutTimestamp, payload, payload) + s.Require().NoError(err) tc.malleate() @@ -221,26 +240,27 @@ func (suite *KeeperTestSuite) TestRecvPacket() { packetKey := hostv2.PacketCommitmentKey(packet.GetSourceClient(), packet.GetSequence()) proof, proofHeight := path.EndpointA.QueryProof(packetKey) - err = suite.chainB.App.GetIBCKeeper().ChannelKeeperV2.RecvPacketTest(suite.chainB.GetContext(), packet, proof, proofHeight) + err = s.chainB.App.GetIBCKeeper().ChannelKeeperV2.RecvPacketTest(s.chainB.GetContext(), packet, proof, proofHeight) expPass := tc.expError == nil if expPass { - suite.Require().NoError(err) + s.Require().NoError(err) - _, found := suite.chainB.App.GetIBCKeeper().ChannelKeeperV2.GetPacketReceipt(suite.chainB.GetContext(), packet.DestinationClient, packet.Sequence) - suite.Require().True(found) + _, found := s.chainB.App.GetIBCKeeper().ChannelKeeperV2.GetPacketReceipt(s.chainB.GetContext(), packet.DestinationClient, packet.Sequence) + s.Require().True(found) } else { - suite.Require().Error(err) - suite.Require().ErrorIs(err, tc.expError) + s.Require().Error(err) + s.Require().ErrorIs(err, tc.expError) } }) } } -func (suite *KeeperTestSuite) TestWriteAcknowledgement() { +func (s *KeeperTestSuite) TestWriteAcknowledgement() { var ( - packet types.Packet - ack types.Acknowledgement + packet types.Packet + payload types.Payload + ack types.Acknowledgement ) testCases := []struct { @@ -253,12 +273,69 @@ func (suite *KeeperTestSuite) TestWriteAcknowledgement() { func() {}, nil, }, + { + "success with error ack", + func() { + ack = types.Acknowledgement{ + AppAcknowledgements: [][]byte{types.ErrorAcknowledgement[:]}, + } + }, + nil, + }, + { + "success multiple payloads", + func() { + packet.Payloads = append(packet.Payloads, 
payload) + ack = types.Acknowledgement{ + AppAcknowledgements: [][]byte{mockv2.MockRecvPacketResult.Acknowledgement, mockv2.MockRecvPacketResult.Acknowledgement}, + } + }, + nil, + }, + { + "success multiple payloads with error ack", + func() { + packet.Payloads = append(packet.Payloads, payload) + ack = types.Acknowledgement{ + AppAcknowledgements: [][]byte{types.ErrorAcknowledgement[:]}, + } + }, + nil, + }, + { + "failure: multiple payloads length doesn't match ack length", + func() { + packet.Payloads = append(packet.Payloads, payload, payload) + ack = types.Acknowledgement{ + AppAcknowledgements: [][]byte{mockv2.MockRecvPacketResult.Acknowledgement, mockv2.MockRecvPacketResult.Acknowledgement}, + } + }, + types.ErrInvalidAcknowledgement, + }, + { + "failure: single payload length doesn't match ack", + func() { + ack = types.Acknowledgement{ + AppAcknowledgements: [][]byte{mockv2.MockRecvPacketResult.Acknowledgement, mockv2.MockRecvPacketResult.Acknowledgement}, + } + }, + types.ErrInvalidAcknowledgement, + }, + { + "failure: invalid acknowledgement, error acknowledgement with success acknowledgement together", + func() { + ack = types.Acknowledgement{ + AppAcknowledgements: [][]byte{mockv2.MockRecvPacketResult.Acknowledgement, types.ErrorAcknowledgement[:]}, + } + }, + types.ErrInvalidAcknowledgement, + }, { "failure: client not found", func() { packet.DestinationClient = ibctesting.InvalidID - suite.chainB.App.GetIBCKeeper().ChannelKeeperV2.SetPacketReceipt(suite.chainB.GetContext(), packet.DestinationClient, packet.Sequence) - suite.chainB.App.GetIBCKeeper().ChannelKeeperV2.SetAsyncPacket(suite.chainB.GetContext(), packet.DestinationClient, packet.Sequence, packet) + s.chainB.App.GetIBCKeeper().ChannelKeeperV2.SetPacketReceipt(s.chainB.GetContext(), packet.DestinationClient, packet.Sequence) + s.chainB.App.GetIBCKeeper().ChannelKeeperV2.SetAsyncPacket(s.chainB.GetContext(), packet.DestinationClient, packet.Sequence, packet) }, clientv2types.ErrCounterpartyNotFound, }, @@ -266,8 +343,8 @@ func (suite *KeeperTestSuite) TestWriteAcknowledgement() { "failure: counterparty client identifier different than source client", func() { packet.SourceClient = unusedChannel - suite.chainB.App.GetIBCKeeper().ChannelKeeperV2.SetPacketReceipt(suite.chainB.GetContext(), packet.DestinationClient, packet.Sequence) - suite.chainB.App.GetIBCKeeper().ChannelKeeperV2.SetAsyncPacket(suite.chainB.GetContext(), packet.DestinationClient, packet.Sequence, packet) + s.chainB.App.GetIBCKeeper().ChannelKeeperV2.SetPacketReceipt(s.chainB.GetContext(), packet.DestinationClient, packet.Sequence) + s.chainB.App.GetIBCKeeper().ChannelKeeperV2.SetAsyncPacket(s.chainB.GetContext(), packet.DestinationClient, packet.Sequence, packet) }, clientv2types.ErrInvalidCounterparty, }, @@ -275,7 +352,7 @@ func (suite *KeeperTestSuite) TestWriteAcknowledgement() { "failure: ack already exists", func() { ackBz := types.CommitAcknowledgement(ack) - suite.chainB.App.GetIBCKeeper().ChannelKeeperV2.SetPacketAcknowledgement(suite.chainB.GetContext(), packet.DestinationClient, packet.Sequence, ackBz) + s.chainB.App.GetIBCKeeper().ChannelKeeperV2.SetPacketAcknowledgement(s.chainB.GetContext(), packet.DestinationClient, packet.Sequence, ackBz) }, types.ErrAcknowledgementExists, }, @@ -283,29 +360,30 @@ func (suite *KeeperTestSuite) TestWriteAcknowledgement() { "failure: receipt not found for packet", func() { packet.Sequence = 2 - suite.chainB.App.GetIBCKeeper().ChannelKeeperV2.SetAsyncPacket(suite.chainB.GetContext(), 
packet.DestinationClient, packet.Sequence, packet) + s.chainB.App.GetIBCKeeper().ChannelKeeperV2.SetAsyncPacket(s.chainB.GetContext(), packet.DestinationClient, packet.Sequence, packet) }, types.ErrInvalidPacket, }, { "failure: async packet not found", func() { - suite.chainB.App.GetIBCKeeper().ChannelKeeperV2.DeleteAsyncPacket(suite.chainB.GetContext(), packet.DestinationClient, packet.Sequence) + packet.Sequence = 2 + s.chainB.App.GetIBCKeeper().ChannelKeeperV2.SetPacketReceipt(s.chainB.GetContext(), packet.DestinationClient, packet.Sequence) }, types.ErrInvalidAcknowledgement, }, } for _, tc := range testCases { - suite.Run(tc.name, func() { - suite.SetupTest() // reset + s.Run(tc.name, func() { + s.SetupTest() // reset - path := ibctesting.NewPath(suite.chainA, suite.chainB) + path := ibctesting.NewPath(s.chainA, s.chainB) path.SetupV2() - payload := mockv2.NewMockPayload(mockv2.ModuleNameA, mockv2.ModuleNameB) + payload = mockv2.NewMockPayload(mockv2.ModuleNameA, mockv2.ModuleNameB) - timeoutTimestamp := uint64(suite.chainB.GetContext().BlockTime().Add(time.Hour).Unix()) + timeoutTimestamp := uint64(s.chainB.GetContext().BlockTime().Add(time.Hour).Unix()) // create standard packet that can be malleated packet = types.NewPacket(1, path.EndpointA.ClientID, path.EndpointB.ClientID, @@ -316,34 +394,40 @@ func (suite *KeeperTestSuite) TestWriteAcknowledgement() { AppAcknowledgements: [][]byte{mockv2.MockRecvPacketResult.Acknowledgement}, } - // mock receive with async acknowledgement - suite.chainB.App.GetIBCKeeper().ChannelKeeperV2.SetPacketReceipt(suite.chainB.GetContext(), packet.DestinationClient, packet.Sequence) - suite.chainB.App.GetIBCKeeper().ChannelKeeperV2.SetAsyncPacket(suite.chainB.GetContext(), packet.DestinationClient, packet.Sequence, packet) - tc.malleate() - err := suite.chainB.App.GetIBCKeeper().ChannelKeeperV2.WriteAcknowledgement(suite.chainB.GetContext(), packet.DestinationClient, packet.Sequence, ack) + // mock receive with async acknowledgement + // we mock the receive of sequence 1 manually so that the malleate can change the packet sequence + // without the stored receipt and async packet keys matching the malleated packet sequence + s.chainB.App.GetIBCKeeper().ChannelKeeperV2.SetPacketReceipt(s.chainB.GetContext(), packet.DestinationClient, 1) + s.chainB.App.GetIBCKeeper().ChannelKeeperV2.SetAsyncPacket(s.chainB.GetContext(), packet.DestinationClient, 1, packet) + + err := s.chainB.App.GetIBCKeeper().ChannelKeeperV2.WriteAcknowledgement(s.chainB.GetContext(), packet.DestinationClient, packet.Sequence, ack) expPass := tc.expError == nil if expPass { - suite.Require().NoError(err) + s.Require().NoError(err) - ackCommitment := suite.chainB.App.GetIBCKeeper().ChannelKeeperV2.GetPacketAcknowledgement(suite.chainB.GetContext(), packet.DestinationClient, packet.Sequence) - suite.Require().Equal(types.CommitAcknowledgement(ack), ackCommitment) + ackCommitment := s.chainB.App.GetIBCKeeper().ChannelKeeperV2.GetPacketAcknowledgement(s.chainB.GetContext(), packet.DestinationClient, packet.Sequence) + s.Require().Equal(types.CommitAcknowledgement(ack), ackCommitment) } else { - suite.Require().Error(err) - suite.Require().ErrorIs(err, tc.expError) + s.Require().Error(err) + s.Require().ErrorIs(err, tc.expError) } }) } } -func (suite *KeeperTestSuite) TestAcknowledgePacket() { +func (s *KeeperTestSuite) TestAcknowledgePacket() { var ( packet types.Packet err error ack = types.Acknowledgement{ - AppAcknowledgements: [][]byte{mockv2.MockRecvPacketResult.Acknowledgement}, + AppAcknowledgements:
[][]byte{ + mockv2.MockRecvPacketResult.Acknowledgement, + mockv2.MockRecvPacketResult.Acknowledgement, + mockv2.MockRecvPacketResult.Acknowledgement, + }, } freezeClient bool ) @@ -375,7 +459,7 @@ func (suite *KeeperTestSuite) TestAcknowledgePacket() { { "failure: packet commitment doesn't exist.", func() { - suite.chainA.App.GetIBCKeeper().ChannelKeeperV2.DeletePacketCommitment(suite.chainA.GetContext(), packet.SourceClient, packet.Sequence) + s.chainA.App.GetIBCKeeper().ChannelKeeperV2.DeletePacketCommitment(s.chainA.GetContext(), packet.SourceClient, packet.Sequence) }, types.ErrNoOpMsg, }, @@ -404,24 +488,24 @@ func (suite *KeeperTestSuite) TestAcknowledgePacket() { } for _, tc := range testCases { - suite.Run(tc.name, func() { - suite.SetupTest() // reset + s.Run(tc.name, func() { + s.SetupTest() // reset - path := ibctesting.NewPath(suite.chainA, suite.chainB) + path := ibctesting.NewPath(s.chainA, s.chainB) path.SetupV2() freezeClient = false payload := mockv2.NewMockPayload(mockv2.ModuleNameA, mockv2.ModuleNameB) - timeoutTimestamp := uint64(suite.chainB.GetContext().BlockTime().Add(time.Hour).Unix()) + timeoutTimestamp := uint64(s.chainB.GetContext().BlockTime().Add(time.Hour).Unix()) - // send packet - packet, err = path.EndpointA.MsgSendPacket(timeoutTimestamp, payload) - suite.Require().NoError(err) + // send packet with multiple payloads + packet, err = path.EndpointA.MsgSendPacket(timeoutTimestamp, payload, payload, payload) + s.Require().NoError(err) err = path.EndpointB.MsgRecvPacket(packet) - suite.Require().NoError(err) + s.Require().NoError(err) tc.malleate() @@ -432,23 +516,23 @@ func (suite *KeeperTestSuite) TestAcknowledgePacket() { path.EndpointA.FreezeClient() } - err = suite.chainA.App.GetIBCKeeper().ChannelKeeperV2.AcknowledgePacketTest(suite.chainA.GetContext(), packet, ack, proof, proofHeight) + err = s.chainA.App.GetIBCKeeper().ChannelKeeperV2.AcknowledgePacketTest(s.chainA.GetContext(), packet, ack, proof, proofHeight) expPass := tc.expError == nil if expPass { - suite.Require().NoError(err) + s.Require().NoError(err) - commitment := suite.chainA.App.GetIBCKeeper().ChannelKeeperV2.GetPacketCommitment(suite.chainA.GetContext(), packet.SourceClient, packet.Sequence) - suite.Require().Empty(commitment) + commitment := s.chainA.App.GetIBCKeeper().ChannelKeeperV2.GetPacketCommitment(s.chainA.GetContext(), packet.SourceClient, packet.Sequence) + s.Require().Empty(commitment) } else { - suite.Require().Error(err) - suite.Require().ErrorIs(err, tc.expError) + s.Require().Error(err) + s.Require().ErrorIs(err, tc.expError) } }) } } -func (suite *KeeperTestSuite) TestTimeoutPacket() { +func (s *KeeperTestSuite) TestTimeoutPacket() { var ( path *ibctesting.Path packet types.Packet @@ -464,9 +548,20 @@ func (suite *KeeperTestSuite) TestTimeoutPacket() { "success", func() { // send packet - _, _, err := suite.chainA.App.GetIBCKeeper().ChannelKeeperV2.SendPacketTest(suite.chainA.GetContext(), packet.SourceClient, + _, _, err := s.chainA.App.GetIBCKeeper().ChannelKeeperV2.SendPacketTest(s.chainA.GetContext(), packet.SourceClient, + packet.TimeoutTimestamp, packet.Payloads) + s.Require().NoError(err, "send packet failed") + }, + nil, + }, + { + "success multiple payloads", + func() { + // send packet with multiple payloads + packet.Payloads = append(packet.Payloads, packet.Payloads...) 
+ _, _, err := s.chainA.App.GetIBCKeeper().ChannelKeeperV2.SendPacketTest(s.chainA.GetContext(), packet.SourceClient, packet.TimeoutTimestamp, packet.Payloads) - suite.Require().NoError(err, "send packet failed") + s.Require().NoError(err, "send packet failed") }, nil, }, @@ -474,9 +569,9 @@ func (suite *KeeperTestSuite) TestTimeoutPacket() { "failure: client not found", func() { // send packet - _, _, err := suite.chainA.App.GetIBCKeeper().ChannelKeeperV2.SendPacketTest(suite.chainA.GetContext(), packet.SourceClient, + _, _, err := s.chainA.App.GetIBCKeeper().ChannelKeeperV2.SendPacketTest(s.chainA.GetContext(), packet.SourceClient, packet.TimeoutTimestamp, packet.Payloads) - suite.Require().NoError(err, "send packet failed") + s.Require().NoError(err, "send packet failed") packet.SourceClient = ibctesting.InvalidID }, @@ -486,9 +581,9 @@ func (suite *KeeperTestSuite) TestTimeoutPacket() { "failure: counterparty client identifier different than destination client", func() { // send packet - _, _, err := suite.chainA.App.GetIBCKeeper().ChannelKeeperV2.SendPacketTest(suite.chainA.GetContext(), packet.SourceClient, + _, _, err := s.chainA.App.GetIBCKeeper().ChannelKeeperV2.SendPacketTest(s.chainA.GetContext(), packet.SourceClient, packet.TimeoutTimestamp, packet.Payloads) - suite.Require().NoError(err, "send packet failed") + s.Require().NoError(err, "send packet failed") packet.DestinationClient = unusedChannel }, @@ -497,12 +592,12 @@ func (suite *KeeperTestSuite) TestTimeoutPacket() { { "failure: packet has not timed out yet", func() { - packet.TimeoutTimestamp = uint64(suite.chainB.GetContext().BlockTime().Add(time.Hour).Unix()) + packet.TimeoutTimestamp = uint64(s.chainB.GetContext().BlockTime().Add(time.Hour).Unix()) // send packet - _, _, err := suite.chainA.App.GetIBCKeeper().ChannelKeeperV2.SendPacketTest(suite.chainA.GetContext(), packet.SourceClient, + _, _, err := s.chainA.App.GetIBCKeeper().ChannelKeeperV2.SendPacketTest(s.chainA.GetContext(), packet.SourceClient, packet.TimeoutTimestamp, packet.Payloads) - suite.Require().NoError(err, "send packet failed") + s.Require().NoError(err, "send packet failed") }, types.ErrTimeoutNotReached, }, @@ -514,9 +609,9 @@ func (suite *KeeperTestSuite) TestTimeoutPacket() { { "failure: packet does not match commitment", func() { - _, _, err := suite.chainA.App.GetIBCKeeper().ChannelKeeperV2.SendPacketTest(suite.chainA.GetContext(), packet.SourceClient, + _, _, err := s.chainA.App.GetIBCKeeper().ChannelKeeperV2.SendPacketTest(s.chainA.GetContext(), packet.SourceClient, packet.TimeoutTimestamp, packet.Payloads) - suite.Require().NoError(err, "send packet failed") + s.Require().NoError(err, "send packet failed") // try to timeout packet with different data packet.Payloads[0].Value = []byte("different value") @@ -527,9 +622,9 @@ func (suite *KeeperTestSuite) TestTimeoutPacket() { "failure: client status invalid", func() { // send packet - _, _, err := suite.chainA.App.GetIBCKeeper().ChannelKeeperV2.SendPacketTest(suite.chainA.GetContext(), packet.SourceClient, + _, _, err := s.chainA.App.GetIBCKeeper().ChannelKeeperV2.SendPacketTest(s.chainA.GetContext(), packet.SourceClient, packet.TimeoutTimestamp, packet.Payloads) - suite.Require().NoError(err, "send packet failed") + s.Require().NoError(err, "send packet failed") freezeClient = true }, @@ -539,24 +634,24 @@ func (suite *KeeperTestSuite) TestTimeoutPacket() { "failure: verify non-membership failed", func() { // send packet - _, _, err := 
suite.chainA.App.GetIBCKeeper().ChannelKeeperV2.SendPacketTest(suite.chainA.GetContext(), packet.SourceClient, + _, _, err := s.chainA.App.GetIBCKeeper().ChannelKeeperV2.SendPacketTest(s.chainA.GetContext(), packet.SourceClient, packet.TimeoutTimestamp, packet.Payloads) - suite.Require().NoError(err, "send packet failed") + s.Require().NoError(err, "send packet failed") // set packet receipt to mock a valid past receive - suite.chainB.App.GetIBCKeeper().ChannelKeeperV2.SetPacketReceipt(suite.chainB.GetContext(), packet.DestinationClient, packet.Sequence) + s.chainB.App.GetIBCKeeper().ChannelKeeperV2.SetPacketReceipt(s.chainB.GetContext(), packet.DestinationClient, packet.Sequence) }, commitmenttypes.ErrInvalidProof, }, } for _, tc := range testCases { - suite.Run(tc.name, func() { - suite.SetupTest() + s.Run(tc.name, func() { + s.SetupTest() // initialize freezeClient to false freezeClient = false - path = ibctesting.NewPath(suite.chainA, suite.chainB) + path = ibctesting.NewPath(s.chainA, s.chainB) path.SetupV2() // create default packet with a timed out timestamp @@ -564,7 +659,7 @@ func (suite *KeeperTestSuite) TestTimeoutPacket() { // make timeoutTimestamp 1 second more than sending chain time to ensure it passes SendPacket // and times out successfully after update - timeoutTimestamp := uint64(suite.chainB.GetContext().BlockTime().Add(time.Second).Unix()) + timeoutTimestamp := uint64(s.chainB.GetContext().BlockTime().Add(time.Second).Unix()) // test cases may mutate timeout values packet = types.NewPacket(1, path.EndpointA.ClientID, path.EndpointB.ClientID, @@ -574,9 +669,9 @@ func (suite *KeeperTestSuite) TestTimeoutPacket() { // need to update chainA's client representing chainB to prove missing ack // commit the changes and update the clients - suite.coordinator.CommitBlock(path.EndpointA.Chain) - suite.Require().NoError(path.EndpointB.UpdateClient()) - suite.Require().NoError(path.EndpointA.UpdateClient()) + s.coordinator.CommitBlock(path.EndpointA.Chain) + s.Require().NoError(path.EndpointB.UpdateClient()) + s.Require().NoError(path.EndpointA.UpdateClient()) // get proof of packet receipt absence from chainB receiptKey := hostv2.PacketReceiptKey(packet.DestinationClient, packet.Sequence) @@ -586,18 +681,187 @@ func (suite *KeeperTestSuite) TestTimeoutPacket() { path.EndpointA.FreezeClient() } - err := suite.chainA.App.GetIBCKeeper().ChannelKeeperV2.TimeoutPacketTest(suite.chainA.GetContext(), packet, proof, proofHeight) + err := s.chainA.App.GetIBCKeeper().ChannelKeeperV2.TimeoutPacketTest(s.chainA.GetContext(), packet, proof, proofHeight) expPass := tc.expError == nil if expPass { - suite.Require().NoError(err) + s.Require().NoError(err) - commitment := suite.chainA.App.GetIBCKeeper().ChannelKeeperV2.GetPacketCommitment(suite.chainA.GetContext(), packet.DestinationClient, packet.Sequence) - suite.Require().Nil(commitment, "packet commitment not deleted") + commitment := s.chainA.App.GetIBCKeeper().ChannelKeeperV2.GetPacketCommitment(s.chainA.GetContext(), packet.DestinationClient, packet.Sequence) + s.Require().Nil(commitment, "packet commitment not deleted") } else { - suite.Require().Error(err) - suite.Require().ErrorIs(err, tc.expError) + s.Require().Error(err) + s.Require().ErrorIs(err, tc.expError) } }) } } + +func (s *KeeperTestSuite) TestAliasedChannel() { + path := ibctesting.NewPath(s.chainA, s.chainB) + path.Setup() + + // mock v1 format for both sides of the channel + s.mockV1Format(path.EndpointA) + s.mockV1Format(path.EndpointB) + + // migrate the store for both 
chains + err := v11.MigrateStore(s.chainA.GetContext(), runtime.NewKVStoreService(s.chainA.GetSimApp().GetKey(ibcexported.StoreKey)), s.chainA.App.AppCodec(), s.chainA.App.GetIBCKeeper()) + s.Require().NoError(err, "migrate store failed for chain A") + err = v11.MigrateStore(s.chainB.GetContext(), runtime.NewKVStoreService(s.chainB.GetSimApp().GetKey(ibcexported.StoreKey)), s.chainB.App.AppCodec(), s.chainB.App.GetIBCKeeper()) + s.Require().NoError(err, "migrate store failed for chain B") + + // create v2 path from the original client ids + // the path config is only used for updating + // the packet client ids will be the original channel identifiers + // but they are not validated against the client ids in the path in the tests + pathv2 := ibctesting.NewPath(s.chainA, s.chainB) + pathv2.EndpointA.ClientID = path.EndpointA.ClientID + pathv2.EndpointB.ClientID = path.EndpointB.ClientID + + // send a v1 packet on the channel id + // create a default payload and timeout timestamps + payload := mockv2.NewMockPayload(mockv2.ModuleNameA, mockv2.ModuleNameB) + + // create a timeout timestamp that is 1 hour in the future + timeoutTimestamp := uint64(s.chainA.GetContext().BlockTime().Add(time.Hour).Unix()) + timeoutTimestampNano := uint64(s.chainB.GetContext().BlockTime().Add(time.Hour).UnixNano()) + + // send v1 packet + sequence, err := path.EndpointA.SendPacket(clienttypes.Height{}, timeoutTimestampNano, ibctesting.MockPacketData) + s.Require().NoError(err) + s.Require().Equal(uint64(1), sequence, "sequence should be 1 for first packet on channel") + packetv1 := channeltypes.NewPacket(ibctesting.MockPacketData, sequence, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, clienttypes.Height{}, timeoutTimestampNano) + + // relay v1 packet + err = path.RelayPacket(packetv1) + s.Require().NoError(err, "relay v1 packet failed") + + // send v2 packet + msgSendPacket := types.NewMsgSendPacket( + path.EndpointA.ChannelID, + timeoutTimestamp, + path.EndpointA.Chain.SenderAccount.GetAddress().String(), + payload, + ) + res, err := path.EndpointA.Chain.SendMsgs(msgSendPacket) + s.Require().NoError(err, "send v2 packet failed") + + packetv2, err := ibctesting.ParseV2PacketFromEvents(res.Events) + s.Require().NoError(err, "parse v2 packet from events failed") + s.Require().Equal(uint64(2), packetv2.Sequence, "sequence should be incremented across protocol versions") + + err = path.EndpointB.UpdateClient() + s.Require().NoError(err) + + // relay v2 packet + err = pathv2.EndpointA.RelayPacket(packetv2) + s.Require().NoError(err) + + // send v1 packet again + sequence, err = path.EndpointA.SendPacket(clienttypes.Height{}, timeoutTimestampNano, ibctesting.MockPacketData) + s.Require().NoError(err) + s.Require().Equal(uint64(3), sequence, "sequence should be 3 for third packet on channel") + packetv1 = channeltypes.NewPacket(ibctesting.MockPacketData, sequence, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, clienttypes.Height{}, timeoutTimestampNano) + + // relay v1 packet again + err = path.RelayPacket(packetv1) + s.Require().NoError(err, "relay v1 packet failed") +} + +func (s *KeeperTestSuite) TestPostMigrationAliasing() { + path := ibctesting.NewPath(s.chainA, s.chainB) + path.Setup() + + // ensure we can send a v2 packet on the channel automatically + // after v1 channel handshake completes + // create v2 path from the original client ids + // the path
config is only used for updating + // the packet client ids will be the original channel identifiers + // but they are not validated against the client ids in the path in the tests + pathv2 := ibctesting.NewPath(s.chainA, s.chainB) + pathv2.EndpointA.ClientID = path.EndpointA.ClientID + pathv2.EndpointB.ClientID = path.EndpointB.ClientID + + // send a v1 packet on the channel id + // create default packet with a timed out timestamp + payload := mockv2.NewMockPayload(mockv2.ModuleNameA, mockv2.ModuleNameB) + + // create a timeout timestamp that is 1 hour in the future + timeoutTimestamp := uint64(s.chainA.GetContext().BlockTime().Add(time.Hour).Unix()) + timeoutTimestampNano := uint64(s.chainB.GetContext().BlockTime().Add(time.Hour).UnixNano()) + + // send v1 packet + sequence, err := path.EndpointA.SendPacket(clienttypes.Height{}, timeoutTimestampNano, ibctesting.MockPacketData) + s.Require().NoError(err) + s.Require().Equal(uint64(1), sequence, "sequence should be 1 for first packet on channel") + packetv1 := channeltypes.NewPacket(ibctesting.MockPacketData, sequence, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, clienttypes.Height{}, timeoutTimestampNano) + + // relay v1 packet + err = path.RelayPacket(packetv1) + s.Require().NoError(err, "relay v1 packet failed") + + // send v2 packet + msgSendPacket := types.NewMsgSendPacket( + path.EndpointA.ChannelID, + timeoutTimestamp, + path.EndpointA.Chain.SenderAccount.GetAddress().String(), + payload, + ) + res, err := path.EndpointA.Chain.SendMsgs(msgSendPacket) + s.Require().NoError(err, "send v2 packet failed") + + packetv2, err := ibctesting.ParseV2PacketFromEvents(res.Events) + s.Require().NoError(err, "parse v2 packet from events failed") + s.Require().Equal(uint64(2), packetv2.Sequence, "sequence should be incremented across protocol versions") + + err = path.EndpointB.UpdateClient() + s.Require().NoError(err) + + // relay v2 packet + err = pathv2.EndpointA.RelayPacket(packetv2) + s.Require().NoError(err) + + // send a v2 packet on the channel id that will timeout + timedOutTimestamp := uint64(s.chainB.GetContext().BlockTime().Add(time.Second).Unix()) + // send v2 packet + msgSendPacketTimeout := types.NewMsgSendPacket( + path.EndpointA.ChannelID, + timedOutTimestamp, + path.EndpointA.Chain.SenderAccount.GetAddress().String(), + payload, + ) + res, err = path.EndpointA.Chain.SendMsgs(msgSendPacketTimeout) + s.Require().NoError(err, "send v2 packet failed") + + packetv2Timeout, err := ibctesting.ParseV2PacketFromEvents(res.Events) + s.Require().NoError(err, "parse v2 packet from events failed") + s.Require().Equal(uint64(2), packetv2.Sequence, "sequence should be incremented across protocol versions") + + s.coordinator.IncrementTime() + + err = path.EndpointA.UpdateClient() + s.Require().NoError(err) + + err = path.EndpointA.MsgTimeoutPacket(packetv2Timeout) + s.Require().NoError(err, "timeout v2 packet failed") +} + +func (s *KeeperTestSuite) mockV1Format(endpoint *ibctesting.Endpoint) { + // mock v1 format by setting the sequence in the old key + seq, ok := endpoint.Chain.App.GetIBCKeeper().ChannelKeeper.GetNextSequenceSend(endpoint.Chain.GetContext(), endpoint.ChannelConfig.PortID, endpoint.ChannelID) + s.Require().True(ok, "next sequence send should exist in v1 format") + + // move the next sequence send back to the old v1 format key + // so we can migrate it in our tests + storeService := 
runtime.NewKVStoreService(endpoint.Chain.GetSimApp().GetKey(ibcexported.StoreKey)) + store := storeService.OpenKVStore(endpoint.Chain.GetContext()) + err := store.Set(v11.NextSequenceSendV1Key(endpoint.ChannelConfig.PortID, endpoint.ChannelID), sdk.Uint64ToBigEndian(seq)) + s.Require().NoError(err) + err = store.Delete(hostv2.NextSequenceSendKey(endpoint.ChannelID)) + s.Require().NoError(err) + + // Remove counterparty to mock pre migration channels + clientStore := endpoint.Chain.App.GetIBCKeeper().ClientKeeper.ClientStore(endpoint.Chain.GetContext(), endpoint.ChannelID) + clientStore.Delete(clientv2types.CounterpartyKey()) +} diff --git a/modules/core/04-channel/v2/module_test.go b/modules/core/04-channel/v2/module_test.go index afe5441682a..959ad8dda21 100644 --- a/modules/core/04-channel/v2/module_test.go +++ b/modules/core/04-channel/v2/module_test.go @@ -23,9 +23,9 @@ type ModuleTestSuite struct { chainC *ibctesting.TestChain } -func (suite *ModuleTestSuite) SetupTest() { - suite.coordinator = ibctesting.NewCoordinator(suite.T(), 3) - suite.chainA = suite.coordinator.GetChain(ibctesting.GetChainID(1)) - suite.chainB = suite.coordinator.GetChain(ibctesting.GetChainID(2)) - suite.chainC = suite.coordinator.GetChain(ibctesting.GetChainID(3)) +func (s *ModuleTestSuite) SetupTest() { + s.coordinator = ibctesting.NewCoordinator(s.T(), 3) + s.chainA = s.coordinator.GetChain(ibctesting.GetChainID(1)) + s.chainB = s.coordinator.GetChain(ibctesting.GetChainID(2)) + s.chainC = s.coordinator.GetChain(ibctesting.GetChainID(3)) } diff --git a/modules/core/04-channel/v2/types/acknowledgement.go b/modules/core/04-channel/v2/types/acknowledgement.go index 0fe321614e9..570f537583f 100644 --- a/modules/core/04-channel/v2/types/acknowledgement.go +++ b/modules/core/04-channel/v2/types/acknowledgement.go @@ -4,7 +4,7 @@ import ( "bytes" "crypto/sha256" - proto "github.com/cosmos/gogoproto/proto" + "github.com/cosmos/gogoproto/proto" errorsmod "cosmossdk.io/errors" @@ -23,14 +23,24 @@ func NewAcknowledgement(appAcknowledgements ...[]byte) Acknowledgement { // Validate performs a basic validation of the acknowledgement func (ack Acknowledgement) Validate() error { - if len(ack.AppAcknowledgements) != 1 { - return errorsmod.Wrap(ErrInvalidAcknowledgement, "app acknowledgements must be of length one") + // acknowledgement list should be non-empty + if len(ack.AppAcknowledgements) == 0 { + return errorsmod.Wrap(ErrInvalidAcknowledgement, "app acknowledgements must be non-empty") } - for _, ack := range ack.AppAcknowledgements { - if len(ack) == 0 { + for _, a := range ack.AppAcknowledgements { + // Each app acknowledgement should be non-empty + if len(a) == 0 { return errorsmod.Wrap(ErrInvalidAcknowledgement, "app acknowledgement cannot be empty") } + + // Ensure that the app acknowledgement contains ErrorAcknowledgement + // **if and only if** the app acknowledgement list has a single element + if len(ack.AppAcknowledgements) > 1 { + if bytes.Equal(a, ErrorAcknowledgement[:]) { + return errorsmod.Wrap(ErrInvalidAcknowledgement, "cannot have the error acknowledgement in multi acknowledgement list") + } + } } return nil diff --git a/modules/core/04-channel/v2/types/acknowledgement_test.go b/modules/core/04-channel/v2/types/acknowledgement_test.go index 458e0c6b2e5..e9fd3caf53e 100644 --- a/modules/core/04-channel/v2/types/acknowledgement_test.go +++ b/modules/core/04-channel/v2/types/acknowledgement_test.go @@ -22,8 +22,13 @@ func (s *TypesTestSuite) Test_ValidateAcknowledgement() { nil, }, { - "failure: more 
than one app acknowledgements", + "success: more than one app acknowledgements", types.NewAcknowledgement([]byte("appAck1"), []byte("appAck2")), + nil, + }, + { + "failure: empty acknowledgement", + types.NewAcknowledgement(), types.ErrInvalidAcknowledgement, }, { @@ -31,6 +36,11 @@ func (s *TypesTestSuite) Test_ValidateAcknowledgement() { types.NewAcknowledgement([]byte("")), types.ErrInvalidAcknowledgement, }, + { + "failure: error acknowledgment in multiple payload list", + types.NewAcknowledgement(types.ErrorAcknowledgement[:], []byte("appAck2")), + types.ErrInvalidAcknowledgement, + }, } for _, tc := range testCases { diff --git a/modules/core/04-channel/v2/types/genesis_test.go b/modules/core/04-channel/v2/types/genesis_test.go index 7beebff1058..099a721bce8 100644 --- a/modules/core/04-channel/v2/types/genesis_test.go +++ b/modules/core/04-channel/v2/types/genesis_test.go @@ -71,7 +71,6 @@ func TestValidateGenesis(t *testing.T) { } for _, tc := range testCases { - err := tc.genState.Validate() expPass := tc.expError == nil diff --git a/modules/core/04-channel/v2/types/keys.go b/modules/core/04-channel/v2/types/keys.go index c1a1eca7019..81d483c22b1 100644 --- a/modules/core/04-channel/v2/types/keys.go +++ b/modules/core/04-channel/v2/types/keys.go @@ -10,6 +10,9 @@ const ( // KeyAsyncPacket defines the key to store the async packet. KeyAsyncPacket = "async_packet" + + // KeyAlias defines the key to store the alias to base client mapping. + KeyAlias = "alias" ) // AsyncPacketKey returns the key under which the packet is stored @@ -23,3 +26,9 @@ func AsyncPacketKey(clientID string, sequence uint64) []byte { func AsyncPacketPrefixKey(clientID string) []byte { return append([]byte(clientID), []byte(KeyAsyncPacket)...) } + +// AliasKey returns the key under which the base clientID will be stored +// for an alias (original v1 channelID) +func AliasKey(alias string) []byte { + return append([]byte(alias), []byte(KeyAlias)...) 
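// Illustrative sketch (identifiers assumed, not part of this diff) of how the alias
// mapping introduced above is expected to be used: the original v1 channel identifier
// acts as the alias, and the stored value is the underlying base client identifier.
//
//	key := types.AliasKey("channel-0")        // []byte("channel-0" + "alias")
//	store.Set(key, []byte("07-tendermint-0")) // alias -> base clientID
//	baseClientID := string(store.Get(key))    // "07-tendermint-0"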
+} diff --git a/modules/core/04-channel/v2/types/msgs.go b/modules/core/04-channel/v2/types/msgs.go index 0843e6acc32..209369db90a 100644 --- a/modules/core/04-channel/v2/types/msgs.go +++ b/modules/core/04-channel/v2/types/msgs.go @@ -49,8 +49,8 @@ func (msg *MsgSendPacket) ValidateBasic() error { return errorsmod.Wrap(ErrInvalidTimeout, "timeout must not be 0") } - if len(msg.Payloads) != 1 { - return errorsmod.Wrapf(ErrInvalidPayload, "payloads must be of length 1, got %d instead", len(msg.Payloads)) + if len(msg.Payloads) == 0 { + return errorsmod.Wrapf(ErrInvalidPayload, "payload length must be greater than 0") } for _, pd := range msg.Payloads { diff --git a/modules/core/04-channel/v2/types/msgs_test.go b/modules/core/04-channel/v2/types/msgs_test.go index dc251cca0cc..09d4d77b325 100644 --- a/modules/core/04-channel/v2/types/msgs_test.go +++ b/modules/core/04-channel/v2/types/msgs_test.go @@ -37,6 +37,7 @@ func TestTypesTestSuite(t *testing.T) { func (s *TypesTestSuite) TestMsgSendPacketValidateBasic() { var msg *types.MsgSendPacket + var payload types.Payload testCases := []struct { name string malleate func() @@ -46,6 +47,12 @@ func (s *TypesTestSuite) TestMsgSendPacketValidateBasic() { name: "success", malleate: func() {}, }, + { + name: "success, multiple payloads", + malleate: func() { + msg.Payloads = append(msg.Payloads, payload) + }, + }, { name: "failure: invalid source channel", malleate: func() { @@ -63,16 +70,16 @@ func (s *TypesTestSuite) TestMsgSendPacketValidateBasic() { { name: "failure: invalid length for payload", malleate: func() { - msg.Payloads = []types.Payload{{}, {}} + msg.Payloads = []types.Payload{} }, expError: types.ErrInvalidPayload, }, { name: "failure: invalid packetdata", malleate: func() { - msg.Payloads = []types.Payload{} + msg.Payloads = []types.Payload{{}} }, - expError: types.ErrInvalidPayload, + expError: host.ErrInvalidID, }, { name: "failure: invalid payload", @@ -81,6 +88,14 @@ func (s *TypesTestSuite) TestMsgSendPacketValidateBasic() { }, expError: host.ErrInvalidID, }, + { + name: "failure: invalid multiple payload", + malleate: func() { + payload.DestinationPort = "" + msg.Payloads = append(msg.Payloads, payload) + }, + expError: host.ErrInvalidID, + }, { name: "failure: invalid signer", malleate: func() { @@ -91,10 +106,11 @@ func (s *TypesTestSuite) TestMsgSendPacketValidateBasic() { } for _, tc := range testCases { s.Run(tc.name, func() { + payload = types.Payload{SourcePort: ibctesting.MockPort, DestinationPort: ibctesting.MockPort, Version: "ics20-1", Encoding: transfertypes.EncodingJSON, Value: ibctesting.MockPacketData} msg = types.NewMsgSendPacket( ibctesting.FirstChannelID, s.chainA.GetTimeoutTimestamp(), s.chainA.SenderAccount.GetAddress().String(), - types.Payload{SourcePort: ibctesting.MockPort, DestinationPort: ibctesting.MockPort, Version: "ics20-1", Encoding: transfertypes.EncodingJSON, Value: ibctesting.MockPacketData}, + payload, ) tc.malleate() @@ -104,7 +120,8 @@ func (s *TypesTestSuite) TestMsgSendPacketValidateBasic() { if expPass { s.Require().NoError(err) } else { - ibctesting.RequireErrorIsOrContains(s.T(), err, tc.expError) + s.Require().Error(err) + ibctesting.RequireErrorIsOrContains(s.T(), err, tc.expError, err.Error()) } }) } @@ -122,11 +139,17 @@ func (s *TypesTestSuite) TestMsgRecvPacketValidateBasic() { malleate: func() {}, }, { - name: "failure: invalid packet", + name: "success, multiple payloads", + malleate: func() { + msg.Packet.Payloads = append(msg.Packet.Payloads, 
mockv2.NewMockPayload(mockv2.ModuleNameA, mockv2.ModuleNameB)) + }, + }, + { + name: "failure: invalid payload", malleate: func() { msg.Packet.Payloads = []types.Payload{} }, - expError: types.ErrInvalidPacket, + expError: types.ErrInvalidPayload, }, { name: "failure: invalid proof commitment", @@ -138,9 +161,23 @@ func (s *TypesTestSuite) TestMsgRecvPacketValidateBasic() { { name: "failure: invalid length for packet payloads", malleate: func() { - msg.Packet.Payloads = []types.Payload{{}, {}} + msg.Packet.Payloads = []types.Payload{} }, - expError: types.ErrInvalidPacket, + expError: types.ErrInvalidPayload, + }, + { + name: "failure: invalid individual payload", + malleate: func() { + msg.Packet.Payloads = []types.Payload{{}} + }, + expError: host.ErrInvalidID, + }, + { + name: "failure: invalid multiple payload", + malleate: func() { + msg.Packet.Payloads = append(msg.Packet.Payloads, types.Payload{}) + }, + expError: host.ErrInvalidID, }, { name: "failure: invalid signer", @@ -182,6 +219,12 @@ func (s *TypesTestSuite) TestMsgAcknowledge_ValidateBasic() { name: "success", malleate: func() {}, }, + { + name: "success, multiple payloads", + malleate: func() { + msg.Packet.Payloads = append(msg.Packet.Payloads, mockv2.NewMockPayload(mockv2.ModuleNameA, mockv2.ModuleNameB)) + }, + }, { name: "failure: invalid proof of acknowledgement", malleate: func() { @@ -192,9 +235,23 @@ func (s *TypesTestSuite) TestMsgAcknowledge_ValidateBasic() { { name: "failure: invalid length for packet payloads", malleate: func() { - msg.Packet.Payloads = []types.Payload{{}, {}} + msg.Packet.Payloads = []types.Payload{} }, - expError: types.ErrInvalidPacket, + expError: types.ErrInvalidPayload, + }, + { + name: "failure: invalid individual payload", + malleate: func() { + msg.Packet.Payloads = []types.Payload{{}} + }, + expError: host.ErrInvalidID, + }, + { + name: "failure: invalid multiple payload", + malleate: func() { + msg.Packet.Payloads = append(msg.Packet.Payloads, types.Payload{}) + }, + expError: host.ErrInvalidID, }, { name: "failure: invalid signer", @@ -253,6 +310,12 @@ func (s *TypesTestSuite) TestMsgTimeoutValidateBasic() { name: "success", malleate: func() {}, }, + { + name: "success, multiple payloads", + malleate: func() { + msg.Packet.Payloads = append(msg.Packet.Payloads, mockv2.NewMockPayload(mockv2.ModuleNameA, mockv2.ModuleNameB)) + }, + }, { name: "failure: invalid signer", malleate: func() { @@ -263,9 +326,23 @@ func (s *TypesTestSuite) TestMsgTimeoutValidateBasic() { { name: "failure: invalid length for packet payloads", malleate: func() { - msg.Packet.Payloads = []types.Payload{{}, {}} + msg.Packet.Payloads = []types.Payload{} }, - expError: types.ErrInvalidPacket, + expError: types.ErrInvalidPayload, + }, + { + name: "failure: invalid individual payload", + malleate: func() { + msg.Packet.Payloads = []types.Payload{{}} + }, + expError: host.ErrInvalidID, + }, + { + name: "failure: invalid multiple payload", + malleate: func() { + msg.Packet.Payloads = append(msg.Packet.Payloads, types.Payload{}) + }, + expError: host.ErrInvalidID, }, { name: "failure: invalid packet", diff --git a/modules/core/04-channel/v2/types/packet.go b/modules/core/04-channel/v2/types/packet.go index cf2c63b11bf..85e0c41891f 100644 --- a/modules/core/04-channel/v2/types/packet.go +++ b/modules/core/04-channel/v2/types/packet.go @@ -33,8 +33,8 @@ func NewPayload(sourcePort, destPort, version, encoding string, value []byte) Pa // ValidateBasic validates that a Packet satisfies the basic requirements. 
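// A minimal sketch of building a multi-payload MsgSendPacket, which the relaxed length
// checks above now accept (assuming NewMsgSendPacket takes variadic payloads, as its
// single-payload use in the tests above suggests; port names, encodings and values here
// are hypothetical):
//
//	payloadA := types.NewPayload("transfer", "transfer", "ics20-1", transfertypes.EncodingJSON, transferBz)
//	payloadB := types.NewPayload("mockA", "mockB", "mock-version", "application/json", mockBz)
//	msg := types.NewMsgSendPacket(clientID, timeoutTimestamp, signer, payloadA, payloadB)
//	// ValidateBasic rejects an empty payload list and validates every payload individually.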
func (p Packet) ValidateBasic() error { - if len(p.Payloads) != 1 { - return errorsmod.Wrap(ErrInvalidPacket, "payloads must contain exactly one payload") + if len(p.Payloads) == 0 { + return errorsmod.Wrapf(ErrInvalidPayload, "payload length must be greater than 0") } totalPayloadsSize := 0 @@ -46,7 +46,7 @@ func (p Packet) ValidateBasic() error { } if totalPayloadsSize > channeltypesv1.MaximumPayloadsSize { - return errorsmod.Wrapf(ErrInvalidPacket, "packet data bytes cannot exceed %d bytes", channeltypesv1.MaximumPayloadsSize) + return errorsmod.Wrapf(ErrInvalidPayload, "packet data bytes cannot exceed %d bytes", channeltypesv1.MaximumPayloadsSize) } if err := host.ChannelIdentifierValidator(p.SourceClient); err != nil { diff --git a/modules/core/04-channel/v2/types/packet_test.go b/modules/core/04-channel/v2/types/packet_test.go index 9a2f967473d..f32666daaa4 100644 --- a/modules/core/04-channel/v2/types/packet_test.go +++ b/modules/core/04-channel/v2/types/packet_test.go @@ -17,6 +17,7 @@ import ( // TestValidateBasic tests the ValidateBasic function of Packet func TestValidateBasic(t *testing.T) { var packet types.Packet + var payload types.Payload testCases := []struct { name string malleate func() @@ -34,28 +35,42 @@ func TestValidateBasic(t *testing.T) { }, nil, }, + { + "success, multiple payloads", + func() { + packet.Payloads = append(packet.Payloads, payload) + }, + nil, + }, { "failure: invalid single payloads size", func() { // bytes that are larger than MaxPayloadsSize packet.Payloads[0].Value = make([]byte, channeltypesv1.MaximumPayloadsSize+1) }, - types.ErrInvalidPacket, + types.ErrInvalidPayload, + }, + { + "failure: invalid total payloads size", + func() { + payload.Value = make([]byte, channeltypesv1.MaximumPayloadsSize-1) + packet.Payloads = append(packet.Payloads, payload) + }, + types.ErrInvalidPayload, }, - // TODO: add test cases for multiple payloads when enabled (#7008) { "failure: payloads is nil", func() { packet.Payloads = nil }, - types.ErrInvalidPacket, + types.ErrInvalidPayload, }, { "failure: empty payload", func() { packet.Payloads = []types.Payload{} }, - types.ErrInvalidPacket, + types.ErrInvalidPayload, }, { "failure: invalid payload source port ID", @@ -123,13 +138,14 @@ func TestValidateBasic(t *testing.T) { } for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { - packet = types.NewPacket(1, ibctesting.FirstChannelID, ibctesting.SecondChannelID, uint64(time.Now().Unix()), types.Payload{ + payload = types.Payload{ SourcePort: ibctesting.MockPort, DestinationPort: ibctesting.MockPort, Version: "ics20-v2", Encoding: transfertypes.EncodingProtobuf, Value: mock.MockPacketData, - }) + } + packet = types.NewPacket(1, ibctesting.FirstChannelID, ibctesting.SecondChannelID, uint64(time.Now().Unix()), payload) tc.malleate() diff --git a/modules/core/04-channel/v2/types/query.pb.go b/modules/core/04-channel/v2/types/query.pb.go index 90491129635..1f1a8030612 100644 --- a/modules/core/04-channel/v2/types/query.pb.go +++ b/modules/core/04-channel/v2/types/query.pb.go @@ -1365,6 +1365,7 @@ func _Query_UnreceivedAcks_Handler(srv interface{}, ctx context.Context, dec fun return interceptor(ctx, in, info, handler) } +var Query_serviceDesc = _Query_serviceDesc var _Query_serviceDesc = grpc.ServiceDesc{ ServiceName: "ibc.core.channel.v2.Query", HandlerType: (*QueryServer)(nil), diff --git a/modules/core/04-channel/v2/types/tx.pb.go b/modules/core/04-channel/v2/types/tx.pb.go index 923b9d8f210..8ea2ed1003b 100644 --- 
a/modules/core/04-channel/v2/types/tx.pb.go +++ b/modules/core/04-channel/v2/types/tx.pb.go @@ -623,6 +623,7 @@ func _Msg_Acknowledgement_Handler(srv interface{}, ctx context.Context, dec func return interceptor(ctx, in, info, handler) } +var Msg_serviceDesc = _Msg_serviceDesc var _Msg_serviceDesc = grpc.ServiceDesc{ ServiceName: "ibc.core.channel.v2.Msg", HandlerType: (*MsgServer)(nil), diff --git a/modules/core/05-port/keeper/keeper.go b/modules/core/05-port/keeper/keeper.go index 3c0dcf6232e..e12d2b06b85 100644 --- a/modules/core/05-port/keeper/keeper.go +++ b/modules/core/05-port/keeper/keeper.go @@ -24,7 +24,7 @@ func NewKeeper() *Keeper { } // Logger returns a module-specific logger. -func (Keeper) Logger(ctx sdk.Context) log.Logger { +func (*Keeper) Logger(ctx sdk.Context) log.Logger { return ctx.Logger().With("module", "x/"+exported.ModuleName+"/"+types.SubModuleName) } diff --git a/modules/core/05-port/keeper/keeper_test.go b/modules/core/05-port/keeper/keeper_test.go index 659beece1f4..6f59e0fafc8 100644 --- a/modules/core/05-port/keeper/keeper_test.go +++ b/modules/core/05-port/keeper/keeper_test.go @@ -18,12 +18,12 @@ type KeeperTestSuite struct { keeper *keeper.Keeper } -func (suite *KeeperTestSuite) SetupTest() { +func (s *KeeperTestSuite) SetupTest() { isCheckTx := false - app := simapp.Setup(suite.T(), isCheckTx) + app := simapp.Setup(s.T(), isCheckTx) - suite.ctx = app.NewContext(isCheckTx) - suite.keeper = app.IBCKeeper.PortKeeper + s.ctx = app.NewContext(isCheckTx) + s.keeper = app.IBCKeeper.PortKeeper } func TestKeeperTestSuite(t *testing.T) { diff --git a/modules/core/05-port/types/module.go b/modules/core/05-port/types/module.go index 2d2779ad126..67f33a0b855 100644 --- a/modules/core/05-port/types/module.go +++ b/modules/core/05-port/types/module.go @@ -104,6 +104,15 @@ type IBCModule interface { packet channeltypes.Packet, relayer sdk.AccAddress, ) error + + // SetICS4Wrapper sets the ICS4Wrapper. This function may be used after + // the module's initialization to set the middleware which is above this + // module in the IBC application stack. + // The ICS4Wrapper **must** be used for sending packets and writing acknowledgements + // to ensure that the middleware can intercept and process these calls. + // Do not use the channel keeper directly to send packets or write acknowledgements + // as this will bypass the middleware. + SetICS4Wrapper(wrapper ICS4Wrapper) } // ICS4Wrapper implements the ICS4 interfaces that IBC applications use to send packets and acknowledgements. @@ -135,6 +144,10 @@ type ICS4Wrapper interface { type Middleware interface { IBCModule ICS4Wrapper + + // SetUnderlyingModule sets the underlying IBC module. This function may be used after + // the middleware's initialization to set the ibc module which is below this middleware. + SetUnderlyingApplication(IBCModule) } // PacketDataUnmarshaler defines an optional interface which allows a middleware to @@ -145,3 +158,8 @@ type PacketDataUnmarshaler interface { // the packet data can be unmarshaled based on the channel version. 
UnmarshalPacketData(ctx sdk.Context, portID string, channelID string, bz []byte) (any, string, error) } + +type PacketUnmarshalerModule interface { + PacketDataUnmarshaler + IBCModule +} diff --git a/modules/core/05-port/types/router.go b/modules/core/05-port/types/router.go index 5c7014526f4..5e6e6aa5bd8 100644 --- a/modules/core/05-port/types/router.go +++ b/modules/core/05-port/types/router.go @@ -31,7 +31,7 @@ func (rtr *Router) Seal() { } // Sealed returns a boolean signifying if the Router is sealed or not. -func (rtr Router) Sealed() bool { +func (rtr *Router) Sealed() bool { return rtr.sealed } diff --git a/modules/core/05-port/types/stack.go b/modules/core/05-port/types/stack.go new file mode 100644 index 00000000000..90bfd7488cf --- /dev/null +++ b/modules/core/05-port/types/stack.go @@ -0,0 +1,57 @@ +package types + +type IBCStackBuilder struct { + middlewares []Middleware + baseModule IBCModule + channelKeeper ICS4Wrapper +} + +func NewIBCStackBuilder(chanKeeper ICS4Wrapper) *IBCStackBuilder { + return &IBCStackBuilder{ + channelKeeper: chanKeeper, + } +} + +func (b *IBCStackBuilder) Next(middleware Middleware) *IBCStackBuilder { + b.middlewares = append(b.middlewares, middleware) + return b +} + +func (b *IBCStackBuilder) Base(baseModule IBCModule) *IBCStackBuilder { + if baseModule == nil { + panic("base module cannot be nil") + } + if b.baseModule != nil { + panic("base module already set") + } + b.baseModule = baseModule + return b +} + +func (b *IBCStackBuilder) Build() IBCModule { + if b.baseModule == nil { + panic("base module cannot be nil") + } + if len(b.middlewares) == 0 { + panic("middlewares cannot be empty") + } + if b.channelKeeper == nil { + panic("channel keeper cannot be nil") + } + + // Build the stack by moving up the middleware list + // and setting the underlying application for each middleware + // and the ICS4wrapper for the underlying module. 
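// A minimal sketch (hypothetical middleware type, not part of this diff) of the two
// wiring hooks used below: a middleware keeps a reference to the ICS4Wrapper it must
// send through and to the application directly beneath it, so the stack can be
// assembled after the modules have been constructed.
//
//	type ExampleMiddleware struct {
//		app  IBCModule   // set via SetUnderlyingApplication
//		ics4 ICS4Wrapper // set via SetICS4Wrapper
//	}
//
//	func (m *ExampleMiddleware) SetUnderlyingApplication(app IBCModule) { m.app = app }
//	func (m *ExampleMiddleware) SetICS4Wrapper(wrapper ICS4Wrapper)     { m.ics4 = wrapper }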
+ underlyingModule := b.baseModule + for i := range len(b.middlewares) { + b.middlewares[i].SetUnderlyingApplication(underlyingModule) + underlyingModule.SetICS4Wrapper(b.middlewares[i]) + underlyingModule = b.middlewares[i] + } + + // set the top level channel keeper as the ICS4Wrapper + // for the lop level middleware + b.middlewares[len(b.middlewares)-1].SetICS4Wrapper(b.channelKeeper) + + return b.middlewares[len(b.middlewares)-1] +} diff --git a/modules/core/23-commitment/types/codec_test.go b/modules/core/23-commitment/types/codec_test.go index 6a6e9acae9b..67c57eeaf74 100644 --- a/modules/core/23-commitment/types/codec_test.go +++ b/modules/core/23-commitment/types/codec_test.go @@ -11,7 +11,7 @@ import ( "github.com/cosmos/ibc-go/v10/modules/core/23-commitment/types/v2" ) -func (suite *MerkleTestSuite) TestCodecTypeRegistration() { +func (s *MerkleTestSuite) TestCodecTypeRegistration() { testCases := []struct { name string typeURL string @@ -40,16 +40,16 @@ func (suite *MerkleTestSuite) TestCodecTypeRegistration() { } for _, tc := range testCases { - suite.Run(tc.name, func() { + s.Run(tc.name, func() { encodingCfg := moduletestutil.MakeTestEncodingConfig(ibc.AppModuleBasic{}) msg, err := encodingCfg.Codec.InterfaceRegistry().Resolve(tc.typeURL) if tc.expErr == nil { - suite.NotNil(msg) - suite.Require().NoError(err) + s.Require().NotNil(msg) + s.Require().NoError(err) } else { - suite.Nil(msg) - suite.Require().ErrorContains(err, tc.expErr.Error()) + s.Nil(msg) + s.Require().ErrorContains(err, tc.expErr.Error()) } }) } diff --git a/modules/core/23-commitment/types/commitment_test.go b/modules/core/23-commitment/types/commitment_test.go index 2000a4814d7..4fa6376a333 100644 --- a/modules/core/23-commitment/types/commitment_test.go +++ b/modules/core/23-commitment/types/commitment_test.go @@ -21,19 +21,19 @@ type MerkleTestSuite struct { iavlStore *iavl.Store } -func (suite *MerkleTestSuite) SetupTest() { +func (s *MerkleTestSuite) SetupTest() { db := dbm.NewMemDB() - suite.store = rootmulti.NewStore(db, log.NewNopLogger(), metrics.NewNoOpMetrics()) + s.store = rootmulti.NewStore(db, log.NewNopLogger(), metrics.NewNoOpMetrics()) - suite.storeKey = storetypes.NewKVStoreKey("iavlStoreKey") + s.storeKey = storetypes.NewKVStoreKey("iavlStoreKey") - suite.store.MountStoreWithDB(suite.storeKey, storetypes.StoreTypeIAVL, nil) - err := suite.store.LoadVersion(0) - suite.Require().NoError(err) + s.store.MountStoreWithDB(s.storeKey, storetypes.StoreTypeIAVL, nil) + err := s.store.LoadVersion(0) + s.Require().NoError(err) var ok bool - suite.iavlStore, ok = suite.store.GetCommitStore(suite.storeKey).(*iavl.Store) - suite.Require().True(ok) + s.iavlStore, ok = s.store.GetCommitStore(s.storeKey).(*iavl.Store) + s.Require().True(ok) } func TestMerkleTestSuite(t *testing.T) { diff --git a/modules/core/23-commitment/types/merkle.go b/modules/core/23-commitment/types/merkle.go index 79ee37c413a..6ef5949a0bf 100644 --- a/modules/core/23-commitment/types/merkle.go +++ b/modules/core/23-commitment/types/merkle.go @@ -82,13 +82,13 @@ func ApplyPrefix(prefix exported.Prefix, path v2.MerklePath) (v2.MerklePath, err // VerifyMembership verifies the membership of a merkle proof against the given root, path, and value. // Note that the path is expected as []string{, }. 
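// A hypothetical wiring example (module and middleware names assumed, not part of this
// diff) for the IBCStackBuilder added in 05-port/types/stack.go above. With Base(app)
// and Next(mwA).Next(mwB), packets are received through mwB -> mwA -> app, while the
// app sends through mwA -> mwB -> channel keeper:
//
//	stack := porttypes.NewIBCStackBuilder(app.IBCKeeper.ChannelKeeper).
//		Base(transferIBCModule).
//		Next(middlewareA).
//		Next(middlewareB).
//		Build()
//	ibcRouter.AddRoute(ibctransfertypes.ModuleName, stack)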
-func (proof MerkleProof) VerifyMembership(specs []*ics23.ProofSpec, root exported.Root, path exported.Path, value []byte) error { +func (p MerkleProof) VerifyMembership(specs []*ics23.ProofSpec, root exported.Root, path exported.Path, value []byte) error { mpath, ok := path.(v2.MerklePath) if !ok { return errorsmod.Wrapf(ErrInvalidProof, "path %v is not of type MerklePath", path) } - if err := validateVerificationArgs(proof, mpath, specs, root); err != nil { + if err := validateVerificationArgs(p, mpath, specs, root); err != nil { return err } @@ -99,25 +99,25 @@ func (proof MerkleProof) VerifyMembership(specs []*ics23.ProofSpec, root exporte // Since every proof in chain is a membership proof we can use verifyChainedMembershipProof from index 0 // to validate entire proof - return verifyChainedMembershipProof(root.GetHash(), specs, proof.Proofs, mpath, value, 0) + return verifyChainedMembershipProof(root.GetHash(), specs, p.Proofs, mpath, value, 0) } // VerifyNonMembership verifies the absence of a merkle proof against the given root and path. // VerifyNonMembership verifies a chained proof where the absence of a given path is proven // at the lowest subtree and then each subtree's inclusion is proved up to the final root. -func (proof MerkleProof) VerifyNonMembership(specs []*ics23.ProofSpec, root exported.Root, path exported.Path) error { +func (p MerkleProof) VerifyNonMembership(specs []*ics23.ProofSpec, root exported.Root, path exported.Path) error { mpath, ok := path.(v2.MerklePath) if !ok { return errorsmod.Wrapf(ErrInvalidProof, "path %v is not of type MerkleProof", path) } - if err := validateVerificationArgs(proof, mpath, specs, root); err != nil { + if err := validateVerificationArgs(p, mpath, specs, root); err != nil { return err } // VerifyNonMembership will verify the absence of key in lowest subtree, and then chain inclusion proofs // of all subroots up to final root - subroot, err := proof.Proofs[0].Calculate() + subroot, err := p.Proofs[0].Calculate() if err != nil { return errorsmod.Wrapf(ErrInvalidProof, "could not calculate root for proof index 0, merkle tree is likely empty. %v", err) } @@ -127,9 +127,9 @@ func (proof MerkleProof) VerifyNonMembership(specs []*ics23.ProofSpec, root expo return errorsmod.Wrapf(ErrInvalidProof, "could not retrieve key bytes for key: %s", mpath.KeyPath[len(mpath.KeyPath)-1]) } - np := proof.Proofs[0].GetNonexist() + np := p.Proofs[0].GetNonexist() if np == nil { - return errorsmod.Wrapf(ErrInvalidProof, "commitment proof must be non-existence proof for verifying non-membership. got: %T", proof.Proofs[0]) + return errorsmod.Wrapf(ErrInvalidProof, "commitment proof must be non-existence proof for verifying non-membership. 
got: %T", p.Proofs[0]) } if err := np.Verify(specs[0], subroot, key); err != nil { @@ -137,7 +137,7 @@ func (proof MerkleProof) VerifyNonMembership(specs []*ics23.ProofSpec, root expo } // Verify chained membership proof starting from index 1 with value = subroot - return verifyChainedMembershipProof(root.GetHash(), specs, proof.Proofs, mpath, subroot, 1) + return verifyChainedMembershipProof(root.GetHash(), specs, p.Proofs, mpath, subroot, 1) } // verifyChainedMembershipProof takes a list of proofs and specs and verifies each proof sequentially ensuring that the value is committed to diff --git a/modules/core/23-commitment/types/merkle_test.go b/modules/core/23-commitment/types/merkle_test.go index a7208c08fb2..c9bde3dddba 100644 --- a/modules/core/23-commitment/types/merkle_test.go +++ b/modules/core/23-commitment/types/merkle_test.go @@ -12,20 +12,20 @@ import ( "github.com/cosmos/ibc-go/v10/modules/core/23-commitment/types/v2" ) -func (suite *MerkleTestSuite) TestVerifyMembership() { - suite.iavlStore.Set([]byte("MYKEY"), []byte("MYVALUE")) - cid := suite.store.Commit() +func (s *MerkleTestSuite) TestVerifyMembership() { + s.iavlStore.Set([]byte("MYKEY"), []byte("MYVALUE")) + cid := s.store.Commit() - res, err := suite.store.Query(&storetypes.RequestQuery{ - Path: fmt.Sprintf("/%s/key", suite.storeKey.Name()), // required path to get key/value+proof + res, err := s.store.Query(&storetypes.RequestQuery{ + Path: fmt.Sprintf("/%s/key", s.storeKey.Name()), // required path to get key/value+proof Data: []byte("MYKEY"), Prove: true, }) - require.NoError(suite.T(), err) - require.NotNil(suite.T(), res.ProofOps) + s.Require().NoError(err) + s.Require().NotNil(res.ProofOps) proof, err := types.ConvertProofs(res.ProofOps) - require.NoError(suite.T(), err) + s.Require().NoError(err) cases := []struct { name string @@ -35,17 +35,17 @@ func (suite *MerkleTestSuite) TestVerifyMembership() { malleate func() shouldPass bool }{ - {"valid proof", cid.Hash, [][]byte{[]byte(suite.storeKey.Name()), []byte("MYKEY")}, []byte("MYVALUE"), func() {}, true}, // valid proof - {"wrong value", cid.Hash, [][]byte{[]byte(suite.storeKey.Name()), []byte("MYKEY")}, []byte("WRONGVALUE"), func() {}, false}, // invalid proof with wrong value - {"nil value", cid.Hash, [][]byte{[]byte(suite.storeKey.Name()), []byte("MYKEY")}, []byte(nil), func() {}, false}, // invalid proof with nil value - {"wrong key", cid.Hash, [][]byte{[]byte(suite.storeKey.Name()), []byte("NOTMYKEY")}, []byte("MYVALUE"), func() {}, false}, // invalid proof with wrong key - {"wrong path 1", cid.Hash, [][]byte{[]byte(suite.storeKey.Name()), []byte("MYKEY"), []byte("MYKEY")}, []byte("MYVALUE"), func() {}, false}, // invalid proof with wrong path - {"wrong path 2", cid.Hash, [][]byte{[]byte(suite.storeKey.Name())}, []byte("MYVALUE"), func() {}, false}, // invalid proof with wrong path - {"wrong path 3", cid.Hash, [][]byte{[]byte("MYKEY")}, []byte("MYVALUE"), func() {}, false}, // invalid proof with wrong path - {"wrong storekey", cid.Hash, [][]byte{[]byte("otherStoreKey"), []byte("MYKEY")}, []byte("MYVALUE"), func() {}, false}, // invalid proof with wrong store prefix - {"wrong root", []byte("WRONGROOT"), [][]byte{[]byte(suite.storeKey.Name()), []byte("MYKEY")}, []byte("MYVALUE"), func() {}, false}, // invalid proof with wrong root - {"nil root", []byte(nil), [][]byte{[]byte(suite.storeKey.Name()), []byte("MYKEY")}, []byte("MYVALUE"), func() {}, false}, // invalid proof with nil root - {"proof is wrong length", cid.Hash, 
[][]byte{[]byte(suite.storeKey.Name()), []byte("MYKEY")}, []byte("MYVALUE"), func() { + {"valid proof", cid.Hash, [][]byte{[]byte(s.storeKey.Name()), []byte("MYKEY")}, []byte("MYVALUE"), func() {}, true}, // valid proof + {"wrong value", cid.Hash, [][]byte{[]byte(s.storeKey.Name()), []byte("MYKEY")}, []byte("WRONGVALUE"), func() {}, false}, // invalid proof with wrong value + {"nil value", cid.Hash, [][]byte{[]byte(s.storeKey.Name()), []byte("MYKEY")}, []byte(nil), func() {}, false}, // invalid proof with nil value + {"wrong key", cid.Hash, [][]byte{[]byte(s.storeKey.Name()), []byte("NOTMYKEY")}, []byte("MYVALUE"), func() {}, false}, // invalid proof with wrong key + {"wrong path 1", cid.Hash, [][]byte{[]byte(s.storeKey.Name()), []byte("MYKEY"), []byte("MYKEY")}, []byte("MYVALUE"), func() {}, false}, // invalid proof with wrong path + {"wrong path 2", cid.Hash, [][]byte{[]byte(s.storeKey.Name())}, []byte("MYVALUE"), func() {}, false}, // invalid proof with wrong path + {"wrong path 3", cid.Hash, [][]byte{[]byte("MYKEY")}, []byte("MYVALUE"), func() {}, false}, // invalid proof with wrong path + {"wrong storekey", cid.Hash, [][]byte{[]byte("otherStoreKey"), []byte("MYKEY")}, []byte("MYVALUE"), func() {}, false}, // invalid proof with wrong store prefix + {"wrong root", []byte("WRONGROOT"), [][]byte{[]byte(s.storeKey.Name()), []byte("MYKEY")}, []byte("MYVALUE"), func() {}, false}, // invalid proof with wrong root + {"nil root", []byte(nil), [][]byte{[]byte(s.storeKey.Name()), []byte("MYKEY")}, []byte("MYVALUE"), func() {}, false}, // invalid proof with nil root + {"proof is wrong length", cid.Hash, [][]byte{[]byte(s.storeKey.Name()), []byte("MYKEY")}, []byte("MYVALUE"), func() { proof = types.MerkleProof{ Proofs: proof.Proofs[1:], } @@ -54,7 +54,7 @@ func (suite *MerkleTestSuite) TestVerifyMembership() { } for i, tc := range cases { - suite.Run(tc.name, func() { + s.Run(tc.name, func() { tc.malleate() root := types.NewMerkleRoot(tc.root) @@ -64,30 +64,30 @@ func (suite *MerkleTestSuite) TestVerifyMembership() { if tc.shouldPass { // nolint: scopelint - suite.Require().NoError(err, "test case %d should have passed", i) + s.Require().NoError(err, "test case %d should have passed", i) } else { // nolint: scopelint - suite.Require().Error(err, "test case %d should have failed", i) + s.Require().Error(err, "test case %d should have failed", i) } }) } } -func (suite *MerkleTestSuite) TestVerifyNonMembership() { - suite.iavlStore.Set([]byte("MYKEY"), []byte("MYVALUE")) - cid := suite.store.Commit() +func (s *MerkleTestSuite) TestVerifyNonMembership() { + s.iavlStore.Set([]byte("MYKEY"), []byte("MYVALUE")) + cid := s.store.Commit() // Get Proof - res, err := suite.store.Query(&storetypes.RequestQuery{ - Path: fmt.Sprintf("/%s/key", suite.storeKey.Name()), // required path to get key/value+proof + res, err := s.store.Query(&storetypes.RequestQuery{ + Path: fmt.Sprintf("/%s/key", s.storeKey.Name()), // required path to get key/value+proof Data: []byte("MYABSENTKEY"), Prove: true, }) - require.NoError(suite.T(), err) - require.NotNil(suite.T(), res.ProofOps) + s.Require().NoError(err) + s.Require().NotNil(res.ProofOps) proof, err := types.ConvertProofs(res.ProofOps) - require.NoError(suite.T(), err) + s.Require().NoError(err) cases := []struct { name string @@ -96,16 +96,16 @@ func (suite *MerkleTestSuite) TestVerifyNonMembership() { malleate func() shouldPass bool }{ - {"valid proof", cid.Hash, [][]byte{[]byte(suite.storeKey.Name()), []byte("MYABSENTKEY")}, func() {}, true}, // valid proof - {"wrong 
key", cid.Hash, [][]byte{[]byte(suite.storeKey.Name()), []byte("MYKEY")}, func() {}, false}, // invalid proof with existent key - {"wrong path 1", cid.Hash, [][]byte{[]byte(suite.storeKey.Name()), []byte("MYKEY"), []byte("MYABSENTKEY")}, func() {}, false}, // invalid proof with wrong path - {"wrong path 2", cid.Hash, [][]byte{[]byte(suite.storeKey.Name()), []byte("MYABSENTKEY"), []byte("MYKEY")}, func() {}, false}, // invalid proof with wrong path - {"wrong path 3", cid.Hash, [][]byte{[]byte(suite.storeKey.Name())}, func() {}, false}, // invalid proof with wrong path - {"wrong path 4", cid.Hash, [][]byte{[]byte("MYABSENTKEY")}, func() {}, false}, // invalid proof with wrong path - {"wrong storeKey", cid.Hash, [][]byte{[]byte("otherStoreKey"), []byte("MYABSENTKEY")}, func() {}, false}, // invalid proof with wrong store prefix - {"wrong root", []byte("WRONGROOT"), [][]byte{[]byte(suite.storeKey.Name()), []byte("MYABSENTKEY")}, func() {}, false}, // invalid proof with wrong root - {"nil root", []byte(nil), [][]byte{[]byte(suite.storeKey.Name()), []byte("MYABSENTKEY")}, func() {}, false}, // invalid proof with nil root - {"proof is wrong length", cid.Hash, [][]byte{[]byte(suite.storeKey.Name()), []byte("MYKEY")}, func() { + {"valid proof", cid.Hash, [][]byte{[]byte(s.storeKey.Name()), []byte("MYABSENTKEY")}, func() {}, true}, // valid proof + {"wrong key", cid.Hash, [][]byte{[]byte(s.storeKey.Name()), []byte("MYKEY")}, func() {}, false}, // invalid proof with existent key + {"wrong path 1", cid.Hash, [][]byte{[]byte(s.storeKey.Name()), []byte("MYKEY"), []byte("MYABSENTKEY")}, func() {}, false}, // invalid proof with wrong path + {"wrong path 2", cid.Hash, [][]byte{[]byte(s.storeKey.Name()), []byte("MYABSENTKEY"), []byte("MYKEY")}, func() {}, false}, // invalid proof with wrong path + {"wrong path 3", cid.Hash, [][]byte{[]byte(s.storeKey.Name())}, func() {}, false}, // invalid proof with wrong path + {"wrong path 4", cid.Hash, [][]byte{[]byte("MYABSENTKEY")}, func() {}, false}, // invalid proof with wrong path + {"wrong storeKey", cid.Hash, [][]byte{[]byte("otherStoreKey"), []byte("MYABSENTKEY")}, func() {}, false}, // invalid proof with wrong store prefix + {"wrong root", []byte("WRONGROOT"), [][]byte{[]byte(s.storeKey.Name()), []byte("MYABSENTKEY")}, func() {}, false}, // invalid proof with wrong root + {"nil root", []byte(nil), [][]byte{[]byte(s.storeKey.Name()), []byte("MYABSENTKEY")}, func() {}, false}, // invalid proof with nil root + {"proof is wrong length", cid.Hash, [][]byte{[]byte(s.storeKey.Name()), []byte("MYKEY")}, func() { proof = types.MerkleProof{ Proofs: proof.Proofs[1:], } @@ -114,7 +114,7 @@ func (suite *MerkleTestSuite) TestVerifyNonMembership() { } for i, tc := range cases { - suite.Run(tc.name, func() { + s.Run(tc.name, func() { tc.malleate() root := types.NewMerkleRoot(tc.root) @@ -124,10 +124,10 @@ func (suite *MerkleTestSuite) TestVerifyNonMembership() { if tc.shouldPass { // nolint: scopelint - suite.Require().NoError(err, "test case %d should have passed", i) + s.Require().NoError(err, "test case %d should have passed", i) } else { // nolint: scopelint - suite.Require().Error(err, "test case %d should have failed", i) + s.Require().Error(err, "test case %d should have failed", i) } }) } diff --git a/modules/core/23-commitment/types/utils_test.go b/modules/core/23-commitment/types/utils_test.go index 48314d32318..daabdac04de 100644 --- a/modules/core/23-commitment/types/utils_test.go +++ b/modules/core/23-commitment/types/utils_test.go @@ -3,8 +3,6 @@ package 
types_test import ( "fmt" - "github.com/stretchr/testify/require" - storetypes "cosmossdk.io/store/types" "github.com/cometbft/cometbft/proto/tendermint/crypto" @@ -12,13 +10,13 @@ import ( "github.com/cosmos/ibc-go/v10/modules/core/23-commitment/types" ) -func (suite *MerkleTestSuite) TestConvertProofs() { - suite.iavlStore.Set([]byte("MYKEY"), []byte("MYVALUE")) - cid := suite.store.Commit() +func (s *MerkleTestSuite) TestConvertProofs() { + s.iavlStore.Set([]byte("MYKEY"), []byte("MYVALUE")) + cid := s.store.Commit() root := types.NewMerkleRoot(cid.Hash) - existsPath := types.NewMerklePath([]byte(suite.storeKey.Name()), []byte("MYKEY")) - nonexistPath := types.NewMerklePath([]byte(suite.storeKey.Name()), []byte("NOTMYKEY")) + existsPath := types.NewMerklePath([]byte(s.storeKey.Name()), []byte("MYKEY")) + nonexistPath := types.NewMerklePath([]byte(s.storeKey.Name()), []byte("NOTMYKEY")) value := []byte("MYVALUE") var proofOps *crypto.ProofOps @@ -31,13 +29,13 @@ { "success for ExistenceProof", func() { - res, err := suite.store.Query(&storetypes.RequestQuery{ - Path: fmt.Sprintf("/%s/key", suite.storeKey.Name()), // required path to get key/value+proof + res, err := s.store.Query(&storetypes.RequestQuery{ + Path: fmt.Sprintf("/%s/key", s.storeKey.Name()), // required path to get key/value+proof Data: []byte("MYKEY"), Prove: true, }) - require.NoError(suite.T(), err) - require.NotNil(suite.T(), res.ProofOps) + s.Require().NoError(err) + s.Require().NotNil(res.ProofOps) proofOps = res.ProofOps }, @@ -46,13 +44,13 @@ { "success for NonexistenceProof", func() { - res, err := suite.store.Query(&storetypes.RequestQuery{ - Path: fmt.Sprintf("/%s/key", suite.storeKey.Name()), + res, err := s.store.Query(&storetypes.RequestQuery{ + Path: fmt.Sprintf("/%s/key", s.storeKey.Name()), Data: []byte("NOTMYKEY"), Prove: true, }) - require.NoError(suite.T(), err) - require.NotNil(suite.T(), res.ProofOps) + s.Require().NoError(err) + s.Require().NotNil(res.ProofOps) proofOps = res.ProofOps }, @@ -68,13 +66,13 @@ { "proof op data is nil", func() { - res, err := suite.store.Query(&storetypes.RequestQuery{ - Path: fmt.Sprintf("/%s/key", suite.storeKey.Name()), // required path to get key/value+proof + res, err := s.store.Query(&storetypes.RequestQuery{ + Path: fmt.Sprintf("/%s/key", s.storeKey.Name()), // required path to get key/value+proof Data: []byte("MYKEY"), Prove: true, }) - require.NoError(suite.T(), err) - require.NotNil(suite.T(), res.ProofOps) + s.Require().NoError(err) + s.Require().NotNil(res.ProofOps) proofOps = res.ProofOps proofOps.Ops[0].Data = nil @@ -84,22 +82,21 @@ } for _, tc := range testcases { - tc.malleate() proof, err := types.ConvertProofs(proofOps) if tc.expErr == nil { - suite.Require().NoError(err, "ConvertProofs unexpectedly returned error for case: %s", tc.name) + s.Require().NoError(err, "ConvertProofs unexpectedly returned error for case: %s", tc.name) if tc.keyExists { err := proof.VerifyMembership(types.GetSDKSpecs(), &root, existsPath, value) - suite.Require().NoError(err, "converted proof failed to verify membership for case: %s", tc.name) + s.Require().NoError(err, "converted proof failed to verify membership for case: %s", tc.name) } else { err := proof.VerifyNonMembership(types.GetSDKSpecs(), &root, nonexistPath) - suite.Require().NoError(err, "converted
proof failed to verify non-membership for case: %s", tc.name) + s.Require().NoError(err, "converted proof failed to verify non-membership for case: %s", tc.name) } } else { - suite.Require().Error(err, "ConvertProofs passed on invalid case for case: %s", tc.name) - suite.Require().ErrorIs(err, tc.expErr, "unexpected error returned for case: %s", tc.name) + s.Require().Error(err, "ConvertProofs passed on invalid case for case: %s", tc.name) + s.Require().ErrorIs(err, tc.expErr, "unexpected error returned for case: %s", tc.name) } } } diff --git a/modules/core/23-commitment/types/v2/merkle_test.go b/modules/core/23-commitment/types/v2/merkle_test.go index f0701bd10f9..b12bfac89a1 100644 --- a/modules/core/23-commitment/types/v2/merkle_test.go +++ b/modules/core/23-commitment/types/v2/merkle_test.go @@ -1,46 +1,48 @@ -package v2 +package v2_test import ( "errors" "testing" "github.com/stretchr/testify/require" + + commitmenttypesv2 "github.com/cosmos/ibc-go/v10/modules/core/23-commitment/types/v2" ) func TestMerklePathValidation(t *testing.T) { cases := []struct { name string - path MerklePath + path commitmenttypesv2.MerklePath expPrefixErr error expPathErr error }{ { "success: prefix and path", - NewMerklePath([]byte("key1"), []byte("key2")), + commitmenttypesv2.NewMerklePath([]byte("key1"), []byte("key2")), nil, nil, }, { "prefix with empty last key", - NewMerklePath([]byte("key1"), []byte("")), + commitmenttypesv2.NewMerklePath([]byte("key1"), []byte("")), nil, errors.New("key at index 1 cannot be empty"), }, { "prefix with single empty key", - NewMerklePath([]byte("")), + commitmenttypesv2.NewMerklePath([]byte("")), nil, errors.New("key at index 0 cannot be empty"), }, { "failure: empty path", - NewMerklePath(), + commitmenttypesv2.NewMerklePath(), errors.New("path cannot have length 0"), errors.New("path cannot have length 0"), }, { "failure: prefix with empty first key", - NewMerklePath([]byte(""), []byte("key2")), + commitmenttypesv2.NewMerklePath([]byte(""), []byte("key2")), errors.New("key at index 0 cannot be empty"), errors.New("key at index 0 cannot be empty"), }, diff --git a/modules/core/24-host/channel_keys.go b/modules/core/24-host/channel_keys.go index 89969892ac9..d7b980349a9 100644 --- a/modules/core/24-host/channel_keys.go +++ b/modules/core/24-host/channel_keys.go @@ -12,9 +12,9 @@ const ( // ChannelKey returns the store key for a particular channel func ChannelKey(portID, channelID string) []byte { - return fmt.Appendf(nil, "%s/%s", KeyChannelEndPrefix, channelPath(portID, channelID)) + return fmt.Appendf(nil, "%s/%s", KeyChannelEndPrefix, ChannelPath(portID, channelID)) } -func channelPath(portID, channelID string) string { +func ChannelPath(portID, channelID string) string { return fmt.Sprintf("%s/%s/%s/%s", KeyPortPrefix, portID, KeyChannelPrefix, channelID) } diff --git a/modules/core/24-host/packet_keys.go b/modules/core/24-host/packet_keys.go index 4d567818d78..2a236640cb2 100644 --- a/modules/core/24-host/packet_keys.go +++ b/modules/core/24-host/packet_keys.go @@ -15,23 +15,20 @@ const ( // ICS04 // The following paths are the keys to the store as defined in https://github.com/cosmos/ibc/tree/master/spec/core/ics-004-channel-and-packet-semantics#store-paths - -// NextSequenceSendKey returns the store key for the send sequence of a particular -// channel binded to a specific port. 
-func NextSequenceSendKey(portID, channelID string) []byte { - return fmt.Appendf(nil, "%s/%s", KeyNextSeqSendPrefix, channelPath(portID, channelID)) -} +// NOTE: NextSequenceSendKey has been removed and we only use the IBC v2 key in this repo. +// We can safely do this since the NextSequenceSendKey is not proven to counterparties, thus we can use any key format we want. +// so long as they do not collide with other keys in the store. // NextSequenceRecvKey returns the store key for the receive sequence of a particular // channel binded to a specific port func NextSequenceRecvKey(portID, channelID string) []byte { - return fmt.Appendf(nil, "%s/%s", KeyNextSeqRecvPrefix, channelPath(portID, channelID)) + return fmt.Appendf(nil, "%s/%s", KeyNextSeqRecvPrefix, ChannelPath(portID, channelID)) } // NextSequenceAckKey returns the store key for the acknowledgement sequence of // a particular channel binded to a specific port. func NextSequenceAckKey(portID, channelID string) []byte { - return fmt.Appendf(nil, "%s/%s", KeyNextSeqAckPrefix, channelPath(portID, channelID)) + return fmt.Appendf(nil, "%s/%s", KeyNextSeqAckPrefix, ChannelPath(portID, channelID)) } // PacketCommitmentKey returns the store key of under which a packet commitment @@ -42,7 +39,7 @@ func PacketCommitmentKey(portID, channelID string, sequence uint64) []byte { // PacketCommitmentPrefixKey defines the prefix for commitments to packet data fields store path. func PacketCommitmentPrefixKey(portID, channelID string) []byte { - return fmt.Appendf(nil, "%s/%s/%s", KeyPacketCommitmentPrefix, channelPath(portID, channelID), KeySequencePrefix) + return fmt.Appendf(nil, "%s/%s/%s", KeyPacketCommitmentPrefix, ChannelPath(portID, channelID), KeySequencePrefix) } // PacketAcknowledgementKey returns the store key of under which a packet @@ -53,18 +50,18 @@ func PacketAcknowledgementKey(portID, channelID string, sequence uint64) []byte // PacketAcknowledgementPrefixKey defines the prefix for commitments to packet data fields store path. 
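// For illustration (port and channel identifiers assumed), the now-exported ChannelPath
// helper above produces the shared path segment of these v1 store keys:
//
//	host.ChannelPath("transfer", "channel-0")         // "ports/transfer/channels/channel-0"
//	host.NextSequenceRecvKey("transfer", "channel-0") // "nextSequenceRecv/ports/transfer/channels/channel-0"
//	host.ChannelKey("transfer", "channel-0")          // "channelEnds/ports/transfer/channels/channel-0"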
func PacketAcknowledgementPrefixKey(portID, channelID string) []byte { - return fmt.Appendf(nil, "%s/%s/%s", KeyPacketAckPrefix, channelPath(portID, channelID), KeySequencePrefix) + return fmt.Appendf(nil, "%s/%s/%s", KeyPacketAckPrefix, ChannelPath(portID, channelID), KeySequencePrefix) } // PacketReceiptKey returns the store key of under which a packet // receipt is stored func PacketReceiptKey(portID, channelID string, sequence uint64) []byte { - return fmt.Appendf(nil, "%s/%s/%s", KeyPacketReceiptPrefix, channelPath(portID, channelID), sequencePath(sequence)) + return fmt.Appendf(nil, "%s/%s/%s", KeyPacketReceiptPrefix, ChannelPath(portID, channelID), sequencePath(sequence)) } // RecvStartSequenceKey returns the store key for the recv start sequence of a particular channel func RecvStartSequenceKey(portID, channelID string) []byte { - return fmt.Appendf(nil, "%s/%s", KeyRecvStartSequence, channelPath(portID, channelID)) + return fmt.Appendf(nil, "%s/%s", KeyRecvStartSequence, ChannelPath(portID, channelID)) } func sequencePath(sequence uint64) string { diff --git a/modules/core/24-host/parse_test.go b/modules/core/24-host/parse_test.go index 72c1a13a443..630d7cc2114 100644 --- a/modules/core/24-host/parse_test.go +++ b/modules/core/24-host/parse_test.go @@ -38,7 +38,6 @@ func TestParseIdentifier(t *testing.T) { } for _, tc := range testCases { - seq, err := host.ParseIdentifier(tc.identifier, tc.prefix) require.Equal(t, tc.expSeq, seq) @@ -95,7 +94,7 @@ func TestMustParseConnectionPath(t *testing.T) { if tc.expErr == nil { require.NotPanics(t, func() { connID := host.MustParseConnectionPath(tc.path) - require.Equal(t, connID, tc.expected) + require.Equal(t, tc.expected, connID) }) } else { require.Panics(t, func() { diff --git a/modules/core/24-host/v2/packet_keys.go b/modules/core/24-host/v2/packet_keys.go index 9a811985eff..94bb2c606bd 100644 --- a/modules/core/24-host/v2/packet_keys.go +++ b/modules/core/24-host/v2/packet_keys.go @@ -10,6 +10,7 @@ const ( PacketCommitmentBasePrefix = byte(1) PacketReceiptBasePrefix = byte(2) PacketAcknowledgementBasePrefix = byte(3) + KeyNextSeqSendPrefix = "nextSequenceSend/" ) // PacketCommitmentPrefixKey returns the store key prefix under which packet commitments for a particular channel are stored. @@ -50,5 +51,5 @@ func PacketAcknowledgementKey(channelID string, sequence uint64) []byte { // NextSequenceSendKey returns the store key for the next sequence send of a given channelID. 
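// Rough sketch (simplified; helper names as used in the aliasing tests above) of what
// the v11 store migration is expected to do with this key: move the counter written
// under the old per-port v1 key to the v2 key, which is keyed by channel/client ID only.
//
//	bz := store.Get(v11.NextSequenceSendV1Key(portID, channelID))
//	store.Set(hostv2.NextSequenceSendKey(channelID), bz)
//	store.Delete(v11.NextSequenceSendV1Key(portID, channelID))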
func NextSequenceSendKey(channelID string) []byte { - return fmt.Appendf(nil, "nextSequenceSend/%s", channelID) + return fmt.Appendf(nil, "%s/%s", KeyNextSeqSendPrefix, channelID) } diff --git a/modules/core/24-host/validate_test.go b/modules/core/24-host/validate_test.go index c8130813f9f..b157091bec4 100644 --- a/modules/core/24-host/validate_test.go +++ b/modules/core/24-host/validate_test.go @@ -1,4 +1,4 @@ -package host +package host_test import ( "errors" @@ -7,6 +7,8 @@ import ( "testing" "github.com/stretchr/testify/require" + + host "github.com/cosmos/ibc-go/v10/modules/core/24-host" ) // 195 characters @@ -35,11 +37,10 @@ func TestDefaultIdentifierValidator(t *testing.T) { } for _, tc := range testCases { - - err := ClientIdentifierValidator(tc.id) - err1 := ConnectionIdentifierValidator(tc.id) - err2 := ChannelIdentifierValidator(tc.id) - err3 := PortIdentifierValidator(tc.id) + err := host.ClientIdentifierValidator(tc.id) + err1 := host.ConnectionIdentifierValidator(tc.id) + err2 := host.ChannelIdentifierValidator(tc.id) + err3 := host.PortIdentifierValidator(tc.id) if tc.expErr == nil { require.NoError(t, err, tc.msg) require.NoError(t, err1, tc.msg) @@ -71,8 +72,7 @@ func TestPortIdentifierValidator(t *testing.T) { } for _, tc := range testCases { - - err := PortIdentifierValidator(tc.id) + err := host.PortIdentifierValidator(tc.id) if tc.expErr == nil { require.NoError(t, err, tc.msg) } else { @@ -104,8 +104,7 @@ func TestPathValidator(t *testing.T) { } for _, tc := range testCases { - - f := NewPathValidator(func(path string) error { + f := host.NewPathValidator(func(path string) error { return nil }) @@ -122,7 +121,7 @@ func TestPathValidator(t *testing.T) { } func TestCustomPathValidator(t *testing.T) { - validateFn := NewPathValidator(func(path string) error { + validateFn := host.NewPathValidator(func(path string) error { if !strings.HasPrefix(path, "id_") { return fmt.Errorf("identifier %s must start with 'id_", path) } @@ -141,7 +140,6 @@ func TestCustomPathValidator(t *testing.T) { } for _, tc := range testCases { - err := validateFn(tc.id) if tc.expErr == nil { require.NoError(t, err, tc.msg) diff --git a/modules/core/ante/ante.go b/modules/core/ante/ante.go index 87cb0787b5c..9fbdea5d25e 100644 --- a/modules/core/ante/ante.go +++ b/modules/core/ante/ante.go @@ -1,6 +1,8 @@ package ante import ( + "errors" + errorsmod "cosmossdk.io/errors" sdk "github.com/cosmos/cosmos-sdk/types" @@ -144,10 +146,10 @@ func (rrd RedundantRelayDecorator) recvPacketCheckTx(ctx sdk.Context, msg *chann cacheCtx, writeFn := ctx.CacheContext() _, err := rrd.k.ChannelKeeper.RecvPacket(cacheCtx, msg.Packet, msg.ProofCommitment, msg.ProofHeight) - switch err { - case nil: + switch { + case err == nil: writeFn() - case channeltypes.ErrNoOpMsg: + case errors.Is(err, channeltypes.ErrNoOpMsg): return &channeltypes.MsgRecvPacketResponse{Result: channeltypes.NOOP}, nil default: return nil, errorsmod.Wrap(err, "receive packet verification failed") @@ -164,10 +166,10 @@ func (rrd RedundantRelayDecorator) recvPacketReCheckTx(ctx sdk.Context, msg *cha cacheCtx, writeFn := ctx.CacheContext() err := rrd.k.ChannelKeeper.RecvPacketReCheckTx(cacheCtx, msg.Packet) - switch err { - case nil: + switch { + case err == nil: writeFn() - case channeltypes.ErrNoOpMsg: + case errors.Is(err, channeltypes.ErrNoOpMsg): return &channeltypes.MsgRecvPacketResponse{Result: channeltypes.NOOP}, nil default: return nil, errorsmod.Wrap(err, "receive packet verification failed") diff --git a/modules/core/ante/ante_test.go 
b/modules/core/ante/ante_test.go index ef736e7ed1c..83dd6414c6c 100644 --- a/modules/core/ante/ante_test.go +++ b/modules/core/ante/ante_test.go @@ -11,6 +11,12 @@ import ( codectypes "github.com/cosmos/cosmos-sdk/codec/types" sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/cometbft/cometbft/crypto/tmhash" + cmtproto "github.com/cometbft/cometbft/proto/tendermint/types" + cmtprotoversion "github.com/cometbft/cometbft/proto/tendermint/version" + cmttypes "github.com/cometbft/cometbft/types" + cmtversion "github.com/cometbft/cometbft/version" + clienttypes "github.com/cosmos/ibc-go/v10/modules/core/02-client/types" channeltypes "github.com/cosmos/ibc-go/v10/modules/core/04-channel/types" channeltypesv2 "github.com/cosmos/ibc-go/v10/modules/core/04-channel/v2/types" @@ -37,15 +43,15 @@ type AnteTestSuite struct { } // SetupTest creates a coordinator with 2 test chains. -func (suite *AnteTestSuite) SetupTest() { - suite.coordinator = ibctesting.NewCoordinator(suite.T(), 2) - suite.chainA = suite.coordinator.GetChain(ibctesting.GetChainID(1)) - suite.chainB = suite.coordinator.GetChain(ibctesting.GetChainID(2)) +func (s *AnteTestSuite) SetupTest() { + s.coordinator = ibctesting.NewCoordinator(s.T(), 2) + s.chainA = s.coordinator.GetChain(ibctesting.GetChainID(1)) + s.chainB = s.coordinator.GetChain(ibctesting.GetChainID(2)) // commit some blocks so that QueryProof returns valid proof (cannot return valid query if height <= 1) - suite.coordinator.CommitNBlocks(suite.chainA, 2) - suite.coordinator.CommitNBlocks(suite.chainB, 2) - suite.path = ibctesting.NewPath(suite.chainA, suite.chainB) - suite.path.Setup() + s.coordinator.CommitNBlocks(s.chainA, 2) + s.coordinator.CommitNBlocks(s.chainB, 2) + s.path = ibctesting.NewPath(s.chainA, s.chainB) + s.path.Setup() } // TestAnteTestSuite runs all the tests within this package. @@ -54,171 +60,171 @@ func TestAnteTestSuite(t *testing.T) { } // createRecvPacketMessage creates a RecvPacket message for a packet sent from chain A to chain B. 
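// Note on the ante handler change above: errors.Is matches channeltypes.ErrNoOpMsg even
// if the keeper wraps it with extra context, which the previous value-based switch would
// miss. Illustrative only:
//
//	wrapped := errorsmod.Wrap(channeltypes.ErrNoOpMsg, "packet already relayed")
//	// wrapped != channeltypes.ErrNoOpMsg, so `case channeltypes.ErrNoOpMsg:` would not match it
//	errors.Is(wrapped, channeltypes.ErrNoOpMsg) // true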
-func (suite *AnteTestSuite) createRecvPacketMessage(isRedundant bool) *channeltypes.MsgRecvPacket { - sequence, err := suite.path.EndpointA.SendPacket(clienttypes.NewHeight(2, 0), 0, ibctesting.MockPacketData) - suite.Require().NoError(err) +func (s *AnteTestSuite) createRecvPacketMessage(isRedundant bool) *channeltypes.MsgRecvPacket { + sequence, err := s.path.EndpointA.SendPacket(clienttypes.NewHeight(2, 0), 0, ibctesting.MockPacketData) + s.Require().NoError(err) packet := channeltypes.NewPacket(ibctesting.MockPacketData, sequence, - suite.path.EndpointA.ChannelConfig.PortID, suite.path.EndpointA.ChannelID, - suite.path.EndpointB.ChannelConfig.PortID, suite.path.EndpointB.ChannelID, + s.path.EndpointA.ChannelConfig.PortID, s.path.EndpointA.ChannelID, + s.path.EndpointB.ChannelConfig.PortID, s.path.EndpointB.ChannelID, clienttypes.NewHeight(2, 0), 0) if isRedundant { - err = suite.path.EndpointB.RecvPacket(packet) - suite.Require().NoError(err) + err = s.path.EndpointB.RecvPacket(packet) + s.Require().NoError(err) } - err = suite.path.EndpointB.UpdateClient() - suite.Require().NoError(err) + err = s.path.EndpointB.UpdateClient() + s.Require().NoError(err) packetKey := host.PacketCommitmentKey(packet.GetSourcePort(), packet.GetSourceChannel(), packet.GetSequence()) - proof, proofHeight := suite.chainA.QueryProof(packetKey) + proof, proofHeight := s.chainA.QueryProof(packetKey) - return channeltypes.NewMsgRecvPacket(packet, proof, proofHeight, suite.path.EndpointA.Chain.SenderAccount.GetAddress().String()) + return channeltypes.NewMsgRecvPacket(packet, proof, proofHeight, s.path.EndpointA.Chain.SenderAccount.GetAddress().String()) } // createRecvPacketMessageV2 creates a V2 RecvPacket message for a packet sent from chain A to chain B. -func (suite *AnteTestSuite) createRecvPacketMessageV2(isRedundant bool) *channeltypesv2.MsgRecvPacket { - packet, err := suite.path.EndpointA.MsgSendPacket(suite.chainA.GetTimeoutTimestampSecs(), mock.NewMockPayload(mock.ModuleNameA, mock.ModuleNameB)) - suite.Require().NoError(err) +func (s *AnteTestSuite) createRecvPacketMessageV2(isRedundant bool) *channeltypesv2.MsgRecvPacket { + packet, err := s.path.EndpointA.MsgSendPacket(s.chainA.GetTimeoutTimestampSecs(), mock.NewMockPayload(mock.ModuleNameA, mock.ModuleNameB)) + s.Require().NoError(err) if isRedundant { - err = suite.path.EndpointB.MsgRecvPacket(packet) - suite.Require().NoError(err) + err = s.path.EndpointB.MsgRecvPacket(packet) + s.Require().NoError(err) } - err = suite.path.EndpointB.UpdateClient() - suite.Require().NoError(err) + err = s.path.EndpointB.UpdateClient() + s.Require().NoError(err) packetKey := hostv2.PacketCommitmentKey(packet.SourceClient, packet.Sequence) - proof, proofHeight := suite.chainA.QueryProof(packetKey) + proof, proofHeight := s.chainA.QueryProof(packetKey) - return channeltypesv2.NewMsgRecvPacket(packet, proof, proofHeight, suite.path.EndpointA.Chain.SenderAccount.GetAddress().String()) + return channeltypesv2.NewMsgRecvPacket(packet, proof, proofHeight, s.path.EndpointA.Chain.SenderAccount.GetAddress().String()) } // createAcknowledgementMessage creates an Acknowledgement message for a packet sent from chain B to chain A. 
-func (suite *AnteTestSuite) createAcknowledgementMessage(isRedundant bool) sdk.Msg { - sequence, err := suite.path.EndpointB.SendPacket(clienttypes.NewHeight(2, 0), 0, ibctesting.MockPacketData) - suite.Require().NoError(err) +func (s *AnteTestSuite) createAcknowledgementMessage(isRedundant bool) sdk.Msg { + sequence, err := s.path.EndpointB.SendPacket(clienttypes.NewHeight(2, 0), 0, ibctesting.MockPacketData) + s.Require().NoError(err) packet := channeltypes.NewPacket(ibctesting.MockPacketData, sequence, - suite.path.EndpointB.ChannelConfig.PortID, suite.path.EndpointB.ChannelID, - suite.path.EndpointA.ChannelConfig.PortID, suite.path.EndpointA.ChannelID, + s.path.EndpointB.ChannelConfig.PortID, s.path.EndpointB.ChannelID, + s.path.EndpointA.ChannelConfig.PortID, s.path.EndpointA.ChannelID, clienttypes.NewHeight(2, 0), 0) - err = suite.path.EndpointA.RecvPacket(packet) - suite.Require().NoError(err) + err = s.path.EndpointA.RecvPacket(packet) + s.Require().NoError(err) if isRedundant { - err = suite.path.EndpointB.AcknowledgePacket(packet, ibctesting.MockAcknowledgement) - suite.Require().NoError(err) + err = s.path.EndpointB.AcknowledgePacket(packet, ibctesting.MockAcknowledgement) + s.Require().NoError(err) } packetKey := host.PacketAcknowledgementKey(packet.GetDestPort(), packet.GetDestChannel(), packet.GetSequence()) - proof, proofHeight := suite.chainA.QueryProof(packetKey) + proof, proofHeight := s.chainA.QueryProof(packetKey) - return channeltypes.NewMsgAcknowledgement(packet, ibctesting.MockAcknowledgement, proof, proofHeight, suite.path.EndpointA.Chain.SenderAccount.GetAddress().String()) + return channeltypes.NewMsgAcknowledgement(packet, ibctesting.MockAcknowledgement, proof, proofHeight, s.path.EndpointA.Chain.SenderAccount.GetAddress().String()) } // createAcknowledgementMessageV2 creates a V2 Acknowledgement message for a packet sent from chain B to chain A. -func (suite *AnteTestSuite) createAcknowledgementMessageV2(isRedundant bool) *channeltypesv2.MsgAcknowledgement { - packet, err := suite.path.EndpointB.MsgSendPacket(suite.chainB.GetTimeoutTimestampSecs(), mock.NewMockPayload(mock.ModuleNameA, mock.ModuleNameB)) - suite.Require().NoError(err) +func (s *AnteTestSuite) createAcknowledgementMessageV2(isRedundant bool) *channeltypesv2.MsgAcknowledgement { + packet, err := s.path.EndpointB.MsgSendPacket(s.chainB.GetTimeoutTimestampSecs(), mock.NewMockPayload(mock.ModuleNameA, mock.ModuleNameB)) + s.Require().NoError(err) - err = suite.path.EndpointA.MsgRecvPacket(packet) - suite.Require().NoError(err) + err = s.path.EndpointA.MsgRecvPacket(packet) + s.Require().NoError(err) ack := channeltypesv2.Acknowledgement{AppAcknowledgements: [][]byte{mock.MockRecvPacketResult.Acknowledgement}} if isRedundant { - err = suite.path.EndpointB.MsgAcknowledgePacket(packet, ack) - suite.Require().NoError(err) + err = s.path.EndpointB.MsgAcknowledgePacket(packet, ack) + s.Require().NoError(err) } packetKey := hostv2.PacketAcknowledgementKey(packet.DestinationClient, packet.Sequence) - proof, proofHeight := suite.chainA.QueryProof(packetKey) + proof, proofHeight := s.chainA.QueryProof(packetKey) - return channeltypesv2.NewMsgAcknowledgement(packet, ack, proof, proofHeight, suite.path.EndpointA.Chain.SenderAccount.GetAddress().String()) + return channeltypesv2.NewMsgAcknowledgement(packet, ack, proof, proofHeight, s.path.EndpointA.Chain.SenderAccount.GetAddress().String()) } // createTimeoutMessage creates an Timeout message for a packet sent from chain B to chain A. 
-func (suite *AnteTestSuite) createTimeoutMessage(isRedundant bool) sdk.Msg { - height := suite.chainA.LatestCommittedHeader.GetHeight() +func (s *AnteTestSuite) createTimeoutMessage(isRedundant bool) sdk.Msg { + height := s.chainA.LatestCommittedHeader.GetHeight() timeoutHeight := clienttypes.NewHeight(height.GetRevisionNumber(), height.GetRevisionHeight()+1) - sequence, err := suite.path.EndpointB.SendPacket(timeoutHeight, 0, ibctesting.MockPacketData) - suite.Require().NoError(err) + sequence, err := s.path.EndpointB.SendPacket(timeoutHeight, 0, ibctesting.MockPacketData) + s.Require().NoError(err) - suite.coordinator.CommitNBlocks(suite.chainA, 3) + s.coordinator.CommitNBlocks(s.chainA, 3) - err = suite.path.EndpointB.UpdateClient() - suite.Require().NoError(err) + err = s.path.EndpointB.UpdateClient() + s.Require().NoError(err) packet := channeltypes.NewPacket(ibctesting.MockPacketData, sequence, - suite.path.EndpointB.ChannelConfig.PortID, suite.path.EndpointB.ChannelID, - suite.path.EndpointA.ChannelConfig.PortID, suite.path.EndpointA.ChannelID, + s.path.EndpointB.ChannelConfig.PortID, s.path.EndpointB.ChannelID, + s.path.EndpointA.ChannelConfig.PortID, s.path.EndpointA.ChannelID, timeoutHeight, 0) if isRedundant { - err = suite.path.EndpointB.TimeoutPacket(packet) - suite.Require().NoError(err) + err = s.path.EndpointB.TimeoutPacket(packet) + s.Require().NoError(err) } packetKey := host.PacketReceiptKey(packet.GetSourcePort(), packet.GetSourceChannel(), packet.GetSequence()) - proof, proofHeight := suite.chainA.QueryProof(packetKey) + proof, proofHeight := s.chainA.QueryProof(packetKey) - return channeltypes.NewMsgTimeout(packet, sequence, proof, proofHeight, suite.path.EndpointA.Chain.SenderAccount.GetAddress().String()) + return channeltypes.NewMsgTimeout(packet, sequence, proof, proofHeight, s.path.EndpointA.Chain.SenderAccount.GetAddress().String()) } // createTimeoutMessageV2 creates a V2 Timeout message for a packet sent from chain B to chain A. 
-func (suite *AnteTestSuite) createTimeoutMessageV2(isRedundant bool) *channeltypesv2.MsgTimeout { - timeoutTimestamp := uint64(suite.chainB.GetContext().BlockTime().Add(time.Second).Unix()) - packet, err := suite.path.EndpointB.MsgSendPacket(timeoutTimestamp, mock.NewMockPayload(mock.ModuleNameA, mock.ModuleNameB)) - suite.Require().NoError(err) +func (s *AnteTestSuite) createTimeoutMessageV2(isRedundant bool) *channeltypesv2.MsgTimeout { + timeoutTimestamp := uint64(s.chainB.GetContext().BlockTime().Add(time.Second).Unix()) + packet, err := s.path.EndpointB.MsgSendPacket(timeoutTimestamp, mock.NewMockPayload(mock.ModuleNameA, mock.ModuleNameB)) + s.Require().NoError(err) - suite.coordinator.IncrementTimeBy(time.Hour) - err = suite.path.EndpointB.UpdateClient() - suite.Require().NoError(err) + s.coordinator.IncrementTimeBy(time.Hour) + err = s.path.EndpointB.UpdateClient() + s.Require().NoError(err) if isRedundant { - err = suite.path.EndpointB.MsgTimeoutPacket(packet) - suite.Require().NoError(err) + err = s.path.EndpointB.MsgTimeoutPacket(packet) + s.Require().NoError(err) } packetKey := hostv2.PacketReceiptKey(packet.SourceClient, packet.Sequence) - proof, proofHeight := suite.chainA.QueryProof(packetKey) + proof, proofHeight := s.chainA.QueryProof(packetKey) - return channeltypesv2.NewMsgTimeout(packet, proof, proofHeight, suite.path.EndpointA.Chain.SenderAccount.GetAddress().String()) + return channeltypesv2.NewMsgTimeout(packet, proof, proofHeight, s.path.EndpointA.Chain.SenderAccount.GetAddress().String()) } // createTimeoutOnCloseMessage creates an TimeoutOnClose message for a packet sent from chain B to chain A. -func (suite *AnteTestSuite) createTimeoutOnCloseMessage(isRedundant bool) sdk.Msg { - height := suite.chainA.LatestCommittedHeader.GetHeight() +func (s *AnteTestSuite) createTimeoutOnCloseMessage(isRedundant bool) sdk.Msg { + height := s.chainA.LatestCommittedHeader.GetHeight() timeoutHeight := clienttypes.NewHeight(height.GetRevisionNumber(), height.GetRevisionHeight()+1) - sequence, err := suite.path.EndpointB.SendPacket(timeoutHeight, 0, ibctesting.MockPacketData) - suite.Require().NoError(err) - suite.path.EndpointA.UpdateChannel(func(channel *channeltypes.Channel) { channel.State = channeltypes.CLOSED }) + sequence, err := s.path.EndpointB.SendPacket(timeoutHeight, 0, ibctesting.MockPacketData) + s.Require().NoError(err) + s.path.EndpointA.UpdateChannel(func(channel *channeltypes.Channel) { channel.State = channeltypes.CLOSED }) packet := channeltypes.NewPacket(ibctesting.MockPacketData, sequence, - suite.path.EndpointB.ChannelConfig.PortID, suite.path.EndpointB.ChannelID, - suite.path.EndpointA.ChannelConfig.PortID, suite.path.EndpointA.ChannelID, + s.path.EndpointB.ChannelConfig.PortID, s.path.EndpointB.ChannelID, + s.path.EndpointA.ChannelConfig.PortID, s.path.EndpointA.ChannelID, timeoutHeight, 0) if isRedundant { - err = suite.path.EndpointB.TimeoutOnClose(packet) - suite.Require().NoError(err) + err = s.path.EndpointB.TimeoutOnClose(packet) + s.Require().NoError(err) } packetKey := host.PacketReceiptKey(packet.GetDestPort(), packet.GetDestChannel(), packet.GetSequence()) - proof, proofHeight := suite.chainA.QueryProof(packetKey) + proof, proofHeight := s.chainA.QueryProof(packetKey) channelKey := host.ChannelKey(packet.GetDestPort(), packet.GetDestChannel()) - closedProof, _ := suite.chainA.QueryProof(channelKey) + closedProof, _ := s.chainA.QueryProof(channelKey) - return channeltypes.NewMsgTimeoutOnClose(packet, 1, proof, closedProof, proofHeight, 
suite.path.EndpointA.Chain.SenderAccount.GetAddress().String()) + return channeltypes.NewMsgTimeoutOnClose(packet, 1, proof, closedProof, proofHeight, s.path.EndpointA.Chain.SenderAccount.GetAddress().String()) } -func (suite *AnteTestSuite) createUpdateClientMessage() sdk.Msg { - endpoint := suite.path.EndpointB +func (s *AnteTestSuite) createUpdateClientMessage() sdk.Msg { + endpoint := s.path.EndpointB // ensure counterparty has committed state endpoint.Chain.Coordinator.CommitBlock(endpoint.Counterparty.Chain) @@ -242,74 +248,113 @@ func (suite *AnteTestSuite) createUpdateClientMessage() sdk.Msg { return msg } -func (suite *AnteTestSuite) TestAnteDecoratorCheckTx() { +func (s *AnteTestSuite) createMaliciousUpdateClientMessage() sdk.Msg { + endpoint := s.path.EndpointB + + // ensure counterparty has committed state + endpoint.Chain.Coordinator.CommitBlock(endpoint.Counterparty.Chain) + + trustedHeight, ok := endpoint.GetClientLatestHeight().(clienttypes.Height) + if !ok { + require.True(endpoint.Chain.TB, ok, "bad height conversion") + } + + currentHeader := endpoint.Counterparty.Chain.LatestCommittedHeader.Header + + validators := endpoint.Counterparty.Chain.Vals.Validators + signers := endpoint.Counterparty.Chain.Signers + + // Signers must be in the same order as + // the validators when signing. + signerArr := make([]cmttypes.PrivValidator, len(validators)) + for i, v := range validators { + signerArr[i] = signers[v.Address.String()] + } + cmtTrustedVals, ok := endpoint.Counterparty.Chain.TrustedValidators[trustedHeight.RevisionHeight] + if !ok { + require.True(endpoint.Chain.TB, ok, "no validators") + } + + maliciousHeader, err := createMaliciousTMHeader(endpoint.Counterparty.Chain.ChainID, int64(trustedHeight.RevisionHeight+1), trustedHeight, currentHeader.Time, endpoint.Counterparty.Chain.Vals, cmtTrustedVals, signerArr, currentHeader) + require.NoError(endpoint.Chain.TB, err, "invalid header update") + + msg, err := clienttypes.NewMsgUpdateClient( + endpoint.ClientID, maliciousHeader, + endpoint.Chain.SenderAccount.GetAddress().String(), + ) + require.NoError(endpoint.Chain.TB, err, "msg update") + + return msg +} + +func (s *AnteTestSuite) TestAnteDecoratorCheckTx() { testCases := []struct { name string - malleate func(suite *AnteTestSuite) []sdk.Msg + malleate func(s *AnteTestSuite) []sdk.Msg expError error }{ { "success on one new RecvPacket message", - func(suite *AnteTestSuite) []sdk.Msg { + func(s *AnteTestSuite) []sdk.Msg { // the RecvPacket message has not been submitted to the chain yet, so it will succeed - return []sdk.Msg{suite.createRecvPacketMessage(false)} + return []sdk.Msg{s.createRecvPacketMessage(false)} }, nil, }, { "success on one new V2 RecvPacket message", - func(suite *AnteTestSuite) []sdk.Msg { - suite.path.SetupV2() + func(s *AnteTestSuite) []sdk.Msg { + s.path.SetupV2() // the RecvPacket message has not been submitted to the chain yet, so it will succeed - return []sdk.Msg{suite.createRecvPacketMessageV2(false)} + return []sdk.Msg{s.createRecvPacketMessageV2(false)} }, nil, }, { "success on one new Acknowledgement message", - func(suite *AnteTestSuite) []sdk.Msg { + func(s *AnteTestSuite) []sdk.Msg { // the Acknowledgement message has not been submitted to the chain yet, so it will succeed - return []sdk.Msg{suite.createAcknowledgementMessage(false)} + return []sdk.Msg{s.createAcknowledgementMessage(false)} }, nil, }, { "success on one new V2 Acknowledgement message", - func(suite *AnteTestSuite) []sdk.Msg { - suite.path.SetupV2() + func(s 
*AnteTestSuite) []sdk.Msg { + s.path.SetupV2() // the Acknowledgement message has not been submitted to the chain yet, so it will succeed - return []sdk.Msg{suite.createAcknowledgementMessageV2(false)} + return []sdk.Msg{s.createAcknowledgementMessageV2(false)} }, nil, }, { "success on one new Timeout message", - func(suite *AnteTestSuite) []sdk.Msg { + func(s *AnteTestSuite) []sdk.Msg { // the Timeout message has not been submitted to the chain yet, so it will succeed - return []sdk.Msg{suite.createTimeoutMessage(false)} + return []sdk.Msg{s.createTimeoutMessage(false)} }, nil, }, { "success on one new Timeout V2 message", - func(suite *AnteTestSuite) []sdk.Msg { - suite.path.SetupV2() + func(s *AnteTestSuite) []sdk.Msg { + s.path.SetupV2() // the Timeout message has not been submitted to the chain yet, so it will succeed - return []sdk.Msg{suite.createTimeoutMessageV2(false)} + return []sdk.Msg{s.createTimeoutMessageV2(false)} }, nil, }, { "success on one new TimeoutOnClose message", - func(suite *AnteTestSuite) []sdk.Msg { + func(s *AnteTestSuite) []sdk.Msg { // the TimeoutOnClose message has not been submitted to the chain yet, so it will succeed - return []sdk.Msg{suite.createTimeoutOnCloseMessage(false)} + return []sdk.Msg{s.createTimeoutOnCloseMessage(false)} }, nil, }, { "success on three new messages of each type", - func(suite *AnteTestSuite) []sdk.Msg { + func(s *AnteTestSuite) []sdk.Msg { var msgs []sdk.Msg // none of the messages of each type has been submitted to the chain yet, @@ -318,18 +363,20 @@ func (suite *AnteTestSuite) TestAnteDecoratorCheckTx() { // from A to B for i := 1; i <= 3; i++ { - msgs = append(msgs, suite.createRecvPacketMessage(false)) + msgs = append(msgs, s.createRecvPacketMessage(false)) } // from B to A for i := 1; i <= 9; i++ { switch { case i >= 1 && i <= 3: - msgs = append(msgs, suite.createAcknowledgementMessage(false)) + msgs = append(msgs, s.createAcknowledgementMessage(false)) case i >= 4 && i <= 6: - msgs = append(msgs, suite.createTimeoutMessage(false)) + msgs = append(msgs, s.createTimeoutMessage(false)) case i >= 7 && i <= 9: - msgs = append(msgs, suite.createTimeoutOnCloseMessage(false)) + msgs = append(msgs, s.createTimeoutOnCloseMessage(false)) + default: + // This should never be reached as the loop covers all cases } } return msgs @@ -338,7 +385,7 @@ func (suite *AnteTestSuite) TestAnteDecoratorCheckTx() { }, { "success on three redundant messages of RecvPacket, Acknowledgement and TimeoutOnClose, and one new Timeout message", - func(suite *AnteTestSuite) []sdk.Msg { + func(s *AnteTestSuite) []sdk.Msg { var msgs []sdk.Msg // we pass three messages of RecvPacket, Acknowledgement and TimeoutOnClose that @@ -348,18 +395,20 @@ func (suite *AnteTestSuite) TestAnteDecoratorCheckTx() { // from A to B for i := 1; i <= 3; i++ { - msgs = append(msgs, suite.createRecvPacketMessage(true)) + msgs = append(msgs, s.createRecvPacketMessage(true)) } // from B to A for i := 1; i <= 7; i++ { switch { case i >= 1 && i <= 3: - msgs = append(msgs, suite.createAcknowledgementMessage(true)) + msgs = append(msgs, s.createAcknowledgementMessage(true)) case i == 4: - msgs = append(msgs, suite.createTimeoutMessage(false)) + msgs = append(msgs, s.createTimeoutMessage(false)) case i >= 5 && i <= 7: - msgs = append(msgs, suite.createTimeoutOnCloseMessage(true)) + msgs = append(msgs, s.createTimeoutOnCloseMessage(true)) + default: + // This should never be reached as the loop covers all cases } } return msgs @@ -368,7 +417,7 @@ func (suite *AnteTestSuite) 
TestAnteDecoratorCheckTx() { }, { "success on one new message and two redundant messages of each type", - func(suite *AnteTestSuite) []sdk.Msg { + func(s *AnteTestSuite) []sdk.Msg { var msgs []sdk.Msg // For each type there is a new message and two messages that are redundant @@ -378,18 +427,20 @@ func (suite *AnteTestSuite) TestAnteDecoratorCheckTx() { // from A to B for i := 1; i <= 3; i++ { - msgs = append(msgs, suite.createRecvPacketMessage(i != 2)) + msgs = append(msgs, s.createRecvPacketMessage(i != 2)) } // from B to A for i := 1; i <= 9; i++ { switch { case i >= 1 && i <= 3: - msgs = append(msgs, suite.createAcknowledgementMessage(i != 2)) + msgs = append(msgs, s.createAcknowledgementMessage(i != 2)) case i >= 4 && i <= 6: - msgs = append(msgs, suite.createTimeoutMessage(i != 5)) + msgs = append(msgs, s.createTimeoutMessage(i != 5)) case i >= 7 && i <= 9: - msgs = append(msgs, suite.createTimeoutOnCloseMessage(i != 8)) + msgs = append(msgs, s.createTimeoutOnCloseMessage(i != 8)) + default: + // This should never be reached as the loop covers all cases } } return msgs @@ -398,93 +449,80 @@ func (suite *AnteTestSuite) TestAnteDecoratorCheckTx() { }, { "success on one new UpdateClient message", - func(suite *AnteTestSuite) []sdk.Msg { - return []sdk.Msg{suite.createUpdateClientMessage()} + func(s *AnteTestSuite) []sdk.Msg { + return []sdk.Msg{s.createUpdateClientMessage()} }, nil, }, { "success on three new UpdateClient messages", - func(suite *AnteTestSuite) []sdk.Msg { - return []sdk.Msg{suite.createUpdateClientMessage(), suite.createUpdateClientMessage(), suite.createUpdateClientMessage()} + func(s *AnteTestSuite) []sdk.Msg { + return []sdk.Msg{s.createUpdateClientMessage(), s.createUpdateClientMessage(), s.createUpdateClientMessage()} }, nil, }, { "success on three new Updateclient messages and one new RecvPacket message", - func(suite *AnteTestSuite) []sdk.Msg { + func(s *AnteTestSuite) []sdk.Msg { return []sdk.Msg{ - suite.createUpdateClientMessage(), - suite.createUpdateClientMessage(), - suite.createUpdateClientMessage(), - suite.createRecvPacketMessage(false), - } - }, - nil, - }, - { - "success on three redundant RecvPacket messages and one SubmitMisbehaviour message", - func(suite *AnteTestSuite) []sdk.Msg { - msgs := []sdk.Msg{suite.createUpdateClientMessage()} - - for i := 1; i <= 3; i++ { - msgs = append(msgs, suite.createRecvPacketMessage(true)) + s.createUpdateClientMessage(), + s.createUpdateClientMessage(), + s.createUpdateClientMessage(), + s.createRecvPacketMessage(false), } - - // append non packet and update message to msgs to ensure multimsg tx should pass - msgs = append(msgs, &clienttypes.MsgSubmitMisbehaviour{}) //nolint:staticcheck // we're using the deprecated message for testing - return msgs }, nil, }, { "success on app callback error, app callbacks are skipped for performance", - func(suite *AnteTestSuite) []sdk.Msg { - suite.chainB.GetSimApp().IBCMockModule.IBCApp.OnRecvPacket = func( + func(s *AnteTestSuite) []sdk.Msg { + s.chainB.GetSimApp().IBCMockModule.IBCApp.OnRecvPacket = func( ctx sdk.Context, channelVersion string, packet channeltypes.Packet, relayer sdk.AccAddress, ) exported.Acknowledgement { panic(errors.New("failed OnRecvPacket mock callback")) } // the RecvPacket message has not been submitted to the chain yet, so it will succeed - return []sdk.Msg{suite.createRecvPacketMessage(false)} + return []sdk.Msg{s.createRecvPacketMessage(false)} }, nil, }, { "no success on one redundant RecvPacket message", - func(suite *AnteTestSuite) []sdk.Msg { 
- return []sdk.Msg{suite.createRecvPacketMessage(true)} + func(s *AnteTestSuite) []sdk.Msg { + return []sdk.Msg{s.createRecvPacketMessage(true)} }, channeltypes.ErrRedundantTx, }, { "no success on one redundant V2 RecvPacket message", - func(suite *AnteTestSuite) []sdk.Msg { - suite.path.SetupV2() - return []sdk.Msg{suite.createRecvPacketMessageV2(true)} + func(s *AnteTestSuite) []sdk.Msg { + s.path.SetupV2() + return []sdk.Msg{s.createRecvPacketMessageV2(true)} }, channeltypes.ErrRedundantTx, }, { "no success on three redundant messages of each type", - func(suite *AnteTestSuite) []sdk.Msg { + func(s *AnteTestSuite) []sdk.Msg { var msgs []sdk.Msg // from A to B for i := 1; i <= 3; i++ { - msgs = append(msgs, suite.createRecvPacketMessage(true)) + msgs = append(msgs, s.createRecvPacketMessage(true)) } // from B to A for i := 1; i <= 9; i++ { switch { case i >= 1 && i <= 3: - msgs = append(msgs, suite.createAcknowledgementMessage(true)) + msgs = append(msgs, s.createAcknowledgementMessage(true)) case i >= 4 && i <= 6: - msgs = append(msgs, suite.createTimeoutMessage(true)) + msgs = append(msgs, s.createTimeoutMessage(true)) case i >= 7 && i <= 9: - msgs = append(msgs, suite.createTimeoutOnCloseMessage(true)) + msgs = append(msgs, s.createTimeoutOnCloseMessage(true)) + default: + // This should never be reached as the loop covers all cases } } return msgs @@ -493,11 +531,24 @@ func (suite *AnteTestSuite) TestAnteDecoratorCheckTx() { }, { "no success on one new UpdateClient message and three redundant RecvPacket messages", - func(suite *AnteTestSuite) []sdk.Msg { - msgs := []sdk.Msg{suite.createUpdateClientMessage()} + func(s *AnteTestSuite) []sdk.Msg { + msgs := []sdk.Msg{s.createUpdateClientMessage()} + + for i := 1; i <= 3; i++ { + msgs = append(msgs, s.createRecvPacketMessage(true)) + } + + return msgs + }, + channeltypes.ErrRedundantTx, + }, + { + "no success on one new malicious UpdateClient message and three redundant RecvPacket messages", + func(s *AnteTestSuite) []sdk.Msg { + msgs := []sdk.Msg{s.createMaliciousUpdateClientMessage()} for i := 1; i <= 3; i++ { - msgs = append(msgs, suite.createRecvPacketMessage(true)) + msgs = append(msgs, s.createRecvPacketMessage(true)) } return msgs @@ -506,9 +557,9 @@ func (suite *AnteTestSuite) TestAnteDecoratorCheckTx() { }, { "no success on one new UpdateClient message: invalid client identifier", - func(suite *AnteTestSuite) []sdk.Msg { + func(s *AnteTestSuite) []sdk.Msg { clientMsg, err := codectypes.NewAnyWithValue(&ibctm.Header{}) - suite.Require().NoError(err) + s.Require().NoError(err) msgs := []sdk.Msg{&clienttypes.MsgUpdateClient{ClientId: ibctesting.InvalidID, ClientMessage: clientMsg}} return msgs @@ -517,9 +568,9 @@ func (suite *AnteTestSuite) TestAnteDecoratorCheckTx() { }, { "no success on one new UpdateClient message: client module not found", - func(suite *AnteTestSuite) []sdk.Msg { + func(s *AnteTestSuite) []sdk.Msg { clientMsg, err := codectypes.NewAnyWithValue(&ibctm.Header{}) - suite.Require().NoError(err) + s.Require().NoError(err) msgs := []sdk.Msg{&clienttypes.MsgUpdateClient{ClientId: clienttypes.FormatClientIdentifier("08-wasm", 1), ClientMessage: clientMsg}} return msgs @@ -528,34 +579,36 @@ func (suite *AnteTestSuite) TestAnteDecoratorCheckTx() { }, { "no success on one new UpdateClient message: no consensus state for trusted height", - func(suite *AnteTestSuite) []sdk.Msg { + func(s *AnteTestSuite) []sdk.Msg { clientMsg, err := codectypes.NewAnyWithValue(&ibctm.Header{TrustedHeight: clienttypes.NewHeight(1, 10000)}) - 
suite.Require().NoError(err) + s.Require().NoError(err) - msgs := []sdk.Msg{&clienttypes.MsgUpdateClient{ClientId: suite.path.EndpointA.ClientID, ClientMessage: clientMsg}} + msgs := []sdk.Msg{&clienttypes.MsgUpdateClient{ClientId: s.path.EndpointA.ClientID, ClientMessage: clientMsg}} return msgs }, clienttypes.ErrConsensusStateNotFound, }, { "no success on three new UpdateClient messages and three redundant messages of each type", - func(suite *AnteTestSuite) []sdk.Msg { - msgs := []sdk.Msg{suite.createUpdateClientMessage(), suite.createUpdateClientMessage(), suite.createUpdateClientMessage()} + func(s *AnteTestSuite) []sdk.Msg { + msgs := []sdk.Msg{s.createUpdateClientMessage(), s.createUpdateClientMessage(), s.createUpdateClientMessage()} // from A to B for i := 1; i <= 3; i++ { - msgs = append(msgs, suite.createRecvPacketMessage(true)) + msgs = append(msgs, s.createRecvPacketMessage(true)) } // from B to A for i := 1; i <= 9; i++ { switch { case i >= 1 && i <= 3: - msgs = append(msgs, suite.createAcknowledgementMessage(true)) + msgs = append(msgs, s.createAcknowledgementMessage(true)) case i >= 4 && i <= 6: - msgs = append(msgs, suite.createTimeoutMessage(true)) + msgs = append(msgs, s.createTimeoutMessage(true)) case i >= 7 && i <= 9: - msgs = append(msgs, suite.createTimeoutOnCloseMessage(true)) + msgs = append(msgs, s.createTimeoutOnCloseMessage(true)) + default: + // This should never be reached as the loop covers all cases } } return msgs @@ -564,14 +617,14 @@ func (suite *AnteTestSuite) TestAnteDecoratorCheckTx() { }, { "no success on one new message and one invalid message", - func(suite *AnteTestSuite) []sdk.Msg { + func(s *AnteTestSuite) []sdk.Msg { packet := channeltypes.NewPacket(ibctesting.MockPacketData, 2, - suite.path.EndpointA.ChannelConfig.PortID, suite.path.EndpointA.ChannelID, - suite.path.EndpointB.ChannelConfig.PortID, suite.path.EndpointB.ChannelID, + s.path.EndpointA.ChannelConfig.PortID, s.path.EndpointA.ChannelID, + s.path.EndpointB.ChannelConfig.PortID, s.path.EndpointB.ChannelID, clienttypes.NewHeight(2, 0), 0) return []sdk.Msg{ - suite.createRecvPacketMessage(false), + s.createRecvPacketMessage(false), channeltypes.NewMsgRecvPacket(packet, []byte("proof"), clienttypes.NewHeight(1, 1), "signer"), } }, @@ -579,23 +632,23 @@ func (suite *AnteTestSuite) TestAnteDecoratorCheckTx() { }, { "no success on one new message and one redundant message in the same block", - func(suite *AnteTestSuite) []sdk.Msg { - msg := suite.createRecvPacketMessage(false) + func(s *AnteTestSuite) []sdk.Msg { + msg := s.createRecvPacketMessage(false) // We want to be able to run check tx with the non-redundant message without // committing it to a block, so that the when check tx runs with the redundant // message they are both in the same block - k := suite.chainB.App.GetIBCKeeper() + k := s.chainB.App.GetIBCKeeper() decorator := ante.NewRedundantRelayDecorator(k) - checkCtx := suite.chainB.GetContext().WithIsCheckTx(true) - next := func(ctx sdk.Context, tx sdk.Tx, simulate bool) (newCtx sdk.Context, err error) { return ctx, nil } - txBuilder := suite.chainB.TxConfig.NewTxBuilder() + checkCtx := s.chainB.GetContext().WithIsCheckTx(true) + next := func(ctx sdk.Context, tx sdk.Tx, simulate bool) (sdk.Context, error) { return ctx, nil } + txBuilder := s.chainB.TxConfig.NewTxBuilder() err := txBuilder.SetMsgs([]sdk.Msg{msg}...) 
- suite.Require().NoError(err) + s.Require().NoError(err) tx := txBuilder.GetTx() _, err = decorator.AnteHandle(checkCtx, tx, false, next) - suite.Require().NoError(err) + s.Require().NoError(err) return []sdk.Msg{msg} }, @@ -604,76 +657,76 @@ func (suite *AnteTestSuite) TestAnteDecoratorCheckTx() { } for _, tc := range testCases { - suite.Run(tc.name, func() { + s.Run(tc.name, func() { // reset suite - suite.SetupTest() + s.SetupTest() - k := suite.chainB.App.GetIBCKeeper() + k := s.chainB.App.GetIBCKeeper() decorator := ante.NewRedundantRelayDecorator(k) - msgs := tc.malleate(suite) + msgs := tc.malleate(s) - deliverCtx := suite.chainB.GetContext().WithIsCheckTx(false) - checkCtx := suite.chainB.GetContext().WithIsCheckTx(true) + deliverCtx := s.chainB.GetContext().WithIsCheckTx(false) + checkCtx := s.chainB.GetContext().WithIsCheckTx(true) // create multimsg tx - txBuilder := suite.chainB.TxConfig.NewTxBuilder() + txBuilder := s.chainB.TxConfig.NewTxBuilder() err := txBuilder.SetMsgs(msgs...) - suite.Require().NoError(err) + s.Require().NoError(err) tx := txBuilder.GetTx() - next := func(ctx sdk.Context, tx sdk.Tx, simulate bool) (newCtx sdk.Context, err error) { return ctx, nil } + next := func(ctx sdk.Context, tx sdk.Tx, simulate bool) (sdk.Context, error) { return ctx, nil } _, err = decorator.AnteHandle(deliverCtx, tx, false, next) - suite.Require().NoError(err, "antedecorator should not error on DeliverTx") + s.Require().NoError(err, "antedecorator should not error on DeliverTx") _, err = decorator.AnteHandle(checkCtx, tx, false, next) if tc.expError == nil { - suite.Require().NoError(err, "non-strict decorator did not pass as expected") + s.Require().NoError(err, "non-strict decorator did not pass as expected") } else { - suite.Require().ErrorIs(err, tc.expError, "non-strict antehandler did not return error as expected") + s.Require().ErrorIs(err, tc.expError, "non-strict antehandler did not return error as expected") } }) } } -func (suite *AnteTestSuite) TestAnteDecoratorReCheckTx() { +func (s *AnteTestSuite) TestAnteDecoratorReCheckTx() { testCases := []struct { name string - malleate func(suite *AnteTestSuite) []sdk.Msg + malleate func(s *AnteTestSuite) []sdk.Msg expError error }{ { "success on one new RecvPacket message", - func(suite *AnteTestSuite) []sdk.Msg { + func(s *AnteTestSuite) []sdk.Msg { // the RecvPacket message has not been submitted to the chain yet, so it will succeed - return []sdk.Msg{suite.createRecvPacketMessage(false)} + return []sdk.Msg{s.createRecvPacketMessage(false)} }, nil, }, { "success on one new V2 RecvPacket message", - func(suite *AnteTestSuite) []sdk.Msg { - suite.path.SetupV2() + func(s *AnteTestSuite) []sdk.Msg { + s.path.SetupV2() // the RecvPacket message has not been submitted to the chain yet, so it will succeed - return []sdk.Msg{suite.createRecvPacketMessageV2(false)} + return []sdk.Msg{s.createRecvPacketMessageV2(false)} }, nil, }, { "success on one redundant and one new RecvPacket message", - func(suite *AnteTestSuite) []sdk.Msg { + func(s *AnteTestSuite) []sdk.Msg { return []sdk.Msg{ - suite.createRecvPacketMessage(true), - suite.createRecvPacketMessage(false), + s.createRecvPacketMessage(true), + s.createRecvPacketMessage(false), } }, nil, }, { "success on invalid proof (proof checks occur in checkTx)", - func(suite *AnteTestSuite) []sdk.Msg { - msg := suite.createRecvPacketMessage(false) + func(s *AnteTestSuite) []sdk.Msg { + msg := s.createRecvPacketMessage(false) msg.ProofCommitment = []byte("invalid-proof") return []sdk.Msg{msg} 
}, @@ -681,65 +734,119 @@ func (suite *AnteTestSuite) TestAnteDecoratorReCheckTx() { }, { "success on app callback error, app callbacks are skipped for performance", - func(suite *AnteTestSuite) []sdk.Msg { - suite.chainB.GetSimApp().IBCMockModule.IBCApp.OnRecvPacket = func( + func(s *AnteTestSuite) []sdk.Msg { + s.chainB.GetSimApp().IBCMockModule.IBCApp.OnRecvPacket = func( ctx sdk.Context, channelVersion string, packet channeltypes.Packet, relayer sdk.AccAddress, ) exported.Acknowledgement { panic(errors.New("failed OnRecvPacket mock callback")) } // the RecvPacket message has not been submitted to the chain yet, so it will succeed - return []sdk.Msg{suite.createRecvPacketMessage(false)} + return []sdk.Msg{s.createRecvPacketMessage(false)} }, nil, }, { "no success on one redundant RecvPacket message", - func(suite *AnteTestSuite) []sdk.Msg { - return []sdk.Msg{suite.createRecvPacketMessage(true)} + func(s *AnteTestSuite) []sdk.Msg { + return []sdk.Msg{s.createRecvPacketMessage(true)} }, channeltypes.ErrRedundantTx, }, { "no success on one redundant V2 RecvPacket message", - func(suite *AnteTestSuite) []sdk.Msg { - suite.path.SetupV2() - return []sdk.Msg{suite.createRecvPacketMessageV2(true)} + func(s *AnteTestSuite) []sdk.Msg { + s.path.SetupV2() + return []sdk.Msg{s.createRecvPacketMessageV2(true)} }, channeltypes.ErrRedundantTx, }, } for _, tc := range testCases { - suite.Run(tc.name, func() { + s.Run(tc.name, func() { // reset suite - suite.SetupTest() + s.SetupTest() - k := suite.chainB.App.GetIBCKeeper() + k := s.chainB.App.GetIBCKeeper() decorator := ante.NewRedundantRelayDecorator(k) - msgs := tc.malleate(suite) + msgs := tc.malleate(s) - deliverCtx := suite.chainB.GetContext().WithIsCheckTx(false) - reCheckCtx := suite.chainB.GetContext().WithIsReCheckTx(true) + deliverCtx := s.chainB.GetContext().WithIsCheckTx(false) + reCheckCtx := s.chainB.GetContext().WithIsReCheckTx(true) // create multimsg tx - txBuilder := suite.chainB.TxConfig.NewTxBuilder() + txBuilder := s.chainB.TxConfig.NewTxBuilder() err := txBuilder.SetMsgs(msgs...) - suite.Require().NoError(err) + s.Require().NoError(err) tx := txBuilder.GetTx() - next := func(ctx sdk.Context, tx sdk.Tx, simulate bool) (newCtx sdk.Context, err error) { return ctx, nil } - + next := func(ctx sdk.Context, tx sdk.Tx, simulate bool) (sdk.Context, error) { return ctx, nil } _, err = decorator.AnteHandle(deliverCtx, tx, false, next) - suite.Require().NoError(err, "antedecorator should not error on DeliverTx") + s.Require().NoError(err, "antedecorator should not error on DeliverTx") _, err = decorator.AnteHandle(reCheckCtx, tx, false, next) if tc.expError == nil { - suite.Require().NoError(err, "non-strict decorator did not pass as expected") + s.Require().NoError(err, "non-strict decorator did not pass as expected") } else { - suite.Require().ErrorIs(err, tc.expError, "non-strict antehandler did not return error as expected") + s.Require().ErrorIs(err, tc.expError, "non-strict antehandler did not return error as expected") } }) } } + +// createMaliciousTMHeader creates a header with the provided trusted height with an invalid app hash. 
+func createMaliciousTMHeader(chainID string, blockHeight int64, trustedHeight clienttypes.Height, timestamp time.Time, tmValSet, tmTrustedVals *cmttypes.ValidatorSet, signers []cmttypes.PrivValidator, oldHeader *cmtproto.Header) (*ibctm.Header, error) { + const ( + invalidHashValue = "invalid_hash" + ) + + tmHeader := cmttypes.Header{ + Version: cmtprotoversion.Consensus{Block: cmtversion.BlockProtocol, App: 2}, + ChainID: chainID, + Height: blockHeight, + Time: timestamp, + LastBlockID: ibctesting.MakeBlockID(make([]byte, tmhash.Size), 10_000, make([]byte, tmhash.Size)), + LastCommitHash: oldHeader.GetLastCommitHash(), + ValidatorsHash: tmValSet.Hash(), + NextValidatorsHash: tmValSet.Hash(), + DataHash: tmhash.Sum([]byte(invalidHashValue)), + ConsensusHash: tmhash.Sum([]byte(invalidHashValue)), + AppHash: tmhash.Sum([]byte(invalidHashValue)), + LastResultsHash: tmhash.Sum([]byte(invalidHashValue)), + EvidenceHash: tmhash.Sum([]byte(invalidHashValue)), + ProposerAddress: tmValSet.Proposer.Address, //nolint:staticcheck + } + + hhash := tmHeader.Hash() + blockID := ibctesting.MakeBlockID(hhash, 3, tmhash.Sum([]byte(invalidHashValue))) + voteSet := cmttypes.NewVoteSet(chainID, blockHeight, 1, cmtproto.PrecommitType, tmValSet) + + extCommit, err := cmttypes.MakeExtCommit(blockID, blockHeight, 1, voteSet, signers, timestamp, false) + if err != nil { + return nil, err + } + + signedHeader := &cmtproto.SignedHeader{ + Header: tmHeader.ToProto(), + Commit: extCommit.ToCommit().ToProto(), + } + + valSet, err := tmValSet.ToProto() + if err != nil { + return nil, err + } + + trustedVals, err := tmTrustedVals.ToProto() + if err != nil { + return nil, err + } + + return &ibctm.Header{ + SignedHeader: signedHeader, + ValidatorSet: valSet, + TrustedHeight: trustedHeight, + TrustedValidators: trustedVals, + }, nil +} diff --git a/modules/core/api/module.go b/modules/core/api/module.go index b102f35e221..b1b73b40912 100644 --- a/modules/core/api/module.go +++ b/modules/core/api/module.go @@ -69,3 +69,10 @@ type PacketDataUnmarshaler interface { // the payload is provided and the packet data interface is returned UnmarshalPacketData(payload channeltypesv2.Payload) (any, error) } + +// PacketUnmarshalerModuleV2 is an interface that combines the IBCModuleV2 and PacketDataUnmarshaler +// interfaces to assert that the underlying application supports both. +type PacketUnmarshalerModuleV2 interface { + IBCModule + PacketDataUnmarshaler +} diff --git a/modules/core/api/router.go b/modules/core/api/router.go index 43b434d8cc5..dfa16285a88 100644 --- a/modules/core/api/router.go +++ b/modules/core/api/router.go @@ -10,68 +10,120 @@ import ( // Router contains all the module-defined callbacks required by IBC Protocol V2. type Router struct { - // routes is a map associating port prefixes to the IBCModules implementations. + // routes is a map from portID to IBCModule routes map[string]IBCModule + // prefixRoutes is a map from portID prefix to IBCModule + prefixRoutes map[string]IBCModule } // NewRouter creates a new Router instance. func NewRouter() *Router { return &Router{ - routes: make(map[string]IBCModule), + routes: make(map[string]IBCModule), + prefixRoutes: make(map[string]IBCModule), } } -// AddRoute registers a route for a given port ID prefix to a given IBCModule. -// There can be up to one prefix registered for a given port ID in the router. +// AddRoute registers a route for a given portID to a given IBCModule. // // Panics: -// - if a prefix of `portIDprefix` is already a registered route. 
-// - if `portIDprefix` is a prefix of already registered route. -func (rtr *Router) AddRoute(portIDprefix string, cbs IBCModule) *Router { - if !sdk.IsAlphaNumeric(portIDprefix) { +// - if a route with the same portID has already been registered +// - if the portID is not alphanumeric +func (rtr *Router) AddRoute(portID string, cbs IBCModule) *Router { + if !sdk.IsAlphaNumeric(portID) { panic(errors.New("route expressions can only contain alphanumeric characters")) } - for prefix := range rtr.routes { + if _, ok := rtr.routes[portID]; ok { + panic(fmt.Errorf("route %s has already been registered", portID)) + } + + for prefix := range rtr.prefixRoutes { + // Prevent existing prefix routes from colliding with the new direct route to avoid confusing behavior. + if strings.HasPrefix(portID, prefix) { + panic(fmt.Errorf("route %s is already matched by registered prefix route: %s", portID, prefix)) + } + } + + rtr.routes[portID] = cbs + + return rtr +} + +// AddPrefixRoute registers a route for a given portID prefix to a given IBCModule. +// A prefix route matches any portID that starts with the given prefix. +// +// Panics: +// - if `portIDPrefix` is not alphanumeric. +// - if a direct route `portIDPrefix` has already been registered. +// - if a prefix of `portIDPrefix` is already registered as a prefix. +// - if `portIDPrefix` is a prefix of an already registered prefix. +func (rtr *Router) AddPrefixRoute(portIDPrefix string, cbs IBCModule) *Router { + if !sdk.IsAlphaNumeric(portIDPrefix) { + panic(errors.New("route prefix can only contain alphanumeric characters")) + } + + // If the prefix is a prefix of an already registered route, we panic to avoid confusing behavior. + for portID := range rtr.routes { + if strings.HasPrefix(portID, portIDPrefix) { + panic(fmt.Errorf("route prefix %s is a prefix for already registered route: %s", portIDPrefix, portID)) + } + } + + for prefix := range rtr.prefixRoutes { // Prevent two scenarios: // * Adding a string that prefix is already registered e.g. // add prefix "portPrefix" and try to add "portPrefixSomeSuffix". - // * Adding a string that is a prefix of already registered route e.g. + // * Adding a string that is a prefix of an already registered prefix route e.g. // add prefix "portPrefix" and try to add "port". - if strings.HasPrefix(portIDprefix, prefix) { - panic(fmt.Errorf("route %s has already been covered by registered prefix: %s", portIDprefix, prefix)) + if strings.HasPrefix(portIDPrefix, prefix) { + panic(fmt.Errorf("route prefix %s has already been covered by registered prefix: %s", portIDPrefix, prefix)) } - if strings.HasPrefix(prefix, portIDprefix) { - panic(fmt.Errorf("route %s is a prefix for already registered route: %s", portIDprefix, prefix)) + if strings.HasPrefix(prefix, portIDPrefix) { + panic(fmt.Errorf("route prefix %s is a prefix for already registered prefix: %s", portIDPrefix, prefix)) } } - rtr.routes[portIDprefix] = cbs + rtr.prefixRoutes[portIDPrefix] = cbs return rtr } // Route returns the IBCModule for a given portID. func (rtr *Router) Route(portID string) IBCModule { - _, route, ok := rtr.getRoute(portID) + cbs, ok := rtr.getRoute(portID) if !ok { panic(fmt.Sprintf("no route for %s", portID)) } - return route + + return cbs } -// HasRoute returns true along with a prefix if the router has a module -// registered for the given portID or its prefix. Returns false otherwise.
-func (rtr *Router) HasRoute(portID string) (bool, string) { - prefix, _, ok := rtr.getRoute(portID) - return ok, prefix +// HasRoute returns true if the Router has a module registered (whether it's a direct or a prefix route) +// for the portID or false if no module is registered for it. +func (rtr *Router) HasRoute(portID string) bool { + _, ok := rtr.getRoute(portID) + return ok } -func (rtr *Router) getRoute(portID string) (string, IBCModule, bool) { - for prefix, module := range rtr.routes { +// getRoute is a helper function that retrieves the IBCModule for a given portID. +func (rtr *Router) getRoute(portID string) (IBCModule, bool) { + // Direct routes take precedence over prefix routes + route, ok := rtr.routes[portID] + if ok { + return route, true + } + + // If the portID is not found as a direct route, check for prefix routes + for prefix, cbs := range rtr.prefixRoutes { + // Note that this iteration is deterministic because there can only ever be one prefix route + // that matches a given portID. This is because of the checks in AddPrefixRoute preventing + // any colliding prefixes to be added. if strings.HasPrefix(portID, prefix) { - return prefix, module, true + return cbs, true } } - return "", nil, false + + // At this point neither a direct route nor a prefix route was found + return nil, false } diff --git a/modules/core/api/router_test.go b/modules/core/api/router_test.go index 5d11a8d78be..5bf887e353b 100644 --- a/modules/core/api/router_test.go +++ b/modules/core/api/router_test.go @@ -5,7 +5,7 @@ import ( mockv2 "github.com/cosmos/ibc-go/v10/testing/mock/v2" ) -func (suite *APITestSuite) TestRouter() { +func (s *APITestSuite) TestRouter() { var router *api.Router testCases := []struct { @@ -19,7 +19,7 @@ func (suite *APITestSuite) TestRouter() { router.AddRoute("port01", &mockv2.IBCModule{}) }, assertionFn: func() { - suite.Require().True(router.HasRoute("port01")) + s.Require().True(router.HasRoute("port01")) }, }, { @@ -30,70 +30,101 @@ func (suite *APITestSuite) TestRouter() { router.AddRoute("port03", &mockv2.IBCModule{}) }, assertionFn: func() { - suite.Require().True(router.HasRoute("port01")) - suite.Require().True(router.HasRoute("port02")) - suite.Require().True(router.HasRoute("port03")) + s.Require().True(router.HasRoute("port01")) + s.Require().True(router.HasRoute("port02")) + s.Require().True(router.HasRoute("port03")) }, }, { name: "success: prefix based routing works", malleate: func() { - router.AddRoute("somemodule", &mockv2.IBCModule{}) + router.AddPrefixRoute("somemodule", &mockv2.IBCModule{}) router.AddRoute("port01", &mockv2.IBCModule{}) }, assertionFn: func() { - suite.Require().True(router.HasRoute("somemodule")) - suite.Require().True(router.HasRoute("somemoduleport01")) - ok, prefix := router.HasRoute("somemoduleport01") - suite.Require().Equal(true, ok) - suite.Require().Equal("somemodule", prefix) - suite.Require().NotNil(router.Route("somemoduleport01")) - suite.Require().True(router.HasRoute("port01")) + s.Require().True(router.HasRoute("somemodule")) + s.Require().True(router.HasRoute("somemoduleport01")) + s.Require().NotNil(router.Route("somemoduleport01")) + s.Require().True(router.HasRoute("port01")) }, }, { - name: "failure: panics on duplicate module", + name: "failure: panics on adding direct route after overlapping prefix route", + malleate: func() { + router.AddPrefixRoute("someModule", &mockv2.IBCModule{}) + }, + assertionFn: func() { + s.Require().PanicsWithError("route someModuleWithSpecificPath is already matched by 
registered prefix route: someModule", func() { + router.AddRoute("someModuleWithSpecificPath", &mockv2.IBCModule{}) + }) + }, + }, + { + name: "failure: panics on adding prefix route after overlapping direct route", + malleate: func() { + router.AddRoute("someModuleWithSpecificPath", &mockv2.IBCModule{}) + }, + assertionFn: func() { + s.Require().PanicsWithError("route prefix someModule is a prefix for already registered route: someModuleWithSpecificPath", func() { + router.AddPrefixRoute("someModule", &mockv2.IBCModule{}) + }) + }, + }, + { + name: "failure: panics on duplicate route", malleate: func() { router.AddRoute("port01", &mockv2.IBCModule{}) }, assertionFn: func() { - suite.Require().PanicsWithError("route port01 has already been covered by registered prefix: port01", func() { + s.Require().PanicsWithError("route port01 has already been registered", func() { router.AddRoute("port01", &mockv2.IBCModule{}) }) }, }, { - name: "failure: panics invalid-name", - malleate: func() {}, + name: "failure: panics on duplicate route / prefix route", + malleate: func() { + router.AddRoute("port01", &mockv2.IBCModule{}) + }, assertionFn: func() { - suite.Require().PanicsWithError("route expressions can only contain alphanumeric characters", func() { - router.AddRoute("port-02", &mockv2.IBCModule{}) + s.Require().PanicsWithError("route prefix port01 is a prefix for already registered route: port01", func() { + router.AddPrefixRoute("port01", &mockv2.IBCModule{}) }) }, }, { - name: "failure: panics conflicting routes registered", + name: "failure: panics on duplicate prefix route", + malleate: func() { + router.AddPrefixRoute("port01", &mockv2.IBCModule{}) + }, + assertionFn: func() { + s.Require().PanicsWithError("route prefix port01 has already been covered by registered prefix: port01", func() { + router.AddPrefixRoute("port01", &mockv2.IBCModule{}) + }) + }, + }, + { + name: "failure: panics invalid-name", malleate: func() {}, assertionFn: func() { - suite.Require().PanicsWithError("route someModuleWithSpecificPath has already been covered by registered prefix: someModule", func() { - router.AddRoute("someModule", &mockv2.IBCModule{}) - router.AddRoute("someModuleWithSpecificPath", &mockv2.IBCModule{}) + s.Require().PanicsWithError("route expressions can only contain alphanumeric characters", func() { + router.AddRoute("port-02", &mockv2.IBCModule{}) }) }, }, { - name: "failure: panics conflicting routes registered, when shorter prefix is added", + name: "failure: panics conflicting prefix routes registered, when shorter prefix is added", malleate: func() {}, assertionFn: func() { - suite.Require().PanicsWithError("route someLonger is a prefix for already registered route: someLongerPrefixModule", func() { - router.AddRoute("someLongerPrefixModule", &mockv2.IBCModule{}) - router.AddRoute("someLonger", &mockv2.IBCModule{}) + s.Require().PanicsWithError("route prefix someLonger is a prefix for already registered prefix: someLongerPrefixModule", func() { + router.AddPrefixRoute("someLongerPrefixModule", &mockv2.IBCModule{}) + router.AddPrefixRoute("someLonger", &mockv2.IBCModule{}) }) }, }, } for _, tc := range testCases { - suite.Run(tc.name, func() { + s.Run(tc.name, func() { router = api.NewRouter() tc.malleate() diff --git a/modules/core/genesis_test.go b/modules/core/genesis_test.go index 2698356af24..0ed5c444a32 100644 --- a/modules/core/genesis_test.go +++ b/modules/core/genesis_test.go @@ -5,7 +5,7 @@ import ( "fmt" "testing" - proto "github.com/cosmos/gogoproto/proto" + 
"github.com/cosmos/gogoproto/proto" testifysuite "github.com/stretchr/testify/suite" "github.com/cosmos/cosmos-sdk/codec" @@ -50,19 +50,19 @@ type IBCTestSuite struct { } // SetupTest creates a coordinator with 2 test chains. -func (suite *IBCTestSuite) SetupTest() { - suite.coordinator = ibctesting.NewCoordinator(suite.T(), 2) +func (s *IBCTestSuite) SetupTest() { + s.coordinator = ibctesting.NewCoordinator(s.T(), 2) - suite.chainA = suite.coordinator.GetChain(ibctesting.GetChainID(1)) - suite.chainB = suite.coordinator.GetChain(ibctesting.GetChainID(2)) + s.chainA = s.coordinator.GetChain(ibctesting.GetChainID(1)) + s.chainB = s.coordinator.GetChain(ibctesting.GetChainID(2)) } func TestIBCTestSuite(t *testing.T) { testifysuite.Run(t, new(IBCTestSuite)) } -func (suite *IBCTestSuite) TestValidateGenesis() { - header := suite.chainA.CreateTMClientHeader(suite.chainA.ChainID, suite.chainA.ProposedHeader.Height, clienttypes.NewHeight(0, uint64(suite.chainA.ProposedHeader.Height-1)), suite.chainA.ProposedHeader.Time, suite.chainA.Vals, suite.chainA.Vals, suite.chainA.Vals, suite.chainA.Signers) +func (s *IBCTestSuite) TestValidateGenesis() { + header := s.chainA.CreateTMClientHeader(s.chainA.ChainID, s.chainA.ProposedHeader.Height, clienttypes.NewHeight(0, uint64(s.chainA.ProposedHeader.Height-1)), s.chainA.ProposedHeader.Time, s.chainA.Vals, s.chainA.Vals, s.chainA.Vals, s.chainA.Signers) testCases := []struct { name string @@ -80,7 +80,7 @@ func (suite *IBCTestSuite) TestValidateGenesis() { ClientGenesis: clienttypes.NewGenesisState( []clienttypes.IdentifiedClientState{ clienttypes.NewIdentifiedClientState( - clientID, ibctm.NewClientState(suite.chainA.ChainID, ibctm.DefaultTrustLevel, ibctesting.TrustingPeriod, ibctesting.UnbondingPeriod, ibctesting.MaxClockDrift, clientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath), + clientID, ibctm.NewClientState(s.chainA.ChainID, ibctm.DefaultTrustLevel, ibctesting.TrustingPeriod, ibctesting.UnbondingPeriod, ibctesting.MaxClockDrift, clientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath), ), }, []clienttypes.ClientConsensusStates{ @@ -186,7 +186,7 @@ func (suite *IBCTestSuite) TestValidateGenesis() { ClientGenesis: clienttypes.NewGenesisState( []clienttypes.IdentifiedClientState{ clienttypes.NewIdentifiedClientState( - clientID, ibctm.NewClientState(suite.chainA.ChainID, ibctm.DefaultTrustLevel, ibctesting.TrustingPeriod, ibctesting.UnbondingPeriod, ibctesting.MaxClockDrift, clientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath), + clientID, ibctm.NewClientState(s.chainA.ChainID, ibctm.DefaultTrustLevel, ibctesting.TrustingPeriod, ibctesting.UnbondingPeriod, ibctesting.MaxClockDrift, clientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath), ), }, nil, @@ -281,26 +281,25 @@ func (suite *IBCTestSuite) TestValidateGenesis() { } for _, tc := range testCases { - tc := tc err := tc.genState.Validate() if tc.expError == nil { - suite.Require().NoError(err, tc.name) + s.Require().NoError(err, tc.name) } else { - suite.Require().Error(err, tc.name) - suite.Require().Contains(err.Error(), tc.expError.Error()) + s.Require().Error(err, tc.name) + s.Require().Contains(err.Error(), tc.expError.Error()) } } } -func (suite *IBCTestSuite) TestInitGenesis() { - header := suite.chainA.CreateTMClientHeader(suite.chainA.ChainID, suite.chainA.ProposedHeader.Height, clienttypes.NewHeight(0, uint64(suite.chainA.ProposedHeader.Height-1)), suite.chainA.ProposedHeader.Time, suite.chainA.Vals, suite.chainA.Vals, 
suite.chainA.Vals, suite.chainA.Signers) +func (s *IBCTestSuite) TestInitGenesis() { + header := s.chainA.CreateTMClientHeader(s.chainA.ChainID, s.chainA.ProposedHeader.Height, clienttypes.NewHeight(0, uint64(s.chainA.ProposedHeader.Height-1)), s.chainA.ProposedHeader.Time, s.chainA.Vals, s.chainA.Vals, s.chainA.Vals, s.chainA.Signers) packet := channelv2types.NewPacket( 1, "07-tendermint-0", "07-tendermint-1", - uint64(suite.chainA.GetContext().BlockTime().Unix()), mockv2.NewMockPayload("src", "dst"), + uint64(s.chainA.GetContext().BlockTime().Unix()), mockv2.NewMockPayload("src", "dst"), ) bz, err := proto.Marshal(&packet) - suite.Require().NoError(err) + s.Require().NoError(err) testCases := []struct { name string @@ -316,7 +315,7 @@ func (suite *IBCTestSuite) TestInitGenesis() { ClientGenesis: clienttypes.NewGenesisState( []clienttypes.IdentifiedClientState{ clienttypes.NewIdentifiedClientState( - clientID, ibctm.NewClientState(suite.chainA.ChainID, ibctm.DefaultTrustLevel, ibctesting.TrustingPeriod, ibctesting.UnbondingPeriod, ibctesting.MaxClockDrift, clientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath), + clientID, ibctm.NewClientState(s.chainA.ChainID, ibctm.DefaultTrustLevel, ibctesting.TrustingPeriod, ibctesting.UnbondingPeriod, ibctesting.MaxClockDrift, clientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath), ), }, []clienttypes.ClientConsensusStates{ @@ -418,17 +417,15 @@ func (suite *IBCTestSuite) TestInitGenesis() { } for _, tc := range testCases { - tc := tc + app := simapp.Setup(s.T(), false) - app := simapp.Setup(suite.T(), false) - - suite.NotPanics(func() { + s.Require().NotPanics(func() { ibc.InitGenesis(app.NewContext(false), *app.IBCKeeper, tc.genState) }) } } -func (suite *IBCTestSuite) TestExportGenesis() { +func (s *IBCTestSuite) TestExportGenesis() { testCases := []struct { msg string malleate func() @@ -437,39 +434,39 @@ func (suite *IBCTestSuite) TestExportGenesis() { "success", func() { // creates clients - ibctesting.NewPath(suite.chainA, suite.chainB).Setup() + ibctesting.NewPath(s.chainA, s.chainB).Setup() // create extra clients - ibctesting.NewPath(suite.chainA, suite.chainB).SetupClients() - ibctesting.NewPath(suite.chainA, suite.chainB).SetupClients() + ibctesting.NewPath(s.chainA, s.chainB).SetupClients() + ibctesting.NewPath(s.chainA, s.chainB).SetupClients() }, }, } for _, tc := range testCases { - suite.Run(fmt.Sprintf("Case %s", tc.msg), func() { - suite.SetupTest() + s.Run(fmt.Sprintf("Case %s", tc.msg), func() { + s.SetupTest() tc.malleate() var gs *types.GenesisState - suite.NotPanics(func() { - gs = ibc.ExportGenesis(suite.chainA.GetContext(), *suite.chainA.App.GetIBCKeeper()) + s.Require().NotPanics(func() { + gs = ibc.ExportGenesis(s.chainA.GetContext(), *s.chainA.App.GetIBCKeeper()) }) // init genesis based on export - suite.NotPanics(func() { - ibc.InitGenesis(suite.chainA.GetContext(), *suite.chainA.App.GetIBCKeeper(), gs) + s.Require().NotPanics(func() { + ibc.InitGenesis(s.chainA.GetContext(), *s.chainA.App.GetIBCKeeper(), gs) }) - suite.NotPanics(func() { - cdc := codec.NewProtoCodec(suite.chainA.GetSimApp().InterfaceRegistry()) + s.Require().NotPanics(func() { + cdc := codec.NewProtoCodec(s.chainA.GetSimApp().InterfaceRegistry()) genState := cdc.MustMarshalJSON(gs) cdc.MustUnmarshalJSON(genState, gs) }) // init genesis based on marshal and unmarshal - suite.NotPanics(func() { - ibc.InitGenesis(suite.chainA.GetContext(), *suite.chainA.App.GetIBCKeeper(), gs) + s.Require().NotPanics(func() { + 
ibc.InitGenesis(s.chainA.GetContext(), *s.chainA.App.GetIBCKeeper(), gs) }) }) } diff --git a/modules/core/keeper/keeper.go b/modules/core/keeper/keeper.go index 78c4a1e617f..bb556f6484e 100644 --- a/modules/core/keeper/keeper.go +++ b/modules/core/keeper/keeper.go @@ -18,7 +18,6 @@ import ( portkeeper "github.com/cosmos/ibc-go/v10/modules/core/05-port/keeper" porttypes "github.com/cosmos/ibc-go/v10/modules/core/05-port/types" "github.com/cosmos/ibc-go/v10/modules/core/api" - "github.com/cosmos/ibc-go/v10/modules/core/types" ) // Keeper defines each ICS keeper for IBC @@ -37,7 +36,7 @@ type Keeper struct { // NewKeeper creates a new ibc Keeper func NewKeeper( - cdc codec.BinaryCodec, storeService corestore.KVStoreService, paramSpace types.ParamSubspace, + cdc codec.BinaryCodec, storeService corestore.KVStoreService, upgradeKeeper clienttypes.UpgradeKeeper, authority string, ) *Keeper { // panic if any of the keepers passed in is empty @@ -49,12 +48,12 @@ func NewKeeper( panic(errors.New("authority must be non-empty")) } - clientKeeper := clientkeeper.NewKeeper(cdc, storeService, paramSpace, upgradeKeeper) + clientKeeper := clientkeeper.NewKeeper(cdc, storeService, upgradeKeeper) clientV2Keeper := clientv2keeper.NewKeeper(cdc, clientKeeper) - connectionKeeper := connectionkeeper.NewKeeper(cdc, storeService, paramSpace, clientKeeper) + connectionKeeper := connectionkeeper.NewKeeper(cdc, storeService, clientKeeper) portKeeper := portkeeper.NewKeeper() - channelKeeper := channelkeeper.NewKeeper(cdc, storeService, clientKeeper, connectionKeeper) - channelKeeperV2 := channelkeeperv2.NewKeeper(cdc, storeService, clientKeeper, clientV2Keeper, channelKeeper, connectionKeeper) + channelKeeperV2 := channelkeeperv2.NewKeeper(cdc, storeService, clientKeeper, clientV2Keeper, connectionKeeper) + channelKeeper := channelkeeper.NewKeeper(cdc, storeService, clientKeeper, connectionKeeper, clientV2Keeper, channelKeeperV2) return &Keeper{ cdc: cdc, diff --git a/modules/core/keeper/keeper_test.go b/modules/core/keeper/keeper_test.go index ceb0458d792..39fa02cfd89 100644 --- a/modules/core/keeper/keeper_test.go +++ b/modules/core/keeper/keeper_test.go @@ -24,16 +24,16 @@ type KeeperTestSuite struct { chainB *ibctesting.TestChain } -func (suite *KeeperTestSuite) SetupTest() { - suite.coordinator = ibctesting.NewCoordinator(suite.T(), 2) +func (s *KeeperTestSuite) SetupTest() { + s.coordinator = ibctesting.NewCoordinator(s.T(), 2) - suite.chainA = suite.coordinator.GetChain(ibctesting.GetChainID(1)) - suite.chainB = suite.coordinator.GetChain(ibctesting.GetChainID(2)) + s.chainA = s.coordinator.GetChain(ibctesting.GetChainID(1)) + s.chainB = s.coordinator.GetChain(ibctesting.GetChainID(2)) // TODO: remove // commit some blocks so that QueryProof returns valid proof (cannot return valid query if height <= 1) - suite.coordinator.CommitNBlocks(suite.chainA, 2) - suite.coordinator.CommitNBlocks(suite.chainB, 2) + s.coordinator.CommitNBlocks(s.chainA, 2) + s.coordinator.CommitNBlocks(s.chainB, 2) } func TestKeeperTestSuite(t *testing.T) { @@ -42,7 +42,7 @@ func TestKeeperTestSuite(t *testing.T) { // Test ibckeeper.NewKeeper used to initialize IBCKeeper when creating an app instance. // It verifies if ibckeeper.NewKeeper panic when any of the keepers passed in is empty. 
-func (suite *KeeperTestSuite) TestNewKeeper() { +func (s *KeeperTestSuite) TestNewKeeper() { var ( upgradeKeeper clienttypes.UpgradeKeeper newIBCKeeperFn func() @@ -74,9 +74,8 @@ func (suite *KeeperTestSuite) TestNewKeeper() { malleate: func() { newIBCKeeperFn = func() { ibckeeper.NewKeeper( - suite.chainA.GetSimApp().AppCodec(), - runtime.NewKVStoreService(suite.chainA.GetSimApp().GetKey(ibcexported.StoreKey)), - suite.chainA.GetSimApp().GetSubspace(ibcexported.ModuleName), + s.chainA.GetSimApp().AppCodec(), + runtime.NewKVStoreService(s.chainA.GetSimApp().GetKey(ibcexported.StoreKey)), upgradeKeeper, "", // authority ) @@ -87,38 +86,35 @@ func (suite *KeeperTestSuite) TestNewKeeper() { } for _, tc := range testCases { + s.SetupTest() - suite.SetupTest() - - suite.Run(tc.name, func() { + s.Run(tc.name, func() { // set default behaviour newIBCKeeperFn = func() { ibckeeper.NewKeeper( - suite.chainA.GetSimApp().AppCodec(), - runtime.NewKVStoreService(suite.chainA.GetSimApp().GetKey(ibcexported.StoreKey)), - suite.chainA.GetSimApp().GetSubspace(ibcexported.ModuleName), + s.chainA.GetSimApp().AppCodec(), + runtime.NewKVStoreService(s.chainA.GetSimApp().GetKey(ibcexported.StoreKey)), upgradeKeeper, - suite.chainA.App.GetIBCKeeper().GetAuthority(), + s.chainA.App.GetIBCKeeper().GetAuthority(), ) } - upgradeKeeper = suite.chainA.GetSimApp().UpgradeKeeper + upgradeKeeper = s.chainA.GetSimApp().UpgradeKeeper tc.malleate() if tc.expPanic != "" { - suite.Require().Panics(func() { + s.Require().Panics(func() { newIBCKeeperFn() }, "expected panic but no panic occurred") defer func() { if r := recover(); r != nil { - suite.Require().Contains(r.(error).Error(), tc.expPanic, "unexpected panic message") + s.Require().Contains(r.(error).Error(), tc.expPanic, "unexpected panic message") } }() - } else { - suite.Require().NotPanics(newIBCKeeperFn, "unexpected panic occurred") + s.Require().NotPanics(newIBCKeeperFn, "unexpected panic occurred") } }) } diff --git a/modules/core/keeper/msg_server.go b/modules/core/keeper/msg_server.go index b69f7787d0e..9fa957a679d 100644 --- a/modules/core/keeper/msg_server.go +++ b/modules/core/keeper/msg_server.go @@ -2,6 +2,7 @@ package keeper import ( "context" + "errors" errorsmod "cosmossdk.io/errors" @@ -121,24 +122,6 @@ func (k *Keeper) UpgradeClient(goCtx context.Context, msg *clienttypes.MsgUpgrad return &clienttypes.MsgUpgradeClientResponse{}, nil } -// SubmitMisbehaviour defines a rpc handler method for MsgSubmitMisbehaviour. -// Warning: DEPRECATED -// This handler is redundant as `MsgUpdateClient` is now capable of handling both a Header and a Misbehaviour -func (k *Keeper) SubmitMisbehaviour(goCtx context.Context, msg *clienttypes.MsgSubmitMisbehaviour) (*clienttypes.MsgSubmitMisbehaviourResponse, error) { //nolint:staticcheck // for now, we're using msgsubmitmisbehaviour. - ctx := sdk.UnwrapSDKContext(goCtx) - - misbehaviour, err := clienttypes.UnpackClientMessage(msg.Misbehaviour) - if err != nil { - return nil, err - } - - if err = k.ClientKeeper.UpdateClient(ctx, msg.ClientId, misbehaviour); err != nil { - return nil, err - } - - return &clienttypes.MsgSubmitMisbehaviourResponse{}, nil -} - // RecoverClient defines a rpc handler method for MsgRecoverClient. 
func (k *Keeper) RecoverClient(goCtx context.Context, msg *clienttypes.MsgRecoverClient) (*clienttypes.MsgRecoverClientResponse, error) { if k.GetAuthority() != msg.Signer { @@ -448,11 +431,10 @@ func (k *Keeper) RecvPacket(goCtx context.Context, msg *channeltypes.MsgRecvPack cacheCtx, writeFn := ctx.CacheContext() channelVersion, err := k.ChannelKeeper.RecvPacket(cacheCtx, msg.Packet, msg.ProofCommitment, msg.ProofHeight) - switch err { - case nil: + switch { + case err == nil: writeFn() - case channeltypes.ErrNoOpMsg: - // no-ops do not need event emission as they will be ignored + case errors.Is(err, channeltypes.ErrNoOpMsg): ctx.Logger().Debug("no-op on redundant relay", "port-id", msg.Packet.SourcePort, "channel-id", msg.Packet.SourceChannel) return &channeltypes.MsgRecvPacketResponse{Result: channeltypes.NOOP}, nil default: @@ -513,11 +495,10 @@ func (k *Keeper) Timeout(goCtx context.Context, msg *channeltypes.MsgTimeout) (* cacheCtx, writeFn := ctx.CacheContext() channelVersion, err := k.ChannelKeeper.TimeoutPacket(cacheCtx, msg.Packet, msg.ProofUnreceived, msg.ProofHeight, msg.NextSequenceRecv) - switch err { - case nil: + switch { + case err == nil: writeFn() - case channeltypes.ErrNoOpMsg: - // no-ops do not need event emission as they will be ignored + case errors.Is(err, channeltypes.ErrNoOpMsg): ctx.Logger().Debug("no-op on redundant relay", "port-id", msg.Packet.SourcePort, "channel-id", msg.Packet.SourceChannel) return &channeltypes.MsgTimeoutResponse{Result: channeltypes.NOOP}, nil default: @@ -562,11 +543,10 @@ func (k *Keeper) TimeoutOnClose(goCtx context.Context, msg *channeltypes.MsgTime cacheCtx, writeFn := ctx.CacheContext() channelVersion, err := k.ChannelKeeper.TimeoutOnClose(cacheCtx, msg.Packet, msg.ProofUnreceived, msg.ProofClose, msg.ProofHeight, msg.NextSequenceRecv) - switch err { - case nil: + switch { + case err == nil: writeFn() - case channeltypes.ErrNoOpMsg: - // no-ops do not need event emission as they will be ignored + case errors.Is(err, channeltypes.ErrNoOpMsg): ctx.Logger().Debug("no-op on redundant relay", "port-id", msg.Packet.SourcePort, "channel-id", msg.Packet.SourceChannel) return &channeltypes.MsgTimeoutOnCloseResponse{Result: channeltypes.NOOP}, nil default: @@ -615,11 +595,10 @@ func (k *Keeper) Acknowledgement(goCtx context.Context, msg *channeltypes.MsgAck cacheCtx, writeFn := ctx.CacheContext() channelVersion, err := k.ChannelKeeper.AcknowledgePacket(cacheCtx, msg.Packet, msg.Acknowledgement, msg.ProofAcked, msg.ProofHeight) - switch err { - case nil: + switch { + case err == nil: writeFn() - case channeltypes.ErrNoOpMsg: - // no-ops do not need event emission as they will be ignored + case errors.Is(err, channeltypes.ErrNoOpMsg): ctx.Logger().Debug("no-op on redundant relay", "port-id", msg.Packet.SourcePort, "channel-id", msg.Packet.SourceChannel) return &channeltypes.MsgAcknowledgementResponse{Result: channeltypes.NOOP}, nil default: diff --git a/modules/core/keeper/msg_server_test.go b/modules/core/keeper/msg_server_test.go index 8af56f9ee3d..e1e6dcdc78d 100644 --- a/modules/core/keeper/msg_server_test.go +++ b/modules/core/keeper/msg_server_test.go @@ -29,7 +29,7 @@ var ( // TestRegisterCounterparty tests that counterpartyInfo is correctly stored // and only if the submittor is the same submittor as prior createClient msg -func (suite *KeeperTestSuite) TestRegisterCounterparty() { +func (s *KeeperTestSuite) TestRegisterCounterparty() { var path *ibctesting.Path testCases := []struct { name string @@ -52,7 +52,7 @@ func (suite 
*KeeperTestSuite) TestRegisterCounterparty() { "creator is different than expected", func() { path.SetupClients() - path.EndpointA.Chain.App.GetIBCKeeper().ClientKeeper.SetClientCreator(suite.chainA.GetContext(), path.EndpointA.ClientID, sdk.AccAddress(ibctesting.TestAccAddress)) + path.EndpointA.Chain.App.GetIBCKeeper().ClientKeeper.SetClientCreator(s.chainA.GetContext(), path.EndpointA.ClientID, sdk.AccAddress(ibctesting.TestAccAddress)) }, ibcerrors.ErrUnauthorized, }, @@ -65,25 +65,25 @@ func (suite *KeeperTestSuite) TestRegisterCounterparty() { }, } for _, tc := range testCases { - suite.Run(tc.name, func() { - suite.SetupTest() - path = ibctesting.NewPath(suite.chainA, suite.chainB) + s.Run(tc.name, func() { + s.SetupTest() + path = ibctesting.NewPath(s.chainA, s.chainB) tc.malleate() merklePrefix := [][]byte{[]byte("ibc"), []byte("channel-7")} - msg := clientv2types.NewMsgRegisterCounterparty(path.EndpointA.ClientID, merklePrefix, path.EndpointB.ClientID, suite.chainA.SenderAccount.GetAddress().String()) - _, err := suite.chainA.App.GetIBCKeeper().RegisterCounterparty(suite.chainA.GetContext(), msg) + msg := clientv2types.NewMsgRegisterCounterparty(path.EndpointA.ClientID, merklePrefix, path.EndpointB.ClientID, s.chainA.SenderAccount.GetAddress().String()) + _, err := s.chainA.App.GetIBCKeeper().RegisterCounterparty(s.chainA.GetContext(), msg) if tc.expError != nil { - suite.Require().Error(err) - suite.Require().True(errors.Is(err, tc.expError)) + s.Require().Error(err) + s.Require().True(errors.Is(err, tc.expError)) } else { - suite.Require().NoError(err) - counterpartyInfo, ok := suite.chainA.App.GetIBCKeeper().ClientV2Keeper.GetClientCounterparty(suite.chainA.GetContext(), path.EndpointA.ClientID) - suite.Require().True(ok) - suite.Require().Equal(counterpartyInfo, clientv2types.NewCounterpartyInfo(merklePrefix, path.EndpointB.ClientID)) - nextSeqSend, ok := suite.chainA.App.GetIBCKeeper().ChannelKeeperV2.GetNextSequenceSend(suite.chainA.GetContext(), path.EndpointA.ClientID) - suite.Require().True(ok) - suite.Require().Equal(nextSeqSend, uint64(1)) + s.Require().NoError(err) + counterpartyInfo, ok := s.chainA.App.GetIBCKeeper().ClientV2Keeper.GetClientCounterparty(s.chainA.GetContext(), path.EndpointA.ClientID) + s.Require().True(ok) + s.Require().Equal(counterpartyInfo, clientv2types.NewCounterpartyInfo(merklePrefix, path.EndpointB.ClientID)) + nextSeqSend, ok := s.chainA.App.GetIBCKeeper().ChannelKeeperV2.GetNextSequenceSend(s.chainA.GetContext(), path.EndpointA.ClientID) + s.Require().True(ok) + s.Require().Equal(nextSeqSend, uint64(1)) } }) } @@ -94,7 +94,7 @@ func (suite *KeeperTestSuite) TestRegisterCounterparty() { // tests high level properties like ordering and basic sanity checks. More // rigorous testing of 'RecvPacket' can be found in the // 04-channel/keeper/packet_test.go. 
-func (suite *KeeperTestSuite) TestHandleRecvPacket() { +func (s *KeeperTestSuite) TestHandleRecvPacket() { var ( packet channeltypes.Packet path *ibctesting.Path @@ -113,7 +113,7 @@ func (suite *KeeperTestSuite) TestHandleRecvPacket() { path.Setup() sequence, err := path.EndpointA.SendPacket(timeoutHeight, 0, ibctesting.MockPacketData) - suite.Require().NoError(err) + s.Require().NoError(err) packet = channeltypes.NewPacket(ibctesting.MockPacketData, sequence, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, timeoutHeight, 0) }, nil, false, false, false}, @@ -121,7 +121,7 @@ func (suite *KeeperTestSuite) TestHandleRecvPacket() { path.Setup() sequence, err := path.EndpointA.SendPacket(timeoutHeight, 0, ibctesting.MockPacketData) - suite.Require().NoError(err) + s.Require().NoError(err) packet = channeltypes.NewPacket(ibctesting.MockPacketData, sequence, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, timeoutHeight, 0) }, nil, false, false, false}, @@ -132,7 +132,7 @@ func (suite *KeeperTestSuite) TestHandleRecvPacket() { // attempts to receive packet with sequence 10 without receiving packet with sequence 1 for i := uint64(1); i < 10; i++ { sequence, err := path.EndpointA.SendPacket(timeoutHeight, 0, ibctesting.MockPacketData) - suite.Require().NoError(err) + s.Require().NoError(err) packet = channeltypes.NewPacket(ibctesting.MockPacketData, sequence, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, timeoutHeight, 0) } @@ -141,7 +141,7 @@ func (suite *KeeperTestSuite) TestHandleRecvPacket() { path.Setup() sequence, err := path.EndpointA.SendPacket(timeoutHeight, 0, ibctesting.MockFailPacketData) - suite.Require().NoError(err) + s.Require().NoError(err) packet = channeltypes.NewPacket(ibctesting.MockFailPacketData, sequence, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, timeoutHeight, 0) }, nil, true, false, false}, @@ -150,7 +150,7 @@ func (suite *KeeperTestSuite) TestHandleRecvPacket() { path.Setup() sequence, err := path.EndpointA.SendPacket(timeoutHeight, 0, ibcmock.MockAsyncPacketData) - suite.Require().NoError(err) + s.Require().NoError(err) packet = channeltypes.NewPacket(ibcmock.MockAsyncPacketData, sequence, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, timeoutHeight, 0) }, nil, false, true, false}, @@ -158,7 +158,7 @@ func (suite *KeeperTestSuite) TestHandleRecvPacket() { path.Setup() sequence, err := path.EndpointA.SendPacket(timeoutHeight, 0, ibcmock.MockAsyncPacketData) - suite.Require().NoError(err) + s.Require().NoError(err) packet = channeltypes.NewPacket(ibcmock.MockAsyncPacketData, sequence, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, timeoutHeight, 0) }, nil, false, true, false}, @@ -169,14 +169,14 @@ func (suite *KeeperTestSuite) TestHandleRecvPacket() { // attempts to receive packet with sequence 10 without receiving packet with sequence 1 for i := uint64(1); i < 10; i++ { sequence, err := path.EndpointA.SendPacket(timeoutHeight, 0, ibctesting.MockPacketData) - suite.Require().NoError(err) + s.Require().NoError(err) packet = channeltypes.NewPacket(ibctesting.MockPacketData, sequence, 
path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, timeoutHeight, 0) } }, errors.New("packet sequence is out of order"), false, false, false}, {"channel does not exist", func() { // any non-nil value of packet is valid - suite.Require().NotNil(packet) + s.Require().NotNil(packet) }, errors.New("channel not found"), false, false, false}, {"packet not sent", func() { path.Setup() @@ -188,29 +188,29 @@ func (suite *KeeperTestSuite) TestHandleRecvPacket() { path.Setup() sequence, err := path.EndpointA.SendPacket(timeoutHeight, 0, ibctesting.MockPacketData) - suite.Require().NoError(err) + s.Require().NoError(err) packet = channeltypes.NewPacket(ibctesting.MockPacketData, sequence, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, timeoutHeight, 0) err = path.EndpointB.RecvPacket(packet) - suite.Require().NoError(err) + s.Require().NoError(err) }, nil, false, false, true}, {"successful no-op: UNORDERED - packet already received (replay)", func() { // mock will panic if application callback is called twice on the same packet path.Setup() sequence, err := path.EndpointA.SendPacket(timeoutHeight, 0, ibctesting.MockPacketData) - suite.Require().NoError(err) + s.Require().NoError(err) packet = channeltypes.NewPacket(ibctesting.MockPacketData, sequence, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, timeoutHeight, 0) err = path.EndpointB.RecvPacket(packet) - suite.Require().NoError(err) + s.Require().NoError(err) }, nil, false, false, true}, } for _, tc := range testCases { - suite.Run(tc.name, func() { - suite.SetupTest() // reset - path = ibctesting.NewPath(suite.chainA, suite.chainB) + s.Run(tc.name, func() { + s.SetupTest() // reset + path = ibctesting.NewPath(s.chainA, s.chainB) tc.malleate() @@ -225,55 +225,54 @@ func (suite *KeeperTestSuite) TestHandleRecvPacket() { proof, proofHeight = path.EndpointA.QueryProof(packetKey) } - msg := channeltypes.NewMsgRecvPacket(packet, proof, proofHeight, suite.chainB.SenderAccount.GetAddress().String()) + msg := channeltypes.NewMsgRecvPacket(packet, proof, proofHeight, s.chainB.SenderAccount.GetAddress().String()) - ctx := suite.chainB.GetContext() - _, err := suite.chainB.App.GetIBCKeeper().RecvPacket(ctx, msg) + ctx := s.chainB.GetContext() + _, err := s.chainB.App.GetIBCKeeper().RecvPacket(ctx, msg) events := ctx.EventManager().Events() if tc.expError == nil { - suite.Require().NoError(err) + s.Require().NoError(err) // replay should not fail since it will be treated as a no-op - _, err := suite.chainB.App.GetIBCKeeper().RecvPacket(suite.chainB.GetContext(), msg) - suite.Require().NoError(err) + _, err := s.chainB.App.GetIBCKeeper().RecvPacket(s.chainB.GetContext(), msg) + s.Require().NoError(err) if tc.expRevert { // context events should contain error events - suite.Require().Contains(events, internalerrors.ConvertToErrorEvents(sdk.Events{ibcmock.NewMockRecvPacketEvent()})[0]) - suite.Require().NotContains(events, ibcmock.NewMockRecvPacketEvent()) + s.Require().Contains(events, internalerrors.ConvertToErrorEvents(sdk.Events{ibcmock.NewMockRecvPacketEvent()})[0]) + s.Require().NotContains(events, ibcmock.NewMockRecvPacketEvent()) } else { if tc.replay { // context should not contain application events - suite.Require().NotContains(events, ibcmock.NewMockRecvPacketEvent()) - suite.Require().NotContains(events, 
internalerrors.ConvertToErrorEvents(sdk.Events{ibcmock.NewMockRecvPacketEvent()})[0]) + s.Require().NotContains(events, ibcmock.NewMockRecvPacketEvent()) + s.Require().NotContains(events, internalerrors.ConvertToErrorEvents(sdk.Events{ibcmock.NewMockRecvPacketEvent()})[0]) } else { // context events should contain application events - suite.Require().Contains(events, ibcmock.NewMockRecvPacketEvent()) + s.Require().Contains(events, ibcmock.NewMockRecvPacketEvent()) } } // verify if ack was written - ack, found := suite.chainB.App.GetIBCKeeper().ChannelKeeper.GetPacketAcknowledgement(suite.chainB.GetContext(), packet.GetDestPort(), packet.GetDestChannel(), packet.GetSequence()) + ack, found := s.chainB.App.GetIBCKeeper().ChannelKeeper.GetPacketAcknowledgement(s.chainB.GetContext(), packet.GetDestPort(), packet.GetDestChannel(), packet.GetSequence()) if tc.async { - suite.Require().Nil(ack) - suite.Require().False(found) - + s.Require().Nil(ack) + s.Require().False(found) } else { - suite.Require().NotNil(ack) - suite.Require().True(found) + s.Require().NotNil(ack) + s.Require().True(found) } } else { - suite.Require().Error(err) - suite.Require().Contains(err.Error(), tc.expError.Error()) + s.Require().Error(err) + s.Require().Contains(err.Error(), tc.expError.Error()) } }) } } -func (suite *KeeperTestSuite) TestUpdateClient() { +func (s *KeeperTestSuite) TestUpdateClient() { var path *ibctesting.Path testCases := []struct { name string @@ -288,30 +287,30 @@ func (suite *KeeperTestSuite) TestUpdateClient() { { "success: update client, with v2 params set to correct relayer", func() { - creator := suite.chainA.SenderAccount.GetAddress() - msg := clientv2types.NewMsgUpdateClientConfig(path.EndpointA.ClientID, creator.String(), clientv2types.NewConfig(suite.chainB.SenderAccount.GetAddress().String(), creator.String())) - _, err := suite.chainA.App.GetIBCKeeper().UpdateClientConfig(suite.chainA.GetContext(), msg) - suite.Require().NoError(err) + creator := s.chainA.SenderAccount.GetAddress() + msg := clientv2types.NewMsgUpdateClientConfig(path.EndpointA.ClientID, creator.String(), clientv2types.NewConfig(s.chainB.SenderAccount.GetAddress().String(), creator.String())) + _, err := s.chainA.App.GetIBCKeeper().UpdateClientConfig(s.chainA.GetContext(), msg) + s.Require().NoError(err) }, nil, }, { "failure: update client with invalid relayer", func() { - creator := suite.chainA.SenderAccount.GetAddress() - msg := clientv2types.NewMsgUpdateClientConfig(path.EndpointA.ClientID, creator.String(), clientv2types.NewConfig(suite.chainB.SenderAccount.GetAddress().String())) - _, err := suite.chainA.App.GetIBCKeeper().UpdateClientConfig(suite.chainA.GetContext(), msg) - suite.Require().NoError(err) + creator := s.chainA.SenderAccount.GetAddress() + msg := clientv2types.NewMsgUpdateClientConfig(path.EndpointA.ClientID, creator.String(), clientv2types.NewConfig(s.chainB.SenderAccount.GetAddress().String())) + _, err := s.chainA.App.GetIBCKeeper().UpdateClientConfig(s.chainA.GetContext(), msg) + s.Require().NoError(err) }, ibcerrors.ErrUnauthorized, }, } for _, tc := range testCases { - suite.Run(tc.name, func() { - suite.SetupTest() + s.Run(tc.name, func() { + s.SetupTest() - path = ibctesting.NewPath(suite.chainA, suite.chainB) + path = ibctesting.NewPath(s.chainA, s.chainB) path.SetupClients() tc.malleate() @@ -319,16 +318,16 @@ func (suite *KeeperTestSuite) TestUpdateClient() { err := path.EndpointA.UpdateClient() if tc.expError == nil { - suite.Require().NoError(err) + s.Require().NoError(err) } else { - 
suite.Require().Error(err) - suite.Require().Contains(err.Error(), tc.expError.Error()) + s.Require().Error(err) + s.Require().Contains(err.Error(), tc.expError.Error()) } }) } } -func (suite *KeeperTestSuite) TestRecoverClient() { +func (s *KeeperTestSuite) TestRecoverClient() { var msg *clienttypes.MsgRecoverClient testCases := []struct { @@ -358,46 +357,46 @@ func (suite *KeeperTestSuite) TestRecoverClient() { } for _, tc := range testCases { - suite.Run(tc.name, func() { - suite.SetupTest() + s.Run(tc.name, func() { + s.SetupTest() - subjectPath := ibctesting.NewPath(suite.chainA, suite.chainB) + subjectPath := ibctesting.NewPath(s.chainA, s.chainB) subjectPath.SetupClients() subject := subjectPath.EndpointA.ClientID - subjectClientState := suite.chainA.GetClientState(subject) + subjectClientState := s.chainA.GetClientState(subject) - substitutePath := ibctesting.NewPath(suite.chainA, suite.chainB) + substitutePath := ibctesting.NewPath(s.chainA, s.chainB) substitutePath.SetupClients() substitute := substitutePath.EndpointA.ClientID // update substitute twice err := substitutePath.EndpointA.UpdateClient() - suite.Require().NoError(err) + s.Require().NoError(err) err = substitutePath.EndpointA.UpdateClient() - suite.Require().NoError(err) + s.Require().NoError(err) tmClientState, ok := subjectClientState.(*ibctm.ClientState) - suite.Require().True(ok) + s.Require().True(ok) tmClientState.FrozenHeight = tmClientState.LatestHeight - suite.chainA.App.GetIBCKeeper().ClientKeeper.SetClientState(suite.chainA.GetContext(), subject, tmClientState) + s.chainA.App.GetIBCKeeper().ClientKeeper.SetClientState(s.chainA.GetContext(), subject, tmClientState) - msg = clienttypes.NewMsgRecoverClient(suite.chainA.App.GetIBCKeeper().GetAuthority(), subject, substitute) + msg = clienttypes.NewMsgRecoverClient(s.chainA.App.GetIBCKeeper().GetAuthority(), subject, substitute) tc.malleate() - _, err = suite.chainA.App.GetIBCKeeper().RecoverClient(suite.chainA.GetContext(), msg) + _, err = s.chainA.App.GetIBCKeeper().RecoverClient(s.chainA.GetContext(), msg) if tc.expErr == nil { - suite.Require().NoError(err) + s.Require().NoError(err) // Assert that client status is now Active - lightClientModule, err := suite.chainA.App.GetIBCKeeper().ClientKeeper.Route(suite.chainA.GetContext(), subjectPath.EndpointA.ClientID) - suite.Require().NoError(err) - suite.Require().Equal(lightClientModule.Status(suite.chainA.GetContext(), subjectPath.EndpointA.ClientID), exported.Active) + lightClientModule, err := s.chainA.App.GetIBCKeeper().ClientKeeper.Route(s.chainA.GetContext(), subjectPath.EndpointA.ClientID) + s.Require().NoError(err) + s.Require().Equal(lightClientModule.Status(s.chainA.GetContext(), subjectPath.EndpointA.ClientID), exported.Active) } else { - suite.Require().Error(err) - suite.Require().ErrorIs(err, tc.expErr) + s.Require().Error(err) + s.Require().ErrorIs(err, tc.expErr) } }) } @@ -408,7 +407,7 @@ func (suite *KeeperTestSuite) TestRecoverClient() { // occurs. It test high level properties like ordering and basic sanity // checks. More rigorous testing of 'AcknowledgePacket' // can be found in the 04-channel/keeper/packet_test.go. 
-func (suite *KeeperTestSuite) TestHandleAcknowledgePacket() { +func (s *KeeperTestSuite) TestHandleAcknowledgePacket() { var ( packet channeltypes.Packet path *ibctesting.Path @@ -425,21 +424,21 @@ func (suite *KeeperTestSuite) TestHandleAcknowledgePacket() { path.Setup() sequence, err := path.EndpointA.SendPacket(timeoutHeight, 0, ibctesting.MockPacketData) - suite.Require().NoError(err) + s.Require().NoError(err) packet = channeltypes.NewPacket(ibctesting.MockPacketData, sequence, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, timeoutHeight, 0) err = path.EndpointB.RecvPacket(packet) - suite.Require().NoError(err) + s.Require().NoError(err) }, nil, false}, {"success: UNORDERED", func() { path.Setup() sequence, err := path.EndpointA.SendPacket(timeoutHeight, 0, ibctesting.MockPacketData) - suite.Require().NoError(err) + s.Require().NoError(err) packet = channeltypes.NewPacket(ibctesting.MockPacketData, sequence, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, timeoutHeight, 0) err = path.EndpointB.RecvPacket(packet) - suite.Require().NoError(err) + s.Require().NoError(err) }, nil, false}, {"success: UNORDERED acknowledge out of order packet", func() { // setup uses an UNORDERED channel @@ -448,11 +447,11 @@ func (suite *KeeperTestSuite) TestHandleAcknowledgePacket() { // attempts to acknowledge ack with sequence 10 without acknowledging ack with sequence 1 (removing packet commitment) for i := uint64(1); i < 10; i++ { sequence, err := path.EndpointA.SendPacket(timeoutHeight, 0, ibctesting.MockPacketData) - suite.Require().NoError(err) + s.Require().NoError(err) packet = channeltypes.NewPacket(ibctesting.MockPacketData, sequence, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, timeoutHeight, 0) err = path.EndpointB.RecvPacket(packet) - suite.Require().NoError(err) + s.Require().NoError(err) } }, nil, false}, {"failure: ORDERED acknowledge out of order packet", func() { @@ -462,22 +461,22 @@ func (suite *KeeperTestSuite) TestHandleAcknowledgePacket() { // attempts to acknowledge ack with sequence 10 without acknowledging ack with sequence 1 (removing packet commitment for i := uint64(1); i < 10; i++ { sequence, err := path.EndpointA.SendPacket(timeoutHeight, 0, ibctesting.MockPacketData) - suite.Require().NoError(err) + s.Require().NoError(err) packet = channeltypes.NewPacket(ibctesting.MockPacketData, sequence, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, timeoutHeight, 0) err = path.EndpointB.RecvPacket(packet) - suite.Require().NoError(err) + s.Require().NoError(err) } }, errors.New("packet sequence is out of order"), false}, {"channel does not exist", func() { // any non-nil value of packet is valid - suite.Require().NotNil(packet) + s.Require().NotNil(packet) }, errors.New("channel not found"), false}, {"packet not received", func() { path.Setup() sequence, err := path.EndpointA.SendPacket(timeoutHeight, 0, ibctesting.MockPacketData) - suite.Require().NoError(err) + s.Require().NoError(err) packet = channeltypes.NewPacket(ibctesting.MockPacketData, sequence, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, timeoutHeight, 0) }, errors.New("invalid proof"), false}, @@ -486,34 +485,34 @@ func (suite 
*KeeperTestSuite) TestHandleAcknowledgePacket() { path.Setup() sequence, err := path.EndpointA.SendPacket(timeoutHeight, 0, ibctesting.MockPacketData) - suite.Require().NoError(err) + s.Require().NoError(err) packet = channeltypes.NewPacket(ibctesting.MockPacketData, sequence, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, timeoutHeight, 0) err = path.EndpointB.RecvPacket(packet) - suite.Require().NoError(err) + s.Require().NoError(err) err = path.EndpointA.AcknowledgePacket(packet, ibctesting.MockAcknowledgement) - suite.Require().NoError(err) + s.Require().NoError(err) }, nil, true}, {"successful no-op: UNORDERED - packet already acknowledged (replay)", func() { path.Setup() sequence, err := path.EndpointA.SendPacket(timeoutHeight, 0, ibctesting.MockPacketData) - suite.Require().NoError(err) + s.Require().NoError(err) packet = channeltypes.NewPacket(ibctesting.MockPacketData, sequence, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, timeoutHeight, 0) err = path.EndpointB.RecvPacket(packet) - suite.Require().NoError(err) + s.Require().NoError(err) err = path.EndpointA.AcknowledgePacket(packet, ibctesting.MockAcknowledgement) - suite.Require().NoError(err) + s.Require().NoError(err) }, nil, true}, } for _, tc := range testCases { - suite.Run(tc.name, func() { - suite.SetupTest() // reset - path = ibctesting.NewPath(suite.chainA, suite.chainB) + s.Run(tc.name, func() { + s.SetupTest() // reset + path = ibctesting.NewPath(s.chainA, s.chainB) tc.malleate() @@ -526,34 +525,34 @@ func (suite *KeeperTestSuite) TestHandleAcknowledgePacket() { proof, proofHeight = path.EndpointB.QueryProof(packetKey) } - msg := channeltypes.NewMsgAcknowledgement(packet, ibcmock.MockAcknowledgement.Acknowledgement(), proof, proofHeight, suite.chainA.SenderAccount.GetAddress().String()) + msg := channeltypes.NewMsgAcknowledgement(packet, ibcmock.MockAcknowledgement.Acknowledgement(), proof, proofHeight, s.chainA.SenderAccount.GetAddress().String()) - ctx := suite.chainA.GetContext() - _, err := suite.chainA.App.GetIBCKeeper().Acknowledgement(ctx, msg) + ctx := s.chainA.GetContext() + _, err := s.chainA.App.GetIBCKeeper().Acknowledgement(ctx, msg) events := ctx.EventManager().Events() if tc.expError == nil { - suite.Require().NoError(err) + s.Require().NoError(err) // verify packet commitment was deleted on source chain - has := suite.chainA.App.GetIBCKeeper().ChannelKeeper.HasPacketCommitment(suite.chainA.GetContext(), packet.GetSourcePort(), packet.GetSourceChannel(), packet.GetSequence()) - suite.Require().False(has) + has := s.chainA.App.GetIBCKeeper().ChannelKeeper.HasPacketCommitment(s.chainA.GetContext(), packet.GetSourcePort(), packet.GetSourceChannel(), packet.GetSequence()) + s.Require().False(has) // replay should not error as it is treated as a no-op - _, err := suite.chainA.App.GetIBCKeeper().Acknowledgement(suite.chainA.GetContext(), msg) - suite.Require().NoError(err) + _, err := s.chainA.App.GetIBCKeeper().Acknowledgement(s.chainA.GetContext(), msg) + s.Require().NoError(err) if tc.replay { // context should not contain application events - suite.Require().NotContains(events, ibcmock.NewMockAckPacketEvent()) + s.Require().NotContains(events, ibcmock.NewMockAckPacketEvent()) } else { // context events should contain application events - suite.Require().Contains(events, ibcmock.NewMockAckPacketEvent()) + s.Require().Contains(events, 
ibcmock.NewMockAckPacketEvent()) } } else { - suite.Require().Error(err) - suite.Require().Contains(err.Error(), tc.expError.Error()) + s.Require().Error(err) + s.Require().Contains(err.Error(), tc.expError.Error()) } }) } @@ -564,7 +563,7 @@ func (suite *KeeperTestSuite) TestHandleAcknowledgePacket() { // high level properties like ordering and basic sanity checks. More // rigorous testing of 'TimeoutPacket' and 'TimeoutExecuted' can be found in // the 04-channel/keeper/timeout_test.go. -func (suite *KeeperTestSuite) TestHandleTimeoutPacket() { +func (s *KeeperTestSuite) TestHandleTimeoutPacket() { var ( packet channeltypes.Packet packetKey []byte @@ -581,16 +580,16 @@ func (suite *KeeperTestSuite) TestHandleTimeoutPacket() { path.SetChannelOrdered() path.Setup() - timeoutHeight := clienttypes.GetSelfHeight(suite.chainB.GetContext()) - timeoutTimestamp := uint64(suite.chainB.GetContext().BlockTime().UnixNano()) + timeoutHeight := clienttypes.GetSelfHeight(s.chainB.GetContext()) + timeoutTimestamp := uint64(s.chainB.GetContext().BlockTime().UnixNano()) // create packet commitment sequence, err := path.EndpointA.SendPacket(timeoutHeight, timeoutTimestamp, ibctesting.MockPacketData) - suite.Require().NoError(err) + s.Require().NoError(err) // need to update chainA client to prove missing ack err = path.EndpointA.UpdateClient() - suite.Require().NoError(err) + s.Require().NoError(err) packet = channeltypes.NewPacket(ibctesting.MockPacketData, sequence, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, timeoutHeight, timeoutTimestamp) packetKey = host.NextSequenceRecvKey(packet.GetDestPort(), packet.GetDestChannel()) @@ -598,16 +597,16 @@ func (suite *KeeperTestSuite) TestHandleTimeoutPacket() { {"success: UNORDERED", func() { path.Setup() - timeoutHeight := clienttypes.GetSelfHeight(suite.chainB.GetContext()) - timeoutTimestamp := uint64(suite.chainB.GetContext().BlockTime().UnixNano()) + timeoutHeight := clienttypes.GetSelfHeight(s.chainB.GetContext()) + timeoutTimestamp := uint64(s.chainB.GetContext().BlockTime().UnixNano()) // create packet commitment sequence, err := path.EndpointA.SendPacket(timeoutHeight, timeoutTimestamp, ibctesting.MockPacketData) - suite.Require().NoError(err) + s.Require().NoError(err) // need to update chainA client to prove missing ack err = path.EndpointA.UpdateClient() - suite.Require().NoError(err) + s.Require().NoError(err) packet = channeltypes.NewPacket(ibctesting.MockPacketData, sequence, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, timeoutHeight, timeoutTimestamp) packetKey = host.PacketReceiptKey(packet.GetDestPort(), packet.GetDestChannel(), packet.GetSequence()) @@ -619,17 +618,17 @@ func (suite *KeeperTestSuite) TestHandleTimeoutPacket() { // attempts to timeout the last packet sent without timing out the first packet // packet sequences begin at 1 for i := uint64(1); i < maxSequence; i++ { - timeoutHeight := clienttypes.GetSelfHeight(suite.chainB.GetContext()) + timeoutHeight := clienttypes.GetSelfHeight(s.chainB.GetContext()) // create packet commitment sequence, err := path.EndpointA.SendPacket(timeoutHeight, 0, ibctesting.MockPacketData) - suite.Require().NoError(err) + s.Require().NoError(err) packet = channeltypes.NewPacket(ibctesting.MockPacketData, sequence, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, 
timeoutHeight, 0) } err := path.EndpointA.UpdateClient() - suite.Require().NoError(err) + s.Require().NoError(err) packetKey = host.PacketReceiptKey(packet.GetDestPort(), packet.GetDestChannel(), packet.GetSequence()) }, nil, false}, @@ -640,23 +639,23 @@ func (suite *KeeperTestSuite) TestHandleTimeoutPacket() { // attempts to timeout the last packet sent without timing out the first packet // packet sequences begin at 1 for i := uint64(1); i < maxSequence; i++ { - timeoutHeight := clienttypes.GetSelfHeight(suite.chainB.GetContext()) + timeoutHeight := clienttypes.GetSelfHeight(s.chainB.GetContext()) // create packet commitment sequence, err := path.EndpointA.SendPacket(timeoutHeight, 0, ibctesting.MockPacketData) - suite.Require().NoError(err) + s.Require().NoError(err) packet = channeltypes.NewPacket(ibctesting.MockPacketData, sequence, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, timeoutHeight, 0) } err := path.EndpointA.UpdateClient() - suite.Require().NoError(err) + s.Require().NoError(err) packetKey = host.NextSequenceRecvKey(packet.GetDestPort(), packet.GetDestChannel()) }, nil, false}, {"channel does not exist", func() { // any non-nil value of packet is valid - suite.Require().NotNil(packet) + s.Require().NotNil(packet) packetKey = host.NextSequenceRecvKey(packet.GetDestPort(), packet.GetDestChannel()) }, errors.New("channel not found"), false}, @@ -668,9 +667,9 @@ func (suite *KeeperTestSuite) TestHandleTimeoutPacket() { } for _, tc := range testCases { - suite.Run(tc.name, func() { - suite.SetupTest() // reset - path = ibctesting.NewPath(suite.chainA, suite.chainB) + s.Run(tc.name, func() { + s.SetupTest() // reset + path = ibctesting.NewPath(s.chainA, s.chainB) tc.malleate() @@ -682,36 +681,35 @@ func (suite *KeeperTestSuite) TestHandleTimeoutPacket() { proof, proofHeight = path.EndpointB.QueryProof(packetKey) } - msg := channeltypes.NewMsgTimeout(packet, 1, proof, proofHeight, suite.chainA.SenderAccount.GetAddress().String()) + msg := channeltypes.NewMsgTimeout(packet, 1, proof, proofHeight, s.chainA.SenderAccount.GetAddress().String()) - ctx := suite.chainA.GetContext() - _, err := suite.chainA.App.GetIBCKeeper().Timeout(ctx, msg) + ctx := s.chainA.GetContext() + _, err := s.chainA.App.GetIBCKeeper().Timeout(ctx, msg) events := ctx.EventManager().Events() if tc.expErr == nil { - suite.Require().NoError(err) + s.Require().NoError(err) // replay should not return an error as it is treated as a no-op - _, err := suite.chainA.App.GetIBCKeeper().Timeout(suite.chainA.GetContext(), msg) - suite.Require().NoError(err) + _, err := s.chainA.App.GetIBCKeeper().Timeout(s.chainA.GetContext(), msg) + s.Require().NoError(err) // verify packet commitment was deleted on source chain - has := suite.chainA.App.GetIBCKeeper().ChannelKeeper.HasPacketCommitment(suite.chainA.GetContext(), packet.GetSourcePort(), packet.GetSourceChannel(), packet.GetSequence()) - suite.Require().False(has) + has := s.chainA.App.GetIBCKeeper().ChannelKeeper.HasPacketCommitment(s.chainA.GetContext(), packet.GetSourcePort(), packet.GetSourceChannel(), packet.GetSequence()) + s.Require().False(has) if tc.noop { // context should not contain application events - suite.Require().NotContains(events, ibcmock.NewMockTimeoutPacketEvent()) + s.Require().NotContains(events, ibcmock.NewMockTimeoutPacketEvent()) } else { // context should contain application events - suite.Require().Contains(events, ibcmock.NewMockTimeoutPacketEvent()) + 
s.Require().Contains(events, ibcmock.NewMockTimeoutPacketEvent()) } - } else { - suite.Require().Error(err) + s.Require().Error(err) - suite.Require().Contains(err.Error(), tc.expErr.Error()) + s.Require().Contains(err.Error(), tc.expErr.Error()) } }) } @@ -722,7 +720,7 @@ func (suite *KeeperTestSuite) TestHandleTimeoutPacket() { // commitment occurs. It tests high level properties like ordering and basic // sanity checks. More rigorous testing of 'TimeoutOnClose' and // 'TimeoutExecuted' can be found in the 04-channel/keeper/timeout_test.go. -func (suite *KeeperTestSuite) TestHandleTimeoutOnClosePacket() { +func (s *KeeperTestSuite) TestHandleTimeoutOnClosePacket() { var ( packet channeltypes.Packet packetKey []byte @@ -740,11 +738,11 @@ func (suite *KeeperTestSuite) TestHandleTimeoutOnClosePacket() { // create packet commitment sequence, err := path.EndpointA.SendPacket(timeoutHeight, 0, ibctesting.MockPacketData) - suite.Require().NoError(err) + s.Require().NoError(err) // need to update chainA client to prove missing ack err = path.EndpointA.UpdateClient() - suite.Require().NoError(err) + s.Require().NoError(err) packet = channeltypes.NewPacket(ibctesting.MockPacketData, sequence, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, timeoutHeight, 0) packetKey = host.NextSequenceRecvKey(packet.GetDestPort(), packet.GetDestChannel()) @@ -757,11 +755,11 @@ func (suite *KeeperTestSuite) TestHandleTimeoutOnClosePacket() { // create packet commitment sequence, err := path.EndpointA.SendPacket(timeoutHeight, 0, ibctesting.MockPacketData) - suite.Require().NoError(err) + s.Require().NoError(err) // need to update chainA client to prove missing ack err = path.EndpointA.UpdateClient() - suite.Require().NoError(err) + s.Require().NoError(err) packet = channeltypes.NewPacket(ibctesting.MockPacketData, sequence, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, timeoutHeight, 0) packetKey = host.PacketReceiptKey(packet.GetDestPort(), packet.GetDestChannel(), packet.GetSequence()) @@ -778,13 +776,13 @@ func (suite *KeeperTestSuite) TestHandleTimeoutOnClosePacket() { for i := uint64(1); i < maxSequence; i++ { // create packet commitment sequence, err := path.EndpointA.SendPacket(timeoutHeight, 0, ibctesting.MockPacketData) - suite.Require().NoError(err) + s.Require().NoError(err) packet = channeltypes.NewPacket(ibctesting.MockPacketData, sequence, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, timeoutHeight, 0) } err := path.EndpointA.UpdateClient() - suite.Require().NoError(err) + s.Require().NoError(err) packetKey = host.PacketReceiptKey(packet.GetDestPort(), packet.GetDestChannel(), packet.GetSequence()) @@ -800,13 +798,13 @@ func (suite *KeeperTestSuite) TestHandleTimeoutOnClosePacket() { for i := uint64(1); i < maxSequence; i++ { // create packet commitment sequence, err := path.EndpointA.SendPacket(timeoutHeight, 0, ibctesting.MockPacketData) - suite.Require().NoError(err) + s.Require().NoError(err) packet = channeltypes.NewPacket(ibctesting.MockPacketData, sequence, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, timeoutHeight, 0) } err := path.EndpointA.UpdateClient() - suite.Require().NoError(err) + s.Require().NoError(err) packetKey = host.NextSequenceRecvKey(packet.GetDestPort(), 
packet.GetDestChannel()) @@ -815,7 +813,7 @@ func (suite *KeeperTestSuite) TestHandleTimeoutOnClosePacket() { }, nil}, {"channel does not exist", func() { // any non-nil value of packet is valid - suite.Require().NotNil(packet) + s.Require().NotNil(packet) packetKey = host.NextSequenceRecvKey(packet.GetDestPort(), packet.GetDestChannel()) }, errors.New("channel not found")}, @@ -833,11 +831,11 @@ func (suite *KeeperTestSuite) TestHandleTimeoutOnClosePacket() { // create packet commitment sequence, err := path.EndpointA.SendPacket(timeoutHeight, 0, ibctesting.MockPacketData) - suite.Require().NoError(err) + s.Require().NoError(err) // need to update chainA client to prove missing ack err = path.EndpointA.UpdateClient() - suite.Require().NoError(err) + s.Require().NoError(err) packet = channeltypes.NewPacket(ibctesting.MockPacketData, sequence, path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, timeoutHeight, 0) packetKey = host.NextSequenceRecvKey(packet.GetDestPort(), packet.GetDestChannel()) @@ -845,41 +843,40 @@ func (suite *KeeperTestSuite) TestHandleTimeoutOnClosePacket() { } for _, tc := range testCases { - suite.Run(tc.name, func() { - suite.SetupTest() // reset - path = ibctesting.NewPath(suite.chainA, suite.chainB) + s.Run(tc.name, func() { + s.SetupTest() // reset + path = ibctesting.NewPath(s.chainA, s.chainB) tc.malleate() - proof, proofHeight := suite.chainB.QueryProof(packetKey) + proof, proofHeight := s.chainB.QueryProof(packetKey) channelKey := host.ChannelKey(path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID) - closedProof, _ := suite.chainB.QueryProof(channelKey) + closedProof, _ := s.chainB.QueryProof(channelKey) - msg := channeltypes.NewMsgTimeoutOnClose(packet, 1, proof, closedProof, proofHeight, suite.chainA.SenderAccount.GetAddress().String()) + msg := channeltypes.NewMsgTimeoutOnClose(packet, 1, proof, closedProof, proofHeight, s.chainA.SenderAccount.GetAddress().String()) - _, err := suite.chainA.App.GetIBCKeeper().TimeoutOnClose(suite.chainA.GetContext(), msg) + _, err := s.chainA.App.GetIBCKeeper().TimeoutOnClose(s.chainA.GetContext(), msg) if tc.expError == nil { - suite.Require().NoError(err) + s.Require().NoError(err) // replay should not return an error as it will be treated as a no-op - _, err := suite.chainA.App.GetIBCKeeper().TimeoutOnClose(suite.chainA.GetContext(), msg) - suite.Require().NoError(err) + _, err := s.chainA.App.GetIBCKeeper().TimeoutOnClose(s.chainA.GetContext(), msg) + s.Require().NoError(err) // verify packet commitment was deleted on source chain - has := suite.chainA.App.GetIBCKeeper().ChannelKeeper.HasPacketCommitment(suite.chainA.GetContext(), packet.GetSourcePort(), packet.GetSourceChannel(), packet.GetSequence()) - suite.Require().False(has) - + has := s.chainA.App.GetIBCKeeper().ChannelKeeper.HasPacketCommitment(s.chainA.GetContext(), packet.GetSourcePort(), packet.GetSourceChannel(), packet.GetSequence()) + s.Require().False(has) } else { - suite.Require().Error(err) - suite.Require().Contains(err.Error(), tc.expError.Error()) + s.Require().Error(err) + s.Require().Contains(err.Error(), tc.expError.Error()) } }) } } -func (suite *KeeperTestSuite) TestUpgradeClient() { +func (s *KeeperTestSuite) TestUpgradeClient() { var ( path *ibctesting.Path newChainID string @@ -906,29 +903,29 @@ func (suite *KeeperTestSuite) TestUpgradeClient() { } // last Height is at next block - lastHeight = clienttypes.NewHeight(0, 
uint64(suite.chainB.GetContext().BlockHeight()+1)) + lastHeight = clienttypes.NewHeight(0, uint64(s.chainB.GetContext().BlockHeight()+1)) - upgradedClientBz, err := clienttypes.MarshalClientState(suite.chainA.App.AppCodec(), upgradedClient) - suite.Require().NoError(err) - upgradedConsStateBz, err := clienttypes.MarshalConsensusState(suite.chainA.App.AppCodec(), upgradedConsState) - suite.Require().NoError(err) + upgradedClientBz, err := clienttypes.MarshalClientState(s.chainA.App.AppCodec(), upgradedClient) + s.Require().NoError(err) + upgradedConsStateBz, err := clienttypes.MarshalConsensusState(s.chainA.App.AppCodec(), upgradedConsState) + s.Require().NoError(err) // zero custom fields and store in upgrade store - suite.chainB.GetSimApp().UpgradeKeeper.SetUpgradedClient(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedClientBz) //nolint:errcheck // ignore error for testing - suite.chainB.GetSimApp().UpgradeKeeper.SetUpgradedConsensusState(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedConsStateBz) //nolint:errcheck // ignore error for testing + s.chainB.GetSimApp().UpgradeKeeper.SetUpgradedClient(s.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedClientBz) //nolint:errcheck // ignore error for testing + s.chainB.GetSimApp().UpgradeKeeper.SetUpgradedConsensusState(s.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedConsStateBz) //nolint:errcheck // ignore error for testing // commit upgrade store changes and update clients - suite.coordinator.CommitBlock(suite.chainB) + s.coordinator.CommitBlock(s.chainB) err = path.EndpointA.UpdateClient() - suite.Require().NoError(err) + s.Require().NoError(err) latestHeight := path.EndpointA.GetClientLatestHeight() - upgradeClientProof, _ := suite.chainB.QueryUpgradeProof(upgradetypes.UpgradedClientKey(int64(lastHeight.GetRevisionHeight())), latestHeight.GetRevisionHeight()) - upgradedConsensusStateProof, _ := suite.chainB.QueryUpgradeProof(upgradetypes.UpgradedConsStateKey(int64(lastHeight.GetRevisionHeight())), latestHeight.GetRevisionHeight()) + upgradeClientProof, _ := s.chainB.QueryUpgradeProof(upgradetypes.UpgradedClientKey(int64(lastHeight.GetRevisionHeight())), latestHeight.GetRevisionHeight()) + upgradedConsensusStateProof, _ := s.chainB.QueryUpgradeProof(upgradetypes.UpgradedConsStateKey(int64(lastHeight.GetRevisionHeight())), latestHeight.GetRevisionHeight()) msg, err = clienttypes.NewMsgUpgradeClient(path.EndpointA.ClientID, upgradedClient, upgradedConsState, - upgradeClientProof, upgradedConsensusStateProof, suite.chainA.SenderAccount.GetAddress().String()) - suite.Require().NoError(err) + upgradeClientProof, upgradedConsensusStateProof, s.chainA.SenderAccount.GetAddress().String()) + s.Require().NoError(err) }, expErr: nil, }, @@ -944,55 +941,54 @@ func (suite *KeeperTestSuite) TestUpgradeClient() { } // last Height is at next block - lastHeight = clienttypes.NewHeight(0, uint64(suite.chainB.GetContext().BlockHeight()+1)) + lastHeight = clienttypes.NewHeight(0, uint64(s.chainB.GetContext().BlockHeight()+1)) - upgradedClientBz, err := clienttypes.MarshalClientState(suite.chainA.App.AppCodec(), upgradedClient) - suite.Require().NoError(err) - upgradedConsStateBz, err := clienttypes.MarshalConsensusState(suite.chainA.App.AppCodec(), upgradedConsState) - suite.Require().NoError(err) + upgradedClientBz, err := clienttypes.MarshalClientState(s.chainA.App.AppCodec(), upgradedClient) + s.Require().NoError(err) + upgradedConsStateBz, err := 
clienttypes.MarshalConsensusState(s.chainA.App.AppCodec(), upgradedConsState) + s.Require().NoError(err) // zero custom fields and store in upgrade store - suite.chainB.GetSimApp().UpgradeKeeper.SetUpgradedClient(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedClientBz) //nolint:errcheck // ignore error for testing - suite.chainB.GetSimApp().UpgradeKeeper.SetUpgradedConsensusState(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedConsStateBz) //nolint:errcheck // ignore error for testing + s.chainB.GetSimApp().UpgradeKeeper.SetUpgradedClient(s.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedClientBz) //nolint:errcheck // ignore error for testing + s.chainB.GetSimApp().UpgradeKeeper.SetUpgradedConsensusState(s.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedConsStateBz) //nolint:errcheck // ignore error for testing // commit upgrade store changes and update clients - suite.coordinator.CommitBlock(suite.chainB) + s.coordinator.CommitBlock(s.chainB) err = path.EndpointA.UpdateClient() - suite.Require().NoError(err) + s.Require().NoError(err) - msg, err = clienttypes.NewMsgUpgradeClient(path.EndpointA.ClientID, upgradedClient, upgradedConsState, nil, nil, suite.chainA.SenderAccount.GetAddress().String()) - suite.Require().NoError(err) + msg, err = clienttypes.NewMsgUpgradeClient(path.EndpointA.ClientID, upgradedClient, upgradedConsState, nil, nil, s.chainA.SenderAccount.GetAddress().String()) + s.Require().NoError(err) }, expErr: errors.New("invalid merkle proof"), }, } for _, tc := range cases { - - path = ibctesting.NewPath(suite.chainA, suite.chainB) + path = ibctesting.NewPath(s.chainA, s.chainB) path.SetupClients() var err error clientState, ok := path.EndpointA.GetClientState().(*ibctm.ClientState) - suite.Require().True(ok) + s.Require().True(ok) revisionNumber := clienttypes.ParseChainID(clientState.ChainId) newChainID, err = clienttypes.SetRevisionNumber(clientState.ChainId, revisionNumber+1) - suite.Require().NoError(err) + s.Require().NoError(err) newClientHeight = clienttypes.NewHeight(revisionNumber+1, clientState.LatestHeight.GetRevisionHeight()+1) tc.setup() - ctx := suite.chainA.GetContext() - _, err = suite.chainA.GetSimApp().GetIBCKeeper().UpgradeClient(ctx, msg) + ctx := s.chainA.GetContext() + _, err = s.chainA.GetSimApp().GetIBCKeeper().UpgradeClient(ctx, msg) if tc.expErr == nil { - suite.Require().NoError(err, "upgrade handler failed on valid case: %s", tc.name) - newClient, ok := suite.chainA.App.GetIBCKeeper().ClientKeeper.GetClientState(suite.chainA.GetContext(), path.EndpointA.ClientID) - suite.Require().True(ok) + s.Require().NoError(err, "upgrade handler failed on valid case: %s", tc.name) + newClient, ok := s.chainA.App.GetIBCKeeper().ClientKeeper.GetClientState(s.chainA.GetContext(), path.EndpointA.ClientID) + s.Require().True(ok) newChainSpecifiedClient := newClient.(*ibctm.ClientState).ZeroCustomFields() - suite.Require().Equal(upgradedClient, newChainSpecifiedClient) + s.Require().Equal(upgradedClient, newChainSpecifiedClient) expectedEvents := sdk.Events{ sdk.NewEvent( @@ -1004,16 +1000,16 @@ func (suite *KeeperTestSuite) TestUpgradeClient() { }.ToABCIEvents() expectedEvents = sdk.MarkEventsToIndex(expectedEvents, map[string]struct{}{}) - ibctesting.AssertEvents(&suite.Suite, expectedEvents, ctx.EventManager().Events().ToABCIEvents()) + ibctesting.AssertEvents(&s.Suite, expectedEvents, ctx.EventManager().Events().ToABCIEvents()) } else { - suite.Require().Error(err, 
"upgrade handler passed on invalid case: %s", tc.name) - suite.Require().Contains(err.Error(), tc.expErr.Error()) + s.Require().Error(err, "upgrade handler passed on invalid case: %s", tc.name) + s.Require().Contains(err.Error(), tc.expErr.Error()) } } } // TestIBCSoftwareUpgrade tests the IBCSoftwareUpgrade rpc handler -func (suite *KeeperTestSuite) TestIBCSoftwareUpgrade() { +func (s *KeeperTestSuite) TestIBCSoftwareUpgrade() { var msg *clienttypes.MsgIBCSoftwareUpgrade testCases := []struct { name string @@ -1028,7 +1024,7 @@ func (suite *KeeperTestSuite) TestIBCSoftwareUpgrade() { { "failure: invalid authority address", func() { - msg.Signer = suite.chainA.SenderAccount.GetAddress().String() + msg.Signer = s.chainA.SenderAccount.GetAddress().String() }, ibcerrors.ErrUnauthorized, }, @@ -1049,17 +1045,17 @@ func (suite *KeeperTestSuite) TestIBCSoftwareUpgrade() { } for _, tc := range testCases { - suite.Run(tc.name, func() { - path := ibctesting.NewPath(suite.chainA, suite.chainB) + s.Run(tc.name, func() { + path := ibctesting.NewPath(s.chainA, s.chainB) path.SetupClients() - validAuthority := suite.chainA.App.GetIBCKeeper().GetAuthority() + validAuthority := s.chainA.App.GetIBCKeeper().GetAuthority() plan := upgradetypes.Plan{ Name: "upgrade IBC clients", Height: 1000, } // update trusting period clientState, ok := path.EndpointB.GetClientState().(*ibctm.ClientState) - suite.Require().True(ok) + s.Require().True(ok) clientState.TrustingPeriod += 100 var err error @@ -1069,35 +1065,35 @@ func (suite *KeeperTestSuite) TestIBCSoftwareUpgrade() { clientState, ) - suite.Require().NoError(err) + s.Require().NoError(err) tc.malleate() - _, err = suite.chainA.App.GetIBCKeeper().IBCSoftwareUpgrade(suite.chainA.GetContext(), msg) + _, err = s.chainA.App.GetIBCKeeper().IBCSoftwareUpgrade(s.chainA.GetContext(), msg) if tc.expError == nil { - suite.Require().NoError(err) + s.Require().NoError(err) // upgrade plan is stored - storedPlan, err := suite.chainA.GetSimApp().UpgradeKeeper.GetUpgradePlan(suite.chainA.GetContext()) - suite.Require().NoError(err) - suite.Require().Equal(plan, storedPlan) + storedPlan, err := s.chainA.GetSimApp().UpgradeKeeper.GetUpgradePlan(s.chainA.GetContext()) + s.Require().NoError(err) + s.Require().Equal(plan, storedPlan) // upgraded client state is stored - bz, err := suite.chainA.GetSimApp().UpgradeKeeper.GetUpgradedClient(suite.chainA.GetContext(), plan.Height) - suite.Require().NoError(err) - upgradedClientState, err := clienttypes.UnmarshalClientState(suite.chainA.App.AppCodec(), bz) - suite.Require().NoError(err) - suite.Require().Equal(clientState.ZeroCustomFields(), upgradedClientState) + bz, err := s.chainA.GetSimApp().UpgradeKeeper.GetUpgradedClient(s.chainA.GetContext(), plan.Height) + s.Require().NoError(err) + upgradedClientState, err := clienttypes.UnmarshalClientState(s.chainA.App.AppCodec(), bz) + s.Require().NoError(err) + s.Require().Equal(clientState.ZeroCustomFields(), upgradedClientState) } else { - suite.Require().True(errors.Is(err, tc.expError)) + s.Require().True(errors.Is(err, tc.expError)) } }) } } // TestUpdateClientParams tests the UpdateClientParams rpc handler -func (suite *KeeperTestSuite) TestUpdateClientParams() { - signer := suite.chainA.App.GetIBCKeeper().GetAuthority() +func (s *KeeperTestSuite) TestUpdateClientParams() { + signer := s.chainA.App.GetIBCKeeper().GetAuthority() testCases := []struct { name string msg *clienttypes.MsgUpdateParams @@ -1131,24 +1127,24 @@ func (suite *KeeperTestSuite) TestUpdateClientParams() { } for _, 
tc := range testCases { - suite.Run(tc.name, func() { - suite.SetupTest() - _, err := suite.chainA.App.GetIBCKeeper().UpdateClientParams(suite.chainA.GetContext(), tc.msg) + s.Run(tc.name, func() { + s.SetupTest() + _, err := s.chainA.App.GetIBCKeeper().UpdateClientParams(s.chainA.GetContext(), tc.msg) if tc.expError == nil { - suite.Require().NoError(err) - p := suite.chainA.App.GetIBCKeeper().ClientKeeper.GetParams(suite.chainA.GetContext()) - suite.Require().Equal(tc.msg.Params, p) + s.Require().NoError(err) + p := s.chainA.App.GetIBCKeeper().ClientKeeper.GetParams(s.chainA.GetContext()) + s.Require().Equal(tc.msg.Params, p) } else { - suite.Require().Error(err) - suite.Require().Contains(err.Error(), tc.expError.Error()) + s.Require().Error(err) + s.Require().Contains(err.Error(), tc.expError.Error()) } }) } } // TestUpdateConnectionParams tests the UpdateConnectionParams rpc handler -func (suite *KeeperTestSuite) TestUpdateConnectionParams() { - signer := suite.chainA.App.GetIBCKeeper().GetAuthority() +func (s *KeeperTestSuite) TestUpdateConnectionParams() { + signer := s.chainA.App.GetIBCKeeper().GetAuthority() testCases := []struct { name string msg *connectiontypes.MsgUpdateParams @@ -1182,22 +1178,22 @@ func (suite *KeeperTestSuite) TestUpdateConnectionParams() { } for _, tc := range testCases { - suite.Run(tc.name, func() { - suite.SetupTest() - _, err := suite.chainA.App.GetIBCKeeper().UpdateConnectionParams(suite.chainA.GetContext(), tc.msg) + s.Run(tc.name, func() { + s.SetupTest() + _, err := s.chainA.App.GetIBCKeeper().UpdateConnectionParams(s.chainA.GetContext(), tc.msg) if tc.expErr == nil { - suite.Require().NoError(err) - p := suite.chainA.App.GetIBCKeeper().ConnectionKeeper.GetParams(suite.chainA.GetContext()) - suite.Require().Equal(tc.msg.Params, p) + s.Require().NoError(err) + p := s.chainA.App.GetIBCKeeper().ConnectionKeeper.GetParams(s.chainA.GetContext()) + s.Require().Equal(tc.msg.Params, p) } else { - suite.Require().Error(err) - suite.Require().Contains(err.Error(), tc.expErr.Error()) + s.Require().Error(err) + s.Require().Contains(err.Error(), tc.expErr.Error()) } }) } } -func (suite *KeeperTestSuite) TestUpdateClientConfig() { +func (s *KeeperTestSuite) TestUpdateClientConfig() { var ( path *ibctesting.Path signer string @@ -1211,40 +1207,40 @@ func (suite *KeeperTestSuite) TestUpdateClientConfig() { { "success: valid authority and default config", func() { - signer = suite.chainA.App.GetIBCKeeper().GetAuthority() + signer = s.chainA.App.GetIBCKeeper().GetAuthority() }, nil, }, { "success: valid creator and default config", func() { - signer = suite.chainA.App.GetIBCKeeper().ClientKeeper.GetClientCreator(suite.chainA.GetContext(), path.EndpointA.ClientID).String() + signer = s.chainA.App.GetIBCKeeper().ClientKeeper.GetClientCreator(s.chainA.GetContext(), path.EndpointA.ClientID).String() }, nil, }, { "success: valid authority and custom config", func() { - signer = suite.chainA.App.GetIBCKeeper().GetAuthority() - config = clientv2types.NewConfig(suite.chainB.SenderAccount.String(), suite.chainA.SenderAccount.String()) + signer = s.chainA.App.GetIBCKeeper().GetAuthority() + config = clientv2types.NewConfig(s.chainB.SenderAccount.String(), s.chainA.SenderAccount.String()) }, nil, }, { "success: valid creator and default config", func() { - signer = suite.chainA.App.GetIBCKeeper().ClientKeeper.GetClientCreator(suite.chainA.GetContext(), path.EndpointA.ClientID).String() - config = clientv2types.NewConfig(suite.chainB.SenderAccount.String(), 
suite.chainA.SenderAccount.String()) + signer = s.chainA.App.GetIBCKeeper().ClientKeeper.GetClientCreator(s.chainA.GetContext(), path.EndpointA.ClientID).String() + config = clientv2types.NewConfig(s.chainB.SenderAccount.String(), s.chainA.SenderAccount.String()) }, nil, }, { "success: valid creator and setting config to empty after it has been set", func() { - signer = suite.chainA.App.GetIBCKeeper().ClientKeeper.GetClientCreator(suite.chainA.GetContext(), path.EndpointA.ClientID).String() - config = clientv2types.NewConfig(suite.chainB.SenderAccount.String(), suite.chainA.SenderAccount.String()) - _, err := suite.chainA.App.GetIBCKeeper().UpdateClientConfig(suite.chainA.GetContext(), clientv2types.NewMsgUpdateClientConfig(path.EndpointA.ClientID, signer, config)) - suite.Require().NoError(err) + signer = s.chainA.App.GetIBCKeeper().ClientKeeper.GetClientCreator(s.chainA.GetContext(), path.EndpointA.ClientID).String() + config = clientv2types.NewConfig(s.chainB.SenderAccount.String(), s.chainA.SenderAccount.String()) + _, err := s.chainA.App.GetIBCKeeper().UpdateClientConfig(s.chainA.GetContext(), clientv2types.NewMsgUpdateClientConfig(path.EndpointA.ClientID, signer, config)) + s.Require().NoError(err) config = clientv2types.DefaultConfig() }, nil, @@ -1252,28 +1248,28 @@ func (suite *KeeperTestSuite) TestUpdateClientConfig() { { "success: valid creator and setting config to different config after it has been set", func() { - signer = suite.chainA.App.GetIBCKeeper().ClientKeeper.GetClientCreator(suite.chainA.GetContext(), path.EndpointA.ClientID).String() - config = clientv2types.NewConfig(suite.chainA.SenderAccount.String()) - _, err := suite.chainA.App.GetIBCKeeper().UpdateClientConfig(suite.chainA.GetContext(), clientv2types.NewMsgUpdateClientConfig(path.EndpointA.ClientID, signer, config)) - suite.Require().NoError(err) - config = clientv2types.NewConfig(suite.chainB.SenderAccount.String(), suite.chainA.SenderAccount.String()) + signer = s.chainA.App.GetIBCKeeper().ClientKeeper.GetClientCreator(s.chainA.GetContext(), path.EndpointA.ClientID).String() + config = clientv2types.NewConfig(s.chainA.SenderAccount.String()) + _, err := s.chainA.App.GetIBCKeeper().UpdateClientConfig(s.chainA.GetContext(), clientv2types.NewMsgUpdateClientConfig(path.EndpointA.ClientID, signer, config)) + s.Require().NoError(err) + config = clientv2types.NewConfig(s.chainB.SenderAccount.String(), s.chainA.SenderAccount.String()) }, nil, }, { "failure: invalid signer", func() { - signer = suite.chainB.SenderAccount.GetAddress().String() - config = clientv2types.NewConfig(suite.chainB.SenderAccount.String()) + signer = s.chainB.SenderAccount.GetAddress().String() + config = clientv2types.NewConfig(s.chainB.SenderAccount.String()) }, ibcerrors.ErrUnauthorized, }, } for _, tc := range testCases { - suite.Run(tc.name, func() { - suite.SetupTest() - path = ibctesting.NewPath(suite.chainA, suite.chainB) + s.Run(tc.name, func() { + s.SetupTest() + path = ibctesting.NewPath(s.chainA, s.chainB) path.SetupClients() config = clientv2types.DefaultConfig() @@ -1281,21 +1277,21 @@ func (suite *KeeperTestSuite) TestUpdateClientConfig() { tc.malleate() msg := clientv2types.NewMsgUpdateClientConfig(path.EndpointA.ClientID, signer, config) - _, err := suite.chainA.App.GetIBCKeeper().UpdateClientConfig(suite.chainA.GetContext(), msg) + _, err := s.chainA.App.GetIBCKeeper().UpdateClientConfig(s.chainA.GetContext(), msg) if tc.expError == nil { - suite.Require().NoError(err) - c := 
suite.chainA.App.GetIBCKeeper().ClientV2Keeper.GetConfig(suite.chainA.GetContext(), path.EndpointA.ClientID) - suite.Require().Equal(config, c) + s.Require().NoError(err) + c := s.chainA.App.GetIBCKeeper().ClientV2Keeper.GetConfig(s.chainA.GetContext(), path.EndpointA.ClientID) + s.Require().Equal(config, c) } else { - suite.Require().Error(err) - suite.Require().Contains(err.Error(), tc.expError.Error()) + s.Require().Error(err) + s.Require().Contains(err.Error(), tc.expError.Error()) } }) } } // TestDeleteClientCreator tests the DeleteClientCreator message handler -func (suite *KeeperTestSuite) TestDeleteClientCreator() { +func (s *KeeperTestSuite) TestDeleteClientCreator() { var ( path *ibctesting.Path clientID string @@ -1310,7 +1306,7 @@ func (suite *KeeperTestSuite) TestDeleteClientCreator() { { "success: valid creator deletes itself", func() { - creator := suite.chainA.App.GetIBCKeeper().ClientKeeper.GetClientCreator(suite.chainA.GetContext(), clientID) + creator := s.chainA.App.GetIBCKeeper().ClientKeeper.GetClientCreator(s.chainA.GetContext(), clientID) msg = clienttypes.NewMsgDeleteClientCreator(clientID, creator.String()) }, nil, @@ -1318,7 +1314,7 @@ func (suite *KeeperTestSuite) TestDeleteClientCreator() { { "success: valid authority deletes client creator", func() { - msg = clienttypes.NewMsgDeleteClientCreator(clientID, suite.chainA.App.GetIBCKeeper().GetAuthority()) + msg = clienttypes.NewMsgDeleteClientCreator(clientID, s.chainA.App.GetIBCKeeper().GetAuthority()) }, nil, }, @@ -1326,10 +1322,10 @@ func (suite *KeeperTestSuite) TestDeleteClientCreator() { "failure: deleting a client creator that was already deleted", func() { // First delete the creator - authority := suite.chainA.App.GetIBCKeeper().GetAuthority() + authority := s.chainA.App.GetIBCKeeper().GetAuthority() deleteMsg := clienttypes.NewMsgDeleteClientCreator(clientID, authority) - _, err := suite.chainA.App.GetIBCKeeper().DeleteClientCreator(suite.chainA.GetContext(), deleteMsg) - suite.Require().NoError(err) + _, err := s.chainA.App.GetIBCKeeper().DeleteClientCreator(s.chainA.GetContext(), deleteMsg) + s.Require().NoError(err) // Now try to delete it again msg = clienttypes.NewMsgDeleteClientCreator(clientID, authority) @@ -1339,39 +1335,39 @@ func (suite *KeeperTestSuite) TestDeleteClientCreator() { { "failure: unauthorized signer - not creator or authority", func() { - msg = clienttypes.NewMsgDeleteClientCreator(clientID, suite.chainB.SenderAccount.GetAddress().String()) + msg = clienttypes.NewMsgDeleteClientCreator(clientID, s.chainB.SenderAccount.GetAddress().String()) }, ibcerrors.ErrUnauthorized, }, { "failure: client ID does not exist", func() { - msg = clienttypes.NewMsgDeleteClientCreator("nonexistentclient", suite.chainA.App.GetIBCKeeper().GetAuthority()) + msg = clienttypes.NewMsgDeleteClientCreator("nonexistentclient", s.chainA.App.GetIBCKeeper().GetAuthority()) }, ibcerrors.ErrNotFound, }, } for _, tc := range testCases { - suite.Run(tc.name, func() { - suite.SetupTest() - path = ibctesting.NewPath(suite.chainA, suite.chainB) + s.Run(tc.name, func() { + s.SetupTest() + path = ibctesting.NewPath(s.chainA, s.chainB) path.SetupClients() clientID = path.EndpointA.ClientID tc.malleate() - _, err := suite.chainA.App.GetIBCKeeper().DeleteClientCreator(suite.chainA.GetContext(), msg) + _, err := s.chainA.App.GetIBCKeeper().DeleteClientCreator(s.chainA.GetContext(), msg) if tc.expError == nil { - suite.Require().NoError(err) + s.Require().NoError(err) // Verify creator has been deleted - creator := 
suite.chainA.App.GetIBCKeeper().ClientKeeper.GetClientCreator(suite.chainA.GetContext(), clientID) - suite.Require().Nil(creator) + creator := s.chainA.App.GetIBCKeeper().ClientKeeper.GetClientCreator(s.chainA.GetContext(), clientID) + s.Require().Nil(creator) } else { - suite.Require().Error(err) - suite.Require().ErrorIs(err, tc.expError) + s.Require().Error(err) + s.Require().ErrorIs(err, tc.expError) } }) } diff --git a/modules/core/migrations/v7/genesis_test.go b/modules/core/migrations/v7/genesis_test.go index 8e634e13a4d..a31e1aea954 100644 --- a/modules/core/migrations/v7/genesis_test.go +++ b/modules/core/migrations/v7/genesis_test.go @@ -36,33 +36,33 @@ func TestMigrationsV7TestSuite(t *testing.T) { } // SetupTest creates a coordinator with 2 test chains. -func (suite *MigrationsV7TestSuite) SetupTest() { - suite.coordinator = ibctesting.NewCoordinator(suite.T(), 2) - suite.chainA = suite.coordinator.GetChain(ibctesting.GetChainID(1)) - suite.chainB = suite.coordinator.GetChain(ibctesting.GetChainID(2)) +func (s *MigrationsV7TestSuite) SetupTest() { + s.coordinator = ibctesting.NewCoordinator(s.T(), 2) + s.chainA = s.coordinator.GetChain(ibctesting.GetChainID(1)) + s.chainB = s.coordinator.GetChain(ibctesting.GetChainID(2)) } // NOTE: this test is mainly copied from 02-client/migrations/v7/genesis_test.go -func (suite *MigrationsV7TestSuite) TestMigrateGenesisSolomachine() { +func (s *MigrationsV7TestSuite) TestMigrateGenesisSolomachine() { // create tendermint clients for range 3 { - path := ibctesting.NewPath(suite.chainA, suite.chainB) + path := ibctesting.NewPath(s.chainA, s.chainB) path.SetupClients() err := path.EndpointA.UpdateClient() - suite.Require().NoError(err) + s.Require().NoError(err) // update a second time to add more state err = path.EndpointA.UpdateClient() - suite.Require().NoError(err) + s.Require().NoError(err) } // create multiple legacy solo machine clients - solomachine := ibctesting.NewSolomachine(suite.T(), suite.chainA.Codec, ibctesting.DefaultSolomachineClientID, "testing", 1) - solomachineMulti := ibctesting.NewSolomachine(suite.T(), suite.chainA.Codec, "06-solomachine-1", "testing", 4) + solomachine := ibctesting.NewSolomachine(s.T(), s.chainA.Codec, ibctesting.DefaultSolomachineClientID, "testing", 1) + solomachineMulti := ibctesting.NewSolomachine(s.T(), s.chainA.Codec, "06-solomachine-1", "testing", 4) - clientGenState := ibcclient.ExportGenesis(suite.chainA.GetContext(), suite.chainA.App.GetIBCKeeper().ClientKeeper) + clientGenState := ibcclient.ExportGenesis(s.chainA.GetContext(), s.chainA.App.GetIBCKeeper().ClientKeeper) // manually generate old proto buf definitions and set in genesis // NOTE: we cannot use 'ExportGenesis' for the solo machines since we are @@ -85,8 +85,8 @@ func (suite *MigrationsV7TestSuite) TestMigrateGenesisSolomachine() { // set client state protoAny, err := codectypes.NewAnyWithValue(legacyClientState) - suite.Require().NoError(err) - suite.Require().NotNil(protoAny) + s.Require().NoError(err) + s.Require().NotNil(protoAny) clients = append(clients, clienttypes.IdentifiedClientState{ ClientId: sm.ClientID, @@ -94,27 +94,27 @@ func (suite *MigrationsV7TestSuite) TestMigrateGenesisSolomachine() { }) // set in store for ease of determining expected genesis - clientStore := suite.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(suite.chainA.GetContext(), sm.ClientID) - cdc, ok := suite.chainA.App.AppCodec().(*codec.ProtoCodec) - suite.Require().True(ok) + clientStore := 
s.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(s.chainA.GetContext(), sm.ClientID) + cdc, ok := s.chainA.App.AppCodec().(*codec.ProtoCodec) + s.Require().True(ok) clientv7.RegisterInterfaces(cdc.InterfaceRegistry()) bz, err := cdc.MarshalInterface(legacyClientState) - suite.Require().NoError(err) + s.Require().NoError(err) clientStore.Set(host.ClientStateKey(), bz) protoAny, err = codectypes.NewAnyWithValue(legacyClientState.ConsensusState) - suite.Require().NoError(err) - suite.Require().NotNil(protoAny) + s.Require().NoError(err) + s.Require().NotNil(protoAny) // obtain marshalled bytes to set in client store bz, err = cdc.MarshalInterface(legacyClientState.ConsensusState) - suite.Require().NoError(err) + s.Require().NoError(err) var consensusStates []clienttypes.ConsensusStateWithHeight // set consensus states in store and genesis - for i := uint64(0); i < 10; i++ { + for i := range uint64(10) { height := clienttypes.NewHeight(1, i) clientStore.Set(host.ConsensusStateKey(height), bz) consensusStates = append(consensusStates, clienttypes.ConsensusStateWithHeight{ @@ -135,12 +135,12 @@ func (suite *MigrationsV7TestSuite) TestMigrateGenesisSolomachine() { // migrate store get expected genesis // store migration and genesis migration should produce identical results // NOTE: tendermint clients are not pruned in genesis so the test should not have expired tendermint clients - err := clientv7.MigrateStore(suite.chainA.GetContext(), runtime.NewKVStoreService(suite.chainA.GetSimApp().GetKey(ibcexported.StoreKey)), suite.chainA.App.AppCodec(), suite.chainA.GetSimApp().IBCKeeper.ClientKeeper) - suite.Require().NoError(err) - expectedClientGenState := ibcclient.ExportGenesis(suite.chainA.GetContext(), suite.chainA.App.GetIBCKeeper().ClientKeeper) + err := clientv7.MigrateStore(s.chainA.GetContext(), runtime.NewKVStoreService(s.chainA.GetSimApp().GetKey(ibcexported.StoreKey)), s.chainA.App.AppCodec(), s.chainA.GetSimApp().IBCKeeper.ClientKeeper) + s.Require().NoError(err) + expectedClientGenState := ibcclient.ExportGenesis(s.chainA.GetContext(), s.chainA.App.GetIBCKeeper().ClientKeeper) - cdc, ok := suite.chainA.App.AppCodec().(*codec.ProtoCodec) - suite.Require().True(ok) + cdc, ok := s.chainA.App.AppCodec().(*codec.ProtoCodec) + s.Require().True(ok) // NOTE: these lines are added in comparison to 02-client/migrations/v7/genesis_test.go // generate appState with old ibc genesis state @@ -154,15 +154,15 @@ func (suite *MigrationsV7TestSuite) TestMigrateGenesisSolomachine() { // NOTE: genesis time isn't updated since we aren't testing for tendermint consensus state pruning migrated, err := v7.MigrateGenesis(appState, cdc) - suite.Require().NoError(err) + s.Require().NoError(err) expectedAppState := genutiltypes.AppMap{} expectedIBCGenState := types.DefaultGenesisState() expectedIBCGenState.ClientGenesis = expectedClientGenState bz, err := cdc.MarshalJSON(expectedIBCGenState) - suite.Require().NoError(err) + s.Require().NoError(err) expectedAppState[ibcexported.ModuleName] = bz - suite.Require().Equal(expectedAppState, migrated) + s.Require().Equal(expectedAppState, migrated) } diff --git a/modules/core/module.go b/modules/core/module.go index a76a6d70d1c..83c70eda2d1 100644 --- a/modules/core/module.go +++ b/modules/core/module.go @@ -62,8 +62,10 @@ func (AppModule) IsOnePerModuleType() {} // IsAppModule implements the appmodule.AppModule interface. func (AppModule) IsAppModule() {} -// RegisterLegacyAminoCodec does nothing. IBC does not support amino. 
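// Aside (illustrative sketch, not part of the upstream patch): the loop rewrites in the
// migration test above, e.g. `for range 3 { ... }` and `for i := range uint64(10) { ... }`,
// rely on Go 1.22's range-over-integer form. A minimal, self-contained equivalence check,
// with purely hypothetical loop bodies:
package main

import "fmt"

func main() {
	// `for i := range n` walks i over 0, 1, ..., n-1, matching the classic
	// `for i := uint64(0); i < n; i++` counter loop it replaces; i takes n's integer type.
	for i := range uint64(10) {
		fmt.Println("consensus height index:", i)
	}

	// When the index is unused, `for range n` simply repeats the body n times.
	for range 3 {
		fmt.Println("set up one client pair")
	}
}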
-func (AppModuleBasic) RegisterLegacyAminoCodec(*codec.LegacyAmino) {} +// RegisterLegacyAminoCodec implements AppModuleBasic interface. +func (AppModuleBasic) RegisterLegacyAminoCodec(cdc *codec.LegacyAmino) { + clienttypes.RegisterLegacyAminoCodec(cdc) +} // DefaultGenesis returns default genesis state as raw bytes for the ibc // module. @@ -148,26 +150,6 @@ func (am AppModule) RegisterServices(cfg module.Configurator) { channeltypes.RegisterQueryServer(cfg.QueryServer(), channelkeeper.NewQueryServer(am.keeper.ChannelKeeper)) channeltypesv2.RegisterQueryServer(cfg.QueryServer(), channelkeeperv2.NewQueryServer(am.keeper.ChannelKeeperV2)) - clientMigrator := clientkeeper.NewMigrator(am.keeper.ClientKeeper) - if err := cfg.RegisterMigration(exported.ModuleName, 2, clientMigrator.Migrate2to3); err != nil { - panic(err) - } - - connectionMigrator := connectionkeeper.NewMigrator(am.keeper.ConnectionKeeper) - if err := cfg.RegisterMigration(exported.ModuleName, 3, connectionMigrator.Migrate3to4); err != nil { - panic(err) - } - - if err := cfg.RegisterMigration(exported.ModuleName, 4, func(ctx sdk.Context) error { - if err := clientMigrator.MigrateParams(ctx); err != nil { - return err - } - - return connectionMigrator.MigrateParams(ctx) - }); err != nil { - panic(err) - } - // This upgrade used to just add default params, since we have deleted it (in consensus version 8 - ibc-go v10), // we just return directly to increment the ConsensusVersion as expected if err := cfg.RegisterMigration(exported.ModuleName, 5, func(_ sdk.Context) error { @@ -176,6 +158,7 @@ func (am AppModule) RegisterServices(cfg module.Configurator) { panic(err) } + clientMigrator := clientkeeper.NewMigrator(am.keeper.ClientKeeper) if err := cfg.RegisterMigration(exported.ModuleName, 6, clientMigrator.MigrateToStatelessLocalhost); err != nil { panic(err) } @@ -192,7 +175,7 @@ func (am AppModule) InitGenesis(ctx sdk.Context, cdc codec.JSONCodec, bz json.Ra var gs types.GenesisState err := cdc.UnmarshalJSON(bz, &gs) if err != nil { - panic(fmt.Errorf("failed to unmarshal %s genesis state: %s", exported.ModuleName, err)) + panic(fmt.Errorf("failed to unmarshal %s genesis state: %w", exported.ModuleName, err)) } InitGenesis(ctx, *am.keeper, &gs) } diff --git a/modules/core/simulation/proposals_test.go b/modules/core/simulation/proposals_test.go index 4964643ac4b..8e61ca7d704 100644 --- a/modules/core/simulation/proposals_test.go +++ b/modules/core/simulation/proposals_test.go @@ -29,7 +29,7 @@ func TestProposalMsgs(t *testing.T) { // execute ProposalMsgs function weightedProposalMsgs := simulation.ProposalMsgs() - require.Equal(t, 4, len(weightedProposalMsgs)) + require.Len(t, weightedProposalMsgs, 4) // tests w0 interface: w0 := weightedProposalMsgs[0] @@ -41,7 +41,7 @@ func TestProposalMsgs(t *testing.T) { require.True(t, ok) require.Equal(t, sdk.AccAddress(address.Module("gov")).String(), msgUpdateParams.Signer) - require.EqualValues(t, []string{"06-solomachine", "07-tendermint"}, msgUpdateParams.Params.AllowedClients) + require.Equal(t, []string{"06-solomachine", "07-tendermint"}, msgUpdateParams.Params.AllowedClients) // tests w1 interface: w1 := weightedProposalMsgs[1] @@ -53,7 +53,7 @@ func TestProposalMsgs(t *testing.T) { require.True(t, ok) require.Equal(t, sdk.AccAddress(address.Module("gov")).String(), msgUpdateParams.Signer) - require.EqualValues(t, uint64(100), msgUpdateConnectionParams.Params.MaxExpectedTimePerBlock) + require.Equal(t, uint64(100), msgUpdateConnectionParams.Params.MaxExpectedTimePerBlock) // tests 
w2 interface: w2 := weightedProposalMsgs[2] @@ -65,7 +65,7 @@ func TestProposalMsgs(t *testing.T) { require.True(t, ok) require.Equal(t, sdk.AccAddress(address.Module("gov")).String(), msgRecoverClient.Signer) - require.EqualValues(t, "07-tendermint-1", msgRecoverClient.SubstituteClientId) + require.Equal(t, "07-tendermint-1", msgRecoverClient.SubstituteClientId) // tests w3 interface: w3 := weightedProposalMsgs[3] @@ -79,5 +79,5 @@ func TestProposalMsgs(t *testing.T) { require.Equal(t, sdk.AccAddress(address.Module("gov")).String(), msgIBCSoftwareUpgrade.Signer) clientState, err := clienttypes.UnpackClientState(msgIBCSoftwareUpgrade.UpgradedClientState) require.NoError(t, err) - require.EqualValues(t, time.Hour*24*7*2, clientState.(*ibctm.ClientState).UnbondingPeriod) + require.Equal(t, time.Hour*24*7*2, clientState.(*ibctm.ClientState).UnbondingPeriod) } diff --git a/modules/core/types/expected_interfaces.go b/modules/core/types/expected_interfaces.go deleted file mode 100644 index c6aca313696..00000000000 --- a/modules/core/types/expected_interfaces.go +++ /dev/null @@ -1,11 +0,0 @@ -package types - -import ( - sdk "github.com/cosmos/cosmos-sdk/types" - paramtypes "github.com/cosmos/cosmos-sdk/x/params/types" -) - -// ParamSubspace defines the expected Subspace interface for module parameters. -type ParamSubspace interface { - GetParamSet(ctx sdk.Context, ps paramtypes.ParamSet) -} diff --git a/modules/light-clients/06-solomachine/client_state_test.go b/modules/light-clients/06-solomachine/client_state_test.go index 76215c6b7de..4d53b7698ff 100644 --- a/modules/light-clients/06-solomachine/client_state_test.go +++ b/modules/light-clients/06-solomachine/client_state_test.go @@ -15,10 +15,9 @@ const ( testPortID = "testportid" ) -func (suite *SoloMachineTestSuite) TestClientStateValidate() { +func (s *SoloMachineTestSuite) TestClientStateValidate() { // test singlesig and multisig public keys - for _, sm := range []*ibctesting.Solomachine{suite.solomachine, suite.solomachineMulti} { - + for _, sm := range []*ibctesting.Solomachine{s.solomachine, s.solomachineMulti} { testCases := []struct { name string clientState *solomachine.ClientState @@ -57,22 +56,22 @@ func (suite *SoloMachineTestSuite) TestClientStateValidate() { } for _, tc := range testCases { - suite.Run(tc.name, func() { + s.Run(tc.name, func() { err := tc.clientState.Validate() if tc.expErr == nil { - suite.Require().NoError(err) + s.Require().NoError(err) } else { - suite.Require().Error(err) - suite.Require().ErrorContains(err, tc.expErr.Error()) + s.Require().Error(err) + s.Require().ErrorContains(err, tc.expErr.Error()) } }) } } } -func (suite *SoloMachineTestSuite) TestSignBytesMarshalling() { - sm := suite.solomachine +func (s *SoloMachineTestSuite) TestSignBytesMarshalling() { + sm := s.solomachine path := []byte("solomachine") signBytesNilData := solomachine.SignBytes{ Sequence: sm.GetHeight().GetRevisionHeight(), @@ -90,11 +89,11 @@ func (suite *SoloMachineTestSuite) TestSignBytesMarshalling() { Data: []byte{}, } - signBzNil, err := suite.chainA.Codec.Marshal(&signBytesNilData) - suite.Require().NoError(err) + signBzNil, err := s.chainA.Codec.Marshal(&signBytesNilData) + s.Require().NoError(err) - signBzEmptyArray, err := suite.chainA.Codec.Marshal(&signBytesEmptyArray) - suite.Require().NoError(err) + signBzEmptyArray, err := s.chainA.Codec.Marshal(&signBytesEmptyArray) + s.Require().NoError(err) - suite.Require().True(bytes.Equal(signBzNil, signBzEmptyArray)) + s.Require().True(bytes.Equal(signBzNil, 
signBzEmptyArray)) } diff --git a/modules/light-clients/06-solomachine/consensus_state_test.go b/modules/light-clients/06-solomachine/consensus_state_test.go index 64710efe1c5..3b476f9fbf9 100644 --- a/modules/light-clients/06-solomachine/consensus_state_test.go +++ b/modules/light-clients/06-solomachine/consensus_state_test.go @@ -8,17 +8,16 @@ import ( ibctesting "github.com/cosmos/ibc-go/v10/testing" ) -func (suite *SoloMachineTestSuite) TestConsensusState() { - consensusState := suite.solomachine.ConsensusState() +func (s *SoloMachineTestSuite) TestConsensusState() { + consensusState := s.solomachine.ConsensusState() - suite.Require().Equal(exported.Solomachine, consensusState.ClientType()) - suite.Require().Equal(suite.solomachine.Time, consensusState.GetTimestamp()) + s.Require().Equal(exported.Solomachine, consensusState.ClientType()) + s.Require().Equal(s.solomachine.Time, consensusState.GetTimestamp()) } -func (suite *SoloMachineTestSuite) TestConsensusStateValidateBasic() { +func (s *SoloMachineTestSuite) TestConsensusStateValidateBasic() { // test singlesig and multisig public keys - for _, sm := range []*ibctesting.Solomachine{suite.solomachine, suite.solomachineMulti} { - + for _, sm := range []*ibctesting.Solomachine{s.solomachine, s.solomachineMulti} { testCases := []struct { name string consensusState *solomachine.ConsensusState @@ -59,14 +58,14 @@ func (suite *SoloMachineTestSuite) TestConsensusStateValidateBasic() { } for _, tc := range testCases { - suite.Run(tc.name, func() { + s.Run(tc.name, func() { err := tc.consensusState.ValidateBasic() if tc.expErr == nil { - suite.Require().NoError(err) + s.Require().NoError(err) } else { - suite.Require().Error(err) - suite.Require().ErrorContains(err, tc.expErr.Error()) + s.Require().Error(err) + s.Require().ErrorContains(err, tc.expErr.Error()) } }) } diff --git a/modules/light-clients/06-solomachine/header_test.go b/modules/light-clients/06-solomachine/header_test.go index 5eb56f9432d..5d6778046c5 100644 --- a/modules/light-clients/06-solomachine/header_test.go +++ b/modules/light-clients/06-solomachine/header_test.go @@ -8,10 +8,9 @@ import ( ibctesting "github.com/cosmos/ibc-go/v10/testing" ) -func (suite *SoloMachineTestSuite) TestHeaderValidateBasic() { +func (s *SoloMachineTestSuite) TestHeaderValidateBasic() { // test singlesig and multisig public keys - for _, sm := range []*ibctesting.Solomachine{suite.solomachine, suite.solomachineMulti} { - + for _, sm := range []*ibctesting.Solomachine{s.solomachine, s.solomachineMulti} { header := sm.CreateHeader(sm.Diversifier) cases := []struct { @@ -66,17 +65,17 @@ func (suite *SoloMachineTestSuite) TestHeaderValidateBasic() { }, } - suite.Require().Equal(exported.Solomachine, header.ClientType()) + s.Require().Equal(exported.Solomachine, header.ClientType()) for _, tc := range cases { - suite.Run(tc.name, func() { + s.Run(tc.name, func() { err := tc.header.ValidateBasic() if tc.expErr == nil { - suite.Require().NoError(err) + s.Require().NoError(err) } else { - suite.Require().Error(err) - suite.Require().ErrorContains(err, tc.expErr.Error()) + s.Require().Error(err) + s.Require().ErrorContains(err, tc.expErr.Error()) } }) } diff --git a/modules/light-clients/06-solomachine/light_client_module_test.go b/modules/light-clients/06-solomachine/light_client_module_test.go index f5ee73865bd..8ab5d0fa66f 100644 --- a/modules/light-clients/06-solomachine/light_client_module_test.go +++ b/modules/light-clients/06-solomachine/light_client_module_test.go @@ -24,7 +24,7 @@ const ( 
wasmClientID = "08-wasm-0" ) -func (suite *SoloMachineTestSuite) TestStatus() { +func (s *SoloMachineTestSuite) TestStatus() { var ( clientState *solomachine.ClientState clientID string @@ -45,7 +45,7 @@ func (suite *SoloMachineTestSuite) TestStatus() { func() { clientState = solomachine.NewClientState(0, &solomachine.ConsensusState{}) clientState.IsFrozen = true - suite.chainA.App.GetIBCKeeper().ClientKeeper.SetClientState(suite.chainA.GetContext(), clientID, clientState) + s.chainA.App.GetIBCKeeper().ClientKeeper.SetClientState(s.chainA.GetContext(), clientID, clientState) }, exported.Frozen, }, @@ -59,23 +59,23 @@ func (suite *SoloMachineTestSuite) TestStatus() { } for _, tc := range testCases { - suite.Run(tc.name, func() { - clientID = suite.solomachine.ClientID + s.Run(tc.name, func() { + clientID = s.solomachine.ClientID - lightClientModule, err := suite.chainA.App.GetIBCKeeper().ClientKeeper.Route(suite.chainA.GetContext(), clientID) - suite.Require().NoError(err) + lightClientModule, err := s.chainA.App.GetIBCKeeper().ClientKeeper.Route(s.chainA.GetContext(), clientID) + s.Require().NoError(err) - suite.chainA.App.GetIBCKeeper().ClientKeeper.SetClientState(suite.chainA.GetContext(), clientID, suite.solomachine.ClientState()) + s.chainA.App.GetIBCKeeper().ClientKeeper.SetClientState(s.chainA.GetContext(), clientID, s.solomachine.ClientState()) tc.malleate() - status := lightClientModule.Status(suite.chainA.GetContext(), clientID) - suite.Require().Equal(tc.expStatus, status) + status := lightClientModule.Status(s.chainA.GetContext(), clientID) + s.Require().Equal(tc.expStatus, status) }) } } -func (suite *SoloMachineTestSuite) TestGetTimestampAtHeight() { +func (s *SoloMachineTestSuite) TestGetTimestampAtHeight() { var ( clientID string height exported.Height @@ -90,7 +90,7 @@ func (suite *SoloMachineTestSuite) TestGetTimestampAtHeight() { { "success: get timestamp at height exists", func() {}, - suite.solomachine.ClientState().ConsensusState.Timestamp, + s.solomachine.ClientState().ConsensusState.Timestamp, nil, }, { @@ -99,7 +99,7 @@ func (suite *SoloMachineTestSuite) TestGetTimestampAtHeight() { height = clienttypes.ZeroHeight() }, // Timestamp should be the same. 
- suite.solomachine.ClientState().ConsensusState.Timestamp, + s.solomachine.ClientState().ConsensusState.Timestamp, nil, }, { @@ -113,29 +113,29 @@ func (suite *SoloMachineTestSuite) TestGetTimestampAtHeight() { } for _, tc := range testCases { - suite.Run(tc.name, func() { - clientID = suite.solomachine.ClientID - clientState := suite.solomachine.ClientState() - height = clienttypes.NewHeight(0, suite.solomachine.ClientState().Sequence) + s.Run(tc.name, func() { + clientID = s.solomachine.ClientID + clientState := s.solomachine.ClientState() + height = clienttypes.NewHeight(0, s.solomachine.ClientState().Sequence) - lightClientModule, err := suite.chainA.App.GetIBCKeeper().ClientKeeper.Route(suite.chainA.GetContext(), clientID) - suite.Require().NoError(err) + lightClientModule, err := s.chainA.App.GetIBCKeeper().ClientKeeper.Route(s.chainA.GetContext(), clientID) + s.Require().NoError(err) - suite.chainA.App.GetIBCKeeper().ClientKeeper.SetClientState(suite.chainA.GetContext(), clientID, clientState) + s.chainA.App.GetIBCKeeper().ClientKeeper.SetClientState(s.chainA.GetContext(), clientID, clientState) tc.malleate() - ts, err := lightClientModule.TimestampAtHeight(suite.chainA.GetContext(), clientID, height) + ts, err := lightClientModule.TimestampAtHeight(s.chainA.GetContext(), clientID, height) - suite.Require().Equal(tc.expValue, ts) - suite.Require().ErrorIs(err, tc.expErr) + s.Require().Equal(tc.expValue, ts) + s.Require().ErrorIs(err, tc.expErr) }) } } -func (suite *SoloMachineTestSuite) TestInitialize() { +func (s *SoloMachineTestSuite) TestInitialize() { // test singlesig and multisig public keys - for _, sm := range []*ibctesting.Solomachine{suite.solomachine, suite.solomachineMulti} { + for _, sm := range []*ibctesting.Solomachine{s.solomachine, s.solomachineMulti} { malleatedConsensus := sm.ClientState().ConsensusState malleatedConsensus.Timestamp += 10 @@ -184,32 +184,32 @@ func (suite *SoloMachineTestSuite) TestInitialize() { } for _, tc := range testCases { - suite.Run(tc.name, func() { - suite.SetupTest() + s.Run(tc.name, func() { + s.SetupTest() clientID := sm.ClientID - clientStateBz := suite.chainA.Codec.MustMarshal(tc.clientState) - consStateBz := suite.chainA.Codec.MustMarshal(tc.consState) + clientStateBz := s.chainA.Codec.MustMarshal(tc.clientState) + consStateBz := s.chainA.Codec.MustMarshal(tc.consState) - lightClientModule, err := suite.chainA.App.GetIBCKeeper().ClientKeeper.Route(suite.chainA.GetContext(), clientID) - suite.Require().NoError(err) + lightClientModule, err := s.chainA.App.GetIBCKeeper().ClientKeeper.Route(s.chainA.GetContext(), clientID) + s.Require().NoError(err) - err = lightClientModule.Initialize(suite.chainA.GetContext(), clientID, clientStateBz, consStateBz) - store := suite.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(suite.chainA.GetContext(), clientID) + err = lightClientModule.Initialize(s.chainA.GetContext(), clientID, clientStateBz, consStateBz) + store := s.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(s.chainA.GetContext(), clientID) if tc.expErr == nil { - suite.Require().NoError(err) - suite.Require().True(store.Has(host.ClientStateKey())) + s.Require().NoError(err) + s.Require().True(store.Has(host.ClientStateKey())) } else { - suite.Require().ErrorContains(err, tc.expErr.Error()) - suite.Require().False(store.Has(host.ClientStateKey())) + s.Require().ErrorContains(err, tc.expErr.Error()) + s.Require().False(store.Has(host.ClientStateKey())) } }) } } } -func (suite *SoloMachineTestSuite) TestVerifyMembership() { +func (s 
*SoloMachineTestSuite) TestVerifyMembership() { var ( clientState *solomachine.ClientState path exported.Path @@ -221,8 +221,7 @@ func (suite *SoloMachineTestSuite) TestVerifyMembership() { ) // test singlesig and multisig public keys - for _, sm := range []*ibctesting.Solomachine{suite.solomachine, suite.solomachineMulti} { - + for _, sm := range []*ibctesting.Solomachine{s.solomachine, s.solomachineMulti} { testCases := []struct { name string malleate func() @@ -237,14 +236,14 @@ func (suite *SoloMachineTestSuite) TestVerifyMembership() { "success: client state verification", func() { clientState = sm.ClientState() - clientStateBz, err := suite.chainA.Codec.MarshalInterface(clientState) - suite.Require().NoError(err) + clientStateBz, err := s.chainA.Codec.MarshalInterface(clientState) + s.Require().NoError(err) path = sm.GetClientStatePath(counterpartyClientIdentifier) merklePath, ok := path.(commitmenttypesv2.MerklePath) - suite.Require().True(ok) + s.Require().True(ok) key, err := merklePath.GetKey(1) // in a multistore context: index 0 is the key for the IBC store in the multistore, index 1 is the key in the IBC store - suite.Require().NoError(err) + s.Require().NoError(err) signBytes = solomachine.SignBytes{ Sequence: sm.GetHeight().GetRevisionHeight(), Timestamp: sm.Time, @@ -253,8 +252,8 @@ func (suite *SoloMachineTestSuite) TestVerifyMembership() { Data: clientStateBz, } - signBz, err := suite.chainA.Codec.Marshal(&signBytes) - suite.Require().NoError(err) + signBz, err := s.chainA.Codec.Marshal(&signBytes) + s.Require().NoError(err) sig := sm.GenerateSignature(signBz) @@ -263,8 +262,8 @@ func (suite *SoloMachineTestSuite) TestVerifyMembership() { Timestamp: sm.Time, } - proof, err = suite.chainA.Codec.Marshal(signatureDoc) - suite.Require().NoError(err) + proof, err = s.chainA.Codec.Marshal(signatureDoc) + s.Require().NoError(err) }, nil, }, @@ -273,14 +272,14 @@ func (suite *SoloMachineTestSuite) TestVerifyMembership() { func() { clientState = sm.ClientState() consensusState := clientState.ConsensusState - consensusStateBz, err := suite.chainA.Codec.MarshalInterface(consensusState) - suite.Require().NoError(err) + consensusStateBz, err := s.chainA.Codec.MarshalInterface(consensusState) + s.Require().NoError(err) path = sm.GetConsensusStatePath(counterpartyClientIdentifier, clienttypes.NewHeight(0, 1)) merklePath, ok := path.(commitmenttypesv2.MerklePath) - suite.Require().True(ok) + s.Require().True(ok) key, err := merklePath.GetKey(1) // in a multistore context: index 0 is the key for the IBC store in the multistore, index 1 is the key in the IBC store - suite.Require().NoError(err) + s.Require().NoError(err) signBytes = solomachine.SignBytes{ Sequence: sm.Sequence, Timestamp: sm.Time, @@ -289,8 +288,8 @@ func (suite *SoloMachineTestSuite) TestVerifyMembership() { Data: consensusStateBz, } - signBz, err := suite.chainA.Codec.Marshal(&signBytes) - suite.Require().NoError(err) + signBz, err := s.chainA.Codec.Marshal(&signBytes) + s.Require().NoError(err) sig := sm.GenerateSignature(signBz) @@ -299,8 +298,8 @@ func (suite *SoloMachineTestSuite) TestVerifyMembership() { Timestamp: sm.Time, } - proof, err = suite.chainA.Codec.Marshal(signatureDoc) - suite.Require().NoError(err) + proof, err = s.chainA.Codec.Marshal(signatureDoc) + s.Require().NoError(err) }, nil, }, @@ -309,17 +308,17 @@ func (suite *SoloMachineTestSuite) TestVerifyMembership() { func() { testingPath.SetupConnections() - connectionEnd, found := 
suite.chainA.GetSimApp().IBCKeeper.ConnectionKeeper.GetConnection(suite.chainA.GetContext(), ibctesting.FirstConnectionID) - suite.Require().True(found) + connectionEnd, found := s.chainA.GetSimApp().IBCKeeper.ConnectionKeeper.GetConnection(s.chainA.GetContext(), ibctesting.FirstConnectionID) + s.Require().True(found) - connectionEndBz, err := suite.chainA.Codec.Marshal(&connectionEnd) - suite.Require().NoError(err) + connectionEndBz, err := s.chainA.Codec.Marshal(&connectionEnd) + s.Require().NoError(err) path = sm.GetConnectionStatePath(ibctesting.FirstConnectionID) merklePath, ok := path.(commitmenttypesv2.MerklePath) - suite.Require().True(ok) + s.Require().True(ok) key, err := merklePath.GetKey(1) // in a multistore context: index 0 is the key for the IBC store in the multistore, index 1 is the key in the IBC store - suite.Require().NoError(err) + s.Require().NoError(err) signBytes = solomachine.SignBytes{ Sequence: sm.Sequence, Timestamp: sm.Time, @@ -328,8 +327,8 @@ func (suite *SoloMachineTestSuite) TestVerifyMembership() { Data: connectionEndBz, } - signBz, err := suite.chainA.Codec.Marshal(&signBytes) - suite.Require().NoError(err) + signBz, err := s.chainA.Codec.Marshal(&signBytes) + s.Require().NoError(err) sig := sm.GenerateSignature(signBz) @@ -338,8 +337,8 @@ func (suite *SoloMachineTestSuite) TestVerifyMembership() { Timestamp: sm.Time, } - proof, err = suite.chainA.Codec.Marshal(signatureDoc) - suite.Require().NoError(err) + proof, err = s.chainA.Codec.Marshal(signatureDoc) + s.Require().NoError(err) }, nil, }, @@ -347,19 +346,19 @@ func (suite *SoloMachineTestSuite) TestVerifyMembership() { "success: channel state verification", func() { testingPath.SetupConnections() - suite.coordinator.CreateMockChannels(testingPath) + s.coordinator.CreateMockChannels(testingPath) - channelEnd, found := suite.chainA.GetSimApp().IBCKeeper.ChannelKeeper.GetChannel(suite.chainA.GetContext(), ibctesting.MockPort, testingPath.EndpointA.ChannelID) - suite.Require().True(found) + channelEnd, found := s.chainA.GetSimApp().IBCKeeper.ChannelKeeper.GetChannel(s.chainA.GetContext(), ibctesting.MockPort, testingPath.EndpointA.ChannelID) + s.Require().True(found) - channelEndBz, err := suite.chainA.Codec.Marshal(&channelEnd) - suite.Require().NoError(err) + channelEndBz, err := s.chainA.Codec.Marshal(&channelEnd) + s.Require().NoError(err) path = sm.GetChannelStatePath(ibctesting.MockPort, ibctesting.FirstChannelID) merklePath, ok := path.(commitmenttypesv2.MerklePath) - suite.Require().True(ok) + s.Require().True(ok) key, err := merklePath.GetKey(1) // in a multistore context: index 0 is the key for the IBC store in the multistore, index 1 is the key in the IBC store - suite.Require().NoError(err) + s.Require().NoError(err) signBytes = solomachine.SignBytes{ Sequence: sm.Sequence, Timestamp: sm.Time, @@ -368,8 +367,8 @@ func (suite *SoloMachineTestSuite) TestVerifyMembership() { Data: channelEndBz, } - signBz, err := suite.chainA.Codec.Marshal(&signBytes) - suite.Require().NoError(err) + signBz, err := s.chainA.Codec.Marshal(&signBytes) + s.Require().NoError(err) sig := sm.GenerateSignature(signBz) @@ -378,8 +377,8 @@ func (suite *SoloMachineTestSuite) TestVerifyMembership() { Timestamp: sm.Time, } - proof, err = suite.chainA.Codec.Marshal(signatureDoc) - suite.Require().NoError(err) + proof, err = s.chainA.Codec.Marshal(signatureDoc) + s.Require().NoError(err) }, nil, }, @@ -387,16 +386,16 @@ func (suite *SoloMachineTestSuite) TestVerifyMembership() { "success: next sequence recv verification", 
func() { testingPath.SetupConnections() - suite.coordinator.CreateMockChannels(testingPath) + s.coordinator.CreateMockChannels(testingPath) - nextSeqRecv, found := suite.chainA.GetSimApp().IBCKeeper.ChannelKeeper.GetNextSequenceRecv(suite.chainA.GetContext(), ibctesting.MockPort, testingPath.EndpointA.ChannelID) - suite.Require().True(found) + nextSeqRecv, found := s.chainA.GetSimApp().IBCKeeper.ChannelKeeper.GetNextSequenceRecv(s.chainA.GetContext(), ibctesting.MockPort, testingPath.EndpointA.ChannelID) + s.Require().True(found) path = sm.GetNextSequenceRecvPath(ibctesting.MockPort, ibctesting.FirstChannelID) merklePath, ok := path.(commitmenttypesv2.MerklePath) - suite.Require().True(ok) + s.Require().True(ok) key, err := merklePath.GetKey(1) // in a multistore context: index 0 is the key for the IBC store in the multistore, index 1 is the key in the IBC store - suite.Require().NoError(err) + s.Require().NoError(err) signBytes = solomachine.SignBytes{ Sequence: sm.Sequence, Timestamp: sm.Time, @@ -405,8 +404,8 @@ func (suite *SoloMachineTestSuite) TestVerifyMembership() { Data: sdk.Uint64ToBigEndian(nextSeqRecv), } - signBz, err := suite.chainA.Codec.Marshal(&signBytes) - suite.Require().NoError(err) + signBz, err := s.chainA.Codec.Marshal(&signBytes) + s.Require().NoError(err) sig := sm.GenerateSignature(signBz) @@ -415,8 +414,8 @@ func (suite *SoloMachineTestSuite) TestVerifyMembership() { Timestamp: sm.Time, } - proof, err = suite.chainA.Codec.Marshal(signatureDoc) - suite.Require().NoError(err) + proof, err = s.chainA.Codec.Marshal(signatureDoc) + s.Require().NoError(err) }, nil, }, @@ -437,9 +436,9 @@ func (suite *SoloMachineTestSuite) TestVerifyMembership() { commitmentBz := channeltypes.CommitPacket(packet) path = sm.GetPacketCommitmentPath(packet.GetSourcePort(), packet.GetSourceChannel(), packet.GetSequence()) merklePath, ok := path.(commitmenttypesv2.MerklePath) - suite.Require().True(ok) + s.Require().True(ok) key, err := merklePath.GetKey(1) // in a multistore context: index 0 is the key for the IBC store in the multistore, index 1 is the key in the IBC store - suite.Require().NoError(err) + s.Require().NoError(err) signBytes = solomachine.SignBytes{ Sequence: sm.Sequence, Timestamp: sm.Time, @@ -448,8 +447,8 @@ func (suite *SoloMachineTestSuite) TestVerifyMembership() { Data: commitmentBz, } - signBz, err := suite.chainA.Codec.Marshal(&signBytes) - suite.Require().NoError(err) + signBz, err := s.chainA.Codec.Marshal(&signBytes) + s.Require().NoError(err) sig := sm.GenerateSignature(signBz) @@ -458,8 +457,8 @@ func (suite *SoloMachineTestSuite) TestVerifyMembership() { Timestamp: sm.Time, } - proof, err = suite.chainA.Codec.Marshal(signatureDoc) - suite.Require().NoError(err) + proof, err = s.chainA.Codec.Marshal(signatureDoc) + s.Require().NoError(err) }, nil, }, @@ -468,9 +467,9 @@ func (suite *SoloMachineTestSuite) TestVerifyMembership() { func() { path = sm.GetPacketAcknowledgementPath(ibctesting.MockPort, ibctesting.FirstChannelID, 1) merklePath, ok := path.(commitmenttypesv2.MerklePath) - suite.Require().True(ok) + s.Require().True(ok) key, err := merklePath.GetKey(1) // index 0 is the key for the IBC store in the multistore, index 1 is the key in the IBC store - suite.Require().NoError(err) + s.Require().NoError(err) signBytes = solomachine.SignBytes{ Sequence: sm.Sequence, Timestamp: sm.Time, @@ -479,8 +478,8 @@ func (suite *SoloMachineTestSuite) TestVerifyMembership() { Data: ibctesting.MockAcknowledgement, } - signBz, err := suite.chainA.Codec.Marshal(&signBytes) - 
suite.Require().NoError(err) + signBz, err := s.chainA.Codec.Marshal(&signBytes) + s.Require().NoError(err) sig := sm.GenerateSignature(signBz) @@ -489,8 +488,8 @@ func (suite *SoloMachineTestSuite) TestVerifyMembership() { Timestamp: sm.Time, } - proof, err = suite.chainA.Codec.Marshal(signatureDoc) - suite.Require().NoError(err) + proof, err = s.chainA.Codec.Marshal(signatureDoc) + s.Require().NoError(err) }, nil, }, @@ -499,9 +498,9 @@ func (suite *SoloMachineTestSuite) TestVerifyMembership() { func() { path = sm.GetPacketReceiptPath(ibctesting.MockPort, ibctesting.FirstChannelID, 1) merklePath, ok := path.(commitmenttypesv2.MerklePath) - suite.Require().True(ok) + s.Require().True(ok) key, err := merklePath.GetKey(1) // in a multistore context: index 0 is the key for the IBC store in the multistore, index 1 is the key in the IBC store - suite.Require().NoError(err) + s.Require().NoError(err) signBytes = solomachine.SignBytes{ Sequence: sm.Sequence, Timestamp: sm.Time, @@ -510,8 +509,8 @@ func (suite *SoloMachineTestSuite) TestVerifyMembership() { Data: []byte{byte(1)}, // packet receipt is stored as a single byte } - signBz, err := suite.chainA.Codec.Marshal(&signBytes) - suite.Require().NoError(err) + signBz, err := s.chainA.Codec.Marshal(&signBytes) + s.Require().NoError(err) sig := sm.GenerateSignature(signBz) @@ -520,8 +519,8 @@ func (suite *SoloMachineTestSuite) TestVerifyMembership() { Timestamp: sm.Time, } - proof, err = suite.chainA.Codec.Marshal(signatureDoc) - suite.Require().NoError(err) + proof, err = s.chainA.Codec.Marshal(signatureDoc) + s.Require().NoError(err) }, nil, }, @@ -556,9 +555,9 @@ func (suite *SoloMachineTestSuite) TestVerifyMembership() { } clientState = solomachine.NewClientState(sm.Sequence, consensusState) - suite.chainA.App.GetIBCKeeper().ClientKeeper.SetClientState(suite.chainA.GetContext(), clientID, clientState) + s.chainA.App.GetIBCKeeper().ClientKeeper.SetClientState(s.chainA.GetContext(), clientID, clientState) }, - fmt.Errorf("the consensus state timestamp is greater than the signature timestamp (11 >= 10): %s", solomachine.ErrInvalidProof), + fmt.Errorf("the consensus state timestamp is greater than the signature timestamp (11 >= 10): %w", solomachine.ErrInvalidProof), }, { "failure: signature data is nil", @@ -568,18 +567,18 @@ func (suite *SoloMachineTestSuite) TestVerifyMembership() { Timestamp: sm.Time, } - proof, err = suite.chainA.Codec.Marshal(signatureDoc) - suite.Require().NoError(err) + proof, err = s.chainA.Codec.Marshal(signatureDoc) + s.Require().NoError(err) }, - fmt.Errorf("signature data cannot be empty: %s", solomachine.ErrInvalidProof), + fmt.Errorf("signature data cannot be empty: %w", solomachine.ErrInvalidProof), }, { "failure: consensus state public key is nil", func() { clientState.ConsensusState.PublicKey = nil - suite.chainA.App.GetIBCKeeper().ClientKeeper.SetClientState(suite.chainA.GetContext(), clientID, clientState) + s.chainA.App.GetIBCKeeper().ClientKeeper.SetClientState(s.chainA.GetContext(), clientID, clientState) }, - fmt.Errorf("consensus state PublicKey cannot be nil: %s", clienttypes.ErrInvalidConsensus), + fmt.Errorf("consensus state PublicKey cannot be nil: %w", clienttypes.ErrInvalidConsensus), }, { "failure: malformed signature data fails to unmarshal", @@ -589,8 +588,8 @@ func (suite *SoloMachineTestSuite) TestVerifyMembership() { Timestamp: sm.Time, } - proof, err = suite.chainA.Codec.Marshal(signatureDoc) - suite.Require().NoError(err) + proof, err = s.chainA.Codec.Marshal(signatureDoc) + 
s.Require().NoError(err) }, errors.New("failed to unmarshal proof into type"), }, @@ -599,7 +598,7 @@ func (suite *SoloMachineTestSuite) TestVerifyMembership() { func() { proof = nil }, - fmt.Errorf("proof cannot be empty: %s", solomachine.ErrInvalidProof), + fmt.Errorf("proof cannot be empty: %w", solomachine.ErrInvalidProof), }, { "failure: proof verification failed", @@ -613,23 +612,23 @@ func (suite *SoloMachineTestSuite) TestVerifyMembership() { func() { path = commitmenttypesv2.MerklePath{} }, - fmt.Errorf("path must be of length 2: []: %s", host.ErrInvalidPath), + fmt.Errorf("path must be of length 2: []: %w", host.ErrInvalidPath), }, } for _, tc := range testCases { - suite.Run(tc.name, func() { - suite.SetupTest() - testingPath = ibctesting.NewPath(suite.chainA, suite.chainB) + s.Run(tc.name, func() { + s.SetupTest() + testingPath = ibctesting.NewPath(s.chainA, s.chainB) clientID = sm.ClientID clientState = sm.ClientState() path = commitmenttypesv2.NewMerklePath([]byte("ibc"), []byte("solomachine")) merklePath, ok := path.(commitmenttypesv2.MerklePath) - suite.Require().True(ok) + s.Require().True(ok) key, err := merklePath.GetKey(1) // in a multistore context: index 0 is the key for the IBC store in the multistore, index 1 is the key in the IBC store - suite.Require().NoError(err) + s.Require().NoError(err) signBytes = solomachine.SignBytes{ Sequence: sm.GetHeight().GetRevisionHeight(), Timestamp: sm.Time, @@ -638,8 +637,8 @@ func (suite *SoloMachineTestSuite) TestVerifyMembership() { Data: []byte("solomachine"), } - signBz, err := suite.chainA.Codec.Marshal(&signBytes) - suite.Require().NoError(err) + signBz, err := s.chainA.Codec.Marshal(&signBytes) + s.Require().NoError(err) sig := sm.GenerateSignature(signBz) @@ -648,14 +647,14 @@ func (suite *SoloMachineTestSuite) TestVerifyMembership() { Timestamp: sm.Time, } - proof, err = suite.chainA.Codec.Marshal(signatureDoc) - suite.Require().NoError(err) + proof, err = s.chainA.Codec.Marshal(signatureDoc) + s.Require().NoError(err) - lightClientModule, err := suite.chainA.App.GetIBCKeeper().ClientKeeper.Route(suite.chainA.GetContext(), clientID) - suite.Require().NoError(err) + lightClientModule, err := s.chainA.App.GetIBCKeeper().ClientKeeper.Route(s.chainA.GetContext(), clientID) + s.Require().NoError(err) // Set the client state in the store for light client call to find. - suite.chainA.App.GetIBCKeeper().ClientKeeper.SetClientState(suite.chainA.GetContext(), clientID, clientState) + s.chainA.App.GetIBCKeeper().ClientKeeper.SetClientState(s.chainA.GetContext(), clientID, clientState) tc.malleate() @@ -666,30 +665,30 @@ func (suite *SoloMachineTestSuite) TestVerifyMembership() { // Verify the membership proof err = lightClientModule.VerifyMembership( - suite.chainA.GetContext(), clientID, clienttypes.ZeroHeight(), + s.chainA.GetContext(), clientID, clienttypes.ZeroHeight(), 0, 0, proof, path, signBytes.Data, ) if tc.expErr == nil { // Grab fresh client state after updates. - cs, found := suite.chainA.App.GetIBCKeeper().ClientKeeper.GetClientState(suite.chainA.GetContext(), clientID) - suite.Require().True(found) + cs, found := s.chainA.App.GetIBCKeeper().ClientKeeper.GetClientState(s.chainA.GetContext(), clientID) + s.Require().True(found) clientState, ok = cs.(*solomachine.ClientState) - suite.Require().True(ok) + s.Require().True(ok) - suite.Require().NoError(err) + s.Require().NoError(err) // clientState.Sequence is the most recent view of state. 
- suite.Require().Equal(expSeq, clientState.Sequence) + s.Require().Equal(expSeq, clientState.Sequence) } else { - suite.Require().Error(err) - suite.Require().ErrorContains(err, tc.expErr.Error()) + s.Require().Error(err) + s.Require().ErrorContains(err, tc.expErr.Error()) } }) } } } -func (suite *SoloMachineTestSuite) TestVerifyNonMembership() { +func (s *SoloMachineTestSuite) TestVerifyNonMembership() { var ( clientState *solomachine.ClientState path exported.Path @@ -700,7 +699,7 @@ func (suite *SoloMachineTestSuite) TestVerifyNonMembership() { ) // test singlesig and multisig public keys - for _, sm := range []*ibctesting.Solomachine{suite.solomachine, suite.solomachineMulti} { + for _, sm := range []*ibctesting.Solomachine{s.solomachine, s.solomachineMulti} { testCases := []struct { name string malleate func() @@ -716,9 +715,9 @@ func (suite *SoloMachineTestSuite) TestVerifyNonMembership() { func() { path = sm.GetPacketReceiptPath(ibctesting.MockPort, ibctesting.FirstChannelID, 1) merklePath, ok := path.(commitmenttypesv2.MerklePath) - suite.Require().True(ok) + s.Require().True(ok) key, err := merklePath.GetKey(1) // in a multistore context: index 0 is the key for the IBC store in the multistore, index 1 is the key in the IBC store - suite.Require().NoError(err) + s.Require().NoError(err) signBytes = solomachine.SignBytes{ Sequence: sm.GetHeight().GetRevisionHeight(), Timestamp: sm.Time, @@ -727,8 +726,8 @@ func (suite *SoloMachineTestSuite) TestVerifyNonMembership() { Data: nil, } - signBz, err := suite.chainA.Codec.Marshal(&signBytes) - suite.Require().NoError(err) + signBz, err := s.chainA.Codec.Marshal(&signBytes) + s.Require().NoError(err) sig := sm.GenerateSignature(signBz) @@ -737,8 +736,8 @@ func (suite *SoloMachineTestSuite) TestVerifyNonMembership() { Timestamp: sm.Time, } - proof, err = suite.chainA.Codec.Marshal(signatureDoc) - suite.Require().NoError(err) + proof, err = s.chainA.Codec.Marshal(signatureDoc) + s.Require().NoError(err) }, nil, }, @@ -773,9 +772,9 @@ func (suite *SoloMachineTestSuite) TestVerifyNonMembership() { } clientState = solomachine.NewClientState(sm.Sequence, consensusState) - suite.chainA.App.GetIBCKeeper().ClientKeeper.SetClientState(suite.chainA.GetContext(), clientID, clientState) + s.chainA.App.GetIBCKeeper().ClientKeeper.SetClientState(s.chainA.GetContext(), clientID, clientState) }, - fmt.Errorf("the consensus state timestamp is greater than the signature timestamp (11 >= 10): %s", solomachine.ErrInvalidProof), + fmt.Errorf("the consensus state timestamp is greater than the signature timestamp (11 >= 10): %w", solomachine.ErrInvalidProof), }, { "failure: signature data is nil", @@ -785,18 +784,18 @@ func (suite *SoloMachineTestSuite) TestVerifyNonMembership() { Timestamp: sm.Time, } - proof, err = suite.chainA.Codec.Marshal(signatureDoc) - suite.Require().NoError(err) + proof, err = s.chainA.Codec.Marshal(signatureDoc) + s.Require().NoError(err) }, - fmt.Errorf("signature data cannot be empty: %s", solomachine.ErrInvalidProof), + fmt.Errorf("signature data cannot be empty: %w", solomachine.ErrInvalidProof), }, { "failure: consensus state public key is nil", func() { clientState.ConsensusState.PublicKey = nil - suite.chainA.App.GetIBCKeeper().ClientKeeper.SetClientState(suite.chainA.GetContext(), clientID, clientState) + s.chainA.App.GetIBCKeeper().ClientKeeper.SetClientState(s.chainA.GetContext(), clientID, clientState) }, - fmt.Errorf("consensus state PublicKey cannot be nil: %s", clienttypes.ErrInvalidConsensus), + fmt.Errorf("consensus 
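// Aside (illustrative sketch, not part of the upstream patch): the expected errors above now
// build with %w instead of %s. For the ErrorContains assertions the message text is identical
// either way; the difference is that %w keeps the sentinel in the error chain, so errors.Is /
// ErrorIs still match, which is also what wrapping-aware linters generally expect. Minimal,
// self-contained sketch; errExample is a stand-in for a sentinel such as
// solomachine.ErrInvalidProof:
package main

import (
	"errors"
	"fmt"
)

var errExample = errors.New("invalid proof")

func main() {
	wrapped := fmt.Errorf("proof cannot be empty: %w", errExample)
	flat := fmt.Errorf("proof cannot be empty: %s", errExample)

	fmt.Println(wrapped.Error() == flat.Error()) // true: same message text
	fmt.Println(errors.Is(wrapped, errExample))  // true: %w preserves the error chain
	fmt.Println(errors.Is(flat, errExample))     // false: %s only formats the text
}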
state PublicKey cannot be nil: %w", clienttypes.ErrInvalidConsensus), }, { "failure: malformed signature data fails to unmarshal", @@ -806,8 +805,8 @@ func (suite *SoloMachineTestSuite) TestVerifyNonMembership() { Timestamp: sm.Time, } - proof, err = suite.chainA.Codec.Marshal(signatureDoc) - suite.Require().NoError(err) + proof, err = s.chainA.Codec.Marshal(signatureDoc) + s.Require().NoError(err) }, errors.New("failed to unmarshal proof into type"), }, @@ -816,15 +815,15 @@ func (suite *SoloMachineTestSuite) TestVerifyNonMembership() { func() { proof = nil }, - fmt.Errorf("proof cannot be empty: %s", solomachine.ErrInvalidProof), + fmt.Errorf("proof cannot be empty: %w", solomachine.ErrInvalidProof), }, { "failure: proof verification failed", func() { signBytes.Data = []byte("invalid non-membership data value") - signBz, err := suite.chainA.Codec.Marshal(&signBytes) - suite.Require().NoError(err) + signBz, err := s.chainA.Codec.Marshal(&signBytes) + s.Require().NoError(err) sig := sm.GenerateSignature(signBz) @@ -833,25 +832,25 @@ func (suite *SoloMachineTestSuite) TestVerifyNonMembership() { Timestamp: sm.Time, } - proof, err = suite.chainA.Codec.Marshal(signatureDoc) - suite.Require().NoError(err) + proof, err = s.chainA.Codec.Marshal(signatureDoc) + s.Require().NoError(err) }, solomachine.ErrSignatureVerificationFailed, }, } for _, tc := range testCases { - suite.Run(tc.name, func() { - suite.SetupTest() + s.Run(tc.name, func() { + s.SetupTest() clientState = sm.ClientState() clientID = sm.ClientID path = commitmenttypesv2.NewMerklePath([]byte("ibc"), []byte("solomachine")) merklePath, ok := path.(commitmenttypesv2.MerklePath) - suite.Require().True(ok) + s.Require().True(ok) key, err := merklePath.GetKey(1) // in a multistore context: index 0 is the key for the IBC store in the multistore, index 1 is the key in the IBC store - suite.Require().NoError(err) + s.Require().NoError(err) signBytes = solomachine.SignBytes{ Sequence: sm.GetHeight().GetRevisionHeight(), Timestamp: sm.Time, @@ -860,8 +859,8 @@ func (suite *SoloMachineTestSuite) TestVerifyNonMembership() { Data: nil, } - signBz, err := suite.chainA.Codec.Marshal(&signBytes) - suite.Require().NoError(err) + signBz, err := s.chainA.Codec.Marshal(&signBytes) + s.Require().NoError(err) sig := sm.GenerateSignature(signBz) @@ -870,14 +869,14 @@ func (suite *SoloMachineTestSuite) TestVerifyNonMembership() { Timestamp: sm.Time, } - proof, err = suite.chainA.Codec.Marshal(signatureDoc) - suite.Require().NoError(err) + proof, err = s.chainA.Codec.Marshal(signatureDoc) + s.Require().NoError(err) - lightClientModule, err := suite.chainA.App.GetIBCKeeper().ClientKeeper.Route(suite.chainA.GetContext(), clientID) - suite.Require().NoError(err) + lightClientModule, err := s.chainA.App.GetIBCKeeper().ClientKeeper.Route(s.chainA.GetContext(), clientID) + s.Require().NoError(err) // Set the client state in the store for light client call to find. - suite.chainA.App.GetIBCKeeper().ClientKeeper.SetClientState(suite.chainA.GetContext(), clientID, clientState) + s.chainA.App.GetIBCKeeper().ClientKeeper.SetClientState(s.chainA.GetContext(), clientID, clientState) tc.malleate() @@ -888,29 +887,29 @@ func (suite *SoloMachineTestSuite) TestVerifyNonMembership() { // Verify the membership proof err = lightClientModule.VerifyNonMembership( - suite.chainA.GetContext(), clientID, clienttypes.ZeroHeight(), + s.chainA.GetContext(), clientID, clienttypes.ZeroHeight(), 0, 0, proof, path, ) if tc.expErr == nil { // Grab fresh client state after updates. 
- cs, found := suite.chainA.App.GetIBCKeeper().ClientKeeper.GetClientState(suite.chainA.GetContext(), clientID) - suite.Require().True(found) + cs, found := s.chainA.App.GetIBCKeeper().ClientKeeper.GetClientState(s.chainA.GetContext(), clientID) + s.Require().True(found) clientState, ok = cs.(*solomachine.ClientState) - suite.Require().True(ok) + s.Require().True(ok) - suite.Require().NoError(err) - suite.Require().Equal(expSeq, clientState.Sequence) + s.Require().NoError(err) + s.Require().Equal(expSeq, clientState.Sequence) } else { - suite.Require().Error(err) - suite.Require().ErrorContains(err, tc.expErr.Error()) + s.Require().Error(err) + s.Require().ErrorContains(err, tc.expErr.Error()) } }) } } } -func (suite *SoloMachineTestSuite) TestRecoverClient() { +func (s *SoloMachineTestSuite) TestRecoverClient() { var ( subjectClientID, substituteClientID string subjectClientState, substituteClientState *solomachine.ClientState @@ -958,56 +957,56 @@ func (suite *SoloMachineTestSuite) TestRecoverClient() { } for _, tc := range testCases { - suite.Run(tc.name, func() { - suite.SetupTest() // reset + s.Run(tc.name, func() { + s.SetupTest() // reset - ctx := suite.chainA.GetContext() + ctx := s.chainA.GetContext() - subjectClientID = suite.chainA.App.GetIBCKeeper().ClientKeeper.GenerateClientIdentifier(ctx, exported.Solomachine) - subject := ibctesting.NewSolomachine(suite.T(), suite.chainA.Codec, substituteClientID, "testing", 1) + subjectClientID = s.chainA.App.GetIBCKeeper().ClientKeeper.GenerateClientIdentifier(ctx, exported.Solomachine) + subject := ibctesting.NewSolomachine(s.T(), s.chainA.Codec, substituteClientID, "testing", 1) subjectClientState = subject.ClientState() - substituteClientID = suite.chainA.App.GetIBCKeeper().ClientKeeper.GenerateClientIdentifier(ctx, exported.Solomachine) - substitute := ibctesting.NewSolomachine(suite.T(), suite.chainA.Codec, substituteClientID, "testing", 1) + substituteClientID = s.chainA.App.GetIBCKeeper().ClientKeeper.GenerateClientIdentifier(ctx, exported.Solomachine) + substitute := ibctesting.NewSolomachine(s.T(), s.chainA.Codec, substituteClientID, "testing", 1) substitute.Sequence++ // increase sequence so that latest height of substitute is > than subject's latest height substituteClientState = substitute.ClientState() - clientStore := suite.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(ctx, substituteClientID) + clientStore := s.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(ctx, substituteClientID) clientStore.Get(host.ClientStateKey()) - bz := clienttypes.MustMarshalClientState(suite.chainA.Codec, substituteClientState) + bz := clienttypes.MustMarshalClientState(s.chainA.Codec, substituteClientState) clientStore.Set(host.ClientStateKey(), bz) subjectClientState.IsFrozen = true - suite.chainA.App.GetIBCKeeper().ClientKeeper.SetClientState(ctx, subjectClientID, subjectClientState) + s.chainA.App.GetIBCKeeper().ClientKeeper.SetClientState(ctx, subjectClientID, subjectClientState) - lightClientModule, err := suite.chainA.App.GetIBCKeeper().ClientKeeper.Route(suite.chainA.GetContext(), subjectClientID) - suite.Require().NoError(err) + lightClientModule, err := s.chainA.App.GetIBCKeeper().ClientKeeper.Route(s.chainA.GetContext(), subjectClientID) + s.Require().NoError(err) tc.malleate() err = lightClientModule.RecoverClient(ctx, subjectClientID, substituteClientID) if tc.expErr == nil { - suite.Require().NoError(err) + s.Require().NoError(err) // assert that status of subject client is now Active - clientStore = 
suite.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(ctx, subjectClientID) + clientStore = s.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(ctx, subjectClientID) bz = clientStore.Get(host.ClientStateKey()) - smClientState, ok := clienttypes.MustUnmarshalClientState(suite.chainA.Codec, bz).(*solomachine.ClientState) - suite.Require().True(ok) + smClientState, ok := clienttypes.MustUnmarshalClientState(s.chainA.Codec, bz).(*solomachine.ClientState) + s.Require().True(ok) - suite.Require().Equal(substituteClientState.ConsensusState, smClientState.ConsensusState) - suite.Require().Equal(substituteClientState.Sequence, smClientState.Sequence) - suite.Require().Equal(exported.Active, lightClientModule.Status(ctx, subjectClientID)) + s.Require().Equal(substituteClientState.ConsensusState, smClientState.ConsensusState) + s.Require().Equal(substituteClientState.Sequence, smClientState.Sequence) + s.Require().Equal(exported.Active, lightClientModule.Status(ctx, subjectClientID)) } else { - suite.Require().Error(err) - suite.Require().ErrorIs(err, tc.expErr) + s.Require().Error(err) + s.Require().ErrorIs(err, tc.expErr) } }) } } -func (suite *SoloMachineTestSuite) TestUpdateState() { +func (s *SoloMachineTestSuite) TestUpdateState() { var ( clientState *solomachine.ClientState clientMsg exported.ClientMessage @@ -1015,8 +1014,7 @@ func (suite *SoloMachineTestSuite) TestUpdateState() { ) // test singlesig and multisig public keys - for _, sm := range []*ibctesting.Solomachine{suite.solomachine, suite.solomachineMulti} { - + for _, sm := range []*ibctesting.Solomachine{s.solomachine, s.solomachineMulti} { testCases := []struct { name string malleate func() @@ -1032,7 +1030,7 @@ func (suite *SoloMachineTestSuite) TestUpdateState() { func() { clientState = sm.ClientState() clientMsg = sm.CreateMisbehaviour() - suite.chainA.App.GetIBCKeeper().ClientKeeper.SetClientState(suite.chainA.GetContext(), clientID, clientState) + s.chainA.App.GetIBCKeeper().ClientKeeper.SetClientState(s.chainA.GetContext(), clientID, clientState) }, nil, }, @@ -1041,69 +1039,69 @@ func (suite *SoloMachineTestSuite) TestUpdateState() { func() { clientID = unusedSmClientID }, - fmt.Errorf("%s: %s", unusedSmClientID, clienttypes.ErrClientNotFound), + fmt.Errorf("%s: %w", unusedSmClientID, clienttypes.ErrClientNotFound), }, } for _, tc := range testCases { - suite.Run(tc.name, func() { - suite.SetupTest() + s.Run(tc.name, func() { + s.SetupTest() clientID = sm.ClientID clientState = sm.ClientState() clientMsg = sm.CreateHeader(sm.Diversifier) - lightClientModule, err := suite.chainA.App.GetIBCKeeper().ClientKeeper.Route(suite.chainA.GetContext(), clientID) - suite.Require().NoError(err) + lightClientModule, err := s.chainA.App.GetIBCKeeper().ClientKeeper.Route(s.chainA.GetContext(), clientID) + s.Require().NoError(err) - suite.chainA.App.GetIBCKeeper().ClientKeeper.SetClientState(suite.chainA.GetContext(), clientID, clientState) + s.chainA.App.GetIBCKeeper().ClientKeeper.SetClientState(s.chainA.GetContext(), clientID, clientState) tc.malleate() // setup test - store := suite.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(suite.chainA.GetContext(), clientID) + store := s.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(s.chainA.GetContext(), clientID) var consensusHeights []exported.Height updateStateFunc := func() { - consensusHeights = lightClientModule.UpdateState(suite.chainA.GetContext(), clientID, clientMsg) + consensusHeights = lightClientModule.UpdateState(s.chainA.GetContext(), clientID, clientMsg) } if 
tc.expPanic == nil { updateStateFunc() clientStateBz := store.Get(host.ClientStateKey()) - suite.Require().NotEmpty(clientStateBz) + s.Require().NotEmpty(clientStateBz) - newClientState := clienttypes.MustUnmarshalClientState(suite.chainA.Codec, clientStateBz) + newClientState := clienttypes.MustUnmarshalClientState(s.chainA.Codec, clientStateBz) if len(consensusHeights) == 0 { - suite.Require().Equal(clientState, newClientState) + s.Require().Equal(clientState, newClientState) return } - suite.Require().Len(consensusHeights, 1) - suite.Require().Equal(uint64(0), consensusHeights[0].GetRevisionNumber()) - suite.Require().Equal(newClientState.(*solomachine.ClientState).Sequence, consensusHeights[0].GetRevisionHeight()) + s.Require().Len(consensusHeights, 1) + s.Require().Equal(uint64(0), consensusHeights[0].GetRevisionNumber()) + s.Require().Equal(newClientState.(*solomachine.ClientState).Sequence, consensusHeights[0].GetRevisionHeight()) - suite.Require().False(newClientState.(*solomachine.ClientState).IsFrozen) - suite.Require().Equal(clientMsg.(*solomachine.Header).NewPublicKey, newClientState.(*solomachine.ClientState).ConsensusState.PublicKey) - suite.Require().Equal(clientMsg.(*solomachine.Header).NewDiversifier, newClientState.(*solomachine.ClientState).ConsensusState.Diversifier) - suite.Require().Equal(clientMsg.(*solomachine.Header).Timestamp, newClientState.(*solomachine.ClientState).ConsensusState.Timestamp) + s.Require().False(newClientState.(*solomachine.ClientState).IsFrozen) + s.Require().Equal(clientMsg.(*solomachine.Header).NewPublicKey, newClientState.(*solomachine.ClientState).ConsensusState.PublicKey) + s.Require().Equal(clientMsg.(*solomachine.Header).NewDiversifier, newClientState.(*solomachine.ClientState).ConsensusState.Diversifier) + s.Require().Equal(clientMsg.(*solomachine.Header).Timestamp, newClientState.(*solomachine.ClientState).ConsensusState.Timestamp) } else { - suite.Require().PanicsWithError(tc.expPanic.Error(), updateStateFunc) + s.Require().PanicsWithError(tc.expPanic.Error(), updateStateFunc) } }) } } } -func (suite *SoloMachineTestSuite) TestCheckForMisbehaviour() { +func (s *SoloMachineTestSuite) TestCheckForMisbehaviour() { var ( clientMsg exported.ClientMessage clientID string ) // test singlesig and multisig public keys - for _, sm := range []*ibctesting.Solomachine{suite.solomachine, suite.solomachineMulti} { + for _, sm := range []*ibctesting.Solomachine{s.solomachine, s.solomachineMulti} { testCases := []struct { name string malleate func() @@ -1132,46 +1130,46 @@ func (suite *SoloMachineTestSuite) TestCheckForMisbehaviour() { clientID = unusedSmClientID }, false, - fmt.Errorf("%s: %s", unusedSmClientID, clienttypes.ErrClientNotFound), + fmt.Errorf("%s: %w", unusedSmClientID, clienttypes.ErrClientNotFound), }, } for _, tc := range testCases { - suite.Run(tc.name, func() { - suite.SetupTest() + s.Run(tc.name, func() { + s.SetupTest() clientID = sm.ClientID - lightClientModule, err := suite.chainA.App.GetIBCKeeper().ClientKeeper.Route(suite.chainA.GetContext(), clientID) - suite.Require().NoError(err) + lightClientModule, err := s.chainA.App.GetIBCKeeper().ClientKeeper.Route(s.chainA.GetContext(), clientID) + s.Require().NoError(err) - suite.chainA.App.GetIBCKeeper().ClientKeeper.SetClientState(suite.chainA.GetContext(), clientID, sm.ClientState()) + s.chainA.App.GetIBCKeeper().ClientKeeper.SetClientState(s.chainA.GetContext(), clientID, sm.ClientState()) tc.malleate() var foundMisbehaviour bool foundMisbehaviourFunc := func() { - 
foundMisbehaviour = lightClientModule.CheckForMisbehaviour(suite.chainA.GetContext(), clientID, clientMsg) + foundMisbehaviour = lightClientModule.CheckForMisbehaviour(s.chainA.GetContext(), clientID, clientMsg) } if tc.expPanic == nil { foundMisbehaviourFunc() - suite.Require().Equal(tc.foundMisbehaviour, foundMisbehaviour) + s.Require().Equal(tc.foundMisbehaviour, foundMisbehaviour) } else { - suite.Require().PanicsWithError(tc.expPanic.Error(), foundMisbehaviourFunc) - suite.Require().False(foundMisbehaviour) + s.Require().PanicsWithError(tc.expPanic.Error(), foundMisbehaviourFunc) + s.Require().False(foundMisbehaviour) } }) } } } -func (suite *SoloMachineTestSuite) TestUpdateStateOnMisbehaviour() { +func (s *SoloMachineTestSuite) TestUpdateStateOnMisbehaviour() { var clientID string // test singlesig and multisig public keys - for _, sm := range []*ibctesting.Solomachine{suite.solomachine, suite.solomachineMulti} { + for _, sm := range []*ibctesting.Solomachine{s.solomachine, s.solomachineMulti} { testCases := []struct { name string malleate func() @@ -1187,54 +1185,53 @@ func (suite *SoloMachineTestSuite) TestUpdateStateOnMisbehaviour() { func() { clientID = unusedSmClientID }, - fmt.Errorf("%s: %s", unusedSmClientID, clienttypes.ErrClientNotFound), + fmt.Errorf("%s: %w", unusedSmClientID, clienttypes.ErrClientNotFound), }, } for _, tc := range testCases { - suite.Run(tc.name, func() { - suite.SetupTest() + s.Run(tc.name, func() { + s.SetupTest() clientID = sm.ClientID - lightClientModule, err := suite.chainA.App.GetIBCKeeper().ClientKeeper.Route(suite.chainA.GetContext(), clientID) - suite.Require().NoError(err) + lightClientModule, err := s.chainA.App.GetIBCKeeper().ClientKeeper.Route(s.chainA.GetContext(), clientID) + s.Require().NoError(err) - suite.chainA.App.GetIBCKeeper().ClientKeeper.SetClientState(suite.chainA.GetContext(), clientID, sm.ClientState()) + s.chainA.App.GetIBCKeeper().ClientKeeper.SetClientState(s.chainA.GetContext(), clientID, sm.ClientState()) tc.malleate() updateOnMisbehaviourFunc := func() { - lightClientModule.UpdateStateOnMisbehaviour(suite.chainA.GetContext(), clientID, nil) + lightClientModule.UpdateStateOnMisbehaviour(s.chainA.GetContext(), clientID, nil) } if tc.expPanic == nil { updateOnMisbehaviourFunc() - store := suite.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(suite.chainA.GetContext(), clientID) + store := s.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(s.chainA.GetContext(), clientID) clientStateBz := store.Get(host.ClientStateKey()) - suite.Require().NotEmpty(clientStateBz) + s.Require().NotEmpty(clientStateBz) - newClientState := clienttypes.MustUnmarshalClientState(suite.chainA.Codec, clientStateBz) + newClientState := clienttypes.MustUnmarshalClientState(s.chainA.Codec, clientStateBz) - suite.Require().True(newClientState.(*solomachine.ClientState).IsFrozen) + s.Require().True(newClientState.(*solomachine.ClientState).IsFrozen) } else { - suite.Require().PanicsWithError(tc.expPanic.Error(), updateOnMisbehaviourFunc) + s.Require().PanicsWithError(tc.expPanic.Error(), updateOnMisbehaviourFunc) } }) } } } -func (suite *SoloMachineTestSuite) TestVerifyClientMessageHeader() { +func (s *SoloMachineTestSuite) TestVerifyClientMessageHeader() { var ( clientID string clientMsg exported.ClientMessage ) // test singlesig and multisig public keys - for _, sm := range []*ibctesting.Solomachine{suite.solomachine, suite.solomachineMulti} { - + for _, sm := range []*ibctesting.Solomachine{s.solomachine, s.solomachineMulti} { testCases := []struct 
{ name string malleate func() @@ -1272,7 +1269,7 @@ func (suite *SoloMachineTestSuite) TestVerifyClientMessageHeader() { "failure: invalid header Signature", func() { h := sm.CreateHeader(sm.Diversifier) - h.Signature = suite.GetInvalidProof() + h.Signature = s.GetInvalidProof() clientMsg = h }, errors.New("proto: wrong wireType = 0 for field Multi"), }, @@ -1300,15 +1297,15 @@ func (suite *SoloMachineTestSuite) TestVerifyClientMessageHeader() { h := sm.CreateHeader(sm.Diversifier) publicKey, err := codectypes.NewAnyWithValue(sm.PublicKey) - suite.NoError(err) + s.Require().NoError(err) data := &solomachine.HeaderData{ NewPubKey: publicKey, NewDiversifier: h.NewDiversifier, } - dataBz, err := suite.chainA.Codec.Marshal(data) - suite.Require().NoError(err) + dataBz, err := s.chainA.Codec.Marshal(data) + s.Require().NoError(err) // generate invalid signature signBytes := &solomachine.SignBytes{ @@ -1319,11 +1316,11 @@ func (suite *SoloMachineTestSuite) TestVerifyClientMessageHeader() { Data: dataBz, } - signBz, err := suite.chainA.Codec.Marshal(signBytes) - suite.Require().NoError(err) + signBz, err := s.chainA.Codec.Marshal(signBytes) + s.Require().NoError(err) sig := sm.GenerateSignature(signBz) - suite.Require().NoError(err) + s.Require().NoError(err) h.Signature = sig clientMsg = h @@ -1362,35 +1359,35 @@ func (suite *SoloMachineTestSuite) TestVerifyClientMessageHeader() { func() { clientID = unusedSmClientID }, - fmt.Errorf("%s: %s", unusedSmClientID, clienttypes.ErrClientNotFound), + fmt.Errorf("%s: %w", unusedSmClientID, clienttypes.ErrClientNotFound), }, } for _, tc := range testCases { - suite.Run(tc.name, func() { - suite.SetupTest() + s.Run(tc.name, func() { + s.SetupTest() clientID = sm.ClientID - lightClientModule, err := suite.chainA.App.GetIBCKeeper().ClientKeeper.Route(suite.chainA.GetContext(), clientID) - suite.Require().NoError(err) + lightClientModule, err := s.chainA.App.GetIBCKeeper().ClientKeeper.Route(s.chainA.GetContext(), clientID) + s.Require().NoError(err) - suite.chainA.App.GetIBCKeeper().ClientKeeper.SetClientState(suite.chainA.GetContext(), clientID, sm.ClientState()) + s.chainA.App.GetIBCKeeper().ClientKeeper.SetClientState(s.chainA.GetContext(), clientID, sm.ClientState()) tc.malleate() - err = lightClientModule.VerifyClientMessage(suite.chainA.GetContext(), clientID, clientMsg) + err = lightClientModule.VerifyClientMessage(s.chainA.GetContext(), clientID, clientMsg) if tc.expErr == nil { - suite.Require().NoError(err) + s.Require().NoError(err) } else { - suite.Require().ErrorContains(err, tc.expErr.Error()) + s.Require().ErrorContains(err, tc.expErr.Error()) } }) } } } -func (suite *SoloMachineTestSuite) TestVerifyClientMessageMisbehaviour() { +func (s *SoloMachineTestSuite) TestVerifyClientMessageMisbehaviour() { var ( clientMsg exported.ClientMessage clientState *solomachine.ClientState @@ -1398,8 +1395,7 @@ func (suite *SoloMachineTestSuite) TestVerifyClientMessageMisbehaviour() { ) // test singlesig and multisig public keys - for _, sm := range []*ibctesting.Solomachine{suite.solomachine, suite.solomachineMulti} { - + for _, sm := range []*ibctesting.Solomachine{s.solomachine, s.solomachineMulti} { testCases := []struct { name string malleate func() @@ -1432,7 +1428,7 @@ func (suite *SoloMachineTestSuite) TestVerifyClientMessageMisbehaviour() { func() { clientState.ConsensusState.PublicKey = nil clientMsg = sm.CreateMisbehaviour() - suite.chainA.App.GetIBCKeeper().ClientKeeper.SetClientState(suite.chainA.GetContext(), clientID, clientState) + 
s.chainA.App.GetIBCKeeper().ClientKeeper.SetClientState(s.chainA.GetContext(), clientID, clientState) }, clienttypes.ErrInvalidConsensus, }, @@ -1441,7 +1437,7 @@ func (suite *SoloMachineTestSuite) TestVerifyClientMessageMisbehaviour() { func() { m := sm.CreateMisbehaviour() - m.SignatureOne.Signature = suite.GetInvalidProof() + m.SignatureOne.Signature = s.GetInvalidProof() clientMsg = m }, errors.New("proto: wrong wireType = 0 for field Multi"), }, @@ -1450,7 +1446,7 @@ func (suite *SoloMachineTestSuite) TestVerifyClientMessageMisbehaviour() { func() { m := sm.CreateMisbehaviour() - m.SignatureTwo.Signature = suite.GetInvalidProof() + m.SignatureTwo.Signature = s.GetInvalidProof() clientMsg = m }, errors.New("proto: wrong wireType = 0 for field Multi"), }, @@ -1487,8 +1483,8 @@ func (suite *SoloMachineTestSuite) TestVerifyClientMessageMisbehaviour() { Data: msg, } - data, err := suite.chainA.Codec.Marshal(signBytes) - suite.Require().NoError(err) + data, err := s.chainA.Codec.Marshal(signBytes) + s.Require().NoError(err) sig := sm.GenerateSignature(data) @@ -1513,8 +1509,8 @@ func (suite *SoloMachineTestSuite) TestVerifyClientMessageMisbehaviour() { Data: msg, } - data, err := suite.chainA.Codec.Marshal(signBytes) - suite.Require().NoError(err) + data, err := s.chainA.Codec.Marshal(signBytes) + s.Require().NoError(err) sig := sm.GenerateSignature(data) @@ -1569,8 +1565,8 @@ func (suite *SoloMachineTestSuite) TestVerifyClientMessageMisbehaviour() { Data: msg, } - data, err := suite.chainA.Codec.Marshal(signBytes) - suite.Require().NoError(err) + data, err := s.chainA.Codec.Marshal(signBytes) + s.Require().NoError(err) sig := sm.GenerateSignature(data) @@ -1588,8 +1584,8 @@ func (suite *SoloMachineTestSuite) TestVerifyClientMessageMisbehaviour() { Path: []byte("invalid signature data"), Data: msg, } - data, err = suite.chainA.Codec.Marshal(signBytes) - suite.Require().NoError(err) + data, err = s.chainA.Codec.Marshal(signBytes) + s.Require().NoError(err) sig = sm.GenerateSignature(data) @@ -1605,45 +1601,45 @@ func (suite *SoloMachineTestSuite) TestVerifyClientMessageMisbehaviour() { func() { clientID = unusedSmClientID }, - fmt.Errorf("%s: %s", unusedSmClientID, clienttypes.ErrClientNotFound), + fmt.Errorf("%s: %w", unusedSmClientID, clienttypes.ErrClientNotFound), }, } for _, tc := range testCases { - suite.Run(tc.name, func() { - suite.SetupTest() + s.Run(tc.name, func() { + s.SetupTest() clientID = sm.ClientID - lightClientModule, err := suite.chainA.App.GetIBCKeeper().ClientKeeper.Route(suite.chainA.GetContext(), clientID) - suite.Require().NoError(err) + lightClientModule, err := s.chainA.App.GetIBCKeeper().ClientKeeper.Route(s.chainA.GetContext(), clientID) + s.Require().NoError(err) - suite.chainA.App.GetIBCKeeper().ClientKeeper.SetClientState(suite.chainA.GetContext(), clientID, sm.ClientState()) + s.chainA.App.GetIBCKeeper().ClientKeeper.SetClientState(s.chainA.GetContext(), clientID, sm.ClientState()) tc.malleate() - err = lightClientModule.VerifyClientMessage(suite.chainA.GetContext(), clientID, clientMsg) + err = lightClientModule.VerifyClientMessage(s.chainA.GetContext(), clientID, clientMsg) if tc.expErr == nil { - suite.Require().NoError(err) + s.Require().NoError(err) } else { - suite.Require().ErrorContains(err, tc.expErr.Error()) + s.Require().ErrorContains(err, tc.expErr.Error()) } }) } } } -func (suite *SoloMachineTestSuite) TestVerifyUpgradeAndUpdateState() { - clientID := suite.solomachine.ClientID +func (s *SoloMachineTestSuite) TestVerifyUpgradeAndUpdateState() { + 
clientID := s.solomachine.ClientID - lightClientModule, err := suite.chainA.App.GetIBCKeeper().ClientKeeper.Route(suite.chainA.GetContext(), clientID) - suite.Require().NoError(err) + lightClientModule, err := s.chainA.App.GetIBCKeeper().ClientKeeper.Route(s.chainA.GetContext(), clientID) + s.Require().NoError(err) - err = lightClientModule.VerifyUpgradeAndUpdateState(suite.chainA.GetContext(), clientID, nil, nil, nil, nil) - suite.Require().Error(err) + err = lightClientModule.VerifyUpgradeAndUpdateState(s.chainA.GetContext(), clientID, nil, nil, nil, nil) + s.Require().Error(err) } -func (suite *SoloMachineTestSuite) TestLatestHeight() { +func (s *SoloMachineTestSuite) TestLatestHeight() { var clientID string testCases := []struct { @@ -1667,21 +1663,21 @@ func (suite *SoloMachineTestSuite) TestLatestHeight() { } for _, tc := range testCases { - suite.Run(tc.name, func() { - suite.SetupTest() - clientID = suite.solomachine.ClientID - clientState := suite.solomachine.ClientState() + s.Run(tc.name, func() { + s.SetupTest() + clientID = s.solomachine.ClientID + clientState := s.solomachine.ClientState() - lightClientModule, err := suite.chainA.App.GetIBCKeeper().ClientKeeper.Route(suite.chainA.GetContext(), clientID) - suite.Require().NoError(err) + lightClientModule, err := s.chainA.App.GetIBCKeeper().ClientKeeper.Route(s.chainA.GetContext(), clientID) + s.Require().NoError(err) - suite.chainA.App.GetIBCKeeper().ClientKeeper.SetClientState(suite.chainA.GetContext(), clientID, clientState) + s.chainA.App.GetIBCKeeper().ClientKeeper.SetClientState(s.chainA.GetContext(), clientID, clientState) tc.malleate() - height := lightClientModule.LatestHeight(suite.chainA.GetContext(), clientID) + height := lightClientModule.LatestHeight(s.chainA.GetContext(), clientID) - suite.Require().Equal(tc.expHeight, height) + s.Require().Equal(tc.expHeight, height) }) } } diff --git a/modules/light-clients/06-solomachine/misbehaviour.go b/modules/light-clients/06-solomachine/misbehaviour.go index 78db1813db8..d4e0f309917 100644 --- a/modules/light-clients/06-solomachine/misbehaviour.go +++ b/modules/light-clients/06-solomachine/misbehaviour.go @@ -17,27 +17,27 @@ func (Misbehaviour) ClientType() string { } // ValidateBasic implements Misbehaviour interface. -func (misbehaviour Misbehaviour) ValidateBasic() error { - if misbehaviour.Sequence == 0 { +func (m Misbehaviour) ValidateBasic() error { + if m.Sequence == 0 { return errorsmod.Wrap(clienttypes.ErrInvalidMisbehaviour, "sequence cannot be 0") } - if err := misbehaviour.SignatureOne.ValidateBasic(); err != nil { + if err := m.SignatureOne.ValidateBasic(); err != nil { return errorsmod.Wrap(err, "signature one failed basic validation") } - if err := misbehaviour.SignatureTwo.ValidateBasic(); err != nil { + if err := m.SignatureTwo.ValidateBasic(); err != nil { return errorsmod.Wrap(err, "signature two failed basic validation") } // misbehaviour signatures cannot be identical. - if bytes.Equal(misbehaviour.SignatureOne.Signature, misbehaviour.SignatureTwo.Signature) { + if bytes.Equal(m.SignatureOne.Signature, m.SignatureTwo.Signature) { return errorsmod.Wrap(clienttypes.ErrInvalidMisbehaviour, "misbehaviour signatures cannot be equal") } // message data signed cannot be identical if both paths are the same. 
- if bytes.Equal(misbehaviour.SignatureOne.Path, misbehaviour.SignatureTwo.Path) && - bytes.Equal(misbehaviour.SignatureOne.Data, misbehaviour.SignatureTwo.Data) { + if bytes.Equal(m.SignatureOne.Path, m.SignatureTwo.Path) && + bytes.Equal(m.SignatureOne.Data, m.SignatureTwo.Data) { return errorsmod.Wrap(clienttypes.ErrInvalidMisbehaviour, "misbehaviour signature data must be signed over different messages") } diff --git a/modules/light-clients/06-solomachine/misbehaviour_test.go b/modules/light-clients/06-solomachine/misbehaviour_test.go index e8f85476358..c47b608c87f 100644 --- a/modules/light-clients/06-solomachine/misbehaviour_test.go +++ b/modules/light-clients/06-solomachine/misbehaviour_test.go @@ -8,16 +8,15 @@ import ( ibctesting "github.com/cosmos/ibc-go/v10/testing" ) -func (suite *SoloMachineTestSuite) TestMisbehaviour() { - misbehaviour := suite.solomachine.CreateMisbehaviour() +func (s *SoloMachineTestSuite) TestMisbehaviour() { + misbehaviour := s.solomachine.CreateMisbehaviour() - suite.Require().Equal(exported.Solomachine, misbehaviour.ClientType()) + s.Require().Equal(exported.Solomachine, misbehaviour.ClientType()) } -func (suite *SoloMachineTestSuite) TestMisbehaviourValidateBasic() { +func (s *SoloMachineTestSuite) TestMisbehaviourValidateBasic() { // test singlesig and multisig public keys - for _, sm := range []*ibctesting.Solomachine{suite.solomachine, suite.solomachineMulti} { - + for _, sm := range []*ibctesting.Solomachine{s.solomachine, s.solomachineMulti} { testCases := []struct { name string malleateMisbehaviour func(misbehaviour *solomachine.Misbehaviour) @@ -116,17 +115,17 @@ func (suite *SoloMachineTestSuite) TestMisbehaviourValidateBasic() { } for _, tc := range testCases { - suite.Run(tc.name, func() { + s.Run(tc.name, func() { misbehaviour := sm.CreateMisbehaviour() tc.malleateMisbehaviour(misbehaviour) err := misbehaviour.ValidateBasic() if tc.expErr == nil { - suite.Require().NoError(err) + s.Require().NoError(err) } else { - suite.Require().Error(err) - suite.Require().ErrorContains(err, tc.expErr.Error()) + s.Require().Error(err) + s.Require().ErrorContains(err, tc.expErr.Error()) } }) } diff --git a/modules/light-clients/06-solomachine/proof_test.go b/modules/light-clients/06-solomachine/proof_test.go index bbc8fb1e2bb..9c362e9b46b 100644 --- a/modules/light-clients/06-solomachine/proof_test.go +++ b/modules/light-clients/06-solomachine/proof_test.go @@ -9,17 +9,17 @@ import ( solomachine "github.com/cosmos/ibc-go/v10/modules/light-clients/06-solomachine" ) -func (suite *SoloMachineTestSuite) TestVerifySignature() { - cdc := suite.chainA.App.AppCodec() +func (s *SoloMachineTestSuite) TestVerifySignature() { + cdc := s.chainA.App.AppCodec() signBytes := []byte("sign bytes") - singleSignature := suite.solomachine.GenerateSignature(signBytes) + singleSignature := s.solomachine.GenerateSignature(signBytes) singleSigData, err := solomachine.UnmarshalSignatureData(cdc, singleSignature) - suite.Require().NoError(err) + s.Require().NoError(err) - multiSignature := suite.solomachineMulti.GenerateSignature(signBytes) + multiSignature := s.solomachineMulti.GenerateSignature(signBytes) multiSigData, err := solomachine.UnmarshalSignatureData(cdc, multiSignature) - suite.Require().NoError(err) + s.Require().NoError(err) testCases := []struct { name string @@ -29,39 +29,39 @@ func (suite *SoloMachineTestSuite) TestVerifySignature() { }{ { "single signature with regular public key", - suite.solomachine.PublicKey, + s.solomachine.PublicKey, singleSigData, nil, }, { 
"multi signature with multisig public key", - suite.solomachineMulti.PublicKey, + s.solomachineMulti.PublicKey, multiSigData, nil, }, { "single signature with multisig public key", - suite.solomachineMulti.PublicKey, + s.solomachineMulti.PublicKey, singleSigData, errors.New("invalid signature data type, expected *signing.MultiSignatureData, got *signing.MultiSignatureData: signature verification failed"), }, { "multi signature with regular public key", - suite.solomachine.PublicKey, + s.solomachine.PublicKey, multiSigData, errors.New("invalid signature data type, expected *signing.SingleSignatureData, got *signing.SingleSignatureData: signature verification failed"), }, } for _, tc := range testCases { - suite.Run(tc.name, func() { + s.Run(tc.name, func() { err := solomachine.VerifySignature(tc.publicKey, signBytes, tc.sigData) if tc.expErr == nil { - suite.Require().NoError(err) + s.Require().NoError(err) } else { - suite.Require().Error(err) - suite.Require().ErrorContains(err, tc.expErr.Error()) + s.Require().Error(err) + s.Require().ErrorContains(err, tc.expErr.Error()) } }) } diff --git a/modules/light-clients/06-solomachine/proposal_handle.go b/modules/light-clients/06-solomachine/proposal_handle.go index 1be549936f9..c2952befab6 100644 --- a/modules/light-clients/06-solomachine/proposal_handle.go +++ b/modules/light-clients/06-solomachine/proposal_handle.go @@ -20,7 +20,7 @@ import ( // the client has been disallowed to be updated by a governance proposal, // the substitute is not a solo machine, or the current public key equals // the new public key. -func (cs ClientState) CheckSubstituteAndUpdateState( +func (cs *ClientState) CheckSubstituteAndUpdateState( ctx sdk.Context, cdc codec.BinaryCodec, subjectClientStore, _ storetypes.KVStore, substituteClient exported.ClientState, ) error { @@ -48,7 +48,7 @@ func (cs ClientState) CheckSubstituteAndUpdateState( cs.ConsensusState = substituteClientState.ConsensusState cs.IsFrozen = false - setClientState(subjectClientStore, cdc, &cs) + setClientState(subjectClientStore, cdc, cs) return nil } diff --git a/modules/light-clients/06-solomachine/solomachine_test.go b/modules/light-clients/06-solomachine/solomachine_test.go index 19922961b72..6778d74fb21 100644 --- a/modules/light-clients/06-solomachine/solomachine_test.go +++ b/modules/light-clients/06-solomachine/solomachine_test.go @@ -40,45 +40,45 @@ type SoloMachineTestSuite struct { store storetypes.KVStore } -func (suite *SoloMachineTestSuite) SetupTest() { - suite.coordinator = ibctesting.NewCoordinator(suite.T(), 2) - suite.chainA = suite.coordinator.GetChain(ibctesting.GetChainID(1)) - suite.chainB = suite.coordinator.GetChain(ibctesting.GetChainID(2)) +func (s *SoloMachineTestSuite) SetupTest() { + s.coordinator = ibctesting.NewCoordinator(s.T(), 2) + s.chainA = s.coordinator.GetChain(ibctesting.GetChainID(1)) + s.chainB = s.coordinator.GetChain(ibctesting.GetChainID(2)) - suite.solomachine = ibctesting.NewSolomachine(suite.T(), suite.chainA.Codec, "06-solomachine-0", "testing", 1) - suite.solomachineMulti = ibctesting.NewSolomachine(suite.T(), suite.chainA.Codec, "06-solomachine-1", "testing", 4) + s.solomachine = ibctesting.NewSolomachine(s.T(), s.chainA.Codec, "06-solomachine-0", "testing", 1) + s.solomachineMulti = ibctesting.NewSolomachine(s.T(), s.chainA.Codec, "06-solomachine-1", "testing", 4) - suite.store = suite.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(suite.chainA.GetContext(), exported.Solomachine) + s.store = 
s.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(s.chainA.GetContext(), exported.Solomachine) } func TestSoloMachineTestSuite(t *testing.T) { testifysuite.Run(t, new(SoloMachineTestSuite)) } -func (suite *SoloMachineTestSuite) SetupSolomachine() string { - clientID := suite.solomachine.CreateClient(suite.chainA) +func (s *SoloMachineTestSuite) SetupSolomachine() string { + clientID := s.solomachine.CreateClient(s.chainA) - connectionID := suite.solomachine.ConnOpenInit(suite.chainA, clientID) + connectionID := s.solomachine.ConnOpenInit(s.chainA, clientID) // open try is not necessary as the solo machine implementation is mocked - suite.solomachine.ConnOpenAck(suite.chainA, clientID, connectionID) + s.solomachine.ConnOpenAck(s.chainA, clientID, connectionID) // open confirm is not necessary as the solo machine implementation is mocked - channelID := suite.solomachine.ChanOpenInit(suite.chainA, connectionID) + channelID := s.solomachine.ChanOpenInit(s.chainA, connectionID) // open try is not necessary as the solo machine implementation is mocked - suite.solomachine.ChanOpenAck(suite.chainA, channelID) + s.solomachine.ChanOpenAck(s.chainA, channelID) // open confirm is not necessary as the solo machine implementation is mocked return channelID } -func (suite *SoloMachineTestSuite) TestRecvPacket() { - channelID := suite.SetupSolomachine() +func (s *SoloMachineTestSuite) TestRecvPacket() { + channelID := s.SetupSolomachine() packet := channeltypes.NewPacket( mock.MockPacketData, 1, @@ -87,73 +87,73 @@ func (suite *SoloMachineTestSuite) TestRecvPacket() { transfertypes.PortID, channelID, clienttypes.ZeroHeight(), - uint64(suite.chainA.GetContext().BlockTime().Add(time.Hour).UnixNano()), + uint64(s.chainA.GetContext().BlockTime().Add(time.Hour).UnixNano()), ) // send packet is not necessary as the solo machine implementation is mocked - suite.solomachine.RecvPacket(suite.chainA, packet) + s.solomachine.RecvPacket(s.chainA, packet) // close init is not necessary as the solomachine implementation is mocked - suite.solomachine.ChanCloseConfirm(suite.chainA, transfertypes.PortID, channelID) + s.solomachine.ChanCloseConfirm(s.chainA, transfertypes.PortID, channelID) } -func (suite *SoloMachineTestSuite) TestAcknowledgePacket() { - channelID := suite.SetupSolomachine() +func (s *SoloMachineTestSuite) TestAcknowledgePacket() { + channelID := s.SetupSolomachine() - packet := suite.solomachine.SendTransfer(suite.chainA, transfertypes.PortID, channelID) + packet := s.solomachine.SendTransfer(s.chainA, transfertypes.PortID, channelID) // recv packet is not necessary as the solo machine implementation is mocked - suite.solomachine.AcknowledgePacket(suite.chainA, packet) + s.solomachine.AcknowledgePacket(s.chainA, packet) // close init is not necessary as the solomachine implementation is mocked - suite.solomachine.ChanCloseConfirm(suite.chainA, transfertypes.PortID, channelID) + s.solomachine.ChanCloseConfirm(s.chainA, transfertypes.PortID, channelID) } -func (suite *SoloMachineTestSuite) TestTimeout() { - channelID := suite.SetupSolomachine() - packet := suite.solomachine.SendTransfer(suite.chainA, transfertypes.PortID, channelID, func(msg *transfertypes.MsgTransfer) { - msg.TimeoutTimestamp = suite.solomachine.Time + 1 +func (s *SoloMachineTestSuite) TestTimeout() { + channelID := s.SetupSolomachine() + packet := s.solomachine.SendTransfer(s.chainA, transfertypes.PortID, channelID, func(msg *transfertypes.MsgTransfer) { + msg.TimeoutTimestamp = s.solomachine.Time + 1 }) // simulate solomachine time 
increment - suite.solomachine.Time++ + s.solomachine.Time++ - suite.solomachine.UpdateClient(suite.chainA, ibctesting.DefaultSolomachineClientID) + s.solomachine.UpdateClient(s.chainA, ibctesting.DefaultSolomachineClientID) - suite.solomachine.TimeoutPacket(suite.chainA, packet) + s.solomachine.TimeoutPacket(s.chainA, packet) - suite.solomachine.ChanCloseConfirm(suite.chainA, transfertypes.PortID, channelID) + s.solomachine.ChanCloseConfirm(s.chainA, transfertypes.PortID, channelID) } -func (suite *SoloMachineTestSuite) TestTimeoutOnClose() { - channelID := suite.SetupSolomachine() +func (s *SoloMachineTestSuite) TestTimeoutOnClose() { + channelID := s.SetupSolomachine() - packet := suite.solomachine.SendTransfer(suite.chainA, transfertypes.PortID, channelID) + packet := s.solomachine.SendTransfer(s.chainA, transfertypes.PortID, channelID) - suite.solomachine.TimeoutPacketOnClose(suite.chainA, packet, channelID) + s.solomachine.TimeoutPacketOnClose(s.chainA, packet, channelID) } -func (suite *SoloMachineTestSuite) GetSequenceFromStore() uint64 { - bz := suite.store.Get(host.ClientStateKey()) - suite.Require().NotNil(bz) +func (s *SoloMachineTestSuite) GetSequenceFromStore() uint64 { + bz := s.store.Get(host.ClientStateKey()) + s.Require().NotNil(bz) var clientState exported.ClientState - err := suite.chainA.Codec.UnmarshalInterface(bz, &clientState) - suite.Require().NoError(err) + err := s.chainA.Codec.UnmarshalInterface(bz, &clientState) + s.Require().NoError(err) smClientState, ok := clientState.(*solomachine.ClientState) - suite.Require().True(ok) + s.Require().True(ok) return smClientState.Sequence } -func (suite *SoloMachineTestSuite) GetInvalidProof() []byte { - invalidProof, err := suite.chainA.Codec.Marshal(&solomachine.TimestampedSignatureData{Timestamp: suite.solomachine.Time}) - suite.Require().NoError(err) +func (s *SoloMachineTestSuite) GetInvalidProof() []byte { + invalidProof, err := s.chainA.Codec.Marshal(&solomachine.TimestampedSignatureData{Timestamp: s.solomachine.Time}) + s.Require().NoError(err) return invalidProof } diff --git a/modules/light-clients/06-solomachine/update.go b/modules/light-clients/06-solomachine/update.go index ebe5e63e0fa..c473b21e8f9 100644 --- a/modules/light-clients/06-solomachine/update.go +++ b/modules/light-clients/06-solomachine/update.go @@ -14,7 +14,7 @@ import ( // VerifyClientMessage introspects the provided ClientMessage and checks its validity // A Solomachine Header is considered valid if the currently registered public key has signed over the new public key with the correct sequence // A Solomachine Misbehaviour is considered valid if duplicate signatures of the current public key are found on two different messages at a given sequence -func (cs ClientState) VerifyClientMessage(ctx sdk.Context, cdc codec.BinaryCodec, clientStore storetypes.KVStore, clientMsg exported.ClientMessage) error { +func (cs *ClientState) VerifyClientMessage(ctx sdk.Context, cdc codec.BinaryCodec, clientStore storetypes.KVStore, clientMsg exported.ClientMessage) error { switch msg := clientMsg.(type) { case *Header: return cs.verifyHeader(cdc, msg) @@ -25,7 +25,7 @@ func (cs ClientState) VerifyClientMessage(ctx sdk.Context, cdc codec.BinaryCodec } } -func (cs ClientState) verifyHeader(cdc codec.BinaryCodec, header *Header) error { +func (cs *ClientState) verifyHeader(cdc codec.BinaryCodec, header *Header) error { // assert update timestamp is not less than current consensus state timestamp if header.Timestamp < cs.ConsensusState.Timestamp { return 
errorsmod.Wrapf( @@ -78,7 +78,7 @@ func (cs ClientState) verifyHeader(cdc codec.BinaryCodec, header *Header) error // UpdateState updates the consensus state to the new public key and an incremented sequence. // A list containing the updated consensus height is returned. // If the provided clientMsg is not of type Header, the handler will no-op and return an empty slice. -func (cs ClientState) UpdateState(ctx sdk.Context, cdc codec.BinaryCodec, clientStore storetypes.KVStore, clientMsg exported.ClientMessage) []exported.Height { +func (cs *ClientState) UpdateState(ctx sdk.Context, cdc codec.BinaryCodec, clientStore storetypes.KVStore, clientMsg exported.ClientMessage) []exported.Height { smHeader, ok := clientMsg.(*Header) if !ok { // clientMsg is invalid Misbehaviour, no update necessary @@ -95,7 +95,7 @@ func (cs ClientState) UpdateState(ctx sdk.Context, cdc codec.BinaryCodec, client cs.Sequence++ cs.ConsensusState = consensusState - setClientState(clientStore, cdc, &cs) + setClientState(clientStore, cdc, cs) return []exported.Height{clienttypes.NewHeight(0, cs.Sequence)} } diff --git a/modules/light-clients/07-tendermint/client_state_test.go b/modules/light-clients/07-tendermint/client_state_test.go index fe25b6b2800..0a2a2ee66a6 100644 --- a/modules/light-clients/07-tendermint/client_state_test.go +++ b/modules/light-clients/07-tendermint/client_state_test.go @@ -16,7 +16,7 @@ const ( var invalidProof = []byte("invalid proof") -func (suite *TendermintTestSuite) TestValidate() { +func (s *TendermintTestSuite) TestValidate() { testCases := []struct { name string clientState *ibctm.ClientState @@ -121,13 +121,13 @@ func (suite *TendermintTestSuite) TestValidate() { } for _, tc := range testCases { - suite.Run(tc.name, func() { + s.Run(tc.name, func() { err := tc.clientState.Validate() if tc.expErr == nil { - suite.Require().NoError(err, tc.name) + s.Require().NoError(err, tc.name) } else { - suite.Require().ErrorContains(err, tc.expErr.Error()) + s.Require().ErrorContains(err, tc.expErr.Error()) } }) } diff --git a/modules/light-clients/07-tendermint/consensus_state_test.go b/modules/light-clients/07-tendermint/consensus_state_test.go index e460e5451a4..0e379650e74 100644 --- a/modules/light-clients/07-tendermint/consensus_state_test.go +++ b/modules/light-clients/07-tendermint/consensus_state_test.go @@ -8,7 +8,7 @@ import ( ibctm "github.com/cosmos/ibc-go/v10/modules/light-clients/07-tendermint" ) -func (suite *TendermintTestSuite) TestConsensusStateValidateBasic() { +func (s *TendermintTestSuite) TestConsensusStateValidateBasic() { testCases := []struct { msg string consensusState *ibctm.ConsensusState @@ -17,43 +17,43 @@ func (suite *TendermintTestSuite) TestConsensusStateValidateBasic() { { "success", &ibctm.ConsensusState{ - Timestamp: suite.now, + Timestamp: s.now, Root: commitmenttypes.NewMerkleRoot([]byte("app_hash")), - NextValidatorsHash: suite.valsHash, + NextValidatorsHash: s.valsHash, }, true, }, { "success with sentinel", &ibctm.ConsensusState{ - Timestamp: suite.now, + Timestamp: s.now, Root: commitmenttypes.NewMerkleRoot([]byte(ibctm.SentinelRoot)), - NextValidatorsHash: suite.valsHash, + NextValidatorsHash: s.valsHash, }, true, }, { "root is nil", &ibctm.ConsensusState{ - Timestamp: suite.now, + Timestamp: s.now, Root: commitmenttypes.MerkleRoot{}, - NextValidatorsHash: suite.valsHash, + NextValidatorsHash: s.valsHash, }, false, }, { "root is empty", &ibctm.ConsensusState{ - Timestamp: suite.now, + Timestamp: s.now, Root: commitmenttypes.MerkleRoot{}, - 
NextValidatorsHash: suite.valsHash, + NextValidatorsHash: s.valsHash, }, false, }, { "nextvalshash is invalid", &ibctm.ConsensusState{ - Timestamp: suite.now, + Timestamp: s.now, Root: commitmenttypes.NewMerkleRoot([]byte("app_hash")), NextValidatorsHash: []byte("hi"), }, @@ -65,23 +65,23 @@ func (suite *TendermintTestSuite) TestConsensusStateValidateBasic() { &ibctm.ConsensusState{ Timestamp: time.Time{}, Root: commitmenttypes.NewMerkleRoot([]byte("app_hash")), - NextValidatorsHash: suite.valsHash, + NextValidatorsHash: s.valsHash, }, false, }, } for i, tc := range testCases { - suite.Run(tc.msg, func() { + s.Run(tc.msg, func() { // check just to increase coverage - suite.Require().Equal(exported.Tendermint, tc.consensusState.ClientType()) - suite.Require().Equal(tc.consensusState.GetRoot(), tc.consensusState.Root) + s.Require().Equal(exported.Tendermint, tc.consensusState.ClientType()) + s.Require().Equal(tc.consensusState.GetRoot(), tc.consensusState.Root) err := tc.consensusState.ValidateBasic() if tc.expectPass { - suite.Require().NoError(err, "valid test case %d failed: %s", i, tc.msg) + s.Require().NoError(err, "valid test case %d failed: %s", i, tc.msg) } else { - suite.Require().Error(err, "invalid test case %d passed: %s", i, tc.msg) + s.Require().Error(err, "invalid test case %d passed: %s", i, tc.msg) } }) } diff --git a/modules/light-clients/07-tendermint/header_test.go b/modules/light-clients/07-tendermint/header_test.go index 6c98b832cd2..cc3f93d13b0 100644 --- a/modules/light-clients/07-tendermint/header_test.go +++ b/modules/light-clients/07-tendermint/header_test.go @@ -11,17 +11,17 @@ import ( ibctm "github.com/cosmos/ibc-go/v10/modules/light-clients/07-tendermint" ) -func (suite *TendermintTestSuite) TestGetHeight() { - header := suite.chainA.LatestCommittedHeader - suite.Require().NotEqual(uint64(0), header.GetHeight()) +func (s *TendermintTestSuite) TestGetHeight() { + header := s.chainA.LatestCommittedHeader + s.Require().NotEqual(uint64(0), header.GetHeight()) } -func (suite *TendermintTestSuite) TestGetTime() { - header := suite.chainA.LatestCommittedHeader - suite.Require().NotEqual(time.Time{}, header.GetTime()) +func (s *TendermintTestSuite) TestGetTime() { + header := s.chainA.LatestCommittedHeader + s.Require().NotEqual(time.Time{}, header.GetTime()) } -func (suite *TendermintTestSuite) TestHeaderValidateBasic() { +func (s *TendermintTestSuite) TestHeaderValidateBasic() { var header *ibctm.Header testCases := []struct { name string @@ -39,13 +39,13 @@ func (suite *TendermintTestSuite) TestHeaderValidateBasic() { header.Commit.Height = -1 }, errors.New("header is not a tendermint header")}, {"signed header failed tendermint ValidateBasic", func() { - header = suite.chainA.LatestCommittedHeader + header = s.chainA.LatestCommittedHeader header.Commit = nil }, errors.New("header failed basic validation")}, {"trusted height is equal to header height", func() { var ok bool header.TrustedHeight, ok = header.GetHeight().(clienttypes.Height) - suite.Require().True(ok) + s.Require().True(ok) }, errors.New("invalid header height")}, {"validator set nil", func() { header.ValidatorSet = nil @@ -55,27 +55,27 @@ func (suite *TendermintTestSuite) TestHeaderValidateBasic() { }, errors.New("validator set is not tendermint validator set")}, {"header validator hash does not equal hash of validator set", func() { // use chainB's randomly generated validator set - header.ValidatorSet = suite.chainB.LatestCommittedHeader.ValidatorSet + header.ValidatorSet = 
s.chainB.LatestCommittedHeader.ValidatorSet }, errors.New("validator set does not match hash")}, } - suite.Require().Equal(exported.Tendermint, suite.header.ClientType()) + s.Require().Equal(exported.Tendermint, s.header.ClientType()) for _, tc := range testCases { - suite.Run(tc.name, func() { - suite.SetupTest() + s.Run(tc.name, func() { + s.SetupTest() - header = suite.chainA.LatestCommittedHeader // must be explicitly changed in malleate + header = s.chainA.LatestCommittedHeader // must be explicitly changed in malleate tc.malleate() err := header.ValidateBasic() if tc.expErr == nil { - suite.Require().NoError(err) + s.Require().NoError(err) } else { - suite.Require().Error(err) - suite.Require().ErrorContains(err, tc.expErr.Error()) + s.Require().Error(err) + s.Require().ErrorContains(err, tc.expErr.Error()) } }) } diff --git a/modules/light-clients/07-tendermint/light_client_module_test.go b/modules/light-clients/07-tendermint/light_client_module_test.go index 16bff624a41..228c01f8fff 100644 --- a/modules/light-clients/07-tendermint/light_client_module_test.go +++ b/modules/light-clients/07-tendermint/light_client_module_test.go @@ -28,7 +28,7 @@ var ( solomachineClientID = clienttypes.FormatClientIdentifier(exported.Solomachine, 0) ) -func (suite *TendermintTestSuite) TestInitialize() { +func (s *TendermintTestSuite) TestInitialize() { var consensusState exported.ConsensusState var clientState exported.ClientState @@ -52,14 +52,14 @@ func (suite *TendermintTestSuite) TestInitialize() { { "invalid client state: solomachine client state", func() { - clientState = ibctesting.NewSolomachine(suite.T(), suite.chainA.Codec, "solomachine", "", 2).ClientState() + clientState = ibctesting.NewSolomachine(s.T(), s.chainA.Codec, "solomachine", "", 2).ClientState() }, errors.New("failed to unmarshal client state bytes into client state"), }, { "invalid consensus: consensus state is solomachine consensus", func() { - consensusState = ibctesting.NewSolomachine(suite.T(), suite.chainA.Codec, "solomachine", "", 2).ConsensusState() + consensusState = ibctesting.NewSolomachine(s.T(), s.chainA.Codec, "solomachine", "", 2).ConsensusState() }, errors.New("failed to unmarshal consensus state bytes into consensus state"), }, @@ -73,49 +73,49 @@ func (suite *TendermintTestSuite) TestInitialize() { } for _, tc := range testCases { - suite.Run(tc.name, func() { - suite.SetupTest() - path := ibctesting.NewPath(suite.chainA, suite.chainB) + s.Run(tc.name, func() { + s.SetupTest() + path := ibctesting.NewPath(s.chainA, s.chainB) tmConfig, ok := path.EndpointA.ClientConfig.(*ibctesting.TendermintConfig) - suite.Require().True(ok) + s.Require().True(ok) clientState = ibctm.NewClientState( path.EndpointA.Chain.ChainID, tmConfig.TrustLevel, tmConfig.TrustingPeriod, tmConfig.UnbondingPeriod, tmConfig.MaxClockDrift, - suite.chainA.LatestCommittedHeader.GetHeight().(clienttypes.Height), commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath, + s.chainA.LatestCommittedHeader.GetHeight().(clienttypes.Height), commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath, ) - consensusState = ibctm.NewConsensusState(time.Now(), commitmenttypes.NewMerkleRoot([]byte(ibctm.SentinelRoot)), suite.chainA.ProposedHeader.ValidatorsHash) + consensusState = ibctm.NewConsensusState(time.Now(), commitmenttypes.NewMerkleRoot([]byte(ibctm.SentinelRoot)), s.chainA.ProposedHeader.ValidatorsHash) - clientID := suite.chainA.App.GetIBCKeeper().ClientKeeper.GenerateClientIdentifier(suite.chainA.GetContext(), clientState.ClientType()) + clientID := 
s.chainA.App.GetIBCKeeper().ClientKeeper.GenerateClientIdentifier(s.chainA.GetContext(), clientState.ClientType()) - lightClientModule, err := suite.chainA.App.GetIBCKeeper().ClientKeeper.Route(suite.chainA.GetContext(), clientID) - suite.Require().NoError(err) + lightClientModule, err := s.chainA.App.GetIBCKeeper().ClientKeeper.Route(s.chainA.GetContext(), clientID) + s.Require().NoError(err) tc.malleate() - clientStateBz := suite.chainA.Codec.MustMarshal(clientState) - consStateBz := suite.chainA.Codec.MustMarshal(consensusState) + clientStateBz := s.chainA.Codec.MustMarshal(clientState) + consStateBz := s.chainA.Codec.MustMarshal(consensusState) - err = lightClientModule.Initialize(suite.chainA.GetContext(), path.EndpointA.ClientID, clientStateBz, consStateBz) + err = lightClientModule.Initialize(s.chainA.GetContext(), path.EndpointA.ClientID, clientStateBz, consStateBz) - store := suite.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(suite.chainA.GetContext(), path.EndpointA.ClientID) + store := s.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(s.chainA.GetContext(), path.EndpointA.ClientID) if tc.expErr == nil { - suite.Require().NoError(err, "valid case returned an error") - suite.Require().True(store.Has(host.ClientStateKey())) - suite.Require().True(store.Has(host.ConsensusStateKey(suite.chainB.LatestCommittedHeader.GetHeight()))) + s.Require().NoError(err, "valid case returned an error") + s.Require().True(store.Has(host.ClientStateKey())) + s.Require().True(store.Has(host.ConsensusStateKey(s.chainB.LatestCommittedHeader.GetHeight()))) } else { - suite.Require().ErrorContains(err, tc.expErr.Error()) - suite.Require().False(store.Has(host.ClientStateKey())) - suite.Require().False(store.Has(host.ConsensusStateKey(suite.chainB.LatestCommittedHeader.GetHeight()))) + s.Require().ErrorContains(err, tc.expErr.Error()) + s.Require().False(store.Has(host.ClientStateKey())) + s.Require().False(store.Has(host.ConsensusStateKey(s.chainB.LatestCommittedHeader.GetHeight()))) } }) } } -func (suite *TendermintTestSuite) TestVerifyClientMessage() { +func (s *TendermintTestSuite) TestVerifyClientMessage() { var path *ibctesting.Path testCases := []struct { @@ -131,7 +131,7 @@ func (suite *TendermintTestSuite) TestVerifyClientMessage() { { "failure: client state not found", func() { - store := suite.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(suite.chainA.GetContext(), path.EndpointA.ClientID) + store := s.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(s.chainA.GetContext(), path.EndpointA.ClientID) store.Delete(host.ClientStateKey()) }, clienttypes.ErrClientNotFound, @@ -139,119 +139,119 @@ func (suite *TendermintTestSuite) TestVerifyClientMessage() { } for _, tc := range testCases { - suite.Run(tc.name, func() { - suite.SetupTest() + s.Run(tc.name, func() { + s.SetupTest() - path = ibctesting.NewPath(suite.chainA, suite.chainB) + path = ibctesting.NewPath(s.chainA, s.chainB) path.SetupClients() - lightClientModule, err := suite.chainA.App.GetIBCKeeper().ClientKeeper.Route(suite.chainA.GetContext(), path.EndpointA.ClientID) - suite.Require().NoError(err) + lightClientModule, err := s.chainA.App.GetIBCKeeper().ClientKeeper.Route(s.chainA.GetContext(), path.EndpointA.ClientID) + s.Require().NoError(err) // ensure counterparty state is committed - suite.coordinator.CommitBlock(suite.chainB) + s.coordinator.CommitBlock(s.chainB) trustedHeight, ok := path.EndpointA.GetClientLatestHeight().(clienttypes.Height) - suite.Require().True(ok) + s.Require().True(ok) header, err := 
path.EndpointA.Counterparty.Chain.IBCClientHeader(path.EndpointA.Counterparty.Chain.LatestCommittedHeader, trustedHeight) - suite.Require().NoError(err) + s.Require().NoError(err) tc.malleate() - err = lightClientModule.VerifyClientMessage(suite.chainA.GetContext(), path.EndpointA.ClientID, header) + err = lightClientModule.VerifyClientMessage(s.chainA.GetContext(), path.EndpointA.ClientID, header) if tc.expErr == nil { - suite.Require().NoError(err) + s.Require().NoError(err) } else { - suite.Require().ErrorIs(err, tc.expErr) + s.Require().ErrorIs(err, tc.expErr) } }) } } -func (suite *TendermintTestSuite) TestCheckForMisbehaviourPanicsOnClientStateNotFound() { - suite.SetupTest() +func (s *TendermintTestSuite) TestCheckForMisbehaviourPanicsOnClientStateNotFound() { + s.SetupTest() - path := ibctesting.NewPath(suite.chainA, suite.chainB) + path := ibctesting.NewPath(s.chainA, s.chainB) path.SetupClients() - lightClientModule, err := suite.chainA.App.GetIBCKeeper().ClientKeeper.Route(suite.chainA.GetContext(), path.EndpointA.ClientID) - suite.Require().NoError(err) + lightClientModule, err := s.chainA.App.GetIBCKeeper().ClientKeeper.Route(s.chainA.GetContext(), path.EndpointA.ClientID) + s.Require().NoError(err) // ensure counterparty state is committed - suite.coordinator.CommitBlock(suite.chainB) + s.coordinator.CommitBlock(s.chainB) trustedHeight, ok := path.EndpointA.GetClientLatestHeight().(clienttypes.Height) - suite.Require().True(ok) + s.Require().True(ok) header, err := path.EndpointA.Counterparty.Chain.IBCClientHeader(path.EndpointA.Counterparty.Chain.LatestCommittedHeader, trustedHeight) - suite.Require().NoError(err) + s.Require().NoError(err) // delete client state - store := suite.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(suite.chainA.GetContext(), path.EndpointA.ClientID) + store := s.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(s.chainA.GetContext(), path.EndpointA.ClientID) store.Delete(host.ClientStateKey()) - suite.Require().PanicsWithError(errorsmod.Wrap(clienttypes.ErrClientNotFound, path.EndpointA.ClientID).Error(), + s.Require().PanicsWithError(errorsmod.Wrap(clienttypes.ErrClientNotFound, path.EndpointA.ClientID).Error(), func() { - lightClientModule.CheckForMisbehaviour(suite.chainA.GetContext(), path.EndpointA.ClientID, header) + lightClientModule.CheckForMisbehaviour(s.chainA.GetContext(), path.EndpointA.ClientID, header) }, ) } -func (suite *TendermintTestSuite) TestUpdateStateOnMisbehaviourPanicsOnClientStateNotFound() { - suite.SetupTest() +func (s *TendermintTestSuite) TestUpdateStateOnMisbehaviourPanicsOnClientStateNotFound() { + s.SetupTest() - path := ibctesting.NewPath(suite.chainA, suite.chainB) + path := ibctesting.NewPath(s.chainA, s.chainB) path.SetupClients() - lightClientModule, err := suite.chainA.App.GetIBCKeeper().ClientKeeper.Route(suite.chainA.GetContext(), path.EndpointA.ClientID) - suite.Require().NoError(err) + lightClientModule, err := s.chainA.App.GetIBCKeeper().ClientKeeper.Route(s.chainA.GetContext(), path.EndpointA.ClientID) + s.Require().NoError(err) // ensure counterparty state is committed - suite.coordinator.CommitBlock(suite.chainB) + s.coordinator.CommitBlock(s.chainB) trustedHeight, ok := path.EndpointA.GetClientLatestHeight().(clienttypes.Height) - suite.Require().True(ok) + s.Require().True(ok) header, err := path.EndpointA.Counterparty.Chain.IBCClientHeader(path.EndpointA.Counterparty.Chain.LatestCommittedHeader, trustedHeight) - suite.Require().NoError(err) + s.Require().NoError(err) // delete client state - 
store := suite.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(suite.chainA.GetContext(), path.EndpointA.ClientID) + store := s.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(s.chainA.GetContext(), path.EndpointA.ClientID) store.Delete(host.ClientStateKey()) - suite.Require().PanicsWithError( + s.Require().PanicsWithError( errorsmod.Wrap(clienttypes.ErrClientNotFound, path.EndpointA.ClientID).Error(), func() { - lightClientModule.UpdateStateOnMisbehaviour(suite.chainA.GetContext(), path.EndpointA.ClientID, header) + lightClientModule.UpdateStateOnMisbehaviour(s.chainA.GetContext(), path.EndpointA.ClientID, header) }, ) } -func (suite *TendermintTestSuite) TestUpdateStatePanicsOnClientStateNotFound() { - suite.SetupTest() +func (s *TendermintTestSuite) TestUpdateStatePanicsOnClientStateNotFound() { + s.SetupTest() - path := ibctesting.NewPath(suite.chainA, suite.chainB) + path := ibctesting.NewPath(s.chainA, s.chainB) path.SetupClients() - lightClientModule, err := suite.chainA.App.GetIBCKeeper().ClientKeeper.Route(suite.chainA.GetContext(), path.EndpointA.ClientID) - suite.Require().NoError(err) + lightClientModule, err := s.chainA.App.GetIBCKeeper().ClientKeeper.Route(s.chainA.GetContext(), path.EndpointA.ClientID) + s.Require().NoError(err) // ensure counterparty state is committed - suite.coordinator.CommitBlock(suite.chainB) + s.coordinator.CommitBlock(s.chainB) trustedHeight, ok := path.EndpointA.GetClientLatestHeight().(clienttypes.Height) - suite.Require().True(ok) + s.Require().True(ok) header, err := path.EndpointA.Counterparty.Chain.IBCClientHeader(path.EndpointA.Counterparty.Chain.LatestCommittedHeader, trustedHeight) - suite.Require().NoError(err) + s.Require().NoError(err) // delete client state - store := suite.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(suite.chainA.GetContext(), path.EndpointA.ClientID) + store := s.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(s.chainA.GetContext(), path.EndpointA.ClientID) store.Delete(host.ClientStateKey()) - suite.Require().PanicsWithError( + s.Require().PanicsWithError( errorsmod.Wrap(clienttypes.ErrClientNotFound, path.EndpointA.ClientID).Error(), func() { - lightClientModule.UpdateState(suite.chainA.GetContext(), path.EndpointA.ClientID, header) + lightClientModule.UpdateState(s.chainA.GetContext(), path.EndpointA.ClientID, header) }, ) } -func (suite *TendermintTestSuite) TestVerifyMembership() { +func (s *TendermintTestSuite) TestVerifyMembership() { var ( testingpath *ibctesting.Path delayTimePeriod uint64 @@ -281,15 +281,15 @@ func (suite *TendermintTestSuite) TestVerifyMembership() { key := host.FullConsensusStateKey(testingpath.EndpointB.ClientID, latestHeight) merklePath := commitmenttypes.NewMerklePath(key) - path, err = commitmenttypes.ApplyPrefix(suite.chainB.GetPrefix(), merklePath) - suite.Require().NoError(err) + path, err = commitmenttypes.ApplyPrefix(s.chainB.GetPrefix(), merklePath) + s.Require().NoError(err) - proof, proofHeight = suite.chainB.QueryProof(key) + proof, proofHeight = s.chainB.QueryProof(key) consensusState, ok := testingpath.EndpointB.GetConsensusState(latestHeight).(*ibctm.ConsensusState) - suite.Require().True(ok) - value, err = suite.chainB.Codec.MarshalInterface(consensusState) - suite.Require().NoError(err) + s.Require().True(ok) + value, err = s.chainB.Codec.MarshalInterface(consensusState) + s.Require().NoError(err) }, nil, }, @@ -297,14 +297,14 @@ func (suite *TendermintTestSuite) TestVerifyMembership() { "successful Connection verification", func() { key := 
host.ConnectionKey(testingpath.EndpointB.ConnectionID) merklePath := commitmenttypes.NewMerklePath(key) - path, err = commitmenttypes.ApplyPrefix(suite.chainB.GetPrefix(), merklePath) - suite.Require().NoError(err) + path, err = commitmenttypes.ApplyPrefix(s.chainB.GetPrefix(), merklePath) + s.Require().NoError(err) - proof, proofHeight = suite.chainB.QueryProof(key) + proof, proofHeight = s.chainB.QueryProof(key) connection := testingpath.EndpointB.GetConnection() - value, err = suite.chainB.Codec.Marshal(&connection) - suite.Require().NoError(err) + value, err = s.chainB.Codec.Marshal(&connection) + s.Require().NoError(err) }, nil, }, @@ -312,14 +312,14 @@ func (suite *TendermintTestSuite) TestVerifyMembership() { "successful Channel verification", func() { key := host.ChannelKey(testingpath.EndpointB.ChannelConfig.PortID, testingpath.EndpointB.ChannelID) merklePath := commitmenttypes.NewMerklePath(key) - path, err = commitmenttypes.ApplyPrefix(suite.chainB.GetPrefix(), merklePath) - suite.Require().NoError(err) + path, err = commitmenttypes.ApplyPrefix(s.chainB.GetPrefix(), merklePath) + s.Require().NoError(err) - proof, proofHeight = suite.chainB.QueryProof(key) + proof, proofHeight = s.chainB.QueryProof(key) channel := testingpath.EndpointB.GetChannel() - value, err = suite.chainB.Codec.Marshal(&channel) - suite.Require().NoError(err) + value, err = s.chainB.Codec.Marshal(&channel) + s.Require().NoError(err) }, nil, }, @@ -327,14 +327,14 @@ func (suite *TendermintTestSuite) TestVerifyMembership() { "successful PacketCommitment verification", func() { // send from chainB to chainA since we are proving chainB sent a packet sequence, err := testingpath.EndpointB.SendPacket(clienttypes.NewHeight(1, 100), 0, ibctesting.MockPacketData) - suite.Require().NoError(err) + s.Require().NoError(err) // make packet commitment proof packet := channeltypes.NewPacket(ibctesting.MockPacketData, sequence, testingpath.EndpointB.ChannelConfig.PortID, testingpath.EndpointB.ChannelID, testingpath.EndpointA.ChannelConfig.PortID, testingpath.EndpointA.ChannelID, clienttypes.NewHeight(1, 100), 0) key := host.PacketCommitmentKey(packet.GetSourcePort(), packet.GetSourceChannel(), packet.GetSequence()) merklePath := commitmenttypes.NewMerklePath(key) - path, err = commitmenttypes.ApplyPrefix(suite.chainB.GetPrefix(), merklePath) - suite.Require().NoError(err) + path, err = commitmenttypes.ApplyPrefix(s.chainB.GetPrefix(), merklePath) + s.Require().NoError(err) proof, proofHeight = testingpath.EndpointB.QueryProof(key) @@ -345,18 +345,18 @@ func (suite *TendermintTestSuite) TestVerifyMembership() { "successful Acknowledgement verification", func() { // send from chainA to chainB since we are proving chainB wrote an acknowledgement sequence, err := testingpath.EndpointA.SendPacket(clienttypes.NewHeight(1, 100), 0, ibctesting.MockPacketData) - suite.Require().NoError(err) + s.Require().NoError(err) // write receipt and ack packet := channeltypes.NewPacket(ibctesting.MockPacketData, sequence, testingpath.EndpointA.ChannelConfig.PortID, testingpath.EndpointA.ChannelID, testingpath.EndpointB.ChannelConfig.PortID, testingpath.EndpointB.ChannelID, clienttypes.NewHeight(1, 100), 0) err = testingpath.EndpointB.RecvPacket(packet) - suite.Require().NoError(err) + s.Require().NoError(err) key := host.PacketAcknowledgementKey(packet.GetDestPort(), packet.GetDestChannel(), packet.GetSequence()) merklePath := commitmenttypes.NewMerklePath(key) - path, err = commitmenttypes.ApplyPrefix(suite.chainB.GetPrefix(), merklePath) - 
suite.Require().NoError(err) + path, err = commitmenttypes.ApplyPrefix(s.chainB.GetPrefix(), merklePath) + s.Require().NoError(err) proof, proofHeight = testingpath.EndpointB.QueryProof(key) @@ -370,17 +370,17 @@ func (suite *TendermintTestSuite) TestVerifyMembership() { // send packet sequence, err := testingpath.EndpointA.SendPacket(clienttypes.NewHeight(1, 100), 0, ibctesting.MockPacketData) - suite.Require().NoError(err) + s.Require().NoError(err) // next seq recv incremented packet := channeltypes.NewPacket(ibctesting.MockPacketData, sequence, testingpath.EndpointA.ChannelConfig.PortID, testingpath.EndpointA.ChannelID, testingpath.EndpointB.ChannelConfig.PortID, testingpath.EndpointB.ChannelID, clienttypes.NewHeight(1, 100), 0) err = testingpath.EndpointB.RecvPacket(packet) - suite.Require().NoError(err) + s.Require().NoError(err) key := host.NextSequenceRecvKey(packet.GetDestPort(), packet.GetDestChannel()) merklePath := commitmenttypes.NewMerklePath(key) - path, err = commitmenttypes.ApplyPrefix(suite.chainB.GetPrefix(), merklePath) - suite.Require().NoError(err) + path, err = commitmenttypes.ApplyPrefix(s.chainB.GetPrefix(), merklePath) + s.Require().NoError(err) proof, proofHeight = testingpath.EndpointB.QueryProof(key) @@ -393,12 +393,12 @@ func (suite *TendermintTestSuite) TestVerifyMembership() { key := transfertypes.PortKey merklePath := commitmenttypes.NewMerklePath(key) path, err = commitmenttypes.ApplyPrefix(commitmenttypes.NewMerklePrefix([]byte(transfertypes.StoreKey)), merklePath) - suite.Require().NoError(err) + s.Require().NoError(err) - proof, proofHeight = suite.chainB.QueryProofForStore(transfertypes.StoreKey, key, int64(testingpath.EndpointA.GetClientLatestHeight().GetRevisionHeight())) + proof, proofHeight = s.chainB.QueryProofForStore(transfertypes.StoreKey, key, int64(testingpath.EndpointA.GetClientLatestHeight().GetRevisionHeight())) - value = []byte(suite.chainB.GetSimApp().TransferKeeper.GetPort(suite.chainB.GetContext())) - suite.Require().NoError(err) + value = []byte(s.chainB.GetSimApp().TransferKeeper.GetPort(s.chainB.GetContext())) + s.Require().NoError(err) }, nil, }, @@ -468,7 +468,7 @@ func (suite *TendermintTestSuite) TestVerifyMembership() { { "client state not found", func() { - store := suite.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(suite.chainA.GetContext(), testingpath.EndpointA.ClientID) + store := s.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(s.chainA.GetContext(), testingpath.EndpointA.ClientID) store.Delete(host.ClientStateKey()) }, clienttypes.ErrClientNotFound, @@ -476,9 +476,9 @@ func (suite *TendermintTestSuite) TestVerifyMembership() { } for _, tc := range testCases { - suite.Run(tc.name, func() { - suite.SetupTest() // reset - testingpath = ibctesting.NewPath(suite.chainA, suite.chainB) + s.Run(tc.name, func() { + s.SetupTest() // reset + testingpath = ibctesting.NewPath(s.chainA, s.chainB) testingpath.SetChannelOrdered() testingpath.Setup() @@ -490,35 +490,35 @@ func (suite *TendermintTestSuite) TestVerifyMembership() { // may be overwritten by malleate() key := host.FullClientStateKey(testingpath.EndpointB.ClientID) merklePath := commitmenttypes.NewMerklePath(key) - path, err = commitmenttypes.ApplyPrefix(suite.chainB.GetPrefix(), merklePath) - suite.Require().NoError(err) + path, err = commitmenttypes.ApplyPrefix(s.chainB.GetPrefix(), merklePath) + s.Require().NoError(err) - proof, proofHeight = suite.chainB.QueryProof(key) + proof, proofHeight = s.chainB.QueryProof(key) clientState, ok := 
testingpath.EndpointB.GetClientState().(*ibctm.ClientState) - suite.Require().True(ok) - value, err = suite.chainB.Codec.MarshalInterface(clientState) - suite.Require().NoError(err) + s.Require().True(ok) + value, err = s.chainB.Codec.MarshalInterface(clientState) + s.Require().NoError(err) tc.malleate() // make changes as necessary - lightClientModule, err := suite.chainA.App.GetIBCKeeper().ClientKeeper.Route(suite.chainA.GetContext(), testingpath.EndpointA.ClientID) - suite.Require().NoError(err) + lightClientModule, err := s.chainA.App.GetIBCKeeper().ClientKeeper.Route(s.chainA.GetContext(), testingpath.EndpointA.ClientID) + s.Require().NoError(err) err = lightClientModule.VerifyMembership( - suite.chainA.GetContext(), testingpath.EndpointA.ClientID, proofHeight, delayTimePeriod, delayBlockPeriod, + s.chainA.GetContext(), testingpath.EndpointA.ClientID, proofHeight, delayTimePeriod, delayBlockPeriod, proof, path, value, ) if tc.expErr == nil { - suite.Require().NoError(err) + s.Require().NoError(err) } else { - suite.Require().ErrorContains(err, tc.expErr.Error()) + s.Require().ErrorContains(err, tc.expErr.Error()) } }) } } -func (suite *TendermintTestSuite) TestVerifyNonMembership() { +func (s *TendermintTestSuite) TestVerifyNonMembership() { var ( testingpath *ibctesting.Path delayTimePeriod uint64 @@ -549,10 +549,10 @@ func (suite *TendermintTestSuite) TestVerifyNonMembership() { "successful ConsensusState verification of non membership", func() { key := host.FullConsensusStateKey(invalidClientID, testingpath.EndpointB.GetClientLatestHeight()) merklePath := commitmenttypes.NewMerklePath(key) - path, err = commitmenttypes.ApplyPrefix(suite.chainB.GetPrefix(), merklePath) - suite.Require().NoError(err) + path, err = commitmenttypes.ApplyPrefix(s.chainB.GetPrefix(), merklePath) + s.Require().NoError(err) - proof, proofHeight = suite.chainB.QueryProof(key) + proof, proofHeight = s.chainB.QueryProof(key) }, nil, }, @@ -560,10 +560,10 @@ func (suite *TendermintTestSuite) TestVerifyNonMembership() { "successful Connection verification of non membership", func() { key := host.ConnectionKey(invalidConnectionID) merklePath := commitmenttypes.NewMerklePath(key) - path, err = commitmenttypes.ApplyPrefix(suite.chainB.GetPrefix(), merklePath) - suite.Require().NoError(err) + path, err = commitmenttypes.ApplyPrefix(s.chainB.GetPrefix(), merklePath) + s.Require().NoError(err) - proof, proofHeight = suite.chainB.QueryProof(key) + proof, proofHeight = s.chainB.QueryProof(key) }, nil, }, @@ -571,10 +571,10 @@ func (suite *TendermintTestSuite) TestVerifyNonMembership() { "successful Channel verification of non membership", func() { key := host.ChannelKey(testingpath.EndpointB.ChannelConfig.PortID, invalidChannelID) merklePath := commitmenttypes.NewMerklePath(key) - path, err = commitmenttypes.ApplyPrefix(suite.chainB.GetPrefix(), merklePath) - suite.Require().NoError(err) + path, err = commitmenttypes.ApplyPrefix(s.chainB.GetPrefix(), merklePath) + s.Require().NoError(err) - proof, proofHeight = suite.chainB.QueryProof(key) + proof, proofHeight = s.chainB.QueryProof(key) }, nil, }, @@ -583,8 +583,8 @@ func (suite *TendermintTestSuite) TestVerifyNonMembership() { // make packet commitment proof key := host.PacketCommitmentKey(invalidPortID, invalidChannelID, 1) merklePath := commitmenttypes.NewMerklePath(key) - path, err = commitmenttypes.ApplyPrefix(suite.chainB.GetPrefix(), merklePath) - suite.Require().NoError(err) + path, err = commitmenttypes.ApplyPrefix(s.chainB.GetPrefix(), merklePath) + 
s.Require().NoError(err) proof, proofHeight = testingpath.EndpointB.QueryProof(key) }, @@ -594,8 +594,8 @@ func (suite *TendermintTestSuite) TestVerifyNonMembership() { "successful Acknowledgement verification of non membership", func() { key := host.PacketAcknowledgementKey(invalidPortID, invalidChannelID, 1) merklePath := commitmenttypes.NewMerklePath(key) - path, err = commitmenttypes.ApplyPrefix(suite.chainB.GetPrefix(), merklePath) - suite.Require().NoError(err) + path, err = commitmenttypes.ApplyPrefix(s.chainB.GetPrefix(), merklePath) + s.Require().NoError(err) proof, proofHeight = testingpath.EndpointB.QueryProof(key) }, @@ -605,8 +605,8 @@ func (suite *TendermintTestSuite) TestVerifyNonMembership() { "successful NextSequenceRecv verification of non membership", func() { key := host.NextSequenceRecvKey(invalidPortID, invalidChannelID) merklePath := commitmenttypes.NewMerklePath(key) - path, err = commitmenttypes.ApplyPrefix(suite.chainB.GetPrefix(), merklePath) - suite.Require().NoError(err) + path, err = commitmenttypes.ApplyPrefix(s.chainB.GetPrefix(), merklePath) + s.Require().NoError(err) proof, proofHeight = testingpath.EndpointB.QueryProof(key) }, @@ -617,9 +617,9 @@ func (suite *TendermintTestSuite) TestVerifyNonMembership() { key := []byte{0x08} merklePath := commitmenttypes.NewMerklePath(key) path, err = commitmenttypes.ApplyPrefix(commitmenttypes.NewMerklePrefix([]byte(transfertypes.StoreKey)), merklePath) - suite.Require().NoError(err) + s.Require().NoError(err) - proof, proofHeight = suite.chainB.QueryProofForStore(transfertypes.StoreKey, key, int64(testingpath.EndpointA.GetClientLatestHeight().GetRevisionHeight())) + proof, proofHeight = s.chainB.QueryProofForStore(transfertypes.StoreKey, key, int64(testingpath.EndpointA.GetClientLatestHeight().GetRevisionHeight())) }, nil, }, @@ -677,10 +677,10 @@ func (suite *TendermintTestSuite) TestVerifyNonMembership() { // change the value being proved key := host.FullClientStateKey(testingpath.EndpointB.ClientID) merklePath := commitmenttypes.NewMerklePath(key) - path, err = commitmenttypes.ApplyPrefix(suite.chainB.GetPrefix(), merklePath) - suite.Require().NoError(err) + path, err = commitmenttypes.ApplyPrefix(s.chainB.GetPrefix(), merklePath) + s.Require().NoError(err) - proof, proofHeight = suite.chainB.QueryProof(key) + proof, proofHeight = s.chainB.QueryProof(key) }, commitmenttypes.ErrInvalidProof, }, @@ -694,7 +694,7 @@ func (suite *TendermintTestSuite) TestVerifyNonMembership() { { "client state not found", func() { - store := suite.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(suite.chainA.GetContext(), testingpath.EndpointA.ClientID) + store := s.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(s.chainA.GetContext(), testingpath.EndpointA.ClientID) store.Delete(host.ClientStateKey()) }, clienttypes.ErrClientNotFound, @@ -702,9 +702,9 @@ func (suite *TendermintTestSuite) TestVerifyNonMembership() { } for _, tc := range testCases { - suite.Run(tc.name, func() { - suite.SetupTest() // reset - testingpath = ibctesting.NewPath(suite.chainA, suite.chainB) + s.Run(tc.name, func() { + s.SetupTest() // reset + testingpath = ibctesting.NewPath(s.chainA, s.chainB) testingpath.SetChannelOrdered() testingpath.Setup() @@ -717,31 +717,31 @@ func (suite *TendermintTestSuite) TestVerifyNonMembership() { key := host.FullClientStateKey("invalid-client-id") merklePath := commitmenttypes.NewMerklePath(key) - path, err = commitmenttypes.ApplyPrefix(suite.chainB.GetPrefix(), merklePath) - suite.Require().NoError(err) + path, err = 
commitmenttypes.ApplyPrefix(s.chainB.GetPrefix(), merklePath) + s.Require().NoError(err) - proof, proofHeight = suite.chainB.QueryProof(key) + proof, proofHeight = s.chainB.QueryProof(key) tc.malleate() // make changes as necessary - lightClientModule, err := suite.chainA.App.GetIBCKeeper().ClientKeeper.Route(suite.chainA.GetContext(), testingpath.EndpointA.ClientID) - suite.Require().NoError(err) + lightClientModule, err := s.chainA.App.GetIBCKeeper().ClientKeeper.Route(s.chainA.GetContext(), testingpath.EndpointA.ClientID) + s.Require().NoError(err) err = lightClientModule.VerifyNonMembership( - suite.chainA.GetContext(), testingpath.EndpointA.ClientID, proofHeight, delayTimePeriod, delayBlockPeriod, + s.chainA.GetContext(), testingpath.EndpointA.ClientID, proofHeight, delayTimePeriod, delayBlockPeriod, proof, path, ) if tc.expErr == nil { - suite.Require().NoError(err) + s.Require().NoError(err) } else { - suite.Require().ErrorContains(err, tc.expErr.Error()) + s.Require().ErrorContains(err, tc.expErr.Error()) } }) } } -func (suite *TendermintTestSuite) TestStatus() { +func (s *TendermintTestSuite) TestStatus() { var ( path *ibctesting.Path clientState *ibctm.ClientState @@ -769,7 +769,7 @@ func (suite *TendermintTestSuite) TestStatus() { "client status without consensus state", func() { newLatestHeight, ok := clientState.LatestHeight.Increment().(clienttypes.Height) - suite.Require().True(ok) + s.Require().True(ok) clientState.LatestHeight = newLatestHeight path.EndpointA.SetClientState(clientState) }, @@ -778,14 +778,14 @@ func (suite *TendermintTestSuite) TestStatus() { { "client status is expired", func() { - suite.coordinator.IncrementTimeBy(clientState.TrustingPeriod) + s.coordinator.IncrementTimeBy(clientState.TrustingPeriod) }, exported.Expired, }, { "client state not found", func() { - store := suite.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(suite.chainA.GetContext(), path.EndpointA.ClientID) + store := s.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(s.chainA.GetContext(), path.EndpointA.ClientID) store.Delete(host.ClientStateKey()) }, exported.Unknown, @@ -793,28 +793,28 @@ func (suite *TendermintTestSuite) TestStatus() { } for _, tc := range testCases { - suite.Run(tc.name, func() { - suite.SetupTest() + s.Run(tc.name, func() { + s.SetupTest() - path = ibctesting.NewPath(suite.chainA, suite.chainB) + path = ibctesting.NewPath(s.chainA, s.chainB) path.SetupClients() - lightClientModule, err := suite.chainA.App.GetIBCKeeper().ClientKeeper.Route(suite.chainA.GetContext(), path.EndpointA.ClientID) - suite.Require().NoError(err) + lightClientModule, err := s.chainA.App.GetIBCKeeper().ClientKeeper.Route(s.chainA.GetContext(), path.EndpointA.ClientID) + s.Require().NoError(err) var ok bool clientState, ok = path.EndpointA.GetClientState().(*ibctm.ClientState) - suite.Require().True(ok) + s.Require().True(ok) tc.malleate() - status := lightClientModule.Status(suite.chainA.GetContext(), path.EndpointA.ClientID) - suite.Require().Equal(tc.expStatus, status) + status := lightClientModule.Status(s.chainA.GetContext(), path.EndpointA.ClientID) + s.Require().Equal(tc.expStatus, status) }) } } -func (suite *TendermintTestSuite) TestLatestHeight() { +func (s *TendermintTestSuite) TestLatestHeight() { var ( path *ibctesting.Path height exported.Height @@ -833,7 +833,7 @@ func (suite *TendermintTestSuite) TestLatestHeight() { { "client state not found", func() { - store := suite.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(suite.chainA.GetContext(), path.EndpointA.ClientID) 
+ store := s.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(s.chainA.GetContext(), path.EndpointA.ClientID) store.Delete(host.ClientStateKey()) }, clienttypes.ZeroHeight(), @@ -841,24 +841,24 @@ func (suite *TendermintTestSuite) TestLatestHeight() { } for _, tc := range testCases { - suite.Run(tc.name, func() { - suite.SetupTest() + s.Run(tc.name, func() { + s.SetupTest() - path = ibctesting.NewPath(suite.chainA, suite.chainB) + path = ibctesting.NewPath(s.chainA, s.chainB) path.SetupClients() - lightClientModule, err := suite.chainA.App.GetIBCKeeper().ClientKeeper.Route(suite.chainA.GetContext(), path.EndpointA.ClientID) - suite.Require().NoError(err) + lightClientModule, err := s.chainA.App.GetIBCKeeper().ClientKeeper.Route(s.chainA.GetContext(), path.EndpointA.ClientID) + s.Require().NoError(err) tc.malleate() - height = lightClientModule.LatestHeight(suite.chainA.GetContext(), path.EndpointA.ClientID) - suite.Require().Equal(tc.expHeight, height) + height = lightClientModule.LatestHeight(s.chainA.GetContext(), path.EndpointA.ClientID) + s.Require().Equal(tc.expHeight, height) }) } } -func (suite *TendermintTestSuite) TestGetTimestampAtHeight() { +func (s *TendermintTestSuite) TestGetTimestampAtHeight() { var ( path *ibctesting.Path height exported.Height @@ -878,7 +878,7 @@ func (suite *TendermintTestSuite) TestGetTimestampAtHeight() { { "failure: client state not found", func() { - store := suite.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(suite.chainA.GetContext(), path.EndpointA.ClientID) + store := s.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(s.chainA.GetContext(), path.EndpointA.ClientID) store.Delete(host.ClientStateKey()) }, clienttypes.ErrClientNotFound, @@ -887,7 +887,7 @@ func (suite *TendermintTestSuite) TestGetTimestampAtHeight() { "failure: consensus state not found for height", func() { clientState, ok := path.EndpointA.GetClientState().(*ibctm.ClientState) - suite.Require().True(ok) + s.Require().True(ok) height = clientState.LatestHeight.Increment() }, clienttypes.ErrConsensusStateNotFound, @@ -895,44 +895,44 @@ func (suite *TendermintTestSuite) TestGetTimestampAtHeight() { } for _, tc := range testCases { - suite.Run(tc.name, func() { - suite.SetupTest() + s.Run(tc.name, func() { + s.SetupTest() - path = ibctesting.NewPath(suite.chainA, suite.chainB) + path = ibctesting.NewPath(s.chainA, s.chainB) path.SetupClients() clientState, ok := path.EndpointA.GetClientState().(*ibctm.ClientState) - suite.Require().True(ok) + s.Require().True(ok) height = clientState.LatestHeight // grab consensusState from store and update with a predefined timestamp consensusState := path.EndpointA.GetConsensusState(height) tmConsensusState, ok := consensusState.(*ibctm.ConsensusState) - suite.Require().True(ok) + s.Require().True(ok) tmConsensusState.Timestamp = expectedTimestamp path.EndpointA.SetConsensusState(tmConsensusState, height) - lightClientModule, err := suite.chainA.App.GetIBCKeeper().ClientKeeper.Route(suite.chainA.GetContext(), path.EndpointA.ClientID) - suite.Require().NoError(err) + lightClientModule, err := s.chainA.App.GetIBCKeeper().ClientKeeper.Route(s.chainA.GetContext(), path.EndpointA.ClientID) + s.Require().NoError(err) tc.malleate() - timestamp, err := lightClientModule.TimestampAtHeight(suite.chainA.GetContext(), path.EndpointA.ClientID, height) + timestamp, err := lightClientModule.TimestampAtHeight(s.chainA.GetContext(), path.EndpointA.ClientID, height) if tc.expErr == nil { - suite.Require().NoError(err) + s.Require().NoError(err) 
expectedTimestamp := uint64(expectedTimestamp.UnixNano()) - suite.Require().Equal(expectedTimestamp, timestamp) + s.Require().Equal(expectedTimestamp, timestamp) } else { - suite.Require().ErrorIs(err, tc.expErr) + s.Require().ErrorIs(err, tc.expErr) } }) } } -func (suite *TendermintTestSuite) TestRecoverClient() { +func (s *TendermintTestSuite) TestRecoverClient() { var ( subjectClientID, substituteClientID string subjectClientState exported.ClientState @@ -980,47 +980,47 @@ func (suite *TendermintTestSuite) TestRecoverClient() { } for _, tc := range testCases { - suite.Run(tc.name, func() { - suite.SetupTest() // reset - ctx := suite.chainA.GetContext() + s.Run(tc.name, func() { + s.SetupTest() // reset + ctx := s.chainA.GetContext() - subjectPath := ibctesting.NewPath(suite.chainA, suite.chainB) + subjectPath := ibctesting.NewPath(s.chainA, s.chainB) subjectPath.SetupClients() subjectClientID = subjectPath.EndpointA.ClientID - subjectClientState = suite.chainA.GetClientState(subjectClientID) + subjectClientState = s.chainA.GetClientState(subjectClientID) - substitutePath := ibctesting.NewPath(suite.chainA, suite.chainB) + substitutePath := ibctesting.NewPath(s.chainA, s.chainB) substitutePath.SetupClients() substituteClientID = substitutePath.EndpointA.ClientID tmClientState, ok := subjectClientState.(*ibctm.ClientState) - suite.Require().True(ok) + s.Require().True(ok) tmClientState.FrozenHeight = tmClientState.LatestHeight - suite.chainA.App.GetIBCKeeper().ClientKeeper.SetClientState(ctx, subjectPath.EndpointA.ClientID, tmClientState) + s.chainA.App.GetIBCKeeper().ClientKeeper.SetClientState(ctx, subjectPath.EndpointA.ClientID, tmClientState) - lightClientModule, err := suite.chainA.App.GetIBCKeeper().ClientKeeper.Route(suite.chainA.GetContext(), subjectClientID) - suite.Require().NoError(err) + lightClientModule, err := s.chainA.App.GetIBCKeeper().ClientKeeper.Route(s.chainA.GetContext(), subjectClientID) + s.Require().NoError(err) tc.malleate() err = lightClientModule.RecoverClient(ctx, subjectClientID, substituteClientID) if tc.expErr == nil { - suite.Require().NoError(err) + s.Require().NoError(err) // assert that status of subject client is now Active - lightClientModule, err := suite.chainA.App.GetIBCKeeper().ClientKeeper.Route(suite.chainA.GetContext(), subjectClientID) - suite.Require().NoError(err) - suite.Require().Equal(lightClientModule.Status(suite.chainA.GetContext(), subjectClientID), exported.Active) + lightClientModule, err := s.chainA.App.GetIBCKeeper().ClientKeeper.Route(s.chainA.GetContext(), subjectClientID) + s.Require().NoError(err) + s.Require().Equal(lightClientModule.Status(s.chainA.GetContext(), subjectClientID), exported.Active) } else { - suite.Require().Error(err) - suite.Require().ErrorIs(err, tc.expErr) + s.Require().Error(err) + s.Require().ErrorIs(err, tc.expErr) } }) } } -func (suite *TendermintTestSuite) TestVerifyUpgradeAndUpdateState() { +func (s *TendermintTestSuite) TestVerifyUpgradeAndUpdateState() { var ( clientID string path *ibctesting.Path @@ -1038,26 +1038,26 @@ func (suite *TendermintTestSuite) TestVerifyUpgradeAndUpdateState() { "success", func() { // upgrade height is at next block - upgradeHeight := clienttypes.NewHeight(0, uint64(suite.chainB.GetContext().BlockHeight()+1)) + upgradeHeight := clienttypes.NewHeight(0, uint64(s.chainB.GetContext().BlockHeight()+1)) // zero custom fields and store in upgrade store zeroedUpgradedClient := upgradedClientState.(*ibctm.ClientState).ZeroCustomFields() zeroedUpgradedClientAny, err := 
codectypes.NewAnyWithValue(zeroedUpgradedClient) - suite.Require().NoError(err) + s.Require().NoError(err) - err = suite.chainB.GetSimApp().UpgradeKeeper.SetUpgradedClient(suite.chainB.GetContext(), int64(upgradeHeight.GetRevisionHeight()), suite.chainB.Codec.MustMarshal(zeroedUpgradedClientAny)) - suite.Require().NoError(err) + err = s.chainB.GetSimApp().UpgradeKeeper.SetUpgradedClient(s.chainB.GetContext(), int64(upgradeHeight.GetRevisionHeight()), s.chainB.Codec.MustMarshal(zeroedUpgradedClientAny)) + s.Require().NoError(err) - err = suite.chainB.GetSimApp().UpgradeKeeper.SetUpgradedConsensusState(suite.chainB.GetContext(), int64(upgradeHeight.GetRevisionHeight()), suite.chainB.Codec.MustMarshal(upgradedConsensusStateAny)) - suite.Require().NoError(err) + err = s.chainB.GetSimApp().UpgradeKeeper.SetUpgradedConsensusState(s.chainB.GetContext(), int64(upgradeHeight.GetRevisionHeight()), s.chainB.Codec.MustMarshal(upgradedConsensusStateAny)) + s.Require().NoError(err) // commit upgrade store changes and update clients - suite.coordinator.CommitBlock(suite.chainB) + s.coordinator.CommitBlock(s.chainB) err = path.EndpointA.UpdateClient() - suite.Require().NoError(err) + s.Require().NoError(err) - upgradedClientStateProof, _ = suite.chainB.QueryUpgradeProof(upgradetypes.UpgradedClientKey(int64(upgradeHeight.GetRevisionHeight())), path.EndpointA.GetClientLatestHeight().GetRevisionHeight()) - upgradedConsensusStateProof, _ = suite.chainB.QueryUpgradeProof(upgradetypes.UpgradedConsStateKey(int64(upgradeHeight.GetRevisionHeight())), path.EndpointA.GetClientLatestHeight().GetRevisionHeight()) + upgradedClientStateProof, _ = s.chainB.QueryUpgradeProof(upgradetypes.UpgradedClientKey(int64(upgradeHeight.GetRevisionHeight())), path.EndpointA.GetClientLatestHeight().GetRevisionHeight()) + upgradedConsensusStateProof, _ = s.chainB.QueryUpgradeProof(upgradetypes.UpgradedConsStateKey(int64(upgradeHeight.GetRevisionHeight())), path.EndpointA.GetClientLatestHeight().GetRevisionHeight()) }, nil, }, @@ -1090,75 +1090,75 @@ func (suite *TendermintTestSuite) TestVerifyUpgradeAndUpdateState() { "upgraded client state height is not greater than current height", func() { // upgrade height is at next block - upgradeHeight := clienttypes.NewHeight(1, uint64(suite.chainB.GetContext().BlockHeight()+1)) + upgradeHeight := clienttypes.NewHeight(1, uint64(s.chainB.GetContext().BlockHeight()+1)) // zero custom fields and store in upgrade store zeroedUpgradedClient := upgradedClientState.(*ibctm.ClientState).ZeroCustomFields() zeroedUpgradedClientAny, err := codectypes.NewAnyWithValue(zeroedUpgradedClient) - suite.Require().NoError(err) + s.Require().NoError(err) - err = suite.chainB.GetSimApp().UpgradeKeeper.SetUpgradedClient(suite.chainB.GetContext(), int64(upgradeHeight.GetRevisionHeight()), suite.chainB.Codec.MustMarshal(zeroedUpgradedClientAny)) - suite.Require().NoError(err) + err = s.chainB.GetSimApp().UpgradeKeeper.SetUpgradedClient(s.chainB.GetContext(), int64(upgradeHeight.GetRevisionHeight()), s.chainB.Codec.MustMarshal(zeroedUpgradedClientAny)) + s.Require().NoError(err) - err = suite.chainB.GetSimApp().UpgradeKeeper.SetUpgradedConsensusState(suite.chainB.GetContext(), int64(upgradeHeight.GetRevisionHeight()), suite.chainB.Codec.MustMarshal(upgradedConsensusStateAny)) - suite.Require().NoError(err) + err = s.chainB.GetSimApp().UpgradeKeeper.SetUpgradedConsensusState(s.chainB.GetContext(), int64(upgradeHeight.GetRevisionHeight()), s.chainB.Codec.MustMarshal(upgradedConsensusStateAny)) + s.Require().NoError(err) 
// change upgraded client state height to be lower than current client state height tmClient, ok := upgradedClientState.(*ibctm.ClientState) - suite.Require().True(ok) + s.Require().True(ok) newLatestheight, ok := path.EndpointA.GetClientLatestHeight().Decrement() - suite.Require().True(ok) + s.Require().True(ok) tmClient.LatestHeight, ok = newLatestheight.(clienttypes.Height) - suite.Require().True(ok) + s.Require().True(ok) upgradedClientStateAny, err = codectypes.NewAnyWithValue(tmClient) - suite.Require().NoError(err) + s.Require().NoError(err) - suite.coordinator.CommitBlock(suite.chainB) + s.coordinator.CommitBlock(s.chainB) err = path.EndpointA.UpdateClient() - suite.Require().NoError(err) + s.Require().NoError(err) - upgradedClientStateProof, _ = suite.chainB.QueryUpgradeProof(upgradetypes.UpgradedClientKey(int64(upgradeHeight.GetRevisionHeight())), path.EndpointA.GetClientLatestHeight().GetRevisionHeight()) - upgradedConsensusStateProof, _ = suite.chainB.QueryUpgradeProof(upgradetypes.UpgradedConsStateKey(int64(upgradeHeight.GetRevisionHeight())), path.EndpointA.GetClientLatestHeight().GetRevisionHeight()) + upgradedClientStateProof, _ = s.chainB.QueryUpgradeProof(upgradetypes.UpgradedClientKey(int64(upgradeHeight.GetRevisionHeight())), path.EndpointA.GetClientLatestHeight().GetRevisionHeight()) + upgradedConsensusStateProof, _ = s.chainB.QueryUpgradeProof(upgradetypes.UpgradedConsStateKey(int64(upgradeHeight.GetRevisionHeight())), path.EndpointA.GetClientLatestHeight().GetRevisionHeight()) }, ibcerrors.ErrInvalidHeight, }, } for _, tc := range testCases { - suite.Run(tc.name, func() { - suite.SetupTest() // reset + s.Run(tc.name, func() { + s.SetupTest() // reset - path = ibctesting.NewPath(suite.chainA, suite.chainB) + path = ibctesting.NewPath(s.chainA, s.chainB) path.SetupClients() clientID = path.EndpointA.ClientID clientState, ok := path.EndpointA.GetClientState().(*ibctm.ClientState) - suite.Require().True(ok) + s.Require().True(ok) revisionNumber := clienttypes.ParseChainID(clientState.ChainId) newUnbondindPeriod := ubdPeriod + trustingPeriod newChainID, err := clienttypes.SetRevisionNumber(clientState.ChainId, revisionNumber+1) - suite.Require().NoError(err) + s.Require().NoError(err) upgradedClientState = ibctm.NewClientState(newChainID, ibctm.DefaultTrustLevel, trustingPeriod, newUnbondindPeriod, maxClockDrift, clienttypes.NewHeight(revisionNumber+1, clientState.LatestHeight.GetRevisionHeight()+1), commitmenttypes.GetSDKSpecs(), upgradePath) upgradedClientStateAny, err = codectypes.NewAnyWithValue(upgradedClientState) - suite.Require().NoError(err) + s.Require().NoError(err) nextValsHash := sha256.Sum256([]byte("new-nextValsHash")) upgradedConsensusState := ibctm.NewConsensusState(time.Now(), commitmenttypes.NewMerkleRoot([]byte("new-hash")), nextValsHash[:]) upgradedConsensusStateAny, err = codectypes.NewAnyWithValue(upgradedConsensusState) - suite.Require().NoError(err) + s.Require().NoError(err) tc.malleate() - lightClientModule, err := suite.chainA.App.GetIBCKeeper().ClientKeeper.Route(suite.chainA.GetContext(), clientID) - suite.Require().NoError(err) + lightClientModule, err := s.chainA.App.GetIBCKeeper().ClientKeeper.Route(s.chainA.GetContext(), clientID) + s.Require().NoError(err) err = lightClientModule.VerifyUpgradeAndUpdateState( - suite.chainA.GetContext(), + s.chainA.GetContext(), clientID, upgradedClientStateAny.Value, upgradedConsensusStateAny.Value, @@ -1167,20 +1167,20 @@ func (suite *TendermintTestSuite) TestVerifyUpgradeAndUpdateState() { ) if 
tc.expErr == nil { - suite.Require().NoError(err) + s.Require().NoError(err) expClientState := path.EndpointA.GetClientState() - expClientStateBz := suite.chainA.Codec.MustMarshal(expClientState) - suite.Require().Equal(upgradedClientStateAny.Value, expClientStateBz) + expClientStateBz := s.chainA.Codec.MustMarshal(expClientState) + s.Require().Equal(upgradedClientStateAny.Value, expClientStateBz) expConsensusState := ibctm.NewConsensusState(upgradedConsensusState.Timestamp, commitmenttypes.NewMerkleRoot([]byte(ibctm.SentinelRoot)), upgradedConsensusState.NextValidatorsHash) - expConsensusStateBz := suite.chainA.Codec.MustMarshal(expConsensusState) + expConsensusStateBz := s.chainA.Codec.MustMarshal(expConsensusState) - consensusStateBz := suite.chainA.Codec.MustMarshal(path.EndpointA.GetConsensusState(path.EndpointA.GetClientLatestHeight())) - suite.Require().Equal(expConsensusStateBz, consensusStateBz) + consensusStateBz := s.chainA.Codec.MustMarshal(path.EndpointA.GetConsensusState(path.EndpointA.GetClientLatestHeight())) + s.Require().Equal(expConsensusStateBz, consensusStateBz) } else { - suite.Require().Error(err) - suite.Require().ErrorIs(err, tc.expErr) + s.Require().Error(err) + s.Require().ErrorIs(err, tc.expErr) } }) } diff --git a/modules/light-clients/07-tendermint/migrations/migrations_test.go b/modules/light-clients/07-tendermint/migrations/migrations_test.go index 2b1c0197361..95b95fa459f 100644 --- a/modules/light-clients/07-tendermint/migrations/migrations_test.go +++ b/modules/light-clients/07-tendermint/migrations/migrations_test.go @@ -24,10 +24,10 @@ type MigrationsTestSuite struct { chainB *ibctesting.TestChain } -func (suite *MigrationsTestSuite) SetupTest() { - suite.coordinator = ibctesting.NewCoordinator(suite.T(), 2) - suite.chainA = suite.coordinator.GetChain(ibctesting.GetChainID(1)) - suite.chainB = suite.coordinator.GetChain(ibctesting.GetChainID(2)) +func (s *MigrationsTestSuite) SetupTest() { + s.coordinator = ibctesting.NewCoordinator(s.T(), 2) + s.chainA = s.coordinator.GetChain(ibctesting.GetChainID(1)) + s.chainB = s.coordinator.GetChain(ibctesting.GetChainID(2)) } func TestTendermintTestSuite(t *testing.T) { @@ -35,7 +35,7 @@ func TestTendermintTestSuite(t *testing.T) { } // test pruning of multiple expired tendermint consensus states -func (suite *MigrationsTestSuite) TestPruneExpiredConsensusStates() { +func (s *MigrationsTestSuite) TestPruneExpiredConsensusStates() { // create multiple tendermint clients and a solo machine client // the solo machine is used to verify this pruning function only modifies // the tendermint store. 
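The test-file hunks in this change are a mechanical rename of the testify suite receiver from "suite" to "s", applied to suites that follow the table-driven, malleate-style pattern seen above. A minimal sketch of that convention (hypothetical names and values, not ibc-go code) could look like:

package example

import (
	"testing"

	"github.com/stretchr/testify/suite"
)

// ExampleTestSuite is a hypothetical testify suite using the short receiver
// name "s", mirroring the rename applied throughout this diff.
type ExampleTestSuite struct {
	suite.Suite
	value int
}

// SetupTest runs before every test method on the suite.
func (s *ExampleTestSuite) SetupTest() {
	s.value = 42
}

// TestTableDriven shows the malleate-style table test the renamed files use:
// each case resets the suite, mutates state, then asserts.
func (s *ExampleTestSuite) TestTableDriven() {
	testCases := []struct {
		name     string
		malleate func()
		expValue int
	}{
		{"default", func() {}, 42},
		{"mutated", func() { s.value = 7 }, 7},
	}

	for _, tc := range testCases {
		s.Run(tc.name, func() {
			s.SetupTest() // reset, as the test bodies above do
			tc.malleate() // make changes as necessary
			s.Require().Equal(tc.expValue, s.value)
		})
	}
}

// TestExampleTestSuite wires the suite into the standard go test runner.
func TestExampleTestSuite(t *testing.T) {
	suite.Run(t, new(ExampleTestSuite))
}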
@@ -44,22 +44,22 @@ func (suite *MigrationsTestSuite) TestPruneExpiredConsensusStates() { paths := make([]*ibctesting.Path, numTMClients) for i := range numTMClients { - path := ibctesting.NewPath(suite.chainA, suite.chainB) + path := ibctesting.NewPath(s.chainA, s.chainB) path.SetupClients() paths[i] = path } - solomachine := ibctesting.NewSolomachine(suite.T(), suite.chainA.Codec, ibctesting.DefaultSolomachineClientID, "testing", 1) - smClientStore := suite.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(suite.chainA.GetContext(), solomachine.ClientID) + solomachine := ibctesting.NewSolomachine(s.T(), s.chainA.Codec, ibctesting.DefaultSolomachineClientID, "testing", 1) + smClientStore := s.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(s.chainA.GetContext(), solomachine.ClientID) // set client state - bz, err := suite.chainA.App.AppCodec().MarshalInterface(solomachine.ClientState()) - suite.Require().NoError(err) + bz, err := s.chainA.App.AppCodec().MarshalInterface(solomachine.ClientState()) + s.Require().NoError(err) smClientStore.Set(host.ClientStateKey(), bz) - bz, err = suite.chainA.App.AppCodec().MarshalInterface(solomachine.ConsensusState()) - suite.Require().NoError(err) + bz, err = s.chainA.App.AppCodec().MarshalInterface(solomachine.ConsensusState()) + s.Require().NoError(err) smHeight := clienttypes.NewHeight(0, 1) smClientStore.Set(host.ConsensusStateKey(smHeight), bz) @@ -74,46 +74,46 @@ func (suite *MigrationsTestSuite) TestPruneExpiredConsensusStates() { // these heights will be expired and also pruned for range 3 { err := path.EndpointA.UpdateClient() - suite.Require().NoError(err) + s.Require().NoError(err) pruneHeights = append(pruneHeights, path.EndpointA.GetClientLatestHeight()) } // double check all information is currently stored for _, pruneHeight := range pruneHeights { - consState, ok := suite.chainA.GetConsensusState(path.EndpointA.ClientID, pruneHeight) - suite.Require().True(ok) - suite.Require().NotNil(consState) + consState, ok := s.chainA.GetConsensusState(path.EndpointA.ClientID, pruneHeight) + s.Require().True(ok) + s.Require().NotNil(consState) - ctx := suite.chainA.GetContext() - clientStore := suite.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(ctx, path.EndpointA.ClientID) + ctx := s.chainA.GetContext() + clientStore := s.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(ctx, path.EndpointA.ClientID) processedTime, ok := ibctm.GetProcessedTime(clientStore, pruneHeight) - suite.Require().True(ok) - suite.Require().NotNil(processedTime) + s.Require().True(ok) + s.Require().NotNil(processedTime) processedHeight, ok := ibctm.GetProcessedHeight(clientStore, pruneHeight) - suite.Require().True(ok) - suite.Require().NotNil(processedHeight) + s.Require().True(ok) + s.Require().NotNil(processedHeight) expectedConsKey := ibctm.GetIterationKey(clientStore, pruneHeight) - suite.Require().NotNil(expectedConsKey) + s.Require().NotNil(expectedConsKey) } pruneHeightMap[path] = pruneHeights } // Increment the time by a week - suite.coordinator.IncrementTimeBy(7 * 24 * time.Hour) + s.coordinator.IncrementTimeBy(7 * 24 * time.Hour) for _, path := range paths { // create the consensus state that can be used as trusted height for next update var unexpiredHeights []exported.Height err := path.EndpointA.UpdateClient() - suite.Require().NoError(err) + s.Require().NoError(err) unexpiredHeights = append(unexpiredHeights, path.EndpointA.GetClientLatestHeight()) err = path.EndpointA.UpdateClient() - suite.Require().NoError(err) + s.Require().NoError(err)
unexpiredHeights = append(unexpiredHeights, path.EndpointA.GetClientLatestHeight()) unexpiredHeightMap[path] = unexpiredHeights @@ -122,57 +122,57 @@ func (suite *MigrationsTestSuite) TestPruneExpiredConsensusStates() { // Increment the time by another week, then update the client. // This will cause the consensus states created before the first time increment // to be expired - suite.coordinator.IncrementTimeBy(7 * 24 * time.Hour) - totalPruned, err := ibctmmigrations.PruneExpiredConsensusStates(suite.chainA.GetContext(), suite.chainA.App.AppCodec(), suite.chainA.GetSimApp().IBCKeeper.ClientKeeper) - suite.Require().NoError(err) - suite.Require().NotZero(totalPruned) + s.coordinator.IncrementTimeBy(7 * 24 * time.Hour) + totalPruned, err := ibctmmigrations.PruneExpiredConsensusStates(s.chainA.GetContext(), s.chainA.App.AppCodec(), s.chainA.GetSimApp().IBCKeeper.ClientKeeper) + s.Require().NoError(err) + s.Require().NotZero(totalPruned) for _, path := range paths { - ctx := suite.chainA.GetContext() - clientStore := suite.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(ctx, path.EndpointA.ClientID) + ctx := s.chainA.GetContext() + clientStore := s.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(ctx, path.EndpointA.ClientID) // ensure everything has been pruned for i, pruneHeight := range pruneHeightMap[path] { - consState, ok := suite.chainA.GetConsensusState(path.EndpointA.ClientID, pruneHeight) - suite.Require().False(ok, i) - suite.Require().Nil(consState, i) + consState, ok := s.chainA.GetConsensusState(path.EndpointA.ClientID, pruneHeight) + s.Require().False(ok, i) + s.Require().Nil(consState, i) processedTime, ok := ibctm.GetProcessedTime(clientStore, pruneHeight) - suite.Require().False(ok, i) - suite.Require().Equal(uint64(0), processedTime, i) + s.Require().False(ok, i) + s.Require().Equal(uint64(0), processedTime, i) processedHeight, ok := ibctm.GetProcessedHeight(clientStore, pruneHeight) - suite.Require().False(ok, i) - suite.Require().Nil(processedHeight, i) + s.Require().False(ok, i) + s.Require().Nil(processedHeight, i) expectedConsKey := ibctm.GetIterationKey(clientStore, pruneHeight) - suite.Require().Nil(expectedConsKey, i) + s.Require().Nil(expectedConsKey, i) } // ensure metadata is set for unexpired consensus state for _, height := range unexpiredHeightMap[path] { - consState, ok := suite.chainA.GetConsensusState(path.EndpointA.ClientID, height) - suite.Require().True(ok) - suite.Require().NotNil(consState) + consState, ok := s.chainA.GetConsensusState(path.EndpointA.ClientID, height) + s.Require().True(ok) + s.Require().NotNil(consState) processedTime, ok := ibctm.GetProcessedTime(clientStore, height) - suite.Require().True(ok) - suite.Require().NotEqual(uint64(0), processedTime) + s.Require().True(ok) + s.Require().NotEqual(uint64(0), processedTime) processedHeight, ok := ibctm.GetProcessedHeight(clientStore, height) - suite.Require().True(ok) - suite.Require().NotEqual(clienttypes.ZeroHeight(), processedHeight) + s.Require().True(ok) + s.Require().NotEqual(clienttypes.ZeroHeight(), processedHeight) consKey := ibctm.GetIterationKey(clientStore, height) - suite.Require().Equal(host.ConsensusStateKey(height), consKey) + s.Require().Equal(host.ConsensusStateKey(height), consKey) } } // verify that solomachine client and consensus state were not removed - smClientStore = suite.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(suite.chainA.GetContext(), solomachine.ClientID) + smClientStore = 
s.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(s.chainA.GetContext(), solomachine.ClientID) bz = smClientStore.Get(host.ClientStateKey()) - suite.Require().NotEmpty(bz) + s.Require().NotEmpty(bz) bz = smClientStore.Get(host.ConsensusStateKey(smHeight)) - suite.Require().NotEmpty(bz) + s.Require().NotEmpty(bz) } diff --git a/modules/light-clients/07-tendermint/misbehaviour.go b/modules/light-clients/07-tendermint/misbehaviour.go index bed22ad137a..999173ad687 100644 --- a/modules/light-clients/07-tendermint/misbehaviour.go +++ b/modules/light-clients/07-tendermint/misbehaviour.go @@ -35,8 +35,8 @@ func (Misbehaviour) ClientType() string { // GetTime returns the timestamp at which misbehaviour occurred. It uses the // maximum value from both headers to prevent producing an invalid header outside // of the misbehaviour age range. -func (misbehaviour Misbehaviour) GetTime() time.Time { - t1, t2 := misbehaviour.Header1.GetTime(), misbehaviour.Header2.GetTime() +func (m Misbehaviour) GetTime() time.Time { + t1, t2 := m.Header1.GetTime(), m.Header2.GetTime() if t1.After(t2) { return t1 } @@ -44,70 +44,70 @@ func (misbehaviour Misbehaviour) GetTime() time.Time { } // ValidateBasic implements Misbehaviour interface -func (misbehaviour Misbehaviour) ValidateBasic() error { - if misbehaviour.Header1 == nil { +func (m Misbehaviour) ValidateBasic() error { + if m.Header1 == nil { return errorsmod.Wrap(ErrInvalidHeader, "misbehaviour Header1 cannot be nil") } - if misbehaviour.Header2 == nil { + if m.Header2 == nil { return errorsmod.Wrap(ErrInvalidHeader, "misbehaviour Header2 cannot be nil") } - if misbehaviour.Header1.TrustedHeight.RevisionHeight == 0 { + if m.Header1.TrustedHeight.RevisionHeight == 0 { return errorsmod.Wrapf(ErrInvalidHeaderHeight, "misbehaviour Header1 cannot have zero revision height") } - if misbehaviour.Header2.TrustedHeight.RevisionHeight == 0 { + if m.Header2.TrustedHeight.RevisionHeight == 0 { return errorsmod.Wrapf(ErrInvalidHeaderHeight, "misbehaviour Header2 cannot have zero revision height") } - if misbehaviour.Header1.TrustedValidators == nil { + if m.Header1.TrustedValidators == nil { return errorsmod.Wrap(ErrInvalidValidatorSet, "trusted validator set in Header1 cannot be empty") } - if misbehaviour.Header2.TrustedValidators == nil { + if m.Header2.TrustedValidators == nil { return errorsmod.Wrap(ErrInvalidValidatorSet, "trusted validator set in Header2 cannot be empty") } - if misbehaviour.Header1.Header.ChainID != misbehaviour.Header2.Header.ChainID { + if m.Header1.Header.ChainID != m.Header2.Header.ChainID { return errorsmod.Wrap(clienttypes.ErrInvalidMisbehaviour, "headers must have identical chainIDs") } - if err := host.ClientIdentifierValidator(misbehaviour.ClientId); err != nil { + if err := host.ClientIdentifierValidator(m.ClientId); err != nil { return errorsmod.Wrap(err, "misbehaviour client ID is invalid") } // ValidateBasic on both validators - if err := misbehaviour.Header1.ValidateBasic(); err != nil { + if err := m.Header1.ValidateBasic(); err != nil { return errorsmod.Wrap( clienttypes.ErrInvalidMisbehaviour, errorsmod.Wrap(err, "header 1 failed validation").Error(), ) } - if err := misbehaviour.Header2.ValidateBasic(); err != nil { + if err := m.Header2.ValidateBasic(); err != nil { return errorsmod.Wrap( clienttypes.ErrInvalidMisbehaviour, errorsmod.Wrap(err, "header 2 failed validation").Error(), ) } // Ensure that Height1 is greater than or equal to Height2 - if misbehaviour.Header1.GetHeight().LT(misbehaviour.Header2.GetHeight()) { - return 
errorsmod.Wrapf(clienttypes.ErrInvalidMisbehaviour, "Header1 height is less than Header2 height (%s < %s)", misbehaviour.Header1.GetHeight(), misbehaviour.Header2.GetHeight()) + if m.Header1.GetHeight().LT(m.Header2.GetHeight()) { + return errorsmod.Wrapf(clienttypes.ErrInvalidMisbehaviour, "Header1 height is less than Header2 height (%s < %s)", m.Header1.GetHeight(), m.Header2.GetHeight()) } - blockID1, err := cmttypes.BlockIDFromProto(&misbehaviour.Header1.Commit.BlockID) + blockID1, err := cmttypes.BlockIDFromProto(&m.Header1.Commit.BlockID) if err != nil { return errorsmod.Wrap(err, "invalid block ID from header 1 in misbehaviour") } - blockID2, err := cmttypes.BlockIDFromProto(&misbehaviour.Header2.Commit.BlockID) + blockID2, err := cmttypes.BlockIDFromProto(&m.Header2.Commit.BlockID) if err != nil { return errorsmod.Wrap(err, "invalid block ID from header 2 in misbehaviour") } - if err := validCommit(misbehaviour.Header1.Header.ChainID, *blockID1, - misbehaviour.Header1.Commit, misbehaviour.Header1.ValidatorSet); err != nil { + if err := validCommit(m.Header1.Header.ChainID, *blockID1, + m.Header1.Commit, m.Header1.ValidatorSet); err != nil { return err } - return validCommit(misbehaviour.Header2.Header.ChainID, *blockID2, - misbehaviour.Header2.Commit, misbehaviour.Header2.ValidatorSet) + return validCommit(m.Header2.Header.ChainID, *blockID2, + m.Header2.Commit, m.Header2.ValidatorSet) } // validCommit checks if the given commit is a valid commit from the passed-in validatorset -func validCommit(chainID string, blockID cmttypes.BlockID, commit *cmtproto.Commit, valSet *cmtproto.ValidatorSet) (err error) { +func validCommit(chainID string, blockID cmttypes.BlockID, commit *cmtproto.Commit, valSet *cmtproto.ValidatorSet) error { tmCommit, err := cmttypes.CommitFromProto(commit) if err != nil { return errorsmod.Wrap(err, "commit is not tendermint commit type") diff --git a/modules/light-clients/07-tendermint/misbehaviour_handle.go b/modules/light-clients/07-tendermint/misbehaviour_handle.go index 4247f51c92b..b06ae0abd3f 100644 --- a/modules/light-clients/07-tendermint/misbehaviour_handle.go +++ b/modules/light-clients/07-tendermint/misbehaviour_handle.go @@ -71,7 +71,6 @@ func (ClientState) CheckForMisbehaviour(ctx sdk.Context, cdc codec.BinaryCodec, if !bytes.Equal(blockID1.Hash, blockID2.Hash) { return true } - } else if !msg.Header1.Header.Time.After(msg.Header2.Header.Time) { // Header1 is at greater height than Header2, therefore Header1 time must be less than or equal to // Header2 time in order to be valid misbehaviour (violation of monotonic time). 
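The hunk above keeps the comment describing the two misbehaviour conditions checked here: a fork (two different headers at the same height) and non-monotonic time (the header at the greater height not having a strictly later timestamp). A standalone sketch of that decision, using hypothetical header fields rather than the module's types, could look like:

package main

import (
	"bytes"
	"fmt"
	"time"
)

// sketchHeader is a hypothetical stand-in for a signed header, carrying only
// the fields needed to illustrate the two misbehaviour conditions.
type sketchHeader struct {
	Height    uint64
	Time      time.Time
	BlockHash []byte
}

// isMisbehaviour reports whether h1 and h2 evidence misbehaviour, assuming
// h1 is the header at the greater (or equal) height:
//   - equal heights with different block hashes indicate a fork, and
//   - a higher header whose time is not strictly later violates monotonic time.
func isMisbehaviour(h1, h2 sketchHeader) bool {
	if h1.Height == h2.Height {
		return !bytes.Equal(h1.BlockHash, h2.BlockHash)
	}
	return !h1.Time.After(h2.Time)
}

func main() {
	now := time.Now()
	fork := isMisbehaviour(
		sketchHeader{Height: 10, Time: now.Add(time.Minute), BlockHash: []byte{0x01}},
		sketchHeader{Height: 10, Time: now, BlockHash: []byte{0x02}},
	)
	timeViolation := isMisbehaviour(
		sketchHeader{Height: 11, Time: now, BlockHash: []byte{0x03}},
		sketchHeader{Height: 10, Time: now, BlockHash: []byte{0x04}},
	)
	fmt.Println(fork, timeViolation) // true true
}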
diff --git a/modules/light-clients/07-tendermint/misbehaviour_handle_test.go b/modules/light-clients/07-tendermint/misbehaviour_handle_test.go index 69f8f3080e3..0e17910ca6f 100644 --- a/modules/light-clients/07-tendermint/misbehaviour_handle_test.go +++ b/modules/light-clients/07-tendermint/misbehaviour_handle_test.go @@ -15,11 +15,11 @@ import ( ibctesting "github.com/cosmos/ibc-go/v10/testing" ) -func (suite *TendermintTestSuite) TestVerifyMisbehaviour() { +func (s *TendermintTestSuite) TestVerifyMisbehaviour() { // Setup different validators and signers for testing different types of updates altPrivVal := cmttypes.NewMockPV() altPubKey, err := altPrivVal.GetPubKey() - suite.Require().NoError(err) + s.Require().NoError(err) // create modified heights to use for test-cases altVal := cmttypes.NewValidator(altPubKey, 100) @@ -41,20 +41,20 @@ func (suite *TendermintTestSuite) TestVerifyMisbehaviour() { { "valid fork misbehaviour", func() { trustedHeight, ok := path.EndpointA.GetClientLatestHeight().(clienttypes.Height) - suite.Require().True(ok) + s.Require().True(ok) - trustedVals, ok := suite.chainB.TrustedValidators[trustedHeight.RevisionHeight] - suite.Require().True(ok) + trustedVals, ok := s.chainB.TrustedValidators[trustedHeight.RevisionHeight] + s.Require().True(ok) err = path.EndpointA.UpdateClient() - suite.Require().NoError(err) + s.Require().NoError(err) height, ok := path.EndpointA.GetClientLatestHeight().(clienttypes.Height) - suite.Require().True(ok) + s.Require().True(ok) misbehaviour = &ibctm.Misbehaviour{ - Header1: suite.chainB.CreateTMClientHeader(suite.chainB.ChainID, int64(height.RevisionHeight), trustedHeight, suite.chainB.ProposedHeader.Time.Add(time.Minute), suite.chainB.Vals, suite.chainB.NextVals, trustedVals, suite.chainB.Signers), - Header2: suite.chainB.CreateTMClientHeader(suite.chainB.ChainID, int64(height.RevisionHeight), trustedHeight, suite.chainB.ProposedHeader.Time, suite.chainB.Vals, suite.chainB.NextVals, trustedVals, suite.chainB.Signers), + Header1: s.chainB.CreateTMClientHeader(s.chainB.ChainID, int64(height.RevisionHeight), trustedHeight, s.chainB.ProposedHeader.Time.Add(time.Minute), s.chainB.Vals, s.chainB.NextVals, trustedVals, s.chainB.Signers), + Header2: s.chainB.CreateTMClientHeader(s.chainB.ChainID, int64(height.RevisionHeight), trustedHeight, s.chainB.ProposedHeader.Time, s.chainB.Vals, s.chainB.NextVals, trustedVals, s.chainB.Signers), } }, nil, @@ -62,14 +62,14 @@ func (suite *TendermintTestSuite) TestVerifyMisbehaviour() { { "valid time misbehaviour", func() { trustedHeight, ok := path.EndpointA.GetClientLatestHeight().(clienttypes.Height) - suite.Require().True(ok) + s.Require().True(ok) - trustedVals, ok := suite.chainB.TrustedValidators[trustedHeight.RevisionHeight] - suite.Require().True(ok) + trustedVals, ok := s.chainB.TrustedValidators[trustedHeight.RevisionHeight] + s.Require().True(ok) misbehaviour = &ibctm.Misbehaviour{ - Header1: suite.chainB.CreateTMClientHeader(suite.chainB.ChainID, suite.chainB.ProposedHeader.Height+3, trustedHeight, suite.chainB.ProposedHeader.Time, suite.chainB.Vals, suite.chainB.NextVals, trustedVals, suite.chainB.Signers), - Header2: suite.chainB.CreateTMClientHeader(suite.chainB.ChainID, suite.chainB.ProposedHeader.Height, trustedHeight, suite.chainB.ProposedHeader.Time, suite.chainB.Vals, suite.chainB.NextVals, trustedVals, suite.chainB.Signers), + Header1: s.chainB.CreateTMClientHeader(s.chainB.ChainID, s.chainB.ProposedHeader.Height+3, trustedHeight, s.chainB.ProposedHeader.Time, s.chainB.Vals, 
s.chainB.NextVals, trustedVals, s.chainB.Signers), + Header2: s.chainB.CreateTMClientHeader(s.chainB.ChainID, s.chainB.ProposedHeader.Height, trustedHeight, s.chainB.ProposedHeader.Time, s.chainB.Vals, s.chainB.NextVals, trustedVals, s.chainB.Signers), } }, nil, @@ -77,14 +77,14 @@ func (suite *TendermintTestSuite) TestVerifyMisbehaviour() { { "valid time misbehaviour, header 1 time strictly less than header 2 time", func() { trustedHeight, ok := path.EndpointA.GetClientLatestHeight().(clienttypes.Height) - suite.Require().True(ok) + s.Require().True(ok) - trustedVals, ok := suite.chainB.TrustedValidators[trustedHeight.RevisionHeight] - suite.Require().True(ok) + trustedVals, ok := s.chainB.TrustedValidators[trustedHeight.RevisionHeight] + s.Require().True(ok) misbehaviour = &ibctm.Misbehaviour{ - Header1: suite.chainB.CreateTMClientHeader(suite.chainB.ChainID, suite.chainB.ProposedHeader.Height+3, trustedHeight, suite.chainB.ProposedHeader.Time, suite.chainB.Vals, suite.chainB.NextVals, trustedVals, suite.chainB.Signers), - Header2: suite.chainB.CreateTMClientHeader(suite.chainB.ChainID, suite.chainB.ProposedHeader.Height, trustedHeight, suite.chainB.ProposedHeader.Time.Add(time.Hour), suite.chainB.Vals, suite.chainB.NextVals, trustedVals, suite.chainB.Signers), + Header1: s.chainB.CreateTMClientHeader(s.chainB.ChainID, s.chainB.ProposedHeader.Height+3, trustedHeight, s.chainB.ProposedHeader.Time, s.chainB.Vals, s.chainB.NextVals, trustedVals, s.chainB.Signers), + Header2: s.chainB.CreateTMClientHeader(s.chainB.ChainID, s.chainB.ProposedHeader.Height, trustedHeight, s.chainB.ProposedHeader.Time.Add(time.Hour), s.chainB.Vals, s.chainB.NextVals, trustedVals, s.chainB.Signers), } }, nil, @@ -92,37 +92,37 @@ func (suite *TendermintTestSuite) TestVerifyMisbehaviour() { { "valid misbehavior at height greater than last consensusState", func() { trustedHeight, ok := path.EndpointA.GetClientLatestHeight().(clienttypes.Height) - suite.Require().True(ok) + s.Require().True(ok) - trustedVals, ok := suite.chainB.TrustedValidators[trustedHeight.RevisionHeight] - suite.Require().True(ok) + trustedVals, ok := s.chainB.TrustedValidators[trustedHeight.RevisionHeight] + s.Require().True(ok) misbehaviour = &ibctm.Misbehaviour{ - Header1: suite.chainB.CreateTMClientHeader(suite.chainB.ChainID, suite.chainB.ProposedHeader.Height+1, trustedHeight, suite.chainB.ProposedHeader.Time, suite.chainB.Vals, suite.chainB.NextVals, trustedVals, suite.chainB.Signers), - Header2: suite.chainB.CreateTMClientHeader(suite.chainB.ChainID, suite.chainB.ProposedHeader.Height+1, trustedHeight, suite.chainB.ProposedHeader.Time.Add(time.Minute), suite.chainB.Vals, suite.chainB.NextVals, trustedVals, suite.chainB.Signers), + Header1: s.chainB.CreateTMClientHeader(s.chainB.ChainID, s.chainB.ProposedHeader.Height+1, trustedHeight, s.chainB.ProposedHeader.Time, s.chainB.Vals, s.chainB.NextVals, trustedVals, s.chainB.Signers), + Header2: s.chainB.CreateTMClientHeader(s.chainB.ChainID, s.chainB.ProposedHeader.Height+1, trustedHeight, s.chainB.ProposedHeader.Time.Add(time.Minute), s.chainB.Vals, s.chainB.NextVals, trustedVals, s.chainB.Signers), } }, nil, }, { "valid misbehaviour with different trusted heights", func() { trustedHeight1, ok := path.EndpointA.GetClientLatestHeight().(clienttypes.Height) - suite.Require().True(ok) + s.Require().True(ok) - trustedVals1, ok := suite.chainB.TrustedValidators[trustedHeight1.RevisionHeight] - suite.Require().True(ok) + trustedVals1, ok := s.chainB.TrustedValidators[trustedHeight1.RevisionHeight] 
+ s.Require().True(ok) err = path.EndpointA.UpdateClient() - suite.Require().NoError(err) + s.Require().NoError(err) trustedHeight2, ok := path.EndpointA.GetClientLatestHeight().(clienttypes.Height) - suite.Require().True(ok) + s.Require().True(ok) - trustedVals2, ok := suite.chainB.TrustedValidators[trustedHeight2.RevisionHeight] - suite.Require().True(ok) + trustedVals2, ok := s.chainB.TrustedValidators[trustedHeight2.RevisionHeight] + s.Require().True(ok) misbehaviour = &ibctm.Misbehaviour{ - Header1: suite.chainB.CreateTMClientHeader(suite.chainB.ChainID, suite.chainB.ProposedHeader.Height, trustedHeight1, suite.chainB.ProposedHeader.Time.Add(time.Minute), suite.chainB.Vals, suite.chainB.NextVals, trustedVals1, suite.chainB.Signers), - Header2: suite.chainB.CreateTMClientHeader(suite.chainB.ChainID, suite.chainB.ProposedHeader.Height, trustedHeight2, suite.chainB.ProposedHeader.Time, suite.chainB.Vals, suite.chainB.NextVals, trustedVals2, suite.chainB.Signers), + Header1: s.chainB.CreateTMClientHeader(s.chainB.ChainID, s.chainB.ProposedHeader.Height, trustedHeight1, s.chainB.ProposedHeader.Time.Add(time.Minute), s.chainB.Vals, s.chainB.NextVals, trustedVals1, s.chainB.Signers), + Header2: s.chainB.CreateTMClientHeader(s.chainB.ChainID, s.chainB.ProposedHeader.Height, trustedHeight2, s.chainB.ProposedHeader.Time, s.chainB.Vals, s.chainB.NextVals, trustedVals2, s.chainB.Signers), } }, nil, @@ -130,44 +130,44 @@ func (suite *TendermintTestSuite) TestVerifyMisbehaviour() { { "valid misbehaviour at a previous revision", func() { trustedHeight, ok := path.EndpointA.GetClientLatestHeight().(clienttypes.Height) - suite.Require().True(ok) + s.Require().True(ok) - trustedVals, ok := suite.chainB.TrustedValidators[trustedHeight.RevisionHeight] - suite.Require().True(ok) + trustedVals, ok := s.chainB.TrustedValidators[trustedHeight.RevisionHeight] + s.Require().True(ok) err = path.EndpointA.UpdateClient() - suite.Require().NoError(err) + s.Require().NoError(err) height, ok := path.EndpointA.GetClientLatestHeight().(clienttypes.Height) - suite.Require().True(ok) + s.Require().True(ok) misbehaviour = &ibctm.Misbehaviour{ - Header1: suite.chainB.CreateTMClientHeader(suite.chainB.ChainID, int64(height.RevisionHeight), trustedHeight, suite.chainB.ProposedHeader.Time.Add(time.Minute), suite.chainB.Vals, suite.chainB.NextVals, trustedVals, suite.chainB.Signers), - Header2: suite.chainB.CreateTMClientHeader(suite.chainB.ChainID, int64(height.RevisionHeight), trustedHeight, suite.chainB.ProposedHeader.Time, suite.chainB.Vals, suite.chainB.NextVals, trustedVals, suite.chainB.Signers), + Header1: s.chainB.CreateTMClientHeader(s.chainB.ChainID, int64(height.RevisionHeight), trustedHeight, s.chainB.ProposedHeader.Time.Add(time.Minute), s.chainB.Vals, s.chainB.NextVals, trustedVals, s.chainB.Signers), + Header2: s.chainB.CreateTMClientHeader(s.chainB.ChainID, int64(height.RevisionHeight), trustedHeight, s.chainB.ProposedHeader.Time, s.chainB.Vals, s.chainB.NextVals, trustedVals, s.chainB.Signers), } // increment revision number err = path.EndpointB.UpgradeChain() - suite.Require().NoError(err) + s.Require().NoError(err) }, nil, }, { "valid misbehaviour at a future revision", func() { trustedHeight, ok := path.EndpointA.GetClientLatestHeight().(clienttypes.Height) - suite.Require().True(ok) + s.Require().True(ok) - trustedVals, ok := suite.chainB.TrustedValidators[trustedHeight.RevisionHeight] - suite.Require().True(ok) + trustedVals, ok := s.chainB.TrustedValidators[trustedHeight.RevisionHeight] + 
s.Require().True(ok) height, ok := path.EndpointA.GetClientLatestHeight().(clienttypes.Height) - suite.Require().True(ok) + s.Require().True(ok) - futureRevision := fmt.Sprintf("%s-%d", strings.TrimSuffix(suite.chainB.ChainID, fmt.Sprintf("-%d", clienttypes.ParseChainID(suite.chainB.ChainID))), height.GetRevisionNumber()+1) + futureRevision := fmt.Sprintf("%s-%d", strings.TrimSuffix(s.chainB.ChainID, fmt.Sprintf("-%d", clienttypes.ParseChainID(s.chainB.ChainID))), height.GetRevisionNumber()+1) misbehaviour = &ibctm.Misbehaviour{ - Header1: suite.chainB.CreateTMClientHeader(futureRevision, int64(height.RevisionHeight), trustedHeight, suite.chainB.ProposedHeader.Time.Add(time.Minute), suite.chainB.Vals, suite.chainB.NextVals, trustedVals, suite.chainB.Signers), - Header2: suite.chainB.CreateTMClientHeader(futureRevision, int64(height.RevisionHeight), trustedHeight, suite.chainB.ProposedHeader.Time, suite.chainB.Vals, suite.chainB.NextVals, trustedVals, suite.chainB.Signers), + Header1: s.chainB.CreateTMClientHeader(futureRevision, int64(height.RevisionHeight), trustedHeight, s.chainB.ProposedHeader.Time.Add(time.Minute), s.chainB.Vals, s.chainB.NextVals, trustedVals, s.chainB.Signers), + Header2: s.chainB.CreateTMClientHeader(futureRevision, int64(height.RevisionHeight), trustedHeight, s.chainB.ProposedHeader.Time, s.chainB.Vals, s.chainB.NextVals, trustedVals, s.chainB.Signers), } }, nil, @@ -175,21 +175,21 @@ func (suite *TendermintTestSuite) TestVerifyMisbehaviour() { { "valid misbehaviour with trusted heights at a previous revision", func() { trustedHeight, ok := path.EndpointA.GetClientLatestHeight().(clienttypes.Height) - suite.Require().True(ok) + s.Require().True(ok) - trustedVals, ok := suite.chainB.TrustedValidators[trustedHeight.RevisionHeight] - suite.Require().True(ok) + trustedVals, ok := s.chainB.TrustedValidators[trustedHeight.RevisionHeight] + s.Require().True(ok) // increment revision of chainID err = path.EndpointB.UpgradeChain() - suite.Require().NoError(err) + s.Require().NoError(err) height, ok := path.EndpointA.GetClientLatestHeight().(clienttypes.Height) - suite.Require().True(ok) + s.Require().True(ok) misbehaviour = &ibctm.Misbehaviour{ - Header1: suite.chainB.CreateTMClientHeader(suite.chainB.ChainID, int64(height.RevisionHeight), trustedHeight, suite.chainB.ProposedHeader.Time.Add(time.Minute), suite.chainB.Vals, suite.chainB.NextVals, trustedVals, suite.chainB.Signers), - Header2: suite.chainB.CreateTMClientHeader(suite.chainB.ChainID, int64(height.RevisionHeight), trustedHeight, suite.chainB.ProposedHeader.Time, suite.chainB.Vals, suite.chainB.NextVals, trustedVals, suite.chainB.Signers), + Header1: s.chainB.CreateTMClientHeader(s.chainB.ChainID, int64(height.RevisionHeight), trustedHeight, s.chainB.ProposedHeader.Time.Add(time.Minute), s.chainB.Vals, s.chainB.NextVals, trustedVals, s.chainB.Signers), + Header2: s.chainB.CreateTMClientHeader(s.chainB.ChainID, int64(height.RevisionHeight), trustedHeight, s.chainB.ProposedHeader.Time, s.chainB.Vals, s.chainB.NextVals, trustedVals, s.chainB.Signers), } }, nil, @@ -197,76 +197,76 @@ func (suite *TendermintTestSuite) TestVerifyMisbehaviour() { { "consensus state's valset hash different from misbehaviour should still pass", func() { trustedHeight, ok := path.EndpointA.GetClientLatestHeight().(clienttypes.Height) - suite.Require().True(ok) + s.Require().True(ok) - trustedVals, ok := suite.chainB.TrustedValidators[trustedHeight.RevisionHeight] - suite.Require().True(ok) + trustedVals, ok := 
s.chainB.TrustedValidators[trustedHeight.RevisionHeight] + s.Require().True(ok) err = path.EndpointA.UpdateClient() - suite.Require().NoError(err) + s.Require().NoError(err) height, ok := path.EndpointA.GetClientLatestHeight().(clienttypes.Height) - suite.Require().True(ok) + s.Require().True(ok) // Create bothValSet with both suite validator and altVal - bothValSet := cmttypes.NewValidatorSet(append(suite.chainB.Vals.Validators, altValSet.Proposer)) - bothSigners := suite.chainB.Signers + bothValSet := cmttypes.NewValidatorSet(append(s.chainB.Vals.Validators, altValSet.Proposer)) + bothSigners := s.chainB.Signers bothSigners[altValSet.Proposer.Address.String()] = altPrivVal misbehaviour = &ibctm.Misbehaviour{ - Header1: suite.chainB.CreateTMClientHeader(suite.chainB.ChainID, int64(height.RevisionHeight), trustedHeight, suite.chainB.ProposedHeader.Time.Add(time.Minute), bothValSet, suite.chainB.NextVals, trustedVals, bothSigners), - Header2: suite.chainB.CreateTMClientHeader(suite.chainB.ChainID, int64(height.RevisionHeight), trustedHeight, suite.chainB.ProposedHeader.Time, bothValSet, suite.chainB.NextVals, trustedVals, bothSigners), + Header1: s.chainB.CreateTMClientHeader(s.chainB.ChainID, int64(height.RevisionHeight), trustedHeight, s.chainB.ProposedHeader.Time.Add(time.Minute), bothValSet, s.chainB.NextVals, trustedVals, bothSigners), + Header2: s.chainB.CreateTMClientHeader(s.chainB.ChainID, int64(height.RevisionHeight), trustedHeight, s.chainB.ProposedHeader.Time, bothValSet, s.chainB.NextVals, trustedVals, bothSigners), } }, nil, }, { "invalid misbehaviour: misbehaviour from different chain", func() { trustedHeight, ok := path.EndpointA.GetClientLatestHeight().(clienttypes.Height) - suite.Require().True(ok) + s.Require().True(ok) - trustedVals, ok := suite.chainB.TrustedValidators[trustedHeight.RevisionHeight] - suite.Require().True(ok) + trustedVals, ok := s.chainB.TrustedValidators[trustedHeight.RevisionHeight] + s.Require().True(ok) err = path.EndpointA.UpdateClient() - suite.Require().NoError(err) + s.Require().NoError(err) height, ok := path.EndpointA.GetClientLatestHeight().(clienttypes.Height) - suite.Require().True(ok) + s.Require().True(ok) misbehaviour = &ibctm.Misbehaviour{ - Header1: suite.chainB.CreateTMClientHeader("evmos", int64(height.RevisionHeight), trustedHeight, suite.chainB.ProposedHeader.Time.Add(time.Minute), suite.chainB.Vals, suite.chainB.NextVals, trustedVals, suite.chainB.Signers), - Header2: suite.chainB.CreateTMClientHeader("evmos", int64(height.RevisionHeight), trustedHeight, suite.chainB.ProposedHeader.Time, suite.chainB.Vals, suite.chainB.NextVals, trustedVals, suite.chainB.Signers), + Header1: s.chainB.CreateTMClientHeader("evmos", int64(height.RevisionHeight), trustedHeight, s.chainB.ProposedHeader.Time.Add(time.Minute), s.chainB.Vals, s.chainB.NextVals, trustedVals, s.chainB.Signers), + Header2: s.chainB.CreateTMClientHeader("evmos", int64(height.RevisionHeight), trustedHeight, s.chainB.ProposedHeader.Time, s.chainB.Vals, s.chainB.NextVals, trustedVals, s.chainB.Signers), } }, errors.New("invalid light client misbehaviour"), }, { "misbehaviour trusted validators does not match validator hash in trusted consensus state", func() { trustedHeight, ok := path.EndpointA.GetClientLatestHeight().(clienttypes.Height) - suite.Require().True(ok) + s.Require().True(ok) err := path.EndpointA.UpdateClient() - suite.Require().NoError(err) + s.Require().NoError(err) height, ok := path.EndpointA.GetClientLatestHeight().(clienttypes.Height) - 
suite.Require().True(ok) + s.Require().True(ok) misbehaviour = &ibctm.Misbehaviour{ - Header1: suite.chainB.CreateTMClientHeader(suite.chainB.ChainID, int64(height.RevisionHeight), trustedHeight, suite.chainB.ProposedHeader.Time.Add(time.Minute), suite.chainB.Vals, suite.chainB.NextVals, altValSet, suite.chainB.Signers), - Header2: suite.chainB.CreateTMClientHeader(suite.chainB.ChainID, int64(height.RevisionHeight), trustedHeight, suite.chainB.ProposedHeader.Time, suite.chainB.Vals, suite.chainB.NextVals, altValSet, suite.chainB.Signers), + Header1: s.chainB.CreateTMClientHeader(s.chainB.ChainID, int64(height.RevisionHeight), trustedHeight, s.chainB.ProposedHeader.Time.Add(time.Minute), s.chainB.Vals, s.chainB.NextVals, altValSet, s.chainB.Signers), + Header2: s.chainB.CreateTMClientHeader(s.chainB.ChainID, int64(height.RevisionHeight), trustedHeight, s.chainB.ProposedHeader.Time, s.chainB.Vals, s.chainB.NextVals, altValSet, s.chainB.Signers), } }, errors.New("invalid validator set"), }, { "trusted consensus state does not exist", func() { trustedHeight, ok := path.EndpointA.GetClientLatestHeight().(clienttypes.Height) - suite.Require().True(ok) + s.Require().True(ok) - trustedVals, ok := suite.chainB.TrustedValidators[trustedHeight.RevisionHeight] - suite.Require().True(ok) + trustedVals, ok := s.chainB.TrustedValidators[trustedHeight.RevisionHeight] + s.Require().True(ok) misbehaviour = &ibctm.Misbehaviour{ - Header1: suite.chainB.CreateTMClientHeader(suite.chainB.ChainID, suite.chainB.ProposedHeader.Height, trustedHeight.Increment().(clienttypes.Height), suite.chainB.ProposedHeader.Time.Add(time.Minute), suite.chainB.Vals, suite.chainB.NextVals, trustedVals, suite.chainB.Signers), - Header2: suite.chainB.CreateTMClientHeader(suite.chainB.ChainID, suite.chainB.ProposedHeader.Height, trustedHeight, suite.chainB.ProposedHeader.Time, suite.chainB.Vals, suite.chainB.NextVals, trustedVals, suite.chainB.Signers), + Header1: s.chainB.CreateTMClientHeader(s.chainB.ChainID, s.chainB.ProposedHeader.Height, trustedHeight.Increment().(clienttypes.Height), s.chainB.ProposedHeader.Time.Add(time.Minute), s.chainB.Vals, s.chainB.NextVals, trustedVals, s.chainB.Signers), + Header2: s.chainB.CreateTMClientHeader(s.chainB.ChainID, s.chainB.ProposedHeader.Height, trustedHeight, s.chainB.ProposedHeader.Time, s.chainB.Vals, s.chainB.NextVals, trustedVals, s.chainB.Signers), } }, errors.New("consensus state not found"), }, @@ -278,107 +278,107 @@ func (suite *TendermintTestSuite) TestVerifyMisbehaviour() { { "trusting period expired", func() { trustedHeight, ok := path.EndpointA.GetClientLatestHeight().(clienttypes.Height) - suite.Require().True(ok) + s.Require().True(ok) - trustedVals, ok := suite.chainB.TrustedValidators[trustedHeight.RevisionHeight] - suite.Require().True(ok) + trustedVals, ok := s.chainB.TrustedValidators[trustedHeight.RevisionHeight] + s.Require().True(ok) err = path.EndpointA.UpdateClient() - suite.Require().NoError(err) + s.Require().NoError(err) height, ok := path.EndpointA.GetClientLatestHeight().(clienttypes.Height) - suite.Require().True(ok) + s.Require().True(ok) - suite.chainA.ExpireClient(path.EndpointA.ClientConfig.(*ibctesting.TendermintConfig).TrustingPeriod) + s.chainA.ExpireClient(path.EndpointA.ClientConfig.(*ibctesting.TendermintConfig).TrustingPeriod) misbehaviour = &ibctm.Misbehaviour{ - Header1: suite.chainB.CreateTMClientHeader(suite.chainB.ChainID, int64(height.RevisionHeight), trustedHeight, suite.chainB.ProposedHeader.Time.Add(time.Minute), suite.chainB.Vals, 
suite.chainB.NextVals, trustedVals, suite.chainB.Signers), - Header2: suite.chainB.CreateTMClientHeader(suite.chainB.ChainID, int64(height.RevisionHeight), trustedHeight, suite.chainB.ProposedHeader.Time, suite.chainB.Vals, suite.chainB.NextVals, trustedVals, suite.chainB.Signers), + Header1: s.chainB.CreateTMClientHeader(s.chainB.ChainID, int64(height.RevisionHeight), trustedHeight, s.chainB.ProposedHeader.Time.Add(time.Minute), s.chainB.Vals, s.chainB.NextVals, trustedVals, s.chainB.Signers), + Header2: s.chainB.CreateTMClientHeader(s.chainB.ChainID, int64(height.RevisionHeight), trustedHeight, s.chainB.ProposedHeader.Time, s.chainB.Vals, s.chainB.NextVals, trustedVals, s.chainB.Signers), } }, errors.New("time since latest trusted state has passed the trusting period"), }, { "header 1 valset has too much change", func() { trustedHeight, ok := path.EndpointA.GetClientLatestHeight().(clienttypes.Height) - suite.Require().True(ok) + s.Require().True(ok) - trustedVals, ok := suite.chainB.TrustedValidators[trustedHeight.RevisionHeight] - suite.Require().True(ok) + trustedVals, ok := s.chainB.TrustedValidators[trustedHeight.RevisionHeight] + s.Require().True(ok) err = path.EndpointA.UpdateClient() - suite.Require().NoError(err) + s.Require().NoError(err) height, ok := path.EndpointA.GetClientLatestHeight().(clienttypes.Height) - suite.Require().True(ok) + s.Require().True(ok) misbehaviour = &ibctm.Misbehaviour{ - Header1: suite.chainB.CreateTMClientHeader(suite.chainB.ChainID, int64(height.RevisionHeight), trustedHeight, suite.chainB.ProposedHeader.Time.Add(time.Minute), altValSet, suite.chainB.NextVals, trustedVals, altSigners), - Header2: suite.chainB.CreateTMClientHeader(suite.chainB.ChainID, int64(height.RevisionHeight), trustedHeight, suite.chainB.ProposedHeader.Time, suite.chainB.Vals, suite.chainB.NextVals, trustedVals, suite.chainB.Signers), + Header1: s.chainB.CreateTMClientHeader(s.chainB.ChainID, int64(height.RevisionHeight), trustedHeight, s.chainB.ProposedHeader.Time.Add(time.Minute), altValSet, s.chainB.NextVals, trustedVals, altSigners), + Header2: s.chainB.CreateTMClientHeader(s.chainB.ChainID, int64(height.RevisionHeight), trustedHeight, s.chainB.ProposedHeader.Time, s.chainB.Vals, s.chainB.NextVals, trustedVals, s.chainB.Signers), } }, errors.New("validator set in header has too much change from trusted validator set"), }, { "header 2 valset has too much change", func() { trustedHeight, ok := path.EndpointA.GetClientLatestHeight().(clienttypes.Height) - suite.Require().True(ok) + s.Require().True(ok) - trustedVals, ok := suite.chainB.TrustedValidators[trustedHeight.RevisionHeight] - suite.Require().True(ok) + trustedVals, ok := s.chainB.TrustedValidators[trustedHeight.RevisionHeight] + s.Require().True(ok) err = path.EndpointA.UpdateClient() - suite.Require().NoError(err) + s.Require().NoError(err) height, ok := path.EndpointA.GetClientLatestHeight().(clienttypes.Height) - suite.Require().True(ok) + s.Require().True(ok) misbehaviour = &ibctm.Misbehaviour{ - Header1: suite.chainB.CreateTMClientHeader(suite.chainB.ChainID, int64(height.RevisionHeight), trustedHeight, suite.chainB.ProposedHeader.Time.Add(time.Minute), suite.chainB.Vals, suite.chainB.NextVals, trustedVals, suite.chainB.Signers), - Header2: suite.chainB.CreateTMClientHeader(suite.chainB.ChainID, int64(height.RevisionHeight), trustedHeight, suite.chainB.ProposedHeader.Time, altValSet, suite.chainB.NextVals, trustedVals, altSigners), + Header1: s.chainB.CreateTMClientHeader(s.chainB.ChainID, 
int64(height.RevisionHeight), trustedHeight, s.chainB.ProposedHeader.Time.Add(time.Minute), s.chainB.Vals, s.chainB.NextVals, trustedVals, s.chainB.Signers), + Header2: s.chainB.CreateTMClientHeader(s.chainB.ChainID, int64(height.RevisionHeight), trustedHeight, s.chainB.ProposedHeader.Time, altValSet, s.chainB.NextVals, trustedVals, altSigners), } }, errors.New("validator set in header has too much change from trusted validator set"), }, { "both header 1 and header 2 valsets have too much change", func() { trustedHeight, ok := path.EndpointA.GetClientLatestHeight().(clienttypes.Height) - suite.Require().True(ok) + s.Require().True(ok) - trustedVals, ok := suite.chainB.TrustedValidators[trustedHeight.RevisionHeight] - suite.Require().True(ok) + trustedVals, ok := s.chainB.TrustedValidators[trustedHeight.RevisionHeight] + s.Require().True(ok) err = path.EndpointA.UpdateClient() - suite.Require().NoError(err) + s.Require().NoError(err) height, ok := path.EndpointA.GetClientLatestHeight().(clienttypes.Height) - suite.Require().True(ok) + s.Require().True(ok) misbehaviour = &ibctm.Misbehaviour{ - Header1: suite.chainB.CreateTMClientHeader(suite.chainB.ChainID, int64(height.RevisionHeight), trustedHeight, suite.chainB.ProposedHeader.Time.Add(time.Minute), altValSet, suite.chainB.NextVals, trustedVals, altSigners), - Header2: suite.chainB.CreateTMClientHeader(suite.chainB.ChainID, int64(height.RevisionHeight), trustedHeight, suite.chainB.ProposedHeader.Time, altValSet, suite.chainB.NextVals, trustedVals, altSigners), + Header1: s.chainB.CreateTMClientHeader(s.chainB.ChainID, int64(height.RevisionHeight), trustedHeight, s.chainB.ProposedHeader.Time.Add(time.Minute), altValSet, s.chainB.NextVals, trustedVals, altSigners), + Header2: s.chainB.CreateTMClientHeader(s.chainB.ChainID, int64(height.RevisionHeight), trustedHeight, s.chainB.ProposedHeader.Time, altValSet, s.chainB.NextVals, trustedVals, altSigners), } }, errors.New("validator set in header has too much change from trusted validator set"), }, } for _, tc := range testCases { - suite.Run(tc.name, func() { - suite.SetupTest() - path = ibctesting.NewPath(suite.chainA, suite.chainB) + s.Run(tc.name, func() { + s.SetupTest() + path = ibctesting.NewPath(s.chainA, s.chainB) err := path.EndpointA.CreateClient() - suite.Require().NoError(err) + s.Require().NoError(err) - lightClientModule, err := suite.chainA.App.GetIBCKeeper().ClientKeeper.Route(suite.chainA.GetContext(), path.EndpointA.ClientID) - suite.Require().NoError(err) + lightClientModule, err := s.chainA.App.GetIBCKeeper().ClientKeeper.Route(s.chainA.GetContext(), path.EndpointA.ClientID) + s.Require().NoError(err) tc.malleate() - err = lightClientModule.VerifyClientMessage(suite.chainA.GetContext(), path.EndpointA.ClientID, misbehaviour) + err = lightClientModule.VerifyClientMessage(s.chainA.GetContext(), path.EndpointA.ClientID, misbehaviour) if tc.expErr == nil { - suite.Require().NoError(err) + s.Require().NoError(err) } else { - suite.Require().Error(err) - suite.Require().ErrorContains(err, tc.expErr.Error()) + s.Require().Error(err) + s.Require().ErrorContains(err, tc.expErr.Error()) } }) } @@ -387,14 +387,14 @@ func (suite *TendermintTestSuite) TestVerifyMisbehaviour() { // test both fork and time misbehaviour for chainIDs not in the revision format // this function is separate as it must use a global variable in the testing package // to initialize chains not in the revision format -func (suite *TendermintTestSuite) TestVerifyMisbehaviourNonRevisionChainID() { +func (s 
*TendermintTestSuite) TestVerifyMisbehaviourNonRevisionChainID() { // NOTE: chains set to non revision format ibctesting.ChainIDSuffix = "" // Setup different validators and signers for testing different types of updates altPrivVal := cmttypes.NewMockPV() altPubKey, err := altPrivVal.GetPubKey() - suite.Require().NoError(err) + s.Require().NoError(err) // create modified heights to use for test-cases altVal := cmttypes.NewValidator(altPubKey, 100) @@ -416,20 +416,20 @@ func (suite *TendermintTestSuite) TestVerifyMisbehaviourNonRevisionChainID() { { "valid fork misbehaviour", func() { trustedHeight, ok := path.EndpointA.GetClientLatestHeight().(clienttypes.Height) - suite.Require().True(ok) + s.Require().True(ok) - trustedVals, ok := suite.chainB.TrustedValidators[trustedHeight.RevisionHeight] - suite.Require().True(ok) + trustedVals, ok := s.chainB.TrustedValidators[trustedHeight.RevisionHeight] + s.Require().True(ok) err = path.EndpointA.UpdateClient() - suite.Require().NoError(err) + s.Require().NoError(err) height, ok := path.EndpointA.GetClientLatestHeight().(clienttypes.Height) - suite.Require().True(ok) + s.Require().True(ok) misbehaviour = &ibctm.Misbehaviour{ - Header1: suite.chainB.CreateTMClientHeader(suite.chainB.ChainID, int64(height.RevisionHeight), trustedHeight, suite.chainB.ProposedHeader.Time.Add(time.Minute), suite.chainB.Vals, suite.chainB.NextVals, trustedVals, suite.chainB.Signers), - Header2: suite.chainB.CreateTMClientHeader(suite.chainB.ChainID, int64(height.RevisionHeight), trustedHeight, suite.chainB.ProposedHeader.Time, suite.chainB.Vals, suite.chainB.NextVals, trustedVals, suite.chainB.Signers), + Header1: s.chainB.CreateTMClientHeader(s.chainB.ChainID, int64(height.RevisionHeight), trustedHeight, s.chainB.ProposedHeader.Time.Add(time.Minute), s.chainB.Vals, s.chainB.NextVals, trustedVals, s.chainB.Signers), + Header2: s.chainB.CreateTMClientHeader(s.chainB.ChainID, int64(height.RevisionHeight), trustedHeight, s.chainB.ProposedHeader.Time, s.chainB.Vals, s.chainB.NextVals, trustedVals, s.chainB.Signers), } }, nil, @@ -437,14 +437,14 @@ func (suite *TendermintTestSuite) TestVerifyMisbehaviourNonRevisionChainID() { { "valid time misbehaviour", func() { trustedHeight, ok := path.EndpointA.GetClientLatestHeight().(clienttypes.Height) - suite.Require().True(ok) + s.Require().True(ok) - trustedVals, ok := suite.chainB.TrustedValidators[trustedHeight.RevisionHeight] - suite.Require().True(ok) + trustedVals, ok := s.chainB.TrustedValidators[trustedHeight.RevisionHeight] + s.Require().True(ok) misbehaviour = &ibctm.Misbehaviour{ - Header1: suite.chainB.CreateTMClientHeader(suite.chainB.ChainID, suite.chainB.ProposedHeader.Height+3, trustedHeight, suite.chainB.ProposedHeader.Time, suite.chainB.Vals, suite.chainB.NextVals, trustedVals, suite.chainB.Signers), - Header2: suite.chainB.CreateTMClientHeader(suite.chainB.ChainID, suite.chainB.ProposedHeader.Height, trustedHeight, suite.chainB.ProposedHeader.Time, suite.chainB.Vals, suite.chainB.NextVals, trustedVals, suite.chainB.Signers), + Header1: s.chainB.CreateTMClientHeader(s.chainB.ChainID, s.chainB.ProposedHeader.Height+3, trustedHeight, s.chainB.ProposedHeader.Time, s.chainB.Vals, s.chainB.NextVals, trustedVals, s.chainB.Signers), + Header2: s.chainB.CreateTMClientHeader(s.chainB.ChainID, s.chainB.ProposedHeader.Height, trustedHeight, s.chainB.ProposedHeader.Time, s.chainB.Vals, s.chainB.NextVals, trustedVals, s.chainB.Signers), } }, nil, @@ -452,14 +452,14 @@ func (suite *TendermintTestSuite) 
TestVerifyMisbehaviourNonRevisionChainID() { { "valid time misbehaviour, header 1 time strictly less than header 2 time", func() { trustedHeight, ok := path.EndpointA.GetClientLatestHeight().(clienttypes.Height) - suite.Require().True(ok) + s.Require().True(ok) - trustedVals, ok := suite.chainB.TrustedValidators[trustedHeight.RevisionHeight] - suite.Require().True(ok) + trustedVals, ok := s.chainB.TrustedValidators[trustedHeight.RevisionHeight] + s.Require().True(ok) misbehaviour = &ibctm.Misbehaviour{ - Header1: suite.chainB.CreateTMClientHeader(suite.chainB.ChainID, suite.chainB.ProposedHeader.Height+3, trustedHeight, suite.chainB.ProposedHeader.Time, suite.chainB.Vals, suite.chainB.NextVals, trustedVals, suite.chainB.Signers), - Header2: suite.chainB.CreateTMClientHeader(suite.chainB.ChainID, suite.chainB.ProposedHeader.Height, trustedHeight, suite.chainB.ProposedHeader.Time.Add(time.Hour), suite.chainB.Vals, suite.chainB.NextVals, trustedVals, suite.chainB.Signers), + Header1: s.chainB.CreateTMClientHeader(s.chainB.ChainID, s.chainB.ProposedHeader.Height+3, trustedHeight, s.chainB.ProposedHeader.Time, s.chainB.Vals, s.chainB.NextVals, trustedVals, s.chainB.Signers), + Header2: s.chainB.CreateTMClientHeader(s.chainB.ChainID, s.chainB.ProposedHeader.Height, trustedHeight, s.chainB.ProposedHeader.Time.Add(time.Hour), s.chainB.Vals, s.chainB.NextVals, trustedVals, s.chainB.Signers), } }, nil, @@ -467,37 +467,37 @@ func (suite *TendermintTestSuite) TestVerifyMisbehaviourNonRevisionChainID() { { "valid misbehavior at height greater than last consensusState", func() { trustedHeight, ok := path.EndpointA.GetClientLatestHeight().(clienttypes.Height) - suite.Require().True(ok) + s.Require().True(ok) - trustedVals, ok := suite.chainB.TrustedValidators[trustedHeight.RevisionHeight] - suite.Require().True(ok) + trustedVals, ok := s.chainB.TrustedValidators[trustedHeight.RevisionHeight] + s.Require().True(ok) misbehaviour = &ibctm.Misbehaviour{ - Header1: suite.chainB.CreateTMClientHeader(suite.chainB.ChainID, suite.chainB.ProposedHeader.Height+1, trustedHeight, suite.chainB.ProposedHeader.Time, suite.chainB.Vals, suite.chainB.NextVals, trustedVals, suite.chainB.Signers), - Header2: suite.chainB.CreateTMClientHeader(suite.chainB.ChainID, suite.chainB.ProposedHeader.Height+1, trustedHeight, suite.chainB.ProposedHeader.Time.Add(time.Minute), suite.chainB.Vals, suite.chainB.NextVals, trustedVals, suite.chainB.Signers), + Header1: s.chainB.CreateTMClientHeader(s.chainB.ChainID, s.chainB.ProposedHeader.Height+1, trustedHeight, s.chainB.ProposedHeader.Time, s.chainB.Vals, s.chainB.NextVals, trustedVals, s.chainB.Signers), + Header2: s.chainB.CreateTMClientHeader(s.chainB.ChainID, s.chainB.ProposedHeader.Height+1, trustedHeight, s.chainB.ProposedHeader.Time.Add(time.Minute), s.chainB.Vals, s.chainB.NextVals, trustedVals, s.chainB.Signers), } }, nil, }, { "valid misbehaviour with different trusted heights", func() { trustedHeight1, ok := path.EndpointA.GetClientLatestHeight().(clienttypes.Height) - suite.Require().True(ok) + s.Require().True(ok) - trustedVals1, ok := suite.chainB.TrustedValidators[trustedHeight1.RevisionHeight] - suite.Require().True(ok) + trustedVals1, ok := s.chainB.TrustedValidators[trustedHeight1.RevisionHeight] + s.Require().True(ok) err = path.EndpointA.UpdateClient() - suite.Require().NoError(err) + s.Require().NoError(err) trustedHeight2, ok := path.EndpointA.GetClientLatestHeight().(clienttypes.Height) - suite.Require().True(ok) + s.Require().True(ok) - trustedVals2, ok := 
suite.chainB.TrustedValidators[trustedHeight2.RevisionHeight] - suite.Require().True(ok) + trustedVals2, ok := s.chainB.TrustedValidators[trustedHeight2.RevisionHeight] + s.Require().True(ok) misbehaviour = &ibctm.Misbehaviour{ - Header1: suite.chainB.CreateTMClientHeader(suite.chainB.ChainID, suite.chainB.ProposedHeader.Height, trustedHeight1, suite.chainB.ProposedHeader.Time.Add(time.Minute), suite.chainB.Vals, suite.chainB.NextVals, trustedVals1, suite.chainB.Signers), - Header2: suite.chainB.CreateTMClientHeader(suite.chainB.ChainID, suite.chainB.ProposedHeader.Height, trustedHeight2, suite.chainB.ProposedHeader.Time, suite.chainB.Vals, suite.chainB.NextVals, trustedVals2, suite.chainB.Signers), + Header1: s.chainB.CreateTMClientHeader(s.chainB.ChainID, s.chainB.ProposedHeader.Height, trustedHeight1, s.chainB.ProposedHeader.Time.Add(time.Minute), s.chainB.Vals, s.chainB.NextVals, trustedVals1, s.chainB.Signers), + Header2: s.chainB.CreateTMClientHeader(s.chainB.ChainID, s.chainB.ProposedHeader.Height, trustedHeight2, s.chainB.ProposedHeader.Time, s.chainB.Vals, s.chainB.NextVals, trustedVals2, s.chainB.Signers), } }, nil, @@ -505,76 +505,76 @@ func (suite *TendermintTestSuite) TestVerifyMisbehaviourNonRevisionChainID() { { "consensus state's valset hash different from misbehaviour should still pass", func() { trustedHeight, ok := path.EndpointA.GetClientLatestHeight().(clienttypes.Height) - suite.Require().True(ok) + s.Require().True(ok) - trustedVals, ok := suite.chainB.TrustedValidators[trustedHeight.RevisionHeight] - suite.Require().True(ok) + trustedVals, ok := s.chainB.TrustedValidators[trustedHeight.RevisionHeight] + s.Require().True(ok) err = path.EndpointA.UpdateClient() - suite.Require().NoError(err) + s.Require().NoError(err) height, ok := path.EndpointA.GetClientLatestHeight().(clienttypes.Height) - suite.Require().True(ok) + s.Require().True(ok) // Create bothValSet with both suite validator and altVal - bothValSet := cmttypes.NewValidatorSet(append(suite.chainB.Vals.Validators, altValSet.Proposer)) - bothSigners := suite.chainB.Signers + bothValSet := cmttypes.NewValidatorSet(append(s.chainB.Vals.Validators, altValSet.Proposer)) + bothSigners := s.chainB.Signers bothSigners[altValSet.Proposer.Address.String()] = altPrivVal misbehaviour = &ibctm.Misbehaviour{ - Header1: suite.chainB.CreateTMClientHeader(suite.chainB.ChainID, int64(height.RevisionHeight), trustedHeight, suite.chainB.ProposedHeader.Time.Add(time.Minute), bothValSet, suite.chainB.NextVals, trustedVals, bothSigners), - Header2: suite.chainB.CreateTMClientHeader(suite.chainB.ChainID, int64(height.RevisionHeight), trustedHeight, suite.chainB.ProposedHeader.Time, bothValSet, suite.chainB.NextVals, trustedVals, bothSigners), + Header1: s.chainB.CreateTMClientHeader(s.chainB.ChainID, int64(height.RevisionHeight), trustedHeight, s.chainB.ProposedHeader.Time.Add(time.Minute), bothValSet, s.chainB.NextVals, trustedVals, bothSigners), + Header2: s.chainB.CreateTMClientHeader(s.chainB.ChainID, int64(height.RevisionHeight), trustedHeight, s.chainB.ProposedHeader.Time, bothValSet, s.chainB.NextVals, trustedVals, bothSigners), } }, nil, }, { "invalid misbehaviour: misbehaviour from different chain", func() { trustedHeight, ok := path.EndpointA.GetClientLatestHeight().(clienttypes.Height) - suite.Require().True(ok) + s.Require().True(ok) - trustedVals, ok := suite.chainB.TrustedValidators[trustedHeight.RevisionHeight] - suite.Require().True(ok) + trustedVals, ok := s.chainB.TrustedValidators[trustedHeight.RevisionHeight] + 
s.Require().True(ok) err = path.EndpointA.UpdateClient() - suite.Require().NoError(err) + s.Require().NoError(err) height, ok := path.EndpointA.GetClientLatestHeight().(clienttypes.Height) - suite.Require().True(ok) + s.Require().True(ok) misbehaviour = &ibctm.Misbehaviour{ - Header1: suite.chainB.CreateTMClientHeader("evmos", int64(height.RevisionHeight), trustedHeight, suite.chainB.ProposedHeader.Time.Add(time.Minute), suite.chainB.Vals, suite.chainB.NextVals, trustedVals, suite.chainB.Signers), - Header2: suite.chainB.CreateTMClientHeader("evmos", int64(height.RevisionHeight), trustedHeight, suite.chainB.ProposedHeader.Time, suite.chainB.Vals, suite.chainB.NextVals, trustedVals, suite.chainB.Signers), + Header1: s.chainB.CreateTMClientHeader("evmos", int64(height.RevisionHeight), trustedHeight, s.chainB.ProposedHeader.Time.Add(time.Minute), s.chainB.Vals, s.chainB.NextVals, trustedVals, s.chainB.Signers), + Header2: s.chainB.CreateTMClientHeader("evmos", int64(height.RevisionHeight), trustedHeight, s.chainB.ProposedHeader.Time, s.chainB.Vals, s.chainB.NextVals, trustedVals, s.chainB.Signers), } }, errors.New("validator set in header has too much change from trusted validator set"), }, { "misbehaviour trusted validators does not match validator hash in trusted consensus state", func() { trustedHeight, ok := path.EndpointA.GetClientLatestHeight().(clienttypes.Height) - suite.Require().True(ok) + s.Require().True(ok) err = path.EndpointA.UpdateClient() - suite.Require().NoError(err) + s.Require().NoError(err) height, ok := path.EndpointA.GetClientLatestHeight().(clienttypes.Height) - suite.Require().True(ok) + s.Require().True(ok) misbehaviour = &ibctm.Misbehaviour{ - Header1: suite.chainB.CreateTMClientHeader(suite.chainB.ChainID, int64(height.RevisionHeight), trustedHeight, suite.chainB.ProposedHeader.Time.Add(time.Minute), suite.chainB.Vals, suite.chainB.NextVals, altValSet, suite.chainB.Signers), - Header2: suite.chainB.CreateTMClientHeader(suite.chainB.ChainID, int64(height.RevisionHeight), trustedHeight, suite.chainB.ProposedHeader.Time, suite.chainB.Vals, suite.chainB.NextVals, altValSet, suite.chainB.Signers), + Header1: s.chainB.CreateTMClientHeader(s.chainB.ChainID, int64(height.RevisionHeight), trustedHeight, s.chainB.ProposedHeader.Time.Add(time.Minute), s.chainB.Vals, s.chainB.NextVals, altValSet, s.chainB.Signers), + Header2: s.chainB.CreateTMClientHeader(s.chainB.ChainID, int64(height.RevisionHeight), trustedHeight, s.chainB.ProposedHeader.Time, s.chainB.Vals, s.chainB.NextVals, altValSet, s.chainB.Signers), } }, errors.New("invalid validator set"), }, { "trusted consensus state does not exist", func() { trustedHeight, ok := path.EndpointA.GetClientLatestHeight().(clienttypes.Height) - suite.Require().True(ok) + s.Require().True(ok) - trustedVals, ok := suite.chainB.TrustedValidators[trustedHeight.RevisionHeight] - suite.Require().True(ok) + trustedVals, ok := s.chainB.TrustedValidators[trustedHeight.RevisionHeight] + s.Require().True(ok) misbehaviour = &ibctm.Misbehaviour{ - Header1: suite.chainB.CreateTMClientHeader(suite.chainB.ChainID, suite.chainB.ProposedHeader.Height, trustedHeight.Increment().(clienttypes.Height), suite.chainB.ProposedHeader.Time.Add(time.Minute), suite.chainB.Vals, suite.chainB.NextVals, trustedVals, suite.chainB.Signers), - Header2: suite.chainB.CreateTMClientHeader(suite.chainB.ChainID, suite.chainB.ProposedHeader.Height, trustedHeight, suite.chainB.ProposedHeader.Time, suite.chainB.Vals, suite.chainB.NextVals, trustedVals, suite.chainB.Signers), 
+ Header1: s.chainB.CreateTMClientHeader(s.chainB.ChainID, s.chainB.ProposedHeader.Height, trustedHeight.Increment().(clienttypes.Height), s.chainB.ProposedHeader.Time.Add(time.Minute), s.chainB.Vals, s.chainB.NextVals, trustedVals, s.chainB.Signers), + Header2: s.chainB.CreateTMClientHeader(s.chainB.ChainID, s.chainB.ProposedHeader.Height, trustedHeight, s.chainB.ProposedHeader.Time, s.chainB.Vals, s.chainB.NextVals, trustedVals, s.chainB.Signers), } }, errors.New("consensus state not found"), }, @@ -586,107 +586,107 @@ func (suite *TendermintTestSuite) TestVerifyMisbehaviourNonRevisionChainID() { { "trusting period expired", func() { trustedHeight, ok := path.EndpointA.GetClientLatestHeight().(clienttypes.Height) - suite.Require().True(ok) + s.Require().True(ok) - trustedVals, ok := suite.chainB.TrustedValidators[trustedHeight.RevisionHeight] - suite.Require().True(ok) + trustedVals, ok := s.chainB.TrustedValidators[trustedHeight.RevisionHeight] + s.Require().True(ok) err = path.EndpointA.UpdateClient() - suite.Require().NoError(err) + s.Require().NoError(err) height, ok := path.EndpointA.GetClientLatestHeight().(clienttypes.Height) - suite.Require().True(ok) + s.Require().True(ok) - suite.chainA.ExpireClient(path.EndpointA.ClientConfig.(*ibctesting.TendermintConfig).TrustingPeriod) + s.chainA.ExpireClient(path.EndpointA.ClientConfig.(*ibctesting.TendermintConfig).TrustingPeriod) misbehaviour = &ibctm.Misbehaviour{ - Header1: suite.chainB.CreateTMClientHeader(suite.chainB.ChainID, int64(height.RevisionHeight), trustedHeight, suite.chainB.ProposedHeader.Time.Add(time.Minute), suite.chainB.Vals, suite.chainB.NextVals, trustedVals, suite.chainB.Signers), - Header2: suite.chainB.CreateTMClientHeader(suite.chainB.ChainID, int64(height.RevisionHeight), trustedHeight, suite.chainB.ProposedHeader.Time, suite.chainB.Vals, suite.chainB.NextVals, trustedVals, suite.chainB.Signers), + Header1: s.chainB.CreateTMClientHeader(s.chainB.ChainID, int64(height.RevisionHeight), trustedHeight, s.chainB.ProposedHeader.Time.Add(time.Minute), s.chainB.Vals, s.chainB.NextVals, trustedVals, s.chainB.Signers), + Header2: s.chainB.CreateTMClientHeader(s.chainB.ChainID, int64(height.RevisionHeight), trustedHeight, s.chainB.ProposedHeader.Time, s.chainB.Vals, s.chainB.NextVals, trustedVals, s.chainB.Signers), } }, errors.New("time since latest trusted state has passed the trusting period"), }, { "header 1 valset has too much change", func() { trustedHeight, ok := path.EndpointA.GetClientLatestHeight().(clienttypes.Height) - suite.Require().True(ok) + s.Require().True(ok) - trustedVals, ok := suite.chainB.TrustedValidators[trustedHeight.RevisionHeight] - suite.Require().True(ok) + trustedVals, ok := s.chainB.TrustedValidators[trustedHeight.RevisionHeight] + s.Require().True(ok) err = path.EndpointA.UpdateClient() - suite.Require().NoError(err) + s.Require().NoError(err) height, ok := path.EndpointA.GetClientLatestHeight().(clienttypes.Height) - suite.Require().True(ok) + s.Require().True(ok) misbehaviour = &ibctm.Misbehaviour{ - Header1: suite.chainB.CreateTMClientHeader(suite.chainB.ChainID, int64(height.RevisionHeight), trustedHeight, suite.chainB.ProposedHeader.Time.Add(time.Minute), altValSet, suite.chainB.NextVals, trustedVals, altSigners), - Header2: suite.chainB.CreateTMClientHeader(suite.chainB.ChainID, int64(height.RevisionHeight), trustedHeight, suite.chainB.ProposedHeader.Time, suite.chainB.Vals, suite.chainB.NextVals, trustedVals, suite.chainB.Signers), + Header1: 
s.chainB.CreateTMClientHeader(s.chainB.ChainID, int64(height.RevisionHeight), trustedHeight, s.chainB.ProposedHeader.Time.Add(time.Minute), altValSet, s.chainB.NextVals, trustedVals, altSigners), + Header2: s.chainB.CreateTMClientHeader(s.chainB.ChainID, int64(height.RevisionHeight), trustedHeight, s.chainB.ProposedHeader.Time, s.chainB.Vals, s.chainB.NextVals, trustedVals, s.chainB.Signers), } }, errors.New("validator set in header has too much change from trusted validator set"), }, { "header 2 valset has too much change", func() { trustedHeight, ok := path.EndpointA.GetClientLatestHeight().(clienttypes.Height) - suite.Require().True(ok) + s.Require().True(ok) - trustedVals, ok := suite.chainB.TrustedValidators[trustedHeight.RevisionHeight] - suite.Require().True(ok) + trustedVals, ok := s.chainB.TrustedValidators[trustedHeight.RevisionHeight] + s.Require().True(ok) err = path.EndpointA.UpdateClient() - suite.Require().NoError(err) + s.Require().NoError(err) height, ok := path.EndpointA.GetClientLatestHeight().(clienttypes.Height) - suite.Require().True(ok) + s.Require().True(ok) misbehaviour = &ibctm.Misbehaviour{ - Header1: suite.chainB.CreateTMClientHeader(suite.chainB.ChainID, int64(height.RevisionHeight), trustedHeight, suite.chainB.ProposedHeader.Time.Add(time.Minute), suite.chainB.Vals, suite.chainB.NextVals, trustedVals, suite.chainB.Signers), - Header2: suite.chainB.CreateTMClientHeader(suite.chainB.ChainID, int64(height.RevisionHeight), trustedHeight, suite.chainB.ProposedHeader.Time, altValSet, suite.chainB.NextVals, trustedVals, altSigners), + Header1: s.chainB.CreateTMClientHeader(s.chainB.ChainID, int64(height.RevisionHeight), trustedHeight, s.chainB.ProposedHeader.Time.Add(time.Minute), s.chainB.Vals, s.chainB.NextVals, trustedVals, s.chainB.Signers), + Header2: s.chainB.CreateTMClientHeader(s.chainB.ChainID, int64(height.RevisionHeight), trustedHeight, s.chainB.ProposedHeader.Time, altValSet, s.chainB.NextVals, trustedVals, altSigners), } }, errors.New("validator set in header has too much change from trusted validator set"), }, { "both header 1 and header 2 valsets have too much change", func() { trustedHeight, ok := path.EndpointA.GetClientLatestHeight().(clienttypes.Height) - suite.Require().True(ok) + s.Require().True(ok) - trustedVals, ok := suite.chainB.TrustedValidators[trustedHeight.RevisionHeight] - suite.Require().True(ok) + trustedVals, ok := s.chainB.TrustedValidators[trustedHeight.RevisionHeight] + s.Require().True(ok) err = path.EndpointA.UpdateClient() - suite.Require().NoError(err) + s.Require().NoError(err) height, ok := path.EndpointA.GetClientLatestHeight().(clienttypes.Height) - suite.Require().True(ok) + s.Require().True(ok) misbehaviour = &ibctm.Misbehaviour{ - Header1: suite.chainB.CreateTMClientHeader(suite.chainB.ChainID, int64(height.RevisionHeight), trustedHeight, suite.chainB.ProposedHeader.Time.Add(time.Minute), altValSet, suite.chainB.NextVals, trustedVals, altSigners), - Header2: suite.chainB.CreateTMClientHeader(suite.chainB.ChainID, int64(height.RevisionHeight), trustedHeight, suite.chainB.ProposedHeader.Time, altValSet, suite.chainB.NextVals, trustedVals, altSigners), + Header1: s.chainB.CreateTMClientHeader(s.chainB.ChainID, int64(height.RevisionHeight), trustedHeight, s.chainB.ProposedHeader.Time.Add(time.Minute), altValSet, s.chainB.NextVals, trustedVals, altSigners), + Header2: s.chainB.CreateTMClientHeader(s.chainB.ChainID, int64(height.RevisionHeight), trustedHeight, s.chainB.ProposedHeader.Time, altValSet, s.chainB.NextVals, 
trustedVals, altSigners), } }, errors.New("validator set in header has too much change from trusted validator set"), }, } for _, tc := range testCases { - suite.Run(tc.name, func() { - suite.SetupTest() - path = ibctesting.NewPath(suite.chainA, suite.chainB) + s.Run(tc.name, func() { + s.SetupTest() + path = ibctesting.NewPath(s.chainA, s.chainB) err := path.EndpointA.CreateClient() - suite.Require().NoError(err) + s.Require().NoError(err) - lightClientModule, err := suite.chainA.App.GetIBCKeeper().ClientKeeper.Route(suite.chainA.GetContext(), path.EndpointA.ClientID) - suite.Require().NoError(err) + lightClientModule, err := s.chainA.App.GetIBCKeeper().ClientKeeper.Route(s.chainA.GetContext(), path.EndpointA.ClientID) + s.Require().NoError(err) tc.malleate() - err = lightClientModule.VerifyClientMessage(suite.chainA.GetContext(), path.EndpointA.ClientID, misbehaviour) + err = lightClientModule.VerifyClientMessage(s.chainA.GetContext(), path.EndpointA.ClientID, misbehaviour) if tc.expErr == nil { - suite.Require().NoError(err) + s.Require().NoError(err) } else { - suite.Require().Error(err) - suite.Require().ErrorContains(err, tc.expErr.Error()) + s.Require().Error(err) + s.Require().ErrorContains(err, tc.expErr.Error()) } }) } diff --git a/modules/light-clients/07-tendermint/misbehaviour_test.go b/modules/light-clients/07-tendermint/misbehaviour_test.go index 9ffa7804cdc..d1307693993 100644 --- a/modules/light-clients/07-tendermint/misbehaviour_test.go +++ b/modules/light-clients/07-tendermint/misbehaviour_test.go @@ -16,22 +16,22 @@ import ( ibctesting "github.com/cosmos/ibc-go/v10/testing" ) -func (suite *TendermintTestSuite) TestMisbehaviour() { +func (s *TendermintTestSuite) TestMisbehaviour() { heightMinus1 := clienttypes.NewHeight(0, height.RevisionHeight-1) misbehaviour := &ibctm.Misbehaviour{ - Header1: suite.header, - Header2: suite.chainA.CreateTMClientHeader(chainID, int64(height.RevisionHeight), heightMinus1, suite.now, suite.valSet, suite.valSet, suite.valSet, suite.signers), + Header1: s.header, + Header2: s.chainA.CreateTMClientHeader(chainID, int64(height.RevisionHeight), heightMinus1, s.now, s.valSet, s.valSet, s.valSet, s.signers), ClientId: clientID, } - suite.Require().Equal(exported.Tendermint, misbehaviour.ClientType()) + s.Require().Equal(exported.Tendermint, misbehaviour.ClientType()) } -func (suite *TendermintTestSuite) TestMisbehaviourValidateBasic() { +func (s *TendermintTestSuite) TestMisbehaviourValidateBasic() { altPrivVal := cmttypes.NewMockPV() altPubKey, err := altPrivVal.GetPubKey() - suite.Require().NoError(err) + s.Require().NoError(err) revisionHeight := int64(height.RevisionHeight) @@ -41,7 +41,7 @@ func (suite *TendermintTestSuite) TestMisbehaviourValidateBasic() { altValSet := cmttypes.NewValidatorSet([]*cmttypes.Validator{altVal}) // Create signer array and ensure it is in same order as bothValSet - bothValSet, bothSigners := getBothSigners(suite, altVal, altPrivVal) + bothValSet, bothSigners := getBothSigners(s, altVal, altPrivVal) altSignerArr := []cmttypes.PrivValidator{altPrivVal} @@ -56,8 +56,8 @@ func (suite *TendermintTestSuite) TestMisbehaviourValidateBasic() { { "valid fork misbehaviour, two headers at same height have different time", &ibctm.Misbehaviour{ - Header1: suite.header, - Header2: suite.chainA.CreateTMClientHeader(chainID, int64(height.RevisionHeight), heightMinus1, suite.now.Add(time.Minute), suite.valSet, suite.valSet, suite.valSet, suite.signers), + Header1: s.header, + Header2: s.chainA.CreateTMClientHeader(chainID, 
int64(height.RevisionHeight), heightMinus1, s.now.Add(time.Minute), s.valSet, s.valSet, s.valSet, s.signers), ClientId: clientID, }, func(misbehaviour *ibctm.Misbehaviour) error { return nil }, @@ -66,8 +66,8 @@ func (suite *TendermintTestSuite) TestMisbehaviourValidateBasic() { { "valid time misbehaviour, both headers at different heights are at same time", &ibctm.Misbehaviour{ - Header1: suite.chainA.CreateTMClientHeader(chainID, int64(height.RevisionHeight+5), heightMinus1, suite.now, suite.valSet, suite.valSet, suite.valSet, suite.signers), - Header2: suite.header, + Header1: s.chainA.CreateTMClientHeader(chainID, int64(height.RevisionHeight+5), heightMinus1, s.now, s.valSet, s.valSet, s.valSet, s.signers), + Header2: s.header, ClientId: clientID, }, func(misbehaviour *ibctm.Misbehaviour) error { return nil }, @@ -75,21 +75,21 @@ func (suite *TendermintTestSuite) TestMisbehaviourValidateBasic() { }, { "misbehaviour Header1 is nil", - ibctm.NewMisbehaviour(clientID, nil, suite.header), + ibctm.NewMisbehaviour(clientID, nil, s.header), func(m *ibctm.Misbehaviour) error { return nil }, errorsmod.Wrap(ibctm.ErrInvalidHeader, "misbehaviour Header1 cannot be nil"), }, { "misbehaviour Header2 is nil", - ibctm.NewMisbehaviour(clientID, suite.header, nil), + ibctm.NewMisbehaviour(clientID, s.header, nil), func(m *ibctm.Misbehaviour) error { return nil }, errorsmod.Wrap(ibctm.ErrInvalidHeader, "misbehaviour Header2 cannot be nil"), }, { "valid misbehaviour with different trusted headers", &ibctm.Misbehaviour{ - Header1: suite.header, - Header2: suite.chainA.CreateTMClientHeader(chainID, int64(height.RevisionHeight), clienttypes.NewHeight(0, height.RevisionHeight-3), suite.now.Add(time.Minute), suite.valSet, suite.valSet, bothValSet, suite.signers), + Header1: s.header, + Header2: s.chainA.CreateTMClientHeader(chainID, int64(height.RevisionHeight), clienttypes.NewHeight(0, height.RevisionHeight-3), s.now.Add(time.Minute), s.valSet, s.valSet, bothValSet, s.signers), ClientId: clientID, }, func(misbehaviour *ibctm.Misbehaviour) error { return nil }, @@ -98,8 +98,8 @@ func (suite *TendermintTestSuite) TestMisbehaviourValidateBasic() { { "trusted height is 0 in Header1", &ibctm.Misbehaviour{ - Header1: suite.chainA.CreateTMClientHeader(chainID, int64(height.RevisionHeight), clienttypes.ZeroHeight(), suite.now.Add(time.Minute), suite.valSet, suite.valSet, suite.valSet, suite.signers), - Header2: suite.header, + Header1: s.chainA.CreateTMClientHeader(chainID, int64(height.RevisionHeight), clienttypes.ZeroHeight(), s.now.Add(time.Minute), s.valSet, s.valSet, s.valSet, s.signers), + Header2: s.header, ClientId: clientID, }, func(misbehaviour *ibctm.Misbehaviour) error { return nil }, @@ -108,8 +108,8 @@ func (suite *TendermintTestSuite) TestMisbehaviourValidateBasic() { { "trusted height is 0 in Header2", &ibctm.Misbehaviour{ - Header1: suite.header, - Header2: suite.chainA.CreateTMClientHeader(chainID, int64(height.RevisionHeight), clienttypes.ZeroHeight(), suite.now.Add(time.Minute), suite.valSet, suite.valSet, suite.valSet, suite.signers), + Header1: s.header, + Header2: s.chainA.CreateTMClientHeader(chainID, int64(height.RevisionHeight), clienttypes.ZeroHeight(), s.now.Add(time.Minute), s.valSet, s.valSet, s.valSet, s.signers), ClientId: clientID, }, func(misbehaviour *ibctm.Misbehaviour) error { return nil }, @@ -118,8 +118,8 @@ func (suite *TendermintTestSuite) TestMisbehaviourValidateBasic() { { "trusted valset is nil in Header1", &ibctm.Misbehaviour{ - Header1: 
suite.chainA.CreateTMClientHeader(chainID, int64(height.RevisionHeight), heightMinus1, suite.now.Add(time.Minute), suite.valSet, suite.valSet, nil, suite.signers), - Header2: suite.header, + Header1: s.chainA.CreateTMClientHeader(chainID, int64(height.RevisionHeight), heightMinus1, s.now.Add(time.Minute), s.valSet, s.valSet, nil, s.signers), + Header2: s.header, ClientId: clientID, }, func(misbehaviour *ibctm.Misbehaviour) error { return nil }, @@ -128,8 +128,8 @@ func (suite *TendermintTestSuite) TestMisbehaviourValidateBasic() { { "trusted valset is nil in Header2", &ibctm.Misbehaviour{ - Header1: suite.header, - Header2: suite.chainA.CreateTMClientHeader(chainID, int64(height.RevisionHeight), heightMinus1, suite.now.Add(time.Minute), suite.valSet, suite.valSet, nil, suite.signers), + Header1: s.header, + Header2: s.chainA.CreateTMClientHeader(chainID, int64(height.RevisionHeight), heightMinus1, s.now.Add(time.Minute), s.valSet, s.valSet, nil, s.signers), ClientId: clientID, }, func(misbehaviour *ibctm.Misbehaviour) error { return nil }, @@ -138,8 +138,8 @@ func (suite *TendermintTestSuite) TestMisbehaviourValidateBasic() { { "invalid client ID ", &ibctm.Misbehaviour{ - Header1: suite.header, - Header2: suite.chainA.CreateTMClientHeader(chainID, int64(height.RevisionHeight), heightMinus1, suite.now, suite.valSet, suite.valSet, suite.valSet, suite.signers), + Header1: s.header, + Header2: s.chainA.CreateTMClientHeader(chainID, int64(height.RevisionHeight), heightMinus1, s.now, s.valSet, s.valSet, s.valSet, s.signers), ClientId: "GAI", }, func(misbehaviour *ibctm.Misbehaviour) error { return nil }, @@ -148,8 +148,8 @@ func (suite *TendermintTestSuite) TestMisbehaviourValidateBasic() { { "chainIDs do not match", &ibctm.Misbehaviour{ - Header1: suite.header, - Header2: suite.chainA.CreateTMClientHeader("ethermint", int64(height.RevisionHeight), heightMinus1, suite.now, suite.valSet, suite.valSet, suite.valSet, suite.signers), + Header1: s.header, + Header2: s.chainA.CreateTMClientHeader("ethermint", int64(height.RevisionHeight), heightMinus1, s.now, s.valSet, s.valSet, s.valSet, s.signers), ClientId: clientID, }, func(misbehaviour *ibctm.Misbehaviour) error { return nil }, @@ -158,8 +158,8 @@ func (suite *TendermintTestSuite) TestMisbehaviourValidateBasic() { { "header2 height is greater", &ibctm.Misbehaviour{ - Header1: suite.header, - Header2: suite.chainA.CreateTMClientHeader(chainID, 6, clienttypes.NewHeight(0, height.RevisionHeight+1), suite.now, suite.valSet, suite.valSet, suite.valSet, suite.signers), + Header1: s.header, + Header2: s.chainA.CreateTMClientHeader(chainID, 6, clienttypes.NewHeight(0, height.RevisionHeight+1), s.now, s.valSet, s.valSet, s.valSet, s.signers), ClientId: clientID, }, func(misbehaviour *ibctm.Misbehaviour) error { return nil }, @@ -168,8 +168,8 @@ func (suite *TendermintTestSuite) TestMisbehaviourValidateBasic() { { "header 1 doesn't have 2/3 majority", &ibctm.Misbehaviour{ - Header1: suite.chainA.CreateTMClientHeader(chainID, int64(height.RevisionHeight), heightMinus1, suite.now, bothValSet, bothValSet, suite.valSet, bothSigners), - Header2: suite.header, + Header1: s.chainA.CreateTMClientHeader(chainID, int64(height.RevisionHeight), heightMinus1, s.now, bothValSet, bothValSet, s.valSet, bothSigners), + Header2: s.header, ClientId: clientID, }, func(misbehaviour *ibctm.Misbehaviour) error { @@ -180,7 +180,7 @@ func (suite *TendermintTestSuite) TestMisbehaviourValidateBasic() { return err } - extCommit, err := cmttypes.MakeExtCommit(*blockID, 
int64(misbehaviour.Header2.GetHeight().GetRevisionHeight()), misbehaviour.Header1.Commit.Round, wrongVoteSet, altSignerArr, suite.now, false) + extCommit, err := cmttypes.MakeExtCommit(*blockID, int64(misbehaviour.Header2.GetHeight().GetRevisionHeight()), misbehaviour.Header1.Commit.Round, wrongVoteSet, altSignerArr, s.now, false) misbehaviour.Header1.Commit = extCommit.ToCommit().ToProto() return err }, @@ -189,8 +189,8 @@ func (suite *TendermintTestSuite) TestMisbehaviourValidateBasic() { { "header 2 doesn't have 2/3 majority", &ibctm.Misbehaviour{ - Header1: suite.header, - Header2: suite.chainA.CreateTMClientHeader(chainID, int64(height.RevisionHeight), heightMinus1, suite.now, bothValSet, bothValSet, suite.valSet, bothSigners), + Header1: s.header, + Header2: s.chainA.CreateTMClientHeader(chainID, int64(height.RevisionHeight), heightMinus1, s.now, bothValSet, bothValSet, s.valSet, bothSigners), ClientId: clientID, }, func(misbehaviour *ibctm.Misbehaviour) error { @@ -201,7 +201,7 @@ func (suite *TendermintTestSuite) TestMisbehaviourValidateBasic() { return err } - extCommit, err := cmttypes.MakeExtCommit(*blockID, int64(misbehaviour.Header2.GetHeight().GetRevisionHeight()), misbehaviour.Header2.Commit.Round, wrongVoteSet, altSignerArr, suite.now, false) + extCommit, err := cmttypes.MakeExtCommit(*blockID, int64(misbehaviour.Header2.GetHeight().GetRevisionHeight()), misbehaviour.Header2.Commit.Round, wrongVoteSet, altSignerArr, s.now, false) misbehaviour.Header2.Commit = extCommit.ToCommit().ToProto() return err }, @@ -210,8 +210,8 @@ func (suite *TendermintTestSuite) TestMisbehaviourValidateBasic() { { "validators sign off on wrong commit", &ibctm.Misbehaviour{ - Header1: suite.header, - Header2: suite.chainA.CreateTMClientHeader(chainID, int64(height.RevisionHeight), heightMinus1, suite.now, bothValSet, bothValSet, suite.valSet, bothSigners), + Header1: s.header, + Header2: s.chainA.CreateTMClientHeader(chainID, int64(height.RevisionHeight), heightMinus1, s.now, bothValSet, bothValSet, s.valSet, bothSigners), ClientId: clientID, }, func(misbehaviour *ibctm.Misbehaviour) error { @@ -224,16 +224,16 @@ func (suite *TendermintTestSuite) TestMisbehaviourValidateBasic() { } for i, tc := range testCases { - suite.Run(tc.name, func() { + s.Run(tc.name, func() { err := tc.malleateMisbehaviour(tc.misbehaviour) - suite.Require().NoError(err) + s.Require().NoError(err) err = tc.misbehaviour.ValidateBasic() if tc.expErr == nil { - suite.Require().NoError(err, "valid test case %d failed: %s", i, tc.name) + s.Require().NoError(err, "valid test case %d failed: %s", i, tc.name) } else { - suite.Require().Error(err, "invalid test case %d passed: %s", i, tc.name) - suite.Require().ErrorContains(err, tc.expErr.Error()) + s.Require().Error(err, "invalid test case %d passed: %s", i, tc.name) + s.Require().ErrorContains(err, tc.expErr.Error()) } }) } diff --git a/modules/light-clients/07-tendermint/proposal_handle.go b/modules/light-clients/07-tendermint/proposal_handle.go index cdbe9963b67..fce02623728 100644 --- a/modules/light-clients/07-tendermint/proposal_handle.go +++ b/modules/light-clients/07-tendermint/proposal_handle.go @@ -26,7 +26,7 @@ import ( // // In case 1) before updating the client, the client will be unfrozen by resetting // the FrozenHeight to the zero Height. 
-func (cs ClientState) CheckSubstituteAndUpdateState( +func (cs *ClientState) CheckSubstituteAndUpdateState( ctx sdk.Context, cdc codec.BinaryCodec, subjectClientStore, substituteClientStore storetypes.KVStore, substituteClient exported.ClientState, ) error { @@ -35,7 +35,7 @@ func (cs ClientState) CheckSubstituteAndUpdateState( return errorsmod.Wrapf(clienttypes.ErrInvalidClient, "expected type %T, got %T", &ClientState{}, substituteClient) } - if !IsMatchingClientState(cs, *substituteClientState) { + if !IsMatchingClientState(*cs, *substituteClientState) { return errorsmod.Wrap(clienttypes.ErrInvalidSubstitute, "subject client state does not match substitute client state") } @@ -76,7 +76,7 @@ func (cs ClientState) CheckSubstituteAndUpdateState( // no validation is necessary since the substitute is verified to be Active // in 02-client. - setClientState(subjectClientStore, cdc, &cs) + setClientState(subjectClientStore, cdc, cs) return nil } diff --git a/modules/light-clients/07-tendermint/proposal_handle_test.go b/modules/light-clients/07-tendermint/proposal_handle_test.go index 78fbe3ee6cc..b19520c11c7 100644 --- a/modules/light-clients/07-tendermint/proposal_handle_test.go +++ b/modules/light-clients/07-tendermint/proposal_handle_test.go @@ -11,7 +11,7 @@ import ( var frozenHeight = clienttypes.NewHeight(0, 1) -func (suite *TendermintTestSuite) TestCheckSubstituteUpdateStateBasic() { +func (s *TendermintTestSuite) TestCheckSubstituteUpdateStateBasic() { var ( substituteClientState exported.ClientState substitutePath *ibctesting.Path @@ -22,14 +22,14 @@ func (suite *TendermintTestSuite) TestCheckSubstituteUpdateStateBasic() { }{ { "solo machine used for substitute", func() { - substituteClientState = ibctesting.NewSolomachine(suite.T(), suite.cdc, "solo machine", "", 1).ClientState() + substituteClientState = ibctesting.NewSolomachine(s.T(), s.cdc, "solo machine", "", 1).ClientState() }, }, { "non-matching substitute", func() { substitutePath.SetupClients() - substituteClientState, ok := suite.chainA.GetClientState(substitutePath.EndpointA.ClientID).(*ibctm.ClientState) - suite.Require().True(ok) + substituteClientState, ok := s.chainA.GetClientState(substitutePath.EndpointA.ClientID).(*ibctm.ClientState) + s.Require().True(ok) // change trusting period so that test should fail substituteClientState.TrustingPeriod = time.Hour * 24 * 7 @@ -40,31 +40,31 @@ func (suite *TendermintTestSuite) TestCheckSubstituteUpdateStateBasic() { } for _, tc := range testCases { - suite.Run(tc.name, func() { - suite.SetupTest() // reset - subjectPath := ibctesting.NewPath(suite.chainA, suite.chainB) - substitutePath = ibctesting.NewPath(suite.chainA, suite.chainB) + s.Run(tc.name, func() { + s.SetupTest() // reset + subjectPath := ibctesting.NewPath(s.chainA, s.chainB) + substitutePath = ibctesting.NewPath(s.chainA, s.chainB) subjectPath.SetupClients() - subjectClientState, ok := suite.chainA.GetClientState(subjectPath.EndpointA.ClientID).(*ibctm.ClientState) - suite.Require().True(ok) + subjectClientState, ok := s.chainA.GetClientState(subjectPath.EndpointA.ClientID).(*ibctm.ClientState) + s.Require().True(ok) // expire subject client - suite.coordinator.IncrementTimeBy(subjectClientState.TrustingPeriod) - suite.coordinator.CommitBlock(suite.chainA, suite.chainB) + s.coordinator.IncrementTimeBy(subjectClientState.TrustingPeriod) + s.coordinator.CommitBlock(s.chainA, s.chainB) tc.malleate() - subjectClientStore := suite.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(suite.chainA.GetContext(), 
subjectPath.EndpointA.ClientID) - substituteClientStore := suite.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(suite.chainA.GetContext(), substitutePath.EndpointA.ClientID) + subjectClientStore := s.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(s.chainA.GetContext(), subjectPath.EndpointA.ClientID) + substituteClientStore := s.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(s.chainA.GetContext(), substitutePath.EndpointA.ClientID) - err := subjectClientState.CheckSubstituteAndUpdateState(suite.chainA.GetContext(), suite.chainA.App.AppCodec(), subjectClientStore, substituteClientStore, substituteClientState) - suite.Require().Error(err) + err := subjectClientState.CheckSubstituteAndUpdateState(s.chainA.GetContext(), s.chainA.App.AppCodec(), subjectClientStore, substituteClientStore, substituteClientState) + s.Require().Error(err) }) } } -func (suite *TendermintTestSuite) TestCheckSubstituteAndUpdateState() { +func (s *TendermintTestSuite) TestCheckSubstituteAndUpdateState() { testCases := []struct { name string FreezeClient bool @@ -83,14 +83,14 @@ func (suite *TendermintTestSuite) TestCheckSubstituteAndUpdateState() { } for _, tc := range testCases { - suite.Run(tc.name, func() { - suite.SetupTest() // reset + s.Run(tc.name, func() { + s.SetupTest() // reset // construct subject using test case parameters - subjectPath := ibctesting.NewPath(suite.chainA, suite.chainB) + subjectPath := ibctesting.NewPath(s.chainA, s.chainB) subjectPath.SetupClients() - subjectClientState, ok := suite.chainA.GetClientState(subjectPath.EndpointA.ClientID).(*ibctm.ClientState) - suite.Require().True(ok) + subjectClientState, ok := s.chainA.GetClientState(subjectPath.EndpointA.ClientID).(*ibctm.ClientState) + s.Require().True(ok) if tc.FreezeClient { subjectClientState.FrozenHeight = frozenHeight @@ -98,76 +98,76 @@ func (suite *TendermintTestSuite) TestCheckSubstituteAndUpdateState() { // construct the substitute to match the subject client - substitutePath := ibctesting.NewPath(suite.chainA, suite.chainB) + substitutePath := ibctesting.NewPath(s.chainA, s.chainB) substitutePath.SetupClients() - substituteClientState, ok := suite.chainA.GetClientState(substitutePath.EndpointA.ClientID).(*ibctm.ClientState) - suite.Require().True(ok) + substituteClientState, ok := s.chainA.GetClientState(substitutePath.EndpointA.ClientID).(*ibctm.ClientState) + s.Require().True(ok) // update trusting period of substitute client state substituteClientState.TrustingPeriod = time.Hour * 24 * 7 - suite.chainA.App.GetIBCKeeper().ClientKeeper.SetClientState(suite.chainA.GetContext(), substitutePath.EndpointA.ClientID, substituteClientState) + s.chainA.App.GetIBCKeeper().ClientKeeper.SetClientState(s.chainA.GetContext(), substitutePath.EndpointA.ClientID, substituteClientState) // update substitute a few times for range 3 { err := substitutePath.EndpointA.UpdateClient() - suite.Require().NoError(err) + s.Require().NoError(err) // skip a block - suite.coordinator.CommitBlock(suite.chainA, suite.chainB) + s.coordinator.CommitBlock(s.chainA, s.chainB) } // get updated substitute - substituteClientState, ok = suite.chainA.GetClientState(substitutePath.EndpointA.ClientID).(*ibctm.ClientState) - suite.Require().True(ok) + substituteClientState, ok = s.chainA.GetClientState(substitutePath.EndpointA.ClientID).(*ibctm.ClientState) + s.Require().True(ok) // test that subject gets updated chain-id newChainID := "new-chain-id" substituteClientState.ChainId = newChainID - subjectClientStore := 
suite.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(suite.chainA.GetContext(), subjectPath.EndpointA.ClientID) - substituteClientStore := suite.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(suite.chainA.GetContext(), substitutePath.EndpointA.ClientID) + subjectClientStore := s.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(s.chainA.GetContext(), subjectPath.EndpointA.ClientID) + substituteClientStore := s.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(s.chainA.GetContext(), substitutePath.EndpointA.ClientID) expectedConsState := substitutePath.EndpointA.GetConsensusState(substituteClientState.LatestHeight) expectedProcessedTime, found := ibctm.GetProcessedTime(substituteClientStore, substituteClientState.LatestHeight) - suite.Require().True(found) + s.Require().True(found) expectedProcessedHeight, found := ibctm.GetProcessedTime(substituteClientStore, substituteClientState.LatestHeight) - suite.Require().True(found) + s.Require().True(found) expectedIterationKey := ibctm.GetIterationKey(substituteClientStore, substituteClientState.LatestHeight) - err := subjectClientState.CheckSubstituteAndUpdateState(suite.chainA.GetContext(), suite.chainA.App.AppCodec(), subjectClientStore, substituteClientStore, substituteClientState) + err := subjectClientState.CheckSubstituteAndUpdateState(s.chainA.GetContext(), s.chainA.App.AppCodec(), subjectClientStore, substituteClientStore, substituteClientState) if tc.expError == nil { - suite.Require().NoError(err) + s.Require().NoError(err) updatedClient, ok := subjectPath.EndpointA.GetClientState().(*ibctm.ClientState) - suite.Require().True(ok) - suite.Require().Equal(clienttypes.ZeroHeight(), updatedClient.FrozenHeight) + s.Require().True(ok) + s.Require().Equal(clienttypes.ZeroHeight(), updatedClient.FrozenHeight) - subjectClientStore := suite.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(suite.chainA.GetContext(), subjectPath.EndpointA.ClientID) + subjectClientStore := s.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(s.chainA.GetContext(), subjectPath.EndpointA.ClientID) // check that the correct consensus state was copied over - suite.Require().Equal(substituteClientState.LatestHeight, updatedClient.LatestHeight) + s.Require().Equal(substituteClientState.LatestHeight, updatedClient.LatestHeight) subjectConsState := subjectPath.EndpointA.GetConsensusState(updatedClient.LatestHeight) subjectProcessedTime, found := ibctm.GetProcessedTime(subjectClientStore, updatedClient.LatestHeight) - suite.Require().True(found) + s.Require().True(found) subjectProcessedHeight, found := ibctm.GetProcessedTime(substituteClientStore, updatedClient.LatestHeight) - suite.Require().True(found) + s.Require().True(found) subjectIterationKey := ibctm.GetIterationKey(substituteClientStore, updatedClient.LatestHeight) - suite.Require().Equal(expectedConsState, subjectConsState) - suite.Require().Equal(expectedProcessedTime, subjectProcessedTime) - suite.Require().Equal(expectedProcessedHeight, subjectProcessedHeight) - suite.Require().Equal(expectedIterationKey, subjectIterationKey) + s.Require().Equal(expectedConsState, subjectConsState) + s.Require().Equal(expectedProcessedTime, subjectProcessedTime) + s.Require().Equal(expectedProcessedHeight, subjectProcessedHeight) + s.Require().Equal(expectedIterationKey, subjectIterationKey) - suite.Require().Equal(newChainID, updatedClient.ChainId) - suite.Require().Equal(time.Hour*24*7, updatedClient.TrustingPeriod) + s.Require().Equal(newChainID, updatedClient.ChainId) + s.Require().Equal(time.Hour*24*7, 
updatedClient.TrustingPeriod) } else { - suite.Require().Error(err) - suite.Require().ErrorContains(err, tc.expError.Error()) + s.Require().Error(err) + s.Require().ErrorContains(err, tc.expError.Error()) } }) } } -func (suite *TendermintTestSuite) TestIsMatchingClientState() { +func (s *TendermintTestSuite) TestIsMatchingClientState() { var ( subjectPath, substitutePath *ibctesting.Path subjectClientState, substituteClientState *ibctm.ClientState @@ -181,10 +181,10 @@ func (suite *TendermintTestSuite) TestIsMatchingClientState() { { "matching clients", func() { var ok bool - subjectClientState, ok = suite.chainA.GetClientState(subjectPath.EndpointA.ClientID).(*ibctm.ClientState) - suite.Require().True(ok) - substituteClientState, ok = suite.chainA.GetClientState(substitutePath.EndpointA.ClientID).(*ibctm.ClientState) - suite.Require().True(ok) + subjectClientState, ok = s.chainA.GetClientState(subjectPath.EndpointA.ClientID).(*ibctm.ClientState) + s.Require().True(ok) + substituteClientState, ok = s.chainA.GetClientState(substitutePath.EndpointA.ClientID).(*ibctm.ClientState) + s.Require().True(ok) }, true, }, { @@ -220,17 +220,17 @@ func (suite *TendermintTestSuite) TestIsMatchingClientState() { } for _, tc := range testCases { - suite.Run(tc.name, func() { - suite.SetupTest() // reset + s.Run(tc.name, func() { + s.SetupTest() // reset - subjectPath = ibctesting.NewPath(suite.chainA, suite.chainB) - substitutePath = ibctesting.NewPath(suite.chainA, suite.chainB) + subjectPath = ibctesting.NewPath(s.chainA, s.chainB) + substitutePath = ibctesting.NewPath(s.chainA, s.chainB) subjectPath.SetupClients() substitutePath.SetupClients() tc.malleate() - suite.Require().Equal(tc.isMatch, ibctm.IsMatchingClientState(*subjectClientState, *substituteClientState)) + s.Require().Equal(tc.isMatch, ibctm.IsMatchingClientState(*subjectClientState, *substituteClientState)) }) } } diff --git a/modules/light-clients/07-tendermint/store_test.go b/modules/light-clients/07-tendermint/store_test.go index 26df63bc2ce..89f2dac5a69 100644 --- a/modules/light-clients/07-tendermint/store_test.go +++ b/modules/light-clients/07-tendermint/store_test.go @@ -13,7 +13,7 @@ import ( ibctesting "github.com/cosmos/ibc-go/v10/testing" ) -func (suite *TendermintTestSuite) TestGetConsensusState() { +func (s *TendermintTestSuite) TestGetConsensusState() { var ( height exported.Height path *ibctesting.Path @@ -37,25 +37,25 @@ func (suite *TendermintTestSuite) TestGetConsensusState() { { "not a consensus state interface", func() { // marshal an empty client state and set as consensus state - store := suite.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(suite.chainA.GetContext(), path.EndpointA.ClientID) - clientStateBz := clienttypes.MustMarshalClientState(suite.chainA.App.AppCodec(), &tendermint.ClientState{}) + store := s.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(s.chainA.GetContext(), path.EndpointA.ClientID) + clientStateBz := clienttypes.MustMarshalClientState(s.chainA.App.AppCodec(), &tendermint.ClientState{}) store.Set(host.ConsensusStateKey(height), clientStateBz) }, false, true, }, { "invalid consensus state (solomachine)", func() { // marshal and set solomachine consensus state - store := suite.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(suite.chainA.GetContext(), path.EndpointA.ClientID) - consensusStateBz := clienttypes.MustMarshalConsensusState(suite.chainA.App.AppCodec(), &solomachine.ConsensusState{}) + store := s.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(s.chainA.GetContext(), 
path.EndpointA.ClientID) + consensusStateBz := clienttypes.MustMarshalConsensusState(s.chainA.App.AppCodec(), &solomachine.ConsensusState{}) store.Set(host.ConsensusStateKey(height), consensusStateBz) }, false, true, }, } for _, tc := range testCases { - suite.Run(tc.name, func() { - suite.SetupTest() - path = ibctesting.NewPath(suite.chainA, suite.chainB) + s.Run(tc.name, func() { + s.SetupTest() + path = ibctesting.NewPath(s.chainA, s.chainB) path.Setup() @@ -64,69 +64,69 @@ func (suite *TendermintTestSuite) TestGetConsensusState() { tc.malleate() // change vars as necessary if tc.expPanic { - suite.Require().Panics(func() { - store := suite.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(suite.chainA.GetContext(), path.EndpointA.ClientID) - tendermint.GetConsensusState(store, suite.chainA.Codec, height) + s.Require().Panics(func() { + store := s.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(s.chainA.GetContext(), path.EndpointA.ClientID) + tendermint.GetConsensusState(store, s.chainA.Codec, height) }) return } - store := suite.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(suite.chainA.GetContext(), path.EndpointA.ClientID) - consensusState, found := tendermint.GetConsensusState(store, suite.chainA.Codec, height) + store := s.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(s.chainA.GetContext(), path.EndpointA.ClientID) + consensusState, found := tendermint.GetConsensusState(store, s.chainA.Codec, height) if tc.expPass { - suite.Require().True(found) + s.Require().True(found) - expConsensusState, found := suite.chainA.GetConsensusState(path.EndpointA.ClientID, height) - suite.Require().True(found) - suite.Require().Equal(expConsensusState, consensusState) + expConsensusState, found := s.chainA.GetConsensusState(path.EndpointA.ClientID, height) + s.Require().True(found) + s.Require().Equal(expConsensusState, consensusState) } else { - suite.Require().False(found) - suite.Require().Nil(consensusState) + s.Require().False(found) + s.Require().Nil(consensusState) } }) } } -func (suite *TendermintTestSuite) TestGetProcessedTime() { - path := ibctesting.NewPath(suite.chainA, suite.chainB) - suite.coordinator.UpdateTime() +func (s *TendermintTestSuite) TestGetProcessedTime() { + path := ibctesting.NewPath(s.chainA, s.chainB) + s.coordinator.UpdateTime() - expectedTime := suite.chainA.ProposedHeader.Time + expectedTime := s.chainA.ProposedHeader.Time // Verify ProcessedTime on CreateClient err := path.EndpointA.CreateClient() - suite.Require().NoError(err) + s.Require().NoError(err) height := path.EndpointA.GetClientLatestHeight() - store := suite.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(suite.chainA.GetContext(), path.EndpointA.ClientID) + store := s.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(s.chainA.GetContext(), path.EndpointA.ClientID) actualTime, ok := tendermint.GetProcessedTime(store, height) - suite.Require().True(ok, "could not retrieve processed time for stored consensus state") - suite.Require().Equal(uint64(expectedTime.UnixNano()), actualTime, "retrieved processed time is not expected value") + s.Require().True(ok, "could not retrieve processed time for stored consensus state") + s.Require().Equal(uint64(expectedTime.UnixNano()), actualTime, "retrieved processed time is not expected value") - suite.coordinator.UpdateTime() + s.coordinator.UpdateTime() // coordinator increments time before updating client - expectedTime = suite.chainA.ProposedHeader.Time.Add(ibctesting.TimeIncrement) + expectedTime = 
s.chainA.ProposedHeader.Time.Add(ibctesting.TimeIncrement) // Verify ProcessedTime on UpdateClient err = path.EndpointA.UpdateClient() - suite.Require().NoError(err) + s.Require().NoError(err) height = path.EndpointA.GetClientLatestHeight() - store = suite.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(suite.chainA.GetContext(), path.EndpointA.ClientID) + store = s.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(s.chainA.GetContext(), path.EndpointA.ClientID) actualTime, ok = tendermint.GetProcessedTime(store, height) - suite.Require().True(ok, "could not retrieve processed time for stored consensus state") - suite.Require().Equal(uint64(expectedTime.UnixNano()), actualTime, "retrieved processed time is not expected value") + s.Require().True(ok, "could not retrieve processed time for stored consensus state") + s.Require().Equal(uint64(expectedTime.UnixNano()), actualTime, "retrieved processed time is not expected value") // try to get processed time for height that doesn't exist in store _, ok = tendermint.GetProcessedTime(store, clienttypes.NewHeight(1, 1)) - suite.Require().False(ok, "retrieved processed time for a non-existent consensus state") + s.Require().False(ok, "retrieved processed time for a non-existent consensus state") } -func (suite *TendermintTestSuite) TestIterationKey() { +func (s *TendermintTestSuite) TestIterationKey() { testHeights := []exported.Height{ clienttypes.NewHeight(0, 1), clienttypes.NewHeight(0, 1234), @@ -136,24 +136,24 @@ func (suite *TendermintTestSuite) TestIterationKey() { for _, h := range testHeights { k := tendermint.IterationKey(h) retrievedHeight := tendermint.GetHeightFromIterationKey(k) - suite.Require().Equal(h, retrievedHeight, "retrieving height from iteration key failed") + s.Require().Equal(h, retrievedHeight, "retrieving height from iteration key failed") } } -func (suite *TendermintTestSuite) TestIterateConsensusStates() { +func (s *TendermintTestSuite) TestIterateConsensusStates() { nextValsHash := []byte("nextVals") // Set iteration keys and consensus states - tendermint.SetIterationKey(suite.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(suite.chainA.GetContext(), "testClient"), clienttypes.NewHeight(0, 1)) - suite.chainA.App.GetIBCKeeper().ClientKeeper.SetClientConsensusState(suite.chainA.GetContext(), "testClient", clienttypes.NewHeight(0, 1), tendermint.NewConsensusState(time.Now(), commitmenttypes.NewMerkleRoot([]byte("hash0-1")), nextValsHash)) - tendermint.SetIterationKey(suite.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(suite.chainA.GetContext(), "testClient"), clienttypes.NewHeight(4, 9)) - suite.chainA.App.GetIBCKeeper().ClientKeeper.SetClientConsensusState(suite.chainA.GetContext(), "testClient", clienttypes.NewHeight(4, 9), tendermint.NewConsensusState(time.Now(), commitmenttypes.NewMerkleRoot([]byte("hash4-9")), nextValsHash)) - tendermint.SetIterationKey(suite.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(suite.chainA.GetContext(), "testClient"), clienttypes.NewHeight(0, 10)) - suite.chainA.App.GetIBCKeeper().ClientKeeper.SetClientConsensusState(suite.chainA.GetContext(), "testClient", clienttypes.NewHeight(0, 10), tendermint.NewConsensusState(time.Now(), commitmenttypes.NewMerkleRoot([]byte("hash0-10")), nextValsHash)) - tendermint.SetIterationKey(suite.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(suite.chainA.GetContext(), "testClient"), clienttypes.NewHeight(0, 4)) - suite.chainA.App.GetIBCKeeper().ClientKeeper.SetClientConsensusState(suite.chainA.GetContext(), "testClient", 
clienttypes.NewHeight(0, 4), tendermint.NewConsensusState(time.Now(), commitmenttypes.NewMerkleRoot([]byte("hash0-4")), nextValsHash)) - tendermint.SetIterationKey(suite.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(suite.chainA.GetContext(), "testClient"), clienttypes.NewHeight(40, 1)) - suite.chainA.App.GetIBCKeeper().ClientKeeper.SetClientConsensusState(suite.chainA.GetContext(), "testClient", clienttypes.NewHeight(40, 1), tendermint.NewConsensusState(time.Now(), commitmenttypes.NewMerkleRoot([]byte("hash40-1")), nextValsHash)) + tendermint.SetIterationKey(s.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(s.chainA.GetContext(), "testClient"), clienttypes.NewHeight(0, 1)) + s.chainA.App.GetIBCKeeper().ClientKeeper.SetClientConsensusState(s.chainA.GetContext(), "testClient", clienttypes.NewHeight(0, 1), tendermint.NewConsensusState(time.Now(), commitmenttypes.NewMerkleRoot([]byte("hash0-1")), nextValsHash)) + tendermint.SetIterationKey(s.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(s.chainA.GetContext(), "testClient"), clienttypes.NewHeight(4, 9)) + s.chainA.App.GetIBCKeeper().ClientKeeper.SetClientConsensusState(s.chainA.GetContext(), "testClient", clienttypes.NewHeight(4, 9), tendermint.NewConsensusState(time.Now(), commitmenttypes.NewMerkleRoot([]byte("hash4-9")), nextValsHash)) + tendermint.SetIterationKey(s.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(s.chainA.GetContext(), "testClient"), clienttypes.NewHeight(0, 10)) + s.chainA.App.GetIBCKeeper().ClientKeeper.SetClientConsensusState(s.chainA.GetContext(), "testClient", clienttypes.NewHeight(0, 10), tendermint.NewConsensusState(time.Now(), commitmenttypes.NewMerkleRoot([]byte("hash0-10")), nextValsHash)) + tendermint.SetIterationKey(s.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(s.chainA.GetContext(), "testClient"), clienttypes.NewHeight(0, 4)) + s.chainA.App.GetIBCKeeper().ClientKeeper.SetClientConsensusState(s.chainA.GetContext(), "testClient", clienttypes.NewHeight(0, 4), tendermint.NewConsensusState(time.Now(), commitmenttypes.NewMerkleRoot([]byte("hash0-4")), nextValsHash)) + tendermint.SetIterationKey(s.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(s.chainA.GetContext(), "testClient"), clienttypes.NewHeight(40, 1)) + s.chainA.App.GetIBCKeeper().ClientKeeper.SetClientConsensusState(s.chainA.GetContext(), "testClient", clienttypes.NewHeight(40, 1), tendermint.NewConsensusState(time.Now(), commitmenttypes.NewMerkleRoot([]byte("hash40-1")), nextValsHash)) var testArr []string cb := func(height exported.Height) bool { @@ -161,12 +161,12 @@ func (suite *TendermintTestSuite) TestIterateConsensusStates() { return false } - tendermint.IterateConsensusStateAscending(suite.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(suite.chainA.GetContext(), "testClient"), cb) + tendermint.IterateConsensusStateAscending(s.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(s.chainA.GetContext(), "testClient"), cb) expectedArr := []string{"0-1", "0-4", "0-10", "4-9", "40-1"} - suite.Require().Equal(expectedArr, testArr) + s.Require().Equal(expectedArr, testArr) } -func (suite *TendermintTestSuite) TestGetNeighboringConsensusStates() { +func (s *TendermintTestSuite) TestGetNeighboringConsensusStates() { nextValsHash := []byte("nextVals") cs01 := tendermint.NewConsensusState(time.Now().UTC(), commitmenttypes.NewMerkleRoot([]byte("hash0-1")), nextValsHash) cs04 := tendermint.NewConsensusState(time.Now().UTC(), commitmenttypes.NewMerkleRoot([]byte("hash0-4")), nextValsHash) @@ -176,24 +176,24 @@ func (suite 
*TendermintTestSuite) TestGetNeighboringConsensusStates() { height49 := clienttypes.NewHeight(4, 9) // Set iteration keys and consensus states - tendermint.SetIterationKey(suite.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(suite.chainA.GetContext(), "testClient"), height01) - suite.chainA.App.GetIBCKeeper().ClientKeeper.SetClientConsensusState(suite.chainA.GetContext(), "testClient", height01, cs01) - tendermint.SetIterationKey(suite.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(suite.chainA.GetContext(), "testClient"), height04) - suite.chainA.App.GetIBCKeeper().ClientKeeper.SetClientConsensusState(suite.chainA.GetContext(), "testClient", height04, cs04) - tendermint.SetIterationKey(suite.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(suite.chainA.GetContext(), "testClient"), height49) - suite.chainA.App.GetIBCKeeper().ClientKeeper.SetClientConsensusState(suite.chainA.GetContext(), "testClient", height49, cs49) - - prevCs01, ok := tendermint.GetPreviousConsensusState(suite.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(suite.chainA.GetContext(), "testClient"), suite.chainA.Codec, height01) - suite.Require().Nil(prevCs01, "consensus state exists before lowest consensus state") - suite.Require().False(ok) - prevCs49, ok := tendermint.GetPreviousConsensusState(suite.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(suite.chainA.GetContext(), "testClient"), suite.chainA.Codec, height49) - suite.Require().Equal(cs04, prevCs49, "previous consensus state is not returned correctly") - suite.Require().True(ok) - - nextCs01, ok := tendermint.GetNextConsensusState(suite.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(suite.chainA.GetContext(), "testClient"), suite.chainA.Codec, height01) - suite.Require().Equal(cs04, nextCs01, "next consensus state not returned correctly") - suite.Require().True(ok) - nextCs49, ok := tendermint.GetNextConsensusState(suite.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(suite.chainA.GetContext(), "testClient"), suite.chainA.Codec, height49) - suite.Require().Nil(nextCs49, "next consensus state exists after highest consensus state") - suite.Require().False(ok) + tendermint.SetIterationKey(s.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(s.chainA.GetContext(), "testClient"), height01) + s.chainA.App.GetIBCKeeper().ClientKeeper.SetClientConsensusState(s.chainA.GetContext(), "testClient", height01, cs01) + tendermint.SetIterationKey(s.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(s.chainA.GetContext(), "testClient"), height04) + s.chainA.App.GetIBCKeeper().ClientKeeper.SetClientConsensusState(s.chainA.GetContext(), "testClient", height04, cs04) + tendermint.SetIterationKey(s.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(s.chainA.GetContext(), "testClient"), height49) + s.chainA.App.GetIBCKeeper().ClientKeeper.SetClientConsensusState(s.chainA.GetContext(), "testClient", height49, cs49) + + prevCs01, ok := tendermint.GetPreviousConsensusState(s.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(s.chainA.GetContext(), "testClient"), s.chainA.Codec, height01) + s.Require().Nil(prevCs01, "consensus state exists before lowest consensus state") + s.Require().False(ok) + prevCs49, ok := tendermint.GetPreviousConsensusState(s.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(s.chainA.GetContext(), "testClient"), s.chainA.Codec, height49) + s.Require().Equal(cs04, prevCs49, "previous consensus state is not returned correctly") + s.Require().True(ok) + + nextCs01, ok := 
tendermint.GetNextConsensusState(s.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(s.chainA.GetContext(), "testClient"), s.chainA.Codec, height01) + s.Require().Equal(cs04, nextCs01, "next consensus state not returned correctly") + s.Require().True(ok) + nextCs49, ok := tendermint.GetNextConsensusState(s.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(s.chainA.GetContext(), "testClient"), s.chainA.Codec, height49) + s.Require().Nil(nextCs49, "next consensus state exists after highest consensus state") + s.Require().False(ok) } diff --git a/modules/light-clients/07-tendermint/tendermint_test.go b/modules/light-clients/07-tendermint/tendermint_test.go index decb64b1dfa..595032c5a5a 100644 --- a/modules/light-clients/07-tendermint/tendermint_test.go +++ b/modules/light-clients/07-tendermint/tendermint_test.go @@ -57,59 +57,59 @@ type TendermintTestSuite struct { clientTime time.Time } -func (suite *TendermintTestSuite) SetupTest() { - suite.coordinator = ibctesting.NewCoordinator(suite.T(), 2) - suite.chainA = suite.coordinator.GetChain(ibctesting.GetChainID(1)) - suite.chainB = suite.coordinator.GetChain(ibctesting.GetChainID(2)) +func TestTendermintTestSuite(t *testing.T) { + testifysuite.Run(t, new(TendermintTestSuite)) +} + +func (s *TendermintTestSuite) SetupTest() { + s.coordinator = ibctesting.NewCoordinator(s.T(), 2) + s.chainA = s.coordinator.GetChain(ibctesting.GetChainID(1)) + s.chainB = s.coordinator.GetChain(ibctesting.GetChainID(2)) // commit some blocks so that QueryProof returns valid proof (cannot return valid query if height <= 1) - suite.coordinator.CommitNBlocks(suite.chainA, 2) - suite.coordinator.CommitNBlocks(suite.chainB, 2) + s.coordinator.CommitNBlocks(s.chainA, 2) + s.coordinator.CommitNBlocks(s.chainB, 2) // TODO: deprecate usage in favor of testing package checkTx := false - app := simapp.Setup(suite.T(), checkTx) + app := simapp.Setup(s.T(), checkTx) - suite.cdc = app.AppCodec() + s.cdc = app.AppCodec() // now is the time of the current chain, must be after the updating header // mocks ctx.BlockTime() - suite.now = time.Date(2020, 1, 2, 0, 0, 0, 0, time.UTC) - suite.clientTime = time.Date(2020, 1, 1, 0, 0, 0, 0, time.UTC) + s.now = time.Date(2020, 1, 2, 0, 0, 0, 0, time.UTC) + s.clientTime = time.Date(2020, 1, 1, 0, 0, 0, 0, time.UTC) // Header time is intended to be time for any new header used for updates - suite.headerTime = time.Date(2020, 1, 2, 0, 0, 0, 0, time.UTC) + s.headerTime = time.Date(2020, 1, 2, 0, 0, 0, 0, time.UTC) - suite.privVal = cmttypes.NewMockPV() + s.privVal = cmttypes.NewMockPV() - pubKey, err := suite.privVal.GetPubKey() - suite.Require().NoError(err) + pubKey, err := s.privVal.GetPubKey() + s.Require().NoError(err) heightMinus1 := clienttypes.NewHeight(0, height.RevisionHeight-1) val := cmttypes.NewValidator(pubKey, 10) - suite.signers = make(map[string]cmttypes.PrivValidator) - suite.signers[val.Address.String()] = suite.privVal - suite.valSet = cmttypes.NewValidatorSet([]*cmttypes.Validator{val}) - suite.valsHash = suite.valSet.Hash() - suite.header = suite.chainA.CreateTMClientHeader(chainID, int64(height.RevisionHeight), heightMinus1, suite.now, suite.valSet, suite.valSet, suite.valSet, suite.signers) - suite.ctx = app.NewContext(checkTx) + s.signers = make(map[string]cmttypes.PrivValidator) + s.signers[val.Address.String()] = s.privVal + s.valSet = cmttypes.NewValidatorSet([]*cmttypes.Validator{val}) + s.valsHash = s.valSet.Hash() + s.header = s.chainA.CreateTMClientHeader(chainID, int64(height.RevisionHeight), heightMinus1, 
s.now, s.valSet, s.valSet, s.valSet, s.signers) + s.ctx = app.NewContext(checkTx) } func getAltSigners(altVal *cmttypes.Validator, altPrivVal cmttypes.PrivValidator) map[string]cmttypes.PrivValidator { return map[string]cmttypes.PrivValidator{altVal.Address.String(): altPrivVal} } -func getBothSigners(suite *TendermintTestSuite, altVal *cmttypes.Validator, altPrivVal cmttypes.PrivValidator) (*cmttypes.ValidatorSet, map[string]cmttypes.PrivValidator) { +func getBothSigners(s *TendermintTestSuite, altVal *cmttypes.Validator, altPrivVal cmttypes.PrivValidator) (*cmttypes.ValidatorSet, map[string]cmttypes.PrivValidator) { // Create bothValSet with both suite validator and altVal. Would be valid update - bothValSet := cmttypes.NewValidatorSet(append(suite.valSet.Validators, altVal)) + bothValSet := cmttypes.NewValidatorSet(append(s.valSet.Validators, altVal)) // Create signer array and ensure it is in same order as bothValSet - _, suiteVal := suite.valSet.GetByIndex(0) + _, suiteVal := s.valSet.GetByIndex(0) bothSigners := map[string]cmttypes.PrivValidator{ - suiteVal.Address.String(): suite.privVal, + suiteVal.Address.String(): s.privVal, altVal.Address.String(): altPrivVal, } return bothValSet, bothSigners } - -func TestTendermintTestSuite(t *testing.T) { - testifysuite.Run(t, new(TendermintTestSuite)) -} diff --git a/modules/light-clients/07-tendermint/update.go b/modules/light-clients/07-tendermint/update.go index 01f0912eeb2..9ba0e29f5a1 100644 --- a/modules/light-clients/07-tendermint/update.go +++ b/modules/light-clients/07-tendermint/update.go @@ -132,7 +132,7 @@ func (cs *ClientState) verifyHeader( // number must be the same. To update to a new revision, use a separate upgrade path // UpdateState will prune the oldest consensus state if it is expired. // If the provided clientMsg is not of type of Header then the handler will noop and empty slice is returned. -func (cs ClientState) UpdateState(ctx sdk.Context, cdc codec.BinaryCodec, clientStore storetypes.KVStore, clientMsg exported.ClientMessage) []exported.Height { +func (cs *ClientState) UpdateState(ctx sdk.Context, cdc codec.BinaryCodec, clientStore storetypes.KVStore, clientMsg exported.ClientMessage) []exported.Height { header, ok := clientMsg.(*Header) if !ok { // clientMsg is invalid Misbehaviour, no update necessary @@ -166,7 +166,7 @@ func (cs ClientState) UpdateState(ctx sdk.Context, cdc codec.BinaryCodec, client } // set client state, consensus state and associated metadata - setClientState(clientStore, cdc, &cs) + setClientState(clientStore, cdc, cs) setConsensusState(clientStore, cdc, consensusState, header.GetHeight()) setConsensusMetadata(ctx, clientStore, header.GetHeight()) @@ -176,7 +176,7 @@ func (cs ClientState) UpdateState(ctx sdk.Context, cdc codec.BinaryCodec, client // pruneOldestConsensusState will retrieve the earliest consensus state for this clientID and check if it is expired. If it is, // that consensus state will be pruned from store along with all associated metadata. This will prevent the client store from // becoming bloated with expired consensus states that can no longer be used for updates and packet verification. 
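The update.go hunks in this part of the diff switch ClientState methods such as UpdateState, pruneOldestConsensusState and UpdateStateOnMisbehaviour from value receivers to pointer receivers, so the receiver can be handed straight to setClientState and MustMarshalClientState instead of taking the address of a local copy (&cs). A minimal sketch of that receiver pattern, using toy types rather than the ibc-go API:

package main

import "fmt"

type clientState struct{ frozenHeight uint64 }

// encode stands in for codec marshalling (e.g. MustMarshalClientState); it
// expects a pointer, which is why a value receiver has to pass &cs.
func encode(cs *clientState) []byte {
	return []byte(fmt.Sprintf("frozen=%d", cs.frozenHeight))
}

// value receiver: mutations apply to a local copy, and persisting the change
// requires taking the address of that copy.
func (cs clientState) freezeByValue(store map[string][]byte) {
	cs.frozenHeight = 1
	store["client"] = encode(&cs)
}

// pointer receiver: the receiver itself is mutated and can be passed on directly.
func (cs *clientState) freezeByPointer(store map[string][]byte) {
	cs.frozenHeight = 1
	store["client"] = encode(cs)
}

func main() {
	store := map[string][]byte{}
	cs := &clientState{}
	cs.freezeByPointer(store)
	fmt.Println(string(store["client"])) // frozen=1
}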
-func (cs ClientState) pruneOldestConsensusState(ctx sdk.Context, cdc codec.BinaryCodec, clientStore storetypes.KVStore) { +func (cs *ClientState) pruneOldestConsensusState(ctx sdk.Context, cdc codec.BinaryCodec, clientStore storetypes.KVStore) { // Check the earliest consensus state to see if it is expired, if so then set the prune height // so that we can delete consensus state and all associated metadata. var ( @@ -208,10 +208,10 @@ func (cs ClientState) pruneOldestConsensusState(ctx sdk.Context, cdc codec.Binar // UpdateStateOnMisbehaviour updates state upon misbehaviour, freezing the ClientState. This method should only be called when misbehaviour is detected // as it does not perform any misbehaviour checks. -func (cs ClientState) UpdateStateOnMisbehaviour(ctx sdk.Context, cdc codec.BinaryCodec, clientStore storetypes.KVStore, _ exported.ClientMessage) { +func (cs *ClientState) UpdateStateOnMisbehaviour(ctx sdk.Context, cdc codec.BinaryCodec, clientStore storetypes.KVStore, _ exported.ClientMessage) { cs.FrozenHeight = FrozenHeight - clientStore.Set(host.ClientStateKey(), clienttypes.MustMarshalClientState(cdc, &cs)) + clientStore.Set(host.ClientStateKey(), clienttypes.MustMarshalClientState(cdc, cs)) } // checkTrustedHeader checks that consensus state matches trusted fields of Header diff --git a/modules/light-clients/07-tendermint/update_test.go b/modules/light-clients/07-tendermint/update_test.go index d170ee8b475..14f0662f0dc 100644 --- a/modules/light-clients/07-tendermint/update_test.go +++ b/modules/light-clients/07-tendermint/update_test.go @@ -18,7 +18,7 @@ import ( ibctesting "github.com/cosmos/ibc-go/v10/testing" ) -func (suite *TendermintTestSuite) TestVerifyHeader() { +func (s *TendermintTestSuite) TestVerifyHeader() { var ( path *ibctesting.Path header *ibctm.Header @@ -27,7 +27,7 @@ func (suite *TendermintTestSuite) TestVerifyHeader() { // Setup different validators and signers for testing different types of updates altPrivVal := cmttypes.NewMockPV() altPubKey, err := altPrivVal.GetPubKey() - suite.Require().NoError(err) + s.Require().NoError(err) revisionHeight := int64(height.RevisionHeight) @@ -51,19 +51,19 @@ func (suite *TendermintTestSuite) TestVerifyHeader() { name: "successful verify header for header with a previous height", malleate: func() { trustedHeight, ok := path.EndpointA.GetClientLatestHeight().(clienttypes.Height) - suite.Require().True(ok) + s.Require().True(ok) - trustedVals, ok := suite.chainB.TrustedValidators[trustedHeight.RevisionHeight+1] - suite.Require().True(ok) + trustedVals, ok := s.chainB.TrustedValidators[trustedHeight.RevisionHeight+1] + s.Require().True(ok) // passing the ProposedHeader.Height as the block height as it will become a previous height once we commit N blocks - header = suite.chainB.CreateTMClientHeader(suite.chainB.ChainID, suite.chainB.ProposedHeader.Height, trustedHeight, suite.chainB.ProposedHeader.Time, suite.chainB.Vals, suite.chainB.NextVals, trustedVals, suite.chainB.Signers) + header = s.chainB.CreateTMClientHeader(s.chainB.ChainID, s.chainB.ProposedHeader.Height, trustedHeight, s.chainB.ProposedHeader.Time, s.chainB.Vals, s.chainB.NextVals, trustedVals, s.chainB.Signers) // commit some blocks so that the created Header now has a previous height as the BlockHeight - suite.coordinator.CommitNBlocks(suite.chainB, 5) + s.coordinator.CommitNBlocks(s.chainB, 5) err = path.EndpointA.UpdateClient() - suite.Require().NoError(err) + s.Require().NoError(err) }, expErr: nil, }, @@ -71,17 +71,17 @@ func (suite 
*TendermintTestSuite) TestVerifyHeader() { name: "successful verify header: header with future height and different validator set", malleate: func() { trustedHeight, ok := path.EndpointA.GetClientLatestHeight().(clienttypes.Height) - suite.Require().True(ok) + s.Require().True(ok) - trustedVals, ok := suite.chainB.TrustedValidators[trustedHeight.RevisionHeight+1] - suite.Require().True(ok) + trustedVals, ok := s.chainB.TrustedValidators[trustedHeight.RevisionHeight+1] + s.Require().True(ok) // Create bothValSet with both suite validator and altVal - bothValSet := cmttypes.NewValidatorSet(append(suite.chainB.Vals.Validators, altVal)) - bothSigners := suite.chainB.Signers + bothValSet := cmttypes.NewValidatorSet(append(s.chainB.Vals.Validators, altVal)) + bothSigners := s.chainB.Signers bothSigners[altVal.Address.String()] = altPrivVal - header = suite.chainB.CreateTMClientHeader(suite.chainB.ChainID, suite.chainB.ProposedHeader.Height+5, trustedHeight, suite.chainB.ProposedHeader.Time, bothValSet, suite.chainB.NextVals, trustedVals, bothSigners) + header = s.chainB.CreateTMClientHeader(s.chainB.ChainID, s.chainB.ProposedHeader.Height+5, trustedHeight, s.chainB.ProposedHeader.Time, bothValSet, s.chainB.NextVals, trustedVals, bothSigners) }, expErr: nil, }, @@ -89,17 +89,17 @@ func (suite *TendermintTestSuite) TestVerifyHeader() { name: "successful verify header: header with next height and different validator set", malleate: func() { trustedHeight, ok := path.EndpointA.GetClientLatestHeight().(clienttypes.Height) - suite.Require().True(ok) + s.Require().True(ok) - trustedVals, ok := suite.chainB.TrustedValidators[trustedHeight.RevisionHeight+1] - suite.Require().True(ok) + trustedVals, ok := s.chainB.TrustedValidators[trustedHeight.RevisionHeight+1] + s.Require().True(ok) // Create bothValSet with both suite validator and altVal - bothValSet := cmttypes.NewValidatorSet(append(suite.chainB.Vals.Validators, altVal)) - bothSigners := suite.chainB.Signers + bothValSet := cmttypes.NewValidatorSet(append(s.chainB.Vals.Validators, altVal)) + bothSigners := s.chainB.Signers bothSigners[altVal.Address.String()] = altPrivVal - header = suite.chainB.CreateTMClientHeader(suite.chainB.ChainID, suite.chainB.ProposedHeader.Height, trustedHeight, suite.chainB.ProposedHeader.Time, bothValSet, suite.chainB.NextVals, trustedVals, bothSigners) + header = s.chainB.CreateTMClientHeader(s.chainB.ChainID, s.chainB.ProposedHeader.Height, trustedHeight, s.chainB.ProposedHeader.Time, bothValSet, s.chainB.NextVals, trustedVals, bothSigners) }, expErr: nil, }, @@ -107,14 +107,14 @@ func (suite *TendermintTestSuite) TestVerifyHeader() { name: "unsuccessful updates, passed in incorrect trusted validators for given consensus state", malleate: func() { trustedHeight, ok := path.EndpointA.GetClientLatestHeight().(clienttypes.Height) - suite.Require().True(ok) + s.Require().True(ok) // Create bothValSet with both suite validator and altVal - bothValSet := cmttypes.NewValidatorSet(append(suite.chainB.Vals.Validators, altVal)) - bothSigners := suite.chainB.Signers + bothValSet := cmttypes.NewValidatorSet(append(s.chainB.Vals.Validators, altVal)) + bothSigners := s.chainB.Signers bothSigners[altVal.Address.String()] = altPrivVal - header = suite.chainB.CreateTMClientHeader(suite.chainB.ChainID, suite.chainB.ProposedHeader.Height+1, trustedHeight, suite.chainB.ProposedHeader.Time, bothValSet, bothValSet, bothValSet, bothSigners) + header = s.chainB.CreateTMClientHeader(s.chainB.ChainID, s.chainB.ProposedHeader.Height+1, 
trustedHeight, s.chainB.ProposedHeader.Time, bothValSet, bothValSet, bothValSet, bothSigners) }, expErr: errors.New("invalid validator set"), }, @@ -122,13 +122,13 @@ func (suite *TendermintTestSuite) TestVerifyHeader() { name: "unsuccessful verify header with next height: update header mismatches nextValSetHash", malleate: func() { trustedHeight, ok := path.EndpointA.GetClientLatestHeight().(clienttypes.Height) - suite.Require().True(ok) + s.Require().True(ok) - trustedVals, ok := suite.chainB.TrustedValidators[trustedHeight.RevisionHeight+1] - suite.Require().True(ok) + trustedVals, ok := s.chainB.TrustedValidators[trustedHeight.RevisionHeight+1] + s.Require().True(ok) // this will err as altValSet.Hash() != consState.NextValidatorsHash - header = suite.chainB.CreateTMClientHeader(suite.chainB.ChainID, suite.chainB.ProposedHeader.Height+1, trustedHeight, suite.chainB.ProposedHeader.Time, altValSet, altValSet, trustedVals, altSigners) + header = s.chainB.CreateTMClientHeader(s.chainB.ChainID, s.chainB.ProposedHeader.Height+1, trustedHeight, s.chainB.ProposedHeader.Time, altValSet, altValSet, trustedVals, altSigners) }, expErr: errors.New("failed to verify header"), }, @@ -136,12 +136,12 @@ func (suite *TendermintTestSuite) TestVerifyHeader() { name: "unsuccessful update with future height: too much change in validator set", malleate: func() { trustedHeight, ok := path.EndpointA.GetClientLatestHeight().(clienttypes.Height) - suite.Require().True(ok) + s.Require().True(ok) - trustedVals, ok := suite.chainB.TrustedValidators[trustedHeight.RevisionHeight+1] - suite.Require().True(ok) + trustedVals, ok := s.chainB.TrustedValidators[trustedHeight.RevisionHeight+1] + s.Require().True(ok) - header = suite.chainB.CreateTMClientHeader(suite.chainB.ChainID, suite.chainB.ProposedHeader.Height+1, trustedHeight, suite.chainB.ProposedHeader.Time, altValSet, altValSet, trustedVals, altSigners) + header = s.chainB.CreateTMClientHeader(s.chainB.ChainID, s.chainB.ProposedHeader.Height+1, trustedHeight, s.chainB.ProposedHeader.Time, altValSet, altValSet, trustedVals, altSigners) }, expErr: errors.New("failed to verify header: cant trust new val set"), }, @@ -149,11 +149,11 @@ func (suite *TendermintTestSuite) TestVerifyHeader() { name: "unsuccessful verify header: header height revision and trusted height revision mismatch", malleate: func() { trustedHeight, ok := path.EndpointA.GetClientLatestHeight().(clienttypes.Height) - suite.Require().True(ok) - trustedVals, ok := suite.chainB.TrustedValidators[trustedHeight.RevisionHeight+1] - suite.Require().True(ok) + s.Require().True(ok) + trustedVals, ok := s.chainB.TrustedValidators[trustedHeight.RevisionHeight+1] + s.Require().True(ok) - header = suite.chainB.CreateTMClientHeader(chainIDRevision1, 3, trustedHeight, suite.chainB.ProposedHeader.Time, suite.chainB.Vals, suite.chainB.NextVals, trustedVals, suite.chainB.Signers) + header = s.chainB.CreateTMClientHeader(chainIDRevision1, 3, trustedHeight, s.chainB.ProposedHeader.Time, s.chainB.Vals, s.chainB.NextVals, trustedVals, s.chainB.Signers) }, expErr: errors.New("invalid client header"), }, @@ -161,15 +161,15 @@ func (suite *TendermintTestSuite) TestVerifyHeader() { name: "unsuccessful verify header: header height < consensus height", malleate: func() { trustedHeight, ok := path.EndpointA.GetClientLatestHeight().(clienttypes.Height) - suite.Require().True(ok) + s.Require().True(ok) - trustedVals, ok := suite.chainB.TrustedValidators[trustedHeight.RevisionHeight+1] - suite.Require().True(ok) + trustedVals, ok 
:= s.chainB.TrustedValidators[trustedHeight.RevisionHeight+1] + s.Require().True(ok) heightMinus1 := clienttypes.NewHeight(trustedHeight.RevisionNumber, trustedHeight.RevisionHeight-1) // Make new header at height less than latest client state - header = suite.chainB.CreateTMClientHeader(suite.chainB.ChainID, int64(heightMinus1.RevisionHeight), trustedHeight, suite.chainB.ProposedHeader.Time, suite.chainB.Vals, suite.chainB.NextVals, trustedVals, suite.chainB.Signers) + header = s.chainB.CreateTMClientHeader(s.chainB.ChainID, int64(heightMinus1.RevisionHeight), trustedHeight, s.chainB.ProposedHeader.Time, s.chainB.Vals, s.chainB.NextVals, trustedVals, s.chainB.Signers) }, expErr: errors.New("invalid client header"), }, @@ -185,12 +185,12 @@ func (suite *TendermintTestSuite) TestVerifyHeader() { name: "unsuccessful verify header: header timestamp is not past last client timestamp", malleate: func() { trustedHeight, ok := path.EndpointA.GetClientLatestHeight().(clienttypes.Height) - suite.Require().True(ok) + s.Require().True(ok) - trustedVals, ok := suite.chainB.TrustedValidators[trustedHeight.RevisionHeight] - suite.Require().True(ok) + trustedVals, ok := s.chainB.TrustedValidators[trustedHeight.RevisionHeight] + s.Require().True(ok) - header = suite.chainB.CreateTMClientHeader(suite.chainB.ChainID, suite.chainB.ProposedHeader.Height+1, trustedHeight, suite.chainB.ProposedHeader.Time.Add(-time.Minute), suite.chainB.Vals, suite.chainB.NextVals, trustedVals, suite.chainB.Signers) + header = s.chainB.CreateTMClientHeader(s.chainB.ChainID, s.chainB.ProposedHeader.Height+1, trustedHeight, s.chainB.ProposedHeader.Time.Add(-time.Minute), s.chainB.Vals, s.chainB.NextVals, trustedVals, s.chainB.Signers) }, expErr: errors.New("failed to verify header"), }, @@ -198,12 +198,12 @@ func (suite *TendermintTestSuite) TestVerifyHeader() { name: "unsuccessful verify header: header with incorrect header chain-id", malleate: func() { trustedHeight, ok := path.EndpointA.GetClientLatestHeight().(clienttypes.Height) - suite.Require().True(ok) + s.Require().True(ok) - trustedVals, ok := suite.chainB.TrustedValidators[trustedHeight.RevisionHeight] - suite.Require().True(ok) + trustedVals, ok := s.chainB.TrustedValidators[trustedHeight.RevisionHeight] + s.Require().True(ok) - header = suite.chainB.CreateTMClientHeader(chainID, suite.chainB.ProposedHeader.Height+1, trustedHeight, suite.chainB.ProposedHeader.Time, suite.chainB.Vals, suite.chainB.NextVals, trustedVals, suite.chainB.Signers) + header = s.chainB.CreateTMClientHeader(chainID, s.chainB.ProposedHeader.Height+1, trustedHeight, s.chainB.ProposedHeader.Time, s.chainB.Vals, s.chainB.NextVals, trustedVals, s.chainB.Signers) }, expErr: errors.New("header height revision 0 does not match trusted header revision 1"), }, @@ -211,14 +211,14 @@ func (suite *TendermintTestSuite) TestVerifyHeader() { name: "unsuccessful update: trusting period has passed since last client timestamp", malleate: func() { trustedHeight, ok := path.EndpointA.GetClientLatestHeight().(clienttypes.Height) - suite.Require().True(ok) + s.Require().True(ok) - trustedVals, ok := suite.chainB.TrustedValidators[trustedHeight.RevisionHeight] - suite.Require().True(ok) + trustedVals, ok := s.chainB.TrustedValidators[trustedHeight.RevisionHeight] + s.Require().True(ok) - header = suite.chainA.CreateTMClientHeader(suite.chainB.ChainID, suite.chainB.ProposedHeader.Height+1, trustedHeight, suite.chainB.ProposedHeader.Time, suite.chainB.Vals, suite.chainB.NextVals, trustedVals, suite.chainB.Signers) + 
header = s.chainA.CreateTMClientHeader(s.chainB.ChainID, s.chainB.ProposedHeader.Height+1, trustedHeight, s.chainB.ProposedHeader.Time, s.chainB.Vals, s.chainB.NextVals, trustedVals, s.chainB.Signers) - suite.chainB.ExpireClient(ibctesting.TrustingPeriod) + s.chainB.ExpireClient(ibctesting.TrustingPeriod) }, expErr: errors.New("failed to verify header"), }, @@ -226,17 +226,17 @@ func (suite *TendermintTestSuite) TestVerifyHeader() { name: "unsuccessful update for a previous revision", malleate: func() { trustedHeight, ok := path.EndpointA.GetClientLatestHeight().(clienttypes.Height) - suite.Require().True(ok) + s.Require().True(ok) - trustedVals, ok := suite.chainB.TrustedValidators[trustedHeight.RevisionHeight+1] - suite.Require().True(ok) + trustedVals, ok := s.chainB.TrustedValidators[trustedHeight.RevisionHeight+1] + s.Require().True(ok) // passing the ProposedHeader.Height as the block height as it will become an update to previous revision once we upgrade the client - header = suite.chainB.CreateTMClientHeader(suite.chainB.ChainID, suite.chainB.ProposedHeader.Height, trustedHeight, suite.chainB.ProposedHeader.Time, suite.chainB.Vals, suite.chainB.NextVals, trustedVals, suite.chainB.Signers) + header = s.chainB.CreateTMClientHeader(s.chainB.ChainID, s.chainB.ProposedHeader.Height, trustedHeight, s.chainB.ProposedHeader.Time, s.chainB.Vals, s.chainB.NextVals, trustedVals, s.chainB.Signers) // increment the revision of the chain err = path.EndpointB.UpgradeChain() - suite.Require().NoError(err) + s.Require().NoError(err) }, expErr: errors.New("failed to verify header"), }, @@ -244,17 +244,17 @@ func (suite *TendermintTestSuite) TestVerifyHeader() { name: "successful update with identical header to a previous update", malleate: func() { trustedHeight, ok := path.EndpointA.GetClientLatestHeight().(clienttypes.Height) - suite.Require().True(ok) + s.Require().True(ok) - trustedVals, ok := suite.chainB.TrustedValidators[trustedHeight.RevisionHeight+1] - suite.Require().True(ok) + trustedVals, ok := s.chainB.TrustedValidators[trustedHeight.RevisionHeight+1] + s.Require().True(ok) // passing the ProposedHeader.Height as the block height as it will become a previous height once we commit N blocks - header = suite.chainB.CreateTMClientHeader(suite.chainB.ChainID, suite.chainB.ProposedHeader.Height, trustedHeight, suite.chainB.ProposedHeader.Time, suite.chainB.Vals, suite.chainB.NextVals, trustedVals, suite.chainB.Signers) + header = s.chainB.CreateTMClientHeader(s.chainB.ChainID, s.chainB.ProposedHeader.Height, trustedHeight, s.chainB.ProposedHeader.Time, s.chainB.Vals, s.chainB.NextVals, trustedVals, s.chainB.Signers) // update client so the header constructed becomes a duplicate err = path.EndpointA.UpdateClient() - suite.Require().NoError(err) + s.Require().NoError(err) }, expErr: nil, }, @@ -263,12 +263,12 @@ func (suite *TendermintTestSuite) TestVerifyHeader() { name: "unsuccessful update to a future revision", malleate: func() { trustedHeight, ok := path.EndpointA.GetClientLatestHeight().(clienttypes.Height) - suite.Require().True(ok) + s.Require().True(ok) - trustedVals, ok := suite.chainB.TrustedValidators[trustedHeight.RevisionHeight+1] - suite.Require().True(ok) + trustedVals, ok := s.chainB.TrustedValidators[trustedHeight.RevisionHeight+1] + s.Require().True(ok) - header = suite.chainB.CreateTMClientHeader(suite.chainB.ChainID+"-1", suite.chainB.ProposedHeader.Height+5, trustedHeight, suite.chainB.ProposedHeader.Time, suite.chainB.Vals, suite.chainB.NextVals, trustedVals, 
suite.chainB.Signers) + header = s.chainB.CreateTMClientHeader(s.chainB.ChainID+"-1", s.chainB.ProposedHeader.Height+5, trustedHeight, s.chainB.ProposedHeader.Time, s.chainB.Vals, s.chainB.NextVals, trustedVals, s.chainB.Signers) }, expErr: errors.New("failed to verify header"), }, @@ -277,54 +277,54 @@ func (suite *TendermintTestSuite) TestVerifyHeader() { name: "unsuccessful update: header height revision and trusted height revision mismatch", malleate: func() { trustedHeight, ok := path.EndpointA.GetClientLatestHeight().(clienttypes.Height) - suite.Require().True(ok) + s.Require().True(ok) - trustedVals, ok := suite.chainB.TrustedValidators[trustedHeight.RevisionHeight+1] - suite.Require().True(ok) + trustedVals, ok := s.chainB.TrustedValidators[trustedHeight.RevisionHeight+1] + s.Require().True(ok) // increment the revision of the chain err = path.EndpointB.UpgradeChain() - suite.Require().NoError(err) + s.Require().NoError(err) - header = suite.chainB.CreateTMClientHeader(suite.chainB.ChainID, suite.chainB.ProposedHeader.Height, trustedHeight, suite.chainB.ProposedHeader.Time, suite.chainB.Vals, suite.chainB.NextVals, trustedVals, suite.chainB.Signers) + header = s.chainB.CreateTMClientHeader(s.chainB.ChainID, s.chainB.ProposedHeader.Height, trustedHeight, s.chainB.ProposedHeader.Time, s.chainB.Vals, s.chainB.NextVals, trustedVals, s.chainB.Signers) }, expErr: errors.New("header height revision 2 does not match trusted header revision 1"), }, } for _, tc := range testCases { - suite.Run(tc.name, func() { - suite.SetupTest() - path = ibctesting.NewPath(suite.chainA, suite.chainB) + s.Run(tc.name, func() { + s.SetupTest() + path = ibctesting.NewPath(s.chainA, s.chainB) err := path.EndpointA.CreateClient() - suite.Require().NoError(err) + s.Require().NoError(err) // ensure counterparty state is committed - suite.coordinator.CommitBlock(suite.chainB) + s.coordinator.CommitBlock(s.chainB) trustedHeight, ok := path.EndpointA.GetClientLatestHeight().(clienttypes.Height) - suite.Require().True(ok) + s.Require().True(ok) header, err = path.EndpointA.Counterparty.Chain.IBCClientHeader(path.EndpointA.Counterparty.Chain.LatestCommittedHeader, trustedHeight) - suite.Require().NoError(err) + s.Require().NoError(err) - lightClientModule, err := suite.chainA.App.GetIBCKeeper().ClientKeeper.Route(suite.chainA.GetContext(), path.EndpointA.ClientID) - suite.Require().NoError(err) + lightClientModule, err := s.chainA.App.GetIBCKeeper().ClientKeeper.Route(s.chainA.GetContext(), path.EndpointA.ClientID) + s.Require().NoError(err) tc.malleate() - err = lightClientModule.VerifyClientMessage(suite.chainA.GetContext(), path.EndpointA.ClientID, header) + err = lightClientModule.VerifyClientMessage(s.chainA.GetContext(), path.EndpointA.ClientID, header) if tc.expErr == nil { - suite.Require().NoError(err, tc.name) + s.Require().NoError(err, tc.name) } else { - suite.Require().Error(err) - suite.Require().ErrorContains(err, tc.expErr.Error()) + s.Require().Error(err) + s.Require().ErrorContains(err, tc.expErr.Error()) } }) } } -func (suite *TendermintTestSuite) TestUpdateState() { +func (s *TendermintTestSuite) TestUpdateState() { var ( path *ibctesting.Path clientMessage exported.ClientMessage @@ -344,158 +344,158 @@ func (suite *TendermintTestSuite) TestUpdateState() { { "success with height later than latest height", func() { tmHeader, ok := clientMessage.(*ibctm.Header) - suite.Require().True(ok) - suite.Require().True(path.EndpointA.GetClientLatestHeight().(clienttypes.Height).LT(tmHeader.GetHeight())) + 
s.Require().True(ok) + s.Require().True(path.EndpointA.GetClientLatestHeight().(clienttypes.Height).LT(tmHeader.GetHeight())) }, func() { tmHeader, ok := clientMessage.(*ibctm.Header) - suite.Require().True(ok) + s.Require().True(ok) clientState, ok := path.EndpointA.GetClientState().(*ibctm.ClientState) - suite.Require().True(ok) - suite.Require().True(clientState.LatestHeight.EQ(tmHeader.GetHeight())) // new update, updated client state should have changed - suite.Require().True(clientState.LatestHeight.EQ(consensusHeights[0])) + s.Require().True(ok) + s.Require().True(clientState.LatestHeight.EQ(tmHeader.GetHeight())) // new update, updated client state should have changed + s.Require().True(clientState.LatestHeight.EQ(consensusHeights[0])) }, true, }, { "success with height earlier than latest height", func() { // commit a block so the pre-created ClientMessage // isn't used to update the client to a newer height - suite.coordinator.CommitBlock(suite.chainB) + s.coordinator.CommitBlock(s.chainB) err := path.EndpointA.UpdateClient() - suite.Require().NoError(err) + s.Require().NoError(err) tmHeader, ok := clientMessage.(*ibctm.Header) - suite.Require().True(ok) - suite.Require().True(path.EndpointA.GetClientLatestHeight().(clienttypes.Height).GT(tmHeader.GetHeight())) + s.Require().True(ok) + s.Require().True(path.EndpointA.GetClientLatestHeight().(clienttypes.Height).GT(tmHeader.GetHeight())) prevClientState = path.EndpointA.GetClientState() }, func() { clientState, ok := path.EndpointA.GetClientState().(*ibctm.ClientState) - suite.Require().True(ok) - suite.Require().Equal(clientState, prevClientState) // fill in height, no change to client state - suite.Require().True(clientState.LatestHeight.GT(consensusHeights[0])) + s.Require().True(ok) + s.Require().Equal(clientState, prevClientState) // fill in height, no change to client state + s.Require().True(clientState.LatestHeight.GT(consensusHeights[0])) }, true, }, { "success with duplicate header", func() { // update client in advance err := path.EndpointA.UpdateClient() - suite.Require().NoError(err) + s.Require().NoError(err) // use the same header which just updated the client trustedHeight, ok := path.EndpointA.GetClientLatestHeight().(clienttypes.Height) - suite.Require().True(ok) + s.Require().True(ok) clientMessage, err = path.EndpointA.Counterparty.Chain.IBCClientHeader(path.EndpointA.Counterparty.Chain.LatestCommittedHeader, trustedHeight) - suite.Require().NoError(err) + s.Require().NoError(err) tmHeader, ok := clientMessage.(*ibctm.Header) - suite.Require().True(ok) - suite.Require().Equal(path.EndpointA.GetClientLatestHeight().(clienttypes.Height), tmHeader.GetHeight()) + s.Require().True(ok) + s.Require().Equal(path.EndpointA.GetClientLatestHeight().(clienttypes.Height), tmHeader.GetHeight()) prevClientState = path.EndpointA.GetClientState() prevConsensusState = path.EndpointA.GetConsensusState(tmHeader.GetHeight()) }, func() { clientState, ok := path.EndpointA.GetClientState().(*ibctm.ClientState) - suite.Require().True(ok) - suite.Require().Equal(clientState, prevClientState) - suite.Require().True(clientState.LatestHeight.EQ(consensusHeights[0])) + s.Require().True(ok) + s.Require().Equal(clientState, prevClientState) + s.Require().True(clientState.LatestHeight.EQ(consensusHeights[0])) tmHeader, ok := clientMessage.(*ibctm.Header) - suite.Require().True(ok) - suite.Require().Equal(path.EndpointA.GetConsensusState(tmHeader.GetHeight()), prevConsensusState) + s.Require().True(ok) + 
s.Require().Equal(path.EndpointA.GetConsensusState(tmHeader.GetHeight()), prevConsensusState) }, true, }, { "success with pruned consensus state", func() { // this height will be expired and pruned err := path.EndpointA.UpdateClient() - suite.Require().NoError(err) + s.Require().NoError(err) var ok bool pruneHeight, ok = path.EndpointA.GetClientLatestHeight().(clienttypes.Height) - suite.Require().True(ok) + s.Require().True(ok) // Increment the time by a week - suite.coordinator.IncrementTimeBy(7 * 24 * time.Hour) + s.coordinator.IncrementTimeBy(7 * 24 * time.Hour) // create the consensus state that can be used as trusted height for next update err = path.EndpointA.UpdateClient() - suite.Require().NoError(err) + s.Require().NoError(err) // Increment the time by another week, then update the client. // This will cause the first two consensus states to become expired. - suite.coordinator.IncrementTimeBy(7 * 24 * time.Hour) + s.coordinator.IncrementTimeBy(7 * 24 * time.Hour) err = path.EndpointA.UpdateClient() - suite.Require().NoError(err) + s.Require().NoError(err) // ensure counterparty state is committed - suite.coordinator.CommitBlock(suite.chainB) + s.coordinator.CommitBlock(s.chainB) trustedHeight, ok := path.EndpointA.GetClientLatestHeight().(clienttypes.Height) - suite.Require().True(ok) + s.Require().True(ok) clientMessage, err = path.EndpointA.Counterparty.Chain.IBCClientHeader(path.EndpointA.Counterparty.Chain.LatestCommittedHeader, trustedHeight) - suite.Require().NoError(err) + s.Require().NoError(err) }, func() { tmHeader, ok := clientMessage.(*ibctm.Header) - suite.Require().True(ok) + s.Require().True(ok) clientState, ok := path.EndpointA.GetClientState().(*ibctm.ClientState) - suite.Require().True(ok) - suite.Require().True(clientState.LatestHeight.EQ(tmHeader.GetHeight())) // new update, updated client state should have changed - suite.Require().True(clientState.LatestHeight.EQ(consensusHeights[0])) + s.Require().True(ok) + s.Require().True(clientState.LatestHeight.EQ(tmHeader.GetHeight())) // new update, updated client state should have changed + s.Require().True(clientState.LatestHeight.EQ(consensusHeights[0])) // ensure consensus state was pruned _, found := path.EndpointA.Chain.GetConsensusState(path.EndpointA.ClientID, pruneHeight) - suite.Require().False(found) + s.Require().False(found) }, true, }, { "success with pruned consensus state using duplicate header", func() { // this height will be expired and pruned err := path.EndpointA.UpdateClient() - suite.Require().NoError(err) + s.Require().NoError(err) var ok bool pruneHeight, ok = path.EndpointA.GetClientLatestHeight().(clienttypes.Height) - suite.Require().True(ok) + s.Require().True(ok) // assert that a consensus state exists at the prune height consensusState, found := path.EndpointA.Chain.GetConsensusState(path.EndpointA.ClientID, pruneHeight) - suite.Require().True(found) - suite.Require().NotNil(consensusState) + s.Require().True(found) + s.Require().NotNil(consensusState) // Increment the time by a week - suite.coordinator.IncrementTimeBy(7 * 24 * time.Hour) + s.coordinator.IncrementTimeBy(7 * 24 * time.Hour) // create the consensus state that can be used as trusted height for next update err = path.EndpointA.UpdateClient() - suite.Require().NoError(err) + s.Require().NoError(err) // Increment the time by another week, then update the client. // This will cause the first two consensus states to become expired. 
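The pruning cases above and below advance the coordinator clock in one-week jumps so that the earliest consensus states fall outside the trusting period and are removed on the next UpdateState. A rough, self-contained sketch of the expiry condition this relies on (hypothetical helper, not the ibc-go API; it assumes the usual timestamp-plus-trusting-period rule checked by the Tendermint client):

package main

import (
	"fmt"
	"time"
)

// isExpired reports whether a stored consensus state is older than the
// trusting period and is therefore a candidate for pruning.
func isExpired(consensusTime, now time.Time, trustingPeriod time.Duration) bool {
	return !consensusTime.Add(trustingPeriod).After(now)
}

func main() {
	created := time.Now()
	trusting := 14 * 24 * time.Hour // two-week trusting period, purely as an example

	fmt.Println(isExpired(created, created.Add(7*24*time.Hour), trusting))  // false: one week in
	fmt.Println(isExpired(created, created.Add(15*24*time.Hour), trusting)) // true: past the trusting period
}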
- suite.coordinator.IncrementTimeBy(7 * 24 * time.Hour) + s.coordinator.IncrementTimeBy(7 * 24 * time.Hour) err = path.EndpointA.UpdateClient() - suite.Require().NoError(err) + s.Require().NoError(err) // use the same header which just updated the client trustedHeight, ok := path.EndpointA.GetClientLatestHeight().(clienttypes.Height) - suite.Require().True(ok) + s.Require().True(ok) clientMessage, err = path.EndpointA.Counterparty.Chain.IBCClientHeader(path.EndpointA.Counterparty.Chain.LatestCommittedHeader, trustedHeight) - suite.Require().NoError(err) + s.Require().NoError(err) }, func() { tmHeader, ok := clientMessage.(*ibctm.Header) - suite.Require().True(ok) + s.Require().True(ok) clientState, ok := path.EndpointA.GetClientState().(*ibctm.ClientState) - suite.Require().True(ok) - suite.Require().True(clientState.LatestHeight.EQ(tmHeader.GetHeight())) // new update, updated client state should have changed - suite.Require().True(clientState.LatestHeight.EQ(consensusHeights[0])) + s.Require().True(ok) + s.Require().True(clientState.LatestHeight.EQ(tmHeader.GetHeight())) // new update, updated client state should have changed + s.Require().True(clientState.LatestHeight.EQ(consensusHeights[0])) // ensure consensus state was pruned _, found := path.EndpointA.Chain.GetConsensusState(path.EndpointA.ClientID, pruneHeight) - suite.Require().False(found) + s.Require().False(found) }, true, }, { @@ -507,33 +507,33 @@ func (suite *TendermintTestSuite) TestUpdateState() { }, } for _, tc := range testCases { - suite.Run(tc.name, func() { - suite.SetupTest() // reset + s.Run(tc.name, func() { + s.SetupTest() // reset pruneHeight = clienttypes.ZeroHeight() - path = ibctesting.NewPath(suite.chainA, suite.chainB) + path = ibctesting.NewPath(s.chainA, s.chainB) err := path.EndpointA.CreateClient() - suite.Require().NoError(err) + s.Require().NoError(err) // ensure counterparty state is committed - suite.coordinator.CommitBlock(suite.chainB) + s.coordinator.CommitBlock(s.chainB) trustedHeight, ok := path.EndpointA.GetClientLatestHeight().(clienttypes.Height) - suite.Require().True(ok) + s.Require().True(ok) clientMessage, err = path.EndpointA.Counterparty.Chain.IBCClientHeader(path.EndpointA.Counterparty.Chain.LatestCommittedHeader, trustedHeight) - suite.Require().NoError(err) + s.Require().NoError(err) - lightClientModule, err := suite.chainA.App.GetIBCKeeper().ClientKeeper.Route(suite.chainA.GetContext(), path.EndpointA.ClientID) - suite.Require().NoError(err) + lightClientModule, err := s.chainA.App.GetIBCKeeper().ClientKeeper.Route(s.chainA.GetContext(), path.EndpointA.ClientID) + s.Require().NoError(err) tc.malleate() - clientStore = suite.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(suite.chainA.GetContext(), path.EndpointA.ClientID) + clientStore = s.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(s.chainA.GetContext(), path.EndpointA.ClientID) if tc.expPass { - consensusHeights = lightClientModule.UpdateState(suite.chainA.GetContext(), path.EndpointA.ClientID, clientMessage) + consensusHeights = lightClientModule.UpdateState(s.chainA.GetContext(), path.EndpointA.ClientID, clientMessage) header, ok := clientMessage.(*ibctm.Header) - suite.Require().True(ok) + s.Require().True(ok) expConsensusState := &ibctm.ConsensusState{ Timestamp: header.GetTime(), @@ -542,17 +542,16 @@ func (suite *TendermintTestSuite) TestUpdateState() { } bz := clientStore.Get(host.ConsensusStateKey(header.GetHeight())) - updatedConsensusState := clienttypes.MustUnmarshalConsensusState(suite.chainA.App.AppCodec(), 
bz) - - suite.Require().Equal(expConsensusState, updatedConsensusState) + updatedConsensusState := clienttypes.MustUnmarshalConsensusState(s.chainA.App.AppCodec(), bz) + s.Require().Equal(expConsensusState, updatedConsensusState) } else { - consensusHeights = lightClientModule.UpdateState(suite.chainA.GetContext(), path.EndpointA.ClientID, clientMessage) - suite.Require().Empty(consensusHeights) + consensusHeights = lightClientModule.UpdateState(s.chainA.GetContext(), path.EndpointA.ClientID, clientMessage) + s.Require().Empty(consensusHeights) - consensusState, found := suite.chainA.GetSimApp().GetIBCKeeper().ClientKeeper.GetClientConsensusState(suite.chainA.GetContext(), path.EndpointA.ClientID, clienttypes.NewHeight(1, uint64(suite.chainB.GetContext().BlockHeight()))) - suite.Require().False(found) - suite.Require().Nil(consensusState) + consensusState, found := s.chainA.GetSimApp().GetIBCKeeper().ClientKeeper.GetClientConsensusState(s.chainA.GetContext(), path.EndpointA.ClientID, clienttypes.NewHeight(1, uint64(s.chainB.GetContext().BlockHeight()))) + s.Require().False(found) + s.Require().Nil(consensusState) } // perform custom checks @@ -561,15 +560,15 @@ func (suite *TendermintTestSuite) TestUpdateState() { } } -func (suite *TendermintTestSuite) TestUpdateStateCheckTx() { - path := ibctesting.NewPath(suite.chainA, suite.chainB) +func (s *TendermintTestSuite) TestUpdateStateCheckTx() { + path := ibctesting.NewPath(s.chainA, s.chainB) path.SetupClients() createClientMessage := func() exported.ClientMessage { trustedHeight, ok := path.EndpointA.GetClientLatestHeight().(clienttypes.Height) - suite.Require().True(ok) + s.Require().True(ok) header, err := path.EndpointB.Chain.IBCClientHeader(path.EndpointB.Chain.LatestCommittedHeader, trustedHeight) - suite.Require().NoError(err) + s.Require().NoError(err) return header } @@ -584,43 +583,43 @@ func (suite *TendermintTestSuite) TestUpdateStateCheckTx() { ibctm.IterateConsensusStateAscending(clientStore, getFirstHeightCb) // Increment the time by a week - suite.coordinator.IncrementTimeBy(7 * 24 * time.Hour) + s.coordinator.IncrementTimeBy(7 * 24 * time.Hour) - lightClientModule, err := suite.chainA.App.GetIBCKeeper().ClientKeeper.Route(suite.chainA.GetContext(), path.EndpointA.ClientID) - suite.Require().NoError(err) + lightClientModule, err := s.chainA.App.GetIBCKeeper().ClientKeeper.Route(s.chainA.GetContext(), path.EndpointA.ClientID) + s.Require().NoError(err) ctx = path.EndpointA.Chain.GetContext().WithIsCheckTx(true) lightClientModule.UpdateState(ctx, path.EndpointA.ClientID, createClientMessage()) // Increment the time by another week, then update the client. // This will cause the first two consensus states to become expired. 
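
The two IncrementTimeBy(7 * 24 * time.Hour) calls in this test exist because the Tendermint light client treats a consensus state as expired once it is older than the client's trusting period; expired states are what the surrounding assertions expect to become prunable. A minimal standalone sketch of that rule, assuming a generic trustingPeriod in place of ClientState.TrustingPeriod (an illustrative helper, not the module's actual API):

package main

import (
	"fmt"
	"time"
)

// isExpired mirrors the expiry rule the test comments rely on: a consensus
// state can no longer be trusted once its timestamp plus the trusting period
// is not after the current time. Illustrative only; trustingPeriod stands in
// for the client state's configured TrustingPeriod.
func isExpired(consensusTime time.Time, trustingPeriod time.Duration, now time.Time) bool {
	return !consensusTime.Add(trustingPeriod).After(now)
}

func main() {
	now := time.Now()
	trustingPeriod := 10 * 24 * time.Hour // hypothetical value, shorter than the two weeks advanced in the test

	fmt.Println(isExpired(now.Add(-14*24*time.Hour), trustingPeriod, now)) // true: older than the trusting period
	fmt.Println(isExpired(now.Add(-7*24*time.Hour), trustingPeriod, now))  // false: still within the trusting period
}
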
- suite.coordinator.IncrementTimeBy(7 * 24 * time.Hour) + s.coordinator.IncrementTimeBy(7 * 24 * time.Hour) ctx = path.EndpointA.Chain.GetContext().WithIsCheckTx(true) lightClientModule.UpdateState(ctx, path.EndpointA.ClientID, createClientMessage()) assertPrune := func(pruned bool) { // check consensus states and associated metadata consState, ok := path.EndpointA.Chain.GetConsensusState(path.EndpointA.ClientID, pruneHeight) - suite.Require().Equal(!pruned, ok) + s.Require().Equal(!pruned, ok) processTime, ok := ibctm.GetProcessedTime(clientStore, pruneHeight) - suite.Require().Equal(!pruned, ok) + s.Require().Equal(!pruned, ok) processHeight, ok := ibctm.GetProcessedHeight(clientStore, pruneHeight) - suite.Require().Equal(!pruned, ok) + s.Require().Equal(!pruned, ok) consKey := ibctm.GetIterationKey(clientStore, pruneHeight) if pruned { - suite.Require().Nil(consState, "expired consensus state not pruned") - suite.Require().Empty(processTime, "processed time metadata not pruned") - suite.Require().Nil(processHeight, "processed height metadata not pruned") - suite.Require().Nil(consKey, "iteration key not pruned") + s.Require().Nil(consState, "expired consensus state not pruned") + s.Require().Empty(processTime, "processed time metadata not pruned") + s.Require().Nil(processHeight, "processed height metadata not pruned") + s.Require().Nil(consKey, "iteration key not pruned") } else { - suite.Require().NotNil(consState, "expired consensus state pruned") - suite.Require().NotEqual(uint64(0), processTime, "processed time metadata pruned") - suite.Require().NotNil(processHeight, "processed height metadata pruned") - suite.Require().NotNil(consKey, "iteration key pruned") + s.Require().NotNil(consState, "expired consensus state pruned") + s.Require().NotEqual(uint64(0), processTime, "processed time metadata pruned") + s.Require().NotNil(processHeight, "processed height metadata pruned") + s.Require().NotNil(consKey, "iteration key pruned") } } @@ -633,9 +632,9 @@ func (suite *TendermintTestSuite) TestUpdateStateCheckTx() { assertPrune(true) } -func (suite *TendermintTestSuite) TestPruneConsensusState() { +func (s *TendermintTestSuite) TestPruneConsensusState() { // create path and setup clients - path := ibctesting.NewPath(suite.chainA, suite.chainB) + path := ibctesting.NewPath(s.chainA, s.chainB) path.SetupClients() // get the first height as it will be pruned first. 
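
The pruning these tests pin down happens lazily inside UpdateState: per the test's own assertions, at most one expired consensus state, together with its processed-time, processed-height, and iteration-key metadata, is deleted on each update, which keeps the gas cost of UpdateClient bounded regardless of how many states have expired. A rough standalone sketch of that one-per-update policy (the map-based store and helper below are illustrative, not the keeper's storage layout):

package main

import (
	"fmt"
	"sort"
	"time"
)

// pruneOldestExpired removes at most one expired consensus state per call,
// which is the bounded-work behaviour the test asserts: the first expired
// height is deleted while the second expired height survives until a later
// update. Illustrative sketch over a plain map keyed by height.
func pruneOldestExpired(states map[uint64]time.Time, trustingPeriod time.Duration, now time.Time) (uint64, bool) {
	heights := make([]uint64, 0, len(states))
	for h := range states {
		heights = append(heights, h)
	}
	sort.Slice(heights, func(i, j int) bool { return heights[i] < heights[j] })

	for _, h := range heights {
		if !states[h].Add(trustingPeriod).After(now) { // expired
			delete(states, h)
			return h, true
		}
	}
	return 0, false
}

func main() {
	now := time.Now()
	states := map[uint64]time.Time{
		1: now.Add(-15 * 24 * time.Hour), // expired
		2: now.Add(-14 * 24 * time.Hour), // expired
		3: now.Add(-1 * time.Hour),       // fresh
	}
	h, ok := pruneOldestExpired(states, 10*24*time.Hour, now)
	fmt.Println(h, ok, len(states)) // 1 true 2: only the oldest expired state was pruned
}
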
@@ -650,75 +649,75 @@ func (suite *TendermintTestSuite) TestPruneConsensusState() { // this height will be expired but not pruned err := path.EndpointA.UpdateClient() - suite.Require().NoError(err) + s.Require().NoError(err) expiredHeight, ok := path.EndpointA.GetClientLatestHeight().(clienttypes.Height) - suite.Require().True(ok) + s.Require().True(ok) // expected values that must still remain in store after pruning expectedConsState, ok := path.EndpointA.Chain.GetConsensusState(path.EndpointA.ClientID, expiredHeight) - suite.Require().True(ok) + s.Require().True(ok) ctx = path.EndpointA.Chain.GetContext() clientStore = path.EndpointA.Chain.App.GetIBCKeeper().ClientKeeper.ClientStore(ctx, path.EndpointA.ClientID) expectedProcessTime, ok := ibctm.GetProcessedTime(clientStore, expiredHeight) - suite.Require().True(ok) + s.Require().True(ok) expectedProcessHeight, ok := ibctm.GetProcessedHeight(clientStore, expiredHeight) - suite.Require().True(ok) + s.Require().True(ok) expectedConsKey := ibctm.GetIterationKey(clientStore, expiredHeight) - suite.Require().NotNil(expectedConsKey) + s.Require().NotNil(expectedConsKey) // Increment the time by a week - suite.coordinator.IncrementTimeBy(7 * 24 * time.Hour) + s.coordinator.IncrementTimeBy(7 * 24 * time.Hour) // create the consensus state that can be used as trusted height for next update err = path.EndpointA.UpdateClient() - suite.Require().NoError(err) + s.Require().NoError(err) // Increment the time by another week, then update the client. // This will cause the first two consensus states to become expired. - suite.coordinator.IncrementTimeBy(7 * 24 * time.Hour) + s.coordinator.IncrementTimeBy(7 * 24 * time.Hour) err = path.EndpointA.UpdateClient() - suite.Require().NoError(err) + s.Require().NoError(err) ctx = path.EndpointA.Chain.GetContext() clientStore = path.EndpointA.Chain.App.GetIBCKeeper().ClientKeeper.ClientStore(ctx, path.EndpointA.ClientID) // check that the first expired consensus state got deleted along with all associated metadata consState, ok := path.EndpointA.Chain.GetConsensusState(path.EndpointA.ClientID, pruneHeight) - suite.Require().Nil(consState, "expired consensus state not pruned") - suite.Require().False(ok) + s.Require().Nil(consState, "expired consensus state not pruned") + s.Require().False(ok) // check processed time metadata is pruned processTime, ok := ibctm.GetProcessedTime(clientStore, pruneHeight) - suite.Require().Equal(uint64(0), processTime, "processed time metadata not pruned") - suite.Require().False(ok) + s.Require().Equal(uint64(0), processTime, "processed time metadata not pruned") + s.Require().False(ok) processHeight, ok := ibctm.GetProcessedHeight(clientStore, pruneHeight) - suite.Require().Nil(processHeight, "processed height metadata not pruned") - suite.Require().False(ok) + s.Require().Nil(processHeight, "processed height metadata not pruned") + s.Require().False(ok) // check iteration key metadata is pruned consKey := ibctm.GetIterationKey(clientStore, pruneHeight) - suite.Require().Nil(consKey, "iteration key not pruned") + s.Require().Nil(consKey, "iteration key not pruned") // check that second expired consensus state doesn't get deleted // this ensures that there is a cap on gas cost of UpdateClient consState, ok = path.EndpointA.Chain.GetConsensusState(path.EndpointA.ClientID, expiredHeight) - suite.Require().Equal(expectedConsState, consState, "consensus state incorrectly pruned") - suite.Require().True(ok) + s.Require().Equal(expectedConsState, consState, "consensus state incorrectly 
pruned") + s.Require().True(ok) // check processed time metadata is not pruned processTime, ok = ibctm.GetProcessedTime(clientStore, expiredHeight) - suite.Require().Equal(expectedProcessTime, processTime, "processed time metadata incorrectly pruned") - suite.Require().True(ok) + s.Require().Equal(expectedProcessTime, processTime, "processed time metadata incorrectly pruned") + s.Require().True(ok) // check processed height metadata is not pruned processHeight, ok = ibctm.GetProcessedHeight(clientStore, expiredHeight) - suite.Require().Equal(expectedProcessHeight, processHeight, "processed height metadata incorrectly pruned") - suite.Require().True(ok) + s.Require().Equal(expectedProcessHeight, processHeight, "processed height metadata incorrectly pruned") + s.Require().True(ok) // check iteration key metadata is not pruned consKey = ibctm.GetIterationKey(clientStore, expiredHeight) - suite.Require().Equal(expectedConsKey, consKey, "iteration key incorrectly pruned") + s.Require().Equal(expectedConsKey, consKey, "iteration key incorrectly pruned") } -func (suite *TendermintTestSuite) TestCheckForMisbehaviour() { +func (s *TendermintTestSuite) TestCheckForMisbehaviour() { var ( path *ibctesting.Path clientMessage exported.ClientMessage @@ -738,7 +737,7 @@ func (suite *TendermintTestSuite) TestCheckForMisbehaviour() { "consensus state already exists, already updated", func() { header, ok := clientMessage.(*ibctm.Header) - suite.Require().True(ok) + s.Require().True(ok) consensusState := &ibctm.ConsensusState{ Timestamp: header.GetTime(), @@ -747,26 +746,26 @@ func (suite *TendermintTestSuite) TestCheckForMisbehaviour() { } tmHeader, ok := clientMessage.(*ibctm.Header) - suite.Require().True(ok) - suite.chainA.App.GetIBCKeeper().ClientKeeper.SetClientConsensusState(suite.chainA.GetContext(), path.EndpointA.ClientID, tmHeader.GetHeight(), consensusState) + s.Require().True(ok) + s.chainA.App.GetIBCKeeper().ClientKeeper.SetClientConsensusState(s.chainA.GetContext(), path.EndpointA.ClientID, tmHeader.GetHeight(), consensusState) }, false, }, { "invalid fork misbehaviour: identical headers", func() { trustedHeight, ok := path.EndpointA.GetClientLatestHeight().(clienttypes.Height) - suite.Require().True(ok) + s.Require().True(ok) - trustedVals, ok := suite.chainB.TrustedValidators[trustedHeight.RevisionHeight+1] - suite.Require().True(ok) + trustedVals, ok := s.chainB.TrustedValidators[trustedHeight.RevisionHeight+1] + s.Require().True(ok) err := path.EndpointA.UpdateClient() - suite.Require().NoError(err) + s.Require().NoError(err) height, ok := path.EndpointA.GetClientLatestHeight().(clienttypes.Height) - suite.Require().True(ok) + s.Require().True(ok) - misbehaviourHeader := suite.chainB.CreateTMClientHeader(suite.chainB.ChainID, int64(height.RevisionHeight), trustedHeight, suite.chainB.ProposedHeader.Time.Add(time.Minute), suite.chainB.Vals, suite.chainB.NextVals, trustedVals, suite.chainB.Signers) + misbehaviourHeader := s.chainB.CreateTMClientHeader(s.chainB.ChainID, int64(height.RevisionHeight), trustedHeight, s.chainB.ProposedHeader.Time.Add(time.Minute), s.chainB.Vals, s.chainB.NextVals, trustedVals, s.chainB.Signers) clientMessage = &ibctm.Misbehaviour{ Header1: misbehaviourHeader, Header2: misbehaviourHeader, @@ -776,14 +775,14 @@ func (suite *TendermintTestSuite) TestCheckForMisbehaviour() { { "invalid time misbehaviour: monotonically increasing time", func() { trustedHeight, ok := path.EndpointA.GetClientLatestHeight().(clienttypes.Height) - suite.Require().True(ok) + 
s.Require().True(ok) - trustedVals, ok := suite.chainB.TrustedValidators[trustedHeight.RevisionHeight+1] - suite.Require().True(ok) + trustedVals, ok := s.chainB.TrustedValidators[trustedHeight.RevisionHeight+1] + s.Require().True(ok) clientMessage = &ibctm.Misbehaviour{ - Header1: suite.chainB.CreateTMClientHeader(suite.chainB.ChainID, suite.chainB.ProposedHeader.Height+3, trustedHeight, suite.chainB.ProposedHeader.Time.Add(time.Minute), suite.chainB.Vals, suite.chainB.NextVals, trustedVals, suite.chainB.Signers), - Header2: suite.chainB.CreateTMClientHeader(suite.chainB.ChainID, suite.chainB.ProposedHeader.Height, trustedHeight, suite.chainB.ProposedHeader.Time, suite.chainB.Vals, suite.chainB.NextVals, trustedVals, suite.chainB.Signers), + Header1: s.chainB.CreateTMClientHeader(s.chainB.ChainID, s.chainB.ProposedHeader.Height+3, trustedHeight, s.chainB.ProposedHeader.Time.Add(time.Minute), s.chainB.Vals, s.chainB.NextVals, trustedVals, s.chainB.Signers), + Header2: s.chainB.CreateTMClientHeader(s.chainB.ChainID, s.chainB.ProposedHeader.Height, trustedHeight, s.chainB.ProposedHeader.Time, s.chainB.Vals, s.chainB.NextVals, trustedVals, s.chainB.Signers), } }, false, }, @@ -791,7 +790,7 @@ func (suite *TendermintTestSuite) TestCheckForMisbehaviour() { "consensus state already exists, app hash mismatch", func() { header, ok := clientMessage.(*ibctm.Header) - suite.Require().True(ok) + s.Require().True(ok) consensusState := &ibctm.ConsensusState{ Timestamp: header.GetTime(), @@ -800,8 +799,8 @@ func (suite *TendermintTestSuite) TestCheckForMisbehaviour() { } tmHeader, ok := clientMessage.(*ibctm.Header) - suite.Require().True(ok) - suite.chainA.App.GetIBCKeeper().ClientKeeper.SetClientConsensusState(suite.chainA.GetContext(), path.EndpointA.ClientID, tmHeader.GetHeight(), consensusState) + s.Require().True(ok) + s.chainA.App.GetIBCKeeper().ClientKeeper.SetClientConsensusState(s.chainA.GetContext(), path.EndpointA.ClientID, tmHeader.GetHeight(), consensusState) }, true, }, @@ -809,7 +808,7 @@ func (suite *TendermintTestSuite) TestCheckForMisbehaviour() { "previous consensus state exists and header time is before previous consensus state time", func() { header, ok := clientMessage.(*ibctm.Header) - suite.Require().True(ok) + s.Require().True(ok) // offset header timestamp before previous consensus state timestamp header.Header.Time = header.GetTime().Add(-time.Hour) @@ -820,12 +819,12 @@ func (suite *TendermintTestSuite) TestCheckForMisbehaviour() { "next consensus state exists and header time is after next consensus state time", func() { header, ok := clientMessage.(*ibctm.Header) - suite.Require().True(ok) + s.Require().True(ok) // commit block and update client, adding a new consensus state - suite.coordinator.CommitBlock(suite.chainB) + s.coordinator.CommitBlock(s.chainB) err := path.EndpointA.UpdateClient() - suite.Require().NoError(err) + s.Require().NoError(err) // increase timestamp of current header header.Header.Time = header.Header.Time.Add(time.Hour) @@ -836,19 +835,19 @@ func (suite *TendermintTestSuite) TestCheckForMisbehaviour() { "valid fork misbehaviour returns true", func() { trustedHeight, ok := path.EndpointA.GetClientLatestHeight().(clienttypes.Height) - suite.Require().True(ok) + s.Require().True(ok) header1, err := path.EndpointA.Counterparty.Chain.IBCClientHeader(path.EndpointA.Counterparty.Chain.LatestCommittedHeader, trustedHeight) - suite.Require().NoError(err) + s.Require().NoError(err) // commit block and update client - suite.coordinator.CommitBlock(suite.chainB) 
+ s.coordinator.CommitBlock(s.chainB) err = path.EndpointA.UpdateClient() - suite.Require().NoError(err) + s.Require().NoError(err) trustedHeight, ok = path.EndpointA.GetClientLatestHeight().(clienttypes.Height) - suite.Require().True(ok) + s.Require().True(ok) header2, err := path.EndpointA.Counterparty.Chain.IBCClientHeader(path.EndpointA.Counterparty.Chain.LatestCommittedHeader, trustedHeight) - suite.Require().NoError(err) + s.Require().NoError(err) // assign the same height, each header will have a different commit hash header1.Header.Height = header2.Header.Height @@ -864,56 +863,56 @@ func (suite *TendermintTestSuite) TestCheckForMisbehaviour() { { "valid time misbehaviour: not monotonically increasing time", func() { trustedHeight, ok := path.EndpointA.GetClientLatestHeight().(clienttypes.Height) - suite.Require().True(ok) + s.Require().True(ok) - trustedVals, ok := suite.chainB.TrustedValidators[trustedHeight.RevisionHeight+1] - suite.Require().True(ok) + trustedVals, ok := s.chainB.TrustedValidators[trustedHeight.RevisionHeight+1] + s.Require().True(ok) clientMessage = &ibctm.Misbehaviour{ - Header2: suite.chainB.CreateTMClientHeader(suite.chainB.ChainID, suite.chainB.ProposedHeader.Height+3, trustedHeight, suite.chainB.ProposedHeader.Time.Add(time.Minute), suite.chainB.Vals, suite.chainB.NextVals, trustedVals, suite.chainB.Signers), - Header1: suite.chainB.CreateTMClientHeader(suite.chainB.ChainID, suite.chainB.ProposedHeader.Height, trustedHeight, suite.chainB.ProposedHeader.Time, suite.chainB.Vals, suite.chainB.NextVals, trustedVals, suite.chainB.Signers), + Header2: s.chainB.CreateTMClientHeader(s.chainB.ChainID, s.chainB.ProposedHeader.Height+3, trustedHeight, s.chainB.ProposedHeader.Time.Add(time.Minute), s.chainB.Vals, s.chainB.NextVals, trustedVals, s.chainB.Signers), + Header1: s.chainB.CreateTMClientHeader(s.chainB.ChainID, s.chainB.ProposedHeader.Height, trustedHeight, s.chainB.ProposedHeader.Time, s.chainB.Vals, s.chainB.NextVals, trustedVals, s.chainB.Signers), } }, true, }, } for _, tc := range testCases { - suite.Run(tc.name, func() { + s.Run(tc.name, func() { // reset suite to create fresh application state - suite.SetupTest() - path = ibctesting.NewPath(suite.chainA, suite.chainB) + s.SetupTest() + path = ibctesting.NewPath(s.chainA, s.chainB) err := path.EndpointA.CreateClient() - suite.Require().NoError(err) + s.Require().NoError(err) // ensure counterparty state is committed - suite.coordinator.CommitBlock(suite.chainB) + s.coordinator.CommitBlock(s.chainB) trustedHeight, ok := path.EndpointA.GetClientLatestHeight().(clienttypes.Height) - suite.Require().True(ok) + s.Require().True(ok) clientMessage, err = path.EndpointA.Counterparty.Chain.IBCClientHeader(path.EndpointA.Counterparty.Chain.LatestCommittedHeader, trustedHeight) - suite.Require().NoError(err) + s.Require().NoError(err) - lightClientModule, err := suite.chainA.App.GetIBCKeeper().ClientKeeper.Route(suite.chainA.GetContext(), path.EndpointA.ClientID) - suite.Require().NoError(err) + lightClientModule, err := s.chainA.App.GetIBCKeeper().ClientKeeper.Route(s.chainA.GetContext(), path.EndpointA.ClientID) + s.Require().NoError(err) tc.malleate() foundMisbehaviour := lightClientModule.CheckForMisbehaviour( - suite.chainA.GetContext(), + s.chainA.GetContext(), path.EndpointA.ClientID, clientMessage, ) if tc.expPass { - suite.Require().True(foundMisbehaviour) + s.Require().True(foundMisbehaviour) } else { - suite.Require().False(foundMisbehaviour) + s.Require().False(foundMisbehaviour) } }) } } -func 
(suite *TendermintTestSuite) TestUpdateStateOnMisbehaviour() { +func (s *TendermintTestSuite) TestUpdateStateOnMisbehaviour() { var path *ibctesting.Path testCases := []struct { @@ -929,29 +928,29 @@ func (suite *TendermintTestSuite) TestUpdateStateOnMisbehaviour() { } for _, tc := range testCases { - suite.Run(tc.name, func() { + s.Run(tc.name, func() { // reset suite to create fresh application state - suite.SetupTest() - path = ibctesting.NewPath(suite.chainA, suite.chainB) + s.SetupTest() + path = ibctesting.NewPath(s.chainA, s.chainB) err := path.EndpointA.CreateClient() - suite.Require().NoError(err) + s.Require().NoError(err) - lightClientModule, err := suite.chainA.App.GetIBCKeeper().ClientKeeper.Route(suite.chainA.GetContext(), path.EndpointA.ClientID) - suite.Require().NoError(err) + lightClientModule, err := s.chainA.App.GetIBCKeeper().ClientKeeper.Route(s.chainA.GetContext(), path.EndpointA.ClientID) + s.Require().NoError(err) tc.malleate() - clientStore := suite.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(suite.chainA.GetContext(), path.EndpointA.ClientID) + clientStore := s.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(s.chainA.GetContext(), path.EndpointA.ClientID) - lightClientModule.UpdateStateOnMisbehaviour(suite.chainA.GetContext(), path.EndpointA.ClientID, nil) + lightClientModule.UpdateStateOnMisbehaviour(s.chainA.GetContext(), path.EndpointA.ClientID, nil) if tc.expPass { clientStateBz := clientStore.Get(host.ClientStateKey()) - suite.Require().NotEmpty(clientStateBz) + s.Require().NotEmpty(clientStateBz) - newClientState := clienttypes.MustUnmarshalClientState(suite.chainA.Codec, clientStateBz) - suite.Require().Equal(frozenHeight, newClientState.(*ibctm.ClientState).FrozenHeight) + newClientState := clienttypes.MustUnmarshalClientState(s.chainA.Codec, clientStateBz) + s.Require().Equal(frozenHeight, newClientState.(*ibctm.ClientState).FrozenHeight) } }) } diff --git a/modules/light-clients/07-tendermint/upgrade.go b/modules/light-clients/07-tendermint/upgrade.go index 8b3f0ed9bdf..8ae44f44072 100644 --- a/modules/light-clients/07-tendermint/upgrade.go +++ b/modules/light-clients/07-tendermint/upgrade.go @@ -143,10 +143,9 @@ func constructUpgradeClientMerklePath(upgradePath []string, lastHeight exported. 
// this will create the IAVL key that is used to store client in upgrade store lastKey := upgradePath[len(upgradePath)-1] appendedKey := fmt.Sprintf("%s/%d/%s", lastKey, lastHeight.GetRevisionHeight(), upgradetypes.KeyUpgradedClient) - clientPath = append(clientPath, appendedKey) - var clientKey [][]byte + clientKey := make([][]byte, 0, len(clientPath)) for _, part := range clientPath { clientKey = append(clientKey, []byte(part)) } @@ -164,10 +163,9 @@ func constructUpgradeConsStateMerklePath(upgradePath []string, lastHeight export // this will create the IAVL key that is used to store client in upgrade store lastKey := upgradePath[len(upgradePath)-1] appendedKey := fmt.Sprintf("%s/%d/%s", lastKey, lastHeight.GetRevisionHeight(), upgradetypes.KeyUpgradedConsState) - consPath = append(consPath, appendedKey) - var consStateKey [][]byte + consStateKey := make([][]byte, 0, len(consPath)) for _, part := range consPath { consStateKey = append(consStateKey, []byte(part)) } diff --git a/modules/light-clients/07-tendermint/upgrade_test.go b/modules/light-clients/07-tendermint/upgrade_test.go index 0f42ea2f9d8..b6e49f4185f 100644 --- a/modules/light-clients/07-tendermint/upgrade_test.go +++ b/modules/light-clients/07-tendermint/upgrade_test.go @@ -15,7 +15,7 @@ import ( ibctesting "github.com/cosmos/ibc-go/v10/testing" ) -func (suite *TendermintTestSuite) TestVerifyUpgrade() { +func (s *TendermintTestSuite) TestVerifyUpgrade() { var ( newChainID string upgradedClient exported.ClientState @@ -36,55 +36,55 @@ func (suite *TendermintTestSuite) TestVerifyUpgrade() { name: "successful upgrade", setup: func() { // upgrade Height is at next block - lastHeight = clienttypes.NewHeight(0, uint64(suite.chainB.GetContext().BlockHeight()+1)) + lastHeight = clienttypes.NewHeight(0, uint64(s.chainB.GetContext().BlockHeight()+1)) // zero custom fields and store in upgrade store - suite.chainB.GetSimApp().UpgradeKeeper.SetUpgradedClient(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedClientBz) //nolint:errcheck // ignore error for test - suite.chainB.GetSimApp().UpgradeKeeper.SetUpgradedConsensusState(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedConsStateBz) //nolint:errcheck // ignore error for test + s.chainB.GetSimApp().UpgradeKeeper.SetUpgradedClient(s.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedClientBz) //nolint:errcheck // ignore error for test + s.chainB.GetSimApp().UpgradeKeeper.SetUpgradedConsensusState(s.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedConsStateBz) //nolint:errcheck // ignore error for test // commit upgrade store changes and update clients - suite.coordinator.CommitBlock(suite.chainB) + s.coordinator.CommitBlock(s.chainB) err := path.EndpointA.UpdateClient() - suite.Require().NoError(err) + s.Require().NoError(err) - cs, found := suite.chainA.App.GetIBCKeeper().ClientKeeper.GetClientState(suite.chainA.GetContext(), path.EndpointA.ClientID) - suite.Require().True(found) + cs, found := s.chainA.App.GetIBCKeeper().ClientKeeper.GetClientState(s.chainA.GetContext(), path.EndpointA.ClientID) + s.Require().True(found) tmCs, ok := cs.(*ibctm.ClientState) - suite.Require().True(ok) + s.Require().True(ok) - upgradedClientProof, _ = suite.chainB.QueryUpgradeProof(upgradetypes.UpgradedClientKey(int64(lastHeight.GetRevisionHeight())), tmCs.LatestHeight.GetRevisionHeight()) - upgradedConsensusStateProof, _ = 
suite.chainB.QueryUpgradeProof(upgradetypes.UpgradedConsStateKey(int64(lastHeight.GetRevisionHeight())), tmCs.LatestHeight.GetRevisionHeight()) + upgradedClientProof, _ = s.chainB.QueryUpgradeProof(upgradetypes.UpgradedClientKey(int64(lastHeight.GetRevisionHeight())), tmCs.LatestHeight.GetRevisionHeight()) + upgradedConsensusStateProof, _ = s.chainB.QueryUpgradeProof(upgradetypes.UpgradedConsStateKey(int64(lastHeight.GetRevisionHeight())), tmCs.LatestHeight.GetRevisionHeight()) }, expErr: nil, }, { name: "successful upgrade to same revision", setup: func() { - upgradedClient = ibctm.NewClientState(suite.chainB.ChainID, ibctm.DefaultTrustLevel, trustingPeriod, ubdPeriod+trustingPeriod, maxClockDrift, clienttypes.NewHeight(clienttypes.ParseChainID(suite.chainB.ChainID), upgradedClient.(*ibctm.ClientState).LatestHeight.GetRevisionHeight()+10), commitmenttypes.GetSDKSpecs(), upgradePath) + upgradedClient = ibctm.NewClientState(s.chainB.ChainID, ibctm.DefaultTrustLevel, trustingPeriod, ubdPeriod+trustingPeriod, maxClockDrift, clienttypes.NewHeight(clienttypes.ParseChainID(s.chainB.ChainID), upgradedClient.(*ibctm.ClientState).LatestHeight.GetRevisionHeight()+10), commitmenttypes.GetSDKSpecs(), upgradePath) upgradedClient = upgradedClient.(*ibctm.ClientState).ZeroCustomFields() - upgradedClientBz, err = clienttypes.MarshalClientState(suite.chainA.App.AppCodec(), upgradedClient) - suite.Require().NoError(err) + upgradedClientBz, err = clienttypes.MarshalClientState(s.chainA.App.AppCodec(), upgradedClient) + s.Require().NoError(err) // upgrade Height is at next block - lastHeight = clienttypes.NewHeight(0, uint64(suite.chainB.GetContext().BlockHeight()+1)) + lastHeight = clienttypes.NewHeight(0, uint64(s.chainB.GetContext().BlockHeight()+1)) // zero custom fields and store in upgrade store - suite.chainB.GetSimApp().UpgradeKeeper.SetUpgradedClient(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedClientBz) //nolint:errcheck // ignore error for test - suite.chainB.GetSimApp().UpgradeKeeper.SetUpgradedConsensusState(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedConsStateBz) //nolint:errcheck // ignore error for test + s.chainB.GetSimApp().UpgradeKeeper.SetUpgradedClient(s.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedClientBz) //nolint:errcheck // ignore error for test + s.chainB.GetSimApp().UpgradeKeeper.SetUpgradedConsensusState(s.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedConsStateBz) //nolint:errcheck // ignore error for test // commit upgrade store changes and update clients - suite.coordinator.CommitBlock(suite.chainB) + s.coordinator.CommitBlock(s.chainB) err := path.EndpointA.UpdateClient() - suite.Require().NoError(err) + s.Require().NoError(err) - cs, found := suite.chainA.App.GetIBCKeeper().ClientKeeper.GetClientState(suite.chainA.GetContext(), path.EndpointA.ClientID) - suite.Require().True(found) + cs, found := s.chainA.App.GetIBCKeeper().ClientKeeper.GetClientState(s.chainA.GetContext(), path.EndpointA.ClientID) + s.Require().True(found) tmCs, ok := cs.(*ibctm.ClientState) - suite.Require().True(ok) + s.Require().True(ok) - upgradedClientProof, _ = suite.chainB.QueryUpgradeProof(upgradetypes.UpgradedClientKey(int64(lastHeight.GetRevisionHeight())), tmCs.LatestHeight.GetRevisionHeight()) - upgradedConsensusStateProof, _ = suite.chainB.QueryUpgradeProof(upgradetypes.UpgradedConsStateKey(int64(lastHeight.GetRevisionHeight())), tmCs.LatestHeight.GetRevisionHeight()) + upgradedClientProof, _ = 
s.chainB.QueryUpgradeProof(upgradetypes.UpgradedClientKey(int64(lastHeight.GetRevisionHeight())), tmCs.LatestHeight.GetRevisionHeight()) + upgradedConsensusStateProof, _ = s.chainB.QueryUpgradeProof(upgradetypes.UpgradedConsStateKey(int64(lastHeight.GetRevisionHeight())), tmCs.LatestHeight.GetRevisionHeight()) }, expErr: nil, }, @@ -92,52 +92,52 @@ func (suite *TendermintTestSuite) TestVerifyUpgrade() { name: "successful upgrade with new unbonding period", setup: func() { newUnbondingPeriod := time.Hour * 24 * 7 * 2 - upgradedClient = ibctm.NewClientState(suite.chainB.ChainID, ibctm.DefaultTrustLevel, trustingPeriod, newUnbondingPeriod, maxClockDrift, clienttypes.NewHeight(clienttypes.ParseChainID(suite.chainB.ChainID), upgradedClient.(*ibctm.ClientState).LatestHeight.GetRevisionHeight()+10), commitmenttypes.GetSDKSpecs(), upgradePath) + upgradedClient = ibctm.NewClientState(s.chainB.ChainID, ibctm.DefaultTrustLevel, trustingPeriod, newUnbondingPeriod, maxClockDrift, clienttypes.NewHeight(clienttypes.ParseChainID(s.chainB.ChainID), upgradedClient.(*ibctm.ClientState).LatestHeight.GetRevisionHeight()+10), commitmenttypes.GetSDKSpecs(), upgradePath) upgradedClient = upgradedClient.(*ibctm.ClientState).ZeroCustomFields() - upgradedClientBz, err = clienttypes.MarshalClientState(suite.chainA.App.AppCodec(), upgradedClient) - suite.Require().NoError(err) + upgradedClientBz, err = clienttypes.MarshalClientState(s.chainA.App.AppCodec(), upgradedClient) + s.Require().NoError(err) // upgrade Height is at next block - lastHeight = clienttypes.NewHeight(0, uint64(suite.chainB.GetContext().BlockHeight()+1)) + lastHeight = clienttypes.NewHeight(0, uint64(s.chainB.GetContext().BlockHeight()+1)) // zero custom fields and store in upgrade store - suite.chainB.GetSimApp().UpgradeKeeper.SetUpgradedClient(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedClientBz) //nolint:errcheck // ignore error for test - suite.chainB.GetSimApp().UpgradeKeeper.SetUpgradedConsensusState(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedConsStateBz) //nolint:errcheck // ignore error for test + s.chainB.GetSimApp().UpgradeKeeper.SetUpgradedClient(s.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedClientBz) //nolint:errcheck // ignore error for test + s.chainB.GetSimApp().UpgradeKeeper.SetUpgradedConsensusState(s.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedConsStateBz) //nolint:errcheck // ignore error for test // commit upgrade store changes and update clients - suite.coordinator.CommitBlock(suite.chainB) + s.coordinator.CommitBlock(s.chainB) err := path.EndpointA.UpdateClient() - suite.Require().NoError(err) + s.Require().NoError(err) - cs, found := suite.chainA.App.GetIBCKeeper().ClientKeeper.GetClientState(suite.chainA.GetContext(), path.EndpointA.ClientID) - suite.Require().True(found) + cs, found := s.chainA.App.GetIBCKeeper().ClientKeeper.GetClientState(s.chainA.GetContext(), path.EndpointA.ClientID) + s.Require().True(found) tmCs, ok := cs.(*ibctm.ClientState) - suite.Require().True(ok) + s.Require().True(ok) - upgradedClientProof, _ = suite.chainB.QueryUpgradeProof(upgradetypes.UpgradedClientKey(int64(lastHeight.GetRevisionHeight())), tmCs.LatestHeight.GetRevisionHeight()) - upgradedConsensusStateProof, _ = suite.chainB.QueryUpgradeProof(upgradetypes.UpgradedConsStateKey(int64(lastHeight.GetRevisionHeight())), tmCs.LatestHeight.GetRevisionHeight()) + upgradedClientProof, _ = 
s.chainB.QueryUpgradeProof(upgradetypes.UpgradedClientKey(int64(lastHeight.GetRevisionHeight())), tmCs.LatestHeight.GetRevisionHeight()) + upgradedConsensusStateProof, _ = s.chainB.QueryUpgradeProof(upgradetypes.UpgradedConsStateKey(int64(lastHeight.GetRevisionHeight())), tmCs.LatestHeight.GetRevisionHeight()) }, expErr: nil, }, { name: "unsuccessful upgrade: upgrade path not set", setup: func() { - suite.coordinator.CommitBlock(suite.chainB) + s.coordinator.CommitBlock(s.chainB) err := path.EndpointA.UpdateClient() - suite.Require().NoError(err) + s.Require().NoError(err) - cs, found := suite.chainA.App.GetIBCKeeper().ClientKeeper.GetClientState(suite.chainA.GetContext(), path.EndpointA.ClientID) - suite.Require().True(found) + cs, found := s.chainA.App.GetIBCKeeper().ClientKeeper.GetClientState(s.chainA.GetContext(), path.EndpointA.ClientID) + s.Require().True(found) tmCs, ok := cs.(*ibctm.ClientState) - suite.Require().True(ok) + s.Require().True(ok) // set upgrade path to empty tmCs.UpgradePath = []string{} - suite.chainA.App.GetIBCKeeper().ClientKeeper.SetClientState(suite.chainA.GetContext(), path.EndpointA.ClientID, tmCs) + s.chainA.App.GetIBCKeeper().ClientKeeper.SetClientState(s.chainA.GetContext(), path.EndpointA.ClientID, tmCs) - upgradedClientProof, _ = suite.chainB.QueryUpgradeProof(upgradetypes.UpgradedClientKey(int64(lastHeight.GetRevisionHeight())), tmCs.LatestHeight.GetRevisionHeight()) - upgradedConsensusStateProof, _ = suite.chainB.QueryUpgradeProof(upgradetypes.UpgradedConsStateKey(int64(lastHeight.GetRevisionHeight())), tmCs.LatestHeight.GetRevisionHeight()) + upgradedClientProof, _ = s.chainB.QueryUpgradeProof(upgradetypes.UpgradedClientKey(int64(lastHeight.GetRevisionHeight())), tmCs.LatestHeight.GetRevisionHeight()) + upgradedConsensusStateProof, _ = s.chainB.QueryUpgradeProof(upgradetypes.UpgradedConsStateKey(int64(lastHeight.GetRevisionHeight())), tmCs.LatestHeight.GetRevisionHeight()) }, expErr: clienttypes.ErrInvalidUpgradeClient, }, @@ -146,17 +146,17 @@ func (suite *TendermintTestSuite) TestVerifyUpgrade() { setup: func() { upgradedConsState = &solomachine.ConsensusState{} - suite.coordinator.CommitBlock(suite.chainB) + s.coordinator.CommitBlock(s.chainB) err := path.EndpointA.UpdateClient() - suite.Require().NoError(err) + s.Require().NoError(err) - cs, found := suite.chainA.App.GetIBCKeeper().ClientKeeper.GetClientState(suite.chainA.GetContext(), path.EndpointA.ClientID) - suite.Require().True(found) + cs, found := s.chainA.App.GetIBCKeeper().ClientKeeper.GetClientState(s.chainA.GetContext(), path.EndpointA.ClientID) + s.Require().True(found) tmCs, ok := cs.(*ibctm.ClientState) - suite.Require().True(ok) + s.Require().True(ok) - upgradedClientProof, _ = suite.chainB.QueryUpgradeProof(upgradetypes.UpgradedClientKey(int64(lastHeight.GetRevisionHeight())), tmCs.LatestHeight.GetRevisionHeight()) - upgradedConsensusStateProof, _ = suite.chainB.QueryUpgradeProof(upgradetypes.UpgradedConsStateKey(int64(lastHeight.GetRevisionHeight())), tmCs.LatestHeight.GetRevisionHeight()) + upgradedClientProof, _ = s.chainB.QueryUpgradeProof(upgradetypes.UpgradedClientKey(int64(lastHeight.GetRevisionHeight())), tmCs.LatestHeight.GetRevisionHeight()) + upgradedConsensusStateProof, _ = s.chainB.QueryUpgradeProof(upgradetypes.UpgradedConsStateKey(int64(lastHeight.GetRevisionHeight())), tmCs.LatestHeight.GetRevisionHeight()) }, expErr: clienttypes.ErrInvalidConsensus, }, @@ -164,25 +164,25 @@ func (suite *TendermintTestSuite) TestVerifyUpgrade() { name: "unsuccessful upgrade: upgrade 
height revision height is more than the current client revision height", setup: func() { // upgrade Height is 10 blocks from now - lastHeight = clienttypes.NewHeight(0, uint64(suite.chainB.GetContext().BlockHeight()+10)) + lastHeight = clienttypes.NewHeight(0, uint64(s.chainB.GetContext().BlockHeight()+10)) // zero custom fields and store in upgrade store - suite.chainB.GetSimApp().UpgradeKeeper.SetUpgradedClient(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedClientBz) //nolint:errcheck // ignore error for test - suite.chainB.GetSimApp().UpgradeKeeper.SetUpgradedConsensusState(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedConsStateBz) //nolint:errcheck // ignore error for test + s.chainB.GetSimApp().UpgradeKeeper.SetUpgradedClient(s.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedClientBz) //nolint:errcheck // ignore error for test + s.chainB.GetSimApp().UpgradeKeeper.SetUpgradedConsensusState(s.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedConsStateBz) //nolint:errcheck // ignore error for test // commit upgrade store changes and update clients - suite.coordinator.CommitBlock(suite.chainB) + s.coordinator.CommitBlock(s.chainB) err := path.EndpointA.UpdateClient() - suite.Require().NoError(err) + s.Require().NoError(err) - cs, found := suite.chainA.App.GetIBCKeeper().ClientKeeper.GetClientState(suite.chainA.GetContext(), path.EndpointA.ClientID) - suite.Require().True(found) + cs, found := s.chainA.App.GetIBCKeeper().ClientKeeper.GetClientState(s.chainA.GetContext(), path.EndpointA.ClientID) + s.Require().True(found) tmCs, ok := cs.(*ibctm.ClientState) - suite.Require().True(ok) + s.Require().True(ok) - upgradedClientProof, _ = suite.chainB.QueryUpgradeProof(upgradetypes.UpgradedClientKey(int64(lastHeight.GetRevisionHeight())), tmCs.LatestHeight.GetRevisionHeight()) - upgradedConsensusStateProof, _ = suite.chainB.QueryUpgradeProof(upgradetypes.UpgradedConsStateKey(int64(lastHeight.GetRevisionHeight())), tmCs.LatestHeight.GetRevisionHeight()) + upgradedClientProof, _ = s.chainB.QueryUpgradeProof(upgradetypes.UpgradedClientKey(int64(lastHeight.GetRevisionHeight())), tmCs.LatestHeight.GetRevisionHeight()) + upgradedConsensusStateProof, _ = s.chainB.QueryUpgradeProof(upgradetypes.UpgradedConsStateKey(int64(lastHeight.GetRevisionHeight())), tmCs.LatestHeight.GetRevisionHeight()) }, expErr: commitmenttypes.ErrInvalidProof, }, @@ -191,29 +191,29 @@ func (suite *TendermintTestSuite) TestVerifyUpgrade() { setup: func() { // non-zeroed upgrade client upgradedClient = ibctm.NewClientState(newChainID, ibctm.DefaultTrustLevel, trustingPeriod, ubdPeriod+trustingPeriod, maxClockDrift, newClientHeight, commitmenttypes.GetSDKSpecs(), upgradePath) - upgradedClientBz, err = clienttypes.MarshalClientState(suite.chainA.App.AppCodec(), upgradedClient) - suite.Require().NoError(err) + upgradedClientBz, err = clienttypes.MarshalClientState(s.chainA.App.AppCodec(), upgradedClient) + s.Require().NoError(err) // upgrade Height is at next block - lastHeight = clienttypes.NewHeight(0, uint64(suite.chainB.GetContext().BlockHeight()+1)) + lastHeight = clienttypes.NewHeight(0, uint64(s.chainB.GetContext().BlockHeight()+1)) // zero custom fields and store in upgrade store - suite.chainB.GetSimApp().UpgradeKeeper.SetUpgradedClient(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedClientBz) //nolint:errcheck // ignore error for test - 
suite.chainB.GetSimApp().UpgradeKeeper.SetUpgradedConsensusState(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedConsStateBz) //nolint:errcheck // ignore error for test + s.chainB.GetSimApp().UpgradeKeeper.SetUpgradedClient(s.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedClientBz) //nolint:errcheck // ignore error for test + s.chainB.GetSimApp().UpgradeKeeper.SetUpgradedConsensusState(s.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedConsStateBz) //nolint:errcheck // ignore error for test // commit upgrade store changes and update clients - suite.coordinator.CommitBlock(suite.chainB) + s.coordinator.CommitBlock(s.chainB) err := path.EndpointA.UpdateClient() - suite.Require().NoError(err) + s.Require().NoError(err) - cs, found := suite.chainA.App.GetIBCKeeper().ClientKeeper.GetClientState(suite.chainA.GetContext(), path.EndpointA.ClientID) - suite.Require().True(found) + cs, found := s.chainA.App.GetIBCKeeper().ClientKeeper.GetClientState(s.chainA.GetContext(), path.EndpointA.ClientID) + s.Require().True(found) tmCs, ok := cs.(*ibctm.ClientState) - suite.Require().True(ok) + s.Require().True(ok) - upgradedClientProof, _ = suite.chainB.QueryUpgradeProof(upgradetypes.UpgradedClientKey(int64(lastHeight.GetRevisionHeight())), tmCs.LatestHeight.GetRevisionHeight()) - upgradedConsensusStateProof, _ = suite.chainB.QueryUpgradeProof(upgradetypes.UpgradedConsStateKey(int64(lastHeight.GetRevisionHeight())), tmCs.LatestHeight.GetRevisionHeight()) + upgradedClientProof, _ = s.chainB.QueryUpgradeProof(upgradetypes.UpgradedClientKey(int64(lastHeight.GetRevisionHeight())), tmCs.LatestHeight.GetRevisionHeight()) + upgradedConsensusStateProof, _ = s.chainB.QueryUpgradeProof(upgradetypes.UpgradedConsStateKey(int64(lastHeight.GetRevisionHeight())), tmCs.LatestHeight.GetRevisionHeight()) }, expErr: commitmenttypes.ErrInvalidProof, }, @@ -221,28 +221,28 @@ func (suite *TendermintTestSuite) TestVerifyUpgrade() { name: "unsuccessful upgrade: chain-specified parameters do not match committed client", setup: func() { // upgrade Height is at next block - lastHeight = clienttypes.NewHeight(0, uint64(suite.chainB.GetContext().BlockHeight()+1)) + lastHeight = clienttypes.NewHeight(0, uint64(s.chainB.GetContext().BlockHeight()+1)) // zero custom fields and store in upgrade store - err := suite.chainB.GetSimApp().UpgradeKeeper.SetUpgradedClient(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedClientBz) - suite.Require().NoError(err) - err = suite.chainB.GetSimApp().UpgradeKeeper.SetUpgradedConsensusState(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedConsStateBz) - suite.Require().NoError(err) + err := s.chainB.GetSimApp().UpgradeKeeper.SetUpgradedClient(s.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedClientBz) + s.Require().NoError(err) + err = s.chainB.GetSimApp().UpgradeKeeper.SetUpgradedConsensusState(s.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedConsStateBz) + s.Require().NoError(err) // change upgradedClient client-specified parameters upgradedClient = ibctm.NewClientState("wrongchainID", ibctm.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, newClientHeight, commitmenttypes.GetSDKSpecs(), upgradePath) - suite.coordinator.CommitBlock(suite.chainB) + s.coordinator.CommitBlock(s.chainB) err = path.EndpointA.UpdateClient() - suite.Require().NoError(err) + s.Require().NoError(err) - cs, found := 
suite.chainA.App.GetIBCKeeper().ClientKeeper.GetClientState(suite.chainA.GetContext(), path.EndpointA.ClientID) - suite.Require().True(found) + cs, found := s.chainA.App.GetIBCKeeper().ClientKeeper.GetClientState(s.chainA.GetContext(), path.EndpointA.ClientID) + s.Require().True(found) tmCs, ok := cs.(*ibctm.ClientState) - suite.Require().True(ok) + s.Require().True(ok) - upgradedClientProof, _ = suite.chainB.QueryUpgradeProof(upgradetypes.UpgradedClientKey(int64(lastHeight.GetRevisionHeight())), tmCs.LatestHeight.GetRevisionHeight()) - upgradedConsensusStateProof, _ = suite.chainB.QueryUpgradeProof(upgradetypes.UpgradedConsStateKey(int64(lastHeight.GetRevisionHeight())), tmCs.LatestHeight.GetRevisionHeight()) + upgradedClientProof, _ = s.chainB.QueryUpgradeProof(upgradetypes.UpgradedClientKey(int64(lastHeight.GetRevisionHeight())), tmCs.LatestHeight.GetRevisionHeight()) + upgradedConsensusStateProof, _ = s.chainB.QueryUpgradeProof(upgradetypes.UpgradedConsStateKey(int64(lastHeight.GetRevisionHeight())), tmCs.LatestHeight.GetRevisionHeight()) }, expErr: commitmenttypes.ErrInvalidProof, }, @@ -250,28 +250,28 @@ func (suite *TendermintTestSuite) TestVerifyUpgrade() { name: "unsuccessful upgrade: client-specified parameters do not match previous client", setup: func() { // upgrade Height is at next block - lastHeight = clienttypes.NewHeight(0, uint64(suite.chainB.GetContext().BlockHeight()+1)) + lastHeight = clienttypes.NewHeight(0, uint64(s.chainB.GetContext().BlockHeight()+1)) // zero custom fields and store in upgrade store - err := suite.chainB.GetSimApp().UpgradeKeeper.SetUpgradedClient(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedClientBz) - suite.Require().NoError(err) - err = suite.chainB.GetSimApp().UpgradeKeeper.SetUpgradedConsensusState(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedConsStateBz) - suite.Require().NoError(err) + err := s.chainB.GetSimApp().UpgradeKeeper.SetUpgradedClient(s.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedClientBz) + s.Require().NoError(err) + err = s.chainB.GetSimApp().UpgradeKeeper.SetUpgradedConsensusState(s.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedConsStateBz) + s.Require().NoError(err) // change upgradedClient client-specified parameters upgradedClient = ibctm.NewClientState(newChainID, ibctm.DefaultTrustLevel, ubdPeriod, ubdPeriod+trustingPeriod, maxClockDrift+5, lastHeight, commitmenttypes.GetSDKSpecs(), upgradePath) - suite.coordinator.CommitBlock(suite.chainB) + s.coordinator.CommitBlock(s.chainB) err = path.EndpointA.UpdateClient() - suite.Require().NoError(err) + s.Require().NoError(err) - cs, found := suite.chainA.App.GetIBCKeeper().ClientKeeper.GetClientState(suite.chainA.GetContext(), path.EndpointA.ClientID) - suite.Require().True(found) + cs, found := s.chainA.App.GetIBCKeeper().ClientKeeper.GetClientState(s.chainA.GetContext(), path.EndpointA.ClientID) + s.Require().True(found) tmCs, ok := cs.(*ibctm.ClientState) - suite.Require().True(ok) + s.Require().True(ok) - upgradedClientProof, _ = suite.chainB.QueryUpgradeProof(upgradetypes.UpgradedClientKey(int64(lastHeight.GetRevisionHeight())), tmCs.LatestHeight.GetRevisionHeight()) - upgradedConsensusStateProof, _ = suite.chainB.QueryUpgradeProof(upgradetypes.UpgradedConsStateKey(int64(lastHeight.GetRevisionHeight())), tmCs.LatestHeight.GetRevisionHeight()) + upgradedClientProof, _ = s.chainB.QueryUpgradeProof(upgradetypes.UpgradedClientKey(int64(lastHeight.GetRevisionHeight())), 
tmCs.LatestHeight.GetRevisionHeight()) + upgradedConsensusStateProof, _ = s.chainB.QueryUpgradeProof(upgradetypes.UpgradedConsStateKey(int64(lastHeight.GetRevisionHeight())), tmCs.LatestHeight.GetRevisionHeight()) }, expErr: commitmenttypes.ErrInvalidProof, }, @@ -291,29 +291,29 @@ func (suite *TendermintTestSuite) TestVerifyUpgrade() { } // commit upgrade store changes and update clients - suite.coordinator.CommitBlock(suite.chainB) + s.coordinator.CommitBlock(s.chainB) err := path.EndpointA.UpdateClient() - suite.Require().NoError(err) + s.Require().NoError(err) - cs, found := suite.chainA.App.GetIBCKeeper().ClientKeeper.GetClientState(suite.chainA.GetContext(), path.EndpointA.ClientID) - suite.Require().True(found) + cs, found := s.chainA.App.GetIBCKeeper().ClientKeeper.GetClientState(s.chainA.GetContext(), path.EndpointA.ClientID) + s.Require().True(found) tmCs, ok := cs.(*ibctm.ClientState) - suite.Require().True(ok) + s.Require().True(ok) - upgradedClientProof, _ = suite.chainB.QueryUpgradeProof(upgradetypes.UpgradedClientKey(int64(lastHeight.GetRevisionHeight())), tmCs.LatestHeight.GetRevisionHeight()) - upgradedConsensusStateProof, _ = suite.chainB.QueryUpgradeProof(upgradetypes.UpgradedConsStateKey(int64(lastHeight.GetRevisionHeight())), tmCs.LatestHeight.GetRevisionHeight()) + upgradedClientProof, _ = s.chainB.QueryUpgradeProof(upgradetypes.UpgradedClientKey(int64(lastHeight.GetRevisionHeight())), tmCs.LatestHeight.GetRevisionHeight()) + upgradedConsensusStateProof, _ = s.chainB.QueryUpgradeProof(upgradetypes.UpgradedConsStateKey(int64(lastHeight.GetRevisionHeight())), tmCs.LatestHeight.GetRevisionHeight()) }, expErr: commitmenttypes.ErrInvalidProof, }, { name: "unsuccessful upgrade: client proof unmarshal failed", setup: func() { - cs, found := suite.chainA.App.GetIBCKeeper().ClientKeeper.GetClientState(suite.chainA.GetContext(), path.EndpointA.ClientID) - suite.Require().True(found) + cs, found := s.chainA.App.GetIBCKeeper().ClientKeeper.GetClientState(s.chainA.GetContext(), path.EndpointA.ClientID) + s.Require().True(found) tmCs, ok := cs.(*ibctm.ClientState) - suite.Require().True(ok) + s.Require().True(ok) - upgradedConsensusStateProof, _ = suite.chainB.QueryUpgradeProof(upgradetypes.UpgradedConsStateKey(int64(lastHeight.GetRevisionHeight())), tmCs.LatestHeight.GetRevisionHeight()) + upgradedConsensusStateProof, _ = s.chainB.QueryUpgradeProof(upgradetypes.UpgradedConsStateKey(int64(lastHeight.GetRevisionHeight())), tmCs.LatestHeight.GetRevisionHeight()) upgradedClientProof = []byte("proof") }, @@ -322,12 +322,12 @@ func (suite *TendermintTestSuite) TestVerifyUpgrade() { { name: "unsuccessful upgrade: consensus state proof unmarshal failed", setup: func() { - cs, found := suite.chainA.App.GetIBCKeeper().ClientKeeper.GetClientState(suite.chainA.GetContext(), path.EndpointA.ClientID) - suite.Require().True(found) + cs, found := s.chainA.App.GetIBCKeeper().ClientKeeper.GetClientState(s.chainA.GetContext(), path.EndpointA.ClientID) + s.Require().True(found) tmCs, ok := cs.(*ibctm.ClientState) - suite.Require().True(ok) + s.Require().True(ok) - upgradedClientProof, _ = suite.chainB.QueryUpgradeProof(upgradetypes.UpgradedClientKey(int64(lastHeight.GetRevisionHeight())), tmCs.LatestHeight.GetRevisionHeight()) + upgradedClientProof, _ = s.chainB.QueryUpgradeProof(upgradetypes.UpgradedClientKey(int64(lastHeight.GetRevisionHeight())), tmCs.LatestHeight.GetRevisionHeight()) upgradedConsensusStateProof = []byte("proof") }, @@ -339,17 +339,17 @@ func (suite *TendermintTestSuite) 
TestVerifyUpgrade() { // do not store upgraded client // upgrade Height is at next block - lastHeight = clienttypes.NewHeight(0, uint64(suite.chainB.GetContext().BlockHeight()+1)) + lastHeight = clienttypes.NewHeight(0, uint64(s.chainB.GetContext().BlockHeight()+1)) - suite.chainB.GetSimApp().UpgradeKeeper.SetUpgradedConsensusState(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedConsStateBz) //nolint:errcheck // ignore error for test + s.chainB.GetSimApp().UpgradeKeeper.SetUpgradedConsensusState(s.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedConsStateBz) //nolint:errcheck // ignore error for test - cs, found := suite.chainA.App.GetIBCKeeper().ClientKeeper.GetClientState(suite.chainA.GetContext(), path.EndpointA.ClientID) - suite.Require().True(found) + cs, found := s.chainA.App.GetIBCKeeper().ClientKeeper.GetClientState(s.chainA.GetContext(), path.EndpointA.ClientID) + s.Require().True(found) tmCs, ok := cs.(*ibctm.ClientState) - suite.Require().True(ok) + s.Require().True(ok) - upgradedClientProof, _ = suite.chainB.QueryUpgradeProof(upgradetypes.UpgradedClientKey(int64(lastHeight.GetRevisionHeight())), tmCs.LatestHeight.GetRevisionHeight()) - upgradedConsensusStateProof, _ = suite.chainB.QueryUpgradeProof(upgradetypes.UpgradedConsStateKey(int64(lastHeight.GetRevisionHeight())), tmCs.LatestHeight.GetRevisionHeight()) + upgradedClientProof, _ = s.chainB.QueryUpgradeProof(upgradetypes.UpgradedClientKey(int64(lastHeight.GetRevisionHeight())), tmCs.LatestHeight.GetRevisionHeight()) + upgradedConsensusStateProof, _ = s.chainB.QueryUpgradeProof(upgradetypes.UpgradedConsStateKey(int64(lastHeight.GetRevisionHeight())), tmCs.LatestHeight.GetRevisionHeight()) }, expErr: commitmenttypes.ErrInvalidProof, }, @@ -359,17 +359,17 @@ func (suite *TendermintTestSuite) TestVerifyUpgrade() { // do not store upgraded client // upgrade Height is at next block - lastHeight = clienttypes.NewHeight(0, uint64(suite.chainB.GetContext().BlockHeight()+1)) + lastHeight = clienttypes.NewHeight(0, uint64(s.chainB.GetContext().BlockHeight()+1)) - suite.chainB.GetSimApp().UpgradeKeeper.SetUpgradedClient(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedClientBz) //nolint:errcheck // ignore error for test + s.chainB.GetSimApp().UpgradeKeeper.SetUpgradedClient(s.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedClientBz) //nolint:errcheck // ignore error for test - cs, found := suite.chainA.App.GetIBCKeeper().ClientKeeper.GetClientState(suite.chainA.GetContext(), path.EndpointA.ClientID) - suite.Require().True(found) + cs, found := s.chainA.App.GetIBCKeeper().ClientKeeper.GetClientState(s.chainA.GetContext(), path.EndpointA.ClientID) + s.Require().True(found) tmCs, ok := cs.(*ibctm.ClientState) - suite.Require().True(ok) + s.Require().True(ok) - upgradedClientProof, _ = suite.chainB.QueryUpgradeProof(upgradetypes.UpgradedClientKey(int64(lastHeight.GetRevisionHeight())), tmCs.LatestHeight.GetRevisionHeight()) - upgradedConsensusStateProof, _ = suite.chainB.QueryUpgradeProof(upgradetypes.UpgradedConsStateKey(int64(lastHeight.GetRevisionHeight())), tmCs.LatestHeight.GetRevisionHeight()) + upgradedClientProof, _ = s.chainB.QueryUpgradeProof(upgradetypes.UpgradedClientKey(int64(lastHeight.GetRevisionHeight())), tmCs.LatestHeight.GetRevisionHeight()) + upgradedConsensusStateProof, _ = s.chainB.QueryUpgradeProof(upgradetypes.UpgradedConsStateKey(int64(lastHeight.GetRevisionHeight())), tmCs.LatestHeight.GetRevisionHeight()) }, expErr: 
commitmenttypes.ErrInvalidProof, }, @@ -377,29 +377,29 @@ func (suite *TendermintTestSuite) TestVerifyUpgrade() { name: "unsuccessful upgrade: client state merkle path is empty", setup: func() { // upgrade Height is at next block - lastHeight = clienttypes.NewHeight(0, uint64(suite.chainB.GetContext().BlockHeight()+1)) + lastHeight = clienttypes.NewHeight(0, uint64(s.chainB.GetContext().BlockHeight()+1)) // zero custom fields and store in upgrade store - suite.chainB.GetSimApp().UpgradeKeeper.SetUpgradedClient(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedClientBz) //nolint:errcheck // ignore error for test + s.chainB.GetSimApp().UpgradeKeeper.SetUpgradedClient(s.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedClientBz) //nolint:errcheck // ignore error for test // commit upgrade store changes and update clients - suite.coordinator.CommitBlock(suite.chainB) + s.coordinator.CommitBlock(s.chainB) err := path.EndpointA.UpdateClient() - suite.Require().NoError(err) + s.Require().NoError(err) - cs, found := suite.chainA.App.GetIBCKeeper().ClientKeeper.GetClientState(suite.chainA.GetContext(), path.EndpointA.ClientID) - suite.Require().True(found) + cs, found := s.chainA.App.GetIBCKeeper().ClientKeeper.GetClientState(s.chainA.GetContext(), path.EndpointA.ClientID) + s.Require().True(found) tmCs, ok := cs.(*ibctm.ClientState) - suite.Require().True(ok) + s.Require().True(ok) - upgradedClientProof, _ = suite.chainB.QueryUpgradeProof(upgradetypes.UpgradedClientKey(int64(lastHeight.GetRevisionHeight())), tmCs.LatestHeight.GetRevisionHeight()) - upgradedConsensusStateProof, _ = suite.chainB.QueryUpgradeProof(upgradetypes.UpgradedConsStateKey(int64(lastHeight.GetRevisionHeight())), tmCs.LatestHeight.GetRevisionHeight()) + upgradedClientProof, _ = s.chainB.QueryUpgradeProof(upgradetypes.UpgradedClientKey(int64(lastHeight.GetRevisionHeight())), tmCs.LatestHeight.GetRevisionHeight()) + upgradedConsensusStateProof, _ = s.chainB.QueryUpgradeProof(upgradetypes.UpgradedConsStateKey(int64(lastHeight.GetRevisionHeight())), tmCs.LatestHeight.GetRevisionHeight()) // SetClientState with empty string upgrade path tmClient, ok := cs.(*ibctm.ClientState) - suite.Require().True(ok) + s.Require().True(ok) tmClient.UpgradePath = []string{""} - suite.chainA.App.GetIBCKeeper().ClientKeeper.SetClientState(suite.chainA.GetContext(), path.EndpointA.ClientID, tmClient) + s.chainA.App.GetIBCKeeper().ClientKeeper.SetClientState(s.chainA.GetContext(), path.EndpointA.ClientID, tmClient) }, expErr: errors.New("client state proof failed"), }, @@ -407,23 +407,23 @@ func (suite *TendermintTestSuite) TestVerifyUpgrade() { name: "unsuccessful upgrade: upgraded height is not greater than current height", setup: func() { // upgrade Height is at next block - lastHeight = clienttypes.NewHeight(0, uint64(suite.chainB.GetContext().BlockHeight()+1)) + lastHeight = clienttypes.NewHeight(0, uint64(s.chainB.GetContext().BlockHeight()+1)) // zero custom fields and store in upgrade store - suite.chainB.GetSimApp().UpgradeKeeper.SetUpgradedClient(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedClientBz) //nolint:errcheck // ignore error for test + s.chainB.GetSimApp().UpgradeKeeper.SetUpgradedClient(s.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedClientBz) //nolint:errcheck // ignore error for test // commit upgrade store changes and update clients - suite.coordinator.CommitBlock(suite.chainB) + s.coordinator.CommitBlock(s.chainB) err := 
path.EndpointA.UpdateClient() - suite.Require().NoError(err) + s.Require().NoError(err) - cs, found := suite.chainA.App.GetIBCKeeper().ClientKeeper.GetClientState(suite.chainA.GetContext(), path.EndpointA.ClientID) - suite.Require().True(found) + cs, found := s.chainA.App.GetIBCKeeper().ClientKeeper.GetClientState(s.chainA.GetContext(), path.EndpointA.ClientID) + s.Require().True(found) tmCs, ok := cs.(*ibctm.ClientState) - suite.Require().True(ok) + s.Require().True(ok) - upgradedClientProof, _ = suite.chainB.QueryUpgradeProof(upgradetypes.UpgradedClientKey(int64(lastHeight.GetRevisionHeight())), tmCs.LatestHeight.GetRevisionHeight()) - upgradedConsensusStateProof, _ = suite.chainB.QueryUpgradeProof(upgradetypes.UpgradedConsStateKey(int64(lastHeight.GetRevisionHeight())), tmCs.LatestHeight.GetRevisionHeight()) + upgradedClientProof, _ = s.chainB.QueryUpgradeProof(upgradetypes.UpgradedClientKey(int64(lastHeight.GetRevisionHeight())), tmCs.LatestHeight.GetRevisionHeight()) + upgradedConsensusStateProof, _ = s.chainB.QueryUpgradeProof(upgradetypes.UpgradedConsStateKey(int64(lastHeight.GetRevisionHeight())), tmCs.LatestHeight.GetRevisionHeight()) }, expErr: errors.New("consensus state proof failed"), }, @@ -431,23 +431,23 @@ func (suite *TendermintTestSuite) TestVerifyUpgrade() { name: "unsuccessful upgrade: consensus state for upgrade height cannot be found", setup: func() { // upgrade Height is at next block - lastHeight = clienttypes.NewHeight(0, uint64(suite.chainB.GetContext().BlockHeight()+100)) + lastHeight = clienttypes.NewHeight(0, uint64(s.chainB.GetContext().BlockHeight()+100)) // zero custom fields and store in upgrade store - suite.chainB.GetSimApp().UpgradeKeeper.SetUpgradedClient(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedClientBz) //nolint:errcheck // ignore error for + s.chainB.GetSimApp().UpgradeKeeper.SetUpgradedClient(s.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedClientBz) //nolint:errcheck // ignore error for // commit upgrade store changes and update clients - suite.coordinator.CommitBlock(suite.chainB) + s.coordinator.CommitBlock(s.chainB) err := path.EndpointA.UpdateClient() - suite.Require().NoError(err) + s.Require().NoError(err) - cs, found := suite.chainA.App.GetIBCKeeper().ClientKeeper.GetClientState(suite.chainA.GetContext(), path.EndpointA.ClientID) - suite.Require().True(found) + cs, found := s.chainA.App.GetIBCKeeper().ClientKeeper.GetClientState(s.chainA.GetContext(), path.EndpointA.ClientID) + s.Require().True(found) tmCs, ok := cs.(*ibctm.ClientState) - suite.Require().True(ok) + s.Require().True(ok) - upgradedClientProof, _ = suite.chainB.QueryUpgradeProof(upgradetypes.UpgradedClientKey(int64(lastHeight.GetRevisionHeight())), tmCs.LatestHeight.GetRevisionHeight()) - upgradedConsensusStateProof, _ = suite.chainB.QueryUpgradeProof(upgradetypes.UpgradedConsStateKey(int64(lastHeight.GetRevisionHeight())), tmCs.LatestHeight.GetRevisionHeight()) + upgradedClientProof, _ = s.chainB.QueryUpgradeProof(upgradetypes.UpgradedClientKey(int64(lastHeight.GetRevisionHeight())), tmCs.LatestHeight.GetRevisionHeight()) + upgradedConsensusStateProof, _ = s.chainB.QueryUpgradeProof(upgradetypes.UpgradedConsStateKey(int64(lastHeight.GetRevisionHeight())), tmCs.LatestHeight.GetRevisionHeight()) }, expErr: commitmenttypes.ErrInvalidProof, }, @@ -455,23 +455,23 @@ func (suite *TendermintTestSuite) TestVerifyUpgrade() { name: "unsuccessful upgrade: client is expired", setup: func() { // zero custom fields and store in upgrade 
store - suite.chainB.GetSimApp().UpgradeKeeper.SetUpgradedClient(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedClientBz) //nolint:errcheck // ignore error for test + s.chainB.GetSimApp().UpgradeKeeper.SetUpgradedClient(s.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedClientBz) //nolint:errcheck // ignore error for test // commit upgrade store changes and update clients - suite.coordinator.CommitBlock(suite.chainB) + s.coordinator.CommitBlock(s.chainB) err := path.EndpointA.UpdateClient() - suite.Require().NoError(err) + s.Require().NoError(err) // expire chainB's client - suite.chainA.ExpireClient(ubdPeriod) + s.chainA.ExpireClient(ubdPeriod) - cs, found := suite.chainA.App.GetIBCKeeper().ClientKeeper.GetClientState(suite.chainA.GetContext(), path.EndpointA.ClientID) - suite.Require().True(found) + cs, found := s.chainA.App.GetIBCKeeper().ClientKeeper.GetClientState(s.chainA.GetContext(), path.EndpointA.ClientID) + s.Require().True(found) tmCs, ok := cs.(*ibctm.ClientState) - suite.Require().True(ok) + s.Require().True(ok) - upgradedClientProof, _ = suite.chainB.QueryUpgradeProof(upgradetypes.UpgradedClientKey(int64(lastHeight.GetRevisionHeight())), tmCs.LatestHeight.GetRevisionHeight()) - upgradedConsensusStateProof, _ = suite.chainB.QueryUpgradeProof(upgradetypes.UpgradedConsStateKey(int64(lastHeight.GetRevisionHeight())), tmCs.LatestHeight.GetRevisionHeight()) + upgradedClientProof, _ = s.chainB.QueryUpgradeProof(upgradetypes.UpgradedClientKey(int64(lastHeight.GetRevisionHeight())), tmCs.LatestHeight.GetRevisionHeight()) + upgradedConsensusStateProof, _ = s.chainB.QueryUpgradeProof(upgradetypes.UpgradedConsStateKey(int64(lastHeight.GetRevisionHeight())), tmCs.LatestHeight.GetRevisionHeight()) }, expErr: commitmenttypes.ErrInvalidProof, }, @@ -479,23 +479,23 @@ func (suite *TendermintTestSuite) TestVerifyUpgrade() { name: "unsuccessful upgrade: updated unbonding period is equal to trusting period", setup: func() { // upgrade Height is at next block - lastHeight = clienttypes.NewHeight(0, uint64(suite.chainB.GetContext().BlockHeight()+1)) + lastHeight = clienttypes.NewHeight(0, uint64(s.chainB.GetContext().BlockHeight()+1)) // zero custom fields and store in upgrade store - suite.chainB.GetSimApp().UpgradeKeeper.SetUpgradedClient(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedClientBz) //nolint:errcheck // ignore error for test + s.chainB.GetSimApp().UpgradeKeeper.SetUpgradedClient(s.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedClientBz) //nolint:errcheck // ignore error for test // commit upgrade store changes and update clients - suite.coordinator.CommitBlock(suite.chainB) + s.coordinator.CommitBlock(s.chainB) err := path.EndpointA.UpdateClient() - suite.Require().NoError(err) + s.Require().NoError(err) - cs, found := suite.chainA.App.GetIBCKeeper().ClientKeeper.GetClientState(suite.chainA.GetContext(), path.EndpointA.ClientID) - suite.Require().True(found) + cs, found := s.chainA.App.GetIBCKeeper().ClientKeeper.GetClientState(s.chainA.GetContext(), path.EndpointA.ClientID) + s.Require().True(found) tmCs, ok := cs.(*ibctm.ClientState) - suite.Require().True(ok) + s.Require().True(ok) - upgradedClientProof, _ = suite.chainB.QueryUpgradeProof(upgradetypes.UpgradedClientKey(int64(lastHeight.GetRevisionHeight())), tmCs.LatestHeight.GetRevisionHeight()) - upgradedConsensusStateProof, _ = suite.chainB.QueryUpgradeProof(upgradetypes.UpgradedConsStateKey(int64(lastHeight.GetRevisionHeight())), 
tmCs.LatestHeight.GetRevisionHeight()) + upgradedClientProof, _ = s.chainB.QueryUpgradeProof(upgradetypes.UpgradedClientKey(int64(lastHeight.GetRevisionHeight())), tmCs.LatestHeight.GetRevisionHeight()) + upgradedConsensusStateProof, _ = s.chainB.QueryUpgradeProof(upgradetypes.UpgradedConsStateKey(int64(lastHeight.GetRevisionHeight())), tmCs.LatestHeight.GetRevisionHeight()) }, expErr: commitmenttypes.ErrInvalidProof, }, @@ -504,28 +504,28 @@ func (suite *TendermintTestSuite) TestVerifyUpgrade() { setup: func() { // new client has smaller unbonding period such that old trusting period is no longer valid upgradedClient = ibctm.NewClientState(newChainID, ibctm.DefaultTrustLevel, trustingPeriod, trustingPeriod, maxClockDrift, newClientHeight, commitmenttypes.GetSDKSpecs(), upgradePath) - upgradedClientBz, err = clienttypes.MarshalClientState(suite.chainA.App.AppCodec(), upgradedClient) - suite.Require().NoError(err) + upgradedClientBz, err = clienttypes.MarshalClientState(s.chainA.App.AppCodec(), upgradedClient) + s.Require().NoError(err) // upgrade Height is at next block - lastHeight = clienttypes.NewHeight(0, uint64(suite.chainB.GetContext().BlockHeight()+1)) + lastHeight = clienttypes.NewHeight(0, uint64(s.chainB.GetContext().BlockHeight()+1)) // zero custom fields and store in upgrade store - suite.chainB.GetSimApp().UpgradeKeeper.SetUpgradedClient(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedClientBz) //nolint:errcheck // ignore error for testing - suite.chainB.GetSimApp().UpgradeKeeper.SetUpgradedConsensusState(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedConsStateBz) //nolint:errcheck // ignore error for testing + s.chainB.GetSimApp().UpgradeKeeper.SetUpgradedClient(s.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedClientBz) //nolint:errcheck // ignore error for testing + s.chainB.GetSimApp().UpgradeKeeper.SetUpgradedConsensusState(s.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedConsStateBz) //nolint:errcheck // ignore error for testing // commit upgrade store changes and update clients - suite.coordinator.CommitBlock(suite.chainB) + s.coordinator.CommitBlock(s.chainB) err := path.EndpointA.UpdateClient() - suite.Require().NoError(err) + s.Require().NoError(err) - cs, found := suite.chainA.App.GetIBCKeeper().ClientKeeper.GetClientState(suite.chainA.GetContext(), path.EndpointA.ClientID) - suite.Require().True(found) + cs, found := s.chainA.App.GetIBCKeeper().ClientKeeper.GetClientState(s.chainA.GetContext(), path.EndpointA.ClientID) + s.Require().True(found) tmCs, ok := cs.(*ibctm.ClientState) - suite.Require().True(ok) + s.Require().True(ok) - upgradedClientProof, _ = suite.chainB.QueryUpgradeProof(upgradetypes.UpgradedClientKey(int64(lastHeight.GetRevisionHeight())), tmCs.LatestHeight.GetRevisionHeight()) - upgradedConsensusStateProof, _ = suite.chainB.QueryUpgradeProof(upgradetypes.UpgradedConsStateKey(int64(lastHeight.GetRevisionHeight())), tmCs.LatestHeight.GetRevisionHeight()) + upgradedClientProof, _ = s.chainB.QueryUpgradeProof(upgradetypes.UpgradedClientKey(int64(lastHeight.GetRevisionHeight())), tmCs.LatestHeight.GetRevisionHeight()) + upgradedConsensusStateProof, _ = s.chainB.QueryUpgradeProof(upgradetypes.UpgradedConsStateKey(int64(lastHeight.GetRevisionHeight())), tmCs.LatestHeight.GetRevisionHeight()) }, expErr: commitmenttypes.ErrInvalidProof, }, @@ -533,51 +533,51 @@ func (suite *TendermintTestSuite) TestVerifyUpgrade() { name: "unsuccessful upgrade: consensus state not 
found for latest height", setup: func() { // upgrade Height is at next block - lastHeight = clienttypes.NewHeight(0, uint64(suite.chainB.GetContext().BlockHeight()+1)) + lastHeight = clienttypes.NewHeight(0, uint64(s.chainB.GetContext().BlockHeight()+1)) // zero custom fields and store in upgrade store - suite.chainB.GetSimApp().UpgradeKeeper.SetUpgradedClient(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedClientBz) //nolint:errcheck // ignore error for test - suite.chainB.GetSimApp().UpgradeKeeper.SetUpgradedConsensusState(suite.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedConsStateBz) //nolint:errcheck // ignore error for test + s.chainB.GetSimApp().UpgradeKeeper.SetUpgradedClient(s.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedClientBz) //nolint:errcheck // ignore error for test + s.chainB.GetSimApp().UpgradeKeeper.SetUpgradedConsensusState(s.chainB.GetContext(), int64(lastHeight.GetRevisionHeight()), upgradedConsStateBz) //nolint:errcheck // ignore error for test // commit upgrade store changes and update clients - suite.coordinator.CommitBlock(suite.chainB) + s.coordinator.CommitBlock(s.chainB) err := path.EndpointA.UpdateClient() - suite.Require().NoError(err) + s.Require().NoError(err) - cs, found := suite.chainA.App.GetIBCKeeper().ClientKeeper.GetClientState(suite.chainA.GetContext(), path.EndpointA.ClientID) - suite.Require().True(found) + cs, found := s.chainA.App.GetIBCKeeper().ClientKeeper.GetClientState(s.chainA.GetContext(), path.EndpointA.ClientID) + s.Require().True(found) tmCs, ok := cs.(*ibctm.ClientState) - suite.Require().True(ok) + s.Require().True(ok) revisionHeight := tmCs.LatestHeight.GetRevisionHeight() // set latest height to a height where consensus state does not exist tmCs.LatestHeight = clienttypes.NewHeight(tmCs.LatestHeight.GetRevisionNumber(), tmCs.LatestHeight.GetRevisionHeight()+5) - suite.chainA.App.GetIBCKeeper().ClientKeeper.SetClientState(suite.chainA.GetContext(), path.EndpointA.ClientID, tmCs) + s.chainA.App.GetIBCKeeper().ClientKeeper.SetClientState(s.chainA.GetContext(), path.EndpointA.ClientID, tmCs) - upgradedClientProof, _ = suite.chainB.QueryUpgradeProof(upgradetypes.UpgradedClientKey(int64(lastHeight.GetRevisionHeight())), revisionHeight) - upgradedConsensusStateProof, _ = suite.chainB.QueryUpgradeProof(upgradetypes.UpgradedConsStateKey(int64(lastHeight.GetRevisionHeight())), revisionHeight) + upgradedClientProof, _ = s.chainB.QueryUpgradeProof(upgradetypes.UpgradedClientKey(int64(lastHeight.GetRevisionHeight())), revisionHeight) + upgradedConsensusStateProof, _ = s.chainB.QueryUpgradeProof(upgradetypes.UpgradedConsStateKey(int64(lastHeight.GetRevisionHeight())), revisionHeight) }, expErr: clienttypes.ErrConsensusStateNotFound, }, } for _, tc := range testCases { - suite.Run(tc.name, func() { + s.Run(tc.name, func() { // reset suite - suite.SetupTest() - path = ibctesting.NewPath(suite.chainA, suite.chainB) + s.SetupTest() + path = ibctesting.NewPath(s.chainA, s.chainB) path.SetupClients() clientState, ok := path.EndpointA.GetClientState().(*ibctm.ClientState) - suite.Require().True(ok) + s.Require().True(ok) revisionNumber := clienttypes.ParseChainID(clientState.ChainId) var err error newChainID, err = clienttypes.SetRevisionNumber(clientState.ChainId, revisionNumber+1) - suite.Require().NoError(err) + s.Require().NoError(err) upgradedClient = ibctm.NewClientState(newChainID, ibctm.DefaultTrustLevel, trustingPeriod, ubdPeriod+trustingPeriod, maxClockDrift, 
clienttypes.NewHeight(revisionNumber+1, clientState.LatestHeight.GetRevisionHeight()+1), commitmenttypes.GetSDKSpecs(), upgradePath) @@ -585,20 +585,20 @@ func (suite *TendermintTestSuite) TestVerifyUpgrade() { upgradedClient = upgraded.ZeroCustomFields() } - upgradedClientBz, err = clienttypes.MarshalClientState(suite.chainA.App.AppCodec(), upgradedClient) - suite.Require().NoError(err) + upgradedClientBz, err = clienttypes.MarshalClientState(s.chainA.App.AppCodec(), upgradedClient) + s.Require().NoError(err) upgradedConsState = &ibctm.ConsensusState{ NextValidatorsHash: []byte("nextValsHash"), } - upgradedConsStateBz, err = clienttypes.MarshalConsensusState(suite.chainA.App.AppCodec(), upgradedConsState) - suite.Require().NoError(err) + upgradedConsStateBz, err = clienttypes.MarshalConsensusState(s.chainA.App.AppCodec(), upgradedConsState) + s.Require().NoError(err) tc.setup() - cs, ok := suite.chainA.GetClientState(path.EndpointA.ClientID).(*ibctm.ClientState) - suite.Require().True(ok) - clientStore := suite.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(suite.chainA.GetContext(), path.EndpointA.ClientID) + cs, ok := s.chainA.GetClientState(path.EndpointA.ClientID).(*ibctm.ClientState) + s.Require().True(ok) + clientStore := s.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(s.chainA.GetContext(), path.EndpointA.ClientID) // Call ZeroCustomFields on upgraded clients to clear any client-chosen parameters in test-case upgradedClient if upgraded, ok := upgradedClient.(*ibctm.ClientState); ok { @@ -606,8 +606,8 @@ func (suite *TendermintTestSuite) TestVerifyUpgrade() { } err = cs.VerifyUpgradeAndUpdateState( - suite.chainA.GetContext(), - suite.cdc, + s.chainA.GetContext(), + s.cdc, clientStore, upgradedClient, upgradedConsState, @@ -616,76 +616,76 @@ func (suite *TendermintTestSuite) TestVerifyUpgrade() { ) if tc.expErr == nil { - suite.Require().NoError(err, "verify upgrade failed on valid case: %s", tc.name) + s.Require().NoError(err, "verify upgrade failed on valid case: %s", tc.name) - clientState, ok := suite.chainA.GetClientState(path.EndpointA.ClientID).(*ibctm.ClientState) - suite.Require().True(ok) - suite.Require().NotNil(clientState, "verify upgrade failed on valid case: %s", tc.name) + clientState, ok := s.chainA.GetClientState(path.EndpointA.ClientID).(*ibctm.ClientState) + s.Require().True(ok) + s.Require().NotNil(clientState, "verify upgrade failed on valid case: %s", tc.name) - consensusState, found := suite.chainA.GetConsensusState(path.EndpointA.ClientID, clientState.LatestHeight) - suite.Require().NotNil(consensusState, "verify upgrade failed on valid case: %s", tc.name) - suite.Require().True(found) + consensusState, found := s.chainA.GetConsensusState(path.EndpointA.ClientID, clientState.LatestHeight) + s.Require().NotNil(consensusState, "verify upgrade failed on valid case: %s", tc.name) + s.Require().True(found) } else { - suite.Require().ErrorContains(err, tc.expErr.Error(), "verify upgrade passed on invalid case: %s", tc.name) + s.Require().ErrorContains(err, tc.expErr.Error(), "verify upgrade passed on invalid case: %s", tc.name) } }) } } -func (suite *TendermintTestSuite) TestVerifyUpgradeWithNewUnbonding() { - suite.SetupTest() - path := ibctesting.NewPath(suite.chainA, suite.chainB) +func (s *TendermintTestSuite) TestVerifyUpgradeWithNewUnbonding() { + s.SetupTest() + path := ibctesting.NewPath(s.chainA, s.chainB) path.SetupClients() clientState, ok := path.EndpointA.GetClientState().(*ibctm.ClientState) - suite.Require().True(ok) + s.Require().True(ok) 
newUnbondingPeriod := time.Hour * 24 * 7 * 2 // update the unbonding period to two weeks upgradeClient := ibctm.NewClientState(clientState.ChainId, ibctm.DefaultTrustLevel, trustingPeriod, newUnbondingPeriod, maxClockDrift, clienttypes.NewHeight(1, clientState.LatestHeight.GetRevisionHeight()+1), commitmenttypes.GetSDKSpecs(), upgradePath) - upgradedClientBz, err := clienttypes.MarshalClientState(suite.chainA.App.AppCodec(), upgradeClient.ZeroCustomFields()) - suite.Require().NoError(err) + upgradedClientBz, err := clienttypes.MarshalClientState(s.chainA.App.AppCodec(), upgradeClient.ZeroCustomFields()) + s.Require().NoError(err) upgradedConsState := &ibctm.ConsensusState{NextValidatorsHash: []byte("nextValsHash")} // mocked consensus state - upgradedConsStateBz, err := clienttypes.MarshalConsensusState(suite.chainA.App.AppCodec(), upgradedConsState) - suite.Require().NoError(err) + upgradedConsStateBz, err := clienttypes.MarshalConsensusState(s.chainA.App.AppCodec(), upgradedConsState) + s.Require().NoError(err) // zero custom fields and store in chainB upgrade store - upgradeHeight := clienttypes.NewHeight(0, uint64(suite.chainB.GetContext().BlockHeight()+1)) // upgrade is at next block height - err = suite.chainB.GetSimApp().UpgradeKeeper.SetUpgradedClient(suite.chainB.GetContext(), int64(upgradeHeight.GetRevisionHeight()), upgradedClientBz) - suite.Require().NoError(err) - err = suite.chainB.GetSimApp().UpgradeKeeper.SetUpgradedConsensusState(suite.chainB.GetContext(), int64(upgradeHeight.GetRevisionHeight()), upgradedConsStateBz) - suite.Require().NoError(err) + upgradeHeight := clienttypes.NewHeight(0, uint64(s.chainB.GetContext().BlockHeight()+1)) // upgrade is at next block height + err = s.chainB.GetSimApp().UpgradeKeeper.SetUpgradedClient(s.chainB.GetContext(), int64(upgradeHeight.GetRevisionHeight()), upgradedClientBz) + s.Require().NoError(err) + err = s.chainB.GetSimApp().UpgradeKeeper.SetUpgradedConsensusState(s.chainB.GetContext(), int64(upgradeHeight.GetRevisionHeight()), upgradedConsStateBz) + s.Require().NoError(err) // commit upgrade store changes on chainB and update client on chainA - suite.coordinator.CommitBlock(suite.chainB) + s.coordinator.CommitBlock(s.chainB) err = path.EndpointA.UpdateClient() - suite.Require().NoError(err) + s.Require().NoError(err) - upgradedClientProof, _ := suite.chainB.QueryUpgradeProof(upgradetypes.UpgradedClientKey(int64(upgradeHeight.GetRevisionHeight())), uint64(suite.chainB.LatestCommittedHeader.Header.Height)) - upgradedConsensusStateProof, _ := suite.chainB.QueryUpgradeProof(upgradetypes.UpgradedConsStateKey(int64(upgradeHeight.GetRevisionHeight())), uint64(suite.chainB.LatestCommittedHeader.Header.Height)) + upgradedClientProof, _ := s.chainB.QueryUpgradeProof(upgradetypes.UpgradedClientKey(int64(upgradeHeight.GetRevisionHeight())), uint64(s.chainB.LatestCommittedHeader.Header.Height)) + upgradedConsensusStateProof, _ := s.chainB.QueryUpgradeProof(upgradetypes.UpgradedConsStateKey(int64(upgradeHeight.GetRevisionHeight())), uint64(s.chainB.LatestCommittedHeader.Header.Height)) tmClientState, ok := path.EndpointA.GetClientState().(*ibctm.ClientState) - suite.Require().True(ok) + s.Require().True(ok) - clientStore := suite.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(suite.chainA.GetContext(), path.EndpointA.ClientID) + clientStore := s.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(s.chainA.GetContext(), path.EndpointA.ClientID) err = tmClientState.VerifyUpgradeAndUpdateState( - suite.chainA.GetContext(), - suite.cdc, + 
s.chainA.GetContext(), + s.cdc, clientStore, upgradeClient, upgradedConsState, upgradedClientProof, upgradedConsensusStateProof, ) - suite.Require().NoError(err) + s.Require().NoError(err) upgradedClient, ok := path.EndpointA.GetClientState().(*ibctm.ClientState) - suite.Require().True(ok) + s.Require().True(ok) // assert the unbonding period and the trusting period have been updated correctly - suite.Require().Equal(newUnbondingPeriod, upgradedClient.UnbondingPeriod) + s.Require().Equal(newUnbondingPeriod, upgradedClient.UnbondingPeriod) // expected trusting period = trustingPeriod * newUnbonding / originalUnbonding (224 hours = 9 days and 8 hours) origUnbondingDec := sdkmath.LegacyNewDec(ubdPeriod.Nanoseconds()) @@ -693,5 +693,5 @@ func (suite *TendermintTestSuite) TestVerifyUpgradeWithNewUnbonding() { trustingPeriodDec := sdkmath.LegacyNewDec(trustingPeriod.Nanoseconds()) expTrustingPeriod := trustingPeriodDec.Mul(newUnbondingDec).Quo(origUnbondingDec) - suite.Require().Equal(time.Duration(expTrustingPeriod.TruncateInt64()), upgradedClient.TrustingPeriod) + s.Require().Equal(time.Duration(expTrustingPeriod.TruncateInt64()), upgradedClient.TrustingPeriod) } diff --git a/modules/light-clients/08-wasm/CHANGELOG.md b/modules/light-clients/08-wasm/CHANGELOG.md deleted file mode 100644 index 754f5f21685..00000000000 --- a/modules/light-clients/08-wasm/CHANGELOG.md +++ /dev/null @@ -1,154 +0,0 @@ - - -# Changelog - -## [[Unreleased]] - -### Dependencies - -* [\#7114](https://github.com/cosmos/ibc-go/pull/7114) Bump WasmVM to v2.1.2. -* [\#6828](https://github.com/cosmos/ibc-go/pull/6828) Bump Cosmos SDK to v0.50.9. -* [\#7247](https://github.com/cosmos/ibc-go/pull/7247) Bump CometBFT to v0.38.12. - -### API Breaking - -* [\#6937](https://github.com/cosmos/ibc-go/pull/6937) Remove `WasmConsensusHost` implementation of the `ConsensusHost` interface. - -### State Machine Breaking - -### Improvements - -### Features - -### Bug Fixes - - -## [v0.4.1+ibc-go-v8.4-wasmvm-v2.0](https://github.com/cosmos/ibc-go/releases/tag/modules%2Flight-clients%2F08-wasm%2Fv0.4.1%2Bibc-go-v8.4-wasmvm-v2.0) - 2024-07-31 - -### Dependencies - -* [\#6992](https://github.com/cosmos/ibc-go/pull/6992) Bump ibc-go to v8.4.0 and CosmosSDK to v0.50.7. - -### API Breaking - -* [\#6923](https://github.com/cosmos/ibc-go/pull/6923) The JSON message API for `VerifyMembershipMsg` and `VerifyNonMembershipMsg` payloads for client contract `SudoMsg` has been updated. The field `path` has been changed to `merkle_path`. This change requires updates to 08-wasm client contracts for integration. - - -## [v0.3.0+ibc-go-v8.3-wasmvm-v2.0](https://github.com/cosmos/ibc-go/releases/tag/modules%2Flight-clients%2F08-wasm%2Fv0.3.0%2Bibc-go-v8.3-wasmvm-v2.0) - 2024-07-17 - -### Dependencies - -* [\#6807](https://github.com/cosmos/ibc-go/pull/6807) Update wasmvm to v2.1.0. - -### API Breaking - -* [\#6644](https://github.com/cosmos/ibc-go/pull/6644) Add `v2.MerklePath` for contract api `VerifyMembershipMsg` and `VerifyNonMembershipMsg` structs. Note, this requires a migration for existing client contracts to correctly handle deserialization of `MerklePath.KeyPath` which has changed from `[]string` to `[][]bytes`. In JSON message structures this change is reflected as the `KeyPath` being a marshalled as a list of base64 encoded byte strings. This change supports proving values stored under keys which contain non-utf8 encoded symbols. See migration docs for more details. 
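
To make the contract-API entries above concrete (the `path` field of the `VerifyMembershipMsg`/`VerifyNonMembershipMsg` payloads renamed to `merkle_path`, and `MerklePath.KeyPath` moving from `[]string` to `[][]byte`, surfacing in JSON as base64-encoded strings), a minimal Go sketch is shown below. The struct shapes and the extra `value` field are simplified assumptions for illustration, not the module's exact types; the base64 output is just standard `encoding/json` behavior for `[]byte`.

```go
package main

import (
	"encoding/json"
	"fmt"
)

// MerklePathV2 models a key path whose elements are raw bytes ([][]byte),
// as described in the changelog entries above. Field names are assumed.
type MerklePathV2 struct {
	KeyPath [][]byte `json:"key_path"`
}

// VerifyMembershipMsg sketches only the renamed field: "path" became "merkle_path".
type VerifyMembershipMsg struct {
	MerklePath MerklePathV2 `json:"merkle_path"`
	Value      []byte       `json:"value"`
}

func main() {
	msg := VerifyMembershipMsg{
		// One UTF-8 key segment and one non-UTF-8 segment, which the
		// []string representation could not carry safely.
		MerklePath: MerklePathV2{KeyPath: [][]byte{[]byte("ibc"), {0x01, 0xff}}},
		Value:      []byte("stored-value"),
	}
	out, _ := json.Marshal(msg)
	// encoding/json emits each []byte element as a base64 string:
	// {"merkle_path":{"key_path":["aWJj","Af8="]},"value":"c3RvcmVkLXZhbHVl"}
	fmt.Println(string(out))
}
```
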
- -### Improvements - -* [\#5923](https://github.com/cosmos/ibc-go/pull/5923) imp: add 08-wasm build opts for libwasmvm linking disabled - -### Features - -* [\#6055](https://github.com/cosmos/ibc-go/pull/6055) feat: add 08-wasm `ConsensusHost` implementation for custom self client/consensus state validation in 03-connection handshake. - -### Bug Fixes - -* [\#6815](https://github.com/cosmos/ibc-go/pull/6815) Decode to bytes the hex-encoded checksum argument of the `migrate-contract` CLI. - - -## [v0.3.1+ibc-go-v7.4-wasmvm-v1.5](https://github.com/cosmos/ibc-go/releases/tag/modules%2Flight-clients%2F08-wasm%2Fv0.3.1%2Bibc-go-v7.4-wasmvm-v1.5) - 2024-07-31 - -### Dependencies - -* [\#6996](https://github.com/cosmos/ibc-go/pull/6992) Bump ibc-go to v7.4.0, CosmosSDK to v0.47.8 and CometBFT to v0.37.4. - -### API Breaking - -* [\#6923](https://github.com/cosmos/ibc-go/pull/6923) The JSON message API for `VerifyMembershipMsg` and `VerifyNonMembershipMsg` payloads for client contract `SudoMsg` has been updated. The field `path` has been changed to `merkle_path`. This change requires updates to 08-wasm client contracts for integration. - - -## [v0.2.0+ibc-go-v7.3-wasmvm-v1.5](https://github.com/cosmos/ibc-go/releases/tag/modules%2Flight-clients%2F08-wasm%2Fv0.2.0%2Bibc-go-v7.3-wasmvm-v1.5) - 2024-07-17 - -### API Breaking - -* [\#6644](https://github.com/cosmos/ibc-go/pull/6644) Add `v2.MerklePath` for contract api `VerifyMembershipMsg` and `VerifyNonMembershipMsg` structs. Note, this requires a migration for existing client contracts to correctly handle deserialization of `MerklePath.KeyPath` which has changed from `[]string` to `[][]byte`. In JSON message structures this change is reflected as the `KeyPath` being a marshalled as a list of base64 encoded byte strings. This change supports proving values stored under keys which contain non-utf8 encoded symbols. See migration docs for more details. - -### Features - -* [#\6231](https://github.com/cosmos/ibc-go/pull/6231) feat: add CLI to broadcast transaction with `MsgMigrateContract`. - -### Bug Fixes - -* [\#6815](https://github.com/cosmos/ibc-go/pull/6815) Decode to bytes the hex-encoded checksum argument of the `migrate-contract` CLI. - - -## [v0.2.0+ibc-go-v8.3-wasmvm-v2.0](https://github.com/cosmos/ibc-go/releases/tag/modules%2Flight-clients%2F08-wasm%2Fv0.2.0%2Bibc-go-v8.3-wasmvm-v2.0) - 2024-05-23 - -### Dependencies - -* [\#5909](https://github.com/cosmos/ibc-go/pull/5909) Update wasmvm to v2.0.0 and cometBFT to v0.38.6. -* [\#6097](https://github.com/cosmos/ibc-go/pull/6097) Update wasmvm to v2.0.1. -* [\#6350](https://github.com/cosmos/ibc-go/pull/6350) Upgrade Cosmos SDK to v0.50.6. - -### Features - -* [\#5821](https://github.com/cosmos/ibc-go/pull/5821) feat: add `VerifyMembershipProof` RPC query (querier approach for conditional clients). -* [\#6231](https://github.com/cosmos/ibc-go/pull/6231) feat: add CLI to broadcast transaction with `MsgMigrateContract`. - - -## [v0.1.1+ibc-go-v7.3-wasmvm-v1.5](https://github.com/cosmos/ibc-go/releases/tag/modules%2Flight-clients%2F08-wasm%2Fv0.1.1%2Bibc-go-v7.3-wasmvm-v1.5) - 2024-04-12 - -### Dependencies - -* [\#6149](https://github.com/cosmos/ibc-go/pull/6149) Bump wasmvm to v1.5.2. - -### Bug Fixes - -* (cli) [\#5610](https://github.com/cosmos/ibc-go/pull/5610) Register wasm tx cli. 
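
The bug-fix entries above note that the `migrate-contract` CLI now decodes its hex-encoded checksum argument to bytes before use. A minimal sketch of that conversion, assuming a hypothetical checksum value and argument handling (this is not the actual CLI code):

```go
package main

import (
	"encoding/hex"
	"fmt"
	"log"
)

func main() {
	// The wasm checksum arrives on the command line as a hex string and must
	// be converted to raw bytes before being placed in the migrate message.
	checksumArg := "3fa2f4b3a5d1e0c6b7a89e4d2c1f0a9b8c7d6e5f4a3b2c1d0e9f8a7b6c5d4e3f" // hypothetical value
	checksum, err := hex.DecodeString(checksumArg)
	if err != nil {
		log.Fatalf("invalid hex checksum: %v", err)
	}
	fmt.Printf("decoded %d checksum bytes\n", len(checksum))
}
```
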
- - -## [v0.1.0+ibc-go-v8.0-wasmvm-v1.5](https://github.com/cosmos/ibc-go/releases/tag/modules%2Flight-clients%2F08-wasm%2Fv0.1.0%2Bibc-go-v7.3-wasmvm-v1.5) - 2023-12-18 - -### Features - -* [\#5079](https://github.com/cosmos/ibc-go/pull/5079) feat: 08-wasm light client proxy module for wasm clients. - - -## [v0.1.0+ibc-go-v7.3-wasmvm-v1.5](https://github.com/cosmos/ibc-go/releases/tag/modules%2Flight-clients%2F08-wasm%2Fv0.1.0%2Bibc-go-v8.0-wasmvm-v1.5) - 2023-12-18 - -### Features - -* [\#5079](https://github.com/cosmos/ibc-go/pull/5079) feat: 08-wasm light client proxy module for wasm clients. diff --git a/modules/light-clients/08-wasm/Dockerfile b/modules/light-clients/08-wasm/Dockerfile index a7df9d51e9b..96bd79cbbe6 100644 --- a/modules/light-clients/08-wasm/Dockerfile +++ b/modules/light-clients/08-wasm/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.23.8-alpine AS builder-base +FROM golang:1.24-alpine AS builder-base ARG LIBWASM_VERSION ARG TARGETARCH diff --git a/modules/light-clients/08-wasm/blsverifier/crypto.go b/modules/light-clients/08-wasm/blsverifier/crypto.go index d3c39dc0209..bc4ff955ba1 100644 --- a/modules/light-clients/08-wasm/blsverifier/crypto.go +++ b/modules/light-clients/08-wasm/blsverifier/crypto.go @@ -3,7 +3,7 @@ package blsverifier import ( "fmt" - "github.com/prysmaticlabs/prysm/v5/crypto/bls" + "github.com/OffchainLabs/prysm/v6/crypto/bls" ) func AggregatePublicKeys(publicKeys [][]byte) (bls.PublicKey, error) { @@ -13,7 +13,7 @@ func AggregatePublicKeys(publicKeys [][]byte) (bls.PublicKey, error) { func VerifySignature(signature []byte, message [32]byte, publicKeys [][]byte) (bool, error) { aggregatedPublicKey, err := AggregatePublicKeys(publicKeys) if err != nil { - return false, fmt.Errorf("failed to aggregate public keys %v", err) + return false, fmt.Errorf("failed to aggregate public keys %w", err) } return bls.VerifySignature(signature, message, aggregatedPublicKey) } diff --git a/modules/light-clients/08-wasm/blsverifier/handler.go b/modules/light-clients/08-wasm/blsverifier/handler.go index f793b61e683..5ab8117ba25 100644 --- a/modules/light-clients/08-wasm/blsverifier/handler.go +++ b/modules/light-clients/08-wasm/blsverifier/handler.go @@ -34,14 +34,14 @@ func CustomQuerier() func(sdk.Context, json.RawMessage) ([]byte, error) { var customQuery CustomQuery err := json.Unmarshal([]byte(request), &customQuery) if err != nil { - return nil, fmt.Errorf("failed to parse custom query %v", err) + return nil, fmt.Errorf("failed to parse custom query %w", err) } switch { case customQuery.Aggregate != nil: aggregatedPublicKeys, err := AggregatePublicKeys(customQuery.Aggregate.PublicKeys) if err != nil { - return nil, fmt.Errorf("failed to aggregate public keys %v", err) + return nil, fmt.Errorf("failed to aggregate public keys %w", err) } return json.Marshal(aggregatedPublicKeys.Marshal()) @@ -56,7 +56,7 @@ func CustomQuerier() func(sdk.Context, json.RawMessage) ([]byte, error) { } result, err := VerifySignature(customQuery.AggregateVerify.Signature, msg, customQuery.AggregateVerify.PublicKeys) if err != nil { - return nil, fmt.Errorf("failed to verify signature %v", err) + return nil, fmt.Errorf("failed to verify signature %w", err) } return json.Marshal(result) diff --git a/modules/light-clients/08-wasm/go.mod b/modules/light-clients/08-wasm/go.mod index 46222b5dada..3491ed01946 100644 --- a/modules/light-clients/08-wasm/go.mod +++ b/modules/light-clients/08-wasm/go.mod @@ -1,18 +1,19 @@ module github.com/cosmos/ibc-go/modules/light-clients/08-wasm/v10 -go 1.23.8 
+go 1.24.3 -replace github.com/cosmos/ibc-go/v10 => ../../../ - -replace github.com/syndtr/goleveldb => github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 +replace ( + github.com/cosmos/ibc-go/v10 => ../../../ + github.com/syndtr/goleveldb => github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 +) require ( cosmossdk.io/api v0.9.2 cosmossdk.io/client/v2 v2.0.0-beta.9 - cosmossdk.io/collections v1.2.1 + cosmossdk.io/collections v1.3.1 cosmossdk.io/core v0.11.3 cosmossdk.io/errors v1.0.2 - cosmossdk.io/log v1.6.0 + cosmossdk.io/log v1.6.1 cosmossdk.io/math v1.5.3 cosmossdk.io/store v1.1.2 cosmossdk.io/tools/confix v0.1.2 @@ -22,39 +23,39 @@ require ( cosmossdk.io/x/tx v0.14.0 cosmossdk.io/x/upgrade v0.2.0 github.com/CosmWasm/wasmvm/v2 v2.2.4 + github.com/OffchainLabs/prysm/v6 v6.0.4 github.com/cometbft/cometbft v0.38.17 - github.com/cosmos/cosmos-db v1.1.1 - github.com/cosmos/cosmos-sdk v0.53.0 + github.com/cosmos/cosmos-db v1.1.3 + github.com/cosmos/cosmos-sdk v0.53.4 github.com/cosmos/gogoproto v1.7.0 - github.com/cosmos/ibc-go/v10 v10.0.0 + github.com/cosmos/ibc-go/v10 v10.3.0 github.com/golang/protobuf v1.5.4 github.com/grpc-ecosystem/grpc-gateway v1.16.0 - github.com/prysmaticlabs/prysm/v5 v5.3.0 - github.com/spf13/cast v1.8.0 - github.com/spf13/cobra v1.9.1 + github.com/spf13/cast v1.9.2 + github.com/spf13/cobra v1.10.1 github.com/spf13/viper v1.20.1 - github.com/stretchr/testify v1.10.0 - google.golang.org/genproto/googleapis/api v0.0.0-20250414145226-207652e42e2e - google.golang.org/grpc v1.72.0 + github.com/stretchr/testify v1.11.1 + google.golang.org/genproto/googleapis/api v0.0.0-20250707201910-8d1bb00bc6a7 + google.golang.org/grpc v1.75.0 ) require ( - cel.dev/expr v0.20.0 // indirect + cel.dev/expr v0.24.0 // indirect cloud.google.com/go v0.116.0 // indirect cloud.google.com/go/auth v0.14.1 // indirect cloud.google.com/go/auth/oauth2adapt v0.2.7 // indirect - cloud.google.com/go/compute/metadata v0.6.0 // indirect + cloud.google.com/go/compute/metadata v0.7.0 // indirect cloud.google.com/go/iam v1.2.2 // indirect cloud.google.com/go/monitoring v1.21.2 // indirect cloud.google.com/go/storage v1.49.0 // indirect - cosmossdk.io/depinject v1.2.0 // indirect + cosmossdk.io/depinject v1.2.1 // indirect cosmossdk.io/schema v1.1.0 // indirect filippo.io/edwards25519 v1.1.0 // indirect github.com/99designs/go-keychain v0.0.0-20191008050251-8e49817e8af4 // indirect github.com/99designs/keyring v1.2.2 // indirect github.com/DataDog/datadog-go v4.8.3+incompatible // indirect github.com/DataDog/zstd v1.5.7 // indirect - github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.26.0 // indirect + github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.29.0 // indirect github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.48.1 // indirect github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.48.1 // indirect github.com/Microsoft/go-winio v0.6.2 // indirect @@ -63,13 +64,13 @@ require ( github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d // indirect github.com/bgentry/speakeasy v0.2.0 // indirect github.com/bits-and-blooms/bitset v1.22.0 // indirect - github.com/bytedance/sonic v1.13.2 // indirect - github.com/bytedance/sonic/loader v0.2.4 // indirect + github.com/bytedance/sonic v1.14.0 // indirect + github.com/bytedance/sonic/loader v0.3.0 // indirect github.com/cenkalti/backoff/v4 v4.3.0 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect 
github.com/chzyer/readline v1.5.1 // indirect github.com/cloudwego/base64x v0.1.5 // indirect - github.com/cncf/xds/go v0.0.0-20250121191232-2f005788dc42 // indirect + github.com/cncf/xds/go v0.0.0-20250501225837-2ac532fd4443 // indirect github.com/cockroachdb/apd/v2 v2.0.2 // indirect github.com/cockroachdb/errors v1.12.0 // indirect github.com/cockroachdb/fifo v0.0.0-20240616162244-4768e80dfb9a // indirect @@ -98,22 +99,22 @@ require ( github.com/emicklei/dot v1.6.2 // indirect github.com/envoyproxy/go-control-plane/envoy v1.32.4 // indirect github.com/envoyproxy/protoc-gen-validate v1.2.1 // indirect - github.com/ethereum/go-ethereum v1.15.11 // indirect + github.com/ethereum/go-ethereum v1.16.3 // indirect github.com/fatih/color v1.17.0 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect github.com/fsnotify/fsnotify v1.9.0 // indirect - github.com/getsentry/sentry-go v0.32.0 // indirect - github.com/go-jose/go-jose/v4 v4.0.5 // indirect + github.com/getsentry/sentry-go v0.33.0 // indirect + github.com/go-jose/go-jose/v4 v4.1.1 // indirect github.com/go-kit/kit v0.13.0 // indirect github.com/go-kit/log v0.2.1 // indirect github.com/go-logfmt/logfmt v0.6.0 // indirect - github.com/go-logr/logr v1.4.2 // indirect + github.com/go-logr/logr v1.4.3 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-viper/mapstructure/v2 v2.2.1 // indirect github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2 // indirect github.com/gogo/googleapis v1.4.1 // indirect github.com/gogo/protobuf v1.3.2 // indirect - github.com/golang/glog v1.2.4 // indirect + github.com/golang/glog v1.2.5 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/snappy v0.0.5-0.20231225225746-43d5d4cd4e0e // indirect github.com/google/btree v1.1.3 // indirect @@ -191,7 +192,7 @@ require ( github.com/sirupsen/logrus v1.9.3 // indirect github.com/sourcegraph/conc v0.3.0 // indirect github.com/spf13/afero v1.12.0 // indirect - github.com/spf13/pflag v1.0.6 // indirect + github.com/spf13/pflag v1.0.9 // indirect github.com/spiffe/go-spiffe/v2 v2.5.0 // indirect github.com/subosito/gotenv v1.6.0 // indirect github.com/supranational/blst v0.3.14 // indirect @@ -207,35 +208,36 @@ require ( go.etcd.io/bbolt v1.4.0-alpha.1 // indirect go.opencensus.io v0.24.0 // indirect go.opentelemetry.io/auto/sdk v1.1.0 // indirect - go.opentelemetry.io/contrib/detectors/gcp v1.34.0 // indirect - go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.58.0 // indirect + go.opentelemetry.io/contrib/detectors/gcp v1.36.0 // indirect + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.60.0 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.59.0 // indirect - go.opentelemetry.io/otel v1.34.0 // indirect - go.opentelemetry.io/otel/metric v1.34.0 // indirect - go.opentelemetry.io/otel/sdk v1.34.0 // indirect - go.opentelemetry.io/otel/sdk/metric v1.34.0 // indirect - go.opentelemetry.io/otel/trace v1.34.0 // indirect + go.opentelemetry.io/otel v1.37.0 // indirect + go.opentelemetry.io/otel/metric v1.37.0 // indirect + go.opentelemetry.io/otel/sdk v1.37.0 // indirect + go.opentelemetry.io/otel/sdk/metric v1.37.0 // indirect + go.opentelemetry.io/otel/trace v1.37.0 // indirect go.uber.org/mock v0.5.2 // indirect go.uber.org/multierr v1.11.0 // indirect - golang.org/x/arch v0.15.0 // indirect - golang.org/x/crypto v0.37.0 // indirect + go.yaml.in/yaml/v2 v2.4.2 // indirect + golang.org/x/arch v0.17.0 // 
indirect + golang.org/x/crypto v0.39.0 // indirect golang.org/x/exp v0.0.0-20250305212735-054e65f0b394 // indirect - golang.org/x/net v0.39.0 // indirect - golang.org/x/oauth2 v0.27.0 // indirect - golang.org/x/sync v0.13.0 // indirect - golang.org/x/sys v0.32.0 // indirect - golang.org/x/term v0.31.0 // indirect - golang.org/x/text v0.24.0 // indirect + golang.org/x/net v0.41.0 // indirect + golang.org/x/oauth2 v0.30.0 // indirect + golang.org/x/sync v0.15.0 // indirect + golang.org/x/sys v0.33.0 // indirect + golang.org/x/term v0.32.0 // indirect + golang.org/x/text v0.26.0 // indirect golang.org/x/time v0.10.0 // indirect google.golang.org/api v0.222.0 // indirect google.golang.org/genproto v0.0.0-20241118233622-e639e219e697 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20250422160041-2d3770c4ea7f // indirect - google.golang.org/protobuf v1.36.6 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20250707201910-8d1bb00bc6a7 // indirect + google.golang.org/protobuf v1.36.8 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect gotest.tools/v3 v3.5.2 // indirect nhooyr.io/websocket v1.8.11 // indirect pgregory.net/rapid v1.2.0 // indirect rsc.io/qr v0.2.0 // indirect - sigs.k8s.io/yaml v1.4.0 // indirect + sigs.k8s.io/yaml v1.6.0 // indirect ) diff --git a/modules/light-clients/08-wasm/go.sum b/modules/light-clients/08-wasm/go.sum index 1e16ced78bd..e1e8785d0e0 100644 --- a/modules/light-clients/08-wasm/go.sum +++ b/modules/light-clients/08-wasm/go.sum @@ -1,5 +1,5 @@ -cel.dev/expr v0.20.0 h1:OunBvVCfvpWlt4dN7zg3FM6TDkzOePe1+foGJ9AXeeI= -cel.dev/expr v0.20.0/go.mod h1:MrpN08Q+lEBs+bGYdLxxHkZoUSsCp0nSKTs0nTymJgw= +cel.dev/expr v0.24.0 h1:56OvJKSH3hDGL0ml5uSxZmz3/3Pq4tJ+fb1unVLAFcY= +cel.dev/expr v0.24.0/go.mod h1:hLPLo1W4QUmuYdA72RBX06QTs6MXw941piREPl3Yfiw= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= @@ -184,8 +184,8 @@ cloud.google.com/go/compute/metadata v0.1.0/go.mod h1:Z1VN+bulIf6bt4P/C37K4DyZYZ cloud.google.com/go/compute/metadata v0.2.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= cloud.google.com/go/compute/metadata v0.2.1/go.mod h1:jgHgmJd2RKBGzXqF5LR2EZMGxBkeanZ9wwa75XHJgOM= cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= -cloud.google.com/go/compute/metadata v0.6.0 h1:A6hENjEsCDtC1k8byVsgwvVcioamEHvZ4j01OwKxG9I= -cloud.google.com/go/compute/metadata v0.6.0/go.mod h1:FjyFAW1MW0C203CEOMDTu3Dk1FlqW3Rga40jzHL4hfg= +cloud.google.com/go/compute/metadata v0.7.0 h1:PBWF+iiAerVNe8UCHxdOt6eHLVc3ydFeOCw78U8ytSU= +cloud.google.com/go/compute/metadata v0.7.0/go.mod h1:j5MvL9PprKL39t166CoB1uVHfQMs4tFQZZcKwksXUjo= cloud.google.com/go/contactcenterinsights v1.3.0/go.mod h1:Eu2oemoePuEFc/xKFPjbTuPSj0fYJcPls9TFlPNnHHY= cloud.google.com/go/contactcenterinsights v1.4.0/go.mod h1:L2YzkGbPsv+vMQMCADxJoT9YiTTnSEd6fEvCeHTYVck= cloud.google.com/go/contactcenterinsights v1.6.0/go.mod h1:IIDlT6CLcDoyv79kDv8iWxMSTZhLxSCofVV5W6YFM/w= @@ -618,16 +618,16 @@ cosmossdk.io/api v0.9.2 h1:9i9ptOBdmoIEVEVWLtYYHjxZonlF/aOVODLFaxpmNtg= cosmossdk.io/api v0.9.2/go.mod h1:CWt31nVohvoPMTlPv+mMNCtC0a7BqRdESjCsstHcTkU= cosmossdk.io/client/v2 v2.0.0-beta.9 h1:xc06zg4G858/pK5plhf8RCfo+KR2mdDKJNrEkfrVAqc= cosmossdk.io/client/v2 v2.0.0-beta.9/go.mod h1:pHf3CCHX5gmbL9rDCVbXhGI2+/DdAVTEZSLpdd5V9Zs= 
-cosmossdk.io/collections v1.2.1 h1:mAlNMs5vJwkda4TA+k5q/43p24RVAQ/qyDrjANu3BXE= -cosmossdk.io/collections v1.2.1/go.mod h1:PSsEJ/fqny0VPsHLFT6gXDj/2C1tBOTS9eByK0+PBFU= +cosmossdk.io/collections v1.3.1 h1:09e+DUId2brWsNOQ4nrk+bprVmMUaDH9xvtZkeqIjVw= +cosmossdk.io/collections v1.3.1/go.mod h1:ynvkP0r5ruAjbmedE+vQ07MT6OtJ0ZIDKrtJHK7Q/4c= cosmossdk.io/core v0.11.3 h1:mei+MVDJOwIjIniaKelE3jPDqShCc/F4LkNNHh+4yfo= cosmossdk.io/core v0.11.3/go.mod h1:9rL4RE1uDt5AJ4Tg55sYyHWXA16VmpHgbe0PbJc6N2Y= -cosmossdk.io/depinject v1.2.0 h1:6NW/FSK1IkWTrX7XxUpBmX1QMBozpEI9SsWkKTBc5zw= -cosmossdk.io/depinject v1.2.0/go.mod h1:pvitjtUxZZZTQESKNS9KhGjWVslJZxtO9VooRJYyPjk= +cosmossdk.io/depinject v1.2.1 h1:eD6FxkIjlVaNZT+dXTQuwQTKZrFZ4UrfCq1RKgzyhMw= +cosmossdk.io/depinject v1.2.1/go.mod h1:lqQEycz0H2JXqvOgVwTsjEdMI0plswI7p6KX+MVqFOM= cosmossdk.io/errors v1.0.2 h1:wcYiJz08HThbWxd/L4jObeLaLySopyyuUFB5w4AGpCo= cosmossdk.io/errors v1.0.2/go.mod h1:0rjgiHkftRYPj//3DrD6y8hcm40HcPv/dR4R/4efr0k= -cosmossdk.io/log v1.6.0 h1:SJIOmJ059wi1piyRgNRXKXhlDXGqnB5eQwhcZKv2tOk= -cosmossdk.io/log v1.6.0/go.mod h1:5cXXBvfBkR2/BcXmosdCSLXllvgSjphrrDVdfVRmBGM= +cosmossdk.io/log v1.6.1 h1:YXNwAgbDwMEKwDlCdH8vPcoggma48MgZrTQXCfmMBeI= +cosmossdk.io/log v1.6.1/go.mod h1:gMwsWyyDBjpdG9u2avCFdysXqxq28WJapJvu+vF1y+E= cosmossdk.io/math v1.5.3 h1:WH6tu6Z3AUCeHbeOSHg2mt9rnoiUWVWaQ2t6Gkll96U= cosmossdk.io/math v1.5.3/go.mod h1:uqcZv7vexnhMFJF+6zh9EWdm/+Ylyln34IvPnBauPCQ= cosmossdk.io/schema v1.1.0 h1:mmpuz3dzouCoyjjcMcA/xHBEmMChN+EHh8EHxHRHhzE= @@ -668,8 +668,8 @@ github.com/DataDog/datadog-go v4.8.3+incompatible h1:fNGaYSuObuQb5nzeTQqowRAd9bp github.com/DataDog/datadog-go v4.8.3+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= github.com/DataDog/zstd v1.5.7 h1:ybO8RBeh29qrxIhCA9E8gKY6xfONU9T6G6aP9DTKfLE= github.com/DataDog/zstd v1.5.7/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw= -github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.26.0 h1:f2Qw/Ehhimh5uO1fayV0QIW7DShEQqhtUfhYc+cBPlw= -github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.26.0/go.mod h1:2bIszWvQRlJVmJLiuLhukLImRjKPcYdzzsx6darK02A= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.29.0 h1:UQUsRi8WTzhZntp5313l+CHIAT95ojUI2lpP/ExlZa4= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.29.0/go.mod h1:Cz6ft6Dkn3Et6l2v2a9/RpN7epQ1GtDlO6lj8bEcOvw= github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.48.1 h1:UQ0AhxogsIRZDkElkblfnwjc3IaltCm2HUMvezQaL7s= github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.48.1/go.mod h1:jyqM3eLpJ3IbIFDTKVz2rF9T/xWGW0rIriGwnz8l9Tk= github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/cloudmock v0.48.1 h1:oTX4vsorBZo/Zdum6OKPA4o7544hm6smoRv1QjpTwGo= @@ -682,6 +682,8 @@ github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERo github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 h1:TngWCqHvy9oXAN6lEVMRuU21PR1EtLVZJmdB18Gu3Rw= github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5/go.mod h1:lmUJ/7eu/Q8D7ML55dXQrVaamCz2vxCfdQBasLZfHKk= +github.com/OffchainLabs/prysm/v6 v6.0.4 h1:aqWCb2U3LfeahzjORvxXYsL1ebKWT1AUu3Ya3y7LApE= +github.com/OffchainLabs/prysm/v6 v6.0.4/go.mod h1:lMkHT3gWiCOqo4rbuhLTU4FoQ/THni9v6z4w9P6FRyU= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/Shopify/sarama v1.19.0/go.mod 
h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= @@ -739,11 +741,11 @@ github.com/btcsuite/btcd/btcutil v1.1.6 h1:zFL2+c3Lb9gEgqKNzowKUPQNb8jV7v5Oaodi/ github.com/btcsuite/btcd/btcutil v1.1.6/go.mod h1:9dFymx8HpuLqBnsPELrImQeTQfKBQqzqGbbV3jK55aE= github.com/bufbuild/protocompile v0.14.1 h1:iA73zAf/fyljNjQKwYzUHD6AD4R8KMasmwa/FBatYVw= github.com/bufbuild/protocompile v0.14.1/go.mod h1:ppVdAIhbr2H8asPk6k4pY7t9zB1OU5DoEw9xY/FUi1c= -github.com/bytedance/sonic v1.13.2 h1:8/H1FempDZqC4VqjptGo14QQlJx8VdZJegxs6wwfqpQ= -github.com/bytedance/sonic v1.13.2/go.mod h1:o68xyaF9u2gvVBuGHPlUVCy+ZfmNNO5ETf1+KgkJhz4= +github.com/bytedance/sonic v1.14.0 h1:/OfKt8HFw0kh2rj8N0F6C/qPGRESq0BbaNZgcNXXzQQ= +github.com/bytedance/sonic v1.14.0/go.mod h1:WoEbx8WTcFJfzCe0hbmyTGrfjt8PzNEBdxlNUO24NhA= github.com/bytedance/sonic/loader v0.1.1/go.mod h1:ncP89zfokxS5LZrJxl5z0UJcsk4M4yY2JpfqGeCtNLU= -github.com/bytedance/sonic/loader v0.2.4 h1:ZWCw4stuXUsn1/+zQDqeE7JKP+QO47tz7QCNan80NzY= -github.com/bytedance/sonic/loader v0.2.4/go.mod h1:N8A3vUdtUebEY2/VQC0MyhYeKUFosQU6FxH2JmUe6VI= +github.com/bytedance/sonic/loader v0.3.0 h1:dskwH8edlzNMctoruo8FPTJDF3vLtDT0sXZwvZJyqeA= +github.com/bytedance/sonic/loader v0.3.0/go.mod h1:N8A3vUdtUebEY2/VQC0MyhYeKUFosQU6FxH2JmUe6VI= github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ= github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4= github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= @@ -788,8 +790,8 @@ github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWH github.com/cncf/xds/go v0.0.0-20220314180256-7f1daf1720fc/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20230105202645-06c439db220b/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20250121191232-2f005788dc42 h1:Om6kYQYDUk5wWbT0t0q6pvyM49i9XZAv9dDrkDA7gjk= -github.com/cncf/xds/go v0.0.0-20250121191232-2f005788dc42/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= +github.com/cncf/xds/go v0.0.0-20250501225837-2ac532fd4443 h1:aQ3y1lwWyqYPiWZThqv1aFbZMiM9vblcSArJRf2Irls= +github.com/cncf/xds/go v0.0.0-20250501225837-2ac532fd4443/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= github.com/cockroachdb/apd/v2 v2.0.2 h1:weh8u7Cneje73dDh+2tEVLUvyBc89iwepWCD8b8034E= github.com/cockroachdb/apd/v2 v2.0.2/go.mod h1:DDxRlzC2lo3/vSlmSoS7JkqbbrARPuFOGr0B9pvN3Gw= github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= @@ -812,10 +814,8 @@ github.com/cometbft/cometbft v0.38.17 h1:FkrQNbAjiFqXydeAO81FUzriL4Bz0abYxN/eOHr github.com/cometbft/cometbft v0.38.17/go.mod h1:5l0SkgeLRXi6bBfQuevXjKqML1jjfJJlvI1Ulp02/o4= github.com/cometbft/cometbft-db v0.14.1 h1:SxoamPghqICBAIcGpleHbmoPqy+crij/++eZz3DlerQ= github.com/cometbft/cometbft-db v0.14.1/go.mod h1:KHP1YghilyGV/xjD5DP3+2hyigWx0WTp9X+0Gnx0RxQ= -github.com/consensys/bavard v0.1.27 h1:j6hKUrGAy/H+gpNrpLU3I26n1yc+VMGmd6ID5+gAhOs= -github.com/consensys/bavard v0.1.27/go.mod h1:k/zVjHHC4B+PQy1Pg7fgvG3ALicQw540Crag8qx+dZs= -github.com/consensys/gnark-crypto v0.16.0 h1:8Dl4eYmUWK9WmlP1Bj6je688gBRJCJbT8Mw4KoTAawo= -github.com/consensys/gnark-crypto v0.16.0/go.mod 
h1:Ke3j06ndtPTVvo++PhGNgvm+lgpLvzbcE2MqljY7diU= +github.com/consensys/gnark-crypto v0.18.0 h1:vIye/FqI50VeAr0B3dx+YjeIvmc3LWz4yEfbWBpTUf0= +github.com/consensys/gnark-crypto v0.18.0/go.mod h1:L3mXGFTe1ZN+RSJ+CLjUt9x7PNdx8ubaYfDROyp2Z8c= github.com/containerd/continuity v0.3.0 h1:nisirsYROK15TAMVukJOUyGJjz4BNQJBVsNvAXZJ/eg= github.com/containerd/continuity v0.3.0/go.mod h1:wJEAIwKOm/pBZuBd0JmeTvnLquTB1Ag8espWhkykbPM= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= @@ -824,12 +824,12 @@ github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSV github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/cosmos/btcutil v1.0.5 h1:t+ZFcX77LpKtDBhjucvnOH8C2l2ioGsBNEQ3jef8xFk= github.com/cosmos/btcutil v1.0.5/go.mod h1:IyB7iuqZMJlthe2tkIFL33xPyzbFYP0XVdS8P5lUPis= -github.com/cosmos/cosmos-db v1.1.1 h1:FezFSU37AlBC8S98NlSagL76oqBRWq/prTPvFcEJNCM= -github.com/cosmos/cosmos-db v1.1.1/go.mod h1:AghjcIPqdhSLP/2Z0yha5xPH3nLnskz81pBx3tcVSAw= +github.com/cosmos/cosmos-db v1.1.3 h1:7QNT77+vkefostcKkhrzDK9uoIEryzFrU9eoMeaQOPY= +github.com/cosmos/cosmos-db v1.1.3/go.mod h1:kN+wGsnwUJZYn8Sy5Q2O0vCYA99MJllkKASbs6Unb9U= github.com/cosmos/cosmos-proto v1.0.0-beta.5 h1:eNcayDLpip+zVLRLYafhzLvQlSmyab+RC5W7ZfmxJLA= github.com/cosmos/cosmos-proto v1.0.0-beta.5/go.mod h1:hQGLpiIUloJBMdQMMWb/4wRApmI9hjHH05nefC0Ojec= -github.com/cosmos/cosmos-sdk v0.53.0 h1:ZsB2tnBVudumV059oPuElcr0K1lLOutaI6WJ+osNTbI= -github.com/cosmos/cosmos-sdk v0.53.0/go.mod h1:UPcRyFwOUy2PfSFBWxBceO/HTjZOuBVqY583WyazIGs= +github.com/cosmos/cosmos-sdk v0.53.4 h1:kPF6vY68+/xi1/VebSZGpoxQqA52qkhUzqkrgeBn3Mg= +github.com/cosmos/cosmos-sdk v0.53.4/go.mod h1:7U3+WHZtI44dEOnU46+lDzBb2tFh1QlMvi8Z5JugopI= github.com/cosmos/go-bip39 v1.0.0 h1:pcomnQdrdH22njcAatO0yWojsUnCO3y2tNoV1cb6hHY= github.com/cosmos/go-bip39 v1.0.0/go.mod h1:RNJv0H/pOIVgxw6KS7QeX2a0Uo0aKUlfhZ4xuwvCdJw= github.com/cosmos/gogogateway v1.2.0 h1:Ae/OivNhp8DqBi/sh2A8a1D0y638GpL3tkmLQAiKxTE= @@ -921,10 +921,10 @@ github.com/envoyproxy/protoc-gen-validate v0.10.1/go.mod h1:DRjgyB0I43LtJapqN6Ni github.com/envoyproxy/protoc-gen-validate v1.2.1 h1:DEo3O99U8j4hBFwbJfrz9VtgcDfUKS7KJ7spH3d86P8= github.com/envoyproxy/protoc-gen-validate v1.2.1/go.mod h1:d/C80l/jxXLdfEIhX1W2TmLfsJ31lvEjwamM4DxlWXU= github.com/ethereum/c-kzg-4844 v1.0.0 h1:0X1LBXxaEtYD9xsyj9B9ctQEZIpnvVDeoBx8aHEwTNA= -github.com/ethereum/c-kzg-4844/v2 v2.1.0 h1:gQropX9YFBhl3g4HYhwE70zq3IHFRgbbNPw0Shwzf5w= -github.com/ethereum/c-kzg-4844/v2 v2.1.0/go.mod h1:TC48kOKjJKPbN7C++qIgt0TJzZ70QznYR7Ob+WXl57E= -github.com/ethereum/go-ethereum v1.15.11 h1:JK73WKeu0WC0O1eyX+mdQAVHUV+UR1a9VB/domDngBU= -github.com/ethereum/go-ethereum v1.15.11/go.mod h1:mf8YiHIb0GR4x4TipcvBUPxJLw1mFdmxzoDi11sDRoI= +github.com/ethereum/c-kzg-4844/v2 v2.1.1 h1:KhzBVjmURsfr1+S3k/VE35T02+AW2qU9t9gr4R6YpSo= +github.com/ethereum/c-kzg-4844/v2 v2.1.1/go.mod h1:TC48kOKjJKPbN7C++qIgt0TJzZ70QznYR7Ob+WXl57E= +github.com/ethereum/go-ethereum v1.16.3 h1:nDoBSrmsrPbrDIVLTkDQCy1U9KdHN+F2PzvMbDoS42Q= +github.com/ethereum/go-ethereum v1.16.3/go.mod h1:Lrsc6bt9Gm9RyvhfFK53vboCia8kpF9nv+2Ukntnl+8= github.com/ethereum/go-verkle v0.2.2 h1:I2W0WjnrFUIzzVPwm8ykY+7pL2d4VhlsePn4j7cnFk8= github.com/ethereum/go-verkle v0.2.2/go.mod h1:M3b90YRnzqKyyzBEWJGqj8Qff4IDeXnzFw0P9bFw3uk= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= @@ -945,8 +945,8 @@ github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMo 
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k= github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= -github.com/getsentry/sentry-go v0.32.0 h1:YKs+//QmwE3DcYtfKRH8/KyOOF/I6Qnx7qYGNHCGmCY= -github.com/getsentry/sentry-go v0.32.0/go.mod h1:CYNcMMz73YigoHljQRG+qPF+eMq8gG72XcGN/p71BAY= +github.com/getsentry/sentry-go v0.33.0 h1:YWyDii0KGVov3xOaamOnF0mjOrqSjBqwv48UEzn7QFg= +github.com/getsentry/sentry-go v0.33.0/go.mod h1:C55omcY9ChRQIUcVcGcs+Zdy4ZpQGvNJ7JYHIoSWOtE= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI= github.com/gin-gonic/gin v1.6.3/go.mod h1:75u5sXoLsGZoRN5Sgbi1eraJ4GU3++wFwWzhwvtwp4M= @@ -960,8 +960,8 @@ github.com/go-fonts/stix v0.1.0/go.mod h1:w/c1f0ldAUlJmLBvlbkvVXLAD+tAMqobIIQpmn github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-jose/go-jose/v4 v4.0.5 h1:M6T8+mKZl/+fNNuFHvGIzDz7BTLQPIounk/b9dw3AaE= -github.com/go-jose/go-jose/v4 v4.0.5/go.mod h1:s3P1lRrkT8igV8D9OjyL4WRyHvjB6a4JSllnOrmmBOA= +github.com/go-jose/go-jose/v4 v4.1.1 h1:JYhSgy4mXXzAdF3nUx3ygx347LRXJRrpgyU3adRmkAI= +github.com/go-jose/go-jose/v4 v4.1.1/go.mod h1:BdsZGqgdO3b6tTc6LSE56wcDbMMLuPsw5d4ZD5f94kA= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o= @@ -978,8 +978,8 @@ github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG github.com/go-logfmt/logfmt v0.6.0 h1:wGYYu3uicYdqXVgoYbvnkrPVXkuLM1p1ifugDMEdRi4= github.com/go-logfmt/logfmt v0.6.0/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= -github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= +github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-pdf/fpdf v0.5.0/go.mod h1:HzcnA+A23uwogo0tp9yU+l3V+KXhiESpt1PMayhOh5M= @@ -1013,8 +1013,8 @@ github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGw github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= github.com/golang/glog v1.1.0/go.mod h1:pfYeQZ3JWZoXTV5sFc986z3HTpwQs9At6P4ImfuP3NQ= -github.com/golang/glog v1.2.4 h1:CNNw5U8lSiiBk7druxtSHHTsRWcxKoac6kZKm2peBBc= -github.com/golang/glog v1.2.4/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= +github.com/golang/glog v1.2.5 h1:DrW6hGnjIhtvhOIiAKT6Psh/Kd/ldepEa81DKeiRJ5I= 
+github.com/golang/glog v1.2.5/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -1346,8 +1346,6 @@ github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:F github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/mmcloughlin/addchain v0.4.0 h1:SobOdjm2xLj1KkXN5/n0xTIWyZA2+s99UCY1iPfkHRY= -github.com/mmcloughlin/addchain v0.4.0/go.mod h1:A86O+tHqZLMNO4w6ZZ4FlVQEadcoqkyU72HC5wJ4RlU= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -1487,8 +1485,6 @@ github.com/prysmaticlabs/go-bitfield v0.0.0-20240328144219-a1caa50c3a1e h1:ATgOe github.com/prysmaticlabs/go-bitfield v0.0.0-20240328144219-a1caa50c3a1e/go.mod h1:wmuf/mdK4VMD+jA9ThwcUKjg3a2XWM9cVfFYjDyY4j4= github.com/prysmaticlabs/gohashtree v0.0.4-beta.0.20240624100937-73632381301b h1:VK7thFOnhxAZ/5aolr5Os4beiubuD08WiuiHyRqgwks= github.com/prysmaticlabs/gohashtree v0.0.4-beta.0.20240624100937-73632381301b/go.mod h1:HRuvtXLZ4WkaB1MItToVH2e8ZwKwZPY5/Rcby+CvvLY= -github.com/prysmaticlabs/prysm/v5 v5.3.0 h1:7Lr8ndapBTZg00YE+MgujN6+yvJR6Bdfn28ZDSJ00II= -github.com/prysmaticlabs/prysm/v5 v5.3.0/go.mod h1:r1KhlduqDMIGZ1GhR5pjZ2Ko8Q89noTDYTRoPKwf1+c= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5XpJzTSTfLsJV/mx9Q9g7kxmchpfZyxgzM= github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= @@ -1539,15 +1535,15 @@ github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z github.com/spf13/afero v1.9.2/go.mod h1:iUV7ddyEEZPO5gA3zD4fJt6iStLlL+Lg4m2cihcDf8Y= github.com/spf13/afero v1.12.0 h1:UcOPyRBYczmFn6yvphxkn9ZEOY65cpwGKb5mL36mrqs= github.com/spf13/afero v1.12.0/go.mod h1:ZTlWwG4/ahT8W7T0WQ5uYmjI9duaLQGy3Q2OAl4sk/4= -github.com/spf13/cast v1.8.0 h1:gEN9K4b8Xws4EX0+a0reLmhq8moKn7ntRlQYgjPeCDk= -github.com/spf13/cast v1.8.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= +github.com/spf13/cast v1.9.2 h1:SsGfm7M8QOFtEzumm7UZrZdLLquNdzFYfIbEXntcFbE= +github.com/spf13/cast v1.9.2/go.mod h1:jNfB8QC9IA6ZuY2ZjDp0KtFO2LZZlg4S/7bzP6qqeHo= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= -github.com/spf13/cobra v1.9.1 h1:CXSaggrXdbHK9CF+8ywj8Amf7PBRmPCOJugH954Nnlo= -github.com/spf13/cobra v1.9.1/go.mod h1:nDyEzZ8ogv936Cinf6g1RU9MRY64Ir93oCnqb9wxYW0= +github.com/spf13/cobra v1.10.1 h1:lJeBwCfmrnXthfAupyUTzJ/J4Nc1RsHC/mSRU2dll/s= +github.com/spf13/cobra v1.10.1/go.mod h1:7SmJGaTHFVBY0jW4NXGluQoLvhqFQM+6XSKD+P4XaB0= github.com/spf13/pflag v1.0.1/go.mod 
h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o= -github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/pflag v1.0.9 h1:9exaQaMOCwffKiiiYk6/BndUBv+iRViNW+4lEMi0PvY= +github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/viper v1.20.1 h1:ZMi+z/lvLyPSCoNtFCpqjy0S4kPbirhpTMwl8BkW9X4= github.com/spf13/viper v1.20.1/go.mod h1:P9Mdzt1zoHIG8m2eZQinpiBjo6kCmZSKBClNNqjJvu4= github.com/spiffe/go-spiffe/v2 v2.5.0 h1:N2I01KCUkv1FAjZXJMwh95KK1ZIQLYbPfhaxw8WS0hE= @@ -1572,8 +1568,8 @@ github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1F github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= -github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= -github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= github.com/supranational/blst v0.3.14 h1:xNMoHRJOTwMn63ip6qoWJ2Ymgvj7E2b9jY2FAwY+qRo= @@ -1635,24 +1631,24 @@ go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= -go.opentelemetry.io/contrib/detectors/gcp v1.34.0 h1:JRxssobiPg23otYU5SbWtQC//snGVIM3Tx6QRzlQBao= -go.opentelemetry.io/contrib/detectors/gcp v1.34.0/go.mod h1:cV4BMFcscUR/ckqLkbfQmF0PRsq8w/lMGzdbCSveBHo= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.58.0 h1:PS8wXpbyaDJQ2VDHHncMe9Vct0Zn1fEjpsjrLxGJoSc= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.58.0/go.mod h1:HDBUsEjOuRC0EzKZ1bSaRGZWUBAzo+MhAcUUORSr4D0= +go.opentelemetry.io/contrib/detectors/gcp v1.36.0 h1:F7q2tNlCaHY9nMKHR6XH9/qkp8FktLnIcy6jJNyOCQw= +go.opentelemetry.io/contrib/detectors/gcp v1.36.0/go.mod h1:IbBN8uAIIx734PTonTPxAxnjc2pQTxWNkwfstZ+6H2k= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.60.0 h1:x7wzEgXfnzJcHDwStJT+mxOz4etr2EcexjqhBvmoakw= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.60.0/go.mod h1:rg+RlpR5dKwaS95IyyZqj5Wd4E13lk/msnTS0Xl9lJM= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.59.0 h1:CV7UdSGJt/Ao6Gp4CXckLxVRRsRgDHoI8XjbL3PDl8s= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.59.0/go.mod h1:FRmFuRJfag1IZ2dPkHnEoSFVgTVPUd2qf5Vi69hLb8I= -go.opentelemetry.io/otel v1.34.0 h1:zRLXxLCgL1WyKsPVrgbSdMN4c0FMkDAskSTQP+0hdUY= -go.opentelemetry.io/otel v1.34.0/go.mod h1:OWFPOQ+h4G8xpyjgqo4SxJYdDQ/qmRH+wivy7zzx9oI= +go.opentelemetry.io/otel v1.37.0 h1:9zhNfelUvx0KBfu/gb+ZgeAfAgtWrfHJZcAqFC228wQ= +go.opentelemetry.io/otel v1.37.0/go.mod 
h1:ehE/umFRLnuLa/vSccNq9oS1ErUlkkK71gMcN34UG8I= go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.29.0 h1:WDdP9acbMYjbKIyJUhTvtzj601sVJOqgWdUxSdR/Ysc= go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.29.0/go.mod h1:BLbf7zbNIONBLPwvFnwNHGj4zge8uTCM/UPIVW1Mq2I= -go.opentelemetry.io/otel/metric v1.34.0 h1:+eTR3U0MyfWjRDhmFMxe2SsW64QrZ84AOhvqS7Y+PoQ= -go.opentelemetry.io/otel/metric v1.34.0/go.mod h1:CEDrp0fy2D0MvkXE+dPV7cMi8tWZwX3dmaIhwPOaqHE= -go.opentelemetry.io/otel/sdk v1.34.0 h1:95zS4k/2GOy069d321O8jWgYsW3MzVV+KuSPKp7Wr1A= -go.opentelemetry.io/otel/sdk v1.34.0/go.mod h1:0e/pNiaMAqaykJGKbi+tSjWfNNHMTxoC9qANsCzbyxU= -go.opentelemetry.io/otel/sdk/metric v1.34.0 h1:5CeK9ujjbFVL5c1PhLuStg1wxA7vQv7ce1EK0Gyvahk= -go.opentelemetry.io/otel/sdk/metric v1.34.0/go.mod h1:jQ/r8Ze28zRKoNRdkjCZxfs6YvBTG1+YIqyFVFYec5w= -go.opentelemetry.io/otel/trace v1.34.0 h1:+ouXS2V8Rd4hp4580a8q23bg0azF2nI8cqLYnC8mh/k= -go.opentelemetry.io/otel/trace v1.34.0/go.mod h1:Svm7lSjQD7kG7KJ/MUHPVXSDGz2OX4h0M2jHBhmSfRE= +go.opentelemetry.io/otel/metric v1.37.0 h1:mvwbQS5m0tbmqML4NqK+e3aDiO02vsf/WgbsdpcPoZE= +go.opentelemetry.io/otel/metric v1.37.0/go.mod h1:04wGrZurHYKOc+RKeye86GwKiTb9FKm1WHtO+4EVr2E= +go.opentelemetry.io/otel/sdk v1.37.0 h1:ItB0QUqnjesGRvNcmAcU0LyvkVyGJ2xftD29bWdDvKI= +go.opentelemetry.io/otel/sdk v1.37.0/go.mod h1:VredYzxUvuo2q3WRcDnKDjbdvmO0sCzOvVAiY+yUkAg= +go.opentelemetry.io/otel/sdk/metric v1.37.0 h1:90lI228XrB9jCMuSdA0673aubgRobVZFhbjxHHspCPc= +go.opentelemetry.io/otel/sdk/metric v1.37.0/go.mod h1:cNen4ZWfiD37l5NhS+Keb5RXVWZWpRE+9WyVCpbo5ps= +go.opentelemetry.io/otel/trace v1.37.0 h1:HLdcFNbRQBE2imdSEgm/kwqmQj1Or1l/7bW6mxVK7z4= +go.opentelemetry.io/otel/trace v1.37.0/go.mod h1:TlgrlQ+PtQO5XFerSPUYG0JSgGyryXewPGyayAWSBS0= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.opentelemetry.io/proto/otlp v0.15.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= @@ -1674,8 +1670,12 @@ go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9E go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= go.uber.org/zap v1.18.1/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= -golang.org/x/arch v0.15.0 h1:QtOrQd0bTUnhNVNndMpLHNWrDmYzZ2KDqSrEymqInZw= -golang.org/x/arch v0.15.0/go.mod h1:JmwW7aLIoRUKgaTzhkiEFxvcEiQGyOg9BMonBJUS7EE= +go.yaml.in/yaml/v2 v2.4.2 h1:DzmwEr2rDGHl7lsFgAHxmNz/1NlQ7xLIrlN2h5d1eGI= +go.yaml.in/yaml/v2 v2.4.2/go.mod h1:081UH+NErpNdqlCXm3TtEran0rJZGxAYx9hb/ELlsPU= +go.yaml.in/yaml/v3 v3.0.3 h1:bXOww4E/J3f66rav3pX3m8w6jDE4knZjGOw8b5Y6iNE= +go.yaml.in/yaml/v3 v3.0.3/go.mod h1:tBHosrYAkRZjRAOREWbDnBXUf08JOwYq++0QNwQiWzI= +golang.org/x/arch v0.17.0 h1:4O3dfLzd+lQewptAHqjewQZQDyEdejz3VwgeYwkZneU= +golang.org/x/arch v0.17.0/go.mod h1:bdwinDaKcfZUGpH09BB7ZmOfhalA8lQdzl62l8gGWsk= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= @@ -1693,8 +1693,8 @@ golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliY golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= golang.org/x/crypto 
v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= golang.org/x/crypto v0.32.0/go.mod h1:ZnnJkOaASj8g0AjIduWNlq2NRxL0PlBrbKVyZ6V/Ugc= -golang.org/x/crypto v0.37.0 h1:kJNSjF/Xp7kU0iB2Z+9viTPMW4EqqsrywMXLJOOsXSE= -golang.org/x/crypto v0.37.0/go.mod h1:vg+k43peMZ0pUMhYmVAWysMK35e6ioLh3wB8ZCAfbVc= +golang.org/x/crypto v0.39.0 h1:SHs+kF4LP+f+p14esP5jAoDpHU8Gu/v9lFRK6IT5imM= +golang.org/x/crypto v0.39.0/go.mod h1:L+Xg3Wf6HoL4Bn4238Z6ft6KfEpN0tJGo53AAPC632U= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -1832,8 +1832,8 @@ golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk= golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= golang.org/x/net v0.34.0/go.mod h1:di0qlW3YNM5oh6GqDGQr92MyTozJPmybPK4Ev/Gm31k= -golang.org/x/net v0.39.0 h1:ZCu7HMWDxpXpaiKdhzIfaltL9Lp31x/3fCP11bc6/fY= -golang.org/x/net v0.39.0/go.mod h1:X7NRbYVEA+ewNkCNyJ513WmMdQ3BineSwVtN2zD/d+E= +golang.org/x/net v0.41.0 h1:vBTly1HeNPEn3wtREYfy4GZ/NECgw2Cnl+nK6Nz3uvw= +golang.org/x/net v0.41.0/go.mod h1:B/K4NNqkfmg07DQYrbwvSluqCJOOXwUjeb/5lOisjbA= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -1863,8 +1863,8 @@ golang.org/x/oauth2 v0.4.0/go.mod h1:RznEsdpjGAINPTOF0UH/t+xJ75L18YO3Ho6Pyn+uRec golang.org/x/oauth2 v0.5.0/go.mod h1:9/XBHVqLaWO3/BRHs5jbpYCnOZVjj5V0ndyaAM7KB4I= golang.org/x/oauth2 v0.6.0/go.mod h1:ycmewcwgD4Rpr3eZJLSB4Kyyljb3qDh40vJ8STE5HKw= golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4= -golang.org/x/oauth2 v0.27.0 h1:da9Vo7/tDv5RH/7nZDz1eMGS/q1Vv1N/7FCrBhI9I3M= -golang.org/x/oauth2 v0.27.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8= +golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI= +golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1885,8 +1885,8 @@ golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= -golang.org/x/sync v0.13.0 h1:AauUjRAJ9OSnvULf/ARrrVywoJDy0YS2AwQ98I37610= -golang.org/x/sync v0.13.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= +golang.org/x/sync v0.15.0 h1:KWH3jNZsfyT6xfAfKiz6MRNmd46ByHDYaZ7KSkCtdW8= +golang.org/x/sync v0.15.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys 
v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1999,8 +1999,8 @@ golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.32.0 h1:s77OFDvIQeibCmezSnk/q6iAfkdiQaJi4VzroCFrN20= -golang.org/x/sys v0.32.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw= +golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= golang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2/go.mod h1:TeRTkGYfJXctD9OcfyVLyj2J3IxLnKwHJR8f4D8a3YE= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= @@ -2015,8 +2015,8 @@ golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU= golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY= golang.org/x/term v0.28.0/go.mod h1:Sw/lC2IAUZ92udQNf3WodGtn4k/XoLyZoh8v/8uiwek= -golang.org/x/term v0.31.0 h1:erwDkOK1Msy6offm1mOgvspSkslFnIGsFnxOKoufg3o= -golang.org/x/term v0.31.0/go.mod h1:R4BeIy7D95HzImkxGkTW1UQTtP54tio2RyHz7PwK0aw= +golang.org/x/term v0.32.0 h1:DR4lr0TjUs3epypdhTOkMmuF5CDFJ/8pOnbzMZPQ7bg= +golang.org/x/term v0.32.0/go.mod h1:uZG1FhGx848Sqfsq4/DlJr3xGGsYMu/L5GW4abiaEPQ= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -2037,8 +2037,8 @@ golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= -golang.org/x/text v0.24.0 h1:dd5Bzh4yt5KYA8f9CJHCP4FB4D51c2c6JvN37xJJkJ0= -golang.org/x/text v0.24.0/go.mod h1:L8rBsPeo2pSS+xqN0d5u2ikmjtmoJbDBT1b7nHvFCdU= +golang.org/x/text v0.26.0 h1:P42AVeLghgTYr4+xUnTRKDMqpar+PtX7KWuNQL21L8M= +golang.org/x/text v0.26.0/go.mod h1:QK15LZJUUQVJxhz7wXgxSy/CJaTFjd0G+YLonydOVQA= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -2133,6 +2133,8 @@ gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJ gonum.org/v1/gonum v0.8.2/go.mod h1:oe/vMfY3deqTw+1EZJhuvEW2iwGF1bW9wwu7XCu0+v0= gonum.org/v1/gonum v0.9.3/go.mod h1:TZumC3NeyVQskjXqmyWt4S3bINhy7B4eYwW69EbyX+0= gonum.org/v1/gonum v0.11.0/go.mod h1:fSG4YDCxxUZQJ7rKsQrj0gMOg00Il0Z96/qMA4bVQhA= +gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk= 
+gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E= gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc= gonum.org/v1/plot v0.9.0/go.mod h1:3Pcqqmp6RHvJI72kgb8fThyUnav364FOsdDo2aGW5lY= @@ -2344,10 +2346,10 @@ google.golang.org/genproto v0.0.0-20230331144136-dcfb400f0633/go.mod h1:UUQDJDOl google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1/go.mod h1:nKE/iIaLqn2bQwXBg8f1g2Ylh6r5MN5CmZvuzZCgsCU= google.golang.org/genproto v0.0.0-20241118233622-e639e219e697 h1:ToEetK57OidYuqD4Q5w+vfEnPvPpuTwedCNVohYJfNk= google.golang.org/genproto v0.0.0-20241118233622-e639e219e697/go.mod h1:JJrvXBWRZaFMxBufik1a4RpFw4HhgVtBBWQeQgUj2cc= -google.golang.org/genproto/googleapis/api v0.0.0-20250414145226-207652e42e2e h1:UdXH7Kzbj+Vzastr5nVfccbmFsmYNygVLSPk1pEfDoY= -google.golang.org/genproto/googleapis/api v0.0.0-20250414145226-207652e42e2e/go.mod h1:085qFyf2+XaZlRdCgKNCIZ3afY2p4HHZdoIRpId8F4A= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250422160041-2d3770c4ea7f h1:N/PrbTw4kdkqNRzVfWPrBekzLuarFREcbFOiOLkXon4= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250422160041-2d3770c4ea7f/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A= +google.golang.org/genproto/googleapis/api v0.0.0-20250707201910-8d1bb00bc6a7 h1:FiusG7LWj+4byqhbvmB+Q93B/mOxJLN2DTozDuZm4EU= +google.golang.org/genproto/googleapis/api v0.0.0-20250707201910-8d1bb00bc6a7/go.mod h1:kXqgZtrWaf6qS3jZOCnCH7WYfrvFjkC51bM8fz3RsCA= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250707201910-8d1bb00bc6a7 h1:pFyd6EwwL2TqFf8emdthzeX+gZE1ElRq3iM8pui4KBY= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250707201910-8d1bb00bc6a7/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM= @@ -2395,8 +2397,8 @@ google.golang.org/grpc v1.52.3/go.mod h1:pu6fVzoFb+NBYNAvQL08ic+lvB2IojljRYuun5v google.golang.org/grpc v1.53.0/go.mod h1:OnIrk0ipVdj4N5d9IUoFUx72/VlD7+jUsHwZgwSMQpw= google.golang.org/grpc v1.54.0/go.mod h1:PUSEXI6iWghWaB6lXM4knEgpJNu2qUcKfDtNci3EC2g= google.golang.org/grpc v1.56.3/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s= -google.golang.org/grpc v1.72.0 h1:S7UkcVa60b5AAQTaO6ZKamFp1zMZSU0fGDK2WZLbBnM= -google.golang.org/grpc v1.72.0/go.mod h1:wH5Aktxcg25y1I3w7H69nHfXdOG3UiadoBtjh3izSDM= +google.golang.org/grpc v1.75.0 h1:+TW+dqTd2Biwe6KKfhE5JpiYIBWq865PhKGSXiivqt4= +google.golang.org/grpc v1.75.0/go.mod h1:JtPAzKiq4v1xcAB2hydNlWI2RnF85XXcV0mhKXr2ecQ= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= @@ -2416,8 +2418,8 @@ google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqw google.golang.org/protobuf v1.29.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= 
-google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY= -google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY= +google.golang.org/protobuf v1.36.8 h1:xHScyCOEuuwZEc6UtSOvPbAT4zRh0xcNRYekJwfqyMc= +google.golang.org/protobuf v1.36.8/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -2515,13 +2517,11 @@ rsc.io/qr v0.2.0 h1:6vBLea5/NRMVTz8V66gipeLycZMl/+UlFmk8DvqQ6WY= rsc.io/qr v0.2.0/go.mod h1:IF+uZjkb9fqyeF/4tlBoynqmQxUoPfWEKh921coOuXs= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= -rsc.io/tmplfunc v0.0.3 h1:53XFQh69AfOa8Tw0Jm7t+GV7KZhOi6jzsCzTtKbMvzU= -rsc.io/tmplfunc v0.0.3/go.mod h1:AG3sTPzElb1Io3Yg4voV9AGZJuleGAwaVRxL9M49PhA= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= -sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= -sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= +sigs.k8s.io/yaml v1.6.0 h1:G8fkbMSAFqgEFgh4b1wmtzDnioxFCUgTZhlbj5P9QYs= +sigs.k8s.io/yaml v1.6.0/go.mod h1:796bPqUfzR/0jLAl6XjHl3Ck7MiyVv8dbTdyT3/pMf4= sourcegraph.com/sourcegraph/appdash v0.0.0-20190731080439-ebfcffb1b5c0/go.mod h1:hI742Nqp5OhwiqlzhgfbWU4mW4yO10fP+LoT9WOswdU= diff --git a/modules/light-clients/08-wasm/internal/types/store_test.go b/modules/light-clients/08-wasm/internal/types/store_test.go index eb20a979baa..5e21919f6ff 100644 --- a/modules/light-clients/08-wasm/internal/types/store_test.go +++ b/modules/light-clients/08-wasm/internal/types/store_test.go @@ -36,9 +36,9 @@ func TestWasmTestSuite(t *testing.T) { testifysuite.Run(t, new(TypesTestSuite)) } -func (suite *TypesTestSuite) SetupTest() { - suite.coordinator = ibctesting.NewCustomAppCoordinator(suite.T(), 1, setupTestingApp) - suite.chainA = suite.coordinator.GetChain(ibctesting.GetChainID(1)) +func (s *TypesTestSuite) SetupTest() { + s.coordinator = ibctesting.NewCustomAppCoordinator(s.T(), 1, setupTestingApp) + s.chainA = s.coordinator.GetChain(ibctesting.GetChainID(1)) } // GetSimApp returns the duplicated SimApp from within the 08-wasm directory. @@ -59,8 +59,8 @@ func setupTestingApp() (ibctesting.TestingApp, map[string]json.RawMessage) { } // TestClientRecoveryStoreGetStore tests the GetStore method of the ClientRecoveryStore. 
-func (suite *TypesTestSuite) TestClientRecoveryStoreGetStore() { - subjectStore, substituteStore := suite.GetSubjectAndSubstituteStore() +func (s *TypesTestSuite) TestClientRecoveryStoreGetStore() { + subjectStore, substituteStore := s.GetSubjectAndSubstituteStore() testCases := []struct { name string @@ -90,26 +90,25 @@ func (suite *TypesTestSuite) TestClientRecoveryStoreGetStore() { } for _, tc := range testCases { - tc := tc - suite.Run(tc.name, func() { + s.Run(tc.name, func() { wrappedStore := internaltypes.NewClientRecoveryStore(subjectStore, substituteStore) store, found := wrappedStore.GetStore(tc.prefix) storeFound := tc.expStore != nil if storeFound { - suite.Require().Equal(tc.expStore, store) - suite.Require().True(found) + s.Require().Equal(tc.expStore, store) + s.Require().True(found) } else { - suite.Require().Nil(store) - suite.Require().False(found) + s.Require().Nil(store) + s.Require().False(found) } }) } } // TestSplitPrefix tests the SplitPrefix function. -func (suite *TypesTestSuite) TestSplitPrefix() { +func (s *TypesTestSuite) TestSplitPrefix() { clientStateKey := host.ClientStateKey() testCases := []struct { @@ -140,19 +139,18 @@ func (suite *TypesTestSuite) TestSplitPrefix() { } for _, tc := range testCases { - tc := tc - suite.Run(tc.name, func() { + s.Run(tc.name, func() { keyPrefix, key := internaltypes.SplitPrefix(tc.prefix) - suite.Require().Equal(tc.expValues[0], keyPrefix) - suite.Require().Equal(tc.expValues[1], key) + s.Require().Equal(tc.expValues[0], keyPrefix) + s.Require().Equal(tc.expValues[1], key) }) } } // TestClientRecoveryStoreGet tests the Get method of the ClientRecoveryStore. -func (suite *TypesTestSuite) TestClientRecoveryStoreGet() { - subjectStore, substituteStore := suite.GetSubjectAndSubstituteStore() +func (s *TypesTestSuite) TestClientRecoveryStoreGet() { + subjectStore, substituteStore := s.GetSubjectAndSubstituteStore() testCases := []struct { name string @@ -181,8 +179,7 @@ func (suite *TypesTestSuite) TestClientRecoveryStoreGet() { } for _, tc := range testCases { - tc := tc - suite.Run(tc.name, func() { + s.Run(tc.name, func() { wrappedStore := internaltypes.NewClientRecoveryStore(subjectStore, substituteStore) prefixedKey := tc.prefix @@ -192,17 +189,17 @@ func (suite *TypesTestSuite) TestClientRecoveryStoreGet() { if storeFound { expValue := tc.expStore.Get(tc.key) - suite.Require().Equal(expValue, wrappedStore.Get(prefixedKey)) + s.Require().Equal(expValue, wrappedStore.Get(prefixedKey)) } else { // expected value when types is not found is an empty byte slice - suite.Require().Equal([]byte(nil), wrappedStore.Get(prefixedKey)) + s.Require().Equal([]byte(nil), wrappedStore.Get(prefixedKey)) } }) } } // TestClientRecoveryStoreSet tests the Set method of the ClientRecoveryStore. 
-func (suite *TypesTestSuite) TestClientRecoveryStoreSet() { +func (s *TypesTestSuite) TestClientRecoveryStoreSet() { testCases := []struct { name string prefix []byte @@ -224,9 +221,8 @@ func (suite *TypesTestSuite) TestClientRecoveryStoreSet() { } for _, tc := range testCases { - tc := tc - suite.Run(tc.name, func() { - subjectStore, substituteStore := suite.GetSubjectAndSubstituteStore() + s.Run(tc.name, func() { + subjectStore, substituteStore := s.GetSubjectAndSubstituteStore() wrappedStore := internaltypes.NewClientRecoveryStore(subjectStore, substituteStore) prefixedKey := tc.prefix @@ -236,23 +232,23 @@ func (suite *TypesTestSuite) TestClientRecoveryStoreSet() { if tc.expSet { store, found := wrappedStore.GetStore(tc.prefix) - suite.Require().True(found) - suite.Require().Equal(subjectStore, store) + s.Require().True(found) + s.Require().Equal(subjectStore, store) value := store.Get(tc.key) - suite.Require().Equal(wasmtesting.MockClientStateBz, value) + s.Require().Equal(wasmtesting.MockClientStateBz, value) } else { // Assert that no writes happened to subject or substitute types - suite.Require().NotEqual(wasmtesting.MockClientStateBz, subjectStore.Get(tc.key)) - suite.Require().NotEqual(wasmtesting.MockClientStateBz, substituteStore.Get(tc.key)) + s.Require().NotEqual(wasmtesting.MockClientStateBz, subjectStore.Get(tc.key)) + s.Require().NotEqual(wasmtesting.MockClientStateBz, substituteStore.Get(tc.key)) } }) } } // TestClientRecoveryStoreDelete tests the Delete method of the ClientRecoveryStore. -func (suite *TypesTestSuite) TestClientRecoveryStoreDelete() { +func (s *TypesTestSuite) TestClientRecoveryStoreDelete() { var ( mockStoreKey = []byte("mock-key") mockStoreValue = []byte("mock-value") @@ -279,9 +275,8 @@ func (suite *TypesTestSuite) TestClientRecoveryStoreDelete() { } for _, tc := range testCases { - tc := tc - suite.Run(tc.name, func() { - subjectStore, substituteStore := suite.GetSubjectAndSubstituteStore() + s.Run(tc.name, func() { + subjectStore, substituteStore := s.GetSubjectAndSubstituteStore() // Set values under the mock key: subjectStore.Set(mockStoreKey, mockStoreValue) @@ -296,22 +291,22 @@ func (suite *TypesTestSuite) TestClientRecoveryStoreDelete() { if tc.expDelete { store, found := wrappedStore.GetStore(tc.prefix) - suite.Require().True(found) - suite.Require().Equal(subjectStore, store) + s.Require().True(found) + s.Require().Equal(subjectStore, store) - suite.Require().False(store.Has(tc.key)) + s.Require().False(store.Has(tc.key)) } else { // Assert that no deletions happened to subject or substitute types - suite.Require().True(subjectStore.Has(tc.key)) - suite.Require().True(substituteStore.Has(tc.key)) + s.Require().True(subjectStore.Has(tc.key)) + s.Require().True(substituteStore.Has(tc.key)) } }) } } // TestClientRecoveryStoreIterators tests the Iterator/ReverseIterator methods of the ClientRecoveryStore. 
-func (suite *TypesTestSuite) TestClientRecoveryStoreIterators() { - subjectStore, substituteStore := suite.GetSubjectAndSubstituteStore() +func (s *TypesTestSuite) TestClientRecoveryStoreIterators() { + subjectStore, substituteStore := s.GetSubjectAndSubstituteStore() testCases := []struct { name string @@ -356,8 +351,7 @@ func (suite *TypesTestSuite) TestClientRecoveryStoreIterators() { } for _, tc := range testCases { - tc := tc - suite.Run(tc.name, func() { + s.Run(tc.name, func() { wrappedStore := internaltypes.NewClientRecoveryStore(subjectStore, substituteStore) prefixedKeyStart := tc.prefixStart @@ -366,19 +360,19 @@ func (suite *TypesTestSuite) TestClientRecoveryStoreIterators() { prefixedKeyEnd = append(prefixedKeyEnd, tc.end...) if tc.expValid { - suite.Require().NotNil(wrappedStore.Iterator(prefixedKeyStart, prefixedKeyEnd)) - suite.Require().NotNil(wrappedStore.ReverseIterator(prefixedKeyStart, prefixedKeyEnd)) + s.Require().NotNil(wrappedStore.Iterator(prefixedKeyStart, prefixedKeyEnd)) + s.Require().NotNil(wrappedStore.ReverseIterator(prefixedKeyStart, prefixedKeyEnd)) } else { // Iterator returned should be Closed, calling `Valid` should return false - suite.Require().False(wrappedStore.Iterator(prefixedKeyStart, prefixedKeyEnd).Valid()) - suite.Require().False(wrappedStore.ReverseIterator(prefixedKeyStart, prefixedKeyEnd).Valid()) + s.Require().False(wrappedStore.Iterator(prefixedKeyStart, prefixedKeyEnd).Valid()) + s.Require().False(wrappedStore.ReverseIterator(prefixedKeyStart, prefixedKeyEnd).Valid()) } }) } } -func (suite *TypesTestSuite) TestNewClientRecoveryStore() { - subjectStore, substituteStore := suite.GetSubjectAndSubstituteStore() +func (s *TypesTestSuite) TestNewClientRecoveryStore() { + subjectStore, substituteStore := s.GetSubjectAndSubstituteStore() testCases := []struct { name string @@ -407,16 +401,15 @@ func (suite *TypesTestSuite) TestNewClientRecoveryStore() { } for _, tc := range testCases { - tc := tc - suite.Run(tc.name, func() { + s.Run(tc.name, func() { tc.malleate() if !tc.expPanic { - suite.Require().NotPanics(func() { + s.Require().NotPanics(func() { internaltypes.NewClientRecoveryStore(subjectStore, substituteStore) }) } else { - suite.Require().Panics(func() { + s.Require().Panics(func() { internaltypes.NewClientRecoveryStore(subjectStore, substituteStore) }) } @@ -425,11 +418,11 @@ func (suite *TypesTestSuite) TestNewClientRecoveryStore() { } // GetSubjectAndSubstituteStore returns two KVStores for testing the migrate client wrapping types. 
-func (suite *TypesTestSuite) GetSubjectAndSubstituteStore() (storetypes.KVStore, storetypes.KVStore) { - suite.SetupTest() +func (s *TypesTestSuite) GetSubjectAndSubstituteStore() (storetypes.KVStore, storetypes.KVStore) { + s.SetupTest() - ctx := suite.chainA.GetContext() - storeKey := GetSimApp(suite.chainA).GetKey(ibcexported.StoreKey) + ctx := s.chainA.GetContext() + storeKey := GetSimApp(s.chainA).GetKey(ibcexported.StoreKey) subjectClientStore := prefix.NewStore(ctx.KVStore(storeKey), []byte(clienttypes.FormatClientIdentifier(types.Wasm, 0))) substituteClientStore := prefix.NewStore(ctx.KVStore(storeKey), []byte(clienttypes.FormatClientIdentifier(types.Wasm, 1))) diff --git a/modules/light-clients/08-wasm/keeper/contract_keeper.go b/modules/light-clients/08-wasm/keeper/contract_keeper.go index 89fc8b1e4d0..25a6b522dd0 100644 --- a/modules/light-clients/08-wasm/keeper/contract_keeper.go +++ b/modules/light-clients/08-wasm/keeper/contract_keeper.go @@ -34,7 +34,7 @@ var ( ) // instantiateContract calls vm.Instantiate with appropriate arguments. -func (k Keeper) instantiateContract(ctx sdk.Context, clientID string, clientStore storetypes.KVStore, checksum types.Checksum, msg []byte) (*wasmvmtypes.ContractResult, error) { +func (k *Keeper) instantiateContract(ctx sdk.Context, clientID string, clientStore storetypes.KVStore, checksum types.Checksum, msg []byte) (*wasmvmtypes.ContractResult, error) { sdkGasMeter := ctx.GasMeter() multipliedGasMeter := types.NewMultipliedGasMeter(sdkGasMeter, types.VMGasRegister) gasLimit := VMGasRegister.RuntimeGasForContract(ctx) @@ -53,7 +53,7 @@ func (k Keeper) instantiateContract(ctx sdk.Context, clientID string, clientStor } // callContract calls vm.Sudo with internally constructed gas meter and environment. -func (k Keeper) callContract(ctx sdk.Context, clientID string, clientStore storetypes.KVStore, checksum types.Checksum, msg []byte) (*wasmvmtypes.ContractResult, error) { +func (k *Keeper) callContract(ctx sdk.Context, clientID string, clientStore storetypes.KVStore, checksum types.Checksum, msg []byte) (*wasmvmtypes.ContractResult, error) { sdkGasMeter := ctx.GasMeter() multipliedGasMeter := types.NewMultipliedGasMeter(sdkGasMeter, VMGasRegister) gasLimit := VMGasRegister.RuntimeGasForContract(ctx) @@ -67,7 +67,7 @@ func (k Keeper) callContract(ctx sdk.Context, clientID string, clientStore store } // queryContract calls vm.Query. -func (k Keeper) queryContract(ctx sdk.Context, clientID string, clientStore storetypes.KVStore, checksum types.Checksum, msg []byte) (*wasmvmtypes.QueryResult, error) { +func (k *Keeper) queryContract(ctx sdk.Context, clientID string, clientStore storetypes.KVStore, checksum types.Checksum, msg []byte) (*wasmvmtypes.QueryResult, error) { sdkGasMeter := ctx.GasMeter() multipliedGasMeter := types.NewMultipliedGasMeter(sdkGasMeter, VMGasRegister) gasLimit := VMGasRegister.RuntimeGasForContract(ctx) @@ -82,7 +82,7 @@ func (k Keeper) queryContract(ctx sdk.Context, clientID string, clientStore stor } // migrateContract calls vm.Migrate with internally constructed gas meter and environment. 
-func (k Keeper) migrateContract(ctx sdk.Context, clientID string, clientStore storetypes.KVStore, checksum types.Checksum, msg []byte) (*wasmvmtypes.ContractResult, error) { +func (k *Keeper) migrateContract(ctx sdk.Context, clientID string, clientStore storetypes.KVStore, checksum types.Checksum, msg []byte) (*wasmvmtypes.ContractResult, error) { sdkGasMeter := ctx.GasMeter() multipliedGasMeter := types.NewMultipliedGasMeter(sdkGasMeter, VMGasRegister) gasLimit := VMGasRegister.RuntimeGasForContract(ctx) @@ -97,7 +97,7 @@ func (k Keeper) migrateContract(ctx sdk.Context, clientID string, clientStore st } // WasmInstantiate accepts a message to instantiate a wasm contract, JSON encodes it and calls instantiateContract. -func (k Keeper) WasmInstantiate(ctx sdk.Context, clientID string, clientStore storetypes.KVStore, cs *types.ClientState, payload types.InstantiateMessage) error { +func (k *Keeper) WasmInstantiate(ctx sdk.Context, clientID string, clientStore storetypes.KVStore, cs *types.ClientState, payload types.InstantiateMessage) error { encodedData, err := json.Marshal(payload) if err != nil { return errorsmod.Wrap(err, "failed to marshal payload for wasm contract instantiation") @@ -136,7 +136,7 @@ func (k Keeper) WasmInstantiate(ctx sdk.Context, clientID string, clientStore st // - the response of the contract call contains non-empty events // - the response of the contract call contains non-empty attributes // - the data bytes of the response cannot be unmarshaled into the result type -func (k Keeper) WasmSudo(ctx sdk.Context, clientID string, clientStore storetypes.KVStore, cs *types.ClientState, payload types.SudoMsg) ([]byte, error) { +func (k *Keeper) WasmSudo(ctx sdk.Context, clientID string, clientStore storetypes.KVStore, cs *types.ClientState, payload types.SudoMsg) ([]byte, error) { encodedData, err := json.Marshal(payload) if err != nil { return nil, errorsmod.Wrap(err, "failed to marshal payload for wasm execution") @@ -171,7 +171,7 @@ func (k Keeper) WasmSudo(ctx sdk.Context, clientID string, clientStore storetype // WasmMigrate calls the migrate entry point of the contract with the given payload and returns the result.
// WasmMigrate returns an error if: // - the contract migration returns an error -func (k Keeper) WasmMigrate(ctx sdk.Context, clientStore storetypes.KVStore, cs *types.ClientState, clientID string, payload []byte) error { +func (k *Keeper) WasmMigrate(ctx sdk.Context, clientStore storetypes.KVStore, cs *types.ClientState, clientID string, payload []byte) error { res, err := k.migrateContract(ctx, clientID, clientStore, cs.Checksum, payload) if err != nil { return errorsmod.Wrap(types.ErrVMError, err.Error()) @@ -192,7 +192,7 @@ func (k Keeper) WasmMigrate(ctx sdk.Context, clientStore storetypes.KVStore, cs // WasmQuery returns an error if: // - the contract query returns an error // - the data bytes of the response cannot be unmarshaled into the result type -func (k Keeper) WasmQuery(ctx sdk.Context, clientID string, clientStore storetypes.KVStore, cs *types.ClientState, payload types.QueryMsg) ([]byte, error) { +func (k *Keeper) WasmQuery(ctx sdk.Context, clientID string, clientStore storetypes.KVStore, cs *types.ClientState, payload types.QueryMsg) ([]byte, error) { encodedData, err := json.Marshal(payload) if err != nil { return nil, errorsmod.Wrap(err, "failed to marshal payload for wasm query") diff --git a/modules/light-clients/08-wasm/keeper/contract_keeper_test.go b/modules/light-clients/08-wasm/keeper/contract_keeper_test.go index 7105bf29f29..61ec401a26b 100644 --- a/modules/light-clients/08-wasm/keeper/contract_keeper_test.go +++ b/modules/light-clients/08-wasm/keeper/contract_keeper_test.go @@ -14,7 +14,7 @@ import ( ibctm "github.com/cosmos/ibc-go/v10/modules/light-clients/07-tendermint" ) -func (suite *KeeperTestSuite) TestWasmInstantiate() { +func (s *KeeperTestSuite) TestWasmInstantiate() { testCases := []struct { name string malleate func() @@ -23,25 +23,25 @@ func (suite *KeeperTestSuite) TestWasmInstantiate() { { "success", func() { - suite.mockVM.InstantiateFn = func(_ wasmvm.Checksum, _ wasmvmtypes.Env, _ wasmvmtypes.MessageInfo, initMsg []byte, store wasmvm.KVStore, goapi wasmvm.GoAPI, _ wasmvm.Querier, _ wasmvm.GasMeter, _ uint64, _ wasmvmtypes.UFraction) (*wasmvmtypes.ContractResult, uint64, error) { + s.mockVM.InstantiateFn = func(_ wasmvm.Checksum, _ wasmvmtypes.Env, _ wasmvmtypes.MessageInfo, initMsg []byte, store wasmvm.KVStore, goapi wasmvm.GoAPI, _ wasmvm.Querier, _ wasmvm.GasMeter, _ uint64, _ wasmvmtypes.UFraction) (*wasmvmtypes.ContractResult, uint64, error) { // Ensure GoAPI is set - suite.Require().NotNil(goapi.CanonicalizeAddress) - suite.Require().NotNil(goapi.HumanizeAddress) - suite.Require().NotNil(goapi.ValidateAddress) + s.Require().NotNil(goapi.CanonicalizeAddress) + s.Require().NotNil(goapi.HumanizeAddress) + s.Require().NotNil(goapi.ValidateAddress) var payload types.InstantiateMessage err := json.Unmarshal(initMsg, &payload) - suite.Require().NoError(err) + s.Require().NoError(err) - wrappedClientState, ok := clienttypes.MustUnmarshalClientState(suite.chainA.App.AppCodec(), payload.ClientState).(*ibctm.ClientState) - suite.Require().True(ok) + wrappedClientState, ok := clienttypes.MustUnmarshalClientState(s.chainA.App.AppCodec(), payload.ClientState).(*ibctm.ClientState) + s.Require().True(ok) clientState := types.NewClientState(payload.ClientState, payload.Checksum, wrappedClientState.LatestHeight) - clientStateBz := clienttypes.MustMarshalClientState(suite.chainA.App.AppCodec(), clientState) + clientStateBz := clienttypes.MustMarshalClientState(s.chainA.App.AppCodec(), clientState) store.Set(host.ClientStateKey(), clientStateBz) consensusState
:= types.NewConsensusState(payload.ConsensusState) - consensusStateBz := clienttypes.MustMarshalConsensusState(suite.chainA.App.AppCodec(), consensusState) + consensusStateBz := clienttypes.MustMarshalConsensusState(s.chainA.App.AppCodec(), consensusState) store.Set(host.ConsensusStateKey(clientState.LatestHeight), consensusStateBz) return &wasmvmtypes.ContractResult{Ok: &wasmvmtypes.Response{}}, 0, nil @@ -52,7 +52,7 @@ func (suite *KeeperTestSuite) TestWasmInstantiate() { { "failure: vm returns error", func() { - suite.mockVM.InstantiateFn = func(_ wasmvm.Checksum, _ wasmvmtypes.Env, _ wasmvmtypes.MessageInfo, _ []byte, _ wasmvm.KVStore, _ wasmvm.GoAPI, _ wasmvm.Querier, _ wasmvm.GasMeter, _ uint64, _ wasmvmtypes.UFraction) (*wasmvmtypes.ContractResult, uint64, error) { + s.mockVM.InstantiateFn = func(_ wasmvm.Checksum, _ wasmvmtypes.Env, _ wasmvmtypes.MessageInfo, _ []byte, _ wasmvm.KVStore, _ wasmvm.GoAPI, _ wasmvm.Querier, _ wasmvm.GasMeter, _ uint64, _ wasmvmtypes.UFraction) (*wasmvmtypes.ContractResult, uint64, error) { return nil, 0, wasmtesting.ErrMockVM } }, @@ -61,7 +61,7 @@ func (suite *KeeperTestSuite) TestWasmInstantiate() { { "failure: contract returns error", func() { - suite.mockVM.InstantiateFn = func(_ wasmvm.Checksum, _ wasmvmtypes.Env, _ wasmvmtypes.MessageInfo, _ []byte, _ wasmvm.KVStore, _ wasmvm.GoAPI, _ wasmvm.Querier, _ wasmvm.GasMeter, _ uint64, _ wasmvmtypes.UFraction) (*wasmvmtypes.ContractResult, uint64, error) { + s.mockVM.InstantiateFn = func(_ wasmvm.Checksum, _ wasmvmtypes.Env, _ wasmvmtypes.MessageInfo, _ []byte, _ wasmvm.KVStore, _ wasmvm.GoAPI, _ wasmvm.Querier, _ wasmvm.GasMeter, _ uint64, _ wasmvmtypes.UFraction) (*wasmvmtypes.ContractResult, uint64, error) { return &wasmvmtypes.ContractResult{Err: wasmtesting.ErrMockContract.Error()}, 0, nil } }, @@ -70,7 +70,7 @@ func (suite *KeeperTestSuite) TestWasmInstantiate() { { "failure: contract returns non-empty messages", func() { - suite.mockVM.InstantiateFn = func(_ wasmvm.Checksum, _ wasmvmtypes.Env, _ wasmvmtypes.MessageInfo, _ []byte, _ wasmvm.KVStore, _ wasmvm.GoAPI, _ wasmvm.Querier, _ wasmvm.GasMeter, _ uint64, _ wasmvmtypes.UFraction) (*wasmvmtypes.ContractResult, uint64, error) { + s.mockVM.InstantiateFn = func(_ wasmvm.Checksum, _ wasmvmtypes.Env, _ wasmvmtypes.MessageInfo, _ []byte, _ wasmvm.KVStore, _ wasmvm.GoAPI, _ wasmvm.Querier, _ wasmvm.GasMeter, _ uint64, _ wasmvmtypes.UFraction) (*wasmvmtypes.ContractResult, uint64, error) { resp := wasmvmtypes.Response{Messages: []wasmvmtypes.SubMsg{{}}} return &wasmvmtypes.ContractResult{Ok: &resp}, wasmtesting.DefaultGasUsed, nil @@ -81,7 +81,7 @@ func (suite *KeeperTestSuite) TestWasmInstantiate() { { "failure: contract returns non-empty events", func() { - suite.mockVM.InstantiateFn = func(_ wasmvm.Checksum, _ wasmvmtypes.Env, _ wasmvmtypes.MessageInfo, _ []byte, _ wasmvm.KVStore, _ wasmvm.GoAPI, _ wasmvm.Querier, _ wasmvm.GasMeter, _ uint64, _ wasmvmtypes.UFraction) (*wasmvmtypes.ContractResult, uint64, error) { + s.mockVM.InstantiateFn = func(_ wasmvm.Checksum, _ wasmvmtypes.Env, _ wasmvmtypes.MessageInfo, _ []byte, _ wasmvm.KVStore, _ wasmvm.GoAPI, _ wasmvm.Querier, _ wasmvm.GasMeter, _ uint64, _ wasmvmtypes.UFraction) (*wasmvmtypes.ContractResult, uint64, error) { resp := wasmvmtypes.Response{Events: []wasmvmtypes.Event{{}}} return &wasmvmtypes.ContractResult{Ok: &resp}, wasmtesting.DefaultGasUsed, nil @@ -92,7 +92,7 @@ func (suite *KeeperTestSuite) TestWasmInstantiate() { { "failure: contract returns non-empty attributes", func() { - 
suite.mockVM.InstantiateFn = func(_ wasmvm.Checksum, _ wasmvmtypes.Env, _ wasmvmtypes.MessageInfo, _ []byte, _ wasmvm.KVStore, _ wasmvm.GoAPI, _ wasmvm.Querier, _ wasmvm.GasMeter, _ uint64, _ wasmvmtypes.UFraction) (*wasmvmtypes.ContractResult, uint64, error) { + s.mockVM.InstantiateFn = func(_ wasmvm.Checksum, _ wasmvmtypes.Env, _ wasmvmtypes.MessageInfo, _ []byte, _ wasmvm.KVStore, _ wasmvm.GoAPI, _ wasmvm.Querier, _ wasmvm.GasMeter, _ uint64, _ wasmvmtypes.UFraction) (*wasmvmtypes.ContractResult, uint64, error) { resp := wasmvmtypes.Response{Attributes: []wasmvmtypes.EventAttribute{{}}} return &wasmvmtypes.ContractResult{Ok: &resp}, wasmtesting.DefaultGasUsed, nil @@ -103,11 +103,11 @@ func (suite *KeeperTestSuite) TestWasmInstantiate() { { "failure: change clientstate type", func() { - suite.mockVM.InstantiateFn = func(_ wasmvm.Checksum, _ wasmvmtypes.Env, _ wasmvmtypes.MessageInfo, _ []byte, store wasmvm.KVStore, _ wasmvm.GoAPI, _ wasmvm.Querier, _ wasmvm.GasMeter, _ uint64, _ wasmvmtypes.UFraction) (*wasmvmtypes.ContractResult, uint64, error) { + s.mockVM.InstantiateFn = func(_ wasmvm.Checksum, _ wasmvmtypes.Env, _ wasmvmtypes.MessageInfo, _ []byte, store wasmvm.KVStore, _ wasmvm.GoAPI, _ wasmvm.Querier, _ wasmvm.GasMeter, _ uint64, _ wasmvmtypes.UFraction) (*wasmvmtypes.ContractResult, uint64, error) { store.Set(host.ClientStateKey(), []byte("changed client state")) data, err := json.Marshal(types.EmptyResult{}) - suite.Require().NoError(err) + s.Require().NoError(err) return &wasmvmtypes.ContractResult{Ok: &wasmvmtypes.Response{Data: data}}, wasmtesting.DefaultGasUsed, nil } }, @@ -116,10 +116,10 @@ func (suite *KeeperTestSuite) TestWasmInstantiate() { { "failure: delete clientstate", func() { - suite.mockVM.InstantiateFn = func(_ wasmvm.Checksum, _ wasmvmtypes.Env, _ wasmvmtypes.MessageInfo, _ []byte, store wasmvm.KVStore, _ wasmvm.GoAPI, _ wasmvm.Querier, _ wasmvm.GasMeter, _ uint64, _ wasmvmtypes.UFraction) (*wasmvmtypes.ContractResult, uint64, error) { + s.mockVM.InstantiateFn = func(_ wasmvm.Checksum, _ wasmvmtypes.Env, _ wasmvmtypes.MessageInfo, _ []byte, store wasmvm.KVStore, _ wasmvm.GoAPI, _ wasmvm.Querier, _ wasmvm.GasMeter, _ uint64, _ wasmvmtypes.UFraction) (*wasmvmtypes.ContractResult, uint64, error) { store.Delete(host.ClientStateKey()) data, err := json.Marshal(types.EmptyResult{}) - suite.Require().NoError(err) + s.Require().NoError(err) return &wasmvmtypes.ContractResult{Ok: &wasmvmtypes.Response{Data: data}}, wasmtesting.DefaultGasUsed, nil } }, @@ -128,10 +128,10 @@ func (suite *KeeperTestSuite) TestWasmInstantiate() { { "failure: unmarshallable clientstate", func() { - suite.mockVM.InstantiateFn = func(_ wasmvm.Checksum, _ wasmvmtypes.Env, _ wasmvmtypes.MessageInfo, _ []byte, store wasmvm.KVStore, _ wasmvm.GoAPI, _ wasmvm.Querier, _ wasmvm.GasMeter, _ uint64, _ wasmvmtypes.UFraction) (*wasmvmtypes.ContractResult, uint64, error) { + s.mockVM.InstantiateFn = func(_ wasmvm.Checksum, _ wasmvmtypes.Env, _ wasmvmtypes.MessageInfo, _ []byte, store wasmvm.KVStore, _ wasmvm.GoAPI, _ wasmvm.Querier, _ wasmvm.GasMeter, _ uint64, _ wasmvmtypes.UFraction) (*wasmvmtypes.ContractResult, uint64, error) { store.Set(host.ClientStateKey(), []byte("invalid json")) data, err := json.Marshal(types.EmptyResult{}) - suite.Require().NoError(err) + s.Require().NoError(err) return &wasmvmtypes.ContractResult{Ok: &wasmvmtypes.Response{Data: data}}, wasmtesting.DefaultGasUsed, nil } }, @@ -140,19 +140,19 @@ func (suite *KeeperTestSuite) TestWasmInstantiate() { { "failure: change checksum", 
func() { - suite.mockVM.InstantiateFn = func(_ wasmvm.Checksum, _ wasmvmtypes.Env, _ wasmvmtypes.MessageInfo, initMsg []byte, store wasmvm.KVStore, _ wasmvm.GoAPI, _ wasmvm.Querier, _ wasmvm.GasMeter, _ uint64, _ wasmvmtypes.UFraction) (*wasmvmtypes.ContractResult, uint64, error) { + s.mockVM.InstantiateFn = func(_ wasmvm.Checksum, _ wasmvmtypes.Env, _ wasmvmtypes.MessageInfo, initMsg []byte, store wasmvm.KVStore, _ wasmvm.GoAPI, _ wasmvm.Querier, _ wasmvm.GasMeter, _ uint64, _ wasmvmtypes.UFraction) (*wasmvmtypes.ContractResult, uint64, error) { var payload types.InstantiateMessage err := json.Unmarshal(initMsg, &payload) - suite.Require().NoError(err) + s.Require().NoError(err) // Change the checksum to something else. - wrappedClientState, ok := clienttypes.MustUnmarshalClientState(suite.chainA.App.AppCodec(), payload.ClientState).(*ibctm.ClientState) - suite.Require().True(ok) + wrappedClientState, ok := clienttypes.MustUnmarshalClientState(s.chainA.App.AppCodec(), payload.ClientState).(*ibctm.ClientState) + s.Require().True(ok) clientState := types.NewClientState(payload.ClientState, []byte("new checksum"), wrappedClientState.LatestHeight) - store.Set(host.ClientStateKey(), clienttypes.MustMarshalClientState(suite.chainA.App.AppCodec(), clientState)) + store.Set(host.ClientStateKey(), clienttypes.MustMarshalClientState(s.chainA.App.AppCodec(), clientState)) resp, err := json.Marshal(types.UpdateStateResult{}) - suite.Require().NoError(err) + s.Require().NoError(err) return &wasmvmtypes.ContractResult{Ok: &wasmvmtypes.Response{Data: resp}}, wasmtesting.DefaultGasUsed, nil } @@ -162,33 +162,32 @@ func (suite *KeeperTestSuite) TestWasmInstantiate() { } for _, tc := range testCases { - tc := tc - suite.Run(tc.name, func() { - suite.SetupWasmWithMockVM() - checksum := suite.storeWasmCode(wasmtesting.Code) + s.Run(tc.name, func() { + s.SetupWasmWithMockVM() + checksum := s.storeWasmCode(wasmtesting.Code) tc.malleate() initMsg := types.InstantiateMessage{ - ClientState: clienttypes.MustMarshalClientState(suite.chainA.App.AppCodec(), wasmtesting.MockTendermitClientState), - ConsensusState: clienttypes.MustMarshalConsensusState(suite.chainA.App.AppCodec(), wasmtesting.MockTendermintClientConsensusState), + ClientState: clienttypes.MustMarshalClientState(s.chainA.App.AppCodec(), wasmtesting.MockTendermitClientState), + ConsensusState: clienttypes.MustMarshalConsensusState(s.chainA.App.AppCodec(), wasmtesting.MockTendermintClientConsensusState), Checksum: checksum, } - clientStore := suite.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(suite.chainA.GetContext(), defaultWasmClientID) - wasmClientKeeper := GetSimApp(suite.chainA).WasmClientKeeper - err := wasmClientKeeper.WasmInstantiate(suite.chainA.GetContext(), defaultWasmClientID, clientStore, &types.ClientState{Checksum: checksum}, initMsg) + clientStore := s.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(s.chainA.GetContext(), defaultWasmClientID) + wasmClientKeeper := GetSimApp(s.chainA).WasmClientKeeper + err := wasmClientKeeper.WasmInstantiate(s.chainA.GetContext(), defaultWasmClientID, clientStore, &types.ClientState{Checksum: checksum}, initMsg) if tc.expError == nil { - suite.Require().NoError(err) + s.Require().NoError(err) } else { - suite.Require().ErrorIs(err, tc.expError) + s.Require().ErrorIs(err, tc.expError) } }) } } -func (suite *KeeperTestSuite) TestWasmMigrate() { +func (s *KeeperTestSuite) TestWasmMigrate() { testCases := []struct { name string malleate func() @@ -197,14 +196,14 @@ func (suite *KeeperTestSuite) 
TestWasmMigrate() { { "success", func() { - suite.mockVM.MigrateFn = func(_ wasmvm.Checksum, _ wasmvmtypes.Env, _ []byte, _ wasmvm.KVStore, goapi wasmvm.GoAPI, _ wasmvm.Querier, _ wasmvm.GasMeter, _ uint64, _ wasmvmtypes.UFraction) (*wasmvmtypes.ContractResult, uint64, error) { + s.mockVM.MigrateFn = func(_ wasmvm.Checksum, _ wasmvmtypes.Env, _ []byte, _ wasmvm.KVStore, goapi wasmvm.GoAPI, _ wasmvm.Querier, _ wasmvm.GasMeter, _ uint64, _ wasmvmtypes.UFraction) (*wasmvmtypes.ContractResult, uint64, error) { // Ensure GoAPI is set - suite.Require().NotNil(goapi.CanonicalizeAddress) - suite.Require().NotNil(goapi.HumanizeAddress) - suite.Require().NotNil(goapi.ValidateAddress) + s.Require().NotNil(goapi.CanonicalizeAddress) + s.Require().NotNil(goapi.HumanizeAddress) + s.Require().NotNil(goapi.ValidateAddress) resp, err := json.Marshal(types.EmptyResult{}) - suite.Require().NoError(err) + s.Require().NoError(err) return &wasmvmtypes.ContractResult{Ok: &wasmvmtypes.Response{Data: resp}}, 0, nil } @@ -214,7 +213,7 @@ func (suite *KeeperTestSuite) TestWasmMigrate() { { "failure: vm returns error", func() { - suite.mockVM.MigrateFn = func(_ wasmvm.Checksum, _ wasmvmtypes.Env, _ []byte, _ wasmvm.KVStore, _ wasmvm.GoAPI, _ wasmvm.Querier, _ wasmvm.GasMeter, _ uint64, _ wasmvmtypes.UFraction) (*wasmvmtypes.ContractResult, uint64, error) { + s.mockVM.MigrateFn = func(_ wasmvm.Checksum, _ wasmvmtypes.Env, _ []byte, _ wasmvm.KVStore, _ wasmvm.GoAPI, _ wasmvm.Querier, _ wasmvm.GasMeter, _ uint64, _ wasmvmtypes.UFraction) (*wasmvmtypes.ContractResult, uint64, error) { return nil, 0, wasmtesting.ErrMockVM } }, @@ -223,7 +222,7 @@ func (suite *KeeperTestSuite) TestWasmMigrate() { { "failure: contract returns error", func() { - suite.mockVM.MigrateFn = func(_ wasmvm.Checksum, _ wasmvmtypes.Env, _ []byte, _ wasmvm.KVStore, _ wasmvm.GoAPI, _ wasmvm.Querier, _ wasmvm.GasMeter, _ uint64, _ wasmvmtypes.UFraction) (*wasmvmtypes.ContractResult, uint64, error) { + s.mockVM.MigrateFn = func(_ wasmvm.Checksum, _ wasmvmtypes.Env, _ []byte, _ wasmvm.KVStore, _ wasmvm.GoAPI, _ wasmvm.Querier, _ wasmvm.GasMeter, _ uint64, _ wasmvmtypes.UFraction) (*wasmvmtypes.ContractResult, uint64, error) { return &wasmvmtypes.ContractResult{Err: wasmtesting.ErrMockContract.Error()}, 0, nil } }, @@ -232,7 +231,7 @@ func (suite *KeeperTestSuite) TestWasmMigrate() { { "failure: contract returns non-empty messages", func() { - suite.mockVM.MigrateFn = func(_ wasmvm.Checksum, _ wasmvmtypes.Env, _ []byte, store wasmvm.KVStore, _ wasmvm.GoAPI, _ wasmvm.Querier, _ wasmvm.GasMeter, _ uint64, _ wasmvmtypes.UFraction) (*wasmvmtypes.ContractResult, uint64, error) { + s.mockVM.MigrateFn = func(_ wasmvm.Checksum, _ wasmvmtypes.Env, _ []byte, store wasmvm.KVStore, _ wasmvm.GoAPI, _ wasmvm.Querier, _ wasmvm.GasMeter, _ uint64, _ wasmvmtypes.UFraction) (*wasmvmtypes.ContractResult, uint64, error) { resp := wasmvmtypes.Response{Messages: []wasmvmtypes.SubMsg{{}}} return &wasmvmtypes.ContractResult{Ok: &resp}, wasmtesting.DefaultGasUsed, nil @@ -243,7 +242,7 @@ func (suite *KeeperTestSuite) TestWasmMigrate() { { "failure: contract returns non-empty events", func() { - suite.mockVM.MigrateFn = func(_ wasmvm.Checksum, _ wasmvmtypes.Env, _ []byte, store wasmvm.KVStore, _ wasmvm.GoAPI, _ wasmvm.Querier, _ wasmvm.GasMeter, _ uint64, _ wasmvmtypes.UFraction) (*wasmvmtypes.ContractResult, uint64, error) { + s.mockVM.MigrateFn = func(_ wasmvm.Checksum, _ wasmvmtypes.Env, _ []byte, store wasmvm.KVStore, _ wasmvm.GoAPI, _ wasmvm.Querier, _ wasmvm.GasMeter, _ 
uint64, _ wasmvmtypes.UFraction) (*wasmvmtypes.ContractResult, uint64, error) { resp := wasmvmtypes.Response{Events: []wasmvmtypes.Event{{}}} return &wasmvmtypes.ContractResult{Ok: &resp}, wasmtesting.DefaultGasUsed, nil @@ -254,7 +253,7 @@ func (suite *KeeperTestSuite) TestWasmMigrate() { { "failure: contract returns non-empty attributes", func() { - suite.mockVM.MigrateFn = func(_ wasmvm.Checksum, _ wasmvmtypes.Env, _ []byte, store wasmvm.KVStore, _ wasmvm.GoAPI, _ wasmvm.Querier, _ wasmvm.GasMeter, _ uint64, _ wasmvmtypes.UFraction) (*wasmvmtypes.ContractResult, uint64, error) { + s.mockVM.MigrateFn = func(_ wasmvm.Checksum, _ wasmvmtypes.Env, _ []byte, store wasmvm.KVStore, _ wasmvm.GoAPI, _ wasmvm.Querier, _ wasmvm.GasMeter, _ uint64, _ wasmvmtypes.UFraction) (*wasmvmtypes.ContractResult, uint64, error) { resp := wasmvmtypes.Response{Attributes: []wasmvmtypes.EventAttribute{{}}} return &wasmvmtypes.ContractResult{Ok: &resp}, wasmtesting.DefaultGasUsed, nil @@ -265,11 +264,11 @@ func (suite *KeeperTestSuite) TestWasmMigrate() { { "failure: change clientstate type", func() { - suite.mockVM.MigrateFn = func(_ wasmvm.Checksum, _ wasmvmtypes.Env, _ []byte, store wasmvm.KVStore, _ wasmvm.GoAPI, _ wasmvm.Querier, _ wasmvm.GasMeter, _ uint64, _ wasmvmtypes.UFraction) (*wasmvmtypes.ContractResult, uint64, error) { + s.mockVM.MigrateFn = func(_ wasmvm.Checksum, _ wasmvmtypes.Env, _ []byte, store wasmvm.KVStore, _ wasmvm.GoAPI, _ wasmvm.Querier, _ wasmvm.GasMeter, _ uint64, _ wasmvmtypes.UFraction) (*wasmvmtypes.ContractResult, uint64, error) { store.Set(host.ClientStateKey(), []byte("changed client state")) data, err := json.Marshal(types.EmptyResult{}) - suite.Require().NoError(err) + s.Require().NoError(err) return &wasmvmtypes.ContractResult{Ok: &wasmvmtypes.Response{Data: data}}, wasmtesting.DefaultGasUsed, nil } }, @@ -278,10 +277,10 @@ func (suite *KeeperTestSuite) TestWasmMigrate() { { "failure: delete clientstate", func() { - suite.mockVM.MigrateFn = func(_ wasmvm.Checksum, _ wasmvmtypes.Env, _ []byte, store wasmvm.KVStore, _ wasmvm.GoAPI, _ wasmvm.Querier, _ wasmvm.GasMeter, _ uint64, _ wasmvmtypes.UFraction) (*wasmvmtypes.ContractResult, uint64, error) { + s.mockVM.MigrateFn = func(_ wasmvm.Checksum, _ wasmvmtypes.Env, _ []byte, store wasmvm.KVStore, _ wasmvm.GoAPI, _ wasmvm.Querier, _ wasmvm.GasMeter, _ uint64, _ wasmvmtypes.UFraction) (*wasmvmtypes.ContractResult, uint64, error) { store.Delete(host.ClientStateKey()) data, err := json.Marshal(types.EmptyResult{}) - suite.Require().NoError(err) + s.Require().NoError(err) return &wasmvmtypes.ContractResult{Ok: &wasmvmtypes.Response{Data: data}}, wasmtesting.DefaultGasUsed, nil } }, @@ -290,10 +289,10 @@ func (suite *KeeperTestSuite) TestWasmMigrate() { { "failure: unmarshallable clientstate", func() { - suite.mockVM.MigrateFn = func(_ wasmvm.Checksum, _ wasmvmtypes.Env, _ []byte, store wasmvm.KVStore, _ wasmvm.GoAPI, _ wasmvm.Querier, _ wasmvm.GasMeter, _ uint64, _ wasmvmtypes.UFraction) (*wasmvmtypes.ContractResult, uint64, error) { + s.mockVM.MigrateFn = func(_ wasmvm.Checksum, _ wasmvmtypes.Env, _ []byte, store wasmvm.KVStore, _ wasmvm.GoAPI, _ wasmvm.Querier, _ wasmvm.GasMeter, _ uint64, _ wasmvmtypes.UFraction) (*wasmvmtypes.ContractResult, uint64, error) { store.Set(host.ClientStateKey(), []byte("invalid json")) data, err := json.Marshal(types.EmptyResult{}) - suite.Require().NoError(err) + s.Require().NoError(err) return &wasmvmtypes.ContractResult{Ok: &wasmvmtypes.Response{Data: data}}, wasmtesting.DefaultGasUsed, nil } }, @@ 
-302,31 +301,30 @@ func (suite *KeeperTestSuite) TestWasmMigrate() { } for _, tc := range testCases { - tc := tc - suite.Run(tc.name, func() { - suite.SetupWasmWithMockVM() - _ = suite.storeWasmCode(wasmtesting.Code) + s.Run(tc.name, func() { + s.SetupWasmWithMockVM() + _ = s.storeWasmCode(wasmtesting.Code) - endpoint := wasmtesting.NewWasmEndpoint(suite.chainA) + endpoint := wasmtesting.NewWasmEndpoint(s.chainA) err := endpoint.CreateClient() - suite.Require().NoError(err) + s.Require().NoError(err) tc.malleate() - clientStore := suite.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(suite.chainA.GetContext(), defaultWasmClientID) - wasmClientKeeper := GetSimApp(suite.chainA).WasmClientKeeper - err = wasmClientKeeper.WasmMigrate(suite.chainA.GetContext(), clientStore, &types.ClientState{}, defaultWasmClientID, []byte("{}")) + clientStore := s.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(s.chainA.GetContext(), defaultWasmClientID) + wasmClientKeeper := GetSimApp(s.chainA).WasmClientKeeper + err = wasmClientKeeper.WasmMigrate(s.chainA.GetContext(), clientStore, &types.ClientState{}, defaultWasmClientID, []byte("{}")) if tc.expError == nil { - suite.Require().NoError(err) + s.Require().NoError(err) } else { - suite.Require().ErrorIs(err, tc.expError) + s.Require().ErrorIs(err, tc.expError) } }) } } -func (suite *KeeperTestSuite) TestWasmQuery() { +func (s *KeeperTestSuite) TestWasmQuery() { var payload types.QueryMsg testCases := []struct { @@ -337,14 +335,14 @@ func (suite *KeeperTestSuite) TestWasmQuery() { { "success", func() { - suite.mockVM.RegisterQueryCallback(types.StatusMsg{}, func(_ wasmvm.Checksum, _ wasmvmtypes.Env, _ []byte, _ wasmvm.KVStore, goapi wasmvm.GoAPI, _ wasmvm.Querier, _ wasmvm.GasMeter, _ uint64, _ wasmvmtypes.UFraction) (*wasmvmtypes.QueryResult, uint64, error) { + s.mockVM.RegisterQueryCallback(types.StatusMsg{}, func(_ wasmvm.Checksum, _ wasmvmtypes.Env, _ []byte, _ wasmvm.KVStore, goapi wasmvm.GoAPI, _ wasmvm.Querier, _ wasmvm.GasMeter, _ uint64, _ wasmvmtypes.UFraction) (*wasmvmtypes.QueryResult, uint64, error) { // Ensure GoAPI is set - suite.Require().NotNil(goapi.CanonicalizeAddress) - suite.Require().NotNil(goapi.HumanizeAddress) - suite.Require().NotNil(goapi.ValidateAddress) + s.Require().NotNil(goapi.CanonicalizeAddress) + s.Require().NotNil(goapi.HumanizeAddress) + s.Require().NotNil(goapi.ValidateAddress) resp, err := json.Marshal(types.StatusResult{Status: exported.Frozen.String()}) - suite.Require().NoError(err) + s.Require().NoError(err) return &wasmvmtypes.QueryResult{Ok: resp}, wasmtesting.DefaultGasUsed, nil }) @@ -354,7 +352,7 @@ func (suite *KeeperTestSuite) TestWasmQuery() { { "failure: vm returns error", func() { - suite.mockVM.RegisterQueryCallback(types.StatusMsg{}, func(_ wasmvm.Checksum, _ wasmvmtypes.Env, _ []byte, _ wasmvm.KVStore, _ wasmvm.GoAPI, _ wasmvm.Querier, _ wasmvm.GasMeter, _ uint64, _ wasmvmtypes.UFraction) (*wasmvmtypes.QueryResult, uint64, error) { + s.mockVM.RegisterQueryCallback(types.StatusMsg{}, func(_ wasmvm.Checksum, _ wasmvmtypes.Env, _ []byte, _ wasmvm.KVStore, _ wasmvm.GoAPI, _ wasmvm.Querier, _ wasmvm.GasMeter, _ uint64, _ wasmvmtypes.UFraction) (*wasmvmtypes.QueryResult, uint64, error) { return nil, wasmtesting.DefaultGasUsed, wasmtesting.ErrMockVM }) }, @@ -363,7 +361,7 @@ func (suite *KeeperTestSuite) TestWasmQuery() { { "failure: contract returns error", func() { - suite.mockVM.RegisterQueryCallback(types.StatusMsg{}, func(_ wasmvm.Checksum, _ wasmvmtypes.Env, _ []byte, _ wasmvm.KVStore, _ wasmvm.GoAPI, 
_ wasmvm.Querier, _ wasmvm.GasMeter, _ uint64, _ wasmvmtypes.UFraction) (*wasmvmtypes.QueryResult, uint64, error) { + s.mockVM.RegisterQueryCallback(types.StatusMsg{}, func(_ wasmvm.Checksum, _ wasmvmtypes.Env, _ []byte, _ wasmvm.KVStore, _ wasmvm.GoAPI, _ wasmvm.Querier, _ wasmvm.GasMeter, _ uint64, _ wasmvmtypes.UFraction) (*wasmvmtypes.QueryResult, uint64, error) { return &wasmvmtypes.QueryResult{Err: wasmtesting.ErrMockContract.Error()}, wasmtesting.DefaultGasUsed, nil }) }, @@ -372,39 +370,38 @@ func (suite *KeeperTestSuite) TestWasmQuery() { } for _, tc := range testCases { - tc := tc - suite.Run(tc.name, func() { - suite.SetupWasmWithMockVM() - _ = suite.storeWasmCode(wasmtesting.Code) + s.Run(tc.name, func() { + s.SetupWasmWithMockVM() + _ = s.storeWasmCode(wasmtesting.Code) - endpoint := wasmtesting.NewWasmEndpoint(suite.chainA) + endpoint := wasmtesting.NewWasmEndpoint(s.chainA) err := endpoint.CreateClient() - suite.Require().NoError(err) + s.Require().NoError(err) clientState := endpoint.GetClientState() - clientStore := suite.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(suite.chainA.GetContext(), endpoint.ClientID) + clientStore := s.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(s.chainA.GetContext(), endpoint.ClientID) wasmClientState, ok := clientState.(*types.ClientState) - suite.Require().True(ok) + s.Require().True(ok) payload = types.QueryMsg{Status: &types.StatusMsg{}} tc.malleate() - wasmClientKeeper := GetSimApp(suite.chainA).WasmClientKeeper - res, err := wasmClientKeeper.WasmQuery(suite.chainA.GetContext(), endpoint.ClientID, clientStore, wasmClientState, payload) + wasmClientKeeper := GetSimApp(s.chainA).WasmClientKeeper + res, err := wasmClientKeeper.WasmQuery(s.chainA.GetContext(), endpoint.ClientID, clientStore, wasmClientState, payload) if tc.expError == nil { - suite.Require().NoError(err) - suite.Require().NotNil(res) + s.Require().NoError(err) + s.Require().NotNil(res) } else { - suite.Require().ErrorIs(err, tc.expError) + s.Require().ErrorIs(err, tc.expError) } }) } } -func (suite *KeeperTestSuite) TestWasmSudo() { +func (s *KeeperTestSuite) TestWasmSudo() { var payload types.SudoMsg testCases := []struct { @@ -415,14 +412,14 @@ func (suite *KeeperTestSuite) TestWasmSudo() { { "success", func() { - suite.mockVM.RegisterSudoCallback(types.UpdateStateMsg{}, func(_ wasmvm.Checksum, _ wasmvmtypes.Env, _ []byte, _ wasmvm.KVStore, goapi wasmvm.GoAPI, _ wasmvm.Querier, _ wasmvm.GasMeter, _ uint64, _ wasmvmtypes.UFraction) (*wasmvmtypes.ContractResult, uint64, error) { + s.mockVM.RegisterSudoCallback(types.UpdateStateMsg{}, func(_ wasmvm.Checksum, _ wasmvmtypes.Env, _ []byte, _ wasmvm.KVStore, goapi wasmvm.GoAPI, _ wasmvm.Querier, _ wasmvm.GasMeter, _ uint64, _ wasmvmtypes.UFraction) (*wasmvmtypes.ContractResult, uint64, error) { // Ensure GoAPI is set - suite.Require().NotNil(goapi.CanonicalizeAddress) - suite.Require().NotNil(goapi.HumanizeAddress) - suite.Require().NotNil(goapi.ValidateAddress) + s.Require().NotNil(goapi.CanonicalizeAddress) + s.Require().NotNil(goapi.HumanizeAddress) + s.Require().NotNil(goapi.ValidateAddress) resp, err := json.Marshal(types.UpdateStateResult{}) - suite.Require().NoError(err) + s.Require().NoError(err) return &wasmvmtypes.ContractResult{Ok: &wasmvmtypes.Response{Data: resp}}, wasmtesting.DefaultGasUsed, nil }) @@ -432,7 +429,7 @@ func (suite *KeeperTestSuite) TestWasmSudo() { { "failure: vm returns error", func() { - suite.mockVM.RegisterSudoCallback(types.UpdateStateMsg{}, func(_ wasmvm.Checksum, _ wasmvmtypes.Env, 
_ []byte, _ wasmvm.KVStore, _ wasmvm.GoAPI, _ wasmvm.Querier, _ wasmvm.GasMeter, _ uint64, _ wasmvmtypes.UFraction) (*wasmvmtypes.ContractResult, uint64, error) { + s.mockVM.RegisterSudoCallback(types.UpdateStateMsg{}, func(_ wasmvm.Checksum, _ wasmvmtypes.Env, _ []byte, _ wasmvm.KVStore, _ wasmvm.GoAPI, _ wasmvm.Querier, _ wasmvm.GasMeter, _ uint64, _ wasmvmtypes.UFraction) (*wasmvmtypes.ContractResult, uint64, error) { return nil, wasmtesting.DefaultGasUsed, wasmtesting.ErrMockVM }) }, @@ -441,7 +438,7 @@ func (suite *KeeperTestSuite) TestWasmSudo() { { "failure: contract returns error", func() { - suite.mockVM.RegisterSudoCallback(types.UpdateStateMsg{}, func(_ wasmvm.Checksum, _ wasmvmtypes.Env, _ []byte, _ wasmvm.KVStore, _ wasmvm.GoAPI, _ wasmvm.Querier, _ wasmvm.GasMeter, _ uint64, _ wasmvmtypes.UFraction) (*wasmvmtypes.ContractResult, uint64, error) { + s.mockVM.RegisterSudoCallback(types.UpdateStateMsg{}, func(_ wasmvm.Checksum, _ wasmvmtypes.Env, _ []byte, _ wasmvm.KVStore, _ wasmvm.GoAPI, _ wasmvm.Querier, _ wasmvm.GasMeter, _ uint64, _ wasmvmtypes.UFraction) (*wasmvmtypes.ContractResult, uint64, error) { return &wasmvmtypes.ContractResult{Err: wasmtesting.ErrMockContract.Error()}, wasmtesting.DefaultGasUsed, nil }) }, @@ -450,7 +447,7 @@ func (suite *KeeperTestSuite) TestWasmSudo() { { "failure: contract returns non-empty messages", func() { - suite.mockVM.RegisterSudoCallback(types.UpdateStateMsg{}, func(_ wasmvm.Checksum, _ wasmvmtypes.Env, _ []byte, _ wasmvm.KVStore, _ wasmvm.GoAPI, _ wasmvm.Querier, _ wasmvm.GasMeter, _ uint64, _ wasmvmtypes.UFraction) (*wasmvmtypes.ContractResult, uint64, error) { + s.mockVM.RegisterSudoCallback(types.UpdateStateMsg{}, func(_ wasmvm.Checksum, _ wasmvmtypes.Env, _ []byte, _ wasmvm.KVStore, _ wasmvm.GoAPI, _ wasmvm.Querier, _ wasmvm.GasMeter, _ uint64, _ wasmvmtypes.UFraction) (*wasmvmtypes.ContractResult, uint64, error) { resp := wasmvmtypes.Response{Messages: []wasmvmtypes.SubMsg{{}}} return &wasmvmtypes.ContractResult{Ok: &resp}, wasmtesting.DefaultGasUsed, nil @@ -461,7 +458,7 @@ func (suite *KeeperTestSuite) TestWasmSudo() { { "failure: contract returns non-empty events", func() { - suite.mockVM.RegisterSudoCallback(types.UpdateStateMsg{}, func(_ wasmvm.Checksum, _ wasmvmtypes.Env, _ []byte, _ wasmvm.KVStore, _ wasmvm.GoAPI, _ wasmvm.Querier, _ wasmvm.GasMeter, _ uint64, _ wasmvmtypes.UFraction) (*wasmvmtypes.ContractResult, uint64, error) { + s.mockVM.RegisterSudoCallback(types.UpdateStateMsg{}, func(_ wasmvm.Checksum, _ wasmvmtypes.Env, _ []byte, _ wasmvm.KVStore, _ wasmvm.GoAPI, _ wasmvm.Querier, _ wasmvm.GasMeter, _ uint64, _ wasmvmtypes.UFraction) (*wasmvmtypes.ContractResult, uint64, error) { resp := wasmvmtypes.Response{Events: []wasmvmtypes.Event{{}}} return &wasmvmtypes.ContractResult{Ok: &resp}, wasmtesting.DefaultGasUsed, nil @@ -472,7 +469,7 @@ func (suite *KeeperTestSuite) TestWasmSudo() { { "failure: contract returns non-empty attributes", func() { - suite.mockVM.RegisterSudoCallback(types.UpdateStateMsg{}, func(_ wasmvm.Checksum, _ wasmvmtypes.Env, _ []byte, _ wasmvm.KVStore, _ wasmvm.GoAPI, _ wasmvm.Querier, _ wasmvm.GasMeter, _ uint64, _ wasmvmtypes.UFraction) (*wasmvmtypes.ContractResult, uint64, error) { + s.mockVM.RegisterSudoCallback(types.UpdateStateMsg{}, func(_ wasmvm.Checksum, _ wasmvmtypes.Env, _ []byte, _ wasmvm.KVStore, _ wasmvm.GoAPI, _ wasmvm.Querier, _ wasmvm.GasMeter, _ uint64, _ wasmvmtypes.UFraction) (*wasmvmtypes.ContractResult, uint64, error) { resp := wasmvmtypes.Response{Attributes: 
[]wasmvmtypes.EventAttribute{{}}} return &wasmvmtypes.ContractResult{Ok: &resp}, wasmtesting.DefaultGasUsed, nil @@ -483,11 +480,11 @@ func (suite *KeeperTestSuite) TestWasmSudo() { { "failure: unmarshallable clientstate bytes", func() { - suite.mockVM.RegisterSudoCallback(types.UpdateStateMsg{}, func(_ wasmvm.Checksum, _ wasmvmtypes.Env, _ []byte, store wasmvm.KVStore, _ wasmvm.GoAPI, _ wasmvm.Querier, _ wasmvm.GasMeter, _ uint64, _ wasmvmtypes.UFraction) (*wasmvmtypes.ContractResult, uint64, error) { + s.mockVM.RegisterSudoCallback(types.UpdateStateMsg{}, func(_ wasmvm.Checksum, _ wasmvmtypes.Env, _ []byte, store wasmvm.KVStore, _ wasmvm.GoAPI, _ wasmvm.Querier, _ wasmvm.GasMeter, _ uint64, _ wasmvmtypes.UFraction) (*wasmvmtypes.ContractResult, uint64, error) { store.Set(host.ClientStateKey(), []byte("invalid json")) resp, err := json.Marshal(types.UpdateStateResult{}) - suite.Require().NoError(err) + s.Require().NoError(err) return &wasmvmtypes.ContractResult{Ok: &wasmvmtypes.Response{Data: resp}}, wasmtesting.DefaultGasUsed, nil }) @@ -497,11 +494,11 @@ func (suite *KeeperTestSuite) TestWasmSudo() { { "failure: delete clientstate", func() { - suite.mockVM.RegisterSudoCallback(types.UpdateStateMsg{}, func(_ wasmvm.Checksum, _ wasmvmtypes.Env, _ []byte, store wasmvm.KVStore, _ wasmvm.GoAPI, _ wasmvm.Querier, _ wasmvm.GasMeter, _ uint64, _ wasmvmtypes.UFraction) (*wasmvmtypes.ContractResult, uint64, error) { + s.mockVM.RegisterSudoCallback(types.UpdateStateMsg{}, func(_ wasmvm.Checksum, _ wasmvmtypes.Env, _ []byte, store wasmvm.KVStore, _ wasmvm.GoAPI, _ wasmvm.Querier, _ wasmvm.GasMeter, _ uint64, _ wasmvmtypes.UFraction) (*wasmvmtypes.ContractResult, uint64, error) { store.Delete(host.ClientStateKey()) resp, err := json.Marshal(types.UpdateStateResult{}) - suite.Require().NoError(err) + s.Require().NoError(err) return &wasmvmtypes.ContractResult{Ok: &wasmvmtypes.Response{Data: resp}}, wasmtesting.DefaultGasUsed, nil }) @@ -511,13 +508,13 @@ func (suite *KeeperTestSuite) TestWasmSudo() { { "failure: change checksum", func() { - suite.mockVM.RegisterSudoCallback(types.UpdateStateMsg{}, func(_ wasmvm.Checksum, _ wasmvmtypes.Env, _ []byte, store wasmvm.KVStore, _ wasmvm.GoAPI, _ wasmvm.Querier, _ wasmvm.GasMeter, _ uint64, _ wasmvmtypes.UFraction) (*wasmvmtypes.ContractResult, uint64, error) { - clientState := suite.chainA.GetClientState(defaultWasmClientID) + s.mockVM.RegisterSudoCallback(types.UpdateStateMsg{}, func(_ wasmvm.Checksum, _ wasmvmtypes.Env, _ []byte, store wasmvm.KVStore, _ wasmvm.GoAPI, _ wasmvm.Querier, _ wasmvm.GasMeter, _ uint64, _ wasmvmtypes.UFraction) (*wasmvmtypes.ContractResult, uint64, error) { + clientState := s.chainA.GetClientState(defaultWasmClientID) clientState.(*types.ClientState).Checksum = []byte("new checksum") - store.Set(host.ClientStateKey(), clienttypes.MustMarshalClientState(suite.chainA.App.AppCodec(), clientState)) + store.Set(host.ClientStateKey(), clienttypes.MustMarshalClientState(s.chainA.App.AppCodec(), clientState)) resp, err := json.Marshal(types.UpdateStateResult{}) - suite.Require().NoError(err) + s.Require().NoError(err) return &wasmvmtypes.ContractResult{Ok: &wasmvmtypes.Response{Data: resp}}, wasmtesting.DefaultGasUsed, nil }) @@ -527,33 +524,32 @@ func (suite *KeeperTestSuite) TestWasmSudo() { } for _, tc := range testCases { - tc := tc - suite.Run(tc.name, func() { - suite.SetupWasmWithMockVM() - _ = suite.storeWasmCode(wasmtesting.Code) + s.Run(tc.name, func() { + s.SetupWasmWithMockVM() + _ = s.storeWasmCode(wasmtesting.Code) - 
endpoint := wasmtesting.NewWasmEndpoint(suite.chainA) + endpoint := wasmtesting.NewWasmEndpoint(s.chainA) err := endpoint.CreateClient() - suite.Require().NoError(err) + s.Require().NoError(err) clientState := endpoint.GetClientState() - clientStore := suite.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(suite.chainA.GetContext(), endpoint.ClientID) + clientStore := s.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(s.chainA.GetContext(), endpoint.ClientID) wasmClientState, ok := clientState.(*types.ClientState) - suite.Require().True(ok) + s.Require().True(ok) payload = types.SudoMsg{UpdateState: &types.UpdateStateMsg{}} tc.malleate() - wasmClientKeeper := GetSimApp(suite.chainA).WasmClientKeeper - res, err := wasmClientKeeper.WasmSudo(suite.chainA.GetContext(), endpoint.ClientID, clientStore, wasmClientState, payload) + wasmClientKeeper := GetSimApp(s.chainA).WasmClientKeeper + res, err := wasmClientKeeper.WasmSudo(s.chainA.GetContext(), endpoint.ClientID, clientStore, wasmClientState, payload) if tc.expError == nil { - suite.Require().NoError(err) - suite.Require().NotNil(res) + s.Require().NoError(err) + s.Require().NotNil(res) } else { - suite.Require().ErrorIs(err, tc.expError) + s.Require().ErrorIs(err, tc.expError) } }) } diff --git a/modules/light-clients/08-wasm/keeper/export_test.go b/modules/light-clients/08-wasm/keeper/export_test.go index 82f23ab307c..bd2fd3276ab 100644 --- a/modules/light-clients/08-wasm/keeper/export_test.go +++ b/modules/light-clients/08-wasm/keeper/export_test.go @@ -3,12 +3,12 @@ package keeper import sdk "github.com/cosmos/cosmos-sdk/types" // MigrateContractCode is a wrapper around k.migrateContractCode to allow the method to be directly called in tests. -func (k Keeper) MigrateContractCode(ctx sdk.Context, clientID string, newChecksum, migrateMsg []byte) error { +func (k *Keeper) MigrateContractCode(ctx sdk.Context, clientID string, newChecksum, migrateMsg []byte) error { return k.migrateContractCode(ctx, clientID, newChecksum, migrateMsg) } // GetQueryPlugins is a wrapper around k.getQueryPlugins to allow the method to be directly called in tests. -func (k Keeper) GetQueryPlugins() QueryPlugins { +func (k *Keeper) GetQueryPlugins() QueryPlugins { return k.getQueryPlugins() } diff --git a/modules/light-clients/08-wasm/keeper/genesis.go b/modules/light-clients/08-wasm/keeper/genesis.go index f53fd54f4d8..e84beb75e70 100644 --- a/modules/light-clients/08-wasm/keeper/genesis.go +++ b/modules/light-clients/08-wasm/keeper/genesis.go @@ -10,7 +10,7 @@ import ( // InitGenesis initializes the 08-wasm module's state from a provided genesis // state. -func (k Keeper) InitGenesis(ctx sdk.Context, gs types.GenesisState) error { +func (k *Keeper) InitGenesis(ctx sdk.Context, gs types.GenesisState) error { storeFn := func(code wasmvm.WasmCode, _ uint64) (wasmvm.Checksum, uint64, error) { checksum, err := k.GetVM().StoreCodeUnchecked(code) return checksum, 0, err @@ -27,7 +27,7 @@ func (k Keeper) InitGenesis(ctx sdk.Context, gs types.GenesisState) error { // ExportGenesis returns the 08-wasm module's exported genesis. This includes the code // for all contracts previously stored. 
-func (k Keeper) ExportGenesis(ctx sdk.Context) types.GenesisState { +func (k *Keeper) ExportGenesis(ctx sdk.Context) types.GenesisState { checksums, err := k.GetAllChecksums(ctx) if err != nil { panic(err) diff --git a/modules/light-clients/08-wasm/keeper/genesis_test.go b/modules/light-clients/08-wasm/keeper/genesis_test.go index ae1d1719b98..99f97bbf111 100644 --- a/modules/light-clients/08-wasm/keeper/genesis_test.go +++ b/modules/light-clients/08-wasm/keeper/genesis_test.go @@ -10,7 +10,7 @@ import ( "github.com/cosmos/ibc-go/modules/light-clients/08-wasm/v10/types" ) -func (suite *KeeperTestSuite) TestInitGenesis() { +func (s *KeeperTestSuite) TestInitGenesis() { var ( genesisState types.GenesisState expChecksums []string @@ -46,44 +46,44 @@ func (suite *KeeperTestSuite) TestInitGenesis() { } for _, tc := range testCases { - suite.Run(tc.name, func() { - suite.SetupWasmWithMockVM() + s.Run(tc.name, func() { + s.SetupWasmWithMockVM() - ctx := suite.chainA.GetContext() + ctx := s.chainA.GetContext() tc.malleate() - err := GetSimApp(suite.chainA).WasmClientKeeper.InitGenesis(ctx, genesisState) - suite.Require().NoError(err) + err := GetSimApp(s.chainA).WasmClientKeeper.InitGenesis(ctx, genesisState) + s.Require().NoError(err) var storedHashes []string - checksums, err := GetSimApp(suite.chainA).WasmClientKeeper.GetAllChecksums(suite.chainA.GetContext()) - suite.Require().NoError(err) + checksums, err := GetSimApp(s.chainA).WasmClientKeeper.GetAllChecksums(s.chainA.GetContext()) + s.Require().NoError(err) for _, hash := range checksums { storedHashes = append(storedHashes, hex.EncodeToString(hash)) } - suite.Require().Equal(len(expChecksums), len(storedHashes)) - suite.Require().ElementsMatch(expChecksums, storedHashes) + s.Require().Equal(len(expChecksums), len(storedHashes)) + s.Require().ElementsMatch(expChecksums, storedHashes) }) } } -func (suite *KeeperTestSuite) TestExportGenesis() { - suite.SetupWasmWithMockVM() +func (s *KeeperTestSuite) TestExportGenesis() { + s.SetupWasmWithMockVM() - ctx := suite.chainA.GetContext() + ctx := s.chainA.GetContext() expChecksum := "b3a49b2914f5e6a673215e74325c1d153bb6776e079774e52c5b7e674d9ad3ab" //nolint:gosec // these are not hard-coded credentials signer := authtypes.NewModuleAddress(govtypes.ModuleName).String() msg := types.NewMsgStoreCode(signer, wasmtesting.Code) - res, err := GetSimApp(suite.chainA).WasmClientKeeper.StoreCode(ctx, msg) - suite.Require().NoError(err) - suite.Require().Equal(expChecksum, hex.EncodeToString(res.Checksum)) + res, err := GetSimApp(s.chainA).WasmClientKeeper.StoreCode(ctx, msg) + s.Require().NoError(err) + s.Require().Equal(expChecksum, hex.EncodeToString(res.Checksum)) - genesisState := GetSimApp(suite.chainA).WasmClientKeeper.ExportGenesis(ctx) - suite.Require().Len(genesisState.Contracts, 1) - suite.Require().NotEmpty(genesisState.Contracts[0].CodeBytes) + genesisState := GetSimApp(s.chainA).WasmClientKeeper.ExportGenesis(ctx) + s.Require().Len(genesisState.Contracts, 1) + s.Require().NotEmpty(genesisState.Contracts[0].CodeBytes) } diff --git a/modules/light-clients/08-wasm/keeper/grpc_query.go b/modules/light-clients/08-wasm/keeper/grpc_query.go index 39621d1cbe0..022e40a9a06 100644 --- a/modules/light-clients/08-wasm/keeper/grpc_query.go +++ b/modules/light-clients/08-wasm/keeper/grpc_query.go @@ -19,7 +19,7 @@ import ( var _ types.QueryServer = (*Keeper)(nil) // Code implements the Query/Code gRPC method -func (k Keeper) Code(goCtx context.Context, req *types.QueryCodeRequest) (*types.QueryCodeResponse, 
error) { +func (k *Keeper) Code(goCtx context.Context, req *types.QueryCodeRequest) (*types.QueryCodeResponse, error) { if req == nil { return nil, status.Error(codes.InvalidArgument, "empty request") } @@ -45,7 +45,7 @@ func (k Keeper) Code(goCtx context.Context, req *types.QueryCodeRequest) (*types } // Checksums implements the Query/Checksums gRPC method. It returns a list of hex encoded checksums stored. -func (k Keeper) Checksums(goCtx context.Context, req *types.QueryChecksumsRequest) (*types.QueryChecksumsResponse, error) { +func (k *Keeper) Checksums(goCtx context.Context, req *types.QueryChecksumsRequest) (*types.QueryChecksumsResponse, error) { checksums, pageRes, err := sdkquery.CollectionPaginate( goCtx, k.GetChecksums(), diff --git a/modules/light-clients/08-wasm/keeper/grpc_query_test.go b/modules/light-clients/08-wasm/keeper/grpc_query_test.go index 38d9f70ba24..97c4129026a 100644 --- a/modules/light-clients/08-wasm/keeper/grpc_query_test.go +++ b/modules/light-clients/08-wasm/keeper/grpc_query_test.go @@ -15,7 +15,7 @@ import ( "github.com/cosmos/ibc-go/modules/light-clients/08-wasm/v10/types" ) -func (suite *KeeperTestSuite) TestQueryCode() { +func (s *KeeperTestSuite) TestQueryCode() { var req *types.QueryCodeRequest testCases := []struct { @@ -29,8 +29,8 @@ func (suite *KeeperTestSuite) TestQueryCode() { signer := authtypes.NewModuleAddress(govtypes.ModuleName).String() msg := types.NewMsgStoreCode(signer, wasmtesting.Code) - res, err := GetSimApp(suite.chainA).WasmClientKeeper.StoreCode(suite.chainA.GetContext(), msg) - suite.Require().NoError(err) + res, err := GetSimApp(s.chainA).WasmClientKeeper.StoreCode(s.chainA.GetContext(), msg) + s.Require().NoError(err) req = &types.QueryCodeRequest{Checksum: hex.EncodeToString(res.Checksum)} }, @@ -59,26 +59,26 @@ func (suite *KeeperTestSuite) TestQueryCode() { } for _, tc := range testCases { - suite.Run(tc.name, func() { - suite.SetupWasmWithMockVM() + s.Run(tc.name, func() { + s.SetupWasmWithMockVM() tc.malleate() - res, err := GetSimApp(suite.chainA).WasmClientKeeper.Code(suite.chainA.GetContext(), req) + res, err := GetSimApp(s.chainA).WasmClientKeeper.Code(s.chainA.GetContext(), req) if tc.expErr == nil { - suite.Require().NoError(err) - suite.Require().NotNil(res) - suite.Require().NotEmpty(res.Data) + s.Require().NoError(err) + s.Require().NotNil(res) + s.Require().NotEmpty(res.Data) } else { - suite.Require().Error(err) - suite.Require().ErrorIs(err, tc.expErr) + s.Require().Error(err) + s.Require().ErrorIs(err, tc.expErr) } }) } } -func (suite *KeeperTestSuite) TestQueryChecksums() { +func (s *KeeperTestSuite) TestQueryChecksums() { var expChecksums []string testCases := []struct { @@ -99,8 +99,8 @@ func (suite *KeeperTestSuite) TestQueryChecksums() { signer := authtypes.NewModuleAddress(govtypes.ModuleName).String() msg := types.NewMsgStoreCode(signer, wasmtesting.Code) - res, err := GetSimApp(suite.chainA).WasmClientKeeper.StoreCode(suite.chainA.GetContext(), msg) - suite.Require().NoError(err) + res, err := GetSimApp(s.chainA).WasmClientKeeper.StoreCode(s.chainA.GetContext(), msg) + s.Require().NoError(err) expChecksums = append(expChecksums, hex.EncodeToString(res.Checksum)) }, @@ -109,21 +109,21 @@ func (suite *KeeperTestSuite) TestQueryChecksums() { } for _, tc := range testCases { - suite.Run(tc.name, func() { - suite.SetupWasmWithMockVM() + s.Run(tc.name, func() { + s.SetupWasmWithMockVM() tc.malleate() req := &types.QueryChecksumsRequest{} - res, err := 
GetSimApp(suite.chainA).WasmClientKeeper.Checksums(suite.chainA.GetContext(), req) + res, err := GetSimApp(s.chainA).WasmClientKeeper.Checksums(s.chainA.GetContext(), req) if tc.expErr == nil { - suite.Require().NoError(err) - suite.Require().NotNil(res) - suite.Require().Equal(len(expChecksums), len(res.Checksums)) - suite.Require().ElementsMatch(expChecksums, res.Checksums) + s.Require().NoError(err) + s.Require().NotNil(res) + s.Require().Equal(len(expChecksums), len(res.Checksums)) + s.Require().ElementsMatch(expChecksums, res.Checksums) } else { - suite.Require().Error(err) + s.Require().Error(err) } }) } diff --git a/modules/light-clients/08-wasm/keeper/keeper.go b/modules/light-clients/08-wasm/keeper/keeper.go index 77e3fee84a1..8c86c226533 100644 --- a/modules/light-clients/08-wasm/keeper/keeper.go +++ b/modules/light-clients/08-wasm/keeper/keeper.go @@ -38,17 +38,17 @@ type Keeper struct { } // Codec returns the 08-wasm module's codec. -func (k Keeper) Codec() codec.BinaryCodec { +func (k *Keeper) Codec() codec.BinaryCodec { return k.cdc } // GetAuthority returns the 08-wasm module's authority. -func (k Keeper) GetAuthority() string { +func (k *Keeper) GetAuthority() string { return k.authority } // Logger returns a module-specific logger. -func (Keeper) Logger(ctx sdk.Context) log.Logger { +func (*Keeper) Logger(ctx sdk.Context) log.Logger { return moduleLogger(ctx) } @@ -57,17 +57,17 @@ func moduleLogger(ctx sdk.Context) log.Logger { } // GetVM returns the keeper's vm engine. -func (k Keeper) GetVM() types.WasmEngine { +func (k *Keeper) GetVM() types.WasmEngine { return k.vm } // GetChecksums returns the stored checksums. -func (k Keeper) GetChecksums() collections.KeySet[[]byte] { +func (k *Keeper) GetChecksums() collections.KeySet[[]byte] { return k.checksums } // getQueryPlugins returns the set query plugins. -func (k Keeper) getQueryPlugins() QueryPlugins { +func (k *Keeper) getQueryPlugins() QueryPlugins { return k.queryPlugins } @@ -76,7 +76,7 @@ func (k *Keeper) setQueryPlugins(plugins QueryPlugins) { k.queryPlugins = plugins } -func (k Keeper) newQueryHandler(ctx sdk.Context, callerID string) *queryHandler { +func (k *Keeper) newQueryHandler(ctx sdk.Context, callerID string) *queryHandler { return newQueryHandler(ctx, k.getQueryPlugins(), callerID) } @@ -85,7 +85,7 @@ func (k Keeper) newQueryHandler(ctx sdk.Context, callerID string) *queryHandler // contract code before storing: // - Size bounds are checked. Contract length must not be 0 or exceed a specific size (maxWasmSize). // - The contract must not have already been stored in store. -func (k Keeper) storeWasmCode(ctx sdk.Context, code []byte, storeFn func(code wasmvm.WasmCode, gasLimit uint64) (wasmvm.Checksum, uint64, error)) ([]byte, error) { +func (k *Keeper) storeWasmCode(ctx sdk.Context, code []byte, storeFn func(code wasmvm.WasmCode, gasLimit uint64) (wasmvm.Checksum, uint64, error)) ([]byte, error) { var err error if types.IsGzip(code) { ctx.GasMeter().ConsumeGas(types.VMGasRegister.UncompressCosts(len(code)), "Uncompress gzip bytecode") @@ -139,7 +139,7 @@ func (k Keeper) storeWasmCode(ctx sdk.Context, code []byte, storeFn func(code wa // migrateContractCode migrates the contract for a given light client to one denoted by the given new checksum. The checksum we // are migrating to must first be stored using storeWasmCode and must not match the checksum currently stored for this light client. 
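(Illustrative aside, not part of the patch: the keeper.go hunks above and below move Keeper methods from value receivers to pointer receivers, matching the already-mutating setQueryPlugins and avoiding struct copies. A minimal sketch of the difference, using a hypothetical counter type rather than the real Keeper:)

// Sketch only: a value receiver operates on a copy, so mutations are lost;
// a pointer receiver mutates the caller's value. Keeping every method on the
// pointer receiver makes the method set uniform.
package main

import "fmt"

type counter struct{ n int } // hypothetical stand-in for the Keeper

func (c counter) addByValue() { c.n++ } // mutates a copy; caller sees no change

func (c *counter) addByPointer() { c.n++ } // mutates the caller's value

func main() {
	c := counter{}
	c.addByValue()
	c.addByPointer()
	fmt.Println(c.n) // prints 1: only the pointer-receiver call took effect
}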
-func (k Keeper) migrateContractCode(ctx sdk.Context, clientID string, newChecksum, migrateMsg []byte) error { +func (k *Keeper) migrateContractCode(ctx sdk.Context, clientID string, newChecksum, migrateMsg []byte) error { clientStore := k.clientKeeper.ClientStore(ctx, clientID) wasmClientState, found := types.GetClientState(clientStore, k.cdc) if !found { @@ -184,7 +184,7 @@ func (k Keeper) migrateContractCode(ctx sdk.Context, clientID string, newChecksu } // GetWasmClientState returns the 08-wasm client state for the given client identifier. -func (k Keeper) GetWasmClientState(ctx sdk.Context, clientID string) (*types.ClientState, error) { +func (k *Keeper) GetWasmClientState(ctx sdk.Context, clientID string) (*types.ClientState, error) { clientState, found := k.clientKeeper.GetClientState(ctx, clientID) if !found { return nil, errorsmod.Wrapf(clienttypes.ErrClientTypeNotFound, "clientID %s", clientID) @@ -200,7 +200,7 @@ func (k Keeper) GetWasmClientState(ctx sdk.Context, clientID string) (*types.Cli // GetAllChecksums is a helper to get all checksums from the store. // It returns an empty slice if no checksums are found -func (k Keeper) GetAllChecksums(ctx sdk.Context) ([]types.Checksum, error) { +func (k *Keeper) GetAllChecksums(ctx sdk.Context) ([]types.Checksum, error) { iterator, err := k.GetChecksums().Iterate(ctx, nil) if err != nil { return nil, err @@ -221,7 +221,7 @@ func (k Keeper) GetAllChecksums(ctx sdk.Context) ([]types.Checksum, error) { // HasChecksum returns true if the given checksum exists in the store and // false otherwise. -func (k Keeper) HasChecksum(ctx sdk.Context, checksum types.Checksum) bool { +func (k *Keeper) HasChecksum(ctx sdk.Context, checksum types.Checksum) bool { found, err := k.GetChecksums().Has(ctx, checksum) if err != nil { return false @@ -231,7 +231,7 @@ func (k Keeper) HasChecksum(ctx sdk.Context, checksum types.Checksum) bool { } // InitializePinnedCodes updates wasmvm to pin to cache all contracts marked as pinned -func (k Keeper) InitializePinnedCodes(ctx sdk.Context) error { +func (k *Keeper) InitializePinnedCodes(ctx sdk.Context) error { checksums, err := k.GetAllChecksums(ctx) if err != nil { return err diff --git a/modules/light-clients/08-wasm/keeper/keeper_test.go b/modules/light-clients/08-wasm/keeper/keeper_test.go index 0f6d399c3fb..9e9b3b4b537 100644 --- a/modules/light-clients/08-wasm/keeper/keeper_test.go +++ b/modules/light-clients/08-wasm/keeper/keeper_test.go @@ -61,79 +61,79 @@ func GetSimApp(chain *ibctesting.TestChain) *simapp.SimApp { return app } -func (suite *KeeperTestSuite) SetupTest() { - suite.coordinator = ibctesting.NewCustomAppCoordinator(suite.T(), 1, setupTestingApp) - suite.chainA = suite.coordinator.GetChain(ibctesting.GetChainID(1)) +func (s *KeeperTestSuite) SetupTest() { + s.coordinator = ibctesting.NewCustomAppCoordinator(s.T(), 1, setupTestingApp) + s.chainA = s.coordinator.GetChain(ibctesting.GetChainID(1)) - queryHelper := baseapp.NewQueryServerTestHelper(suite.chainA.GetContext(), GetSimApp(suite.chainA).InterfaceRegistry()) - types.RegisterQueryServer(queryHelper, GetSimApp(suite.chainA).WasmClientKeeper) + queryHelper := baseapp.NewQueryServerTestHelper(s.chainA.GetContext(), GetSimApp(s.chainA).InterfaceRegistry()) + types.RegisterQueryServer(queryHelper, &GetSimApp(s.chainA).WasmClientKeeper) } // SetupWasmWithMockVM sets up mock cometbft chain with a mock vm. 
-func (suite *KeeperTestSuite) SetupWasmWithMockVM() { - suite.coordinator = ibctesting.NewCustomAppCoordinator(suite.T(), 1, suite.setupWasmWithMockVM) - suite.chainA = suite.coordinator.GetChain(ibctesting.GetChainID(1)) +func (s *KeeperTestSuite) SetupWasmWithMockVM() { + s.coordinator = ibctesting.NewCustomAppCoordinator(s.T(), 1, s.setupWasmWithMockVM) + s.chainA = s.coordinator.GetChain(ibctesting.GetChainID(1)) } -func (suite *KeeperTestSuite) setupWasmWithMockVM() (ibctesting.TestingApp, map[string]json.RawMessage) { - suite.mockVM = wasmtesting.NewMockWasmEngine() +func (s *KeeperTestSuite) setupWasmWithMockVM() (ibctesting.TestingApp, map[string]json.RawMessage) { + s.mockVM = wasmtesting.NewMockWasmEngine() - suite.mockVM.InstantiateFn = func(checksum wasmvm.Checksum, env wasmvmtypes.Env, info wasmvmtypes.MessageInfo, initMsg []byte, store wasmvm.KVStore, goapi wasmvm.GoAPI, querier wasmvm.Querier, gasMeter wasmvm.GasMeter, gasLimit uint64, deserCost wasmvmtypes.UFraction) (*wasmvmtypes.ContractResult, uint64, error) { + s.mockVM.InstantiateFn = func(checksum wasmvm.Checksum, env wasmvmtypes.Env, info wasmvmtypes.MessageInfo, initMsg []byte, store wasmvm.KVStore, goapi wasmvm.GoAPI, querier wasmvm.Querier, gasMeter wasmvm.GasMeter, gasLimit uint64, deserCost wasmvmtypes.UFraction) (*wasmvmtypes.ContractResult, uint64, error) { var payload types.InstantiateMessage err := json.Unmarshal(initMsg, &payload) - suite.Require().NoError(err) + s.Require().NoError(err) - wrappedClientState, ok := clienttypes.MustUnmarshalClientState(suite.chainA.App.AppCodec(), payload.ClientState).(*ibctm.ClientState) - suite.Require().True(ok) + wrappedClientState, ok := clienttypes.MustUnmarshalClientState(s.chainA.App.AppCodec(), payload.ClientState).(*ibctm.ClientState) + s.Require().True(ok) clientState := types.NewClientState(payload.ClientState, payload.Checksum, wrappedClientState.LatestHeight) - clientStateBz := clienttypes.MustMarshalClientState(suite.chainA.App.AppCodec(), clientState) + clientStateBz := clienttypes.MustMarshalClientState(s.chainA.App.AppCodec(), clientState) store.Set(host.ClientStateKey(), clientStateBz) consensusState := types.NewConsensusState(payload.ConsensusState) - consensusStateBz := clienttypes.MustMarshalConsensusState(suite.chainA.App.AppCodec(), consensusState) + consensusStateBz := clienttypes.MustMarshalConsensusState(s.chainA.App.AppCodec(), consensusState) store.Set(host.ConsensusStateKey(clientState.LatestHeight), consensusStateBz) resp, err := json.Marshal(types.EmptyResult{}) - suite.Require().NoError(err) + s.Require().NoError(err) return &wasmvmtypes.ContractResult{Ok: &wasmvmtypes.Response{Data: resp}}, 0, nil } - suite.mockVM.RegisterQueryCallback(types.StatusMsg{}, func(checksum wasmvm.Checksum, env wasmvmtypes.Env, queryMsg []byte, store wasmvm.KVStore, goapi wasmvm.GoAPI, querier wasmvm.Querier, gasMeter wasmvm.GasMeter, gasLimit uint64, deserCost wasmvmtypes.UFraction) (*wasmvmtypes.QueryResult, uint64, error) { + s.mockVM.RegisterQueryCallback(types.StatusMsg{}, func(checksum wasmvm.Checksum, env wasmvmtypes.Env, queryMsg []byte, store wasmvm.KVStore, goapi wasmvm.GoAPI, querier wasmvm.Querier, gasMeter wasmvm.GasMeter, gasLimit uint64, deserCost wasmvmtypes.UFraction) (*wasmvmtypes.QueryResult, uint64, error) { resp, err := json.Marshal(types.StatusResult{Status: exported.Active.String()}) - suite.Require().NoError(err) + s.Require().NoError(err) return &wasmvmtypes.QueryResult{Ok: resp}, wasmtesting.DefaultGasUsed, nil }) db := dbm.NewMemDB() - 
app := simapp.NewUnitTestSimApp(log.NewNopLogger(), db, nil, true, simtestutil.EmptyAppOptions{}, suite.mockVM) + app := simapp.NewUnitTestSimApp(log.NewNopLogger(), db, nil, true, simtestutil.EmptyAppOptions{}, s.mockVM) return app, app.DefaultGenesis() } // storeWasmCode stores the wasm code on chain and returns the checksum. -func (suite *KeeperTestSuite) storeWasmCode(wasmCode []byte) []byte { - ctx := suite.chainA.GetContext().WithBlockGasMeter(storetypes.NewInfiniteGasMeter()) +func (s *KeeperTestSuite) storeWasmCode(wasmCode []byte) []byte { + ctx := s.chainA.GetContext().WithBlockGasMeter(storetypes.NewInfiniteGasMeter()) msg := types.NewMsgStoreCode(authtypes.NewModuleAddress(govtypes.ModuleName).String(), wasmCode) - response, err := GetSimApp(suite.chainA).WasmClientKeeper.StoreCode(ctx, msg) - suite.Require().NoError(err) - suite.Require().NotNil(response.Checksum) + response, err := GetSimApp(s.chainA).WasmClientKeeper.StoreCode(ctx, msg) + s.Require().NoError(err) + s.Require().NotNil(response.Checksum) return response.Checksum } -func (suite *KeeperTestSuite) SetupSnapshotterWithMockVM() *simapp.SimApp { - suite.mockVM = wasmtesting.NewMockWasmEngine() +func (s *KeeperTestSuite) SetupSnapshotterWithMockVM() *simapp.SimApp { + s.mockVM = wasmtesting.NewMockWasmEngine() - return simapp.SetupWithSnapshotter(suite.T(), suite.mockVM) + return simapp.SetupWithSnapshotter(s.T(), s.mockVM) } func TestKeeperTestSuite(t *testing.T) { testifysuite.Run(t, new(KeeperTestSuite)) } -func (suite *KeeperTestSuite) TestNewKeeper() { +func (s *KeeperTestSuite) TestNewKeeper() { testCases := []struct { name string instantiateFn func() @@ -143,12 +143,12 @@ func (suite *KeeperTestSuite) TestNewKeeper() { "success", func() { keeper.NewKeeperWithVM( - GetSimApp(suite.chainA).AppCodec(), - runtime.NewKVStoreService(GetSimApp(suite.chainA).GetKey(types.StoreKey)), - GetSimApp(suite.chainA).IBCKeeper.ClientKeeper, - GetSimApp(suite.chainA).WasmClientKeeper.GetAuthority(), - GetSimApp(suite.chainA).WasmClientKeeper.GetVM(), - GetSimApp(suite.chainA).GRPCQueryRouter(), + GetSimApp(s.chainA).AppCodec(), + runtime.NewKVStoreService(GetSimApp(s.chainA).GetKey(types.StoreKey)), + GetSimApp(s.chainA).IBCKeeper.ClientKeeper, + GetSimApp(s.chainA).WasmClientKeeper.GetAuthority(), + GetSimApp(s.chainA).WasmClientKeeper.GetVM(), + GetSimApp(s.chainA).GRPCQueryRouter(), ) }, nil, @@ -157,12 +157,12 @@ func (suite *KeeperTestSuite) TestNewKeeper() { "failure: empty authority", func() { keeper.NewKeeperWithVM( - GetSimApp(suite.chainA).AppCodec(), - runtime.NewKVStoreService(GetSimApp(suite.chainA).GetKey(types.StoreKey)), - GetSimApp(suite.chainA).IBCKeeper.ClientKeeper, + GetSimApp(s.chainA).AppCodec(), + runtime.NewKVStoreService(GetSimApp(s.chainA).GetKey(types.StoreKey)), + GetSimApp(s.chainA).IBCKeeper.ClientKeeper, "", // authority - GetSimApp(suite.chainA).WasmClientKeeper.GetVM(), - GetSimApp(suite.chainA).GRPCQueryRouter(), + GetSimApp(s.chainA).WasmClientKeeper.GetVM(), + GetSimApp(s.chainA).GRPCQueryRouter(), ) }, errors.New("authority must be non-empty"), @@ -171,12 +171,12 @@ func (suite *KeeperTestSuite) TestNewKeeper() { "failure: nil client keeper", func() { keeper.NewKeeperWithVM( - GetSimApp(suite.chainA).AppCodec(), - runtime.NewKVStoreService(GetSimApp(suite.chainA).GetKey(types.StoreKey)), + GetSimApp(s.chainA).AppCodec(), + runtime.NewKVStoreService(GetSimApp(s.chainA).GetKey(types.StoreKey)), nil, // client keeper, - GetSimApp(suite.chainA).WasmClientKeeper.GetAuthority(), - 
GetSimApp(suite.chainA).WasmClientKeeper.GetVM(), - GetSimApp(suite.chainA).GRPCQueryRouter(), + GetSimApp(s.chainA).WasmClientKeeper.GetAuthority(), + GetSimApp(s.chainA).WasmClientKeeper.GetVM(), + GetSimApp(s.chainA).GRPCQueryRouter(), ) }, errors.New("client keeper must not be nil"), @@ -185,12 +185,12 @@ func (suite *KeeperTestSuite) TestNewKeeper() { "failure: nil wasm VM", func() { keeper.NewKeeperWithVM( - GetSimApp(suite.chainA).AppCodec(), - runtime.NewKVStoreService(GetSimApp(suite.chainA).GetKey(types.StoreKey)), - GetSimApp(suite.chainA).IBCKeeper.ClientKeeper, - GetSimApp(suite.chainA).WasmClientKeeper.GetAuthority(), + GetSimApp(s.chainA).AppCodec(), + runtime.NewKVStoreService(GetSimApp(s.chainA).GetKey(types.StoreKey)), + GetSimApp(s.chainA).IBCKeeper.ClientKeeper, + GetSimApp(s.chainA).WasmClientKeeper.GetAuthority(), nil, - GetSimApp(suite.chainA).GRPCQueryRouter(), + GetSimApp(s.chainA).GRPCQueryRouter(), ) }, errors.New("wasm VM must not be nil"), @@ -199,12 +199,12 @@ func (suite *KeeperTestSuite) TestNewKeeper() { "failure: nil store service", func() { keeper.NewKeeperWithVM( - GetSimApp(suite.chainA).AppCodec(), + GetSimApp(s.chainA).AppCodec(), nil, - GetSimApp(suite.chainA).IBCKeeper.ClientKeeper, - GetSimApp(suite.chainA).WasmClientKeeper.GetAuthority(), - GetSimApp(suite.chainA).WasmClientKeeper.GetVM(), - GetSimApp(suite.chainA).GRPCQueryRouter(), + GetSimApp(s.chainA).IBCKeeper.ClientKeeper, + GetSimApp(s.chainA).WasmClientKeeper.GetAuthority(), + GetSimApp(s.chainA).WasmClientKeeper.GetVM(), + GetSimApp(s.chainA).GRPCQueryRouter(), ) }, errors.New("store service must not be nil"), @@ -213,11 +213,11 @@ func (suite *KeeperTestSuite) TestNewKeeper() { "failure: nil query router", func() { keeper.NewKeeperWithVM( - GetSimApp(suite.chainA).AppCodec(), - runtime.NewKVStoreService(GetSimApp(suite.chainA).GetKey(types.StoreKey)), - GetSimApp(suite.chainA).IBCKeeper.ClientKeeper, - GetSimApp(suite.chainA).WasmClientKeeper.GetAuthority(), - GetSimApp(suite.chainA).WasmClientKeeper.GetVM(), + GetSimApp(s.chainA).AppCodec(), + runtime.NewKVStoreService(GetSimApp(s.chainA).GetKey(types.StoreKey)), + GetSimApp(s.chainA).IBCKeeper.ClientKeeper, + GetSimApp(s.chainA).WasmClientKeeper.GetAuthority(), + GetSimApp(s.chainA).WasmClientKeeper.GetVM(), nil, ) }, @@ -226,16 +226,15 @@ func (suite *KeeperTestSuite) TestNewKeeper() { } for _, tc := range testCases { - tc := tc - suite.SetupTest() + s.SetupTest() - suite.Run(tc.name, func() { + s.Run(tc.name, func() { if tc.expError == nil { - suite.Require().NotPanics( + s.Require().NotPanics( tc.instantiateFn, ) } else { - suite.Require().PanicsWithError(tc.expError.Error(), func() { + s.Require().PanicsWithError(tc.expError.Error(), func() { tc.instantiateFn() }) } @@ -243,7 +242,7 @@ func (suite *KeeperTestSuite) TestNewKeeper() { } } -func (suite *KeeperTestSuite) TestInitializedPinnedCodes() { +func (s *KeeperTestSuite) TestInitializedPinnedCodes() { var capturedChecksums []wasmvm.Checksum testCases := []struct { @@ -254,7 +253,7 @@ func (suite *KeeperTestSuite) TestInitializedPinnedCodes() { { "success", func() { - suite.mockVM.PinFn = func(checksum wasmvm.Checksum) error { + s.mockVM.PinFn = func(checksum wasmvm.Checksum) error { capturedChecksums = append(capturedChecksums, checksum) return nil } @@ -264,7 +263,7 @@ func (suite *KeeperTestSuite) TestInitializedPinnedCodes() { { "failure: pin error", func() { - suite.mockVM.PinFn = func(checksum wasmvm.Checksum) error { + s.mockVM.PinFn = func(checksum wasmvm.Checksum) error 
{ return wasmtesting.ErrMockVM } }, @@ -273,13 +272,11 @@ func (suite *KeeperTestSuite) TestInitializedPinnedCodes() { } for _, tc := range testCases { - tc := tc + s.Run(tc.name, func() { + s.SetupWasmWithMockVM() - suite.Run(tc.name, func() { - suite.SetupWasmWithMockVM() - - ctx := suite.chainA.GetContext() - wasmClientKeeper := GetSimApp(suite.chainA).WasmClientKeeper + ctx := s.chainA.GetContext() + wasmClientKeeper := GetSimApp(s.chainA).WasmClientKeeper contracts := [][]byte{wasmtesting.Code, wasmtesting.CreateMockContract([]byte("gzipped-contract"))} checksumIDs := make([]types.Checksum, len(contracts)) @@ -290,7 +287,7 @@ func (suite *KeeperTestSuite) TestInitializedPinnedCodes() { msg := types.NewMsgStoreCode(signer, contract) res, err := wasmClientKeeper.StoreCode(ctx, msg) - suite.Require().NoError(err) + s.Require().NoError(err) checksumIDs[i] = res.Checksum } @@ -301,16 +298,16 @@ func (suite *KeeperTestSuite) TestInitializedPinnedCodes() { err := wasmClientKeeper.InitializePinnedCodes(ctx) if tc.expError == nil { - suite.Require().NoError(err) - suite.ElementsMatch(checksumIDs, capturedChecksums) + s.Require().NoError(err) + s.ElementsMatch(checksumIDs, capturedChecksums) } else { - suite.Require().ErrorIs(err, tc.expError) + s.Require().ErrorIs(err, tc.expError) } }) } } -func (suite *KeeperTestSuite) TestMigrateContract() { +func (s *KeeperTestSuite) TestMigrateContract() { var ( oldHash []byte newHash []byte @@ -326,12 +323,12 @@ func (suite *KeeperTestSuite) TestMigrateContract() { { "success: update client state", func() { - suite.mockVM.MigrateFn = func(_ wasmvm.Checksum, _ wasmvmtypes.Env, _ []byte, store wasmvm.KVStore, _ wasmvm.GoAPI, _ wasmvm.Querier, _ wasmvm.GasMeter, _ uint64, _ wasmvmtypes.UFraction) (*wasmvmtypes.ContractResult, uint64, error) { + s.mockVM.MigrateFn = func(_ wasmvm.Checksum, _ wasmvmtypes.Env, _ []byte, store wasmvm.KVStore, _ wasmvm.GoAPI, _ wasmvm.Querier, _ wasmvm.GasMeter, _ uint64, _ wasmvmtypes.UFraction) (*wasmvmtypes.ContractResult, uint64, error) { expClientState = types.NewClientState([]byte{1}, newHash, clienttypes.NewHeight(2000, 2)) - store.Set(host.ClientStateKey(), clienttypes.MustMarshalClientState(suite.chainA.App.AppCodec(), expClientState)) + store.Set(host.ClientStateKey(), clienttypes.MustMarshalClientState(s.chainA.App.AppCodec(), expClientState)) data, err := json.Marshal(types.EmptyResult{}) - suite.Require().NoError(err) + s.Require().NoError(err) return &wasmvmtypes.ContractResult{Ok: &wasmvmtypes.Response{Data: data}}, wasmtesting.DefaultGasUsed, nil } @@ -343,7 +340,7 @@ func (suite *KeeperTestSuite) TestMigrateContract() { func() { newHash = oldHash // this should not be called - suite.mockVM.MigrateFn = func(_ wasmvm.Checksum, _ wasmvmtypes.Env, _ []byte, _ wasmvm.KVStore, _ wasmvm.GoAPI, _ wasmvm.Querier, _ wasmvm.GasMeter, _ uint64, _ wasmvmtypes.UFraction) (*wasmvmtypes.ContractResult, uint64, error) { + s.mockVM.MigrateFn = func(_ wasmvm.Checksum, _ wasmvmtypes.Env, _ []byte, _ wasmvm.KVStore, _ wasmvm.GoAPI, _ wasmvm.Querier, _ wasmvm.GasMeter, _ uint64, _ wasmvmtypes.UFraction) (*wasmvmtypes.ContractResult, uint64, error) { panic("unreachable") } }, @@ -352,18 +349,18 @@ func (suite *KeeperTestSuite) TestMigrateContract() { { "failure: checksum not found", func() { - err := GetSimApp(suite.chainA).WasmClientKeeper.GetChecksums().Remove(suite.chainA.GetContext(), newHash) - suite.Require().NoError(err) + err := GetSimApp(s.chainA).WasmClientKeeper.GetChecksums().Remove(s.chainA.GetContext(), newHash) + 
s.Require().NoError(err) }, types.ErrWasmChecksumNotFound, }, { "failure: vm returns error", func() { - err := GetSimApp(suite.chainA).WasmClientKeeper.GetChecksums().Set(suite.chainA.GetContext(), newHash) - suite.Require().NoError(err) + err := GetSimApp(s.chainA).WasmClientKeeper.GetChecksums().Set(s.chainA.GetContext(), newHash) + s.Require().NoError(err) - suite.mockVM.MigrateFn = func(_ wasmvm.Checksum, _ wasmvmtypes.Env, _ []byte, _ wasmvm.KVStore, _ wasmvm.GoAPI, _ wasmvm.Querier, _ wasmvm.GasMeter, _ uint64, _ wasmvmtypes.UFraction) (*wasmvmtypes.ContractResult, uint64, error) { + s.mockVM.MigrateFn = func(_ wasmvm.Checksum, _ wasmvmtypes.Env, _ []byte, _ wasmvm.KVStore, _ wasmvm.GoAPI, _ wasmvm.Querier, _ wasmvm.GasMeter, _ uint64, _ wasmvmtypes.UFraction) (*wasmvmtypes.ContractResult, uint64, error) { return nil, wasmtesting.DefaultGasUsed, wasmtesting.ErrMockVM } }, @@ -372,10 +369,10 @@ func (suite *KeeperTestSuite) TestMigrateContract() { { "failure: contract returns error", func() { - err := GetSimApp(suite.chainA).WasmClientKeeper.GetChecksums().Set(suite.chainA.GetContext(), newHash) - suite.Require().NoError(err) + err := GetSimApp(s.chainA).WasmClientKeeper.GetChecksums().Set(s.chainA.GetContext(), newHash) + s.Require().NoError(err) - suite.mockVM.MigrateFn = func(_ wasmvm.Checksum, _ wasmvmtypes.Env, _ []byte, _ wasmvm.KVStore, _ wasmvm.GoAPI, _ wasmvm.Querier, _ wasmvm.GasMeter, _ uint64, _ wasmvmtypes.UFraction) (*wasmvmtypes.ContractResult, uint64, error) { + s.mockVM.MigrateFn = func(_ wasmvm.Checksum, _ wasmvmtypes.Env, _ []byte, _ wasmvm.KVStore, _ wasmvm.GoAPI, _ wasmvm.Querier, _ wasmvm.GasMeter, _ uint64, _ wasmvmtypes.UFraction) (*wasmvmtypes.ContractResult, uint64, error) { return &wasmvmtypes.ContractResult{Err: wasmtesting.ErrMockContract.Error()}, wasmtesting.DefaultGasUsed, nil } }, @@ -384,49 +381,48 @@ func (suite *KeeperTestSuite) TestMigrateContract() { } for _, tc := range testCases { - tc := tc - suite.Run(tc.name, func() { - suite.SetupWasmWithMockVM() - suite.storeWasmCode(wasmtesting.Code) + s.Run(tc.name, func() { + s.SetupWasmWithMockVM() + s.storeWasmCode(wasmtesting.Code) var err error oldHash, err = types.CreateChecksum(wasmtesting.Code) - suite.Require().NoError(err) + s.Require().NoError(err) newHash, err = types.CreateChecksum(wasmtesting.CreateMockContract([]byte{1, 2, 3})) - suite.Require().NoError(err) + s.Require().NoError(err) - wasmClientKeeper := GetSimApp(suite.chainA).WasmClientKeeper - err = wasmClientKeeper.GetChecksums().Set(suite.chainA.GetContext(), newHash) - suite.Require().NoError(err) + wasmClientKeeper := GetSimApp(s.chainA).WasmClientKeeper + err = wasmClientKeeper.GetChecksums().Set(s.chainA.GetContext(), newHash) + s.Require().NoError(err) - endpointA := wasmtesting.NewWasmEndpoint(suite.chainA) + endpointA := wasmtesting.NewWasmEndpoint(s.chainA) err = endpointA.CreateClient() - suite.Require().NoError(err) + s.Require().NoError(err) clientState, ok := endpointA.GetClientState().(*types.ClientState) - suite.Require().True(ok) + s.Require().True(ok) expClientState = clientState tc.malleate() - err = wasmClientKeeper.MigrateContractCode(suite.chainA.GetContext(), endpointA.ClientID, newHash, payload) + err = wasmClientKeeper.MigrateContractCode(s.chainA.GetContext(), endpointA.ClientID, newHash, payload) // updated client state clientState, ok = endpointA.GetClientState().(*types.ClientState) - suite.Require().True(ok) + s.Require().True(ok) if tc.expErr == nil { - suite.Require().NoError(err) - 
suite.Require().Equal(expClientState, clientState) + s.Require().NoError(err) + s.Require().Equal(expClientState, clientState) } else { - suite.Require().ErrorIs(err, tc.expErr) + s.Require().ErrorIs(err, tc.expErr) } }) } } -func (suite *KeeperTestSuite) TestGetChecksums() { +func (s *KeeperTestSuite) TestGetChecksums() { testCases := []struct { name string malleate func() @@ -436,80 +432,79 @@ func (suite *KeeperTestSuite) TestGetChecksums() { "success: no contract stored.", func() {}, func(checksums []types.Checksum) { - suite.Require().Len(checksums, 0) + s.Require().Empty(checksums) }, }, { "success: default mock vm contract stored.", func() { - suite.SetupWasmWithMockVM() - suite.storeWasmCode(wasmtesting.Code) + s.SetupWasmWithMockVM() + s.storeWasmCode(wasmtesting.Code) }, func(checksums []types.Checksum) { - suite.Require().Len(checksums, 1) + s.Require().Len(checksums, 1) expectedChecksum, err := types.CreateChecksum(wasmtesting.Code) - suite.Require().NoError(err) - suite.Require().Equal(expectedChecksum, checksums[0]) + s.Require().NoError(err) + s.Require().Equal(expectedChecksum, checksums[0]) }, }, { "success: non-empty checksums", func() { - suite.SetupWasmWithMockVM() - suite.storeWasmCode(wasmtesting.Code) + s.SetupWasmWithMockVM() + s.storeWasmCode(wasmtesting.Code) - err := GetSimApp(suite.chainA).WasmClientKeeper.GetChecksums().Set(suite.chainA.GetContext(), types.Checksum("checksum")) - suite.Require().NoError(err) + err := GetSimApp(s.chainA).WasmClientKeeper.GetChecksums().Set(s.chainA.GetContext(), types.Checksum("checksum")) + s.Require().NoError(err) }, func(checksums []types.Checksum) { - suite.Require().Len(checksums, 2) - suite.Require().Contains(checksums, types.Checksum("checksum")) + s.Require().Len(checksums, 2) + s.Require().Contains(checksums, types.Checksum("checksum")) }, }, } for _, tc := range testCases { - tc := tc - suite.Run(tc.name, func() { + s.Run(tc.name, func() { tc.malleate() - checksums, err := GetSimApp(suite.chainA).WasmClientKeeper.GetAllChecksums(suite.chainA.GetContext()) - suite.Require().NoError(err) + checksums, err := GetSimApp(s.chainA).WasmClientKeeper.GetAllChecksums(s.chainA.GetContext()) + s.Require().NoError(err) tc.expResult(checksums) }) } } -func (suite *KeeperTestSuite) TestAddChecksum() { - suite.SetupWasmWithMockVM() - suite.storeWasmCode(wasmtesting.Code) +func (s *KeeperTestSuite) TestAddChecksum() { + s.SetupWasmWithMockVM() + s.storeWasmCode(wasmtesting.Code) - wasmClientKeeper := GetSimApp(suite.chainA).WasmClientKeeper + wasmClientKeeper := GetSimApp(s.chainA).WasmClientKeeper - checksums, err := wasmClientKeeper.GetAllChecksums(suite.chainA.GetContext()) - suite.Require().NoError(err) + checksums, err := wasmClientKeeper.GetAllChecksums(s.chainA.GetContext()) + s.Require().NoError(err) // default mock vm contract is stored - suite.Require().Len(checksums, 1) + s.Require().Len(checksums, 1) checksum1 := types.Checksum("checksum1") checksum2 := types.Checksum("checksum2") - err = wasmClientKeeper.GetChecksums().Set(suite.chainA.GetContext(), checksum1) - suite.Require().NoError(err) - err = wasmClientKeeper.GetChecksums().Set(suite.chainA.GetContext(), checksum2) - suite.Require().NoError(err) + err = wasmClientKeeper.GetChecksums().Set(s.chainA.GetContext(), checksum1) + s.Require().NoError(err) + err = wasmClientKeeper.GetChecksums().Set(s.chainA.GetContext(), checksum2) + s.Require().NoError(err) // Test adding the same checksum twice - err = wasmClientKeeper.GetChecksums().Set(suite.chainA.GetContext(), 
checksum1) - suite.Require().NoError(err) - - checksums, err = wasmClientKeeper.GetAllChecksums(suite.chainA.GetContext()) - suite.Require().NoError(err) - suite.Require().Len(checksums, 3) - suite.Require().Contains(checksums, checksum1) - suite.Require().Contains(checksums, checksum2) + err = wasmClientKeeper.GetChecksums().Set(s.chainA.GetContext(), checksum1) + s.Require().NoError(err) + + checksums, err = wasmClientKeeper.GetAllChecksums(s.chainA.GetContext()) + s.Require().NoError(err) + s.Require().Len(checksums, 3) + s.Require().Contains(checksums, checksum1) + s.Require().Contains(checksums, checksum2) } -func (suite *KeeperTestSuite) TestHasChecksum() { +func (s *KeeperTestSuite) TestHasChecksum() { var checksum types.Checksum testCases := []struct { @@ -521,8 +516,8 @@ func (suite *KeeperTestSuite) TestHasChecksum() { "success: checksum exists", func() { checksum = types.Checksum("checksum") - err := GetSimApp(suite.chainA).WasmClientKeeper.GetChecksums().Set(suite.chainA.GetContext(), checksum) - suite.Require().NoError(err) + err := GetSimApp(s.chainA).WasmClientKeeper.GetChecksums().Set(s.chainA.GetContext(), checksum) + s.Require().NoError(err) }, true, }, @@ -536,14 +531,13 @@ func (suite *KeeperTestSuite) TestHasChecksum() { } for _, tc := range testCases { - tc := tc - suite.Run(tc.name, func() { - suite.SetupWasmWithMockVM() + s.Run(tc.name, func() { + s.SetupWasmWithMockVM() tc.malleate() - result := GetSimApp(suite.chainA).WasmClientKeeper.HasChecksum(suite.chainA.GetContext(), checksum) - suite.Require().Equal(tc.exprResult, result) + result := GetSimApp(s.chainA).WasmClientKeeper.HasChecksum(s.chainA.GetContext(), checksum) + s.Require().Equal(tc.exprResult, result) }) } } diff --git a/modules/light-clients/08-wasm/keeper/keeper_vm.go b/modules/light-clients/08-wasm/keeper/keeper_vm.go index 719034c259c..6426084f849 100644 --- a/modules/light-clients/08-wasm/keeper/keeper_vm.go +++ b/modules/light-clients/08-wasm/keeper/keeper_vm.go @@ -90,7 +90,7 @@ func NewKeeperWithConfig( ) Keeper { vm, err := wasmvm.NewVM(wasmConfig.DataDir, wasmConfig.SupportedCapabilities, types.ContractMemoryLimit, wasmConfig.ContractDebugMode, types.MemoryCacheSize) if err != nil { - panic(fmt.Errorf("failed to instantiate new Wasm VM instance: %v", err)) + panic(fmt.Errorf("failed to instantiate new Wasm VM instance: %w", err)) } return NewKeeperWithVM(cdc, storeService, clientKeeper, authority, vm, queryRouter, opts...) diff --git a/modules/light-clients/08-wasm/keeper/migrations.go b/modules/light-clients/08-wasm/keeper/migrations.go deleted file mode 100644 index 19a19f5e3f8..00000000000 --- a/modules/light-clients/08-wasm/keeper/migrations.go +++ /dev/null @@ -1,72 +0,0 @@ -package keeper - -import ( - sdk "github.com/cosmos/cosmos-sdk/types" - - "github.com/cosmos/ibc-go/modules/light-clients/08-wasm/v10/types" -) - -// Migrator is a struct for handling in-place store migrations. -type Migrator struct { - keeper Keeper -} - -// NewMigrator returns a new Migrator. -func NewMigrator(keeper Keeper) Migrator { - return Migrator{ - keeper: keeper, - } -} - -// MigrateChecksums migrates the wasm store from using a single key to -// store a list of checksums to using a collections.KeySet to store the checksums. -// -// It grabs the checksums stored previously under the old key and stores -// them in the global KeySet collection. It then deletes the old key and -// the checksums stored under it. 
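(Illustrative aside, not part of the patch: keeper_vm.go now wraps the wasmvm instantiation error with %w instead of formatting it with %v. A small sketch, using placeholder errors rather than the real wasmvm types, of what that changes for callers:)

// Sketch only: %w preserves the wrapped error so errors.Is/errors.As can see it,
// while %v flattens it into plain text.
package main

import (
	"errors"
	"fmt"
)

var errVM = errors.New("vm failure") // hypothetical sentinel error

func main() {
	wrapped := fmt.Errorf("failed to instantiate new Wasm VM instance: %w", errVM)
	flattened := fmt.Errorf("failed to instantiate new Wasm VM instance: %v", errVM)

	fmt.Println(errors.Is(wrapped, errVM))   // true: the error chain is preserved
	fmt.Println(errors.Is(flattened, errVM)) // false: only the message text remains
}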
-func (m Migrator) MigrateChecksums(ctx sdk.Context) error { - checksums, err := m.getStoredChecksums(ctx) - if err != nil { - return err - } - - for _, hash := range checksums { - if err := m.keeper.GetChecksums().Set(ctx, hash); err != nil { - return err - } - } - - // delete the previously stored checksums - if err := m.deleteChecksums(ctx); err != nil { - return err - } - - m.keeper.Logger(ctx).Info("successfully migrated Checksums to collections") - return nil -} - -// getStoredChecksums returns the checksums stored under the KeyChecksums key. -func (m Migrator) getStoredChecksums(ctx sdk.Context) ([][]byte, error) { - store := m.keeper.storeService.OpenKVStore(ctx) - - bz, err := store.Get([]byte(types.KeyChecksums)) - if err != nil { - return [][]byte{}, err - } - - var hashes types.Checksums - err = m.keeper.cdc.Unmarshal(bz, &hashes) - if err != nil { - return [][]byte{}, err - } - - return hashes.Checksums, nil -} - -// deleteChecksums deletes the checksums stored under the KeyChecksums key. -func (m Migrator) deleteChecksums(ctx sdk.Context) error { - store := m.keeper.storeService.OpenKVStore(ctx) - err := store.Delete([]byte(types.KeyChecksums)) - - return err -} diff --git a/modules/light-clients/08-wasm/keeper/migrations_test.go b/modules/light-clients/08-wasm/keeper/migrations_test.go deleted file mode 100644 index 25891ab0b76..00000000000 --- a/modules/light-clients/08-wasm/keeper/migrations_test.go +++ /dev/null @@ -1,60 +0,0 @@ -package keeper_test - -import ( - "github.com/cosmos/ibc-go/modules/light-clients/08-wasm/v10/keeper" - "github.com/cosmos/ibc-go/modules/light-clients/08-wasm/v10/types" -) - -func (suite *KeeperTestSuite) TestMigrateWasmStore() { - testCases := []struct { - name string - checksums [][]byte - }{ - { - "success: empty checksums", - [][]byte{}, - }, - { - "success: multiple checksums", - [][]byte{[]byte("hash1"), []byte("hash2"), []byte("hash3")}, - }, - } - - for _, tc := range testCases { - tc := tc - suite.Run(tc.name, func() { - suite.SetupTest() - - suite.storeChecksums(tc.checksums) - - // run the migration - wasmClientKeeper := GetSimApp(suite.chainA).WasmClientKeeper - m := keeper.NewMigrator(wasmClientKeeper) - - err := m.MigrateChecksums(suite.chainA.GetContext()) - suite.Require().NoError(err) - - // check that they were stored in KeySet - for _, hash := range tc.checksums { - suite.Require().True(wasmClientKeeper.GetChecksums().Has(suite.chainA.GetContext(), hash)) - } - - // check that the data under the old key was deleted - store := suite.chainA.GetContext().KVStore(GetSimApp(suite.chainA).GetKey(types.StoreKey)) - suite.Require().Nil(store.Get([]byte(types.KeyChecksums))) - }) - } -} - -// storeChecksums stores the given checksums under the KeyChecksums key, it runs -// each time on an empty store so we don't need to read the previous checksums. 
-func (suite *KeeperTestSuite) storeChecksums(checksums [][]byte) { - ctx := suite.chainA.GetContext() - - store := ctx.KVStore(GetSimApp(suite.chainA).GetKey(types.StoreKey)) - checksum := types.Checksums{Checksums: checksums} - bz, err := GetSimApp(suite.chainA).AppCodec().Marshal(&checksum) - suite.Require().NoError(err) - - store.Set([]byte(types.KeyChecksums), bz) -} diff --git a/modules/light-clients/08-wasm/keeper/msg_server.go b/modules/light-clients/08-wasm/keeper/msg_server.go index aab269a30dc..4a7a19f6f7b 100644 --- a/modules/light-clients/08-wasm/keeper/msg_server.go +++ b/modules/light-clients/08-wasm/keeper/msg_server.go @@ -15,7 +15,7 @@ import ( var _ types.MsgServer = (*Keeper)(nil) // StoreCode defines a rpc handler method for MsgStoreCode -func (k Keeper) StoreCode(goCtx context.Context, msg *types.MsgStoreCode) (*types.MsgStoreCodeResponse, error) { +func (k *Keeper) StoreCode(goCtx context.Context, msg *types.MsgStoreCode) (*types.MsgStoreCodeResponse, error) { if k.GetAuthority() != msg.Signer { return nil, errorsmod.Wrapf(ibcerrors.ErrUnauthorized, "expected %s, got %s", k.GetAuthority(), msg.Signer) } @@ -34,7 +34,7 @@ func (k Keeper) StoreCode(goCtx context.Context, msg *types.MsgStoreCode) (*type } // RemoveChecksum defines a rpc handler method for MsgRemoveChecksum -func (k Keeper) RemoveChecksum(goCtx context.Context, msg *types.MsgRemoveChecksum) (*types.MsgRemoveChecksumResponse, +func (k *Keeper) RemoveChecksum(goCtx context.Context, msg *types.MsgRemoveChecksum) (*types.MsgRemoveChecksumResponse, error, ) { if k.GetAuthority() != msg.Signer { @@ -61,7 +61,7 @@ func (k Keeper) RemoveChecksum(goCtx context.Context, msg *types.MsgRemoveChecks } // MigrateContract defines a rpc handler method for MsgMigrateContract -func (k Keeper) MigrateContract(goCtx context.Context, msg *types.MsgMigrateContract) (*types.MsgMigrateContractResponse, error) { +func (k *Keeper) MigrateContract(goCtx context.Context, msg *types.MsgMigrateContract) (*types.MsgMigrateContractResponse, error) { if k.GetAuthority() != msg.Signer { return nil, errorsmod.Wrapf(ibcerrors.ErrUnauthorized, "expected %s, got %s", k.GetAuthority(), msg.Signer) } diff --git a/modules/light-clients/08-wasm/keeper/msg_server_test.go b/modules/light-clients/08-wasm/keeper/msg_server_test.go index feff3cddd90..72d9a3b16a3 100644 --- a/modules/light-clients/08-wasm/keeper/msg_server_test.go +++ b/modules/light-clients/08-wasm/keeper/msg_server_test.go @@ -20,7 +20,7 @@ import ( ibctesting "github.com/cosmos/ibc-go/v10/testing" ) -func (suite *KeeperTestSuite) TestMsgStoreCode() { +func (s *KeeperTestSuite) TestMsgStoreCode() { var ( msg *types.MsgStoreCode signer string @@ -44,8 +44,8 @@ func (suite *KeeperTestSuite) TestMsgStoreCode() { func() { msg = types.NewMsgStoreCode(signer, data) - _, err := GetSimApp(suite.chainA).WasmClientKeeper.StoreCode(suite.chainA.GetContext(), msg) - suite.Require().NoError(err) + _, err := GetSimApp(s.chainA).WasmClientKeeper.StoreCode(s.chainA.GetContext(), msg) + s.Require().NoError(err) }, types.ErrWasmCodeExists, }, @@ -73,7 +73,7 @@ func (suite *KeeperTestSuite) TestMsgStoreCode() { { "fails with unauthorized signer", func() { - signer = suite.chainA.SenderAccount.GetAddress().String() + signer = s.chainA.SenderAccount.GetAddress().String() msg = types.NewMsgStoreCode(signer, data) }, ibcerrors.ErrUnauthorized, @@ -83,7 +83,7 @@ func (suite *KeeperTestSuite) TestMsgStoreCode() { func() { msg = types.NewMsgStoreCode(signer, data) - suite.mockVM.PinFn = func(_ 
wasmvm.Checksum) error { + s.mockVM.PinFn = func(_ wasmvm.Checksum) error { return wasmtesting.ErrMockVM } }, @@ -92,22 +92,22 @@ func (suite *KeeperTestSuite) TestMsgStoreCode() { } for _, tc := range testCases { - suite.Run(tc.name, func() { - suite.SetupWasmWithMockVM() + s.Run(tc.name, func() { + s.SetupWasmWithMockVM() signer = authtypes.NewModuleAddress(govtypes.ModuleName).String() data = wasmtesting.Code tc.malleate() - ctx := suite.chainA.GetContext() - res, err := GetSimApp(suite.chainA).WasmClientKeeper.StoreCode(ctx, msg) + ctx := s.chainA.GetContext() + res, err := GetSimApp(s.chainA).WasmClientKeeper.StoreCode(ctx, msg) events := ctx.EventManager().Events() if tc.expError == nil { - suite.Require().NoError(err) - suite.Require().NotNil(res) - suite.Require().NotEmpty(res.Checksum) + s.Require().NoError(err) + s.Require().NotNil(res) + s.Require().NotEmpty(res.Checksum) // Verify events expectedEvents := sdk.Events{ @@ -122,20 +122,20 @@ func (suite *KeeperTestSuite) TestMsgStoreCode() { } for _, evt := range expectedEvents { - suite.Require().Contains(events, evt) + s.Require().Contains(events, evt) } } else { - suite.Require().Contains(err.Error(), tc.expError.Error()) - suite.Require().Nil(res) - suite.Require().Empty(events) + s.Require().Contains(err.Error(), tc.expError.Error()) + s.Require().Nil(res) + s.Require().Empty(events) } }) } } -func (suite *KeeperTestSuite) TestMsgMigrateContract() { +func (s *KeeperTestSuite) TestMsgMigrateContract() { oldChecksum, err := types.CreateChecksum(wasmtesting.Code) - suite.Require().NoError(err) + s.Require().NoError(err) newByteCode := wasmtesting.CreateMockContract([]byte("MockByteCode-TestMsgMigrateContract")) @@ -157,9 +157,9 @@ func (suite *KeeperTestSuite) TestMsgMigrateContract() { func() { msg = types.NewMsgMigrateContract(govAcc, defaultWasmClientID, newChecksum, []byte("{}")) - suite.mockVM.MigrateFn = func(_ wasmvm.Checksum, _ wasmvmtypes.Env, _ []byte, _ wasmvm.KVStore, _ wasmvm.GoAPI, _ wasmvm.Querier, _ wasmvm.GasMeter, _ uint64, _ wasmvmtypes.UFraction) (*wasmvmtypes.ContractResult, uint64, error) { + s.mockVM.MigrateFn = func(_ wasmvm.Checksum, _ wasmvmtypes.Env, _ []byte, _ wasmvm.KVStore, _ wasmvm.GoAPI, _ wasmvm.Querier, _ wasmvm.GasMeter, _ uint64, _ wasmvmtypes.UFraction) (*wasmvmtypes.ContractResult, uint64, error) { data, err := json.Marshal(types.EmptyResult{}) - suite.Require().NoError(err) + s.Require().NoError(err) return &wasmvmtypes.ContractResult{Ok: &wasmvmtypes.Response{Data: data}}, wasmtesting.DefaultGasUsed, nil } @@ -171,16 +171,16 @@ func (suite *KeeperTestSuite) TestMsgMigrateContract() { func() { msg = types.NewMsgMigrateContract(govAcc, defaultWasmClientID, newChecksum, []byte("{}")) - suite.mockVM.MigrateFn = func(_ wasmvm.Checksum, _ wasmvmtypes.Env, _ []byte, store wasmvm.KVStore, _ wasmvm.GoAPI, _ wasmvm.Querier, _ wasmvm.GasMeter, _ uint64, _ wasmvmtypes.UFraction) (*wasmvmtypes.ContractResult, uint64, error) { + s.mockVM.MigrateFn = func(_ wasmvm.Checksum, _ wasmvmtypes.Env, _ []byte, store wasmvm.KVStore, _ wasmvm.GoAPI, _ wasmvm.Querier, _ wasmvm.GasMeter, _ uint64, _ wasmvmtypes.UFraction) (*wasmvmtypes.ContractResult, uint64, error) { // the checksum written in the client state will later be overwritten by the message server. 
- expClientStateBz := wasmtesting.CreateMockClientStateBz(suite.chainA.App.AppCodec(), []byte("invalid checksum")) + expClientStateBz := wasmtesting.CreateMockClientStateBz(s.chainA.App.AppCodec(), []byte("invalid checksum")) var ok bool - expClientState, ok = clienttypes.MustUnmarshalClientState(suite.chainA.App.AppCodec(), expClientStateBz).(*types.ClientState) - suite.Require().True(ok) + expClientState, ok = clienttypes.MustUnmarshalClientState(s.chainA.App.AppCodec(), expClientStateBz).(*types.ClientState) + s.Require().True(ok) store.Set(host.ClientStateKey(), expClientStateBz) data, err := json.Marshal(types.EmptyResult{}) - suite.Require().NoError(err) + s.Require().NoError(err) return &wasmvmtypes.ContractResult{Ok: &wasmvmtypes.Response{Data: data}}, wasmtesting.DefaultGasUsed, nil } @@ -192,7 +192,7 @@ func (suite *KeeperTestSuite) TestMsgMigrateContract() { func() { msg = types.NewMsgMigrateContract(govAcc, defaultWasmClientID, oldChecksum, []byte("{}")) - suite.mockVM.MigrateFn = func(_ wasmvm.Checksum, _ wasmvmtypes.Env, _ []byte, _ wasmvm.KVStore, _ wasmvm.GoAPI, _ wasmvm.Querier, _ wasmvm.GasMeter, _ uint64, _ wasmvmtypes.UFraction) (*wasmvmtypes.ContractResult, uint64, error) { + s.mockVM.MigrateFn = func(_ wasmvm.Checksum, _ wasmvmtypes.Env, _ []byte, _ wasmvm.KVStore, _ wasmvm.GoAPI, _ wasmvm.Querier, _ wasmvm.GasMeter, _ uint64, _ wasmvmtypes.UFraction) (*wasmvmtypes.ContractResult, uint64, error) { panic("unreachable") } }, @@ -201,7 +201,7 @@ func (suite *KeeperTestSuite) TestMsgMigrateContract() { { "failure: unauthorized signer", func() { - msg = types.NewMsgMigrateContract(suite.chainA.SenderAccount.GetAddress().String(), defaultWasmClientID, newChecksum, []byte("{}")) + msg = types.NewMsgMigrateContract(s.chainA.SenderAccount.GetAddress().String(), defaultWasmClientID, newChecksum, []byte("{}")) }, ibcerrors.ErrUnauthorized, }, @@ -224,7 +224,7 @@ func (suite *KeeperTestSuite) TestMsgMigrateContract() { func() { msg = types.NewMsgMigrateContract(govAcc, defaultWasmClientID, newChecksum, []byte("{}")) - suite.mockVM.MigrateFn = func(_ wasmvm.Checksum, _ wasmvmtypes.Env, _ []byte, _ wasmvm.KVStore, _ wasmvm.GoAPI, _ wasmvm.Querier, _ wasmvm.GasMeter, _ uint64, _ wasmvmtypes.UFraction) (*wasmvmtypes.ContractResult, uint64, error) { + s.mockVM.MigrateFn = func(_ wasmvm.Checksum, _ wasmvmtypes.Env, _ []byte, _ wasmvm.KVStore, _ wasmvm.GoAPI, _ wasmvm.Querier, _ wasmvm.GasMeter, _ uint64, _ wasmvmtypes.UFraction) (*wasmvmtypes.ContractResult, uint64, error) { return nil, wasmtesting.DefaultGasUsed, wasmtesting.ErrMockVM } }, @@ -235,7 +235,7 @@ func (suite *KeeperTestSuite) TestMsgMigrateContract() { func() { msg = types.NewMsgMigrateContract(govAcc, defaultWasmClientID, newChecksum, []byte("{}")) - suite.mockVM.MigrateFn = func(_ wasmvm.Checksum, _ wasmvmtypes.Env, _ []byte, _ wasmvm.KVStore, _ wasmvm.GoAPI, _ wasmvm.Querier, _ wasmvm.GasMeter, _ uint64, _ wasmvmtypes.UFraction) (*wasmvmtypes.ContractResult, uint64, error) { + s.mockVM.MigrateFn = func(_ wasmvm.Checksum, _ wasmvmtypes.Env, _ []byte, _ wasmvm.KVStore, _ wasmvm.GoAPI, _ wasmvm.Querier, _ wasmvm.GasMeter, _ uint64, _ wasmvmtypes.UFraction) (*wasmvmtypes.ContractResult, uint64, error) { return &wasmvmtypes.ContractResult{Err: wasmtesting.ErrMockContract.Error()}, wasmtesting.DefaultGasUsed, nil } }, @@ -246,12 +246,12 @@ func (suite *KeeperTestSuite) TestMsgMigrateContract() { func() { msg = types.NewMsgMigrateContract(govAcc, defaultWasmClientID, newChecksum, []byte("{}")) - suite.mockVM.MigrateFn = 
func(_ wasmvm.Checksum, _ wasmvmtypes.Env, _ []byte, store wasmvm.KVStore, _ wasmvm.GoAPI, _ wasmvm.Querier, _ wasmvm.GasMeter, _ uint64, _ wasmvmtypes.UFraction) (*wasmvmtypes.ContractResult, uint64, error) { + s.mockVM.MigrateFn = func(_ wasmvm.Checksum, _ wasmvmtypes.Env, _ []byte, store wasmvm.KVStore, _ wasmvm.GoAPI, _ wasmvm.Querier, _ wasmvm.GasMeter, _ uint64, _ wasmvmtypes.UFraction) (*wasmvmtypes.ContractResult, uint64, error) { // the checksum written in here will be overwritten store.Set(host.ClientStateKey(), []byte("changed client state")) data, err := json.Marshal(types.EmptyResult{}) - suite.Require().NoError(err) + s.Require().NoError(err) return &wasmvmtypes.ContractResult{Ok: &wasmvmtypes.Response{Data: data}}, wasmtesting.DefaultGasUsed, nil } @@ -261,38 +261,38 @@ func (suite *KeeperTestSuite) TestMsgMigrateContract() { } for _, tc := range testCases { - suite.Run(tc.name, func() { + s.Run(tc.name, func() { var ok bool - suite.SetupWasmWithMockVM() + s.SetupWasmWithMockVM() - _ = suite.storeWasmCode(wasmtesting.Code) - newChecksum = suite.storeWasmCode(newByteCode) + _ = s.storeWasmCode(wasmtesting.Code) + newChecksum = s.storeWasmCode(newByteCode) - endpoint := wasmtesting.NewWasmEndpoint(suite.chainA) + endpoint := wasmtesting.NewWasmEndpoint(s.chainA) err := endpoint.CreateClient() - suite.Require().NoError(err) + s.Require().NoError(err) // this is the old client state expClientState, ok = endpoint.GetClientState().(*types.ClientState) - suite.Require().True(ok) + s.Require().True(ok) tc.malleate() - ctx := suite.chainA.GetContext() - res, err := GetSimApp(suite.chainA).WasmClientKeeper.MigrateContract(ctx, msg) + ctx := s.chainA.GetContext() + res, err := GetSimApp(s.chainA).WasmClientKeeper.MigrateContract(ctx, msg) events := ctx.EventManager().Events().ToABCIEvents() if tc.expError == nil { expClientState.Checksum = newChecksum - suite.Require().NoError(err) - suite.Require().NotNil(res) + s.Require().NoError(err) + s.Require().NotNil(res) // updated client state clientState, ok := endpoint.GetClientState().(*types.ClientState) - suite.Require().True(ok) + s.Require().True(ok) - suite.Require().Equal(expClientState, clientState) + s.Require().Equal(expClientState, clientState) // Verify events expectedEvents := sdk.Events{ @@ -309,19 +309,19 @@ func (suite *KeeperTestSuite) TestMsgMigrateContract() { }.ToABCIEvents() for _, evt := range expectedEvents { - suite.Require().Contains(events, evt) + s.Require().Contains(events, evt) } } else { - suite.Require().ErrorIs(err, tc.expError) - suite.Require().Nil(res) + s.Require().ErrorIs(err, tc.expError) + s.Require().Nil(res) } }) } } -func (suite *KeeperTestSuite) TestMsgRemoveChecksum() { +func (s *KeeperTestSuite) TestMsgRemoveChecksum() { checksum, err := types.CreateChecksum(wasmtesting.Code) - suite.Require().NoError(err) + s.Require().NoError(err) govAcc := authtypes.NewModuleAddress(govtypes.ModuleName).String() @@ -354,11 +354,11 @@ func (suite *KeeperTestSuite) TestMsgRemoveChecksum() { for i := range 20 { mockCode := wasmtesting.CreateMockContract([]byte{byte(i)}) checksum, err := types.CreateChecksum(mockCode) - suite.Require().NoError(err) + s.Require().NoError(err) - keeper := GetSimApp(suite.chainA).WasmClientKeeper - err = keeper.GetChecksums().Set(suite.chainA.GetContext(), checksum) - suite.Require().NoError(err) + keeper := GetSimApp(s.chainA).WasmClientKeeper + err = keeper.GetChecksums().Set(s.chainA.GetContext(), checksum) + s.Require().NoError(err) expChecksums = append(expChecksums, checksum) } 
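The msg_server.go hunks above change StoreCode, RemoveChecksum and MigrateContract from value receivers to pointer receivers on Keeper, in line with the existing var _ types.MsgServer = (*Keeper)(nil) assertion; the motivation is presumably a single, consistent receiver kind across the keeper. A minimal standalone sketch of what the receiver kind changes in practice (the counter type and method names below are hypothetical, not part of the keeper):

package main

import "fmt"

// counter is a hypothetical type used only to illustrate receiver kinds.
type counter struct{ n int }

// Value receiver: the method operates on a copy, so the caller's value never changes.
func (c counter) IncByValue() { c.n++ }

// Pointer receiver: the method shares the caller's value and can mutate it;
// it also belongs only to *counter's method set, not to counter's.
func (c *counter) IncByPointer() { c.n++ }

func main() {
    c := counter{}
    c.IncByValue()
    c.IncByPointer()
    fmt.Println(c.n) // 1: only the pointer-receiver call is visible to the caller
}

Whether these handlers need to mutate keeper state is not visible in the diff; the change mainly reads as aligning the MsgServer methods with the pointer type that the interface assertion already names.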
@@ -375,7 +375,7 @@ func (suite *KeeperTestSuite) TestMsgRemoveChecksum() { { "failure: unauthorized signer", func() { - msg = types.NewMsgRemoveChecksum(suite.chainA.SenderAccount.GetAddress().String(), checksum) + msg = types.NewMsgRemoveChecksum(s.chainA.SenderAccount.GetAddress().String(), checksum) }, ibcerrors.ErrUnauthorized, }, @@ -384,7 +384,7 @@ func (suite *KeeperTestSuite) TestMsgRemoveChecksum() { func() { msg = types.NewMsgRemoveChecksum(govAcc, checksum) - suite.mockVM.UnpinFn = func(_ wasmvm.Checksum) error { + s.mockVM.UnpinFn = func(_ wasmvm.Checksum) error { return wasmtesting.ErrMockVM } }, @@ -393,36 +393,36 @@ func (suite *KeeperTestSuite) TestMsgRemoveChecksum() { } for _, tc := range testCases { - suite.Run(tc.name, func() { - suite.SetupWasmWithMockVM() + s.Run(tc.name, func() { + s.SetupWasmWithMockVM() - _ = suite.storeWasmCode(wasmtesting.Code) + _ = s.storeWasmCode(wasmtesting.Code) - endpoint := wasmtesting.NewWasmEndpoint(suite.chainA) + endpoint := wasmtesting.NewWasmEndpoint(s.chainA) err := endpoint.CreateClient() - suite.Require().NoError(err) + s.Require().NoError(err) tc.malleate() - ctx := suite.chainA.GetContext() - res, err := GetSimApp(suite.chainA).WasmClientKeeper.RemoveChecksum(ctx, msg) + ctx := s.chainA.GetContext() + res, err := GetSimApp(s.chainA).WasmClientKeeper.RemoveChecksum(ctx, msg) events := ctx.EventManager().Events().ToABCIEvents() if tc.expError == nil { - suite.Require().NoError(err) - suite.Require().NotNil(res) + s.Require().NoError(err) + s.Require().NotNil(res) - checksums, err := GetSimApp(suite.chainA).WasmClientKeeper.GetAllChecksums(suite.chainA.GetContext()) - suite.Require().NoError(err) + checksums, err := GetSimApp(s.chainA).WasmClientKeeper.GetAllChecksums(s.chainA.GetContext()) + s.Require().NoError(err) // Check equality of checksums up to order - suite.Require().ElementsMatch(expChecksums, checksums) + s.Require().ElementsMatch(expChecksums, checksums) // Verify events - suite.Require().Len(events, 0) + s.Require().Len(events, 0) } else { - suite.Require().ErrorIs(err, tc.expError) - suite.Require().Nil(res) + s.Require().ErrorIs(err, tc.expError) + s.Require().Nil(res) } }) } diff --git a/modules/light-clients/08-wasm/keeper/options_test.go b/modules/light-clients/08-wasm/keeper/options_test.go index 916d5e6bd3f..8723ced3e16 100644 --- a/modules/light-clients/08-wasm/keeper/options_test.go +++ b/modules/light-clients/08-wasm/keeper/options_test.go @@ -25,7 +25,7 @@ func mockErrorStargateQuerier() func(sdk.Context, *wasmvmtypes.StargateQuery) ([ } } -func (suite *KeeperTestSuite) TestNewKeeperWithOptions() { +func (s *KeeperTestSuite) TestNewKeeperWithOptions() { var k keeper.Keeper testCases := []struct { name string @@ -36,22 +36,22 @@ func (suite *KeeperTestSuite) TestNewKeeperWithOptions() { "success: no options", func() { k = keeper.NewKeeperWithVM( - GetSimApp(suite.chainA).AppCodec(), - runtime.NewKVStoreService(GetSimApp(suite.chainA).GetKey(types.StoreKey)), - GetSimApp(suite.chainA).IBCKeeper.ClientKeeper, - GetSimApp(suite.chainA).WasmClientKeeper.GetAuthority(), - GetSimApp(suite.chainA).WasmClientKeeper.GetVM(), - GetSimApp(suite.chainA).GRPCQueryRouter(), + GetSimApp(s.chainA).AppCodec(), + runtime.NewKVStoreService(GetSimApp(s.chainA).GetKey(types.StoreKey)), + GetSimApp(s.chainA).IBCKeeper.ClientKeeper, + GetSimApp(s.chainA).WasmClientKeeper.GetAuthority(), + GetSimApp(s.chainA).WasmClientKeeper.GetVM(), + GetSimApp(s.chainA).GRPCQueryRouter(), ) }, func(k keeper.Keeper) { plugins := 
k.GetQueryPlugins() _, err := plugins.Custom(sdk.Context{}, nil) - suite.Require().ErrorIs(err, wasmvmtypes.UnsupportedRequest{Kind: "Custom queries are not allowed"}) + s.Require().ErrorIs(err, wasmvmtypes.UnsupportedRequest{Kind: "Custom queries are not allowed"}) _, err = plugins.Stargate(sdk.Context{}, &wasmvmtypes.StargateQuery{}) - suite.Require().ErrorIs(err, wasmvmtypes.UnsupportedRequest{Kind: "'' path is not allowed from the contract"}) + s.Require().ErrorIs(err, wasmvmtypes.UnsupportedRequest{Kind: "'' path is not allowed from the contract"}) }, }, { @@ -61,12 +61,12 @@ func (suite *KeeperTestSuite) TestNewKeeperWithOptions() { Custom: mockErrorCustomQuerier(), }) k = keeper.NewKeeperWithVM( - GetSimApp(suite.chainA).AppCodec(), - runtime.NewKVStoreService(GetSimApp(suite.chainA).GetKey(types.StoreKey)), - GetSimApp(suite.chainA).IBCKeeper.ClientKeeper, - GetSimApp(suite.chainA).WasmClientKeeper.GetAuthority(), - GetSimApp(suite.chainA).WasmClientKeeper.GetVM(), - GetSimApp(suite.chainA).GRPCQueryRouter(), + GetSimApp(s.chainA).AppCodec(), + runtime.NewKVStoreService(GetSimApp(s.chainA).GetKey(types.StoreKey)), + GetSimApp(s.chainA).IBCKeeper.ClientKeeper, + GetSimApp(s.chainA).WasmClientKeeper.GetAuthority(), + GetSimApp(s.chainA).WasmClientKeeper.GetVM(), + GetSimApp(s.chainA).GRPCQueryRouter(), querierOption, ) }, @@ -74,10 +74,10 @@ func (suite *KeeperTestSuite) TestNewKeeperWithOptions() { plugins := k.GetQueryPlugins() _, err := plugins.Custom(sdk.Context{}, nil) - suite.Require().ErrorContains(err, "custom querier error for TestNewKeeperWithOptions") + s.Require().ErrorContains(err, "custom querier error for TestNewKeeperWithOptions") _, err = plugins.Stargate(sdk.Context{}, &wasmvmtypes.StargateQuery{}) - suite.Require().ErrorIs(err, wasmvmtypes.UnsupportedRequest{Kind: "'' path is not allowed from the contract"}) + s.Require().ErrorIs(err, wasmvmtypes.UnsupportedRequest{Kind: "'' path is not allowed from the contract"}) }, }, { @@ -87,12 +87,12 @@ func (suite *KeeperTestSuite) TestNewKeeperWithOptions() { Stargate: mockErrorStargateQuerier(), }) k = keeper.NewKeeperWithVM( - GetSimApp(suite.chainA).AppCodec(), - runtime.NewKVStoreService(GetSimApp(suite.chainA).GetKey(types.StoreKey)), - GetSimApp(suite.chainA).IBCKeeper.ClientKeeper, - GetSimApp(suite.chainA).WasmClientKeeper.GetAuthority(), - GetSimApp(suite.chainA).WasmClientKeeper.GetVM(), - GetSimApp(suite.chainA).GRPCQueryRouter(), + GetSimApp(s.chainA).AppCodec(), + runtime.NewKVStoreService(GetSimApp(s.chainA).GetKey(types.StoreKey)), + GetSimApp(s.chainA).IBCKeeper.ClientKeeper, + GetSimApp(s.chainA).WasmClientKeeper.GetAuthority(), + GetSimApp(s.chainA).WasmClientKeeper.GetVM(), + GetSimApp(s.chainA).GRPCQueryRouter(), querierOption, ) }, @@ -100,10 +100,10 @@ func (suite *KeeperTestSuite) TestNewKeeperWithOptions() { plugins := k.GetQueryPlugins() _, err := plugins.Custom(sdk.Context{}, nil) - suite.Require().ErrorIs(err, wasmvmtypes.UnsupportedRequest{Kind: "Custom queries are not allowed"}) + s.Require().ErrorIs(err, wasmvmtypes.UnsupportedRequest{Kind: "Custom queries are not allowed"}) _, err = plugins.Stargate(sdk.Context{}, &wasmvmtypes.StargateQuery{}) - suite.Require().ErrorContains(err, "stargate querier error for TestNewKeeperWithOptions") + s.Require().ErrorContains(err, "stargate querier error for TestNewKeeperWithOptions") }, }, { @@ -114,12 +114,12 @@ func (suite *KeeperTestSuite) TestNewKeeperWithOptions() { Stargate: mockErrorStargateQuerier(), }) k = keeper.NewKeeperWithVM( - 
GetSimApp(suite.chainA).AppCodec(), - runtime.NewKVStoreService(GetSimApp(suite.chainA).GetKey(types.StoreKey)), - GetSimApp(suite.chainA).IBCKeeper.ClientKeeper, - GetSimApp(suite.chainA).WasmClientKeeper.GetAuthority(), - GetSimApp(suite.chainA).WasmClientKeeper.GetVM(), - GetSimApp(suite.chainA).GRPCQueryRouter(), + GetSimApp(s.chainA).AppCodec(), + runtime.NewKVStoreService(GetSimApp(s.chainA).GetKey(types.StoreKey)), + GetSimApp(s.chainA).IBCKeeper.ClientKeeper, + GetSimApp(s.chainA).WasmClientKeeper.GetAuthority(), + GetSimApp(s.chainA).WasmClientKeeper.GetVM(), + GetSimApp(s.chainA).GRPCQueryRouter(), querierOption, ) }, @@ -127,27 +127,26 @@ func (suite *KeeperTestSuite) TestNewKeeperWithOptions() { plugins := k.GetQueryPlugins() _, err := plugins.Custom(sdk.Context{}, nil) - suite.Require().ErrorContains(err, "custom querier error for TestNewKeeperWithOptions") + s.Require().ErrorContains(err, "custom querier error for TestNewKeeperWithOptions") _, err = plugins.Stargate(sdk.Context{}, &wasmvmtypes.StargateQuery{}) - suite.Require().ErrorContains(err, "stargate querier error for TestNewKeeperWithOptions") + s.Require().ErrorContains(err, "stargate querier error for TestNewKeeperWithOptions") }, }, } for _, tc := range testCases { - tc := tc - suite.SetupTest() + s.SetupTest() - suite.Run(tc.name, func() { + s.Run(tc.name, func() { // make sure the default query plugins are set - k.SetQueryPlugins(keeper.NewDefaultQueryPlugins(GetSimApp(suite.chainA).GRPCQueryRouter())) + k.SetQueryPlugins(keeper.NewDefaultQueryPlugins(GetSimApp(s.chainA).GRPCQueryRouter())) tc.malleate() tc.verifyFn(k) // reset query plugins after each test - k.SetQueryPlugins(keeper.NewDefaultQueryPlugins(GetSimApp(suite.chainA).GRPCQueryRouter())) + k.SetQueryPlugins(keeper.NewDefaultQueryPlugins(GetSimApp(s.chainA).GRPCQueryRouter())) }) } } diff --git a/modules/light-clients/08-wasm/keeper/querier_test.go b/modules/light-clients/08-wasm/keeper/querier_test.go index ca35705cfda..7f3ddf8e7a2 100644 --- a/modules/light-clients/08-wasm/keeper/querier_test.go +++ b/modules/light-clients/08-wasm/keeper/querier_test.go @@ -44,7 +44,7 @@ func mockCustomQuerier() func(sdk.Context, json.RawMessage) ([]byte, error) { } } -func (suite *KeeperTestSuite) TestCustomQuery() { +func (s *KeeperTestSuite) TestCustomQuery() { testCases := []struct { name string malleate func() @@ -57,28 +57,28 @@ func (suite *KeeperTestSuite) TestCustomQuery() { Custom: mockCustomQuerier(), } - GetSimApp(suite.chainA).WasmClientKeeper.SetQueryPlugins(querierPlugin) - suite.mockVM.RegisterQueryCallback(types.StatusMsg{}, func(_ wasmvm.Checksum, _ wasmvmtypes.Env, _ []byte, _ wasmvm.KVStore, _ wasmvm.GoAPI, querier wasmvm.Querier, _ wasmvm.GasMeter, _ uint64, _ wasmvmtypes.UFraction) (*wasmvmtypes.QueryResult, uint64, error) { + GetSimApp(s.chainA).WasmClientKeeper.SetQueryPlugins(querierPlugin) + s.mockVM.RegisterQueryCallback(types.StatusMsg{}, func(_ wasmvm.Checksum, _ wasmvmtypes.Env, _ []byte, _ wasmvm.KVStore, _ wasmvm.GoAPI, querier wasmvm.Querier, _ wasmvm.GasMeter, _ uint64, _ wasmvmtypes.UFraction) (*wasmvmtypes.QueryResult, uint64, error) { echo := CustomQuery{ Echo: &QueryEcho{ Data: "hello world", }, } echoJSON, err := json.Marshal(echo) - suite.Require().NoError(err) + s.Require().NoError(err) resp, err := querier.Query(wasmvmtypes.QueryRequest{ Custom: json.RawMessage(echoJSON), }, math.MaxUint64) - suite.Require().NoError(err) + s.Require().NoError(err) var respData string err = json.Unmarshal(resp, &respData) - 
suite.Require().NoError(err) - suite.Require().Equal("hello world", respData) + s.Require().NoError(err) + s.Require().Equal("hello world", respData) resp, err = json.Marshal(types.StatusResult{Status: exported.Active.String()}) - suite.Require().NoError(err) + s.Require().NoError(err) return &wasmvmtypes.QueryResult{Ok: resp}, wasmtesting.DefaultGasUsed, nil }) @@ -88,10 +88,10 @@ func (suite *KeeperTestSuite) TestCustomQuery() { { "failure: default query", func() { - suite.mockVM.RegisterQueryCallback(types.StatusMsg{}, func(_ wasmvm.Checksum, _ wasmvmtypes.Env, _ []byte, _ wasmvm.KVStore, _ wasmvm.GoAPI, querier wasmvm.Querier, _ wasmvm.GasMeter, _ uint64, _ wasmvmtypes.UFraction) (*wasmvmtypes.QueryResult, uint64, error) { + s.mockVM.RegisterQueryCallback(types.StatusMsg{}, func(_ wasmvm.Checksum, _ wasmvmtypes.Env, _ []byte, _ wasmvm.KVStore, _ wasmvm.GoAPI, querier wasmvm.Querier, _ wasmvm.GasMeter, _ uint64, _ wasmvmtypes.UFraction) (*wasmvmtypes.QueryResult, uint64, error) { resp, err := querier.Query(wasmvmtypes.QueryRequest{Custom: json.RawMessage("{}")}, math.MaxUint64) - suite.Require().ErrorIs(err, wasmvmtypes.UnsupportedRequest{Kind: "Custom queries are not allowed"}) - suite.Require().Nil(resp) + s.Require().ErrorIs(err, wasmvmtypes.UnsupportedRequest{Kind: "Custom queries are not allowed"}) + s.Require().Nil(resp) return nil, wasmtesting.DefaultGasUsed, err }) @@ -101,39 +101,39 @@ func (suite *KeeperTestSuite) TestCustomQuery() { } for _, tc := range testCases { - suite.Run(tc.name, func() { - suite.SetupWasmWithMockVM() - _ = suite.storeWasmCode(wasmtesting.Code) + s.Run(tc.name, func() { + s.SetupWasmWithMockVM() + _ = s.storeWasmCode(wasmtesting.Code) - endpoint := wasmtesting.NewWasmEndpoint(suite.chainA) + endpoint := wasmtesting.NewWasmEndpoint(s.chainA) err := endpoint.CreateClient() - suite.Require().NoError(err) + s.Require().NoError(err) tc.malleate() - wasmClientKeeper := GetSimApp(suite.chainA).WasmClientKeeper + wasmClientKeeper := GetSimApp(s.chainA).WasmClientKeeper - clientStore := suite.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(suite.chainA.GetContext(), endpoint.ClientID) + clientStore := s.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(s.chainA.GetContext(), endpoint.ClientID) clientState, ok := endpoint.GetClientState().(*types.ClientState) - suite.Require().True(ok) + s.Require().True(ok) - res, err := wasmClientKeeper.WasmQuery(suite.chainA.GetContext(), endpoint.ClientID, clientStore, clientState, types.QueryMsg{Status: &types.StatusMsg{}}) + res, err := wasmClientKeeper.WasmQuery(s.chainA.GetContext(), endpoint.ClientID, clientStore, clientState, types.QueryMsg{Status: &types.StatusMsg{}}) if tc.expError == nil { - suite.Require().Nil(err) - suite.Require().NotNil(res) + s.Require().Nil(err) + s.Require().NotNil(res) } else { - suite.Require().Nil(res) - suite.Require().ErrorIs(err, tc.expError) + s.Require().Nil(res) + s.Require().ErrorIs(err, tc.expError) } // reset query plugins after each test - wasmClientKeeper.SetQueryPlugins(keeper.NewDefaultQueryPlugins(GetSimApp(suite.chainA).GRPCQueryRouter())) + wasmClientKeeper.SetQueryPlugins(keeper.NewDefaultQueryPlugins(GetSimApp(s.chainA).GRPCQueryRouter())) }) } } -func (suite *KeeperTestSuite) TestStargateQuery() { +func (s *KeeperTestSuite) TestStargateQuery() { typeURL := "/ibc.lightclients.wasm.v1.Query/Checksums" var ( @@ -154,15 +154,15 @@ func (suite *KeeperTestSuite) TestStargateQuery() { "success: custom query", func() { querierPlugin := keeper.QueryPlugins{ - Stargate: 
keeper.AcceptListStargateQuerier([]string{typeURL}, GetSimApp(suite.chainA).GRPCQueryRouter()), + Stargate: keeper.AcceptListStargateQuerier([]string{typeURL}, GetSimApp(s.chainA).GRPCQueryRouter()), } - GetSimApp(suite.chainA).WasmClientKeeper.SetQueryPlugins(querierPlugin) + GetSimApp(s.chainA).WasmClientKeeper.SetQueryPlugins(querierPlugin) - suite.mockVM.RegisterQueryCallback(types.TimestampAtHeightMsg{}, func(_ wasmvm.Checksum, _ wasmvmtypes.Env, _ []byte, store wasmvm.KVStore, _ wasmvm.GoAPI, querier wasmvm.Querier, _ wasmvm.GasMeter, _ uint64, _ wasmvmtypes.UFraction) (*wasmvmtypes.QueryResult, uint64, error) { + s.mockVM.RegisterQueryCallback(types.TimestampAtHeightMsg{}, func(_ wasmvm.Checksum, _ wasmvmtypes.Env, _ []byte, store wasmvm.KVStore, _ wasmvm.GoAPI, querier wasmvm.Querier, _ wasmvm.GasMeter, _ uint64, _ wasmvmtypes.UFraction) (*wasmvmtypes.QueryResult, uint64, error) { queryRequest := types.QueryChecksumsRequest{} bz, err := queryRequest.Marshal() - suite.Require().NoError(err) + s.Require().NoError(err) resp, err := querier.Query(wasmvmtypes.QueryRequest{ Stargate: &wasmvmtypes.StargateQuery{ @@ -170,21 +170,21 @@ func (suite *KeeperTestSuite) TestStargateQuery() { Data: bz, }, }, math.MaxUint64) - suite.Require().NoError(err) + s.Require().NoError(err) var respData types.QueryChecksumsResponse err = respData.Unmarshal(resp) - suite.Require().NoError(err) + s.Require().NoError(err) expChecksum := hex.EncodeToString(checksum) - suite.Require().Len(respData.Checksums, 1) - suite.Require().Equal(expChecksum, respData.Checksums[0]) + s.Require().Len(respData.Checksums, 1) + s.Require().Equal(expChecksum, respData.Checksums[0]) store.Set(testKey, value) result, err := json.Marshal(types.TimestampAtHeightResult{}) - suite.Require().NoError(err) + s.Require().NoError(err) return &wasmvmtypes.QueryResult{Ok: result}, wasmtesting.DefaultGasUsed, nil }) @@ -201,22 +201,22 @@ func (suite *KeeperTestSuite) TestStargateQuery() { "success: verify membership query", func() { querierPlugin := keeper.QueryPlugins{ - Stargate: keeper.AcceptListStargateQuerier([]string{""}, GetSimApp(suite.chainA).GRPCQueryRouter()), + Stargate: keeper.AcceptListStargateQuerier([]string{""}, GetSimApp(s.chainA).GRPCQueryRouter()), } - GetSimApp(suite.chainA).WasmClientKeeper.SetQueryPlugins(querierPlugin) + GetSimApp(s.chainA).WasmClientKeeper.SetQueryPlugins(querierPlugin) - store := suite.chainA.GetContext().KVStore(GetSimApp(suite.chainA).GetKey(exported.StoreKey)) + store := s.chainA.GetContext().KVStore(GetSimApp(s.chainA).GetKey(exported.StoreKey)) store.Set(proofKey, value) - suite.coordinator.CommitBlock(suite.chainA) - proof, proofHeight := endpoint.QueryProofAtHeight(proofKey, uint64(suite.chainA.GetContext().BlockHeight())) + s.coordinator.CommitBlock(s.chainA) + proof, proofHeight := endpoint.QueryProofAtHeight(proofKey, uint64(s.chainA.GetContext().BlockHeight())) merklePath := commitmenttypes.NewMerklePath(proofKey) - merklePath, err := commitmenttypes.ApplyPrefix(suite.chainA.GetPrefix(), merklePath) - suite.Require().NoError(err) + merklePath, err := commitmenttypes.ApplyPrefix(s.chainA.GetPrefix(), merklePath) + s.Require().NoError(err) - suite.mockVM.RegisterQueryCallback(types.TimestampAtHeightMsg{}, func(_ wasmvm.Checksum, _ wasmvmtypes.Env, _ []byte, _ wasmvm.KVStore, _ wasmvm.GoAPI, querier wasmvm.Querier, _ wasmvm.GasMeter, _ uint64, _ wasmvmtypes.UFraction) (*wasmvmtypes.QueryResult, uint64, error) { + s.mockVM.RegisterQueryCallback(types.TimestampAtHeightMsg{}, func(_ 
wasmvm.Checksum, _ wasmvmtypes.Env, _ []byte, _ wasmvm.KVStore, _ wasmvm.GoAPI, querier wasmvm.Querier, _ wasmvm.GasMeter, _ uint64, _ wasmvmtypes.UFraction) (*wasmvmtypes.QueryResult, uint64, error) { queryRequest := clienttypes.QueryVerifyMembershipRequest{ ClientId: endpoint.ClientID, Proof: proof, @@ -226,7 +226,7 @@ func (suite *KeeperTestSuite) TestStargateQuery() { } bz, err := queryRequest.Marshal() - suite.Require().NoError(err) + s.Require().NoError(err) resp, err := querier.Query(wasmvmtypes.QueryRequest{ Stargate: &wasmvmtypes.StargateQuery{ @@ -234,37 +234,37 @@ func (suite *KeeperTestSuite) TestStargateQuery() { Data: bz, }, }, math.MaxUint64) - suite.Require().NoError(err) + s.Require().NoError(err) var respData clienttypes.QueryVerifyMembershipResponse err = respData.Unmarshal(resp) - suite.Require().NoError(err) + s.Require().NoError(err) - suite.Require().True(respData.Success) + s.Require().True(respData.Success) result, err := json.Marshal(types.TimestampAtHeightResult{}) - suite.Require().NoError(err) + s.Require().NoError(err) return &wasmvmtypes.QueryResult{Ok: result}, wasmtesting.DefaultGasUsed, nil }) - suite.mockVM.RegisterSudoCallback(types.VerifyMembershipMsg{}, func(_ wasmvm.Checksum, _ wasmvmtypes.Env, sudoMsg []byte, store wasmvm.KVStore, + s.mockVM.RegisterSudoCallback(types.VerifyMembershipMsg{}, func(_ wasmvm.Checksum, _ wasmvmtypes.Env, sudoMsg []byte, store wasmvm.KVStore, _ wasmvm.GoAPI, _ wasmvm.Querier, _ wasmvm.GasMeter, _ uint64, _ wasmvmtypes.UFraction, ) (*wasmvmtypes.ContractResult, uint64, error) { var payload types.SudoMsg err := json.Unmarshal(sudoMsg, &payload) - suite.Require().NoError(err) + s.Require().NoError(err) var merkleProof commitmenttypes.MerkleProof - err = suite.chainA.Codec.Unmarshal(payload.VerifyMembership.Proof, &merkleProof) - suite.Require().NoError(err) + err = s.chainA.Codec.Unmarshal(payload.VerifyMembership.Proof, &merkleProof) + s.Require().NoError(err) - root := commitmenttypes.NewMerkleRoot(suite.chainA.App.LastCommitID().Hash) + root := commitmenttypes.NewMerkleRoot(s.chainA.App.LastCommitID().Hash) err = merkleProof.VerifyMembership(commitmenttypes.GetSDKSpecs(), root, merklePath, payload.VerifyMembership.Value) - suite.Require().NoError(err) + s.Require().NoError(err) bz, err := json.Marshal(types.EmptyResult{}) - suite.Require().NoError(err) + s.Require().NoError(err) expDiscardedState = true store.Set(testKey, value) @@ -277,10 +277,10 @@ func (suite *KeeperTestSuite) TestStargateQuery() { { "failure: default querier", func() { - suite.mockVM.RegisterQueryCallback(types.TimestampAtHeightMsg{}, func(_ wasmvm.Checksum, _ wasmvmtypes.Env, _ []byte, store wasmvm.KVStore, _ wasmvm.GoAPI, querier wasmvm.Querier, _ wasmvm.GasMeter, _ uint64, _ wasmvmtypes.UFraction) (*wasmvmtypes.QueryResult, uint64, error) { + s.mockVM.RegisterQueryCallback(types.TimestampAtHeightMsg{}, func(_ wasmvm.Checksum, _ wasmvmtypes.Env, _ []byte, store wasmvm.KVStore, _ wasmvm.GoAPI, querier wasmvm.Querier, _ wasmvm.GasMeter, _ uint64, _ wasmvmtypes.UFraction) (*wasmvmtypes.QueryResult, uint64, error) { queryRequest := types.QueryChecksumsRequest{} bz, err := queryRequest.Marshal() - suite.Require().NoError(err) + s.Require().NoError(err) resp, err := querier.Query(wasmvmtypes.QueryRequest{ Stargate: &wasmvmtypes.StargateQuery{ @@ -288,8 +288,8 @@ func (suite *KeeperTestSuite) TestStargateQuery() { Data: bz, }, }, math.MaxUint64) - suite.Require().ErrorIs(err, wasmvmtypes.UnsupportedRequest{Kind: fmt.Sprintf("'%s' path is not allowed from 
the contract", typeURL)}) - suite.Require().Nil(resp) + s.Require().ErrorIs(err, wasmvmtypes.UnsupportedRequest{Kind: fmt.Sprintf("'%s' path is not allowed from the contract", typeURL)}) + s.Require().Nil(resp) store.Set(testKey, value) @@ -301,18 +301,18 @@ func (suite *KeeperTestSuite) TestStargateQuery() { } for _, tc := range testCases { - suite.Run(tc.name, func() { + s.Run(tc.name, func() { expDiscardedState = false - suite.SetupWasmWithMockVM() - checksum = suite.storeWasmCode(wasmtesting.Code) + s.SetupWasmWithMockVM() + checksum = s.storeWasmCode(wasmtesting.Code) - endpoint = wasmtesting.NewWasmEndpoint(suite.chainA) + endpoint = wasmtesting.NewWasmEndpoint(s.chainA) err := endpoint.CreateClient() - suite.Require().NoError(err) + s.Require().NoError(err) tc.malleate() - wasmClientKeeper := GetSimApp(suite.chainA).WasmClientKeeper + wasmClientKeeper := GetSimApp(s.chainA).WasmClientKeeper payload := types.QueryMsg{ TimestampAtHeight: &types.TimestampAtHeightMsg{ @@ -320,33 +320,33 @@ func (suite *KeeperTestSuite) TestStargateQuery() { }, } - clientStore := suite.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(suite.chainA.GetContext(), endpoint.ClientID) + clientStore := s.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(s.chainA.GetContext(), endpoint.ClientID) clientState, ok := endpoint.GetClientState().(*types.ClientState) - suite.Require().True(ok) + s.Require().True(ok) // NOTE: we register query callbacks against: types.TimestampAtHeightMsg{} // in practise, this can against any client state msg, however registering against types.StatusMsg{} introduces recursive loops // due to test case: "success: verify membership query" - res, err := wasmClientKeeper.WasmQuery(suite.chainA.GetContext(), endpoint.ClientID, clientStore, clientState, payload) + res, err := wasmClientKeeper.WasmQuery(s.chainA.GetContext(), endpoint.ClientID, clientStore, clientState, payload) if tc.expError == nil { - suite.Require().NoError(err) - suite.Require().NotNil(res) + s.Require().NoError(err) + s.Require().NotNil(res) } else { - suite.Require().Nil(res) + s.Require().Nil(res) // use error contains as wasmvm errors do not implement errors.Is method - suite.Require().ErrorContains(err, tc.expError.Error()) + s.Require().ErrorContains(err, tc.expError.Error()) } - clientStore = suite.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(suite.chainA.GetContext(), endpoint.ClientID) + clientStore = s.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(s.chainA.GetContext(), endpoint.ClientID) if expDiscardedState { - suite.Require().False(clientStore.Has(testKey)) + s.Require().False(clientStore.Has(testKey)) } else { - suite.Require().True(clientStore.Has(testKey)) + s.Require().True(clientStore.Has(testKey)) } // reset query plugins after each test - wasmClientKeeper.SetQueryPlugins(keeper.NewDefaultQueryPlugins(GetSimApp(suite.chainA).GRPCQueryRouter())) + wasmClientKeeper.SetQueryPlugins(keeper.NewDefaultQueryPlugins(GetSimApp(s.chainA).GRPCQueryRouter())) }) } } diff --git a/modules/light-clients/08-wasm/keeper/snapshotter.go b/modules/light-clients/08-wasm/keeper/snapshotter.go index b24c2b369d9..6ce6c9e5bc1 100644 --- a/modules/light-clients/08-wasm/keeper/snapshotter.go +++ b/modules/light-clients/08-wasm/keeper/snapshotter.go @@ -2,6 +2,7 @@ package keeper import ( "encoding/hex" + "errors" "io" errorsmod "cosmossdk.io/errors" @@ -131,7 +132,7 @@ func (ws *WasmSnapshotter) processAllItems( ctx := sdk.NewContext(ws.cms, cmtproto.Header{Height: int64(height)}, false, nil) for { payload, err := 
payloadReader() - if err == io.EOF { + if errors.Is(err, io.EOF) { break } else if err != nil { return err diff --git a/modules/light-clients/08-wasm/keeper/snapshotter_test.go b/modules/light-clients/08-wasm/keeper/snapshotter_test.go index 0c225836db5..3ca22497632 100644 --- a/modules/light-clients/08-wasm/keeper/snapshotter_test.go +++ b/modules/light-clients/08-wasm/keeper/snapshotter_test.go @@ -14,9 +14,9 @@ import ( "github.com/cosmos/ibc-go/modules/light-clients/08-wasm/v10/types" ) -func (suite *KeeperTestSuite) TestSnapshotter() { +func (s *KeeperTestSuite) TestSnapshotter() { gzippedContract, err := types.GzipIt(wasmtesting.CreateMockContract([]byte("gzipped-contract"))) - suite.Require().NoError(err) + s.Require().NoError(err) testCases := []struct { name string @@ -33,11 +33,9 @@ func (suite *KeeperTestSuite) TestSnapshotter() { } for _, tc := range testCases { - tc := tc - - suite.Run(tc.name, func() { - t := suite.T() - wasmClientApp := suite.SetupSnapshotterWithMockVM() + s.Run(tc.name, func() { + t := s.T() + wasmClientApp := s.SetupSnapshotterWithMockVM() ctx := wasmClientApp.NewUncachedContext(false, cmtproto.Header{ ChainID: "foo", @@ -53,26 +51,26 @@ func (suite *KeeperTestSuite) TestSnapshotter() { msg := types.NewMsgStoreCode(signer, contract) res, err := wasmClientApp.WasmClientKeeper.StoreCode(ctx, msg) - suite.Require().NoError(err) + s.Require().NoError(err) checksums = append(checksums, res.Checksum) srcChecksumCodes = append(srcChecksumCodes, res.Checksum...) - suite.Require().NoError(err) + s.Require().NoError(err) } // create snapshot res, err := wasmClientApp.Commit() - suite.Require().NoError(err) - suite.Require().NotNil(res) + s.Require().NoError(err) + s.Require().NotNil(res) snapshotHeight := uint64(wasmClientApp.LastBlockHeight()) snapshot, err := wasmClientApp.SnapshotManager().Create(snapshotHeight) - suite.Require().NoError(err) - suite.Require().NotNil(snapshot) + s.Require().NoError(err) + s.Require().NotNil(snapshot) // setup dest app with snapshot imported - destWasmClientApp := simapp.SetupWithEmptyStore(t, suite.mockVM) + destWasmClientApp := simapp.SetupWithEmptyStore(t, s.mockVM) destCtx := destWasmClientApp.NewUncachedContext(false, cmtproto.Header{ ChainID: "bar", Height: destWasmClientApp.LastBlockHeight() + 1, @@ -80,17 +78,17 @@ func (suite *KeeperTestSuite) TestSnapshotter() { }) resp, err := destWasmClientApp.WasmClientKeeper.Checksums(destCtx, &types.QueryChecksumsRequest{}) - suite.Require().NoError(err) - suite.Require().Empty(resp.Checksums) + s.Require().NoError(err) + s.Require().Empty(resp.Checksums) - suite.Require().NoError(destWasmClientApp.SnapshotManager().Restore(*snapshot)) + s.Require().NoError(destWasmClientApp.SnapshotManager().Restore(*snapshot)) - for i := uint32(0); i < snapshot.Chunks; i++ { + for i := range snapshot.Chunks { chunkBz, err := wasmClientApp.SnapshotManager().LoadChunk(snapshot.Height, snapshot.Format, i) - suite.Require().NoError(err) + s.Require().NoError(err) end, err := destWasmClientApp.SnapshotManager().RestoreChunk(chunkBz) - suite.Require().NoError(err) + s.Require().NoError(err) if end { break @@ -107,15 +105,15 @@ func (suite *KeeperTestSuite) TestSnapshotter() { for _, checksum := range checksums { resp, err := destWasmClientApp.WasmClientKeeper.Code(ctx, &types.QueryCodeRequest{Checksum: hex.EncodeToString(checksum)}) - suite.Require().NoError(err) + s.Require().NoError(err) checksum, err := types.CreateChecksum(resp.Data) - suite.Require().NoError(err) + s.Require().NoError(err) 
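The snapshotter.go hunk above adds the errors import and replaces the direct comparison err == io.EOF with errors.Is(err, io.EOF). The two behave identically only while the payload reader returns the bare sentinel; errors.Is keeps working if the error ever arrives wrapped, and it is the form linters such as errorlint typically ask for. A short self-contained sketch of the difference (the wrapping below is illustrative, not taken from the snapshotter):

package main

import (
    "errors"
    "fmt"
    "io"
)

func main() {
    // A hypothetical reader that annotates the sentinel before returning it.
    err := fmt.Errorf("read payload: %w", io.EOF)

    fmt.Println(err == io.EOF)          // false: the wrapped error is a different value
    fmt.Println(errors.Is(err, io.EOF)) // true: errors.Is walks the wrap chain
}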
allDestAppChecksumsInWasmVMStore = append(allDestAppChecksumsInWasmVMStore, checksum...) } - suite.Require().Equal(srcChecksumCodes, allDestAppChecksumsInWasmVMStore) + s.Require().Equal(srcChecksumCodes, allDestAppChecksumsInWasmVMStore) }) } } diff --git a/modules/light-clients/08-wasm/light_client_module_test.go b/modules/light-clients/08-wasm/light_client_module_test.go index cc574f95976..ef890ae6fd9 100644 --- a/modules/light-clients/08-wasm/light_client_module_test.go +++ b/modules/light-clients/08-wasm/light_client_module_test.go @@ -32,7 +32,7 @@ const ( unusedWasmClientID = "08-wasm-100" ) -func (suite *WasmTestSuite) TestStatus() { +func (s *WasmTestSuite) TestStatus() { var clientID string testCases := []struct { @@ -48,9 +48,9 @@ func (suite *WasmTestSuite) TestStatus() { { "client is frozen", func() { - suite.mockVM.RegisterQueryCallback(types.StatusMsg{}, func(checksum wasmvm.Checksum, env wasmvmtypes.Env, queryMsg []byte, store wasmvm.KVStore, goapi wasmvm.GoAPI, querier wasmvm.Querier, gasMeter wasmvm.GasMeter, gasLimit uint64, deserCost wasmvmtypes.UFraction) (*wasmvmtypes.QueryResult, uint64, error) { + s.mockVM.RegisterQueryCallback(types.StatusMsg{}, func(checksum wasmvm.Checksum, env wasmvmtypes.Env, queryMsg []byte, store wasmvm.KVStore, goapi wasmvm.GoAPI, querier wasmvm.Querier, gasMeter wasmvm.GasMeter, gasLimit uint64, deserCost wasmvmtypes.UFraction) (*wasmvmtypes.QueryResult, uint64, error) { resp, err := json.Marshal(types.StatusResult{Status: exported.Frozen.String()}) - suite.Require().NoError(err) + s.Require().NoError(err) return &wasmvmtypes.QueryResult{Ok: resp}, wasmtesting.DefaultGasUsed, nil }) }, @@ -59,9 +59,9 @@ func (suite *WasmTestSuite) TestStatus() { { "client status is expired", func() { - suite.mockVM.RegisterQueryCallback(types.StatusMsg{}, func(checksum wasmvm.Checksum, env wasmvmtypes.Env, queryMsg []byte, store wasmvm.KVStore, goapi wasmvm.GoAPI, querier wasmvm.Querier, gasMeter wasmvm.GasMeter, gasLimit uint64, deserCost wasmvmtypes.UFraction) (*wasmvmtypes.QueryResult, uint64, error) { + s.mockVM.RegisterQueryCallback(types.StatusMsg{}, func(checksum wasmvm.Checksum, env wasmvmtypes.Env, queryMsg []byte, store wasmvm.KVStore, goapi wasmvm.GoAPI, querier wasmvm.Querier, gasMeter wasmvm.GasMeter, gasLimit uint64, deserCost wasmvmtypes.UFraction) (*wasmvmtypes.QueryResult, uint64, error) { resp, err := json.Marshal(types.StatusResult{Status: exported.Expired.String()}) - suite.Require().NoError(err) + s.Require().NoError(err) return &wasmvmtypes.QueryResult{Ok: resp}, wasmtesting.DefaultGasUsed, nil }) }, @@ -70,7 +70,7 @@ func (suite *WasmTestSuite) TestStatus() { { "client status is unknown: vm returns an error", func() { - suite.mockVM.RegisterQueryCallback(types.StatusMsg{}, func(checksum wasmvm.Checksum, env wasmvmtypes.Env, queryMsg []byte, store wasmvm.KVStore, goapi wasmvm.GoAPI, querier wasmvm.Querier, gasMeter wasmvm.GasMeter, gasLimit uint64, deserCost wasmvmtypes.UFraction) (*wasmvmtypes.QueryResult, uint64, error) { + s.mockVM.RegisterQueryCallback(types.StatusMsg{}, func(checksum wasmvm.Checksum, env wasmvmtypes.Env, queryMsg []byte, store wasmvm.KVStore, goapi wasmvm.GoAPI, querier wasmvm.Querier, gasMeter wasmvm.GasMeter, gasLimit uint64, deserCost wasmvmtypes.UFraction) (*wasmvmtypes.QueryResult, uint64, error) { return nil, 0, wasmtesting.ErrMockContract }) }, @@ -79,9 +79,9 @@ func (suite *WasmTestSuite) TestStatus() { { "client status is unauthorized: checksum is not stored", func() { - wasmClientKeeper := 
GetSimApp(suite.chainA).WasmClientKeeper - err := wasmClientKeeper.GetChecksums().Remove(suite.chainA.GetContext(), suite.checksum) - suite.Require().NoError(err) + wasmClientKeeper := GetSimApp(s.chainA).WasmClientKeeper + err := wasmClientKeeper.GetChecksums().Remove(s.chainA.GetContext(), s.checksum) + s.Require().NoError(err) }, exported.Unauthorized, }, @@ -95,7 +95,7 @@ func (suite *WasmTestSuite) TestStatus() { { "failure: response fails to unmarshal", func() { - suite.mockVM.RegisterQueryCallback(types.StatusMsg{}, func(_ wasmvm.Checksum, _ wasmvmtypes.Env, _ []byte, _ wasmvm.KVStore, _ wasmvm.GoAPI, _ wasmvm.Querier, _ wasmvm.GasMeter, _ uint64, _ wasmvmtypes.UFraction) (*wasmvmtypes.QueryResult, uint64, error) { + s.mockVM.RegisterQueryCallback(types.StatusMsg{}, func(_ wasmvm.Checksum, _ wasmvmtypes.Env, _ []byte, _ wasmvm.KVStore, _ wasmvm.GoAPI, _ wasmvm.Querier, _ wasmvm.GasMeter, _ uint64, _ wasmvmtypes.UFraction) (*wasmvmtypes.QueryResult, uint64, error) { return &wasmvmtypes.QueryResult{Ok: []byte("invalid json")}, wasmtesting.DefaultGasUsed, nil }) }, @@ -104,28 +104,26 @@ func (suite *WasmTestSuite) TestStatus() { } for _, tc := range testCases { - tc := tc + s.Run(tc.name, func() { + s.SetupWasmWithMockVM() - suite.Run(tc.name, func() { - suite.SetupWasmWithMockVM() - - endpoint := wasmtesting.NewWasmEndpoint(suite.chainA) + endpoint := wasmtesting.NewWasmEndpoint(s.chainA) err := endpoint.CreateClient() - suite.Require().NoError(err) + s.Require().NoError(err) clientID = endpoint.ClientID - lightClientModule, err := suite.chainA.App.GetIBCKeeper().ClientKeeper.Route(suite.chainA.GetContext(), clientID) - suite.Require().NoError(err) + lightClientModule, err := s.chainA.App.GetIBCKeeper().ClientKeeper.Route(s.chainA.GetContext(), clientID) + s.Require().NoError(err) tc.malleate() - status := lightClientModule.Status(suite.chainA.GetContext(), clientID) - suite.Require().Equal(tc.expStatus, status) + status := lightClientModule.Status(s.chainA.GetContext(), clientID) + s.Require().Equal(tc.expStatus, status) }) } } -func (suite *WasmTestSuite) TestTimestampAtHeight() { +func (s *WasmTestSuite) TestTimestampAtHeight() { var ( clientID string height exported.Height @@ -140,18 +138,18 @@ func (suite *WasmTestSuite) TestTimestampAtHeight() { { "success", func() { - suite.mockVM.RegisterQueryCallback(types.TimestampAtHeightMsg{}, func(_ wasmvm.Checksum, _ wasmvmtypes.Env, queryMsg []byte, _ wasmvm.KVStore, _ wasmvm.GoAPI, _ wasmvm.Querier, _ wasmvm.GasMeter, _ uint64, _ wasmvmtypes.UFraction) (*wasmvmtypes.QueryResult, uint64, error) { + s.mockVM.RegisterQueryCallback(types.TimestampAtHeightMsg{}, func(_ wasmvm.Checksum, _ wasmvmtypes.Env, queryMsg []byte, _ wasmvm.KVStore, _ wasmvm.GoAPI, _ wasmvm.Querier, _ wasmvm.GasMeter, _ uint64, _ wasmvmtypes.UFraction) (*wasmvmtypes.QueryResult, uint64, error) { var payload types.QueryMsg err := json.Unmarshal(queryMsg, &payload) - suite.Require().NoError(err) + s.Require().NoError(err) - suite.Require().NotNil(payload.TimestampAtHeight) - suite.Require().Nil(payload.CheckForMisbehaviour) - suite.Require().Nil(payload.Status) - suite.Require().Nil(payload.VerifyClientMessage) + s.Require().NotNil(payload.TimestampAtHeight) + s.Require().Nil(payload.CheckForMisbehaviour) + s.Require().Nil(payload.Status) + s.Require().Nil(payload.VerifyClientMessage) resp, err := json.Marshal(types.TimestampAtHeightResult{Timestamp: expectedTimestamp}) - suite.Require().NoError(err) + s.Require().NoError(err) return &wasmvmtypes.QueryResult{Ok: resp}, 
wasmtesting.DefaultGasUsed, nil }) @@ -161,7 +159,7 @@ func (suite *WasmTestSuite) TestTimestampAtHeight() { { "failure: vm returns error", func() { - suite.mockVM.RegisterQueryCallback(types.TimestampAtHeightMsg{}, func(_ wasmvm.Checksum, _ wasmvmtypes.Env, _ []byte, _ wasmvm.KVStore, _ wasmvm.GoAPI, _ wasmvm.Querier, _ wasmvm.GasMeter, _ uint64, _ wasmvmtypes.UFraction) (*wasmvmtypes.QueryResult, uint64, error) { + s.mockVM.RegisterQueryCallback(types.TimestampAtHeightMsg{}, func(_ wasmvm.Checksum, _ wasmvmtypes.Env, _ []byte, _ wasmvm.KVStore, _ wasmvm.GoAPI, _ wasmvm.Querier, _ wasmvm.GasMeter, _ uint64, _ wasmvmtypes.UFraction) (*wasmvmtypes.QueryResult, uint64, error) { return nil, 0, wasmtesting.ErrMockVM }) }, @@ -170,7 +168,7 @@ func (suite *WasmTestSuite) TestTimestampAtHeight() { { "failure: contract returns error", func() { - suite.mockVM.RegisterQueryCallback(types.TimestampAtHeightMsg{}, func(_ wasmvm.Checksum, _ wasmvmtypes.Env, _ []byte, _ wasmvm.KVStore, _ wasmvm.GoAPI, _ wasmvm.Querier, _ wasmvm.GasMeter, _ uint64, _ wasmvmtypes.UFraction) (*wasmvmtypes.QueryResult, uint64, error) { + s.mockVM.RegisterQueryCallback(types.TimestampAtHeightMsg{}, func(_ wasmvm.Checksum, _ wasmvmtypes.Env, _ []byte, _ wasmvm.KVStore, _ wasmvm.GoAPI, _ wasmvm.Querier, _ wasmvm.GasMeter, _ uint64, _ wasmvmtypes.UFraction) (*wasmvmtypes.QueryResult, uint64, error) { return &wasmvmtypes.QueryResult{Err: wasmtesting.ErrMockContract.Error()}, 0, nil }) }, @@ -193,7 +191,7 @@ func (suite *WasmTestSuite) TestTimestampAtHeight() { { "failure: response fails to unmarshal", func() { - suite.mockVM.RegisterQueryCallback(types.TimestampAtHeightMsg{}, func(_ wasmvm.Checksum, _ wasmvmtypes.Env, _ []byte, _ wasmvm.KVStore, _ wasmvm.GoAPI, _ wasmvm.Querier, _ wasmvm.GasMeter, _ uint64, _ wasmvmtypes.UFraction) (*wasmvmtypes.QueryResult, uint64, error) { + s.mockVM.RegisterQueryCallback(types.TimestampAtHeightMsg{}, func(_ wasmvm.Checksum, _ wasmvmtypes.Env, _ []byte, _ wasmvm.KVStore, _ wasmvm.GoAPI, _ wasmvm.Querier, _ wasmvm.GasMeter, _ uint64, _ wasmvmtypes.UFraction) (*wasmvmtypes.QueryResult, uint64, error) { return &wasmvmtypes.QueryResult{Ok: []byte("invalid json")}, wasmtesting.DefaultGasUsed, nil }) }, @@ -202,39 +200,38 @@ func (suite *WasmTestSuite) TestTimestampAtHeight() { } for _, tc := range testCases { - tc := tc - suite.Run(tc.name, func() { - suite.SetupWasmWithMockVM() + s.Run(tc.name, func() { + s.SetupWasmWithMockVM() - endpoint := wasmtesting.NewWasmEndpoint(suite.chainA) + endpoint := wasmtesting.NewWasmEndpoint(s.chainA) err := endpoint.CreateClient() - suite.Require().NoError(err) + s.Require().NoError(err) clientID = endpoint.ClientID clientState, ok := endpoint.GetClientState().(*types.ClientState) - suite.Require().True(ok) + s.Require().True(ok) height = clientState.LatestHeight - lightClientModule, err := suite.chainA.App.GetIBCKeeper().ClientKeeper.Route(suite.chainA.GetContext(), clientID) - suite.Require().NoError(err) + lightClientModule, err := s.chainA.App.GetIBCKeeper().ClientKeeper.Route(s.chainA.GetContext(), clientID) + s.Require().NoError(err) tc.malleate() - timestamp, err := lightClientModule.TimestampAtHeight(suite.chainA.GetContext(), clientID, height) + timestamp, err := lightClientModule.TimestampAtHeight(s.chainA.GetContext(), clientID, height) if tc.expErr == nil { - suite.Require().NoError(err) - suite.Require().Equal(expectedTimestamp, timestamp) + s.Require().NoError(err) + s.Require().Equal(expectedTimestamp, timestamp) } else { - 
suite.Require().ErrorIs(err, tc.expErr) - suite.Require().Equal(uint64(0), timestamp) + s.Require().ErrorIs(err, tc.expErr) + s.Require().Equal(uint64(0), timestamp) } }) } } -func (suite *WasmTestSuite) TestInitialize() { +func (s *WasmTestSuite) TestInitialize() { var ( consensusState exported.ConsensusState clientState exported.ClientState @@ -253,26 +250,26 @@ func (suite *WasmTestSuite) TestInitialize() { { "success: validate contract address", func() { - suite.mockVM.InstantiateFn = func(_ wasmvm.Checksum, env wasmvmtypes.Env, _ wasmvmtypes.MessageInfo, initMsg []byte, store wasmvm.KVStore, _ wasmvm.GoAPI, _ wasmvm.Querier, _ wasmvm.GasMeter, _ uint64, _ wasmvmtypes.UFraction) (*wasmvmtypes.ContractResult, uint64, error) { + s.mockVM.InstantiateFn = func(_ wasmvm.Checksum, env wasmvmtypes.Env, _ wasmvmtypes.MessageInfo, initMsg []byte, store wasmvm.KVStore, _ wasmvm.GoAPI, _ wasmvm.Querier, _ wasmvm.GasMeter, _ uint64, _ wasmvmtypes.UFraction) (*wasmvmtypes.ContractResult, uint64, error) { var payload types.InstantiateMessage err := json.Unmarshal(initMsg, &payload) - suite.Require().NoError(err) + s.Require().NoError(err) - suite.Require().Equal(env.Contract.Address, wasmClientID) + s.Require().Equal(env.Contract.Address, wasmClientID) - wrappedClientState, ok := clienttypes.MustUnmarshalClientState(suite.chainA.App.AppCodec(), payload.ClientState).(*ibctm.ClientState) - suite.Require().True(ok) + wrappedClientState, ok := clienttypes.MustUnmarshalClientState(s.chainA.App.AppCodec(), payload.ClientState).(*ibctm.ClientState) + s.Require().True(ok) clientState := types.NewClientState(payload.ClientState, payload.Checksum, wrappedClientState.LatestHeight) - clientStateBz := clienttypes.MustMarshalClientState(suite.chainA.App.AppCodec(), clientState) + clientStateBz := clienttypes.MustMarshalClientState(s.chainA.App.AppCodec(), clientState) store.Set(host.ClientStateKey(), clientStateBz) consensusState := types.NewConsensusState(payload.ConsensusState) - consensusStateBz := clienttypes.MustMarshalConsensusState(suite.chainA.App.AppCodec(), consensusState) + consensusStateBz := clienttypes.MustMarshalConsensusState(s.chainA.App.AppCodec(), consensusState) store.Set(host.ConsensusStateKey(clientState.LatestHeight), consensusStateBz) resp, err := json.Marshal(types.EmptyResult{}) - suite.Require().NoError(err) + s.Require().NoError(err) return &wasmvmtypes.ContractResult{Ok: &wasmvmtypes.Response{Data: resp}}, 0, nil } @@ -311,7 +308,7 @@ func (suite *WasmTestSuite) TestInitialize() { { "failure: vm returns error", func() { - suite.mockVM.InstantiateFn = func(_ wasmvm.Checksum, _ wasmvmtypes.Env, _ wasmvmtypes.MessageInfo, _ []byte, _ wasmvm.KVStore, _ wasmvm.GoAPI, _ wasmvm.Querier, _ wasmvm.GasMeter, _ uint64, _ wasmvmtypes.UFraction) (*wasmvmtypes.ContractResult, uint64, error) { + s.mockVM.InstantiateFn = func(_ wasmvm.Checksum, _ wasmvmtypes.Env, _ wasmvmtypes.MessageInfo, _ []byte, _ wasmvm.KVStore, _ wasmvm.GoAPI, _ wasmvm.Querier, _ wasmvm.GasMeter, _ uint64, _ wasmvmtypes.UFraction) (*wasmvmtypes.ContractResult, uint64, error) { return nil, 0, wasmtesting.ErrMockVM } }, @@ -320,7 +317,7 @@ func (suite *WasmTestSuite) TestInitialize() { { "failure: contract returns error", func() { - suite.mockVM.InstantiateFn = func(_ wasmvm.Checksum, _ wasmvmtypes.Env, _ wasmvmtypes.MessageInfo, _ []byte, _ wasmvm.KVStore, _ wasmvm.GoAPI, _ wasmvm.Querier, _ wasmvm.GasMeter, _ uint64, _ wasmvmtypes.UFraction) (*wasmvmtypes.ContractResult, uint64, error) { + s.mockVM.InstantiateFn = func(_ 
wasmvm.Checksum, _ wasmvmtypes.Env, _ wasmvmtypes.MessageInfo, _ []byte, _ wasmvm.KVStore, _ wasmvm.GoAPI, _ wasmvm.Querier, _ wasmvm.GasMeter, _ uint64, _ wasmvmtypes.UFraction) (*wasmvmtypes.ContractResult, uint64, error) { return &wasmvmtypes.ContractResult{Err: wasmtesting.ErrMockContract.Error()}, 0, nil } }, @@ -329,37 +326,37 @@ func (suite *WasmTestSuite) TestInitialize() { } for _, tc := range testCases { - suite.Run(tc.name, func() { - suite.SetupWasmWithMockVM() + s.Run(tc.name, func() { + s.SetupWasmWithMockVM() - wrappedClientStateBz := clienttypes.MustMarshalClientState(suite.chainA.App.AppCodec(), wasmtesting.MockTendermitClientState) - wrappedClientConsensusStateBz := clienttypes.MustMarshalConsensusState(suite.chainA.App.AppCodec(), wasmtesting.MockTendermintClientConsensusState) - clientState = types.NewClientState(wrappedClientStateBz, suite.checksum, wasmtesting.MockTendermitClientState.LatestHeight) + wrappedClientStateBz := clienttypes.MustMarshalClientState(s.chainA.App.AppCodec(), wasmtesting.MockTendermitClientState) + wrappedClientConsensusStateBz := clienttypes.MustMarshalConsensusState(s.chainA.App.AppCodec(), wasmtesting.MockTendermintClientConsensusState) + clientState = types.NewClientState(wrappedClientStateBz, s.checksum, wasmtesting.MockTendermitClientState.LatestHeight) consensusState = types.NewConsensusState(wrappedClientConsensusStateBz) - clientID := suite.chainA.App.GetIBCKeeper().ClientKeeper.GenerateClientIdentifier(suite.chainA.GetContext(), clientState.ClientType()) + clientID := s.chainA.App.GetIBCKeeper().ClientKeeper.GenerateClientIdentifier(s.chainA.GetContext(), clientState.ClientType()) - lightClientModule, err := suite.chainA.App.GetIBCKeeper().ClientKeeper.Route(suite.chainA.GetContext(), clientID) - suite.Require().NoError(err) + lightClientModule, err := s.chainA.App.GetIBCKeeper().ClientKeeper.Route(s.chainA.GetContext(), clientID) + s.Require().NoError(err) tc.malleate() // Marshal client state and consensus state: - clientStateBz := suite.chainA.Codec.MustMarshal(clientState) - consensusStateBz := suite.chainA.Codec.MustMarshal(consensusState) + clientStateBz := s.chainA.Codec.MustMarshal(clientState) + consensusStateBz := s.chainA.Codec.MustMarshal(consensusState) - err = lightClientModule.Initialize(suite.chainA.GetContext(), clientID, clientStateBz, consensusStateBz) + err = lightClientModule.Initialize(s.chainA.GetContext(), clientID, clientStateBz, consensusStateBz) if tc.expError == nil { - suite.Require().NoError(err) + s.Require().NoError(err) } else { - suite.Require().ErrorContains(err, tc.expError.Error()) + s.Require().ErrorContains(err, tc.expError.Error()) } }) } } -func (suite *WasmTestSuite) TestVerifyMembership() { +func (s *WasmTestSuite) TestVerifyMembership() { var ( clientState *types.ClientState expClientStateBz []byte @@ -378,16 +375,16 @@ func (suite *WasmTestSuite) TestVerifyMembership() { { "success", func() { - expClientStateBz = clienttypes.MustMarshalClientState(suite.chainA.App.AppCodec(), clientState) - suite.mockVM.RegisterSudoCallback(types.VerifyMembershipMsg{}, func(_ wasmvm.Checksum, _ wasmvmtypes.Env, sudoMsg []byte, _ wasmvm.KVStore, + expClientStateBz = clienttypes.MustMarshalClientState(s.chainA.App.AppCodec(), clientState) + s.mockVM.RegisterSudoCallback(types.VerifyMembershipMsg{}, func(_ wasmvm.Checksum, _ wasmvmtypes.Env, sudoMsg []byte, _ wasmvm.KVStore, _ wasmvm.GoAPI, _ wasmvm.Querier, _ wasmvm.GasMeter, _ uint64, _ wasmvmtypes.UFraction, ) (*wasmvmtypes.ContractResult, uint64, error) { 
var payload types.SudoMsg err := json.Unmarshal(sudoMsg, &payload) - suite.Require().NoError(err) + s.Require().NoError(err) bz, err := json.Marshal(types.EmptyResult{}) - suite.Require().NoError(err) + s.Require().NoError(err) return &wasmvmtypes.ContractResult{Ok: &wasmvmtypes.Response{Data: bz}}, wasmtesting.DefaultGasUsed, nil }) @@ -397,27 +394,27 @@ func (suite *WasmTestSuite) TestVerifyMembership() { { "success: with update client state", func() { - suite.mockVM.RegisterSudoCallback(types.VerifyMembershipMsg{}, func(_ wasmvm.Checksum, _ wasmvmtypes.Env, sudoMsg []byte, store wasmvm.KVStore, + s.mockVM.RegisterSudoCallback(types.VerifyMembershipMsg{}, func(_ wasmvm.Checksum, _ wasmvmtypes.Env, sudoMsg []byte, store wasmvm.KVStore, _ wasmvm.GoAPI, _ wasmvm.Querier, _ wasmvm.GasMeter, _ uint64, _ wasmvmtypes.UFraction, ) (*wasmvmtypes.ContractResult, uint64, error) { var payload types.SudoMsg err := json.Unmarshal(sudoMsg, &payload) - suite.Require().NoError(err) - - suite.Require().NotNil(payload.VerifyMembership) - suite.Require().Nil(payload.UpdateState) - suite.Require().Nil(payload.UpdateStateOnMisbehaviour) - suite.Require().Nil(payload.VerifyNonMembership) - suite.Require().Nil(payload.VerifyUpgradeAndUpdateState) - suite.Require().Equal(proofHeight, payload.VerifyMembership.Height) - suite.Require().Equal(proof, payload.VerifyMembership.Proof) - suite.Require().Equal(path, payload.VerifyMembership.Path) - suite.Require().Equal(value, payload.VerifyMembership.Value) + s.Require().NoError(err) + + s.Require().NotNil(payload.VerifyMembership) + s.Require().Nil(payload.UpdateState) + s.Require().Nil(payload.UpdateStateOnMisbehaviour) + s.Require().Nil(payload.VerifyNonMembership) + s.Require().Nil(payload.VerifyUpgradeAndUpdateState) + s.Require().Equal(proofHeight, payload.VerifyMembership.Height) + s.Require().Equal(proof, payload.VerifyMembership.Proof) + s.Require().Equal(path, payload.VerifyMembership.Path) + s.Require().Equal(value, payload.VerifyMembership.Value) bz, err := json.Marshal(types.EmptyResult{}) - suite.Require().NoError(err) + s.Require().NoError(err) - expClientStateBz = wasmtesting.CreateMockClientStateBz(suite.chainA.Codec, suite.checksum) + expClientStateBz = wasmtesting.CreateMockClientStateBz(s.chainA.Codec, s.checksum) store.Set(host.ClientStateKey(), expClientStateBz) return &wasmvmtypes.ContractResult{Ok: &wasmvmtypes.Response{Data: bz}}, wasmtesting.DefaultGasUsed, nil @@ -437,7 +434,7 @@ func (suite *WasmTestSuite) TestVerifyMembership() { func() { proof = wasmtesting.MockInvalidProofBz - suite.mockVM.RegisterSudoCallback(types.VerifyMembershipMsg{}, func(_ wasmvm.Checksum, _ wasmvmtypes.Env, _ []byte, _ wasmvm.KVStore, + s.mockVM.RegisterSudoCallback(types.VerifyMembershipMsg{}, func(_ wasmvm.Checksum, _ wasmvmtypes.Env, _ []byte, _ wasmvm.KVStore, _ wasmvm.GoAPI, _ wasmvm.Querier, _ wasmvm.GasMeter, _ uint64, _ wasmvmtypes.UFraction, ) (*wasmvmtypes.ContractResult, uint64, error) { return &wasmvmtypes.ContractResult{Err: commitmenttypes.ErrInvalidProof.Error()}, wasmtesting.DefaultGasUsed, nil @@ -469,13 +466,13 @@ func (suite *WasmTestSuite) TestVerifyMembership() { } for _, tc := range testCases { - suite.Run(tc.name, func() { + s.Run(tc.name, func() { var ok bool - suite.SetupWasmWithMockVM() + s.SetupWasmWithMockVM() - endpoint := wasmtesting.NewWasmEndpoint(suite.chainA) + endpoint := wasmtesting.NewWasmEndpoint(s.chainA) err := endpoint.CreateClient() - suite.Require().NoError(err) + s.Require().NoError(err) clientID = endpoint.ClientID path = 
commitmenttypes.NewMerklePath([]byte("/ibc/key/path")) @@ -484,28 +481,28 @@ func (suite *WasmTestSuite) TestVerifyMembership() { value = []byte("value") clientState, ok = endpoint.GetClientState().(*types.ClientState) - suite.Require().True(ok) + s.Require().True(ok) - lightClientModule, err := suite.chainA.App.GetIBCKeeper().ClientKeeper.Route(suite.chainA.GetContext(), clientID) - suite.Require().NoError(err) + lightClientModule, err := s.chainA.App.GetIBCKeeper().ClientKeeper.Route(s.chainA.GetContext(), clientID) + s.Require().NoError(err) tc.malleate() - err = lightClientModule.VerifyMembership(suite.chainA.GetContext(), clientID, proofHeight, 0, 0, proof, path, value) + err = lightClientModule.VerifyMembership(s.chainA.GetContext(), clientID, proofHeight, 0, 0, proof, path, value) if tc.expError == nil { - clientStore := suite.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(suite.chainA.GetContext(), clientID) + clientStore := s.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(s.chainA.GetContext(), clientID) - suite.Require().NoError(err) - suite.Require().Equal(expClientStateBz, clientStore.Get(host.ClientStateKey())) + s.Require().NoError(err) + s.Require().Equal(expClientStateBz, clientStore.Get(host.ClientStateKey())) } else { - suite.Require().ErrorIs(err, tc.expError) + s.Require().ErrorIs(err, tc.expError) } }) } } -func (suite *WasmTestSuite) TestVerifyNonMembership() { +func (s *WasmTestSuite) TestVerifyNonMembership() { var ( clientState *types.ClientState expClientStateBz []byte @@ -523,25 +520,25 @@ func (suite *WasmTestSuite) TestVerifyNonMembership() { { "success", func() { - expClientStateBz = clienttypes.MustMarshalClientState(suite.chainA.App.AppCodec(), clientState) - suite.mockVM.RegisterSudoCallback(types.VerifyNonMembershipMsg{}, func(_ wasmvm.Checksum, _ wasmvmtypes.Env, sudoMsg []byte, _ wasmvm.KVStore, + expClientStateBz = clienttypes.MustMarshalClientState(s.chainA.App.AppCodec(), clientState) + s.mockVM.RegisterSudoCallback(types.VerifyNonMembershipMsg{}, func(_ wasmvm.Checksum, _ wasmvmtypes.Env, sudoMsg []byte, _ wasmvm.KVStore, _ wasmvm.GoAPI, _ wasmvm.Querier, _ wasmvm.GasMeter, _ uint64, _ wasmvmtypes.UFraction, ) (*wasmvmtypes.ContractResult, uint64, error) { var payload types.SudoMsg err := json.Unmarshal(sudoMsg, &payload) - suite.Require().NoError(err) + s.Require().NoError(err) - suite.Require().NotNil(payload.VerifyNonMembership) - suite.Require().Nil(payload.UpdateState) - suite.Require().Nil(payload.UpdateStateOnMisbehaviour) - suite.Require().Nil(payload.VerifyMembership) - suite.Require().Nil(payload.VerifyUpgradeAndUpdateState) - suite.Require().Equal(proofHeight, payload.VerifyNonMembership.Height) - suite.Require().Equal(proof, payload.VerifyNonMembership.Proof) - suite.Require().Equal(path, payload.VerifyNonMembership.Path) + s.Require().NotNil(payload.VerifyNonMembership) + s.Require().Nil(payload.UpdateState) + s.Require().Nil(payload.UpdateStateOnMisbehaviour) + s.Require().Nil(payload.VerifyMembership) + s.Require().Nil(payload.VerifyUpgradeAndUpdateState) + s.Require().Equal(proofHeight, payload.VerifyNonMembership.Height) + s.Require().Equal(proof, payload.VerifyNonMembership.Proof) + s.Require().Equal(path, payload.VerifyNonMembership.Path) bz, err := json.Marshal(types.EmptyResult{}) - suite.Require().NoError(err) + s.Require().NoError(err) return &wasmvmtypes.ContractResult{Ok: &wasmvmtypes.Response{Data: bz}}, wasmtesting.DefaultGasUsed, nil }) @@ -551,26 +548,26 @@ func (suite *WasmTestSuite) TestVerifyNonMembership() { { 
"success: with update client state", func() { - suite.mockVM.RegisterSudoCallback(types.VerifyNonMembershipMsg{}, func(_ wasmvm.Checksum, _ wasmvmtypes.Env, sudoMsg []byte, store wasmvm.KVStore, + s.mockVM.RegisterSudoCallback(types.VerifyNonMembershipMsg{}, func(_ wasmvm.Checksum, _ wasmvmtypes.Env, sudoMsg []byte, store wasmvm.KVStore, _ wasmvm.GoAPI, _ wasmvm.Querier, _ wasmvm.GasMeter, _ uint64, _ wasmvmtypes.UFraction, ) (*wasmvmtypes.ContractResult, uint64, error) { var payload types.SudoMsg err := json.Unmarshal(sudoMsg, &payload) - suite.Require().NoError(err) + s.Require().NoError(err) - suite.Require().NotNil(payload.VerifyNonMembership) - suite.Require().Nil(payload.UpdateState) - suite.Require().Nil(payload.UpdateStateOnMisbehaviour) - suite.Require().Nil(payload.VerifyMembership) - suite.Require().Nil(payload.VerifyUpgradeAndUpdateState) - suite.Require().Equal(proofHeight, payload.VerifyNonMembership.Height) - suite.Require().Equal(proof, payload.VerifyNonMembership.Proof) - suite.Require().Equal(path, payload.VerifyNonMembership.Path) + s.Require().NotNil(payload.VerifyNonMembership) + s.Require().Nil(payload.UpdateState) + s.Require().Nil(payload.UpdateStateOnMisbehaviour) + s.Require().Nil(payload.VerifyMembership) + s.Require().Nil(payload.VerifyUpgradeAndUpdateState) + s.Require().Equal(proofHeight, payload.VerifyNonMembership.Height) + s.Require().Equal(proof, payload.VerifyNonMembership.Proof) + s.Require().Equal(path, payload.VerifyNonMembership.Path) bz, err := json.Marshal(types.EmptyResult{}) - suite.Require().NoError(err) + s.Require().NoError(err) - expClientStateBz = wasmtesting.CreateMockClientStateBz(suite.chainA.Codec, suite.checksum) + expClientStateBz = wasmtesting.CreateMockClientStateBz(s.chainA.Codec, s.checksum) store.Set(host.ClientStateKey(), expClientStateBz) return &wasmvmtypes.ContractResult{Ok: &wasmvmtypes.Response{Data: bz}}, wasmtesting.DefaultGasUsed, nil @@ -590,7 +587,7 @@ func (suite *WasmTestSuite) TestVerifyNonMembership() { func() { proof = wasmtesting.MockInvalidProofBz - suite.mockVM.RegisterSudoCallback(types.VerifyNonMembershipMsg{}, func(_ wasmvm.Checksum, _ wasmvmtypes.Env, _ []byte, _ wasmvm.KVStore, + s.mockVM.RegisterSudoCallback(types.VerifyNonMembershipMsg{}, func(_ wasmvm.Checksum, _ wasmvmtypes.Env, _ []byte, _ wasmvm.KVStore, _ wasmvm.GoAPI, _ wasmvm.Querier, _ wasmvm.GasMeter, _ uint64, _ wasmvmtypes.UFraction, ) (*wasmvmtypes.ContractResult, uint64, error) { return nil, wasmtesting.DefaultGasUsed, wasmtesting.ErrMockVM @@ -603,7 +600,7 @@ func (suite *WasmTestSuite) TestVerifyNonMembership() { func() { proof = wasmtesting.MockInvalidProofBz - suite.mockVM.RegisterSudoCallback(types.VerifyNonMembershipMsg{}, func(_ wasmvm.Checksum, _ wasmvmtypes.Env, _ []byte, _ wasmvm.KVStore, + s.mockVM.RegisterSudoCallback(types.VerifyNonMembershipMsg{}, func(_ wasmvm.Checksum, _ wasmvmtypes.Env, _ []byte, _ wasmvm.KVStore, _ wasmvm.GoAPI, _ wasmvm.Querier, _ wasmvm.GasMeter, _ uint64, _ wasmvmtypes.UFraction, ) (*wasmvmtypes.ContractResult, uint64, error) { return &wasmvmtypes.ContractResult{Err: commitmenttypes.ErrInvalidProof.Error()}, wasmtesting.DefaultGasUsed, nil @@ -635,13 +632,13 @@ func (suite *WasmTestSuite) TestVerifyNonMembership() { } for _, tc := range testCases { - suite.Run(tc.name, func() { + s.Run(tc.name, func() { var ok bool - suite.SetupWasmWithMockVM() + s.SetupWasmWithMockVM() - endpoint := wasmtesting.NewWasmEndpoint(suite.chainA) + endpoint := wasmtesting.NewWasmEndpoint(s.chainA) err := endpoint.CreateClient() 
- suite.Require().NoError(err) + s.Require().NoError(err) clientID = endpoint.ClientID path = commitmenttypes.NewMerklePath([]byte("/ibc/key/path")) @@ -649,28 +646,28 @@ func (suite *WasmTestSuite) TestVerifyNonMembership() { proofHeight = clienttypes.NewHeight(0, 1) clientState, ok = endpoint.GetClientState().(*types.ClientState) - suite.Require().True(ok) + s.Require().True(ok) - lightClientModule, err := suite.chainA.App.GetIBCKeeper().ClientKeeper.Route(suite.chainA.GetContext(), clientID) - suite.Require().NoError(err) + lightClientModule, err := s.chainA.App.GetIBCKeeper().ClientKeeper.Route(s.chainA.GetContext(), clientID) + s.Require().NoError(err) tc.malleate() - err = lightClientModule.VerifyNonMembership(suite.chainA.GetContext(), clientID, proofHeight, 0, 0, proof, path) + err = lightClientModule.VerifyNonMembership(s.chainA.GetContext(), clientID, proofHeight, 0, 0, proof, path) if tc.expError == nil { - clientStore := suite.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(suite.chainA.GetContext(), clientID) + clientStore := s.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(s.chainA.GetContext(), clientID) - suite.Require().NoError(err) - suite.Require().Equal(expClientStateBz, clientStore.Get(host.ClientStateKey())) + s.Require().NoError(err) + s.Require().Equal(expClientStateBz, clientStore.Get(host.ClientStateKey())) } else { - suite.Require().ErrorIs(err, tc.expError) + s.Require().ErrorIs(err, tc.expError) } }) } } -func (suite *WasmTestSuite) TestVerifyClientMessage() { +func (s *WasmTestSuite) TestVerifyClientMessage() { var ( clientMsg exported.ClientMessage clientID string @@ -684,22 +681,22 @@ func (suite *WasmTestSuite) TestVerifyClientMessage() { { "success: valid misbehaviour", func() { - suite.mockVM.RegisterQueryCallback(types.VerifyClientMessageMsg{}, func(_ wasmvm.Checksum, env wasmvmtypes.Env, queryMsg []byte, _ wasmvm.KVStore, _ wasmvm.GoAPI, _ wasmvm.Querier, _ wasmvm.GasMeter, _ uint64, _ wasmvmtypes.UFraction) (*wasmvmtypes.QueryResult, uint64, error) { + s.mockVM.RegisterQueryCallback(types.VerifyClientMessageMsg{}, func(_ wasmvm.Checksum, env wasmvmtypes.Env, queryMsg []byte, _ wasmvm.KVStore, _ wasmvm.GoAPI, _ wasmvm.Querier, _ wasmvm.GasMeter, _ uint64, _ wasmvmtypes.UFraction) (*wasmvmtypes.QueryResult, uint64, error) { var msg *types.QueryMsg err := json.Unmarshal(queryMsg, &msg) - suite.Require().NoError(err) + s.Require().NoError(err) - suite.Require().NotNil(msg.VerifyClientMessage) - suite.Require().NotNil(msg.VerifyClientMessage.ClientMessage) - suite.Require().Nil(msg.Status) - suite.Require().Nil(msg.CheckForMisbehaviour) - suite.Require().Nil(msg.TimestampAtHeight) + s.Require().NotNil(msg.VerifyClientMessage) + s.Require().NotNil(msg.VerifyClientMessage.ClientMessage) + s.Require().Nil(msg.Status) + s.Require().Nil(msg.CheckForMisbehaviour) + s.Require().Nil(msg.TimestampAtHeight) - suite.Require().Equal(env.Contract.Address, wasmClientID) + s.Require().Equal(env.Contract.Address, wasmClientID) resp, err := json.Marshal(types.EmptyResult{}) - suite.Require().NoError(err) + s.Require().NoError(err) return &wasmvmtypes.QueryResult{Ok: resp}, wasmtesting.DefaultGasUsed, nil }) @@ -718,9 +715,9 @@ func (suite *WasmTestSuite) TestVerifyClientMessage() { func() { clientMsg = &ibctm.Header{} - suite.mockVM.RegisterQueryCallback(types.VerifyClientMessageMsg{}, func(_ wasmvm.Checksum, _ wasmvmtypes.Env, queryMsg []byte, _ wasmvm.KVStore, _ wasmvm.GoAPI, _ wasmvm.Querier, _ wasmvm.GasMeter, _ uint64, _ wasmvmtypes.UFraction) 
(*wasmvmtypes.QueryResult, uint64, error) { + s.mockVM.RegisterQueryCallback(types.VerifyClientMessageMsg{}, func(_ wasmvm.Checksum, _ wasmvmtypes.Env, queryMsg []byte, _ wasmvm.KVStore, _ wasmvm.GoAPI, _ wasmvm.Querier, _ wasmvm.GasMeter, _ uint64, _ wasmvmtypes.UFraction) (*wasmvmtypes.QueryResult, uint64, error) { resp, err := json.Marshal(types.EmptyResult{}) - suite.Require().NoError(err) + s.Require().NoError(err) return &wasmvmtypes.QueryResult{Ok: resp}, wasmtesting.DefaultGasUsed, nil }) @@ -730,7 +727,7 @@ func (suite *WasmTestSuite) TestVerifyClientMessage() { { "failure: error return from vm", func() { - suite.mockVM.RegisterQueryCallback(types.VerifyClientMessageMsg{}, func(_ wasmvm.Checksum, _ wasmvmtypes.Env, queryMsg []byte, _ wasmvm.KVStore, _ wasmvm.GoAPI, _ wasmvm.Querier, _ wasmvm.GasMeter, _ uint64, _ wasmvmtypes.UFraction) (*wasmvmtypes.QueryResult, uint64, error) { + s.mockVM.RegisterQueryCallback(types.VerifyClientMessageMsg{}, func(_ wasmvm.Checksum, _ wasmvmtypes.Env, queryMsg []byte, _ wasmvm.KVStore, _ wasmvm.GoAPI, _ wasmvm.Querier, _ wasmvm.GasMeter, _ uint64, _ wasmvmtypes.UFraction) (*wasmvmtypes.QueryResult, uint64, error) { return nil, 0, wasmtesting.ErrMockVM }) }, @@ -739,7 +736,7 @@ func (suite *WasmTestSuite) TestVerifyClientMessage() { { "failure: error return from contract", func() { - suite.mockVM.RegisterQueryCallback(types.VerifyClientMessageMsg{}, func(_ wasmvm.Checksum, _ wasmvmtypes.Env, queryMsg []byte, _ wasmvm.KVStore, _ wasmvm.GoAPI, _ wasmvm.Querier, _ wasmvm.GasMeter, _ uint64, _ wasmvmtypes.UFraction) (*wasmvmtypes.QueryResult, uint64, error) { + s.mockVM.RegisterQueryCallback(types.VerifyClientMessageMsg{}, func(_ wasmvm.Checksum, _ wasmvmtypes.Env, queryMsg []byte, _ wasmvm.KVStore, _ wasmvm.GoAPI, _ wasmvm.Querier, _ wasmvm.GasMeter, _ uint64, _ wasmvmtypes.UFraction) (*wasmvmtypes.QueryResult, uint64, error) { return &wasmvmtypes.QueryResult{Err: wasmtesting.ErrMockContract.Error()}, 0, nil }) }, @@ -748,36 +745,36 @@ func (suite *WasmTestSuite) TestVerifyClientMessage() { } for _, tc := range testCases { - suite.Run(tc.name, func() { + s.Run(tc.name, func() { // reset suite to create fresh application state - suite.SetupWasmWithMockVM() + s.SetupWasmWithMockVM() - endpoint := wasmtesting.NewWasmEndpoint(suite.chainA) + endpoint := wasmtesting.NewWasmEndpoint(s.chainA) err := endpoint.CreateClient() - suite.Require().NoError(err) + s.Require().NoError(err) clientID = endpoint.ClientID clientMsg = &types.ClientMessage{ - Data: clienttypes.MustMarshalClientMessage(suite.chainA.App.AppCodec(), wasmtesting.MockTendermintClientHeader), + Data: clienttypes.MustMarshalClientMessage(s.chainA.App.AppCodec(), wasmtesting.MockTendermintClientHeader), } - lightClientModule, err := suite.chainA.App.GetIBCKeeper().ClientKeeper.Route(suite.chainA.GetContext(), clientID) - suite.Require().NoError(err) + lightClientModule, err := s.chainA.App.GetIBCKeeper().ClientKeeper.Route(s.chainA.GetContext(), clientID) + s.Require().NoError(err) tc.malleate() - err = lightClientModule.VerifyClientMessage(suite.chainA.GetContext(), clientID, clientMsg) + err = lightClientModule.VerifyClientMessage(s.chainA.GetContext(), clientID, clientMsg) if tc.expErr == nil { - suite.Require().NoError(err) + s.Require().NoError(err) } else { - suite.Require().ErrorIs(err, tc.expErr) + s.Require().ErrorIs(err, tc.expErr) } }) } } -func (suite *WasmTestSuite) TestVerifyUpgradeAndUpdateState() { +func (s *WasmTestSuite) TestVerifyUpgradeAndUpdateState() { var ( upgradedClient 
exported.ClientState upgradedConsState exported.ConsensusState @@ -794,37 +791,37 @@ func (suite *WasmTestSuite) TestVerifyUpgradeAndUpdateState() { { "success: successful upgrade", func() { - suite.mockVM.RegisterSudoCallback(types.VerifyUpgradeAndUpdateStateMsg{}, func(_ wasmvm.Checksum, _ wasmvmtypes.Env, sudoMsg []byte, store wasmvm.KVStore, _ wasmvm.GoAPI, _ wasmvm.Querier, _ wasmvm.GasMeter, _ uint64, _ wasmvmtypes.UFraction) (*wasmvmtypes.ContractResult, uint64, error) { + s.mockVM.RegisterSudoCallback(types.VerifyUpgradeAndUpdateStateMsg{}, func(_ wasmvm.Checksum, _ wasmvmtypes.Env, sudoMsg []byte, store wasmvm.KVStore, _ wasmvm.GoAPI, _ wasmvm.Querier, _ wasmvm.GasMeter, _ uint64, _ wasmvmtypes.UFraction) (*wasmvmtypes.ContractResult, uint64, error) { var payload types.SudoMsg err := json.Unmarshal(sudoMsg, &payload) - suite.Require().NoError(err) + s.Require().NoError(err) expectedUpgradedClient, ok := upgradedClient.(*types.ClientState) - suite.Require().True(ok) + s.Require().True(ok) expectedUpgradedConsensus, ok := upgradedConsState.(*types.ConsensusState) - suite.Require().True(ok) + s.Require().True(ok) // verify payload values - suite.Require().Equal(expectedUpgradedClient.Data, payload.VerifyUpgradeAndUpdateState.UpgradeClientState) - suite.Require().Equal(expectedUpgradedConsensus.Data, payload.VerifyUpgradeAndUpdateState.UpgradeConsensusState) - suite.Require().Equal(upgradedClientProof, payload.VerifyUpgradeAndUpdateState.ProofUpgradeClient) - suite.Require().Equal(upgradedConsensusStateProof, payload.VerifyUpgradeAndUpdateState.ProofUpgradeConsensusState) + s.Require().Equal(expectedUpgradedClient.Data, payload.VerifyUpgradeAndUpdateState.UpgradeClientState) + s.Require().Equal(expectedUpgradedConsensus.Data, payload.VerifyUpgradeAndUpdateState.UpgradeConsensusState) + s.Require().Equal(upgradedClientProof, payload.VerifyUpgradeAndUpdateState.ProofUpgradeClient) + s.Require().Equal(upgradedConsensusStateProof, payload.VerifyUpgradeAndUpdateState.ProofUpgradeConsensusState) // verify other Sudo fields are nil - suite.Require().Nil(payload.UpdateState) - suite.Require().Nil(payload.UpdateStateOnMisbehaviour) - suite.Require().Nil(payload.VerifyMembership) - suite.Require().Nil(payload.VerifyNonMembership) + s.Require().Nil(payload.UpdateState) + s.Require().Nil(payload.UpdateStateOnMisbehaviour) + s.Require().Nil(payload.VerifyMembership) + s.Require().Nil(payload.VerifyNonMembership) data, err := json.Marshal(types.EmptyResult{}) - suite.Require().NoError(err) + s.Require().NoError(err) // set new client state and consensus state - wrappedUpgradedClient, ok := clienttypes.MustUnmarshalClientState(suite.chainA.App.AppCodec(), expectedUpgradedClient.Data).(*ibctm.ClientState) - suite.Require().True(ok) - store.Set(host.ClientStateKey(), clienttypes.MustMarshalClientState(suite.chainA.App.AppCodec(), upgradedClient)) - store.Set(host.ConsensusStateKey(wrappedUpgradedClient.LatestHeight), clienttypes.MustMarshalConsensusState(suite.chainA.App.AppCodec(), upgradedConsState)) + wrappedUpgradedClient, ok := clienttypes.MustUnmarshalClientState(s.chainA.App.AppCodec(), expectedUpgradedClient.Data).(*ibctm.ClientState) + s.Require().True(ok) + store.Set(host.ClientStateKey(), clienttypes.MustMarshalClientState(s.chainA.App.AppCodec(), upgradedClient)) + store.Set(host.ConsensusStateKey(wrappedUpgradedClient.LatestHeight), clienttypes.MustMarshalConsensusState(s.chainA.App.AppCodec(), upgradedConsState)) return &wasmvmtypes.ContractResult{Ok: &wasmvmtypes.Response{Data: data}}, 
wasmtesting.DefaultGasUsed, nil }) @@ -867,7 +864,7 @@ func (suite *WasmTestSuite) TestVerifyUpgradeAndUpdateState() { { "failure: vm returns error", func() { - suite.mockVM.RegisterSudoCallback(types.VerifyUpgradeAndUpdateStateMsg{}, func(_ wasmvm.Checksum, _ wasmvmtypes.Env, _ []byte, _ wasmvm.KVStore, _ wasmvm.GoAPI, _ wasmvm.Querier, _ wasmvm.GasMeter, _ uint64, _ wasmvmtypes.UFraction) (*wasmvmtypes.ContractResult, uint64, error) { + s.mockVM.RegisterSudoCallback(types.VerifyUpgradeAndUpdateStateMsg{}, func(_ wasmvm.Checksum, _ wasmvmtypes.Env, _ []byte, _ wasmvm.KVStore, _ wasmvm.GoAPI, _ wasmvm.Querier, _ wasmvm.GasMeter, _ uint64, _ wasmvmtypes.UFraction) (*wasmvmtypes.ContractResult, uint64, error) { return nil, 0, wasmtesting.ErrMockVM }) }, @@ -876,7 +873,7 @@ func (suite *WasmTestSuite) TestVerifyUpgradeAndUpdateState() { { "failure: contract returns error", func() { - suite.mockVM.RegisterSudoCallback(types.VerifyUpgradeAndUpdateStateMsg{}, func(_ wasmvm.Checksum, _ wasmvmtypes.Env, _ []byte, _ wasmvm.KVStore, _ wasmvm.GoAPI, _ wasmvm.Querier, _ wasmvm.GasMeter, _ uint64, _ wasmvmtypes.UFraction) (*wasmvmtypes.ContractResult, uint64, error) { + s.mockVM.RegisterSudoCallback(types.VerifyUpgradeAndUpdateStateMsg{}, func(_ wasmvm.Checksum, _ wasmvmtypes.Env, _ []byte, _ wasmvm.KVStore, _ wasmvm.GoAPI, _ wasmvm.Querier, _ wasmvm.GasMeter, _ uint64, _ wasmvmtypes.UFraction) (*wasmvmtypes.ContractResult, uint64, error) { return &wasmvmtypes.ContractResult{Err: wasmtesting.ErrMockContract.Error()}, 0, nil }) }, @@ -885,42 +882,42 @@ func (suite *WasmTestSuite) TestVerifyUpgradeAndUpdateState() { } for _, tc := range testCases { - suite.Run(tc.name, func() { + s.Run(tc.name, func() { // reset suite - suite.SetupWasmWithMockVM() + s.SetupWasmWithMockVM() - endpoint := wasmtesting.NewWasmEndpoint(suite.chainA) + endpoint := wasmtesting.NewWasmEndpoint(s.chainA) err := endpoint.CreateClient() - suite.Require().NoError(err) + s.Require().NoError(err) clientID = endpoint.ClientID clientState, ok := endpoint.GetClientState().(*types.ClientState) - suite.Require().True(ok) + s.Require().True(ok) newLatestHeight := clienttypes.NewHeight(2, 10) wrappedUpgradedClient := wasmtesting.CreateMockTendermintClientState(newLatestHeight) - wrappedUpgradedClientBz := clienttypes.MustMarshalClientState(suite.chainA.App.AppCodec(), wrappedUpgradedClient) + wrappedUpgradedClientBz := clienttypes.MustMarshalClientState(s.chainA.App.AppCodec(), wrappedUpgradedClient) upgradedClient = types.NewClientState(wrappedUpgradedClientBz, clientState.Checksum, newLatestHeight) wrappedUpgradedConsensus := ibctm.NewConsensusState(time.Now(), commitmenttypes.NewMerkleRoot([]byte("new-hash")), []byte("new-nextValsHash")) - wrappedUpgradedConsensusBz := clienttypes.MustMarshalConsensusState(suite.chainA.App.AppCodec(), wrappedUpgradedConsensus) + wrappedUpgradedConsensusBz := clienttypes.MustMarshalConsensusState(s.chainA.App.AppCodec(), wrappedUpgradedConsensus) upgradedConsState = types.NewConsensusState(wrappedUpgradedConsensusBz) - lightClientModule, err := suite.chainA.App.GetIBCKeeper().ClientKeeper.Route(suite.chainA.GetContext(), clientID) - suite.Require().NoError(err) + lightClientModule, err := s.chainA.App.GetIBCKeeper().ClientKeeper.Route(s.chainA.GetContext(), clientID) + s.Require().NoError(err) tc.malleate() - clientStore := suite.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(suite.chainA.GetContext(), wasmClientID) + clientStore := 
s.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(s.chainA.GetContext(), wasmClientID) upgradedClientProof = wasmtesting.MockUpgradedClientStateProofBz upgradedConsensusStateProof = wasmtesting.MockUpgradedConsensusStateProofBz - newClient := suite.chainA.Codec.MustMarshal(upgradedClient) - newConsensusState := suite.chainA.Codec.MustMarshal(upgradedConsState) + newClient := s.chainA.Codec.MustMarshal(upgradedClient) + newConsensusState := s.chainA.Codec.MustMarshal(upgradedConsState) err = lightClientModule.VerifyUpgradeAndUpdateState( - suite.chainA.GetContext(), + s.chainA.GetContext(), clientID, newClient, newConsensusState, @@ -929,25 +926,25 @@ func (suite *WasmTestSuite) TestVerifyUpgradeAndUpdateState() { ) if tc.expErr == nil { - suite.Require().NoError(err) + s.Require().NoError(err) // verify new client state and consensus state clientStateBz := clientStore.Get(host.ClientStateKey()) - suite.Require().NotEmpty(clientStateBz) - suite.Require().Equal(upgradedClient, clienttypes.MustUnmarshalClientState(suite.chainA.Codec, clientStateBz)) + s.Require().NotEmpty(clientStateBz) + s.Require().Equal(upgradedClient, clienttypes.MustUnmarshalClientState(s.chainA.Codec, clientStateBz)) consStateBz := clientStore.Get(host.ConsensusStateKey(upgradedClient.(*types.ClientState).LatestHeight)) - suite.Require().NotEmpty(consStateBz) - suite.Require().Equal(upgradedConsState, clienttypes.MustUnmarshalConsensusState(suite.chainA.Codec, consStateBz)) + s.Require().NotEmpty(consStateBz) + s.Require().Equal(upgradedConsState, clienttypes.MustUnmarshalConsensusState(s.chainA.Codec, consStateBz)) } else { - suite.Require().Error(err) - suite.Require().ErrorIs(err, tc.expErr) + s.Require().Error(err) + s.Require().ErrorIs(err, tc.expErr) } }) } } -func (suite *WasmTestSuite) TestCheckForMisbehaviour() { +func (s *WasmTestSuite) TestCheckForMisbehaviour() { var ( clientMessage exported.ClientMessage clientID string @@ -962,9 +959,9 @@ func (suite *WasmTestSuite) TestCheckForMisbehaviour() { { "success: no misbehaviour", func() { - suite.mockVM.RegisterQueryCallback(types.CheckForMisbehaviourMsg{}, func(_ wasmvm.Checksum, _ wasmvmtypes.Env, _ []byte, _ wasmvm.KVStore, _ wasmvm.GoAPI, _ wasmvm.Querier, _ wasmvm.GasMeter, _ uint64, _ wasmvmtypes.UFraction) (*wasmvmtypes.QueryResult, uint64, error) { + s.mockVM.RegisterQueryCallback(types.CheckForMisbehaviourMsg{}, func(_ wasmvm.Checksum, _ wasmvmtypes.Env, _ []byte, _ wasmvm.KVStore, _ wasmvm.GoAPI, _ wasmvm.Querier, _ wasmvm.GasMeter, _ uint64, _ wasmvmtypes.UFraction) (*wasmvmtypes.QueryResult, uint64, error) { resp, err := json.Marshal(types.CheckForMisbehaviourResult{FoundMisbehaviour: false}) - suite.Require().NoError(err) + s.Require().NoError(err) return &wasmvmtypes.QueryResult{Ok: resp}, wasmtesting.DefaultGasUsed, nil }) }, @@ -973,9 +970,9 @@ func (suite *WasmTestSuite) TestCheckForMisbehaviour() { }, { "success: misbehaviour found", func() { - suite.mockVM.RegisterQueryCallback(types.CheckForMisbehaviourMsg{}, func(_ wasmvm.Checksum, _ wasmvmtypes.Env, _ []byte, _ wasmvm.KVStore, _ wasmvm.GoAPI, _ wasmvm.Querier, _ wasmvm.GasMeter, _ uint64, _ wasmvmtypes.UFraction) (*wasmvmtypes.QueryResult, uint64, error) { + s.mockVM.RegisterQueryCallback(types.CheckForMisbehaviourMsg{}, func(_ wasmvm.Checksum, _ wasmvmtypes.Env, _ []byte, _ wasmvm.KVStore, _ wasmvm.GoAPI, _ wasmvm.Querier, _ wasmvm.GasMeter, _ uint64, _ wasmvmtypes.UFraction) (*wasmvmtypes.QueryResult, uint64, error) { resp, err := 
json.Marshal(types.CheckForMisbehaviourResult{FoundMisbehaviour: true}) - suite.Require().NoError(err) + s.Require().NoError(err) return &wasmvmtypes.QueryResult{Ok: resp}, wasmtesting.DefaultGasUsed, nil }) }, @@ -984,7 +981,7 @@ func (suite *WasmTestSuite) TestCheckForMisbehaviour() { }, { "success: contract error, resp cannot be marshalled", func() { - suite.mockVM.RegisterQueryCallback(types.CheckForMisbehaviourMsg{}, func(_ wasmvm.Checksum, _ wasmvmtypes.Env, _ []byte, _ wasmvm.KVStore, _ wasmvm.GoAPI, _ wasmvm.Querier, _ wasmvm.GasMeter, _ uint64, _ wasmvmtypes.UFraction) (*wasmvmtypes.QueryResult, uint64, error) { + s.mockVM.RegisterQueryCallback(types.CheckForMisbehaviourMsg{}, func(_ wasmvm.Checksum, _ wasmvmtypes.Env, _ []byte, _ wasmvm.KVStore, _ wasmvm.GoAPI, _ wasmvm.Querier, _ wasmvm.GasMeter, _ uint64, _ wasmvmtypes.UFraction) (*wasmvmtypes.QueryResult, uint64, error) { resp := "cannot be unmarshalled" return &wasmvmtypes.QueryResult{Ok: []byte(resp)}, wasmtesting.DefaultGasUsed, nil }) @@ -994,7 +991,7 @@ func (suite *WasmTestSuite) TestCheckForMisbehaviour() { }, { "success: contract returns error", func() { - suite.mockVM.RegisterQueryCallback(types.CheckForMisbehaviourMsg{}, func(_ wasmvm.Checksum, _ wasmvmtypes.Env, _ []byte, _ wasmvm.KVStore, _ wasmvm.GoAPI, _ wasmvm.Querier, _ wasmvm.GasMeter, _ uint64, _ wasmvmtypes.UFraction) (*wasmvmtypes.QueryResult, uint64, error) { + s.mockVM.RegisterQueryCallback(types.CheckForMisbehaviourMsg{}, func(_ wasmvm.Checksum, _ wasmvmtypes.Env, _ []byte, _ wasmvm.KVStore, _ wasmvm.GoAPI, _ wasmvm.Querier, _ wasmvm.GasMeter, _ uint64, _ wasmvmtypes.UFraction) (*wasmvmtypes.QueryResult, uint64, error) { return &wasmvmtypes.QueryResult{Err: wasmtesting.ErrMockContract.Error()}, wasmtesting.DefaultGasUsed, nil }) }, @@ -1003,7 +1000,7 @@ func (suite *WasmTestSuite) TestCheckForMisbehaviour() { }, { "success: vm returns error, ", func() { - suite.mockVM.RegisterQueryCallback(types.CheckForMisbehaviourMsg{}, func(_ wasmvm.Checksum, _ wasmvmtypes.Env, _ []byte, _ wasmvm.KVStore, _ wasmvm.GoAPI, _ wasmvm.Querier, _ wasmvm.GasMeter, _ uint64, _ wasmvmtypes.UFraction) (*wasmvmtypes.QueryResult, uint64, error) { + s.mockVM.RegisterQueryCallback(types.CheckForMisbehaviourMsg{}, func(_ wasmvm.Checksum, _ wasmvmtypes.Env, _ []byte, _ wasmvm.KVStore, _ wasmvm.GoAPI, _ wasmvm.Querier, _ wasmvm.GasMeter, _ uint64, _ wasmvmtypes.UFraction) (*wasmvmtypes.QueryResult, uint64, error) { return nil, 0, errors.New("invalid block ID") }) }, @@ -1024,12 +1021,12 @@ func (suite *WasmTestSuite) TestCheckForMisbehaviour() { clientID = unusedWasmClientID }, false, // not applicable - fmt.Errorf("%s: %s", unusedWasmClientID, clienttypes.ErrClientNotFound), + fmt.Errorf("%s: %w", unusedWasmClientID, clienttypes.ErrClientNotFound), }, { "failure: response fails to unmarshal", func() { - suite.mockVM.RegisterQueryCallback(types.CheckForMisbehaviourMsg{}, func(_ wasmvm.Checksum, _ wasmvmtypes.Env, _ []byte, _ wasmvm.KVStore, _ wasmvm.GoAPI, _ wasmvm.Querier, _ wasmvm.GasMeter, _ uint64, _ wasmvmtypes.UFraction) (*wasmvmtypes.QueryResult, uint64, error) { + s.mockVM.RegisterQueryCallback(types.CheckForMisbehaviourMsg{}, func(_ wasmvm.Checksum, _ wasmvmtypes.Env, _ []byte, _ wasmvm.KVStore, _ wasmvm.GoAPI, _ wasmvm.Querier, _ wasmvm.GasMeter, _ uint64, _ wasmvmtypes.UFraction) (*wasmvmtypes.QueryResult, uint64, error) { return &wasmvmtypes.QueryResult{Ok: []byte("invalid json")}, wasmtesting.DefaultGasUsed, nil }) }, @@ -1039,40 +1036,40 @@ func (suite *WasmTestSuite) 
TestCheckForMisbehaviour() { } for _, tc := range testCases { - suite.Run(tc.name, func() { + s.Run(tc.name, func() { // reset suite to create fresh application state - suite.SetupWasmWithMockVM() + s.SetupWasmWithMockVM() - endpoint := wasmtesting.NewWasmEndpoint(suite.chainA) + endpoint := wasmtesting.NewWasmEndpoint(s.chainA) err := endpoint.CreateClient() - suite.Require().NoError(err) + s.Require().NoError(err) clientID = endpoint.ClientID clientMessage = &types.ClientMessage{ - Data: clienttypes.MustMarshalClientMessage(suite.chainA.App.AppCodec(), wasmtesting.MockTendermintClientMisbehaviour), + Data: clienttypes.MustMarshalClientMessage(s.chainA.App.AppCodec(), wasmtesting.MockTendermintClientMisbehaviour), } - lightClientModule, err := suite.chainA.App.GetIBCKeeper().ClientKeeper.Route(suite.chainA.GetContext(), clientID) - suite.Require().NoError(err) + lightClientModule, err := s.chainA.App.GetIBCKeeper().ClientKeeper.Route(s.chainA.GetContext(), clientID) + s.Require().NoError(err) tc.malleate() var foundMisbehaviour bool foundMisbehaviourFunc := func() { - foundMisbehaviour = lightClientModule.CheckForMisbehaviour(suite.chainA.GetContext(), clientID, clientMessage) + foundMisbehaviour = lightClientModule.CheckForMisbehaviour(s.chainA.GetContext(), clientID, clientMessage) } if tc.expPanic == nil { foundMisbehaviourFunc() - suite.Require().Equal(tc.foundMisbehaviour, foundMisbehaviour) + s.Require().Equal(tc.foundMisbehaviour, foundMisbehaviour) } else { - suite.Require().PanicsWithError(tc.expPanic.Error(), foundMisbehaviourFunc) + s.Require().PanicsWithError(tc.expPanic.Error(), foundMisbehaviourFunc) } }) } } -func (suite *WasmTestSuite) TestUpdateState() { +func (s *WasmTestSuite) TestUpdateState() { mockHeight := clienttypes.NewHeight(1, 50) var ( @@ -1090,20 +1087,20 @@ func (suite *WasmTestSuite) TestUpdateState() { { "success: no update", func() { - suite.mockVM.RegisterSudoCallback(types.UpdateStateMsg{}, func(_ wasmvm.Checksum, env wasmvmtypes.Env, sudoMsg []byte, _ wasmvm.KVStore, _ wasmvm.GoAPI, _ wasmvm.Querier, _ wasmvm.GasMeter, _ uint64, _ wasmvmtypes.UFraction) (*wasmvmtypes.ContractResult, uint64, error) { + s.mockVM.RegisterSudoCallback(types.UpdateStateMsg{}, func(_ wasmvm.Checksum, env wasmvmtypes.Env, sudoMsg []byte, _ wasmvm.KVStore, _ wasmvm.GoAPI, _ wasmvm.Querier, _ wasmvm.GasMeter, _ uint64, _ wasmvmtypes.UFraction) (*wasmvmtypes.ContractResult, uint64, error) { var msg types.SudoMsg err := json.Unmarshal(sudoMsg, &msg) - suite.Require().NoError(err) + s.Require().NoError(err) - suite.Require().NotNil(msg.UpdateState) - suite.Require().NotNil(msg.UpdateState.ClientMessage) - suite.Require().Equal(msg.UpdateState.ClientMessage, clienttypes.MustMarshalClientMessage(suite.chainA.App.AppCodec(), wasmtesting.MockTendermintClientHeader)) - suite.Require().Nil(msg.VerifyMembership) - suite.Require().Nil(msg.VerifyNonMembership) - suite.Require().Nil(msg.UpdateStateOnMisbehaviour) - suite.Require().Nil(msg.VerifyUpgradeAndUpdateState) + s.Require().NotNil(msg.UpdateState) + s.Require().NotNil(msg.UpdateState.ClientMessage) + s.Require().Equal(msg.UpdateState.ClientMessage, clienttypes.MustMarshalClientMessage(s.chainA.App.AppCodec(), wasmtesting.MockTendermintClientHeader)) + s.Require().Nil(msg.VerifyMembership) + s.Require().Nil(msg.VerifyNonMembership) + s.Require().Nil(msg.UpdateStateOnMisbehaviour) + s.Require().Nil(msg.VerifyUpgradeAndUpdateState) - suite.Require().Equal(env.Contract.Address, wasmClientID) + s.Require().Equal(env.Contract.Address, 
wasmClientID) updateStateResp := types.UpdateStateResult{ Heights: []clienttypes.Height{}, @@ -1123,17 +1120,17 @@ func (suite *WasmTestSuite) TestUpdateState() { { "success: update client", func() { - suite.mockVM.RegisterSudoCallback(types.UpdateStateMsg{}, func(_ wasmvm.Checksum, _ wasmvmtypes.Env, sudoMsg []byte, store wasmvm.KVStore, _ wasmvm.GoAPI, _ wasmvm.Querier, _ wasmvm.GasMeter, _ uint64, _ wasmvmtypes.UFraction) (*wasmvmtypes.ContractResult, uint64, error) { + s.mockVM.RegisterSudoCallback(types.UpdateStateMsg{}, func(_ wasmvm.Checksum, _ wasmvmtypes.Env, sudoMsg []byte, store wasmvm.KVStore, _ wasmvm.GoAPI, _ wasmvm.Querier, _ wasmvm.GasMeter, _ uint64, _ wasmvmtypes.UFraction) (*wasmvmtypes.ContractResult, uint64, error) { var msg types.SudoMsg err := json.Unmarshal(sudoMsg, &msg) - suite.Require().NoError(err) + s.Require().NoError(err) bz := store.Get(host.ClientStateKey()) - suite.Require().NotEmpty(bz) - clientState, ok := clienttypes.MustUnmarshalClientState(suite.chainA.Codec, bz).(*types.ClientState) - suite.Require().True(ok) + s.Require().NotEmpty(bz) + clientState, ok := clienttypes.MustUnmarshalClientState(s.chainA.Codec, bz).(*types.ClientState) + s.Require().True(ok) clientState.LatestHeight = mockHeight - expectedClientStateBz = clienttypes.MustMarshalClientState(suite.chainA.App.AppCodec(), clientState) + expectedClientStateBz = clienttypes.MustMarshalClientState(s.chainA.App.AppCodec(), clientState) store.Set(host.ClientStateKey(), expectedClientStateBz) updateStateResp := types.UpdateStateResult{ @@ -1156,7 +1153,7 @@ func (suite *WasmTestSuite) TestUpdateState() { func() { clientID = unusedWasmClientID }, - fmt.Errorf("08-wasm-100: %s", clienttypes.ErrClientNotFound), + fmt.Errorf("08-wasm-100: %w", clienttypes.ErrClientNotFound), nil, }, { @@ -1171,7 +1168,7 @@ func (suite *WasmTestSuite) TestUpdateState() { { "failure: VM returns error", func() { - suite.mockVM.RegisterSudoCallback(types.UpdateStateMsg{}, func(_ wasmvm.Checksum, _ wasmvmtypes.Env, _ []byte, _ wasmvm.KVStore, _ wasmvm.GoAPI, _ wasmvm.Querier, _ wasmvm.GasMeter, _ uint64, _ wasmvmtypes.UFraction) (*wasmvmtypes.ContractResult, uint64, error) { + s.mockVM.RegisterSudoCallback(types.UpdateStateMsg{}, func(_ wasmvm.Checksum, _ wasmvmtypes.Env, _ []byte, _ wasmvm.KVStore, _ wasmvm.GoAPI, _ wasmvm.Querier, _ wasmvm.GasMeter, _ uint64, _ wasmvmtypes.UFraction) (*wasmvmtypes.ContractResult, uint64, error) { return nil, 0, wasmtesting.ErrMockVM }) }, @@ -1181,17 +1178,17 @@ func (suite *WasmTestSuite) TestUpdateState() { { "failure: response fails to unmarshal", func() { - suite.mockVM.RegisterSudoCallback(types.UpdateStateMsg{}, func(_ wasmvm.Checksum, _ wasmvmtypes.Env, _ []byte, _ wasmvm.KVStore, _ wasmvm.GoAPI, _ wasmvm.Querier, _ wasmvm.GasMeter, _ uint64, _ wasmvmtypes.UFraction) (*wasmvmtypes.ContractResult, uint64, error) { + s.mockVM.RegisterSudoCallback(types.UpdateStateMsg{}, func(_ wasmvm.Checksum, _ wasmvmtypes.Env, _ []byte, _ wasmvm.KVStore, _ wasmvm.GoAPI, _ wasmvm.Querier, _ wasmvm.GasMeter, _ uint64, _ wasmvmtypes.UFraction) (*wasmvmtypes.ContractResult, uint64, error) { return &wasmvmtypes.ContractResult{Ok: &wasmvmtypes.Response{Data: []byte("invalid json")}}, wasmtesting.DefaultGasUsed, nil }) }, - fmt.Errorf("invalid character 'i' looking for beginning of value: %s", types.ErrWasmInvalidResponseData), + fmt.Errorf("invalid character 'i' looking for beginning of value: %w", types.ErrWasmInvalidResponseData), nil, }, { "failure: callbackFn returns error", func() { - 
suite.mockVM.RegisterSudoCallback(types.UpdateStateMsg{}, func(_ wasmvm.Checksum, _ wasmvmtypes.Env, _ []byte, _ wasmvm.KVStore, _ wasmvm.GoAPI, _ wasmvm.Querier, _ wasmvm.GasMeter, _ uint64, _ wasmvmtypes.UFraction) (*wasmvmtypes.ContractResult, uint64, error) { + s.mockVM.RegisterSudoCallback(types.UpdateStateMsg{}, func(_ wasmvm.Checksum, _ wasmvmtypes.Env, _ []byte, _ wasmvm.KVStore, _ wasmvm.GoAPI, _ wasmvm.Querier, _ wasmvm.GasMeter, _ uint64, _ wasmvmtypes.UFraction) (*wasmvmtypes.ContractResult, uint64, error) { return &wasmvmtypes.ContractResult{Err: wasmtesting.ErrMockContract.Error()}, 0, nil }) }, @@ -1201,48 +1198,48 @@ func (suite *WasmTestSuite) TestUpdateState() { } for _, tc := range testCases { - suite.Run(tc.name, func() { - suite.SetupWasmWithMockVM() // reset + s.Run(tc.name, func() { + s.SetupWasmWithMockVM() // reset - endpoint := wasmtesting.NewWasmEndpoint(suite.chainA) + endpoint := wasmtesting.NewWasmEndpoint(s.chainA) err := endpoint.CreateClient() - suite.Require().NoError(err) + s.Require().NoError(err) clientID = endpoint.ClientID expectedClientStateBz = nil clientMsg = &types.ClientMessage{ - Data: clienttypes.MustMarshalClientMessage(suite.chainA.App.AppCodec(), wasmtesting.MockTendermintClientHeader), + Data: clienttypes.MustMarshalClientMessage(s.chainA.App.AppCodec(), wasmtesting.MockTendermintClientHeader), } - lightClientModule, err := suite.chainA.App.GetIBCKeeper().ClientKeeper.Route(suite.chainA.GetContext(), clientID) - suite.Require().NoError(err) + lightClientModule, err := s.chainA.App.GetIBCKeeper().ClientKeeper.Route(s.chainA.GetContext(), clientID) + s.Require().NoError(err) tc.malleate() var heights []exported.Height updateState := func() { - heights = lightClientModule.UpdateState(suite.chainA.GetContext(), clientID, clientMsg) + heights = lightClientModule.UpdateState(s.chainA.GetContext(), clientID, clientMsg) } if tc.expPanic == nil { updateState() - suite.Require().Equal(tc.expHeights, heights) + s.Require().Equal(tc.expHeights, heights) if expectedClientStateBz != nil { - clientStore := suite.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(suite.chainA.GetContext(), endpoint.ClientID) + clientStore := s.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(s.chainA.GetContext(), endpoint.ClientID) clientStateBz := clientStore.Get(host.ClientStateKey()) - suite.Require().Equal(expectedClientStateBz, clientStateBz) + s.Require().Equal(expectedClientStateBz, clientStateBz) } } else { - suite.Require().PanicsWithError(tc.expPanic.Error(), updateState) + s.Require().PanicsWithError(tc.expPanic.Error(), updateState) } }) } } -func (suite *WasmTestSuite) TestUpdateStateOnMisbehaviour() { +func (s *WasmTestSuite) TestUpdateStateOnMisbehaviour() { mockHeight := clienttypes.NewHeight(1, 50) var ( @@ -1260,19 +1257,19 @@ func (suite *WasmTestSuite) TestUpdateStateOnMisbehaviour() { { "success: no update", func() { - suite.mockVM.RegisterSudoCallback(types.UpdateStateOnMisbehaviourMsg{}, func(_ wasmvm.Checksum, _ wasmvmtypes.Env, sudoMsg []byte, store wasmvm.KVStore, _ wasmvm.GoAPI, _ wasmvm.Querier, _ wasmvm.GasMeter, _ uint64, _ wasmvmtypes.UFraction) (*wasmvmtypes.ContractResult, uint64, error) { + s.mockVM.RegisterSudoCallback(types.UpdateStateOnMisbehaviourMsg{}, func(_ wasmvm.Checksum, _ wasmvmtypes.Env, sudoMsg []byte, store wasmvm.KVStore, _ wasmvm.GoAPI, _ wasmvm.Querier, _ wasmvm.GasMeter, _ uint64, _ wasmvmtypes.UFraction) (*wasmvmtypes.ContractResult, uint64, error) { var msg types.SudoMsg err := json.Unmarshal(sudoMsg, &msg) - 
suite.Require().NoError(err) + s.Require().NoError(err) - suite.Require().NotNil(msg.UpdateStateOnMisbehaviour) - suite.Require().NotNil(msg.UpdateStateOnMisbehaviour.ClientMessage) - suite.Require().Nil(msg.UpdateState) - suite.Require().Nil(msg.UpdateState) - suite.Require().Nil(msg.VerifyMembership) - suite.Require().Nil(msg.VerifyNonMembership) - suite.Require().Nil(msg.VerifyUpgradeAndUpdateState) + s.Require().NotNil(msg.UpdateStateOnMisbehaviour) + s.Require().NotNil(msg.UpdateStateOnMisbehaviour.ClientMessage) + s.Require().Nil(msg.UpdateState) + s.Require().Nil(msg.UpdateState) + s.Require().Nil(msg.VerifyMembership) + s.Require().Nil(msg.VerifyNonMembership) + s.Require().Nil(msg.VerifyUpgradeAndUpdateState) resp, err := json.Marshal(types.EmptyResult{}) if err != nil { @@ -1288,18 +1285,18 @@ func (suite *WasmTestSuite) TestUpdateStateOnMisbehaviour() { { "success: client state updated on valid misbehaviour", func() { - suite.mockVM.RegisterSudoCallback(types.UpdateStateOnMisbehaviourMsg{}, func(_ wasmvm.Checksum, _ wasmvmtypes.Env, sudoMsg []byte, store wasmvm.KVStore, _ wasmvm.GoAPI, _ wasmvm.Querier, _ wasmvm.GasMeter, _ uint64, _ wasmvmtypes.UFraction) (*wasmvmtypes.ContractResult, uint64, error) { + s.mockVM.RegisterSudoCallback(types.UpdateStateOnMisbehaviourMsg{}, func(_ wasmvm.Checksum, _ wasmvmtypes.Env, sudoMsg []byte, store wasmvm.KVStore, _ wasmvm.GoAPI, _ wasmvm.Querier, _ wasmvm.GasMeter, _ uint64, _ wasmvmtypes.UFraction) (*wasmvmtypes.ContractResult, uint64, error) { var msg types.SudoMsg err := json.Unmarshal(sudoMsg, &msg) - suite.Require().NoError(err) + s.Require().NoError(err) // set new client state in store bz := store.Get(host.ClientStateKey()) - suite.Require().NotEmpty(bz) - clientState, ok := clienttypes.MustUnmarshalClientState(suite.chainA.App.AppCodec(), bz).(*types.ClientState) - suite.Require().True(ok) + s.Require().NotEmpty(bz) + clientState, ok := clienttypes.MustUnmarshalClientState(s.chainA.App.AppCodec(), bz).(*types.ClientState) + s.Require().True(ok) clientState.LatestHeight = mockHeight - expectedClientStateBz = clienttypes.MustMarshalClientState(suite.chainA.App.AppCodec(), clientState) + expectedClientStateBz = clienttypes.MustMarshalClientState(s.chainA.App.AppCodec(), clientState) store.Set(host.ClientStateKey(), expectedClientStateBz) resp, err := json.Marshal(types.EmptyResult{}) @@ -1311,14 +1308,14 @@ func (suite *WasmTestSuite) TestUpdateStateOnMisbehaviour() { }) }, nil, - clienttypes.MustMarshalClientState(suite.chainA.App.AppCodec(), wasmtesting.CreateMockTendermintClientState(mockHeight)), + clienttypes.MustMarshalClientState(s.chainA.App.AppCodec(), wasmtesting.CreateMockTendermintClientState(mockHeight)), }, { "failure: cannot find client state", func() { clientID = unusedWasmClientID }, - fmt.Errorf("%s: %s", unusedWasmClientID, clienttypes.ErrClientNotFound), + fmt.Errorf("%s: %w", unusedWasmClientID, clienttypes.ErrClientNotFound), nil, }, { @@ -1333,7 +1330,7 @@ func (suite *WasmTestSuite) TestUpdateStateOnMisbehaviour() { { "failure: err return from vm", func() { - suite.mockVM.RegisterSudoCallback(types.UpdateStateOnMisbehaviourMsg{}, func(_ wasmvm.Checksum, _ wasmvmtypes.Env, _ []byte, store wasmvm.KVStore, _ wasmvm.GoAPI, _ wasmvm.Querier, _ wasmvm.GasMeter, _ uint64, _ wasmvmtypes.UFraction) (*wasmvmtypes.ContractResult, uint64, error) { + s.mockVM.RegisterSudoCallback(types.UpdateStateOnMisbehaviourMsg{}, func(_ wasmvm.Checksum, _ wasmvmtypes.Env, _ []byte, store wasmvm.KVStore, _ wasmvm.GoAPI, _ wasmvm.Querier, _ 
wasmvm.GasMeter, _ uint64, _ wasmvmtypes.UFraction) (*wasmvmtypes.ContractResult, uint64, error) { return nil, 0, wasmtesting.ErrMockVM }) }, @@ -1343,7 +1340,7 @@ func (suite *WasmTestSuite) TestUpdateStateOnMisbehaviour() { { "failure: err return from contract", func() { - suite.mockVM.RegisterSudoCallback(types.UpdateStateOnMisbehaviourMsg{}, func(_ wasmvm.Checksum, _ wasmvmtypes.Env, _ []byte, store wasmvm.KVStore, _ wasmvm.GoAPI, _ wasmvm.Querier, _ wasmvm.GasMeter, _ uint64, _ wasmvmtypes.UFraction) (*wasmvmtypes.ContractResult, uint64, error) { + s.mockVM.RegisterSudoCallback(types.UpdateStateOnMisbehaviourMsg{}, func(_ wasmvm.Checksum, _ wasmvmtypes.Env, _ []byte, store wasmvm.KVStore, _ wasmvm.GoAPI, _ wasmvm.Querier, _ wasmvm.GasMeter, _ uint64, _ wasmvmtypes.UFraction) (*wasmvmtypes.ContractResult, uint64, error) { return &wasmvmtypes.ContractResult{Err: wasmtesting.ErrMockContract.Error()}, 0, nil }) }, @@ -1353,44 +1350,44 @@ func (suite *WasmTestSuite) TestUpdateStateOnMisbehaviour() { } for _, tc := range testCases { - suite.Run(tc.name, func() { + s.Run(tc.name, func() { // reset suite to create fresh application state - suite.SetupWasmWithMockVM() + s.SetupWasmWithMockVM() - endpoint := wasmtesting.NewWasmEndpoint(suite.chainA) + endpoint := wasmtesting.NewWasmEndpoint(s.chainA) err := endpoint.CreateClient() - suite.Require().NoError(err) + s.Require().NoError(err) clientID = endpoint.ClientID expectedClientStateBz = nil clientMsg = &types.ClientMessage{ - Data: clienttypes.MustMarshalClientMessage(suite.chainA.App.AppCodec(), wasmtesting.MockTendermintClientMisbehaviour), + Data: clienttypes.MustMarshalClientMessage(s.chainA.App.AppCodec(), wasmtesting.MockTendermintClientMisbehaviour), } - lightClientModule, err := suite.chainA.App.GetIBCKeeper().ClientKeeper.Route(suite.chainA.GetContext(), clientID) - suite.Require().NoError(err) + lightClientModule, err := s.chainA.App.GetIBCKeeper().ClientKeeper.Route(s.chainA.GetContext(), clientID) + s.Require().NoError(err) tc.malleate() updateFunc := func() { - lightClientModule.UpdateStateOnMisbehaviour(suite.chainA.GetContext(), clientID, clientMsg) + lightClientModule.UpdateStateOnMisbehaviour(s.chainA.GetContext(), clientID, clientMsg) } if tc.panicErr == nil { updateFunc() if expectedClientStateBz != nil { - store := suite.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(suite.chainA.GetContext(), endpoint.ClientID) - suite.Require().Equal(expectedClientStateBz, store.Get(host.ClientStateKey())) + store := s.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(s.chainA.GetContext(), endpoint.ClientID) + s.Require().Equal(expectedClientStateBz, store.Get(host.ClientStateKey())) } } else { - suite.Require().PanicsWithError(tc.panicErr.Error(), updateFunc) + s.Require().PanicsWithError(tc.panicErr.Error(), updateFunc) } }) } } -func (suite *WasmTestSuite) TestRecoverClient() { +func (s *WasmTestSuite) TestRecoverClient() { var ( expectedClientStateBz []byte subjectClientID, substituteClientID string @@ -1404,26 +1401,26 @@ func (suite *WasmTestSuite) TestRecoverClient() { { "success", func() { - suite.mockVM.RegisterSudoCallback( + s.mockVM.RegisterSudoCallback( types.MigrateClientStoreMsg{}, func(_ wasmvm.Checksum, _ wasmvmtypes.Env, sudoMsg []byte, store wasmvm.KVStore, _ wasmvm.GoAPI, _ wasmvm.Querier, _ wasmvm.GasMeter, _ uint64, _ wasmvmtypes.UFraction) (*wasmvmtypes.ContractResult, uint64, error) { var payload types.SudoMsg err := json.Unmarshal(sudoMsg, &payload) - suite.Require().NoError(err) + s.Require().NoError(err) 
- suite.Require().NotNil(payload.MigrateClientStore) - suite.Require().Nil(payload.UpdateState) - suite.Require().Nil(payload.UpdateStateOnMisbehaviour) - suite.Require().Nil(payload.VerifyMembership) - suite.Require().Nil(payload.VerifyNonMembership) - suite.Require().Nil(payload.VerifyUpgradeAndUpdateState) + s.Require().NotNil(payload.MigrateClientStore) + s.Require().Nil(payload.UpdateState) + s.Require().Nil(payload.UpdateStateOnMisbehaviour) + s.Require().Nil(payload.VerifyMembership) + s.Require().Nil(payload.VerifyNonMembership) + s.Require().Nil(payload.VerifyUpgradeAndUpdateState) bz, err := json.Marshal(types.EmptyResult{}) - suite.Require().NoError(err) + s.Require().NoError(err) prefixedKey := internaltypes.SubjectPrefix prefixedKey = append(prefixedKey, host.ClientStateKey()...) - expectedClientStateBz = wasmtesting.CreateMockClientStateBz(suite.chainA.Codec, suite.checksum) + expectedClientStateBz = wasmtesting.CreateMockClientStateBz(s.chainA.Codec, s.checksum) store.Set(prefixedKey, expectedClientStateBz) return &wasmvmtypes.ContractResult{Ok: &wasmvmtypes.Response{Data: bz}}, wasmtesting.DefaultGasUsed, nil @@ -1463,21 +1460,21 @@ func (suite *WasmTestSuite) TestRecoverClient() { { "failure: checksums do not match", func() { - substituteClientState, found := GetSimApp(suite.chainA).IBCKeeper.ClientKeeper.GetClientState(suite.chainA.GetContext(), substituteClientID) - suite.Require().True(found) + substituteClientState, found := GetSimApp(s.chainA).IBCKeeper.ClientKeeper.GetClientState(s.chainA.GetContext(), substituteClientID) + s.Require().True(found) wasmSubstituteClientState, ok := substituteClientState.(*types.ClientState) - suite.Require().True(ok) + s.Require().True(ok) wasmSubstituteClientState.Checksum = []byte("invalid") - GetSimApp(suite.chainA).IBCKeeper.ClientKeeper.SetClientState(suite.chainA.GetContext(), substituteClientID, wasmSubstituteClientState) + GetSimApp(s.chainA).IBCKeeper.ClientKeeper.SetClientState(s.chainA.GetContext(), substituteClientID, wasmSubstituteClientState) }, clienttypes.ErrInvalidClient, }, { "failure: vm returns error", func() { - suite.mockVM.RegisterSudoCallback( + s.mockVM.RegisterSudoCallback( types.MigrateClientStoreMsg{}, func(_ wasmvm.Checksum, _ wasmvmtypes.Env, _ []byte, _ wasmvm.KVStore, _ wasmvm.GoAPI, _ wasmvm.Querier, _ wasmvm.GasMeter, _ uint64, _ wasmvmtypes.UFraction) (*wasmvmtypes.ContractResult, uint64, error) { return nil, wasmtesting.DefaultGasUsed, wasmtesting.ErrMockVM @@ -1489,7 +1486,7 @@ func (suite *WasmTestSuite) TestRecoverClient() { { "failure: contract returns error", func() { - suite.mockVM.RegisterSudoCallback( + s.mockVM.RegisterSudoCallback( types.MigrateClientStoreMsg{}, func(_ wasmvm.Checksum, _ wasmvmtypes.Env, _ []byte, _ wasmvm.KVStore, _ wasmvm.GoAPI, _ wasmvm.Querier, _ wasmvm.GasMeter, _ uint64, _ wasmvmtypes.UFraction) (*wasmvmtypes.ContractResult, uint64, error) { return &wasmvmtypes.ContractResult{Err: wasmtesting.ErrMockContract.Error()}, wasmtesting.DefaultGasUsed, nil @@ -1501,42 +1498,41 @@ func (suite *WasmTestSuite) TestRecoverClient() { } for _, tc := range testCases { - tc := tc - suite.Run(tc.name, func() { - suite.SetupWasmWithMockVM() + s.Run(tc.name, func() { + s.SetupWasmWithMockVM() - subjectEndpoint := wasmtesting.NewWasmEndpoint(suite.chainA) + subjectEndpoint := wasmtesting.NewWasmEndpoint(s.chainA) err := subjectEndpoint.CreateClient() - suite.Require().NoError(err) + s.Require().NoError(err) subjectClientID = subjectEndpoint.ClientID - substituteEndpoint := 
wasmtesting.NewWasmEndpoint(suite.chainA) + substituteEndpoint := wasmtesting.NewWasmEndpoint(s.chainA) err = substituteEndpoint.CreateClient() - suite.Require().NoError(err) + s.Require().NoError(err) substituteClientID = substituteEndpoint.ClientID expectedClientStateBz = nil - lightClientModule, err := suite.chainA.App.GetIBCKeeper().ClientKeeper.Route(suite.chainA.GetContext(), subjectClientID) - suite.Require().NoError(err) + lightClientModule, err := s.chainA.App.GetIBCKeeper().ClientKeeper.Route(s.chainA.GetContext(), subjectClientID) + s.Require().NoError(err) tc.malleate() - err = lightClientModule.RecoverClient(suite.chainA.GetContext(), subjectClientID, substituteClientID) + err = lightClientModule.RecoverClient(s.chainA.GetContext(), subjectClientID, substituteClientID) if tc.expErr == nil { - suite.Require().NoError(err) + s.Require().NoError(err) - subjectClientStore := suite.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(suite.chainA.GetContext(), subjectClientID) - suite.Require().Equal(expectedClientStateBz, subjectClientStore.Get(host.ClientStateKey())) + subjectClientStore := s.chainA.App.GetIBCKeeper().ClientKeeper.ClientStore(s.chainA.GetContext(), subjectClientID) + s.Require().Equal(expectedClientStateBz, subjectClientStore.Get(host.ClientStateKey())) } else { - suite.Require().ErrorIs(err, tc.expErr) + s.Require().ErrorIs(err, tc.expErr) } }) } } -func (suite *WasmTestSuite) TestLatestHeight() { +func (s *WasmTestSuite) TestLatestHeight() { var clientID string testCases := []struct { @@ -1560,23 +1556,22 @@ func (suite *WasmTestSuite) TestLatestHeight() { } for _, tc := range testCases { - tc := tc - suite.Run(tc.name, func() { - suite.SetupWasmWithMockVM() + s.Run(tc.name, func() { + s.SetupWasmWithMockVM() - subjectEndpoint := wasmtesting.NewWasmEndpoint(suite.chainA) + subjectEndpoint := wasmtesting.NewWasmEndpoint(s.chainA) err := subjectEndpoint.CreateClient() - suite.Require().NoError(err) + s.Require().NoError(err) clientID = subjectEndpoint.ClientID - lightClientModule, err := suite.chainA.App.GetIBCKeeper().ClientKeeper.Route(suite.chainA.GetContext(), clientID) - suite.Require().NoError(err) + lightClientModule, err := s.chainA.App.GetIBCKeeper().ClientKeeper.Route(s.chainA.GetContext(), clientID) + s.Require().NoError(err) tc.malleate() - height := lightClientModule.LatestHeight(suite.chainA.GetContext(), clientID) + height := lightClientModule.LatestHeight(s.chainA.GetContext(), clientID) - suite.Require().Equal(tc.expHeight, height) + s.Require().Equal(tc.expHeight, height) }) } } diff --git a/modules/light-clients/08-wasm/module.go b/modules/light-clients/08-wasm/module.go index d7714ac602f..3956317db58 100644 --- a/modules/light-clients/08-wasm/module.go +++ b/modules/light-clients/08-wasm/module.go @@ -97,13 +97,13 @@ func (AppModuleBasic) GetQueryCmd() *cobra.Command { // AppModule represents the AppModule for this module type AppModule struct { AppModuleBasic - keeper keeper.Keeper + keeper *keeper.Keeper } // NewAppModule creates a new 08-wasm module func NewAppModule(k keeper.Keeper) AppModule { return AppModule{ - keeper: k, + keeper: &k, } } @@ -111,11 +111,6 @@ func NewAppModule(k keeper.Keeper) AppModule { func (am AppModule) RegisterServices(cfg module.Configurator) { types.RegisterMsgServer(cfg.MsgServer(), am.keeper) types.RegisterQueryServer(cfg.QueryServer(), am.keeper) - - wasmMigrator := keeper.NewMigrator(am.keeper) - if err := cfg.RegisterMigration(types.ModuleName, 1, wasmMigrator.MigrateChecksums); err != nil { - 
panic(fmt.Errorf("failed to migrate 08-wasm module from version 1 to 2 (checksums migration to collections): %v", err)) - } } // ConsensusVersion implements AppModule/ConsensusVersion. @@ -130,7 +125,7 @@ func (am AppModule) InitGenesis(ctx sdk.Context, cdc codec.JSONCodec, bz json.Ra var gs types.GenesisState err := cdc.UnmarshalJSON(bz, &gs) if err != nil { - panic(fmt.Errorf("failed to unmarshal %s genesis state: %s", am.Name(), err)) + panic(fmt.Errorf("failed to unmarshal %s genesis state: %w", am.Name(), err)) } err = am.keeper.InitGenesis(ctx, gs) if err != nil { diff --git a/modules/light-clients/08-wasm/simulation/proposals_test.go b/modules/light-clients/08-wasm/simulation/proposals_test.go index 69be78a7423..f65fb892357 100644 --- a/modules/light-clients/08-wasm/simulation/proposals_test.go +++ b/modules/light-clients/08-wasm/simulation/proposals_test.go @@ -26,7 +26,7 @@ func TestProposalMsgs(t *testing.T) { // execute ProposalMsgs function weightedProposalMsgs := simulation.ProposalMsgs() - require.Equal(t, 1, len(weightedProposalMsgs)) + require.Len(t, weightedProposalMsgs, 1) w0 := weightedProposalMsgs[0] require.Equal(t, simulation.OpWeightMsgStoreCode, w0.AppParamsKey()) @@ -37,5 +37,5 @@ func TestProposalMsgs(t *testing.T) { require.True(t, ok) require.Equal(t, sdk.AccAddress(address.Module("gov")).String(), msgStoreCode.Signer) - require.Equal(t, msgStoreCode.WasmByteCode, []byte{0x01}) + require.Equal(t, []byte{0x01}, msgStoreCode.WasmByteCode) } diff --git a/modules/light-clients/08-wasm/testing/simapp/app.go b/modules/light-clients/08-wasm/testing/simapp/app.go index 14588114ff9..af3134dd446 100644 --- a/modules/light-clients/08-wasm/testing/simapp/app.go +++ b/modules/light-clients/08-wasm/testing/simapp/app.go @@ -74,9 +74,6 @@ import ( "github.com/cosmos/cosmos-sdk/x/consensus" consensusparamkeeper "github.com/cosmos/cosmos-sdk/x/consensus/keeper" consensusparamtypes "github.com/cosmos/cosmos-sdk/x/consensus/types" - "github.com/cosmos/cosmos-sdk/x/crisis" - crisiskeeper "github.com/cosmos/cosmos-sdk/x/crisis/keeper" - crisistypes "github.com/cosmos/cosmos-sdk/x/crisis/types" distr "github.com/cosmos/cosmos-sdk/x/distribution" distrkeeper "github.com/cosmos/cosmos-sdk/x/distribution/keeper" distrtypes "github.com/cosmos/cosmos-sdk/x/distribution/types" @@ -86,16 +83,9 @@ import ( govclient "github.com/cosmos/cosmos-sdk/x/gov/client" govkeeper "github.com/cosmos/cosmos-sdk/x/gov/keeper" govtypes "github.com/cosmos/cosmos-sdk/x/gov/types" - "github.com/cosmos/cosmos-sdk/x/group" - groupkeeper "github.com/cosmos/cosmos-sdk/x/group/keeper" - groupmodule "github.com/cosmos/cosmos-sdk/x/group/module" "github.com/cosmos/cosmos-sdk/x/mint" mintkeeper "github.com/cosmos/cosmos-sdk/x/mint/keeper" minttypes "github.com/cosmos/cosmos-sdk/x/mint/types" - "github.com/cosmos/cosmos-sdk/x/params" - paramsclient "github.com/cosmos/cosmos-sdk/x/params/client" - paramskeeper "github.com/cosmos/cosmos-sdk/x/params/keeper" - paramstypes "github.com/cosmos/cosmos-sdk/x/params/types" "github.com/cosmos/cosmos-sdk/x/slashing" slashingkeeper "github.com/cosmos/cosmos-sdk/x/slashing/keeper" slashingtypes "github.com/cosmos/cosmos-sdk/x/slashing/types" @@ -124,8 +114,6 @@ import ( ibctransfertypes "github.com/cosmos/ibc-go/v10/modules/apps/transfer/types" transferv2 "github.com/cosmos/ibc-go/v10/modules/apps/transfer/v2" ibc "github.com/cosmos/ibc-go/v10/modules/core" - ibcclienttypes "github.com/cosmos/ibc-go/v10/modules/core/02-client/types" - ibcconnectiontypes 
"github.com/cosmos/ibc-go/v10/modules/core/03-connection/types" porttypes "github.com/cosmos/ibc-go/v10/modules/core/05-port/types" ibcapi "github.com/cosmos/ibc-go/v10/modules/core/api" ibcexported "github.com/cosmos/ibc-go/v10/modules/core/exported" @@ -171,7 +159,6 @@ type SimApp struct { // keys to access the substores keys map[string]*storetypes.KVStoreKey - tkeys map[string]*storetypes.TransientStoreKey memKeys map[string]*storetypes.MemoryStoreKey // keepers @@ -182,24 +169,21 @@ type SimApp struct { MintKeeper mintkeeper.Keeper DistrKeeper distrkeeper.Keeper GovKeeper govkeeper.Keeper - CrisisKeeper *crisiskeeper.Keeper UpgradeKeeper *upgradekeeper.Keeper - ParamsKeeper paramskeeper.Keeper AuthzKeeper authzkeeper.Keeper IBCKeeper *ibckeeper.Keeper // IBC Keeper must be a pointer in the app, so we can SetRouter on it correctly - ICAControllerKeeper icacontrollerkeeper.Keeper - ICAHostKeeper icahostkeeper.Keeper + ICAControllerKeeper *icacontrollerkeeper.Keeper + ICAHostKeeper *icahostkeeper.Keeper EvidenceKeeper evidencekeeper.Keeper - TransferKeeper ibctransferkeeper.Keeper + TransferKeeper *ibctransferkeeper.Keeper WasmClientKeeper ibcwasmkeeper.Keeper FeeGrantKeeper feegrantkeeper.Keeper - GroupKeeper groupkeeper.Keeper ConsensusParamsKeeper consensusparamkeeper.Keeper CircuitKeeper circuitkeeper.Keeper // make IBC modules public for test purposes // these modules are never directly routed to by the IBC Router - ICAAuthModule ibcmock.IBCModule + ICAAuthModule *ibcmock.IBCModule // the module manager ModuleManager *module.Manager @@ -309,9 +293,9 @@ func newSimApp( bApp.SetTxEncoder(txConfig.TxEncoder()) keys := storetypes.NewKVStoreKeys( - authtypes.StoreKey, banktypes.StoreKey, stakingtypes.StoreKey, crisistypes.StoreKey, + authtypes.StoreKey, banktypes.StoreKey, stakingtypes.StoreKey, minttypes.StoreKey, distrtypes.StoreKey, slashingtypes.StoreKey, - govtypes.StoreKey, group.StoreKey, paramstypes.StoreKey, ibcexported.StoreKey, upgradetypes.StoreKey, feegrant.StoreKey, + govtypes.StoreKey, ibcexported.StoreKey, upgradetypes.StoreKey, feegrant.StoreKey, evidencetypes.StoreKey, ibctransfertypes.StoreKey, icacontrollertypes.StoreKey, icahosttypes.StoreKey, authzkeeper.StoreKey, consensusparamtypes.StoreKey, circuittypes.StoreKey, ibcwasmtypes.StoreKey, ) @@ -321,9 +305,7 @@ func newSimApp( panic(err) } - tkeys := storetypes.NewTransientStoreKeys(paramstypes.TStoreKey) memKeys := storetypes.NewMemoryStoreKeys(ibcmock.MemStoreKey) - app := &SimApp{ BaseApp: bApp, legacyAmino: legacyAmino, @@ -331,12 +313,9 @@ func newSimApp( txConfig: txConfig, interfaceRegistry: interfaceRegistry, keys: keys, - tkeys: tkeys, memKeys: memKeys, } - app.ParamsKeeper = initParamsKeeper(appCodec, legacyAmino, keys[paramstypes.StoreKey], tkeys[paramstypes.TStoreKey]) - // set the BaseApp's parameter store app.ConsensusParamsKeeper = consensusparamkeeper.NewKeeper(appCodec, runtime.NewKVStoreService(keys[consensusparamtypes.StoreKey]), authtypes.NewModuleAddress(govtypes.ModuleName).String(), runtime.EventService{}) bApp.SetParamStore(app.ConsensusParamsKeeper.ParamsStore) @@ -365,10 +344,6 @@ func newSimApp( appCodec, legacyAmino, runtime.NewKVStoreService(keys[slashingtypes.StoreKey]), app.StakingKeeper, authtypes.NewModuleAddress(govtypes.ModuleName).String(), ) - invCheckPeriod := cast.ToUint(appOpts.Get(server.FlagInvCheckPeriod)) - app.CrisisKeeper = crisiskeeper.NewKeeper(appCodec, runtime.NewKVStoreService(keys[crisistypes.StoreKey]), invCheckPeriod, - app.BankKeeper, authtypes.FeeCollectorName, 
authtypes.NewModuleAddress(govtypes.ModuleName).String(), app.AccountKeeper.AddressCodec()) - app.FeeGrantKeeper = feegrantkeeper.NewKeeper(appCodec, runtime.NewKVStoreService(keys[feegrant.StoreKey]), app.AccountKeeper) // register the staking hooks @@ -382,13 +357,6 @@ func newSimApp( app.AuthzKeeper = authzkeeper.NewKeeper(runtime.NewKVStoreService(keys[authzkeeper.StoreKey]), appCodec, app.MsgServiceRouter(), app.AccountKeeper) - groupConfig := group.DefaultConfig() - /* - Example of setting group params: - groupConfig.MaxMetadataLen = 1000 - */ - app.GroupKeeper = groupkeeper.NewKeeper(keys[group.StoreKey], appCodec, app.MsgServiceRouter(), app.AccountKeeper, groupConfig) - // get skipUpgradeHeights from the app options skipUpgradeHeights := map[int64]bool{} for _, h := range cast.ToIntSlice(appOpts.Get(server.FlagUnsafeSkipUpgrades)) { @@ -399,7 +367,7 @@ func newSimApp( app.UpgradeKeeper = upgradekeeper.NewKeeper(skipUpgradeHeights, runtime.NewKVStoreService(keys[upgradetypes.StoreKey]), appCodec, homePath, app.BaseApp, authtypes.NewModuleAddress(govtypes.ModuleName).String()) app.IBCKeeper = ibckeeper.NewKeeper( - appCodec, runtime.NewKVStoreService(keys[ibcexported.StoreKey]), app.GetSubspace(ibcexported.ModuleName), app.UpgradeKeeper, authtypes.NewModuleAddress(govtypes.ModuleName).String(), + appCodec, runtime.NewKVStoreService(keys[ibcexported.StoreKey]), app.UpgradeKeeper, authtypes.NewModuleAddress(govtypes.ModuleName).String(), ) govConfig := govtypes.DefaultConfig() @@ -470,8 +438,7 @@ func newSimApp( // ICA Controller keeper app.ICAControllerKeeper = icacontrollerkeeper.NewKeeper( - appCodec, runtime.NewKVStoreService(keys[icacontrollertypes.StoreKey]), app.GetSubspace(icacontrollertypes.SubModuleName), - app.IBCKeeper.ChannelKeeper, + appCodec, runtime.NewKVStoreService(keys[icacontrollertypes.StoreKey]), app.IBCKeeper.ChannelKeeper, app.MsgServiceRouter(), authtypes.NewModuleAddress(govtypes.ModuleName).String(), @@ -479,8 +446,7 @@ func newSimApp( // ICA Host keeper app.ICAHostKeeper = icahostkeeper.NewKeeper( - appCodec, runtime.NewKVStoreService(keys[icahosttypes.StoreKey]), app.GetSubspace(icahosttypes.SubModuleName), - app.IBCKeeper.ChannelKeeper, + appCodec, runtime.NewKVStoreService(keys[icahosttypes.StoreKey]), app.IBCKeeper.ChannelKeeper, app.AccountKeeper, app.MsgServiceRouter(), app.GRPCQueryRouter(), authtypes.NewModuleAddress(govtypes.ModuleName).String(), @@ -494,8 +460,9 @@ func newSimApp( // Create Transfer Keeper app.TransferKeeper = ibctransferkeeper.NewKeeper( - appCodec, runtime.NewKVStoreService(keys[ibctransfertypes.StoreKey]), app.GetSubspace(ibctransfertypes.ModuleName), - app.IBCKeeper.ChannelKeeper, + appCodec, + app.AccountKeeper.AddressCodec(), + runtime.NewKVStoreService(keys[ibctransfertypes.StoreKey]), app.IBCKeeper.ChannelKeeper, app.MsgServiceRouter(), app.AccountKeeper, app.BankKeeper, @@ -537,7 +504,7 @@ func newSimApp( var icaControllerStack porttypes.IBCModule var ok bool icaControllerStack = ibcmock.NewIBCModule(&mockModule, ibcmock.NewIBCApp("")) - app.ICAAuthModule, ok = icaControllerStack.(ibcmock.IBCModule) + app.ICAAuthModule, ok = icaControllerStack.(*ibcmock.IBCModule) if !ok { panic(fmt.Errorf("cannot convert %T into %T", icaControllerStack, app.ICAAuthModule)) } @@ -582,10 +549,6 @@ func newSimApp( // **** Module Options **** - // NOTE: we may consider parsing `appOpts` inside module constructors. For the moment - // we prefer to be more strict in what arguments the modules expect. 
- skipGenesisInvariants := cast.ToBool(appOpts.Get(crisis.FlagSkipGenesisInvariants)) - // NOTE: Any module instantiated in the module manager that is later modified // must be passed by reference here. app.ModuleManager = module.NewManager( @@ -593,28 +556,25 @@ func newSimApp( app.AccountKeeper, app.StakingKeeper, app, txConfig, ), - auth.NewAppModule(appCodec, app.AccountKeeper, authsims.RandomGenesisAccounts, app.GetSubspace(authtypes.ModuleName)), + auth.NewAppModule(appCodec, app.AccountKeeper, authsims.RandomGenesisAccounts, nil), vesting.NewAppModule(app.AccountKeeper, app.BankKeeper), - bank.NewAppModule(appCodec, app.BankKeeper, app.AccountKeeper, app.GetSubspace(banktypes.ModuleName)), - crisis.NewAppModule(app.CrisisKeeper, skipGenesisInvariants, app.GetSubspace(crisistypes.ModuleName)), + bank.NewAppModule(appCodec, app.BankKeeper, app.AccountKeeper, nil), feegrantmodule.NewAppModule(appCodec, app.AccountKeeper, app.BankKeeper, app.FeeGrantKeeper, app.interfaceRegistry), - gov.NewAppModule(appCodec, &app.GovKeeper, app.AccountKeeper, app.BankKeeper, app.GetSubspace(govtypes.ModuleName)), - mint.NewAppModule(appCodec, app.MintKeeper, app.AccountKeeper, nil, app.GetSubspace(minttypes.ModuleName)), - slashing.NewAppModule(appCodec, app.SlashingKeeper, app.AccountKeeper, app.BankKeeper, app.StakingKeeper, app.GetSubspace(slashingtypes.ModuleName), app.interfaceRegistry), - distr.NewAppModule(appCodec, app.DistrKeeper, app.AccountKeeper, app.BankKeeper, app.StakingKeeper, app.GetSubspace(distrtypes.ModuleName)), - staking.NewAppModule(appCodec, app.StakingKeeper, app.AccountKeeper, app.BankKeeper, app.GetSubspace(stakingtypes.ModuleName)), + gov.NewAppModule(appCodec, &app.GovKeeper, app.AccountKeeper, app.BankKeeper, nil), + mint.NewAppModule(appCodec, app.MintKeeper, app.AccountKeeper, nil, nil), + slashing.NewAppModule(appCodec, app.SlashingKeeper, app.AccountKeeper, app.BankKeeper, app.StakingKeeper, nil, app.interfaceRegistry), + distr.NewAppModule(appCodec, app.DistrKeeper, app.AccountKeeper, app.BankKeeper, app.StakingKeeper, nil), + staking.NewAppModule(appCodec, app.StakingKeeper, app.AccountKeeper, app.BankKeeper, nil), upgrade.NewAppModule(app.UpgradeKeeper, app.AccountKeeper.AddressCodec()), evidence.NewAppModule(app.EvidenceKeeper), - params.NewAppModule(app.ParamsKeeper), authzmodule.NewAppModule(appCodec, app.AuthzKeeper, app.AccountKeeper, app.BankKeeper, app.interfaceRegistry), - groupmodule.NewAppModule(appCodec, app.GroupKeeper, app.AccountKeeper, app.BankKeeper, app.interfaceRegistry), consensus.NewAppModule(appCodec, app.ConsensusParamsKeeper), circuit.NewAppModule(appCodec, app.CircuitKeeper), // IBC modules ibc.NewAppModule(app.IBCKeeper), transfer.NewAppModule(app.TransferKeeper), - ica.NewAppModule(&app.ICAControllerKeeper, &app.ICAHostKeeper), + ica.NewAppModule(app.ICAControllerKeeper, app.ICAHostKeeper), mockModule, // IBC light clients @@ -631,11 +591,7 @@ func newSimApp( app.ModuleManager, map[string]module.AppModuleBasic{ genutiltypes.ModuleName: genutil.NewAppModuleBasic(genutiltypes.DefaultMessageValidator), - govtypes.ModuleName: gov.NewAppModuleBasic( - []govclient.ProposalHandler{ - paramsclient.ProposalHandler, - }, - ), + govtypes.ModuleName: gov.NewAppModuleBasic([]govclient.ProposalHandler{}), }) app.BasicModuleManager.RegisterLegacyAminoCodec(legacyAmino) app.BasicModuleManager.RegisterInterfaces(interfaceRegistry) @@ -665,7 +621,6 @@ func newSimApp( ibcmock.ModuleName, ) app.ModuleManager.SetOrderEndBlockers( - crisistypes.ModuleName, 
govtypes.ModuleName, stakingtypes.ModuleName, ibcexported.ModuleName, @@ -675,7 +630,6 @@ func newSimApp( icatypes.ModuleName, ibcwasmtypes.ModuleName, ibcmock.ModuleName, - group.ModuleName, ) // NOTE: The genutils module must occur after staking so that pools are @@ -684,10 +638,10 @@ func newSimApp( genesisModuleOrder := []string{ authtypes.ModuleName, banktypes.ModuleName, distrtypes.ModuleName, stakingtypes.ModuleName, - slashingtypes.ModuleName, govtypes.ModuleName, minttypes.ModuleName, crisistypes.ModuleName, + slashingtypes.ModuleName, govtypes.ModuleName, minttypes.ModuleName, ibcexported.ModuleName, genutiltypes.ModuleName, evidencetypes.ModuleName, authz.ModuleName, ibctransfertypes.ModuleName, - icatypes.ModuleName, ibcmock.ModuleName, feegrant.ModuleName, paramstypes.ModuleName, upgradetypes.ModuleName, - vestingtypes.ModuleName, group.ModuleName, consensusparamtypes.ModuleName, circuittypes.ModuleName, ibcwasmtypes.ModuleName, + icatypes.ModuleName, ibcmock.ModuleName, feegrant.ModuleName, upgradetypes.ModuleName, + vestingtypes.ModuleName, consensusparamtypes.ModuleName, circuittypes.ModuleName, ibcwasmtypes.ModuleName, } app.ModuleManager.SetOrderInitGenesis(genesisModuleOrder...) app.ModuleManager.SetOrderExportGenesis(genesisModuleOrder...) @@ -695,7 +649,6 @@ func newSimApp( // Uncomment if you want to set a custom migration order here. // app.ModuleManager.SetOrderMigrations(custom order) - app.ModuleManager.RegisterInvariants(app.CrisisKeeper) app.configurator = module.NewConfigurator(app.appCodec, app.MsgServiceRouter(), app.GRPCQueryRouter()) err := app.ModuleManager.RegisterServices(app.configurator) if err != nil { @@ -722,7 +675,7 @@ func newSimApp( // NOTE: this is not required apps that don't use the simulator for fuzz testing // transactions overrideModules := map[string]module.AppModuleSimulation{ - authtypes.ModuleName: auth.NewAppModule(app.appCodec, app.AccountKeeper, authsims.RandomGenesisAccounts, app.GetSubspace(authtypes.ModuleName)), + authtypes.ModuleName: auth.NewAppModule(app.appCodec, app.AccountKeeper, authsims.RandomGenesisAccounts, nil), } app.simulationManager = module.NewSimulationManagerFromAppModules(app.ModuleManager.Modules, overrideModules) @@ -730,7 +683,6 @@ func newSimApp( // initialize stores app.MountKVStores(keys) - app.MountTransientStores(tkeys) app.MountMemoryStores(memKeys) // initialize BaseApp @@ -746,7 +698,7 @@ func newSimApp( ibcwasmkeeper.NewWasmSnapshotter(app.CommitMultiStore(), &app.WasmClientKeeper), ) if err != nil { - panic(fmt.Errorf("failed to register snapshot extension: %s", err)) + panic(fmt.Errorf("failed to register snapshot extension: %w", err)) } } @@ -940,14 +892,6 @@ func (app *SimApp) GetStoreKeys() []storetypes.StoreKey { return keys } -// GetSubspace returns a param subspace for a given module name. -// -// NOTE: This is solely to be used for testing purposes. 
-func (app *SimApp) GetSubspace(moduleName string) paramstypes.Subspace { - subspace, _ := app.ParamsKeeper.GetSubspace(moduleName) - return subspace -} - // SimulationManager implements the SimulationApp interface func (app *SimApp) SimulationManager() *module.SimulationManager { return app.simulationManager @@ -1019,21 +963,6 @@ func BlockedAddresses() map[string]bool { return modAccAddrs } -// initParamsKeeper init params keeper and its subspaces -func initParamsKeeper(appCodec codec.BinaryCodec, legacyAmino *codec.LegacyAmino, key, tkey storetypes.StoreKey) paramskeeper.Keeper { - paramsKeeper := paramskeeper.NewKeeper(appCodec, legacyAmino, key, tkey) - - // register the key tables for legacy param subspaces - keyTable := ibcclienttypes.ParamKeyTable() - keyTable.RegisterParamSet(&ibcconnectiontypes.Params{}) - paramsKeeper.Subspace(ibcexported.ModuleName).WithKeyTable(keyTable) - paramsKeeper.Subspace(ibctransfertypes.ModuleName).WithKeyTable(ibctransfertypes.ParamKeyTable()) - paramsKeeper.Subspace(icacontrollertypes.SubModuleName).WithKeyTable(icacontrollertypes.ParamKeyTable()) - paramsKeeper.Subspace(icahosttypes.SubModuleName).WithKeyTable(icahosttypes.ParamKeyTable()) - - return paramsKeeper -} - // IBC TestingApp functions // GetBaseApp implements the TestingApp interface. diff --git a/modules/light-clients/08-wasm/testing/simapp/export.go b/modules/light-clients/08-wasm/testing/simapp/export.go index 67e968f8226..bbef46d324e 100644 --- a/modules/light-clients/08-wasm/testing/simapp/export.go +++ b/modules/light-clients/08-wasm/testing/simapp/export.go @@ -66,13 +66,10 @@ func (app *SimApp) prepForZeroHeightGenesis(ctx sdk.Context, jailAllowedAddrs [] allowedAddrsMap[addr] = true } - /* Just to be safe, assert the invariants on current state. */ - app.CrisisKeeper.AssertInvariants(ctx) - /* Handle fee distribution state. */ // withdraw all validator commission - err := app.StakingKeeper.IterateValidators(ctx, func(_ int64, val stakingtypes.ValidatorI) (stop bool) { + err := app.StakingKeeper.IterateValidators(ctx, func(_ int64, val stakingtypes.ValidatorI) bool { valBz, err := app.StakingKeeper.ValidatorAddressCodec().StringToBytes(val.GetOperator()) if err != nil { panic(err) @@ -113,7 +110,7 @@ func (app *SimApp) prepForZeroHeightGenesis(ctx sdk.Context, jailAllowedAddrs [] ctx = ctx.WithBlockHeight(0) // reinitialize all validators - err = app.StakingKeeper.IterateValidators(ctx, func(_ int64, val stakingtypes.ValidatorI) (stop bool) { + err = app.StakingKeeper.IterateValidators(ctx, func(_ int64, val stakingtypes.ValidatorI) bool { valBz, err := sdk.ValAddressFromBech32(val.GetOperator()) if err != nil { panic(err) @@ -167,7 +164,7 @@ func (app *SimApp) prepForZeroHeightGenesis(ctx sdk.Context, jailAllowedAddrs [] /* Handle staking state. 
*/ // iterate through redelegations, reset creation height - err = app.StakingKeeper.IterateRedelegations(ctx, func(_ int64, red stakingtypes.Redelegation) (stop bool) { + err = app.StakingKeeper.IterateRedelegations(ctx, func(_ int64, red stakingtypes.Redelegation) bool { for i := range red.Entries { red.Entries[i].CreationHeight = 0 } @@ -182,7 +179,7 @@ func (app *SimApp) prepForZeroHeightGenesis(ctx sdk.Context, jailAllowedAddrs [] } // iterate through unbonding delegations, reset creation height - err = app.StakingKeeper.IterateUnbondingDelegations(ctx, func(_ int64, ubd stakingtypes.UnbondingDelegation) (stop bool) { + err = app.StakingKeeper.IterateUnbondingDelegations(ctx, func(_ int64, ubd stakingtypes.UnbondingDelegation) bool { for i := range ubd.Entries { ubd.Entries[i].CreationHeight = 0 } @@ -232,7 +229,7 @@ func (app *SimApp) prepForZeroHeightGenesis(ctx sdk.Context, jailAllowedAddrs [] // reset start height on signing infos err = app.SlashingKeeper.IterateValidatorSigningInfos( ctx, - func(addr sdk.ConsAddress, info slashingtypes.ValidatorSigningInfo) (stop bool) { + func(addr sdk.ConsAddress, info slashingtypes.ValidatorSigningInfo) bool { info.StartHeight = 0 if err := app.SlashingKeeper.SetValidatorSigningInfo(ctx, addr, info); err != nil { panic(err) diff --git a/modules/light-clients/08-wasm/testing/simapp/simd/cmd/root.go b/modules/light-clients/08-wasm/testing/simapp/simd/cmd/root.go index 37e3e3a120f..337a475e594 100644 --- a/modules/light-clients/08-wasm/testing/simapp/simd/cmd/root.go +++ b/modules/light-clients/08-wasm/testing/simapp/simd/cmd/root.go @@ -38,7 +38,6 @@ import ( "github.com/cosmos/cosmos-sdk/x/auth/tx" txmodule "github.com/cosmos/cosmos-sdk/x/auth/tx/config" "github.com/cosmos/cosmos-sdk/x/auth/types" - "github.com/cosmos/cosmos-sdk/x/crisis" genutilcli "github.com/cosmos/cosmos-sdk/x/genutil/client/cli" cmtcfg "github.com/cometbft/cometbft/config" @@ -241,7 +240,6 @@ func initRootCmd(rootCmd *cobra.Command, encodingConfig params.EncodingConfig, b } func addModuleInitFlags(startCmd *cobra.Command) { - crisis.AddModuleInitFlags(startCmd) preCheck := func(cmd *cobra.Command, _ []string) error { return CheckLibwasmVersion(getExpectedLibwasmVersion()) } diff --git a/modules/light-clients/08-wasm/testing/wasm_endpoint.go b/modules/light-clients/08-wasm/testing/wasm_endpoint.go index 1eb1d9f6253..e9df2eb1284 100644 --- a/modules/light-clients/08-wasm/testing/wasm_endpoint.go +++ b/modules/light-clients/08-wasm/testing/wasm_endpoint.go @@ -25,28 +25,28 @@ func NewWasmEndpoint(chain *ibctesting.TestChain) *WasmEndpoint { // CreateClient creates an wasm client on a mock cometbft chain. // The client and consensus states are represented by byte slices // and the starting height is 1. 
-func (endpoint *WasmEndpoint) CreateClient() error { +func (ep *WasmEndpoint) CreateClient() error { checksum, err := types.CreateChecksum(Code) - require.NoError(endpoint.Chain.TB, err) + require.NoError(ep.Chain.TB, err) - wrappedClientStateBz := clienttypes.MustMarshalClientState(endpoint.Chain.App.AppCodec(), CreateMockTendermintClientState(clienttypes.NewHeight(1, 5))) - wrappedClientConsensusStateBz := clienttypes.MustMarshalConsensusState(endpoint.Chain.App.AppCodec(), MockTendermintClientConsensusState) + wrappedClientStateBz := clienttypes.MustMarshalClientState(ep.Chain.App.AppCodec(), CreateMockTendermintClientState(clienttypes.NewHeight(1, 5))) + wrappedClientConsensusStateBz := clienttypes.MustMarshalConsensusState(ep.Chain.App.AppCodec(), MockTendermintClientConsensusState) clientState := types.NewClientState(wrappedClientStateBz, checksum, clienttypes.NewHeight(0, 1)) consensusState := types.NewConsensusState(wrappedClientConsensusStateBz) msg, err := clienttypes.NewMsgCreateClient( - clientState, consensusState, endpoint.Chain.SenderAccount.GetAddress().String(), + clientState, consensusState, ep.Chain.SenderAccount.GetAddress().String(), ) - require.NoError(endpoint.Chain.TB, err) + require.NoError(ep.Chain.TB, err) - res, err := endpoint.Chain.SendMsgs(msg) + res, err := ep.Chain.SendMsgs(msg) if err != nil { return err } - endpoint.ClientID, err = ibctesting.ParseClientIDFromEvents(res.Events) - require.NoError(endpoint.Chain.TB, err) + ep.ClientID, err = ibctesting.ParseClientIDFromEvents(res.Events) + require.NoError(ep.Chain.TB, err) return nil } diff --git a/modules/light-clients/08-wasm/types/client_message_test.go b/modules/light-clients/08-wasm/types/client_message_test.go index ac6026f0ed3..b62224ca735 100644 --- a/modules/light-clients/08-wasm/types/client_message_test.go +++ b/modules/light-clients/08-wasm/types/client_message_test.go @@ -6,7 +6,7 @@ import ( "github.com/cosmos/ibc-go/modules/light-clients/08-wasm/v10/types" ) -func (suite *TypesTestSuite) TestClientMessageValidateBasic() { +func (s *TypesTestSuite) TestClientMessageValidateBasic() { testCases := []struct { name string clientMessage *types.ClientMessage @@ -36,17 +36,17 @@ func (suite *TypesTestSuite) TestClientMessageValidateBasic() { } for _, tc := range testCases { - suite.Run(tc.name, func() { + s.Run(tc.name, func() { clientMessage := tc.clientMessage - suite.Require().Equal(types.Wasm, clientMessage.ClientType()) + s.Require().Equal(types.Wasm, clientMessage.ClientType()) err := clientMessage.ValidateBasic() if tc.expErr == nil { - suite.Require().NoError(err) + s.Require().NoError(err) } else { - suite.Require().Error(err) - suite.Require().ErrorIs(err, tc.expErr) + s.Require().Error(err) + s.Require().ErrorIs(err, tc.expErr) } }) } diff --git a/modules/light-clients/08-wasm/types/client_state.go b/modules/light-clients/08-wasm/types/client_state.go index 6d7604231ad..4c4299d7861 100644 --- a/modules/light-clients/08-wasm/types/client_state.go +++ b/modules/light-clients/08-wasm/types/client_state.go @@ -29,9 +29,5 @@ func (cs ClientState) Validate() error { return errorsmod.Wrap(ErrInvalidData, "data cannot be empty") } - if err := ValidateWasmChecksum(cs.Checksum); err != nil { - return err - } - - return nil + return ValidateWasmChecksum(cs.Checksum) } diff --git a/modules/light-clients/08-wasm/types/client_state_test.go b/modules/light-clients/08-wasm/types/client_state_test.go index 845dfe5ee10..2ada5d05bd2 100644 --- a/modules/light-clients/08-wasm/types/client_state_test.go +++ 
b/modules/light-clients/08-wasm/types/client_state_test.go @@ -8,7 +8,7 @@ import ( clienttypes "github.com/cosmos/ibc-go/v10/modules/core/02-client/types" ) -func (suite *TypesTestSuite) TestValidate() { +func (s *TypesTestSuite) TestValidate() { testCases := []struct { name string clientState *types.ClientState @@ -56,13 +56,13 @@ func (suite *TypesTestSuite) TestValidate() { } for _, tc := range testCases { - suite.Run(tc.name, func() { + s.Run(tc.name, func() { err := tc.clientState.Validate() if tc.expErr == nil { - suite.Require().NoError(err, tc.name) + s.Require().NoError(err, tc.name) } else { - suite.Require().Error(err, tc.name) - suite.Require().ErrorIs(err, tc.expErr) + s.Require().Error(err, tc.name) + s.Require().ErrorIs(err, tc.expErr) } }) } diff --git a/modules/light-clients/08-wasm/types/codec_test.go b/modules/light-clients/08-wasm/types/codec_test.go index 4f3eddb00c8..84bbcd3ecdf 100644 --- a/modules/light-clients/08-wasm/types/codec_test.go +++ b/modules/light-clients/08-wasm/types/codec_test.go @@ -57,8 +57,6 @@ func TestCodecTypeRegistration(t *testing.T) { } for _, tc := range testCases { - tc := tc - t.Run(tc.name, func(t *testing.T) { encodingCfg := moduletestutil.MakeTestEncodingConfig(wasm.AppModuleBasic{}) msg, err := encodingCfg.Codec.InterfaceRegistry().Resolve(tc.typeURL) diff --git a/modules/light-clients/08-wasm/types/consensus_state_test.go b/modules/light-clients/08-wasm/types/consensus_state_test.go index d3fe8ca06cc..6bc004e26ed 100644 --- a/modules/light-clients/08-wasm/types/consensus_state_test.go +++ b/modules/light-clients/08-wasm/types/consensus_state_test.go @@ -4,7 +4,7 @@ import ( "github.com/cosmos/ibc-go/modules/light-clients/08-wasm/v10/types" ) -func (suite *TypesTestSuite) TestConsensusStateValidateBasic() { +func (s *TypesTestSuite) TestConsensusStateValidateBasic() { testCases := []struct { name string consensusState *types.ConsensusState @@ -28,15 +28,15 @@ func (suite *TypesTestSuite) TestConsensusStateValidateBasic() { } for _, tc := range testCases { - suite.Run(tc.name, func() { + s.Run(tc.name, func() { // check just to increase coverage - suite.Require().Equal(types.Wasm, tc.consensusState.ClientType()) + s.Require().Equal(types.Wasm, tc.consensusState.ClientType()) err := tc.consensusState.ValidateBasic() if tc.expectPass { - suite.Require().NoError(err) + s.Require().NoError(err) } else { - suite.Require().Error(err) + s.Require().Error(err) } }) } diff --git a/modules/light-clients/08-wasm/types/genesis_test.go b/modules/light-clients/08-wasm/types/genesis_test.go index 30392d45f3c..a88f9e29dd7 100644 --- a/modules/light-clients/08-wasm/types/genesis_test.go +++ b/modules/light-clients/08-wasm/types/genesis_test.go @@ -6,7 +6,7 @@ import ( "github.com/cosmos/ibc-go/modules/light-clients/08-wasm/v10/types" ) -func (suite *TypesTestSuite) TestValidateGenesis() { +func (s *TypesTestSuite) TestValidateGenesis() { testCases := []struct { name string genState *types.GenesisState @@ -29,13 +29,12 @@ func (suite *TypesTestSuite) TestValidateGenesis() { } for _, tc := range testCases { - tc := tc err := tc.genState.Validate() if tc.expErr == nil { - suite.Require().NoError(err) + s.Require().NoError(err) } else { - suite.Require().Error(err) - suite.Require().ErrorIs(err, tc.expErr) + s.Require().Error(err) + s.Require().ErrorIs(err, tc.expErr) } } } diff --git a/modules/light-clients/08-wasm/types/msgs.go b/modules/light-clients/08-wasm/types/msgs.go index 8c17b8fdc83..db00e4acc30 100644 --- 
a/modules/light-clients/08-wasm/types/msgs.go +++ b/modules/light-clients/08-wasm/types/msgs.go @@ -54,11 +54,7 @@ func (m MsgRemoveChecksum) ValidateBasic() error { return errorsmod.Wrapf(ibcerrors.ErrInvalidAddress, "string could not be parsed as address: %v", err) } - if err := ValidateWasmChecksum(m.Checksum); err != nil { - return err - } - - return nil + return ValidateWasmChecksum(m.Checksum) } // MsgMigrateContract creates a new MsgMigrateContract instance diff --git a/modules/light-clients/08-wasm/types/msgs_test.go b/modules/light-clients/08-wasm/types/msgs_test.go index fd84565835d..1da2715752c 100644 --- a/modules/light-clients/08-wasm/types/msgs_test.go +++ b/modules/light-clients/08-wasm/types/msgs_test.go @@ -47,8 +47,6 @@ func TestMsgStoreCodeValidateBasic(t *testing.T) { } for _, tc := range testCases { - tc := tc - err := tc.msg.ValidateBasic() if tc.expErr == nil { require.NoError(t, err) @@ -58,7 +56,7 @@ func TestMsgStoreCodeValidateBasic(t *testing.T) { } } -func (suite *TypesTestSuite) TestMsgStoreCodeGetSigners() { +func (s *TypesTestSuite) TestMsgStoreCodeGetSigners() { testCases := []struct { name string address sdk.AccAddress @@ -69,20 +67,19 @@ func (suite *TypesTestSuite) TestMsgStoreCodeGetSigners() { } for _, tc := range testCases { - tc := tc - suite.Run(tc.name, func() { - suite.SetupTest() + s.Run(tc.name, func() { + s.SetupTest() address := tc.address msg := types.NewMsgStoreCode(address.String(), wasmtesting.Code) - signers, _, err := GetSimApp(suite.chainA).AppCodec().GetMsgV1Signers(msg) + signers, _, err := GetSimApp(s.chainA).AppCodec().GetMsgV1Signers(msg) if tc.expErr == nil { - suite.Require().NoError(err) - suite.Require().Equal(address.Bytes(), signers[0]) + s.Require().NoError(err) + s.Require().Equal(address.Bytes(), signers[0]) } else { - suite.Require().Error(err) - suite.Require().Equal(err.Error(), tc.expErr.Error()) + s.Require().Error(err) + s.Require().Equal(err.Error(), tc.expErr.Error()) } }) } @@ -147,8 +144,6 @@ func TestMsgMigrateContractValidateBasic(t *testing.T) { } for _, tc := range testCases { - tc := tc - err := tc.msg.ValidateBasic() if tc.expErr == nil { require.NoError(t, err) @@ -158,9 +153,9 @@ func TestMsgMigrateContractValidateBasic(t *testing.T) { } } -func (suite *TypesTestSuite) TestMsgMigrateContractGetSigners() { +func (s *TypesTestSuite) TestMsgMigrateContractGetSigners() { checksum, err := types.CreateChecksum(wasmtesting.Code) - suite.Require().NoError(err) + s.Require().NoError(err) testCases := []struct { name string @@ -172,20 +167,19 @@ func (suite *TypesTestSuite) TestMsgMigrateContractGetSigners() { } for _, tc := range testCases { - tc := tc - suite.Run(tc.name, func() { - suite.SetupTest() + s.Run(tc.name, func() { + s.SetupTest() address := tc.address msg := types.NewMsgMigrateContract(address.String(), defaultWasmClientID, checksum, []byte("{}")) - signers, _, err := GetSimApp(suite.chainA).AppCodec().GetMsgV1Signers(msg) + signers, _, err := GetSimApp(s.chainA).AppCodec().GetMsgV1Signers(msg) if tc.expErr == nil { - suite.Require().NoError(err) - suite.Require().Equal(address.Bytes(), signers[0]) + s.Require().NoError(err) + s.Require().Equal(address.Bytes(), signers[0]) } else { - suite.Require().Error(err) - suite.Require().Equal(err.Error(), tc.expErr.Error()) + s.Require().Error(err) + s.Require().Equal(err.Error(), tc.expErr.Error()) } }) } @@ -224,8 +218,6 @@ func TestMsgRemoveChecksumValidateBasic(t *testing.T) { } for _, tc := range testCases { - tc := tc - err := tc.msg.ValidateBasic() if 
tc.expErr == nil { @@ -236,9 +228,9 @@ func TestMsgRemoveChecksumValidateBasic(t *testing.T) { } } -func (suite *TypesTestSuite) TestMsgRemoveChecksumGetSigners() { +func (s *TypesTestSuite) TestMsgRemoveChecksumGetSigners() { checksum, err := types.CreateChecksum(wasmtesting.Code) - suite.Require().NoError(err) + s.Require().NoError(err) testCases := []struct { name string @@ -250,20 +242,19 @@ func (suite *TypesTestSuite) TestMsgRemoveChecksumGetSigners() { } for _, tc := range testCases { - tc := tc - suite.Run(tc.name, func() { - suite.SetupTest() + s.Run(tc.name, func() { + s.SetupTest() address := tc.address msg := types.NewMsgRemoveChecksum(address.String(), checksum) - signers, _, err := GetSimApp(suite.chainA).AppCodec().GetMsgV1Signers(msg) + signers, _, err := GetSimApp(s.chainA).AppCodec().GetMsgV1Signers(msg) if tc.expError == nil { - suite.Require().NoError(err) - suite.Require().Equal(address.Bytes(), signers[0]) + s.Require().NoError(err) + s.Require().Equal(address.Bytes(), signers[0]) } else { - suite.Require().Error(err) - suite.Require().Equal(err.Error(), tc.expError.Error()) + s.Require().Error(err) + s.Require().Equal(err.Error(), tc.expError.Error()) } }) } diff --git a/modules/light-clients/08-wasm/types/query.pb.go b/modules/light-clients/08-wasm/types/query.pb.go index bf3006ed6e5..ea740234715 100644 --- a/modules/light-clients/08-wasm/types/query.pb.go +++ b/modules/light-clients/08-wasm/types/query.pb.go @@ -367,6 +367,7 @@ func _Query_Code_Handler(srv interface{}, ctx context.Context, dec func(interfac return interceptor(ctx, in, info, handler) } +var Query_serviceDesc = _Query_serviceDesc var _Query_serviceDesc = grpc.ServiceDesc{ ServiceName: "ibc.lightclients.wasm.v1.Query", HandlerType: (*QueryServer)(nil), diff --git a/modules/light-clients/08-wasm/types/tx.pb.go b/modules/light-clients/08-wasm/types/tx.pb.go index 247fc859fd4..9f2ff386bf7 100644 --- a/modules/light-clients/08-wasm/types/tx.pb.go +++ b/modules/light-clients/08-wasm/types/tx.pb.go @@ -511,6 +511,7 @@ func _Msg_MigrateContract_Handler(srv interface{}, ctx context.Context, dec func return interceptor(ctx, in, info, handler) } +var Msg_serviceDesc = _Msg_serviceDesc var _Msg_serviceDesc = grpc.ServiceDesc{ ServiceName: "ibc.lightclients.wasm.v1.Msg", HandlerType: (*MsgServer)(nil), diff --git a/modules/light-clients/08-wasm/types/types_test.go b/modules/light-clients/08-wasm/types/types_test.go index b73ca57f190..9c0a42799ff 100644 --- a/modules/light-clients/08-wasm/types/types_test.go +++ b/modules/light-clients/08-wasm/types/types_test.go @@ -31,9 +31,9 @@ func TestWasmTestSuite(t *testing.T) { testifysuite.Run(t, new(TypesTestSuite)) } -func (suite *TypesTestSuite) SetupTest() { - suite.coordinator = ibctesting.NewCustomAppCoordinator(suite.T(), 1, setupTestingApp) - suite.chainA = suite.coordinator.GetChain(ibctesting.GetChainID(1)) +func (s *TypesTestSuite) SetupTest() { + s.coordinator = ibctesting.NewCustomAppCoordinator(s.T(), 1, setupTestingApp) + s.chainA = s.coordinator.GetChain(ibctesting.GetChainID(1)) } // GetSimApp returns the duplicated SimApp from within the 08-wasm directory. 
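The `tc := tc` copies removed from the table-driven tests above are only required on Go versions before 1.22, where a single loop variable was reused across iterations and could be captured by reference in subtests or goroutines. Assuming the module now declares go 1.22 or newer in its go.mod (an assumption, not stated in this patch), each iteration gets its own `tc`, so the shadow copy is redundant. A minimal, self-contained sketch of that semantics (hypothetical example, not part of the diff):

package main

import "fmt"

func main() {
	testCases := []struct{ name string }{{"store code"}, {"migrate contract"}, {"remove checksum"}}

	done := make(chan string, len(testCases))
	for _, tc := range testCases {
		// Under Go 1.22+ loop semantics, tc is a fresh variable on every
		// iteration, so this closure captures the per-iteration value and
		// the old `tc := tc` shadow copy is no longer needed.
		go func() { done <- tc.name }()
	}
	for range testCases {
		fmt.Println(<-done) // prints each case name exactly once (order not guaranteed)
	}
}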
diff --git a/modules/light-clients/08-wasm/types/utils.go b/modules/light-clients/08-wasm/types/utils.go index 88d333490e7..049d99c1735 100644 --- a/modules/light-clients/08-wasm/types/utils.go +++ b/modules/light-clients/08-wasm/types/utils.go @@ -46,7 +46,7 @@ type limitedReader struct { r *io.LimitedReader } -func (l *limitedReader) Read(p []byte) (n int, err error) { +func (l *limitedReader) Read(p []byte) (int, error) { if l.r.N <= 0 { return 0, ErrWasmCodeTooLarge } diff --git a/modules/light-clients/08-wasm/types/wasm.pb.go b/modules/light-clients/08-wasm/types/wasm.pb.go index ef397f3c6d2..99ed3129eb4 100644 --- a/modules/light-clients/08-wasm/types/wasm.pb.go +++ b/modules/light-clients/08-wasm/types/wasm.pb.go @@ -144,61 +144,10 @@ func (m *ClientMessage) XXX_DiscardUnknown() { var xxx_messageInfo_ClientMessage proto.InternalMessageInfo -// Checksums defines a list of all checksums that are stored -// -// Deprecated: This message is deprecated in favor of storing the checksums -// using a Collections.KeySet. -// -// Deprecated: Do not use. -type Checksums struct { - Checksums [][]byte `protobuf:"bytes,1,rep,name=checksums,proto3" json:"checksums,omitempty"` -} - -func (m *Checksums) Reset() { *m = Checksums{} } -func (m *Checksums) String() string { return proto.CompactTextString(m) } -func (*Checksums) ProtoMessage() {} -func (*Checksums) Descriptor() ([]byte, []int) { - return fileDescriptor_678928ebbdee1807, []int{3} -} -func (m *Checksums) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Checksums) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Checksums.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Checksums) XXX_Merge(src proto.Message) { - xxx_messageInfo_Checksums.Merge(m, src) -} -func (m *Checksums) XXX_Size() int { - return m.Size() -} -func (m *Checksums) XXX_DiscardUnknown() { - xxx_messageInfo_Checksums.DiscardUnknown(m) -} - -var xxx_messageInfo_Checksums proto.InternalMessageInfo - -func (m *Checksums) GetChecksums() [][]byte { - if m != nil { - return m.Checksums - } - return nil -} - func init() { proto.RegisterType((*ClientState)(nil), "ibc.lightclients.wasm.v1.ClientState") proto.RegisterType((*ConsensusState)(nil), "ibc.lightclients.wasm.v1.ConsensusState") proto.RegisterType((*ClientMessage)(nil), "ibc.lightclients.wasm.v1.ClientMessage") - proto.RegisterType((*Checksums)(nil), "ibc.lightclients.wasm.v1.Checksums") } func init() { @@ -206,29 +155,27 @@ func init() { } var fileDescriptor_678928ebbdee1807 = []byte{ - // 341 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x91, 0xb1, 0x4e, 0xf3, 0x30, - 0x10, 0xc7, 0xe3, 0xb6, 0xfa, 0xf4, 0xd5, 0x6d, 0x19, 0x22, 0x86, 0x28, 0x42, 0x69, 0x55, 0x96, - 0x82, 0x14, 0xbb, 0x85, 0x05, 0x75, 0x42, 0xad, 0x90, 0x58, 0x58, 0xca, 0xd6, 0x05, 0x25, 0xae, - 0x95, 0x58, 0x24, 0x75, 0xd5, 0x73, 0x8a, 0x78, 0x03, 0xc4, 0xc4, 0x23, 0xf0, 0x38, 0x1d, 0x3b, - 0x32, 0x21, 0xd4, 0xbe, 0x08, 0xb2, 0x9d, 0x02, 0x0b, 0x4c, 0xb9, 0xfc, 0xfd, 0xf3, 0xdd, 0x4f, - 0x3e, 0x7c, 0x2c, 0x62, 0x46, 0x33, 0x91, 0xa4, 0x8a, 0x65, 0x82, 0xcf, 0x15, 0xd0, 0x87, 0x08, - 0x72, 0xba, 0x1a, 0x98, 0x2f, 0x59, 0x2c, 0xa5, 0x92, 0xae, 0x27, 0x62, 0x46, 0x7e, 0x42, 0xc4, - 0x1c, 0xae, 0x06, 0xfe, 0x61, 0x22, 0x13, 0x69, 0x20, 0xaa, 0x2b, 0xcb, 0xfb, 0x6d, 0xdd, 0x94, - 0xc9, 0x25, 0xa7, 0x96, 
0xd7, 0xed, 0x6c, 0x65, 0x81, 0xee, 0x33, 0xc2, 0x8d, 0xb1, 0x09, 0x6e, - 0x55, 0xa4, 0xb8, 0xeb, 0xe2, 0xda, 0x2c, 0x52, 0x91, 0x87, 0x3a, 0xa8, 0xd7, 0x9c, 0x98, 0xda, - 0xf5, 0xf1, 0x7f, 0x96, 0x72, 0x76, 0x0f, 0x45, 0xee, 0x55, 0x4c, 0xfe, 0xf5, 0xef, 0x5e, 0xe1, - 0x56, 0x16, 0x29, 0x0e, 0xea, 0x2e, 0xe5, 0x5a, 0xcb, 0xab, 0x76, 0x50, 0xaf, 0x71, 0xe6, 0x13, - 0x2d, 0xaa, 0x07, 0x93, 0x72, 0xdc, 0x6a, 0x40, 0xae, 0x0d, 0x31, 0xaa, 0xad, 0xdf, 0xdb, 0xce, - 0xa4, 0x69, 0xaf, 0xd9, 0x6c, 0x58, 0x7b, 0x7a, 0x6d, 0x3b, 0xdd, 0x53, 0x7c, 0x30, 0x96, 0x73, - 0xe0, 0x73, 0x28, 0xe0, 0x57, 0x9d, 0x92, 0x3d, 0xc1, 0x2d, 0xeb, 0x7d, 0xc3, 0x01, 0xa2, 0xe4, - 0x2f, 0x34, 0xc4, 0xf5, 0x71, 0xe9, 0x0b, 0xee, 0x11, 0xae, 0xef, 0xe5, 0xc1, 0x43, 0x9d, 0x6a, - 0xaf, 0x39, 0xf9, 0x0e, 0x86, 0x15, 0x0f, 0x8d, 0xa6, 0xeb, 0x6d, 0x80, 0x36, 0xdb, 0x00, 0x7d, - 0x6c, 0x03, 0xf4, 0xb2, 0x0b, 0x9c, 0xcd, 0x2e, 0x70, 0xde, 0x76, 0x81, 0x33, 0xbd, 0x4c, 0x84, - 0x4a, 0x8b, 0x98, 0x30, 0x99, 0x53, 0x26, 0x21, 0x97, 0x40, 0x45, 0xcc, 0xc2, 0x44, 0xd2, 0x5c, - 0xce, 0x8a, 0x8c, 0x83, 0xdd, 0x5f, 0xb8, 0x5f, 0x60, 0xff, 0x22, 0x2c, 0x77, 0xd8, 0xa7, 0xea, - 0x71, 0xc1, 0x21, 0xfe, 0x67, 0x5e, 0xfd, 0xfc, 0x33, 0x00, 0x00, 0xff, 0xff, 0xdb, 0x8b, 0xe8, - 0xa1, 0xed, 0x01, 0x00, 0x00, + // 318 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x91, 0x31, 0x4f, 0x3a, 0x31, + 0x18, 0xc6, 0xaf, 0xff, 0x3f, 0x31, 0xa6, 0x80, 0xc3, 0xc5, 0xe1, 0x72, 0x43, 0x21, 0xb8, 0xa0, + 0x09, 0x2d, 0xe8, 0x62, 0x9c, 0x0c, 0xc4, 0xc4, 0xc5, 0x05, 0x37, 0x16, 0xd3, 0x2b, 0x4d, 0xaf, + 0xf1, 0x8e, 0x12, 0xde, 0x1e, 0xc6, 0x6f, 0x60, 0x9c, 0xfc, 0x08, 0x7e, 0x1c, 0x46, 0x46, 0x27, + 0x63, 0xe0, 0x8b, 0x98, 0xb6, 0x68, 0x5c, 0x74, 0xba, 0xe7, 0x9e, 0xfe, 0xfa, 0xbe, 0x4f, 0xfa, + 0xe0, 0x23, 0x9d, 0x09, 0x56, 0x68, 0x95, 0x5b, 0x51, 0x68, 0x39, 0xb3, 0xc0, 0x1e, 0x38, 0x94, + 0x6c, 0x39, 0xf0, 0x5f, 0x3a, 0x5f, 0x18, 0x6b, 0xe2, 0x44, 0x67, 0x82, 0xfe, 0x84, 0xa8, 0x3f, + 0x5c, 0x0e, 0xd2, 0x43, 0x65, 0x94, 0xf1, 0x10, 0x73, 0x2a, 0xf0, 0x69, 0xcb, 0x0d, 0x15, 0x66, + 0x21, 0x59, 0xe0, 0xdd, 0xb8, 0xa0, 0x02, 0xd0, 0x79, 0x46, 0xb8, 0x3e, 0xf2, 0xc6, 0xad, 0xe5, + 0x56, 0xc6, 0x31, 0xae, 0x4d, 0xb9, 0xe5, 0x09, 0x6a, 0xa3, 0x6e, 0x63, 0xec, 0x75, 0x9c, 0xe2, + 0x7d, 0x91, 0x4b, 0x71, 0x0f, 0x55, 0x99, 0xfc, 0xf3, 0xfe, 0xf7, 0x7f, 0x7c, 0x85, 0x9b, 0x05, + 0xb7, 0x12, 0xec, 0x5d, 0x2e, 0x5d, 0xac, 0xe4, 0x7f, 0x1b, 0x75, 0xeb, 0xa7, 0x29, 0x75, 0x41, + 0xdd, 0x62, 0xba, 0x5b, 0xb7, 0x1c, 0xd0, 0x6b, 0x4f, 0x0c, 0x6b, 0xab, 0xf7, 0x56, 0x34, 0x6e, + 0x84, 0x6b, 0xc1, 0xbb, 0xa8, 0x3d, 0xbd, 0xb6, 0xa2, 0xce, 0x09, 0x3e, 0x18, 0x99, 0x19, 0xc8, + 0x19, 0x54, 0xf0, 0x6b, 0x9c, 0x1d, 0x7b, 0x8c, 0x9b, 0x21, 0xf7, 0x8d, 0x04, 0xe0, 0xea, 0x0f, + 0x74, 0x38, 0x59, 0x6d, 0x08, 0x5a, 0x6f, 0x08, 0xfa, 0xd8, 0x10, 0xf4, 0xb2, 0x25, 0xd1, 0x7a, + 0x4b, 0xa2, 0xb7, 0x2d, 0x89, 0x26, 0x97, 0x4a, 0xdb, 0xbc, 0xca, 0xa8, 0x30, 0x25, 0x13, 0x06, + 0x4a, 0x03, 0x4c, 0x67, 0xa2, 0xa7, 0x0c, 0x2b, 0xcd, 0xb4, 0x2a, 0x24, 0x84, 0x42, 0x7a, 0x5f, + 0x8d, 0xf4, 0xcf, 0x7b, 0xbb, 0x52, 0xfa, 0xcc, 0x3e, 0xce, 0x25, 0x64, 0x7b, 0xfe, 0x19, 0xcf, + 0x3e, 0x03, 0x00, 0x00, 0xff, 0xff, 0x31, 0xe5, 0x31, 0x34, 0xbe, 0x01, 0x00, 0x00, } func (m *ClientState) Marshal() (dAtA []byte, err error) { @@ -338,38 +285,6 @@ func (m *ClientMessage) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *Checksums) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = 
make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Checksums) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Checksums) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Checksums) > 0 { - for iNdEx := len(m.Checksums) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Checksums[iNdEx]) - copy(dAtA[i:], m.Checksums[iNdEx]) - i = encodeVarintWasm(dAtA, i, uint64(len(m.Checksums[iNdEx]))) - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - func encodeVarintWasm(dAtA []byte, offset int, v uint64) int { offset -= sovWasm(v) base := offset @@ -426,21 +341,6 @@ func (m *ClientMessage) Size() (n int) { return n } -func (m *Checksums) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Checksums) > 0 { - for _, b := range m.Checksums { - l = len(b) - n += 1 + l + sovWasm(uint64(l)) - } - } - return n -} - func sovWasm(x uint64) (n int) { return (math_bits.Len64(x|1) + 6) / 7 } @@ -766,88 +666,6 @@ func (m *ClientMessage) Unmarshal(dAtA []byte) error { } return nil } -func (m *Checksums) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowWasm - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Checksums: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Checksums: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Checksums", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowWasm - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthWasm - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthWasm - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Checksums = append(m.Checksums, make([]byte, postIndex-iNdEx)) - copy(m.Checksums[len(m.Checksums)-1], dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipWasm(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthWasm - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} func skipWasm(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 diff --git a/modules/light-clients/08-wasm/wasm_test.go b/modules/light-clients/08-wasm/wasm_test.go index 6365f0272e1..9076aab5aec 100644 --- a/modules/light-clients/08-wasm/wasm_test.go +++ b/modules/light-clients/08-wasm/wasm_test.go @@ -36,13 +36,13 @@ type WasmTestSuite struct { checksum types.Checksum } -func TestWasmTestSuite(t *testing.T) { +func TestWasmTests(t *testing.T) { testifysuite.Run(t, new(WasmTestSuite)) } -func (suite *WasmTestSuite) SetupTest() { - suite.coordinator = ibctesting.NewCustomAppCoordinator(suite.T(), 1, 
setupTestingApp) - suite.chainA = suite.coordinator.GetChain(ibctesting.GetChainID(1)) +func (s *WasmTestSuite) SetupTest() { + s.coordinator = ibctesting.NewCustomAppCoordinator(s.T(), 1, setupTestingApp) + s.chainA = s.coordinator.GetChain(ibctesting.GetChainID(1)) } // GetSimApp returns the duplicated SimApp from within the 08-wasm directory. @@ -63,56 +63,56 @@ func setupTestingApp() (ibctesting.TestingApp, map[string]json.RawMessage) { } // SetupWasmWithMockVM sets up mock cometbft chain with a mock vm. -func (suite *WasmTestSuite) SetupWasmWithMockVM() { - suite.coordinator = ibctesting.NewCustomAppCoordinator(suite.T(), 1, suite.setupWasmWithMockVM) - suite.chainA = suite.coordinator.GetChain(ibctesting.GetChainID(1)) - suite.checksum = storeWasmCode(suite, wasmtesting.Code) +func (s *WasmTestSuite) SetupWasmWithMockVM() { + s.coordinator = ibctesting.NewCustomAppCoordinator(s.T(), 1, s.setupWasmWithMockVM) + s.chainA = s.coordinator.GetChain(ibctesting.GetChainID(1)) + s.checksum = storeWasmCode(s, wasmtesting.Code) } -func (suite *WasmTestSuite) setupWasmWithMockVM() (ibctesting.TestingApp, map[string]json.RawMessage) { - suite.mockVM = wasmtesting.NewMockWasmEngine() +func (s *WasmTestSuite) setupWasmWithMockVM() (ibctesting.TestingApp, map[string]json.RawMessage) { + s.mockVM = wasmtesting.NewMockWasmEngine() - suite.mockVM.InstantiateFn = func(checksum wasmvm.Checksum, env wasmvmtypes.Env, info wasmvmtypes.MessageInfo, initMsg []byte, store wasmvm.KVStore, goapi wasmvm.GoAPI, querier wasmvm.Querier, gasMeter wasmvm.GasMeter, gasLimit uint64, deserCost wasmvmtypes.UFraction) (*wasmvmtypes.ContractResult, uint64, error) { + s.mockVM.InstantiateFn = func(checksum wasmvm.Checksum, env wasmvmtypes.Env, info wasmvmtypes.MessageInfo, initMsg []byte, store wasmvm.KVStore, goapi wasmvm.GoAPI, querier wasmvm.Querier, gasMeter wasmvm.GasMeter, gasLimit uint64, deserCost wasmvmtypes.UFraction) (*wasmvmtypes.ContractResult, uint64, error) { var payload types.InstantiateMessage err := json.Unmarshal(initMsg, &payload) - suite.Require().NoError(err) + s.Require().NoError(err) - wrappedClientState, ok := clienttypes.MustUnmarshalClientState(suite.chainA.App.AppCodec(), payload.ClientState).(*ibctm.ClientState) - suite.Require().True(ok) + wrappedClientState, ok := clienttypes.MustUnmarshalClientState(s.chainA.App.AppCodec(), payload.ClientState).(*ibctm.ClientState) + s.Require().True(ok) clientState := types.NewClientState(payload.ClientState, payload.Checksum, wrappedClientState.LatestHeight) - clientStateBz := clienttypes.MustMarshalClientState(suite.chainA.App.AppCodec(), clientState) + clientStateBz := clienttypes.MustMarshalClientState(s.chainA.App.AppCodec(), clientState) store.Set(host.ClientStateKey(), clientStateBz) consensusState := types.NewConsensusState(payload.ConsensusState) - consensusStateBz := clienttypes.MustMarshalConsensusState(suite.chainA.App.AppCodec(), consensusState) + consensusStateBz := clienttypes.MustMarshalConsensusState(s.chainA.App.AppCodec(), consensusState) store.Set(host.ConsensusStateKey(clientState.LatestHeight), consensusStateBz) resp, err := json.Marshal(types.EmptyResult{}) - suite.Require().NoError(err) + s.Require().NoError(err) return &wasmvmtypes.ContractResult{Ok: &wasmvmtypes.Response{Data: resp}}, 0, nil } - suite.mockVM.RegisterQueryCallback(types.StatusMsg{}, func(checksum wasmvm.Checksum, env wasmvmtypes.Env, queryMsg []byte, store wasmvm.KVStore, goapi wasmvm.GoAPI, querier wasmvm.Querier, gasMeter wasmvm.GasMeter, gasLimit uint64, deserCost 
wasmvmtypes.UFraction) (*wasmvmtypes.QueryResult, uint64, error) { + s.mockVM.RegisterQueryCallback(types.StatusMsg{}, func(checksum wasmvm.Checksum, env wasmvmtypes.Env, queryMsg []byte, store wasmvm.KVStore, goapi wasmvm.GoAPI, querier wasmvm.Querier, gasMeter wasmvm.GasMeter, gasLimit uint64, deserCost wasmvmtypes.UFraction) (*wasmvmtypes.QueryResult, uint64, error) { resp, err := json.Marshal(types.StatusResult{Status: exported.Active.String()}) - suite.Require().NoError(err) + s.Require().NoError(err) return &wasmvmtypes.QueryResult{Ok: resp}, wasmtesting.DefaultGasUsed, nil }) db := dbm.NewMemDB() - app := simapp.NewUnitTestSimApp(log.NewNopLogger(), db, nil, true, simtestutil.EmptyAppOptions{}, suite.mockVM) + app := simapp.NewUnitTestSimApp(log.NewNopLogger(), db, nil, true, simtestutil.EmptyAppOptions{}, s.mockVM) return app, app.DefaultGenesis() } // storeWasmCode stores the wasm code on chain and returns the checksum. -func storeWasmCode(suite *WasmTestSuite, wasmCode []byte) types.Checksum { - ctx := suite.chainA.GetContext().WithBlockGasMeter(storetypes.NewInfiniteGasMeter()) +func storeWasmCode(s *WasmTestSuite, wasmCode []byte) types.Checksum { + ctx := s.chainA.GetContext().WithBlockGasMeter(storetypes.NewInfiniteGasMeter()) msg := types.NewMsgStoreCode(authtypes.NewModuleAddress(govtypes.ModuleName).String(), wasmCode) - response, err := GetSimApp(suite.chainA).WasmClientKeeper.StoreCode(ctx, msg) - suite.Require().NoError(err) - suite.Require().NotNil(response.Checksum) + response, err := GetSimApp(s.chainA).WasmClientKeeper.StoreCode(ctx, msg) + s.Require().NoError(err) + s.Require().NotNil(response.Checksum) return response.Checksum } diff --git a/modules/light-clients/09-localhost/light_client_module_test.go b/modules/light-clients/09-localhost/light_client_module_test.go index fb5b814ce6b..164e7319cf7 100644 --- a/modules/light-clients/09-localhost/light_client_module_test.go +++ b/modules/light-clients/09-localhost/light_client_module_test.go @@ -26,49 +26,49 @@ type LocalhostTestSuite struct { chain *ibctesting.TestChain } -func (suite *LocalhostTestSuite) SetupTest() { - suite.coordinator = *ibctesting.NewCoordinator(suite.T(), 1) - suite.chain = suite.coordinator.GetChain(ibctesting.GetChainID(1)) +func (s *LocalhostTestSuite) SetupTest() { + s.coordinator = *ibctesting.NewCoordinator(s.T(), 1) + s.chain = s.coordinator.GetChain(ibctesting.GetChainID(1)) } func TestLocalhostTestSuite(t *testing.T) { testifysuite.Run(t, new(LocalhostTestSuite)) } -func (suite *LocalhostTestSuite) TestInitialize() { - lightClientModule, err := suite.chain.App.GetIBCKeeper().ClientKeeper.Route(suite.chain.GetContext(), exported.LocalhostClientID) - suite.Require().NoError(err) +func (s *LocalhostTestSuite) TestInitialize() { + lightClientModule, err := s.chain.App.GetIBCKeeper().ClientKeeper.Route(s.chain.GetContext(), exported.LocalhostClientID) + s.Require().NoError(err) - err = lightClientModule.Initialize(suite.chain.GetContext(), exported.LocalhostClientID, nil, nil) - suite.Require().Error(err) + err = lightClientModule.Initialize(s.chain.GetContext(), exported.LocalhostClientID, nil, nil) + s.Require().Error(err) } -func (suite *LocalhostTestSuite) TestVerifyClientMessage() { - lightClientModule, err := suite.chain.App.GetIBCKeeper().ClientKeeper.Route(suite.chain.GetContext(), exported.LocalhostClientID) - suite.Require().NoError(err) +func (s *LocalhostTestSuite) TestVerifyClientMessage() { + lightClientModule, err := 
s.chain.App.GetIBCKeeper().ClientKeeper.Route(s.chain.GetContext(), exported.LocalhostClientID) + s.Require().NoError(err) - err = lightClientModule.VerifyClientMessage(suite.chain.GetContext(), exported.LocalhostClientID, nil) - suite.Require().Error(err) + err = lightClientModule.VerifyClientMessage(s.chain.GetContext(), exported.LocalhostClientID, nil) + s.Require().Error(err) } -func (suite *LocalhostTestSuite) TestVerifyCheckForMisbehaviour() { - lightClientModule, err := suite.chain.App.GetIBCKeeper().ClientKeeper.Route(suite.chain.GetContext(), exported.LocalhostClientID) - suite.Require().NoError(err) +func (s *LocalhostTestSuite) TestVerifyCheckForMisbehaviour() { + lightClientModule, err := s.chain.App.GetIBCKeeper().ClientKeeper.Route(s.chain.GetContext(), exported.LocalhostClientID) + s.Require().NoError(err) - suite.Require().False(lightClientModule.CheckForMisbehaviour(suite.chain.GetContext(), exported.LocalhostClientID, nil)) + s.Require().False(lightClientModule.CheckForMisbehaviour(s.chain.GetContext(), exported.LocalhostClientID, nil)) } -func (suite *LocalhostTestSuite) TestUpdateState() { - lightClientModule, err := suite.chain.App.GetIBCKeeper().ClientKeeper.Route(suite.chain.GetContext(), exported.LocalhostClientID) - suite.Require().NoError(err) +func (s *LocalhostTestSuite) TestUpdateState() { + lightClientModule, err := s.chain.App.GetIBCKeeper().ClientKeeper.Route(s.chain.GetContext(), exported.LocalhostClientID) + s.Require().NoError(err) - heights := lightClientModule.UpdateState(suite.chain.GetContext(), exported.LocalhostClientID, nil) + heights := lightClientModule.UpdateState(s.chain.GetContext(), exported.LocalhostClientID, nil) - expHeight := clienttypes.NewHeight(1, uint64(suite.chain.GetContext().BlockHeight())) - suite.Require().True(heights[0].EQ(expHeight)) + expHeight := clienttypes.NewHeight(1, uint64(s.chain.GetContext().BlockHeight())) + s.Require().True(heights[0].EQ(expHeight)) } -func (suite *LocalhostTestSuite) TestVerifyMembership() { +func (s *LocalhostTestSuite) TestVerifyMembership() { var ( path exported.Path value []byte @@ -85,18 +85,18 @@ func (suite *LocalhostTestSuite) TestVerifyMembership() { connectionEnd := connectiontypes.NewConnectionEnd( connectiontypes.OPEN, exported.LocalhostClientID, - connectiontypes.NewCounterparty(exported.LocalhostClientID, exported.LocalhostConnectionID, suite.chain.GetPrefix()), + connectiontypes.NewCounterparty(exported.LocalhostClientID, exported.LocalhostConnectionID, s.chain.GetPrefix()), connectiontypes.GetCompatibleVersions(), 0, ) - suite.chain.GetSimApp().GetIBCKeeper().ConnectionKeeper.SetConnection(suite.chain.GetContext(), exported.LocalhostConnectionID, connectionEnd) + s.chain.GetSimApp().GetIBCKeeper().ConnectionKeeper.SetConnection(s.chain.GetContext(), exported.LocalhostConnectionID, connectionEnd) merklePath := commitmenttypes.NewMerklePath(host.ConnectionKey(exported.LocalhostConnectionID)) - merklePath, err := commitmenttypes.ApplyPrefix(suite.chain.GetPrefix(), merklePath) - suite.Require().NoError(err) + merklePath, err := commitmenttypes.ApplyPrefix(s.chain.GetPrefix(), merklePath) + s.Require().NoError(err) path = merklePath - value = suite.chain.Codec.MustMarshal(&connectionEnd) + value = s.chain.Codec.MustMarshal(&connectionEnd) }, nil, }, @@ -111,14 +111,14 @@ func (suite *LocalhostTestSuite) TestVerifyMembership() { mock.Version, ) - suite.chain.GetSimApp().GetIBCKeeper().ChannelKeeper.SetChannel(suite.chain.GetContext(), mock.PortID, ibctesting.FirstChannelID, channel) + 
s.chain.GetSimApp().GetIBCKeeper().ChannelKeeper.SetChannel(s.chain.GetContext(), mock.PortID, ibctesting.FirstChannelID, channel) merklePath := commitmenttypes.NewMerklePath(host.ChannelKey(mock.PortID, ibctesting.FirstChannelID)) - merklePath, err := commitmenttypes.ApplyPrefix(suite.chain.GetPrefix(), merklePath) - suite.Require().NoError(err) + merklePath, err := commitmenttypes.ApplyPrefix(s.chain.GetPrefix(), merklePath) + s.Require().NoError(err) path = merklePath - value = suite.chain.Codec.MustMarshal(&channel) + value = s.chain.Codec.MustMarshal(&channel) }, nil, }, @@ -126,11 +126,11 @@ func (suite *LocalhostTestSuite) TestVerifyMembership() { "success: next sequence recv verification", func() { nextSeqRecv := uint64(100) - suite.chain.GetSimApp().GetIBCKeeper().ChannelKeeper.SetNextSequenceRecv(suite.chain.GetContext(), mock.PortID, ibctesting.FirstChannelID, nextSeqRecv) + s.chain.GetSimApp().GetIBCKeeper().ChannelKeeper.SetNextSequenceRecv(s.chain.GetContext(), mock.PortID, ibctesting.FirstChannelID, nextSeqRecv) merklePath := commitmenttypes.NewMerklePath(host.NextSequenceRecvKey(mock.PortID, ibctesting.FirstChannelID)) - merklePath, err := commitmenttypes.ApplyPrefix(suite.chain.GetPrefix(), merklePath) - suite.Require().NoError(err) + merklePath, err := commitmenttypes.ApplyPrefix(s.chain.GetPrefix(), merklePath) + s.Require().NoError(err) path = merklePath value = sdk.Uint64ToBigEndian(nextSeqRecv) @@ -152,11 +152,11 @@ func (suite *LocalhostTestSuite) TestVerifyMembership() { ) commitmentBz := channeltypes.CommitPacket(packet) - suite.chain.GetSimApp().GetIBCKeeper().ChannelKeeper.SetPacketCommitment(suite.chain.GetContext(), mock.PortID, ibctesting.FirstChannelID, 1, commitmentBz) + s.chain.GetSimApp().GetIBCKeeper().ChannelKeeper.SetPacketCommitment(s.chain.GetContext(), mock.PortID, ibctesting.FirstChannelID, 1, commitmentBz) merklePath := commitmenttypes.NewMerklePath(host.PacketCommitmentKey(packet.GetSourcePort(), packet.GetSourceChannel(), packet.GetSequence())) - merklePath, err := commitmenttypes.ApplyPrefix(suite.chain.GetPrefix(), merklePath) - suite.Require().NoError(err) + merklePath, err := commitmenttypes.ApplyPrefix(s.chain.GetPrefix(), merklePath) + s.Require().NoError(err) path = merklePath value = commitmentBz @@ -166,11 +166,11 @@ func (suite *LocalhostTestSuite) TestVerifyMembership() { { "success: packet acknowledgement verification", func() { - suite.chain.GetSimApp().GetIBCKeeper().ChannelKeeper.SetPacketAcknowledgement(suite.chain.GetContext(), mock.PortID, ibctesting.FirstChannelID, 1, ibctesting.MockAcknowledgement) + s.chain.GetSimApp().GetIBCKeeper().ChannelKeeper.SetPacketAcknowledgement(s.chain.GetContext(), mock.PortID, ibctesting.FirstChannelID, 1, ibctesting.MockAcknowledgement) merklePath := commitmenttypes.NewMerklePath(host.PacketAcknowledgementKey(mock.PortID, ibctesting.FirstChannelID, 1)) - merklePath, err := commitmenttypes.ApplyPrefix(suite.chain.GetPrefix(), merklePath) - suite.Require().NoError(err) + merklePath, err := commitmenttypes.ApplyPrefix(s.chain.GetPrefix(), merklePath) + s.Require().NoError(err) path = merklePath value = ibctesting.MockAcknowledgement @@ -195,8 +195,8 @@ func (suite *LocalhostTestSuite) TestVerifyMembership() { "failure: no value found at provided key path", func() { merklePath := commitmenttypes.NewMerklePath(host.PacketAcknowledgementKey(mock.PortID, ibctesting.FirstChannelID, 100)) - merklePath, err := commitmenttypes.ApplyPrefix(suite.chain.GetPrefix(), merklePath) - suite.Require().NoError(err) 
+ merklePath, err := commitmenttypes.ApplyPrefix(s.chain.GetPrefix(), merklePath) + s.Require().NoError(err) path = merklePath value = ibctesting.MockAcknowledgement @@ -214,33 +214,33 @@ func (suite *LocalhostTestSuite) TestVerifyMembership() { mock.Version, ) - suite.chain.GetSimApp().GetIBCKeeper().ChannelKeeper.SetChannel(suite.chain.GetContext(), mock.PortID, ibctesting.FirstChannelID, channel) + s.chain.GetSimApp().GetIBCKeeper().ChannelKeeper.SetChannel(s.chain.GetContext(), mock.PortID, ibctesting.FirstChannelID, channel) merklePath := commitmenttypes.NewMerklePath(host.ChannelKey(mock.PortID, ibctesting.FirstChannelID)) - merklePath, err := commitmenttypes.ApplyPrefix(suite.chain.GetPrefix(), merklePath) - suite.Require().NoError(err) + merklePath, err := commitmenttypes.ApplyPrefix(s.chain.GetPrefix(), merklePath) + s.Require().NoError(err) path = merklePath // modify the channel before marshalling to value bz channel.State = channeltypes.CLOSED - value = suite.chain.Codec.MustMarshal(&channel) + value = s.chain.Codec.MustMarshal(&channel) }, errors.New("value provided does not equal value stored at path"), }, } for _, tc := range testCases { - suite.Run(tc.name, func() { - suite.SetupTest() + s.Run(tc.name, func() { + s.SetupTest() tc.malleate() - lightClientModule, err := suite.chain.App.GetIBCKeeper().ClientKeeper.Route(suite.chain.GetContext(), exported.LocalhostClientID) - suite.Require().NoError(err) + lightClientModule, err := s.chain.App.GetIBCKeeper().ClientKeeper.Route(s.chain.GetContext(), exported.LocalhostClientID) + s.Require().NoError(err) err = lightClientModule.VerifyMembership( - suite.chain.GetContext(), + s.chain.GetContext(), exported.LocalhostClientID, clienttypes.ZeroHeight(), 0, 0, // use zero values for delay periods @@ -250,16 +250,16 @@ func (suite *LocalhostTestSuite) TestVerifyMembership() { ) if tc.expErr == nil { - suite.Require().NoError(err) + s.Require().NoError(err) } else { - suite.Require().Error(err) - suite.ErrorContains(err, tc.expErr.Error()) + s.Require().Error(err) + s.Require().ErrorContains(err, tc.expErr.Error()) } }) } } -func (suite *LocalhostTestSuite) TestVerifyNonMembership() { +func (s *LocalhostTestSuite) TestVerifyNonMembership() { var path exported.Path testCases := []struct { @@ -271,8 +271,8 @@ func (suite *LocalhostTestSuite) TestVerifyNonMembership() { "success: packet receipt absence verification", func() { merklePath := commitmenttypes.NewMerklePath(host.PacketReceiptKey(mock.PortID, ibctesting.FirstChannelID, 1)) - merklePath, err := commitmenttypes.ApplyPrefix(suite.chain.GetPrefix(), merklePath) - suite.Require().NoError(err) + merklePath, err := commitmenttypes.ApplyPrefix(s.chain.GetPrefix(), merklePath) + s.Require().NoError(err) path = merklePath }, @@ -281,11 +281,11 @@ func (suite *LocalhostTestSuite) TestVerifyNonMembership() { { "packet receipt absence verification fails", func() { - suite.chain.GetSimApp().GetIBCKeeper().ChannelKeeper.SetPacketReceipt(suite.chain.GetContext(), mock.PortID, ibctesting.FirstChannelID, 1) + s.chain.GetSimApp().GetIBCKeeper().ChannelKeeper.SetPacketReceipt(s.chain.GetContext(), mock.PortID, ibctesting.FirstChannelID, 1) merklePath := commitmenttypes.NewMerklePath(host.PacketReceiptKey(mock.PortID, ibctesting.FirstChannelID, 1)) - merklePath, err := commitmenttypes.ApplyPrefix(suite.chain.GetPrefix(), merklePath) - suite.Require().NoError(err) + merklePath, err := commitmenttypes.ApplyPrefix(s.chain.GetPrefix(), merklePath) + s.Require().NoError(err) path = merklePath }, @@ 
-308,16 +308,16 @@ func (suite *LocalhostTestSuite) TestVerifyNonMembership() { } for _, tc := range testCases { - suite.Run(tc.name, func() { - suite.SetupTest() + s.Run(tc.name, func() { + s.SetupTest() tc.malleate() - lightClientModule, err := suite.chain.App.GetIBCKeeper().ClientKeeper.Route(suite.chain.GetContext(), exported.LocalhostClientID) - suite.Require().NoError(err) + lightClientModule, err := s.chain.App.GetIBCKeeper().ClientKeeper.Route(s.chain.GetContext(), exported.LocalhostClientID) + s.Require().NoError(err) err = lightClientModule.VerifyNonMembership( - suite.chain.GetContext(), + s.chain.GetContext(), exported.LocalhostClientID, clienttypes.ZeroHeight(), 0, 0, // use zero values for delay periods @@ -326,44 +326,44 @@ func (suite *LocalhostTestSuite) TestVerifyNonMembership() { ) if tc.expError == nil { - suite.Require().NoError(err) + s.Require().NoError(err) } else { - suite.Require().Error(err) - suite.ErrorContains(err, tc.expError.Error()) + s.Require().Error(err) + s.Require().ErrorContains(err, tc.expError.Error()) } }) } } -func (suite *LocalhostTestSuite) TestStatus() { - lightClientModule, err := suite.chain.App.GetIBCKeeper().ClientKeeper.Route(suite.chain.GetContext(), exported.LocalhostClientID) - suite.Require().NoError(err) +func (s *LocalhostTestSuite) TestStatus() { + lightClientModule, err := s.chain.App.GetIBCKeeper().ClientKeeper.Route(s.chain.GetContext(), exported.LocalhostClientID) + s.Require().NoError(err) - suite.Require().Equal(exported.Active, lightClientModule.Status(suite.chain.GetContext(), exported.LocalhostClientID)) + s.Require().Equal(exported.Active, lightClientModule.Status(s.chain.GetContext(), exported.LocalhostClientID)) } -func (suite *LocalhostTestSuite) TestGetTimestampAtHeight() { - lightClientModule, err := suite.chain.App.GetIBCKeeper().ClientKeeper.Route(suite.chain.GetContext(), exported.LocalhostClientID) - suite.Require().NoError(err) +func (s *LocalhostTestSuite) TestGetTimestampAtHeight() { + lightClientModule, err := s.chain.App.GetIBCKeeper().ClientKeeper.Route(s.chain.GetContext(), exported.LocalhostClientID) + s.Require().NoError(err) - ctx := suite.chain.GetContext() + ctx := s.chain.GetContext() timestamp, err := lightClientModule.TimestampAtHeight(ctx, exported.LocalhostClientID, nil) - suite.Require().NoError(err) - suite.Require().Equal(uint64(ctx.BlockTime().UnixNano()), timestamp) + s.Require().NoError(err) + s.Require().Equal(uint64(ctx.BlockTime().UnixNano()), timestamp) } -func (suite *LocalhostTestSuite) TestRecoverClient() { - lightClientModule, err := suite.chain.App.GetIBCKeeper().ClientKeeper.Route(suite.chain.GetContext(), exported.LocalhostClientID) - suite.Require().NoError(err) +func (s *LocalhostTestSuite) TestRecoverClient() { + lightClientModule, err := s.chain.App.GetIBCKeeper().ClientKeeper.Route(s.chain.GetContext(), exported.LocalhostClientID) + s.Require().NoError(err) - err = lightClientModule.RecoverClient(suite.chain.GetContext(), exported.LocalhostClientID, exported.LocalhostClientID) - suite.Require().Error(err) + err = lightClientModule.RecoverClient(s.chain.GetContext(), exported.LocalhostClientID, exported.LocalhostClientID) + s.Require().Error(err) } -func (suite *LocalhostTestSuite) TestVerifyUpgradeAndUpdateState() { - lightClientModule, err := suite.chain.App.GetIBCKeeper().ClientKeeper.Route(suite.chain.GetContext(), exported.LocalhostClientID) - suite.Require().NoError(err) +func (s *LocalhostTestSuite) TestVerifyUpgradeAndUpdateState() { + lightClientModule, err := 
s.chain.App.GetIBCKeeper().ClientKeeper.Route(s.chain.GetContext(), exported.LocalhostClientID) + s.Require().NoError(err) - err = lightClientModule.VerifyUpgradeAndUpdateState(suite.chain.GetContext(), exported.LocalhostClientID, nil, nil, nil, nil) - suite.Require().Error(err) + err = lightClientModule.VerifyUpgradeAndUpdateState(s.chain.GetContext(), exported.LocalhostClientID, nil, nil, nil, nil) + s.Require().Error(err) } diff --git a/proto/buf.yaml b/proto/buf.yaml index 68d17ed2f07..450635e095c 100644 --- a/proto/buf.yaml +++ b/proto/buf.yaml @@ -11,7 +11,7 @@ breaking: - FILE lint: use: - - DEFAULT + - STANDARD - COMMENTS - FILE_LOWER_SNAKE_CASE except: diff --git a/proto/ibc/applications/packet_forward_middleware/v1/genesis.proto b/proto/ibc/applications/packet_forward_middleware/v1/genesis.proto new file mode 100644 index 00000000000..57af0b1a9d3 --- /dev/null +++ b/proto/ibc/applications/packet_forward_middleware/v1/genesis.proto @@ -0,0 +1,34 @@ +syntax = "proto3"; + +package ibc.applications.packet_forward_middleware.v1; + +import "gogoproto/gogo.proto"; + +option go_package = "github.com/cosmos/ibc-go/v10/modules/apps/packet-forward-middleware/types"; + +// GenesisState defines the packetforward genesis state +message GenesisState { + // key - information about forwarded packet: src_channel + // (parsedReceiver.Channel), src_port (parsedReceiver.Port), sequence value - + // information about original packet for refunding if necessary: retries, + // srcPacketSender, srcPacket.DestinationChannel, srcPacket.DestinationPort + map in_flight_packets = 2 + [(gogoproto.moretags) = "yaml:\"in_flight_packets\"", (gogoproto.nullable) = false]; +} + +// InFlightPacket contains information about original packet for +// writing the acknowledgement and refunding if necessary. +message InFlightPacket { + string original_sender_address = 1; + string refund_channel_id = 2; + string refund_port_id = 3; + string packet_src_channel_id = 4; + string packet_src_port_id = 5; + uint64 packet_timeout_timestamp = 6; + string packet_timeout_height = 7; + bytes packet_data = 8; + uint64 refund_sequence = 9; + int32 retries_remaining = 10; + uint64 timeout = 11; + bool nonrefundable = 12; +} diff --git a/proto/ibc/applications/rate_limiting/v1/genesis.proto b/proto/ibc/applications/rate_limiting/v1/genesis.proto new file mode 100644 index 00000000000..6e734b4d4c1 --- /dev/null +++ b/proto/ibc/applications/rate_limiting/v1/genesis.proto @@ -0,0 +1,16 @@ +syntax = "proto3"; +package ibc.applications.rate_limiting.v1; + +import "gogoproto/gogo.proto"; +import "ibc/applications/rate_limiting/v1/rate_limiting.proto"; + +option go_package = "github.com/cosmos/ibc-go/v10/modules/apps/rate-limiting/types"; + +// GenesisState defines the ratelimit module's genesis state. 
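The packet_forward_middleware GenesisState and InFlightPacket messages above exist to track transfers that are mid-forward so they can be acknowledged or refunded later. Forwarding itself is triggered by metadata carried in the ICS-20 memo field; the sketch below shows the conventional "forward" memo shape documented by the upstream packet-forward-middleware project. The struct and JSON keys here are illustrative assumptions for that convention, not types taken from this diff.

```go
package main

import (
	"encoding/json"
	"fmt"
)

// ForwardMetadata mirrors the payload packet-forward-middleware conventionally
// looks for under the "forward" key of a transfer memo. Field names are
// illustrative; consult the module's types package for the canonical struct.
type ForwardMetadata struct {
	Receiver string `json:"receiver"`
	Port     string `json:"port"`
	Channel  string `json:"channel"`
	Timeout  string `json:"timeout,omitempty"`
	Retries  uint8  `json:"retries,omitempty"`
}

func main() {
	memo, err := json.Marshal(map[string]ForwardMetadata{
		"forward": {
			Receiver: "cosmos1...finalrecipient", // hypothetical final-hop address
			Port:     "transfer",
			Channel:  "channel-0", // hop channel on the intermediate chain
			Timeout:  "10m",
			Retries:  2,
		},
	})
	if err != nil {
		panic(err)
	}
	// The resulting string is placed in MsgTransfer.Memo on the first hop.
	fmt.Println(string(memo))
}
```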
+message GenesisState { + repeated RateLimit rate_limits = 1 [(gogoproto.nullable) = false]; + repeated WhitelistedAddressPair whitelisted_address_pairs = 2 [(gogoproto.nullable) = false]; + repeated string blacklisted_denoms = 3; + repeated string pending_send_packet_sequence_numbers = 4; + HourEpoch hour_epoch = 5 [(gogoproto.nullable) = false]; +} diff --git a/proto/ibc/applications/rate_limiting/v1/query.proto b/proto/ibc/applications/rate_limiting/v1/query.proto new file mode 100644 index 00000000000..6d9b594159d --- /dev/null +++ b/proto/ibc/applications/rate_limiting/v1/query.proto @@ -0,0 +1,100 @@ +syntax = "proto3"; +package ibc.applications.rate_limiting.v1; + +import "gogoproto/gogo.proto"; +import "google/api/annotations.proto"; +import "ibc/applications/rate_limiting/v1/rate_limiting.proto"; + +option go_package = "github.com/cosmos/ibc-go/v10/modules/apps/rate-limiting/types"; + +// Query defines the gRPC querier service. +service Query { + // Queries all rate limits + rpc AllRateLimits(QueryAllRateLimitsRequest) returns (QueryAllRateLimitsResponse) { + option (google.api.http).get = "/ibc/apps/rate-limiting/v1/ratelimits"; + } + + // Queries a specific rate limit by channel ID and denom + // Ex: + // - /ratelimit/{channel_or_client_id}/by_denom?denom={denom} + rpc RateLimit(QueryRateLimitRequest) returns (QueryRateLimitResponse) { + option (google.api.http).get = "/ibc/apps/rate-limiting/v1/ratelimit/" + "ratelimit/{channel_or_client_id}/by_denom"; + } + + // Queries all the rate limits for a given chain + rpc RateLimitsByChainID(QueryRateLimitsByChainIDRequest) returns (QueryRateLimitsByChainIDResponse) { + option (google.api.http).get = "/ibc/apps/rate-limiting/v1/ratelimit/ratelimits/{chain_id}"; + } + + // Queries all the rate limits for a given channel ID + rpc RateLimitsByChannelOrClientID(QueryRateLimitsByChannelOrClientIDRequest) + returns (QueryRateLimitsByChannelOrClientIDResponse) { + option (google.api.http).get = "/ibc/apps/rate-limiting/v1/ratelimit/ratelimits/{channel_or_client_id}"; + } + + // Queries all blacklisted denoms + rpc AllBlacklistedDenoms(QueryAllBlacklistedDenomsRequest) returns (QueryAllBlacklistedDenomsResponse) { + option (google.api.http).get = "/ibc/apps/rate-limiting/v1/ratelimit/blacklisted_denoms"; + } + + // Queries all whitelisted address pairs + rpc AllWhitelistedAddresses(QueryAllWhitelistedAddressesRequest) returns (QueryAllWhitelistedAddressesResponse) { + option (google.api.http).get = "/ibc/apps/rate-limiting/v1/ratelimit/whitelisted_addresses"; + } +} + +// Queries all rate limits +message QueryAllRateLimitsRequest {} + +// QueryAllRateLimitsResponse returns all the rate limits stored on the chain. +message QueryAllRateLimitsResponse { + repeated RateLimit rate_limits = 1 [(gogoproto.nullable) = false]; +} + +// Queries a specific rate limit by channel ID and denom +message QueryRateLimitRequest { + string denom = 1; + string channel_or_client_id = 2; +} + +// QueryRateLimitResponse returns a rate limit by denom and channel_or_client_id combination. +message QueryRateLimitResponse { + RateLimit rate_limit = 1; +} + +// Queries all the rate limits for a given chain +message QueryRateLimitsByChainIDRequest { + string chain_id = 1; +} + +// QueryRateLimitsByChainIDResponse returns all rate-limits by a chain. 
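The Query service above follows the standard Cosmos SDK gRPC layout, so rate limits can be inspected off-chain with the generated client. A minimal sketch, assuming the client and request types that gogo/gRPC codegen would produce for this service (NewQueryClient, QueryAllRateLimitsRequest, and field names derived from the proto) and a node serving gRPC on localhost:9090; none of these names are shown in this diff.

```go
package main

import (
	"context"
	"fmt"
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"

	ratelimittypes "github.com/cosmos/ibc-go/v10/modules/apps/rate-limiting/types"
)

func main() {
	// Assumes a local node exposing the default Cosmos SDK gRPC port.
	conn, err := grpc.NewClient("localhost:9090", grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// NewQueryClient is the client that standard gogo/gRPC codegen produces
	// for the Query service defined above.
	queryClient := ratelimittypes.NewQueryClient(conn)

	res, err := queryClient.AllRateLimits(context.Background(), &ratelimittypes.QueryAllRateLimitsRequest{})
	if err != nil {
		log.Fatal(err)
	}
	for _, rl := range res.RateLimits {
		fmt.Printf("rate limit: denom=%s channel_or_client=%s\n", rl.Path.Denom, rl.Path.ChannelOrClientId)
	}
}
```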
+message QueryRateLimitsByChainIDResponse { + repeated RateLimit rate_limits = 1 [(gogoproto.nullable) = false]; +} + +// Queries all the rate limits for a given channel or client ID +message QueryRateLimitsByChannelOrClientIDRequest { + string channel_or_client_id = 1; +} + +// QueryRateLimitsByChannelOrClientIDResponse returns all rate-limits by a channel or client id. +message QueryRateLimitsByChannelOrClientIDResponse { + repeated RateLimit rate_limits = 1 [(gogoproto.nullable) = false]; +} + +// Queries all blacklisted denoms +message QueryAllBlacklistedDenomsRequest {} + +// QueryAllBlacklistedDenomsResponse returns all the blacklisted denoms. +message QueryAllBlacklistedDenomsResponse { + repeated string denoms = 1; +} + +// Queries all whitelisted address pairs +message QueryAllWhitelistedAddressesRequest {} + +// QueryAllWhitelistedAddressesResponse returns all whitelisted pairs. +message QueryAllWhitelistedAddressesResponse { + repeated WhitelistedAddressPair address_pairs = 1 [(gogoproto.nullable) = false]; +} diff --git a/proto/ibc/applications/rate_limiting/v1/rate_limiting.proto b/proto/ibc/applications/rate_limiting/v1/rate_limiting.proto new file mode 100644 index 00000000000..81fb84e5e3e --- /dev/null +++ b/proto/ibc/applications/rate_limiting/v1/rate_limiting.proto @@ -0,0 +1,76 @@ +syntax = "proto3"; +package ibc.applications.rate_limiting.v1; + +import "gogoproto/gogo.proto"; +import "google/protobuf/duration.proto"; +import "google/protobuf/timestamp.proto"; + +option go_package = "github.com/cosmos/ibc-go/v10/modules/apps/rate-limiting/types"; + +// PacketDirection defines whether the transfer packet is being sent from +// this chain or is being received on this chain +enum PacketDirection { + option (gogoproto.goproto_enum_prefix) = false; + + PACKET_SEND = 0; + PACKET_RECV = 1; +} + +// Path holds the denom and channelID that define the rate limited route +message Path { + string denom = 1; + string channel_or_client_id = 2; +} + +// Quota defines the rate limit thresholds for transfer packets +message Quota { + // MaxPercentSend defines the threshold for outflows + // The threshold is defined as a percentage (e.g. 10 indicates 10%) + string max_percent_send = 1 [(gogoproto.customtype) = "cosmossdk.io/math.Int", (gogoproto.nullable) = false]; + // MaxPercentRecv defines the threshold for inflows + // The threshold is defined as a percentage (e.g. 10 indicates 10%) + string max_percent_recv = 2 [(gogoproto.customtype) = "cosmossdk.io/math.Int", (gogoproto.nullable) = false]; + // DurationHours specifies the number of hours before the rate limit + // is reset (e.g. 24 indicates that the rate limit is reset each day) + uint64 duration_hours = 3; +} + +// Flow tracks all the inflows and outflows of a channel. +message Flow { + // Inflow defines the total amount of inbound transfers for the given + // rate limit in the current window + string inflow = 1 [(gogoproto.customtype) = "cosmossdk.io/math.Int", (gogoproto.nullable) = false]; + // Outflow defines the total amount of outbound transfers for the given + // rate limit in the current window + string outflow = 2 [(gogoproto.customtype) = "cosmossdk.io/math.Int", (gogoproto.nullable) = false]; + // ChannelValue stores the total supply of the denom at the start of + // the rate limit.
This is used as the denominator when checking + // the rate limit threshold + // The ChannelValue is fixed for the duration of the rate limit window + string channel_value = 3 [(gogoproto.customtype) = "cosmossdk.io/math.Int", (gogoproto.nullable) = false]; +} + +// RateLimit stores all the context about a given rate limit, including +// the relevant denom and channel, rate limit thresholds, and current +// progress towards the limits +message RateLimit { + Path path = 1; + Quota quota = 2; + Flow flow = 3; +} + +// WhitelistedAddressPair represents a sender-receiver combo that is +// not subject to rate limit restrictions +message WhitelistedAddressPair { + string sender = 1; + string receiver = 2; +} + +// HourEpoch is the epoch type. +message HourEpoch { + uint64 epoch_number = 1; + google.protobuf.Duration duration = 2 + [(gogoproto.nullable) = false, (gogoproto.stdduration) = true, (gogoproto.jsontag) = "duration,omitempty"]; + google.protobuf.Timestamp epoch_start_time = 3 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false]; + int64 epoch_start_height = 4; +} diff --git a/proto/ibc/applications/rate_limiting/v1/tx.proto b/proto/ibc/applications/rate_limiting/v1/tx.proto new file mode 100644 index 00000000000..9c5b6c25deb --- /dev/null +++ b/proto/ibc/applications/rate_limiting/v1/tx.proto @@ -0,0 +1,109 @@ +syntax = "proto3"; +package ibc.applications.rate_limiting.v1; + +import "amino/amino.proto"; +import "cosmos/msg/v1/msg.proto"; +import "cosmos_proto/cosmos.proto"; +import "gogoproto/gogo.proto"; + +option go_package = "github.com/cosmos/ibc-go/v10/modules/apps/rate-limiting/types"; + +// Msg service for rate limit txs +service Msg { + option (cosmos.msg.v1.service) = true; + + // Gov tx to add a new rate limit + rpc AddRateLimit(MsgAddRateLimit) returns (MsgAddRateLimitResponse); + // Gov tx to update an existing rate limit + rpc UpdateRateLimit(MsgUpdateRateLimit) returns (MsgUpdateRateLimitResponse); + // Gov tx to remove a rate limit + rpc RemoveRateLimit(MsgRemoveRateLimit) returns (MsgRemoveRateLimitResponse); + // Gov tx to reset the flow on a rate limit + rpc ResetRateLimit(MsgResetRateLimit) returns (MsgResetRateLimitResponse); +} + +// Gov tx to add a new rate limit +message MsgAddRateLimit { + option (cosmos.msg.v1.signer) = "signer"; + option (amino.name) = "ratelimit/MsgAddRateLimit"; + + // signer defines the x/gov module account address or other authority signing the message + string signer = 1 [(cosmos_proto.scalar) = "cosmos.AddressString"]; + // Denom for the rate limit, as it appears on the rate limited chain + // When rate limiting a non-native token, this will be an ibc denom + string denom = 2; + // ChannelId for the rate limit, on the side of the rate limited chain + string channel_or_client_id = 3; + // MaxPercentSend defines the threshold for outflows + // The threshold is defined as a percentage (e.g. 10 indicates 10%) + string max_percent_send = 4 [(gogoproto.customtype) = "cosmossdk.io/math.Int", (gogoproto.nullable) = false]; + // MaxPercentRecv defines the threshold for inflows + // The threshold is defined as a percentage (e.g. 10 indicates 10%) + string max_percent_recv = 5 [(gogoproto.customtype) = "cosmossdk.io/math.Int", (gogoproto.nullable) = false]; + // DurationHours specifies the number of hours before the rate limit + // is reset (e.g. 24 indicates that the rate limit is reset each day) + uint64 duration_hours = 6; +} + +// MsgAddRateLimitResponse is the return type for AddRateLimit function.
+message MsgAddRateLimitResponse {} + +// Gov tx to update an existing rate limit +message MsgUpdateRateLimit { + option (cosmos.msg.v1.signer) = "signer"; + option (amino.name) = "ratelimit/MsgUpdateRateLimit"; + + // signer defines the x/gov module account address or other authority signing the message + string signer = 1 [(cosmos_proto.scalar) = "cosmos.AddressString"]; + // Denom for the rate limit, as it appears on the rate limited chain + // When rate limiting a non-native token, this will be an ibc denom + string denom = 2; + // ChannelId for the rate limit, on the side of the rate limited chain + string channel_or_client_id = 3; + // MaxPercentSend defines the threshold for outflows + // The threshold is defined as a percentage (e.g. 10 indicates 10%) + string max_percent_send = 4 [(gogoproto.customtype) = "cosmossdk.io/math.Int", (gogoproto.nullable) = false]; + // MaxPercentRecv defines the threshold for inflows + // The threshold is defined as a percentage (e.g. 10 indicates 10%) + string max_percent_recv = 5 [(gogoproto.customtype) = "cosmossdk.io/math.Int", (gogoproto.nullable) = false]; + // DurationHours specifies the number of hours before the rate limit + // is reset (e.g. 24 indicates that the rate limit is reset each day) + uint64 duration_hours = 6; +} + +// MsgUpdateRateLimitResponse is the return type for UpdateRateLimit. +message MsgUpdateRateLimitResponse {} + +// Gov tx to remove a rate limit +message MsgRemoveRateLimit { + option (cosmos.msg.v1.signer) = "signer"; + option (amino.name) = "ratelimit/MsgRemoveRateLimit"; + + // signer defines the x/gov module account address or other authority signing the message + string signer = 1 [(cosmos_proto.scalar) = "cosmos.AddressString"]; + // Denom for the rate limit, as it appears on the rate limited chain + // When rate limiting a non-native token, this will be an ibc denom + string denom = 2; + // ChannelId for the rate limit, on the side of the rate limited chain + string channel_or_client_id = 3; +} + +// MsgRemoveRateLimitResponse is the response type for RemoveRateLimit +message MsgRemoveRateLimitResponse {} + +// Gov tx to reset the flow on a rate limit +message MsgResetRateLimit { + option (cosmos.msg.v1.signer) = "signer"; + option (amino.name) = "ratelimit/MsgResetRateLimit"; + + // signer defines the x/gov module account address or other authority signing the message + string signer = 1 [(cosmos_proto.scalar) = "cosmos.AddressString"]; + // Denom for the rate limit, as it appears on the rate limited chain + // When rate limiting a non-native token, this will be an ibc denom + string denom = 2; + // ChannelId for the rate limit, on the side of the rate limited chain + string channel_or_client_id = 3; +} + +// MsgResetRateLimitResponse is the response type for ResetRateLimit. +message MsgResetRateLimitResponse {} diff --git a/proto/ibc/applications/transfer/v1/token.proto b/proto/ibc/applications/transfer/v1/token.proto index 7a4dd889df5..cf8ac485b80 100644 --- a/proto/ibc/applications/transfer/v1/token.proto +++ b/proto/ibc/applications/transfer/v1/token.proto @@ -2,10 +2,10 @@ syntax = "proto3"; package ibc.applications.transfer.v1; -option go_package = "github.com/cosmos/ibc-go/v10/modules/apps/transfer/types"; - import "gogoproto/gogo.proto"; +option go_package = "github.com/cosmos/ibc-go/v10/modules/apps/transfer/types"; + // Token defines a struct which represents a token to be transferred.
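The Quota and Flow messages above express thresholds as a percentage of the channel value captured at the start of each window. A self-contained sketch of that arithmetic (an illustration of the documented semantics, not the module's actual keeper code):

```go
package main

import (
	"fmt"

	sdkmath "cosmossdk.io/math"
)

// exceedsSendQuota reports whether sending `amount` would push the net
// outflow of the current window past maxPercentSend of the channel value,
// i.e. threshold = channelValue * maxPercentSend / 100.
func exceedsSendQuota(outflow, inflow, channelValue, amount, maxPercentSend sdkmath.Int) bool {
	threshold := channelValue.Mul(maxPercentSend).Quo(sdkmath.NewInt(100))
	netOutflow := outflow.Sub(inflow).Add(amount)
	return netOutflow.GT(threshold)
}

func main() {
	channelValue := sdkmath.NewInt(1_000_000) // supply sampled at the start of the window
	maxPercentSend := sdkmath.NewInt(10)      // 10%, as in the proto comments

	outflow := sdkmath.NewInt(90_000)
	inflow := sdkmath.NewInt(5_000)
	amount := sdkmath.NewInt(20_000)

	// net outflow 90_000 - 5_000 + 20_000 = 105_000 exceeds the 100_000 threshold
	fmt.Println(exceedsSendQuota(outflow, inflow, channelValue, amount, maxPercentSend)) // true
}
```

The receive direction is symmetric under the same comments: net inflow against max_percent_recv of the same channel value.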
message Token { // the token denomination diff --git a/proto/ibc/applications/transfer/v1/tx.proto b/proto/ibc/applications/transfer/v1/tx.proto index e1a6a1f5e8b..8250bb48373 100644 --- a/proto/ibc/applications/transfer/v1/tx.proto +++ b/proto/ibc/applications/transfer/v1/tx.proto @@ -53,6 +53,13 @@ message MsgTransfer { string memo = 8; // optional encoding string encoding = 9; + // boolean flag to indicate if the transfer message + // is sent with the IBC v2 protocol but uses v1 channel identifiers. + // In this case, the v1 channel identifiers function as aliases to the + // underlying client ids. + // This only needs to be set if the channel IDs + // are V1 channel identifiers. + bool use_aliasing = 10 [(amino.dont_omitempty) = true]; } // MsgTransferResponse defines the Msg/Transfer response type. diff --git a/proto/ibc/core/client/v1/tx.proto b/proto/ibc/core/client/v1/tx.proto index dc1a38419a4..0cebe2bfbbd 100644 --- a/proto/ibc/core/client/v1/tx.proto +++ b/proto/ibc/core/client/v1/tx.proto @@ -4,6 +4,7 @@ package ibc.core.client.v1; option go_package = "github.com/cosmos/ibc-go/v10/modules/core/02-client/types"; +import "amino/amino.proto"; import "cosmos/msg/v1/msg.proto"; import "cosmos/upgrade/v1beta1/upgrade.proto"; import "gogoproto/gogo.proto"; @@ -23,9 +24,6 @@ service Msg { // UpgradeClient defines a rpc handler method for MsgUpgradeClient. rpc UpgradeClient(MsgUpgradeClient) returns (MsgUpgradeClientResponse); - // SubmitMisbehaviour defines a rpc handler method for MsgSubmitMisbehaviour. - rpc SubmitMisbehaviour(MsgSubmitMisbehaviour) returns (MsgSubmitMisbehaviourResponse); - // RecoverClient defines a rpc handler method for MsgRecoverClient. rpc RecoverClient(MsgRecoverClient) returns (MsgRecoverClientResponse); @@ -104,29 +102,9 @@ message MsgUpgradeClient { // MsgUpgradeClientResponse defines the Msg/UpgradeClient response type. message MsgUpgradeClientResponse {} -// MsgSubmitMisbehaviour defines an sdk.Msg type that submits Evidence for -// light client misbehaviour. -// This message has been deprecated. Use MsgUpdateClient instead. -message MsgSubmitMisbehaviour { - option deprecated = true; - option (cosmos.msg.v1.signer) = "signer"; - - option (gogoproto.goproto_getters) = false; - - // client unique identifier - string client_id = 1; - // misbehaviour used for freezing the light client - google.protobuf.Any misbehaviour = 2; - // signer address - string signer = 3; -} - -// MsgSubmitMisbehaviourResponse defines the Msg/SubmitMisbehaviour response -// type. -message MsgSubmitMisbehaviourResponse {} - // MsgRecoverClient defines the message used to recover a frozen or expired client. message MsgRecoverClient { + option (amino.name) = "cosmos-sdk/MsgRecoverClient"; option (gogoproto.goproto_getters) = false; option (cosmos.msg.v1.signer) = "signer"; diff --git a/proto/ibc/lightclients/wasm/v1/wasm.proto b/proto/ibc/lightclients/wasm/v1/wasm.proto index da7071f2439..370b394dc2e 100644 --- a/proto/ibc/lightclients/wasm/v1/wasm.proto +++ b/proto/ibc/lightclients/wasm/v1/wasm.proto @@ -31,13 +31,3 @@ message ClientMessage { bytes data = 1; } - -// Checksums defines a list of all checksums that are stored -// -// Deprecated: This message is deprecated in favor of storing the checksums -// using a Collections.KeySet. 
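The new use_aliasing flag on MsgTransfer above only matters when a transfer is sent over the IBC v2 protocol while still addressing the route by a v1 channel identifier. A hedged sketch of populating the message from Go, assuming the remaining MsgTransfer fields keep their long-standing ICS-20 layout (they are not shown in this hunk) and that gogoproto names the new field UseAliasing:

```go
package main

import (
	"fmt"
	"time"

	sdkmath "cosmossdk.io/math"
	sdk "github.com/cosmos/cosmos-sdk/types"

	transfertypes "github.com/cosmos/ibc-go/v10/modules/apps/transfer/types"
	clienttypes "github.com/cosmos/ibc-go/v10/modules/core/02-client/types"
)

func main() {
	msg := &transfertypes.MsgTransfer{
		SourcePort:       "transfer",
		SourceChannel:    "channel-0", // v1 channel ID acting as an alias for the underlying client
		Token:            sdk.NewCoin("uatom", sdkmath.NewInt(1_000_000)),
		Sender:           "cosmos1...sender",   // hypothetical
		Receiver:         "cosmos1...receiver", // hypothetical
		TimeoutHeight:    clienttypes.ZeroHeight(),
		TimeoutTimestamp: uint64(time.Now().Add(10 * time.Minute).UnixNano()),
		UseAliasing:      true, // route the v1 channel ID through the IBC v2 protocol
	}
	// In practice this message would be signed and broadcast; here we only print it.
	fmt.Println(msg.String())
}
```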
-message Checksums { - option deprecated = true; - - repeated bytes checksums = 1; -} diff --git a/scripts/go-lint-all.sh b/scripts/go-lint-all.sh index a6811de23b7..0bb1ec16f9f 100755 --- a/scripts/go-lint-all.sh +++ b/scripts/go-lint-all.sh @@ -7,10 +7,14 @@ export REPO_ROOT lint_module() { local root="$1" + local dirname="$(dirname "$root")" + echo "Linting $1" shift - cd "$(dirname "$root")" && - echo "linting $(grep "^module" go.mod) [$(date -u +"%Y-%m-%dT%H:%M:%S")]" && + set -x + cd $dirname && golangci-lint run ./... -c "${REPO_ROOT}/.golangci.yml" "$@" + set +x + } export -f lint_module diff --git a/scripts/go-test-all.py b/scripts/go-test-all.py index 2ce44e7f455..0a1dc45b0b4 100755 --- a/scripts/go-test-all.py +++ b/scripts/go-test-all.py @@ -32,7 +32,8 @@ def run_tests_for_module(module, *runargs): print(f"Running unit tests for {module}") # add runargs to test_command - test_command = f'go test -mod=readonly {" ".join(runargs)} ./...' + test_command = f'go test -mod=readonly {" ".join(runargs)}' + print(f"Running command: {test_command}") result = subprocess.run(test_command, shell=True) return result.returncode diff --git a/scripts/linkify_changelog.py b/scripts/linkify_changelog.py deleted file mode 100644 index 2492b9a89ab..00000000000 --- a/scripts/linkify_changelog.py +++ /dev/null @@ -1,15 +0,0 @@ -import fileinput -import re - -# This script goes through the provided file, and replaces any " \#", -# with the valid mark down formatted link to it. e.g. -# " [\#number](https://github.com/cosmos/cosmos-sdk/issues/) -# Note that if the number is for a PR, github will auto-redirect you when you click the link. -# It is safe to run the script multiple times in succession. -# -# Example: -# -# $ python ./scripts/linkify_changelog.py CHANGELOG.md -for line in fileinput.input(inplace=1): - line = re.sub(r"\s\\#([0-9]+)", r" [\\#\1](https://github.com/cosmos/ibc-go/issues/\1)", line.rstrip()) - print(line) diff --git a/simapp/app.go b/simapp/app.go index a38a0ca0ae0..d5c90bf4be2 100644 --- a/simapp/app.go +++ b/simapp/app.go @@ -71,9 +71,6 @@ import ( "github.com/cosmos/cosmos-sdk/x/consensus" consensusparamkeeper "github.com/cosmos/cosmos-sdk/x/consensus/keeper" consensusparamtypes "github.com/cosmos/cosmos-sdk/x/consensus/types" - "github.com/cosmos/cosmos-sdk/x/crisis" - crisiskeeper "github.com/cosmos/cosmos-sdk/x/crisis/keeper" - crisistypes "github.com/cosmos/cosmos-sdk/x/crisis/types" distr "github.com/cosmos/cosmos-sdk/x/distribution" distrkeeper "github.com/cosmos/cosmos-sdk/x/distribution/keeper" distrtypes "github.com/cosmos/cosmos-sdk/x/distribution/types" @@ -83,16 +80,9 @@ import ( govclient "github.com/cosmos/cosmos-sdk/x/gov/client" govkeeper "github.com/cosmos/cosmos-sdk/x/gov/keeper" govtypes "github.com/cosmos/cosmos-sdk/x/gov/types" - "github.com/cosmos/cosmos-sdk/x/group" - groupkeeper "github.com/cosmos/cosmos-sdk/x/group/keeper" - groupmodule "github.com/cosmos/cosmos-sdk/x/group/module" "github.com/cosmos/cosmos-sdk/x/mint" mintkeeper "github.com/cosmos/cosmos-sdk/x/mint/keeper" minttypes "github.com/cosmos/cosmos-sdk/x/mint/types" - "github.com/cosmos/cosmos-sdk/x/params" - paramsclient "github.com/cosmos/cosmos-sdk/x/params/client" - paramskeeper "github.com/cosmos/cosmos-sdk/x/params/keeper" - paramstypes "github.com/cosmos/cosmos-sdk/x/params/types" "github.com/cosmos/cosmos-sdk/x/slashing" slashingkeeper "github.com/cosmos/cosmos-sdk/x/slashing/keeper" slashingtypes "github.com/cosmos/cosmos-sdk/x/slashing/types" @@ -110,13 +100,19 @@ import ( 
icahostkeeper "github.com/cosmos/ibc-go/v10/modules/apps/27-interchain-accounts/host/keeper" icahosttypes "github.com/cosmos/ibc-go/v10/modules/apps/27-interchain-accounts/host/types" icatypes "github.com/cosmos/ibc-go/v10/modules/apps/27-interchain-accounts/types" + packetforward "github.com/cosmos/ibc-go/v10/modules/apps/packet-forward-middleware" + packetforwardkeeper "github.com/cosmos/ibc-go/v10/modules/apps/packet-forward-middleware/keeper" + packetforwardtypes "github.com/cosmos/ibc-go/v10/modules/apps/packet-forward-middleware/types" + ratelimiting "github.com/cosmos/ibc-go/v10/modules/apps/rate-limiting" + ratelimitkeeper "github.com/cosmos/ibc-go/v10/modules/apps/rate-limiting/keeper" + ratelimittypes "github.com/cosmos/ibc-go/v10/modules/apps/rate-limiting/types" "github.com/cosmos/ibc-go/v10/modules/apps/transfer" ibctransferkeeper "github.com/cosmos/ibc-go/v10/modules/apps/transfer/keeper" ibctransfertypes "github.com/cosmos/ibc-go/v10/modules/apps/transfer/types" + transferv2 "github.com/cosmos/ibc-go/v10/modules/apps/transfer/v2" ibc "github.com/cosmos/ibc-go/v10/modules/core" - ibcclienttypes "github.com/cosmos/ibc-go/v10/modules/core/02-client/types" - ibcconnectiontypes "github.com/cosmos/ibc-go/v10/modules/core/03-connection/types" porttypes "github.com/cosmos/ibc-go/v10/modules/core/05-port/types" + ibcapi "github.com/cosmos/ibc-go/v10/modules/core/api" ibcexported "github.com/cosmos/ibc-go/v10/modules/core/exported" ibckeeper "github.com/cosmos/ibc-go/v10/modules/core/keeper" solomachine "github.com/cosmos/ibc-go/v10/modules/light-clients/06-solomachine" @@ -157,8 +153,7 @@ type SimApp struct { interfaceRegistry types.InterfaceRegistry // keys to access the substores - keys map[string]*storetypes.KVStoreKey - tkeys map[string]*storetypes.TransientStoreKey + keys map[string]*storetypes.KVStoreKey // keepers AccountKeeper authkeeper.AccountKeeper @@ -168,19 +163,18 @@ type SimApp struct { MintKeeper mintkeeper.Keeper DistrKeeper distrkeeper.Keeper GovKeeper govkeeper.Keeper - CrisisKeeper *crisiskeeper.Keeper UpgradeKeeper *upgradekeeper.Keeper - ParamsKeeper paramskeeper.Keeper AuthzKeeper authzkeeper.Keeper IBCKeeper *ibckeeper.Keeper // IBC Keeper must be a pointer in the app, so we can SetRouter on it correctly - ICAControllerKeeper icacontrollerkeeper.Keeper - ICAHostKeeper icahostkeeper.Keeper + ICAControllerKeeper *icacontrollerkeeper.Keeper + ICAHostKeeper *icahostkeeper.Keeper EvidenceKeeper evidencekeeper.Keeper - TransferKeeper ibctransferkeeper.Keeper + TransferKeeper *ibctransferkeeper.Keeper FeeGrantKeeper feegrantkeeper.Keeper - GroupKeeper groupkeeper.Keeper ConsensusParamsKeeper consensusparamkeeper.Keeper CircuitKeeper circuitkeeper.Keeper + PFMKeeper *packetforwardkeeper.Keeper + RateLimitKeeper *ratelimitkeeper.Keeper // the module manager ModuleManager *module.Manager @@ -225,6 +219,7 @@ func NewSimApp( appCodec := codec.NewProtoCodec(interfaceRegistry) legacyAmino := codec.NewLegacyAmino() txConfig := authtx.NewTxConfig(appCodec, authtx.DefaultSignModes) + govAuthority := authtypes.NewModuleAddress(govtypes.ModuleName).String() std.RegisterLegacyAminoCodec(legacyAmino) std.RegisterInterfaces(interfaceRegistry) @@ -262,11 +257,11 @@ func NewSimApp( bApp.SetTxEncoder(txConfig.TxEncoder()) keys := storetypes.NewKVStoreKeys( - authtypes.StoreKey, banktypes.StoreKey, stakingtypes.StoreKey, crisistypes.StoreKey, + authtypes.StoreKey, banktypes.StoreKey, stakingtypes.StoreKey, minttypes.StoreKey, distrtypes.StoreKey, slashingtypes.StoreKey, - 
govtypes.StoreKey, group.StoreKey, paramstypes.StoreKey, ibcexported.StoreKey, upgradetypes.StoreKey, feegrant.StoreKey, + govtypes.StoreKey, ibcexported.StoreKey, upgradetypes.StoreKey, feegrant.StoreKey, evidencetypes.StoreKey, ibctransfertypes.StoreKey, icacontrollertypes.StoreKey, icahosttypes.StoreKey, - authzkeeper.StoreKey, consensusparamtypes.StoreKey, circuittypes.StoreKey, + authzkeeper.StoreKey, consensusparamtypes.StoreKey, circuittypes.StoreKey, packetforwardtypes.StoreKey, ratelimittypes.StoreKey, ) // register streaming services @@ -274,8 +269,6 @@ func NewSimApp( panic(err) } - tkeys := storetypes.NewTransientStoreKeys(paramstypes.TStoreKey) - app := &SimApp{ BaseApp: bApp, legacyAmino: legacyAmino, @@ -283,43 +276,36 @@ func NewSimApp( txConfig: txConfig, interfaceRegistry: interfaceRegistry, keys: keys, - tkeys: tkeys, } - app.ParamsKeeper = initParamsKeeper(appCodec, legacyAmino, keys[paramstypes.StoreKey], tkeys[paramstypes.TStoreKey]) - // set the BaseApp's parameter store - app.ConsensusParamsKeeper = consensusparamkeeper.NewKeeper(appCodec, runtime.NewKVStoreService(keys[consensusparamtypes.StoreKey]), authtypes.NewModuleAddress(govtypes.ModuleName).String(), runtime.EventService{}) + app.ConsensusParamsKeeper = consensusparamkeeper.NewKeeper(appCodec, runtime.NewKVStoreService(keys[consensusparamtypes.StoreKey]), govAuthority, runtime.EventService{}) bApp.SetParamStore(app.ConsensusParamsKeeper.ParamsStore) // SDK module keepers // add keepers - app.AccountKeeper = authkeeper.NewAccountKeeper(appCodec, runtime.NewKVStoreService(keys[authtypes.StoreKey]), authtypes.ProtoBaseAccount, maccPerms, authcodec.NewBech32Codec(sdk.Bech32MainPrefix), sdk.Bech32MainPrefix, authtypes.NewModuleAddress(govtypes.ModuleName).String()) + app.AccountKeeper = authkeeper.NewAccountKeeper(appCodec, runtime.NewKVStoreService(keys[authtypes.StoreKey]), authtypes.ProtoBaseAccount, maccPerms, authcodec.NewBech32Codec(sdk.Bech32MainPrefix), sdk.Bech32MainPrefix, govAuthority) app.BankKeeper = bankkeeper.NewBaseKeeper( appCodec, runtime.NewKVStoreService(keys[banktypes.StoreKey]), app.AccountKeeper, BlockedAddresses(), - authtypes.NewModuleAddress(govtypes.ModuleName).String(), + govAuthority, logger, ) app.StakingKeeper = stakingkeeper.NewKeeper( - appCodec, runtime.NewKVStoreService(keys[stakingtypes.StoreKey]), app.AccountKeeper, app.BankKeeper, authtypes.NewModuleAddress(govtypes.ModuleName).String(), authcodec.NewBech32Codec(sdk.Bech32PrefixValAddr), authcodec.NewBech32Codec(sdk.Bech32PrefixConsAddr), + appCodec, runtime.NewKVStoreService(keys[stakingtypes.StoreKey]), app.AccountKeeper, app.BankKeeper, govAuthority, authcodec.NewBech32Codec(sdk.Bech32PrefixValAddr), authcodec.NewBech32Codec(sdk.Bech32PrefixConsAddr), ) - app.MintKeeper = mintkeeper.NewKeeper(appCodec, runtime.NewKVStoreService(keys[minttypes.StoreKey]), app.StakingKeeper, app.AccountKeeper, app.BankKeeper, authtypes.FeeCollectorName, authtypes.NewModuleAddress(govtypes.ModuleName).String()) + app.MintKeeper = mintkeeper.NewKeeper(appCodec, runtime.NewKVStoreService(keys[minttypes.StoreKey]), app.StakingKeeper, app.AccountKeeper, app.BankKeeper, authtypes.FeeCollectorName, govAuthority) - app.DistrKeeper = distrkeeper.NewKeeper(appCodec, runtime.NewKVStoreService(keys[distrtypes.StoreKey]), app.AccountKeeper, app.BankKeeper, app.StakingKeeper, authtypes.FeeCollectorName, authtypes.NewModuleAddress(govtypes.ModuleName).String()) + app.DistrKeeper = distrkeeper.NewKeeper(appCodec, 
runtime.NewKVStoreService(keys[distrtypes.StoreKey]), app.AccountKeeper, app.BankKeeper, app.StakingKeeper, authtypes.FeeCollectorName, govAuthority) app.SlashingKeeper = slashingkeeper.NewKeeper( - appCodec, legacyAmino, runtime.NewKVStoreService(keys[slashingtypes.StoreKey]), app.StakingKeeper, authtypes.NewModuleAddress(govtypes.ModuleName).String(), + appCodec, legacyAmino, runtime.NewKVStoreService(keys[slashingtypes.StoreKey]), app.StakingKeeper, govAuthority, ) - invCheckPeriod := cast.ToUint(appOpts.Get(server.FlagInvCheckPeriod)) - app.CrisisKeeper = crisiskeeper.NewKeeper(appCodec, runtime.NewKVStoreService(keys[crisistypes.StoreKey]), invCheckPeriod, - app.BankKeeper, authtypes.FeeCollectorName, authtypes.NewModuleAddress(govtypes.ModuleName).String(), app.AccountKeeper.AddressCodec()) - app.FeeGrantKeeper = feegrantkeeper.NewKeeper(appCodec, runtime.NewKVStoreService(keys[feegrant.StoreKey]), app.AccountKeeper) // register the staking hooks @@ -328,18 +314,11 @@ func NewSimApp( stakingtypes.NewMultiStakingHooks(app.DistrKeeper.Hooks(), app.SlashingKeeper.Hooks()), ) - app.CircuitKeeper = circuitkeeper.NewKeeper(appCodec, runtime.NewKVStoreService(keys[circuittypes.StoreKey]), authtypes.NewModuleAddress(govtypes.ModuleName).String(), app.AccountKeeper.AddressCodec()) + app.CircuitKeeper = circuitkeeper.NewKeeper(appCodec, runtime.NewKVStoreService(keys[circuittypes.StoreKey]), govAuthority, app.AccountKeeper.AddressCodec()) app.SetCircuitBreaker(&app.CircuitKeeper) app.AuthzKeeper = authzkeeper.NewKeeper(runtime.NewKVStoreService(keys[authzkeeper.StoreKey]), appCodec, app.MsgServiceRouter(), app.AccountKeeper) - groupConfig := group.DefaultConfig() - /* - Example of setting group params: - groupConfig.MaxMetadataLen = 1000 - */ - app.GroupKeeper = groupkeeper.NewKeeper(keys[group.StoreKey], appCodec, app.MsgServiceRouter(), app.AccountKeeper, groupConfig) - // get skipUpgradeHeights from the app options skipUpgradeHeights := map[int64]bool{} for _, h := range cast.ToIntSlice(appOpts.Get(server.FlagUnsafeSkipUpgrades)) { @@ -347,10 +326,10 @@ func NewSimApp( } homePath := cast.ToString(appOpts.Get(flags.FlagHome)) // set the governance module account as the authority for conducting upgrades - app.UpgradeKeeper = upgradekeeper.NewKeeper(skipUpgradeHeights, runtime.NewKVStoreService(keys[upgradetypes.StoreKey]), appCodec, homePath, app.BaseApp, authtypes.NewModuleAddress(govtypes.ModuleName).String()) + app.UpgradeKeeper = upgradekeeper.NewKeeper(skipUpgradeHeights, runtime.NewKVStoreService(keys[upgradetypes.StoreKey]), appCodec, homePath, app.BaseApp, govAuthority) app.IBCKeeper = ibckeeper.NewKeeper( - appCodec, runtime.NewKVStoreService(keys[ibcexported.StoreKey]), app.GetSubspace(ibcexported.ModuleName), app.UpgradeKeeper, authtypes.NewModuleAddress(govtypes.ModuleName).String(), + appCodec, runtime.NewKVStoreService(keys[ibcexported.StoreKey]), app.UpgradeKeeper, govAuthority, ) govConfig := govtypes.DefaultConfig() @@ -360,7 +339,7 @@ func NewSimApp( */ govKeeper := govkeeper.NewKeeper( appCodec, runtime.NewKVStoreService(keys[govtypes.StoreKey]), app.AccountKeeper, app.BankKeeper, - app.StakingKeeper, app.DistrKeeper, app.MsgServiceRouter(), govConfig, authtypes.NewModuleAddress(govtypes.ModuleName).String(), + app.StakingKeeper, app.DistrKeeper, app.MsgServiceRouter(), govConfig, govAuthority, ) app.GovKeeper = *govKeeper.SetHooks( @@ -371,52 +350,63 @@ func NewSimApp( // ICA Controller keeper app.ICAControllerKeeper = icacontrollerkeeper.NewKeeper( - appCodec, 
runtime.NewKVStoreService(keys[icacontrollertypes.StoreKey]), app.GetSubspace(icacontrollertypes.SubModuleName), - app.IBCKeeper.ChannelKeeper, + appCodec, + runtime.NewKVStoreService(keys[icacontrollertypes.StoreKey]), app.IBCKeeper.ChannelKeeper, app.MsgServiceRouter(), - authtypes.NewModuleAddress(govtypes.ModuleName).String(), + govAuthority, ) // ICA Host keeper app.ICAHostKeeper = icahostkeeper.NewKeeper( - appCodec, runtime.NewKVStoreService(keys[icahosttypes.StoreKey]), app.GetSubspace(icahosttypes.SubModuleName), - app.IBCKeeper.ChannelKeeper, + appCodec, runtime.NewKVStoreService(keys[icahosttypes.StoreKey]), app.IBCKeeper.ChannelKeeper, app.AccountKeeper, app.MsgServiceRouter(), app.GRPCQueryRouter(), - authtypes.NewModuleAddress(govtypes.ModuleName).String(), + govAuthority, ) - // Create IBC Router - ibcRouter := porttypes.NewRouter() - - // Middleware Stacks - - // Create Transfer Keeper + // Transfer Keeper app.TransferKeeper = ibctransferkeeper.NewKeeper( - appCodec, runtime.NewKVStoreService(keys[ibctransfertypes.StoreKey]), app.GetSubspace(ibctransfertypes.ModuleName), - app.IBCKeeper.ChannelKeeper, + appCodec, + app.AccountKeeper.AddressCodec(), + runtime.NewKVStoreService(keys[ibctransfertypes.StoreKey]), app.IBCKeeper.ChannelKeeper, app.MsgServiceRouter(), app.AccountKeeper, app.BankKeeper, authtypes.NewModuleAddress(govtypes.ModuleName).String(), ) + // Packet Forward Middleware keeper + app.PFMKeeper = packetforwardkeeper.NewKeeper(appCodec, app.AccountKeeper.AddressCodec(), runtime.NewKVStoreService(keys[packetforwardtypes.StoreKey]), app.TransferKeeper, app.IBCKeeper.ChannelKeeper, app.BankKeeper, authtypes.NewModuleAddress(govtypes.ModuleName).String()) + + app.RateLimitKeeper = ratelimitkeeper.NewKeeper(appCodec, app.AccountKeeper.AddressCodec(), runtime.NewKVStoreService(keys[ratelimittypes.StoreKey]), app.IBCKeeper.ChannelKeeper, app.IBCKeeper.ClientKeeper, app.BankKeeper, govAuthority) + + // Create IBC Router + ibcRouter := porttypes.NewRouter() + ibcRouterV2 := ibcapi.NewRouter() + // Create Transfer Stack // SendPacket, since it is originating from the application to core IBC: - // transferKeeper.SendPacket -> channel.SendPacket + // transferKeeper.SendPacket -> Pf.SendPacket -> RateLim.SendPacket -> channel.SendPacket // RecvPacket, message that originates from core IBC and goes down to app, the flow is the other way - // channel.RecvPacket -> transfer.OnRecvPacket + // channel.RecvPacket -> RateLim.OnRecvPacket -> Pf.OnRecvPacket -> transfer.OnRecvPacket // transfer stack contains (from top to bottom): + // - RateLimit + // - PacketForward // - Transfer // create IBC module from bottom to top of stack - var transferStack porttypes.IBCModule = transfer.NewIBCModule(app.TransferKeeper) + transferStack := porttypes.NewIBCStackBuilder(app.IBCKeeper.ChannelKeeper) + transferStack.Base(transfer.NewIBCModule(app.TransferKeeper)). + Next(packetforward.NewIBCMiddleware(app.PFMKeeper, 0, packetforwardkeeper.DefaultForwardTransferPacketTimeoutTimestamp)). + Next(ratelimiting.NewIBCMiddleware(app.RateLimitKeeper)) // Add transfer stack to IBC Router - ibcRouter.AddRoute(ibctransfertypes.ModuleName, transferStack) + ibcRouter.AddRoute(ibctransfertypes.ModuleName, transferStack.Build()) + + // Packet Forward Middleware Stack. // Create Interchain Accounts Stack // SendPacket, since it is originating from the application to core IBC: @@ -433,8 +423,12 @@ func NewSimApp( AddRoute(icacontrollertypes.SubModuleName, icaControllerStack). 
AddRoute(icahosttypes.SubModuleName, icaHostStack) + // register the transfer v2 module. + ibcRouterV2.AddRoute(ibctransfertypes.PortID, transferv2.NewIBCModule(app.TransferKeeper)) + // Set the IBC Routers app.IBCKeeper.SetRouter(ibcRouter) + app.IBCKeeper.SetRouterV2(ibcRouterV2) clientKeeper := app.IBCKeeper.ClientKeeper storeProvider := clientKeeper.GetStoreProvider() @@ -454,10 +448,6 @@ func NewSimApp( // **** Module Options **** - // NOTE: we may consider parsing `appOpts` inside module constructors. For the moment - // we prefer to be more strict in what arguments the modules expect. - skipGenesisInvariants := cast.ToBool(appOpts.Get(crisis.FlagSkipGenesisInvariants)) - // NOTE: Any module instantiated in the module manager that is later modified // must be passed by reference here. app.ModuleManager = module.NewManager( @@ -465,28 +455,27 @@ func NewSimApp( app.AccountKeeper, app.StakingKeeper, app, txConfig, ), - auth.NewAppModule(appCodec, app.AccountKeeper, authsims.RandomGenesisAccounts, app.GetSubspace(authtypes.ModuleName)), + auth.NewAppModule(appCodec, app.AccountKeeper, authsims.RandomGenesisAccounts, nil), vesting.NewAppModule(app.AccountKeeper, app.BankKeeper), - bank.NewAppModule(appCodec, app.BankKeeper, app.AccountKeeper, app.GetSubspace(banktypes.ModuleName)), - crisis.NewAppModule(app.CrisisKeeper, skipGenesisInvariants, app.GetSubspace(crisistypes.ModuleName)), + bank.NewAppModule(appCodec, app.BankKeeper, app.AccountKeeper, nil), feegrantmodule.NewAppModule(appCodec, app.AccountKeeper, app.BankKeeper, app.FeeGrantKeeper, app.interfaceRegistry), - gov.NewAppModule(appCodec, &app.GovKeeper, app.AccountKeeper, app.BankKeeper, app.GetSubspace(govtypes.ModuleName)), - mint.NewAppModule(appCodec, app.MintKeeper, app.AccountKeeper, nil, app.GetSubspace(minttypes.ModuleName)), - slashing.NewAppModule(appCodec, app.SlashingKeeper, app.AccountKeeper, app.BankKeeper, app.StakingKeeper, app.GetSubspace(slashingtypes.ModuleName), app.interfaceRegistry), - distr.NewAppModule(appCodec, app.DistrKeeper, app.AccountKeeper, app.BankKeeper, app.StakingKeeper, app.GetSubspace(distrtypes.ModuleName)), - staking.NewAppModule(appCodec, app.StakingKeeper, app.AccountKeeper, app.BankKeeper, app.GetSubspace(stakingtypes.ModuleName)), + gov.NewAppModule(appCodec, &app.GovKeeper, app.AccountKeeper, app.BankKeeper, nil), + mint.NewAppModule(appCodec, app.MintKeeper, app.AccountKeeper, nil, nil), + slashing.NewAppModule(appCodec, app.SlashingKeeper, app.AccountKeeper, app.BankKeeper, app.StakingKeeper, nil, app.interfaceRegistry), + distr.NewAppModule(appCodec, app.DistrKeeper, app.AccountKeeper, app.BankKeeper, app.StakingKeeper, nil), + staking.NewAppModule(appCodec, app.StakingKeeper, app.AccountKeeper, app.BankKeeper, nil), upgrade.NewAppModule(app.UpgradeKeeper, app.AccountKeeper.AddressCodec()), evidence.NewAppModule(app.EvidenceKeeper), - params.NewAppModule(app.ParamsKeeper), authzmodule.NewAppModule(appCodec, app.AuthzKeeper, app.AccountKeeper, app.BankKeeper, app.interfaceRegistry), - groupmodule.NewAppModule(appCodec, app.GroupKeeper, app.AccountKeeper, app.BankKeeper, app.interfaceRegistry), consensus.NewAppModule(appCodec, app.ConsensusParamsKeeper), circuit.NewAppModule(appCodec, app.CircuitKeeper), // IBC modules ibc.NewAppModule(app.IBCKeeper), transfer.NewAppModule(app.TransferKeeper), - ica.NewAppModule(&app.ICAControllerKeeper, &app.ICAHostKeeper), + ica.NewAppModule(app.ICAControllerKeeper, app.ICAHostKeeper), + packetforward.NewAppModule(app.PFMKeeper), + 
ratelimiting.NewAppModule(app.RateLimitKeeper), // IBC light clients ibctm.NewAppModule(tmLightClientModule), @@ -501,11 +490,7 @@ func NewSimApp( app.ModuleManager, map[string]module.AppModuleBasic{ genutiltypes.ModuleName: genutil.NewAppModuleBasic(genutiltypes.DefaultMessageValidator), - govtypes.ModuleName: gov.NewAppModuleBasic( - []govclient.ProposalHandler{ - paramsclient.ProposalHandler, - }, - ), + govtypes.ModuleName: gov.NewAppModuleBasic([]govclient.ProposalHandler{}), }) app.BasicModuleManager.RegisterLegacyAminoCodec(legacyAmino) app.BasicModuleManager.RegisterInterfaces(interfaceRegistry) @@ -527,21 +512,23 @@ func NewSimApp( evidencetypes.ModuleName, stakingtypes.ModuleName, ibcexported.ModuleName, + packetforwardtypes.ModuleName, ibctransfertypes.ModuleName, genutiltypes.ModuleName, authz.ModuleName, icatypes.ModuleName, + ratelimittypes.ModuleName, ) app.ModuleManager.SetOrderEndBlockers( - crisistypes.ModuleName, govtypes.ModuleName, stakingtypes.ModuleName, ibcexported.ModuleName, + packetforwardtypes.ModuleName, ibctransfertypes.ModuleName, genutiltypes.ModuleName, feegrant.ModuleName, icatypes.ModuleName, - group.ModuleName, + ratelimittypes.ModuleName, ) // NOTE: The genutils module must occur after staking so that pools are @@ -550,10 +537,10 @@ func NewSimApp( genesisModuleOrder := []string{ authtypes.ModuleName, banktypes.ModuleName, distrtypes.ModuleName, stakingtypes.ModuleName, - slashingtypes.ModuleName, govtypes.ModuleName, minttypes.ModuleName, crisistypes.ModuleName, + slashingtypes.ModuleName, govtypes.ModuleName, minttypes.ModuleName, ibcexported.ModuleName, genutiltypes.ModuleName, evidencetypes.ModuleName, authz.ModuleName, ibctransfertypes.ModuleName, - icatypes.ModuleName, feegrant.ModuleName, paramstypes.ModuleName, upgradetypes.ModuleName, - vestingtypes.ModuleName, group.ModuleName, consensusparamtypes.ModuleName, circuittypes.ModuleName, + packetforwardtypes.ModuleName, icatypes.ModuleName, feegrant.ModuleName, upgradetypes.ModuleName, + vestingtypes.ModuleName, consensusparamtypes.ModuleName, circuittypes.ModuleName, ratelimittypes.ModuleName, } app.ModuleManager.SetOrderInitGenesis(genesisModuleOrder...) app.ModuleManager.SetOrderExportGenesis(genesisModuleOrder...) @@ -561,7 +548,6 @@ func NewSimApp( // Uncomment if you want to set a custom migration order here. // app.ModuleManager.SetOrderMigrations(custom order) - app.ModuleManager.RegisterInvariants(app.CrisisKeeper) app.configurator = module.NewConfigurator(app.appCodec, app.MsgServiceRouter(), app.GRPCQueryRouter()) err := app.ModuleManager.RegisterServices(app.configurator) if err != nil { @@ -588,7 +574,7 @@ func NewSimApp( // NOTE: this is not required apps that don't use the simulator for fuzz testing // transactions overrideModules := map[string]module.AppModuleSimulation{ - authtypes.ModuleName: auth.NewAppModule(app.appCodec, app.AccountKeeper, authsims.RandomGenesisAccounts, app.GetSubspace(authtypes.ModuleName)), + authtypes.ModuleName: auth.NewAppModule(app.appCodec, app.AccountKeeper, authsims.RandomGenesisAccounts, nil), } app.simulationManager = module.NewSimulationManagerFromAppModules(app.ModuleManager.Modules, overrideModules) @@ -596,7 +582,6 @@ func NewSimApp( // initialize stores app.MountKVStores(keys) - app.MountTransientStores(tkeys) // initialize BaseApp app.SetInitChainer(app.InitChainer) @@ -789,14 +774,6 @@ func (app *SimApp) GetStoreKeys() []storetypes.StoreKey { return keys } -// GetSubspace returns a param subspace for a given module name. 
-// -// NOTE: This is solely to be used for testing purposes. -func (app *SimApp) GetSubspace(moduleName string) paramstypes.Subspace { - subspace, _ := app.ParamsKeeper.GetSubspace(moduleName) - return subspace -} - // SimulationManager implements the SimulationApp interface func (app *SimApp) SimulationManager() *module.SimulationManager { return app.simulationManager @@ -866,18 +843,3 @@ func BlockedAddresses() map[string]bool { return modAccAddrs } - -// initParamsKeeper init params keeper and its subspaces -func initParamsKeeper(appCodec codec.BinaryCodec, legacyAmino *codec.LegacyAmino, key, tkey storetypes.StoreKey) paramskeeper.Keeper { - paramsKeeper := paramskeeper.NewKeeper(appCodec, legacyAmino, key, tkey) - - // register the key tables for legacy param subspaces - keyTable := ibcclienttypes.ParamKeyTable() - keyTable.RegisterParamSet(&ibcconnectiontypes.Params{}) - paramsKeeper.Subspace(ibcexported.ModuleName).WithKeyTable(keyTable) - paramsKeeper.Subspace(ibctransfertypes.ModuleName).WithKeyTable(ibctransfertypes.ParamKeyTable()) - paramsKeeper.Subspace(icacontrollertypes.SubModuleName).WithKeyTable(icacontrollertypes.ParamKeyTable()) - paramsKeeper.Subspace(icahosttypes.SubModuleName).WithKeyTable(icahosttypes.ParamKeyTable()) - - return paramsKeeper -} diff --git a/simapp/export.go b/simapp/export.go index 6f35c37e4b5..99dc99bbe2d 100644 --- a/simapp/export.go +++ b/simapp/export.go @@ -66,13 +66,10 @@ func (app *SimApp) prepForZeroHeightGenesis(ctx sdk.Context, jailAllowedAddrs [] allowedAddrsMap[addr] = true } - /* Just to be safe, assert the invariants on current state. */ - app.CrisisKeeper.AssertInvariants(ctx) - /* Handle fee distribution state. */ // withdraw all validator commission - err := app.StakingKeeper.IterateValidators(ctx, func(_ int64, val stakingtypes.ValidatorI) (stop bool) { + err := app.StakingKeeper.IterateValidators(ctx, func(_ int64, val stakingtypes.ValidatorI) bool { valBz, err := app.StakingKeeper.ValidatorAddressCodec().StringToBytes(val.GetOperator()) if err != nil { panic(err) @@ -114,7 +111,7 @@ func (app *SimApp) prepForZeroHeightGenesis(ctx sdk.Context, jailAllowedAddrs [] ctx = ctx.WithBlockHeight(0) // reinitialize all validators - err = app.StakingKeeper.IterateValidators(ctx, func(_ int64, val stakingtypes.ValidatorI) (stop bool) { + err = app.StakingKeeper.IterateValidators(ctx, func(_ int64, val stakingtypes.ValidatorI) bool { valBz, err := sdk.ValAddressFromBech32(val.GetOperator()) if err != nil { panic(err) @@ -168,7 +165,7 @@ func (app *SimApp) prepForZeroHeightGenesis(ctx sdk.Context, jailAllowedAddrs [] /* Handle staking state. 
*/ // iterate through redelegations, reset creation height - err = app.StakingKeeper.IterateRedelegations(ctx, func(_ int64, red stakingtypes.Redelegation) (stop bool) { + err = app.StakingKeeper.IterateRedelegations(ctx, func(_ int64, red stakingtypes.Redelegation) bool { for i := range red.Entries { red.Entries[i].CreationHeight = 0 } @@ -183,7 +180,7 @@ func (app *SimApp) prepForZeroHeightGenesis(ctx sdk.Context, jailAllowedAddrs [] } // iterate through unbonding delegations, reset creation height - err = app.StakingKeeper.IterateUnbondingDelegations(ctx, func(_ int64, ubd stakingtypes.UnbondingDelegation) (stop bool) { + err = app.StakingKeeper.IterateUnbondingDelegations(ctx, func(_ int64, ubd stakingtypes.UnbondingDelegation) bool { for i := range ubd.Entries { ubd.Entries[i].CreationHeight = 0 } @@ -234,7 +231,7 @@ func (app *SimApp) prepForZeroHeightGenesis(ctx sdk.Context, jailAllowedAddrs [] // reset start height on signing infos err = app.SlashingKeeper.IterateValidatorSigningInfos( ctx, - func(addr sdk.ConsAddress, info slashingtypes.ValidatorSigningInfo) (stop bool) { + func(addr sdk.ConsAddress, info slashingtypes.ValidatorSigningInfo) bool { info.StartHeight = 0 err = app.SlashingKeeper.SetValidatorSigningInfo(ctx, addr, info) if err != nil { diff --git a/simapp/go.mod b/simapp/go.mod index 0b12feb8110..513e0c0c65e 100644 --- a/simapp/go.mod +++ b/simapp/go.mod @@ -1,6 +1,6 @@ module github.com/cosmos/ibc-go/simapp -go 1.23.8 +go 1.24.3 replace ( github.com/cosmos/ibc-go/v10 => ../ @@ -11,7 +11,7 @@ require ( cosmossdk.io/api v0.9.2 cosmossdk.io/client/v2 v2.0.0-beta.9 cosmossdk.io/core v0.11.3 - cosmossdk.io/log v1.6.0 + cosmossdk.io/log v1.6.1 cosmossdk.io/store v1.1.2 cosmossdk.io/tools/confix v0.1.2 cosmossdk.io/x/circuit v0.2.0 @@ -20,27 +20,27 @@ require ( cosmossdk.io/x/tx v0.14.0 cosmossdk.io/x/upgrade v0.2.0 github.com/cometbft/cometbft v0.38.17 - github.com/cosmos/cosmos-db v1.1.1 - github.com/cosmos/cosmos-sdk v0.53.0 + github.com/cosmos/cosmos-db v1.1.3 + github.com/cosmos/cosmos-sdk v0.53.4 github.com/cosmos/gogoproto v1.7.0 - github.com/cosmos/ibc-go/v10 v10.0.0 - github.com/spf13/cast v1.8.0 - github.com/spf13/cobra v1.9.1 + github.com/cosmos/ibc-go/v10 v10.3.0 + github.com/spf13/cast v1.9.2 + github.com/spf13/cobra v1.10.1 github.com/spf13/viper v1.20.1 - github.com/stretchr/testify v1.10.0 + github.com/stretchr/testify v1.11.1 ) require ( - cel.dev/expr v0.20.0 // indirect + cel.dev/expr v0.24.0 // indirect cloud.google.com/go v0.116.0 // indirect cloud.google.com/go/auth v0.14.1 // indirect cloud.google.com/go/auth/oauth2adapt v0.2.7 // indirect - cloud.google.com/go/compute/metadata v0.6.0 // indirect + cloud.google.com/go/compute/metadata v0.7.0 // indirect cloud.google.com/go/iam v1.2.2 // indirect cloud.google.com/go/monitoring v1.21.2 // indirect cloud.google.com/go/storage v1.49.0 // indirect - cosmossdk.io/collections v1.2.1 // indirect - cosmossdk.io/depinject v1.2.0 // indirect + cosmossdk.io/collections v1.3.1 // indirect + cosmossdk.io/depinject v1.2.1 // indirect cosmossdk.io/errors v1.0.2 // indirect cosmossdk.io/math v1.5.3 // indirect cosmossdk.io/schema v1.1.0 // indirect @@ -49,7 +49,7 @@ require ( github.com/99designs/keyring v1.2.2 // indirect github.com/DataDog/datadog-go v4.8.3+incompatible // indirect github.com/DataDog/zstd v1.5.7 // indirect - github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.26.0 // indirect + github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.29.0 // indirect 
github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.48.1 // indirect github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.48.1 // indirect github.com/Microsoft/go-winio v0.6.2 // indirect @@ -58,13 +58,13 @@ require ( github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d // indirect github.com/bgentry/speakeasy v0.2.0 // indirect github.com/bits-and-blooms/bitset v1.22.0 // indirect - github.com/bytedance/sonic v1.13.2 // indirect - github.com/bytedance/sonic/loader v0.2.4 // indirect + github.com/bytedance/sonic v1.14.0 // indirect + github.com/bytedance/sonic/loader v0.3.0 // indirect github.com/cenkalti/backoff/v4 v4.3.0 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/chzyer/readline v1.5.1 // indirect github.com/cloudwego/base64x v0.1.5 // indirect - github.com/cncf/xds/go v0.0.0-20250121191232-2f005788dc42 // indirect + github.com/cncf/xds/go v0.0.0-20250501225837-2ac532fd4443 // indirect github.com/cockroachdb/apd/v2 v2.0.2 // indirect github.com/cockroachdb/errors v1.12.0 // indirect github.com/cockroachdb/fifo v0.0.0-20240616162244-4768e80dfb9a // indirect @@ -93,22 +93,22 @@ require ( github.com/emicklei/dot v1.6.2 // indirect github.com/envoyproxy/go-control-plane/envoy v1.32.4 // indirect github.com/envoyproxy/protoc-gen-validate v1.2.1 // indirect - github.com/ethereum/go-ethereum v1.15.11 // indirect + github.com/ethereum/go-ethereum v1.16.3 // indirect github.com/fatih/color v1.17.0 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect github.com/fsnotify/fsnotify v1.9.0 // indirect - github.com/getsentry/sentry-go v0.32.0 // indirect - github.com/go-jose/go-jose/v4 v4.0.5 // indirect + github.com/getsentry/sentry-go v0.33.0 // indirect + github.com/go-jose/go-jose/v4 v4.1.1 // indirect github.com/go-kit/kit v0.13.0 // indirect github.com/go-kit/log v0.2.1 // indirect github.com/go-logfmt/logfmt v0.6.0 // indirect - github.com/go-logr/logr v1.4.2 // indirect + github.com/go-logr/logr v1.4.3 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-viper/mapstructure/v2 v2.2.1 // indirect github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2 // indirect github.com/gogo/googleapis v1.4.1 // indirect github.com/gogo/protobuf v1.3.2 // indirect - github.com/golang/glog v1.2.4 // indirect + github.com/golang/glog v1.2.5 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.4 // indirect github.com/golang/snappy v0.0.5-0.20231225225746-43d5d4cd4e0e // indirect @@ -179,7 +179,7 @@ require ( github.com/sasha-s/go-deadlock v0.3.5 // indirect github.com/sourcegraph/conc v0.3.0 // indirect github.com/spf13/afero v1.12.0 // indirect - github.com/spf13/pflag v1.0.6 // indirect + github.com/spf13/pflag v1.0.9 // indirect github.com/spiffe/go-spiffe/v2 v2.5.0 // indirect github.com/subosito/gotenv v1.6.0 // indirect github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d // indirect @@ -193,37 +193,38 @@ require ( go.etcd.io/bbolt v1.4.0-alpha.1 // indirect go.opencensus.io v0.24.0 // indirect go.opentelemetry.io/auto/sdk v1.1.0 // indirect - go.opentelemetry.io/contrib/detectors/gcp v1.34.0 // indirect - go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.58.0 // indirect + go.opentelemetry.io/contrib/detectors/gcp v1.36.0 // indirect + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.60.0 // indirect 
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.59.0 // indirect - go.opentelemetry.io/otel v1.34.0 // indirect - go.opentelemetry.io/otel/metric v1.34.0 // indirect - go.opentelemetry.io/otel/sdk v1.34.0 // indirect - go.opentelemetry.io/otel/sdk/metric v1.34.0 // indirect - go.opentelemetry.io/otel/trace v1.34.0 // indirect + go.opentelemetry.io/otel v1.37.0 // indirect + go.opentelemetry.io/otel/metric v1.37.0 // indirect + go.opentelemetry.io/otel/sdk v1.37.0 // indirect + go.opentelemetry.io/otel/sdk/metric v1.37.0 // indirect + go.opentelemetry.io/otel/trace v1.37.0 // indirect go.uber.org/mock v0.5.2 // indirect go.uber.org/multierr v1.11.0 // indirect - golang.org/x/arch v0.15.0 // indirect - golang.org/x/crypto v0.37.0 // indirect + go.yaml.in/yaml/v2 v2.4.2 // indirect + golang.org/x/arch v0.17.0 // indirect + golang.org/x/crypto v0.39.0 // indirect golang.org/x/exp v0.0.0-20250305212735-054e65f0b394 // indirect - golang.org/x/net v0.39.0 // indirect - golang.org/x/oauth2 v0.27.0 // indirect - golang.org/x/sync v0.13.0 // indirect - golang.org/x/sys v0.32.0 // indirect - golang.org/x/term v0.31.0 // indirect - golang.org/x/text v0.24.0 // indirect + golang.org/x/net v0.41.0 // indirect + golang.org/x/oauth2 v0.30.0 // indirect + golang.org/x/sync v0.15.0 // indirect + golang.org/x/sys v0.33.0 // indirect + golang.org/x/term v0.32.0 // indirect + golang.org/x/text v0.26.0 // indirect golang.org/x/time v0.10.0 // indirect google.golang.org/api v0.222.0 // indirect google.golang.org/genproto v0.0.0-20241118233622-e639e219e697 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20250414145226-207652e42e2e // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20250422160041-2d3770c4ea7f // indirect - google.golang.org/grpc v1.72.0 // indirect - google.golang.org/protobuf v1.36.6 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20250707201910-8d1bb00bc6a7 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20250707201910-8d1bb00bc6a7 // indirect + google.golang.org/grpc v1.75.0 // indirect + google.golang.org/protobuf v1.36.8 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect gotest.tools/v3 v3.5.2 // indirect nhooyr.io/websocket v1.8.11 // indirect pgregory.net/rapid v1.2.0 // indirect rsc.io/qr v0.2.0 // indirect - sigs.k8s.io/yaml v1.4.0 // indirect + sigs.k8s.io/yaml v1.6.0 // indirect ) diff --git a/simapp/go.sum b/simapp/go.sum index 6f11dd64775..d04055c2391 100644 --- a/simapp/go.sum +++ b/simapp/go.sum @@ -1,5 +1,5 @@ -cel.dev/expr v0.20.0 h1:OunBvVCfvpWlt4dN7zg3FM6TDkzOePe1+foGJ9AXeeI= -cel.dev/expr v0.20.0/go.mod h1:MrpN08Q+lEBs+bGYdLxxHkZoUSsCp0nSKTs0nTymJgw= +cel.dev/expr v0.24.0 h1:56OvJKSH3hDGL0ml5uSxZmz3/3Pq4tJ+fb1unVLAFcY= +cel.dev/expr v0.24.0/go.mod h1:hLPLo1W4QUmuYdA72RBX06QTs6MXw941piREPl3Yfiw= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= @@ -184,8 +184,8 @@ cloud.google.com/go/compute/metadata v0.1.0/go.mod h1:Z1VN+bulIf6bt4P/C37K4DyZYZ cloud.google.com/go/compute/metadata v0.2.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= cloud.google.com/go/compute/metadata v0.2.1/go.mod h1:jgHgmJd2RKBGzXqF5LR2EZMGxBkeanZ9wwa75XHJgOM= cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= -cloud.google.com/go/compute/metadata v0.6.0 
h1:A6hENjEsCDtC1k8byVsgwvVcioamEHvZ4j01OwKxG9I= -cloud.google.com/go/compute/metadata v0.6.0/go.mod h1:FjyFAW1MW0C203CEOMDTu3Dk1FlqW3Rga40jzHL4hfg= +cloud.google.com/go/compute/metadata v0.7.0 h1:PBWF+iiAerVNe8UCHxdOt6eHLVc3ydFeOCw78U8ytSU= +cloud.google.com/go/compute/metadata v0.7.0/go.mod h1:j5MvL9PprKL39t166CoB1uVHfQMs4tFQZZcKwksXUjo= cloud.google.com/go/contactcenterinsights v1.3.0/go.mod h1:Eu2oemoePuEFc/xKFPjbTuPSj0fYJcPls9TFlPNnHHY= cloud.google.com/go/contactcenterinsights v1.4.0/go.mod h1:L2YzkGbPsv+vMQMCADxJoT9YiTTnSEd6fEvCeHTYVck= cloud.google.com/go/contactcenterinsights v1.6.0/go.mod h1:IIDlT6CLcDoyv79kDv8iWxMSTZhLxSCofVV5W6YFM/w= @@ -618,16 +618,16 @@ cosmossdk.io/api v0.9.2 h1:9i9ptOBdmoIEVEVWLtYYHjxZonlF/aOVODLFaxpmNtg= cosmossdk.io/api v0.9.2/go.mod h1:CWt31nVohvoPMTlPv+mMNCtC0a7BqRdESjCsstHcTkU= cosmossdk.io/client/v2 v2.0.0-beta.9 h1:xc06zg4G858/pK5plhf8RCfo+KR2mdDKJNrEkfrVAqc= cosmossdk.io/client/v2 v2.0.0-beta.9/go.mod h1:pHf3CCHX5gmbL9rDCVbXhGI2+/DdAVTEZSLpdd5V9Zs= -cosmossdk.io/collections v1.2.1 h1:mAlNMs5vJwkda4TA+k5q/43p24RVAQ/qyDrjANu3BXE= -cosmossdk.io/collections v1.2.1/go.mod h1:PSsEJ/fqny0VPsHLFT6gXDj/2C1tBOTS9eByK0+PBFU= +cosmossdk.io/collections v1.3.1 h1:09e+DUId2brWsNOQ4nrk+bprVmMUaDH9xvtZkeqIjVw= +cosmossdk.io/collections v1.3.1/go.mod h1:ynvkP0r5ruAjbmedE+vQ07MT6OtJ0ZIDKrtJHK7Q/4c= cosmossdk.io/core v0.11.3 h1:mei+MVDJOwIjIniaKelE3jPDqShCc/F4LkNNHh+4yfo= cosmossdk.io/core v0.11.3/go.mod h1:9rL4RE1uDt5AJ4Tg55sYyHWXA16VmpHgbe0PbJc6N2Y= -cosmossdk.io/depinject v1.2.0 h1:6NW/FSK1IkWTrX7XxUpBmX1QMBozpEI9SsWkKTBc5zw= -cosmossdk.io/depinject v1.2.0/go.mod h1:pvitjtUxZZZTQESKNS9KhGjWVslJZxtO9VooRJYyPjk= +cosmossdk.io/depinject v1.2.1 h1:eD6FxkIjlVaNZT+dXTQuwQTKZrFZ4UrfCq1RKgzyhMw= +cosmossdk.io/depinject v1.2.1/go.mod h1:lqQEycz0H2JXqvOgVwTsjEdMI0plswI7p6KX+MVqFOM= cosmossdk.io/errors v1.0.2 h1:wcYiJz08HThbWxd/L4jObeLaLySopyyuUFB5w4AGpCo= cosmossdk.io/errors v1.0.2/go.mod h1:0rjgiHkftRYPj//3DrD6y8hcm40HcPv/dR4R/4efr0k= -cosmossdk.io/log v1.6.0 h1:SJIOmJ059wi1piyRgNRXKXhlDXGqnB5eQwhcZKv2tOk= -cosmossdk.io/log v1.6.0/go.mod h1:5cXXBvfBkR2/BcXmosdCSLXllvgSjphrrDVdfVRmBGM= +cosmossdk.io/log v1.6.1 h1:YXNwAgbDwMEKwDlCdH8vPcoggma48MgZrTQXCfmMBeI= +cosmossdk.io/log v1.6.1/go.mod h1:gMwsWyyDBjpdG9u2avCFdysXqxq28WJapJvu+vF1y+E= cosmossdk.io/math v1.5.3 h1:WH6tu6Z3AUCeHbeOSHg2mt9rnoiUWVWaQ2t6Gkll96U= cosmossdk.io/math v1.5.3/go.mod h1:uqcZv7vexnhMFJF+6zh9EWdm/+Ylyln34IvPnBauPCQ= cosmossdk.io/schema v1.1.0 h1:mmpuz3dzouCoyjjcMcA/xHBEmMChN+EHh8EHxHRHhzE= @@ -664,8 +664,8 @@ github.com/DataDog/datadog-go v4.8.3+incompatible h1:fNGaYSuObuQb5nzeTQqowRAd9bp github.com/DataDog/datadog-go v4.8.3+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= github.com/DataDog/zstd v1.5.7 h1:ybO8RBeh29qrxIhCA9E8gKY6xfONU9T6G6aP9DTKfLE= github.com/DataDog/zstd v1.5.7/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw= -github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.26.0 h1:f2Qw/Ehhimh5uO1fayV0QIW7DShEQqhtUfhYc+cBPlw= -github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.26.0/go.mod h1:2bIszWvQRlJVmJLiuLhukLImRjKPcYdzzsx6darK02A= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.29.0 h1:UQUsRi8WTzhZntp5313l+CHIAT95ojUI2lpP/ExlZa4= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.29.0/go.mod h1:Cz6ft6Dkn3Et6l2v2a9/RpN7epQ1GtDlO6lj8bEcOvw= github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.48.1 h1:UQ0AhxogsIRZDkElkblfnwjc3IaltCm2HUMvezQaL7s= 
github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.48.1/go.mod h1:jyqM3eLpJ3IbIFDTKVz2rF9T/xWGW0rIriGwnz8l9Tk= github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/cloudmock v0.48.1 h1:oTX4vsorBZo/Zdum6OKPA4o7544hm6smoRv1QjpTwGo= @@ -733,11 +733,11 @@ github.com/btcsuite/btcd/btcutil v1.1.6 h1:zFL2+c3Lb9gEgqKNzowKUPQNb8jV7v5Oaodi/ github.com/btcsuite/btcd/btcutil v1.1.6/go.mod h1:9dFymx8HpuLqBnsPELrImQeTQfKBQqzqGbbV3jK55aE= github.com/bufbuild/protocompile v0.14.1 h1:iA73zAf/fyljNjQKwYzUHD6AD4R8KMasmwa/FBatYVw= github.com/bufbuild/protocompile v0.14.1/go.mod h1:ppVdAIhbr2H8asPk6k4pY7t9zB1OU5DoEw9xY/FUi1c= -github.com/bytedance/sonic v1.13.2 h1:8/H1FempDZqC4VqjptGo14QQlJx8VdZJegxs6wwfqpQ= -github.com/bytedance/sonic v1.13.2/go.mod h1:o68xyaF9u2gvVBuGHPlUVCy+ZfmNNO5ETf1+KgkJhz4= +github.com/bytedance/sonic v1.14.0 h1:/OfKt8HFw0kh2rj8N0F6C/qPGRESq0BbaNZgcNXXzQQ= +github.com/bytedance/sonic v1.14.0/go.mod h1:WoEbx8WTcFJfzCe0hbmyTGrfjt8PzNEBdxlNUO24NhA= github.com/bytedance/sonic/loader v0.1.1/go.mod h1:ncP89zfokxS5LZrJxl5z0UJcsk4M4yY2JpfqGeCtNLU= -github.com/bytedance/sonic/loader v0.2.4 h1:ZWCw4stuXUsn1/+zQDqeE7JKP+QO47tz7QCNan80NzY= -github.com/bytedance/sonic/loader v0.2.4/go.mod h1:N8A3vUdtUebEY2/VQC0MyhYeKUFosQU6FxH2JmUe6VI= +github.com/bytedance/sonic/loader v0.3.0 h1:dskwH8edlzNMctoruo8FPTJDF3vLtDT0sXZwvZJyqeA= +github.com/bytedance/sonic/loader v0.3.0/go.mod h1:N8A3vUdtUebEY2/VQC0MyhYeKUFosQU6FxH2JmUe6VI= github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ= github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4= github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= @@ -782,8 +782,8 @@ github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWH github.com/cncf/xds/go v0.0.0-20220314180256-7f1daf1720fc/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20230105202645-06c439db220b/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20250121191232-2f005788dc42 h1:Om6kYQYDUk5wWbT0t0q6pvyM49i9XZAv9dDrkDA7gjk= -github.com/cncf/xds/go v0.0.0-20250121191232-2f005788dc42/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= +github.com/cncf/xds/go v0.0.0-20250501225837-2ac532fd4443 h1:aQ3y1lwWyqYPiWZThqv1aFbZMiM9vblcSArJRf2Irls= +github.com/cncf/xds/go v0.0.0-20250501225837-2ac532fd4443/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= github.com/cockroachdb/apd/v2 v2.0.2 h1:weh8u7Cneje73dDh+2tEVLUvyBc89iwepWCD8b8034E= github.com/cockroachdb/apd/v2 v2.0.2/go.mod h1:DDxRlzC2lo3/vSlmSoS7JkqbbrARPuFOGr0B9pvN3Gw= github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= @@ -814,12 +814,12 @@ github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSV github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/cosmos/btcutil v1.0.5 h1:t+ZFcX77LpKtDBhjucvnOH8C2l2ioGsBNEQ3jef8xFk= github.com/cosmos/btcutil v1.0.5/go.mod h1:IyB7iuqZMJlthe2tkIFL33xPyzbFYP0XVdS8P5lUPis= -github.com/cosmos/cosmos-db v1.1.1 h1:FezFSU37AlBC8S98NlSagL76oqBRWq/prTPvFcEJNCM= -github.com/cosmos/cosmos-db v1.1.1/go.mod h1:AghjcIPqdhSLP/2Z0yha5xPH3nLnskz81pBx3tcVSAw= +github.com/cosmos/cosmos-db v1.1.3 
h1:7QNT77+vkefostcKkhrzDK9uoIEryzFrU9eoMeaQOPY= +github.com/cosmos/cosmos-db v1.1.3/go.mod h1:kN+wGsnwUJZYn8Sy5Q2O0vCYA99MJllkKASbs6Unb9U= github.com/cosmos/cosmos-proto v1.0.0-beta.5 h1:eNcayDLpip+zVLRLYafhzLvQlSmyab+RC5W7ZfmxJLA= github.com/cosmos/cosmos-proto v1.0.0-beta.5/go.mod h1:hQGLpiIUloJBMdQMMWb/4wRApmI9hjHH05nefC0Ojec= -github.com/cosmos/cosmos-sdk v0.53.0 h1:ZsB2tnBVudumV059oPuElcr0K1lLOutaI6WJ+osNTbI= -github.com/cosmos/cosmos-sdk v0.53.0/go.mod h1:UPcRyFwOUy2PfSFBWxBceO/HTjZOuBVqY583WyazIGs= +github.com/cosmos/cosmos-sdk v0.53.4 h1:kPF6vY68+/xi1/VebSZGpoxQqA52qkhUzqkrgeBn3Mg= +github.com/cosmos/cosmos-sdk v0.53.4/go.mod h1:7U3+WHZtI44dEOnU46+lDzBb2tFh1QlMvi8Z5JugopI= github.com/cosmos/go-bip39 v1.0.0 h1:pcomnQdrdH22njcAatO0yWojsUnCO3y2tNoV1cb6hHY= github.com/cosmos/go-bip39 v1.0.0/go.mod h1:RNJv0H/pOIVgxw6KS7QeX2a0Uo0aKUlfhZ4xuwvCdJw= github.com/cosmos/gogogateway v1.2.0 h1:Ae/OivNhp8DqBi/sh2A8a1D0y638GpL3tkmLQAiKxTE= @@ -903,8 +903,8 @@ github.com/envoyproxy/protoc-gen-validate v0.9.1/go.mod h1:OKNgG7TCp5pF4d6XftA0+ github.com/envoyproxy/protoc-gen-validate v0.10.1/go.mod h1:DRjgyB0I43LtJapqN6NiRwroiAU2PaFuvk/vjgh61ss= github.com/envoyproxy/protoc-gen-validate v1.2.1 h1:DEo3O99U8j4hBFwbJfrz9VtgcDfUKS7KJ7spH3d86P8= github.com/envoyproxy/protoc-gen-validate v1.2.1/go.mod h1:d/C80l/jxXLdfEIhX1W2TmLfsJ31lvEjwamM4DxlWXU= -github.com/ethereum/go-ethereum v1.15.11 h1:JK73WKeu0WC0O1eyX+mdQAVHUV+UR1a9VB/domDngBU= -github.com/ethereum/go-ethereum v1.15.11/go.mod h1:mf8YiHIb0GR4x4TipcvBUPxJLw1mFdmxzoDi11sDRoI= +github.com/ethereum/go-ethereum v1.16.3 h1:nDoBSrmsrPbrDIVLTkDQCy1U9KdHN+F2PzvMbDoS42Q= +github.com/ethereum/go-ethereum v1.16.3/go.mod h1:Lrsc6bt9Gm9RyvhfFK53vboCia8kpF9nv+2Ukntnl+8= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= github.com/fatih/color v1.17.0 h1:GlRw1BRJxkpqUCBKzKOw098ed57fEsKeNjpTe3cSjK4= @@ -923,8 +923,8 @@ github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMo github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k= github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= -github.com/getsentry/sentry-go v0.32.0 h1:YKs+//QmwE3DcYtfKRH8/KyOOF/I6Qnx7qYGNHCGmCY= -github.com/getsentry/sentry-go v0.32.0/go.mod h1:CYNcMMz73YigoHljQRG+qPF+eMq8gG72XcGN/p71BAY= +github.com/getsentry/sentry-go v0.33.0 h1:YWyDii0KGVov3xOaamOnF0mjOrqSjBqwv48UEzn7QFg= +github.com/getsentry/sentry-go v0.33.0/go.mod h1:C55omcY9ChRQIUcVcGcs+Zdy4ZpQGvNJ7JYHIoSWOtE= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI= github.com/gin-gonic/gin v1.6.3/go.mod h1:75u5sXoLsGZoRN5Sgbi1eraJ4GU3++wFwWzhwvtwp4M= @@ -938,8 +938,8 @@ github.com/go-fonts/stix v0.1.0/go.mod h1:w/c1f0ldAUlJmLBvlbkvVXLAD+tAMqobIIQpmn github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-jose/go-jose/v4 v4.0.5 h1:M6T8+mKZl/+fNNuFHvGIzDz7BTLQPIounk/b9dw3AaE= -github.com/go-jose/go-jose/v4 
v4.0.5/go.mod h1:s3P1lRrkT8igV8D9OjyL4WRyHvjB6a4JSllnOrmmBOA= +github.com/go-jose/go-jose/v4 v4.1.1 h1:JYhSgy4mXXzAdF3nUx3ygx347LRXJRrpgyU3adRmkAI= +github.com/go-jose/go-jose/v4 v4.1.1/go.mod h1:BdsZGqgdO3b6tTc6LSE56wcDbMMLuPsw5d4ZD5f94kA= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o= @@ -956,8 +956,8 @@ github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG github.com/go-logfmt/logfmt v0.6.0 h1:wGYYu3uicYdqXVgoYbvnkrPVXkuLM1p1ifugDMEdRi4= github.com/go-logfmt/logfmt v0.6.0/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= -github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= +github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-pdf/fpdf v0.5.0/go.mod h1:HzcnA+A23uwogo0tp9yU+l3V+KXhiESpt1PMayhOh5M= @@ -991,8 +991,8 @@ github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGw github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= github.com/golang/glog v1.1.0/go.mod h1:pfYeQZ3JWZoXTV5sFc986z3HTpwQs9At6P4ImfuP3NQ= -github.com/golang/glog v1.2.4 h1:CNNw5U8lSiiBk7druxtSHHTsRWcxKoac6kZKm2peBBc= -github.com/golang/glog v1.2.4/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= +github.com/golang/glog v1.2.5 h1:DrW6hGnjIhtvhOIiAKT6Psh/Kd/ldepEa81DKeiRJ5I= +github.com/golang/glog v1.2.5/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -1489,15 +1489,15 @@ github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z github.com/spf13/afero v1.9.2/go.mod h1:iUV7ddyEEZPO5gA3zD4fJt6iStLlL+Lg4m2cihcDf8Y= github.com/spf13/afero v1.12.0 h1:UcOPyRBYczmFn6yvphxkn9ZEOY65cpwGKb5mL36mrqs= github.com/spf13/afero v1.12.0/go.mod h1:ZTlWwG4/ahT8W7T0WQ5uYmjI9duaLQGy3Q2OAl4sk/4= -github.com/spf13/cast v1.8.0 h1:gEN9K4b8Xws4EX0+a0reLmhq8moKn7ntRlQYgjPeCDk= -github.com/spf13/cast v1.8.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= +github.com/spf13/cast v1.9.2 h1:SsGfm7M8QOFtEzumm7UZrZdLLquNdzFYfIbEXntcFbE= +github.com/spf13/cast v1.9.2/go.mod h1:jNfB8QC9IA6ZuY2ZjDp0KtFO2LZZlg4S/7bzP6qqeHo= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= -github.com/spf13/cobra v1.9.1 h1:CXSaggrXdbHK9CF+8ywj8Amf7PBRmPCOJugH954Nnlo= -github.com/spf13/cobra v1.9.1/go.mod h1:nDyEzZ8ogv936Cinf6g1RU9MRY64Ir93oCnqb9wxYW0= +github.com/spf13/cobra v1.10.1 h1:lJeBwCfmrnXthfAupyUTzJ/J4Nc1RsHC/mSRU2dll/s= +github.com/spf13/cobra 
v1.10.1/go.mod h1:7SmJGaTHFVBY0jW4NXGluQoLvhqFQM+6XSKD+P4XaB0= github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o= -github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/pflag v1.0.9 h1:9exaQaMOCwffKiiiYk6/BndUBv+iRViNW+4lEMi0PvY= +github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/viper v1.20.1 h1:ZMi+z/lvLyPSCoNtFCpqjy0S4kPbirhpTMwl8BkW9X4= github.com/spf13/viper v1.20.1/go.mod h1:P9Mdzt1zoHIG8m2eZQinpiBjo6kCmZSKBClNNqjJvu4= github.com/spiffe/go-spiffe/v2 v2.5.0 h1:N2I01KCUkv1FAjZXJMwh95KK1ZIQLYbPfhaxw8WS0hE= @@ -1522,8 +1522,8 @@ github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1F github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= -github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= -github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY= @@ -1576,24 +1576,24 @@ go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= -go.opentelemetry.io/contrib/detectors/gcp v1.34.0 h1:JRxssobiPg23otYU5SbWtQC//snGVIM3Tx6QRzlQBao= -go.opentelemetry.io/contrib/detectors/gcp v1.34.0/go.mod h1:cV4BMFcscUR/ckqLkbfQmF0PRsq8w/lMGzdbCSveBHo= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.58.0 h1:PS8wXpbyaDJQ2VDHHncMe9Vct0Zn1fEjpsjrLxGJoSc= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.58.0/go.mod h1:HDBUsEjOuRC0EzKZ1bSaRGZWUBAzo+MhAcUUORSr4D0= +go.opentelemetry.io/contrib/detectors/gcp v1.36.0 h1:F7q2tNlCaHY9nMKHR6XH9/qkp8FktLnIcy6jJNyOCQw= +go.opentelemetry.io/contrib/detectors/gcp v1.36.0/go.mod h1:IbBN8uAIIx734PTonTPxAxnjc2pQTxWNkwfstZ+6H2k= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.60.0 h1:x7wzEgXfnzJcHDwStJT+mxOz4etr2EcexjqhBvmoakw= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.60.0/go.mod h1:rg+RlpR5dKwaS95IyyZqj5Wd4E13lk/msnTS0Xl9lJM= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.59.0 h1:CV7UdSGJt/Ao6Gp4CXckLxVRRsRgDHoI8XjbL3PDl8s= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.59.0/go.mod h1:FRmFuRJfag1IZ2dPkHnEoSFVgTVPUd2qf5Vi69hLb8I= -go.opentelemetry.io/otel v1.34.0 h1:zRLXxLCgL1WyKsPVrgbSdMN4c0FMkDAskSTQP+0hdUY= -go.opentelemetry.io/otel v1.34.0/go.mod h1:OWFPOQ+h4G8xpyjgqo4SxJYdDQ/qmRH+wivy7zzx9oI= 
+go.opentelemetry.io/otel v1.37.0 h1:9zhNfelUvx0KBfu/gb+ZgeAfAgtWrfHJZcAqFC228wQ= +go.opentelemetry.io/otel v1.37.0/go.mod h1:ehE/umFRLnuLa/vSccNq9oS1ErUlkkK71gMcN34UG8I= go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.29.0 h1:WDdP9acbMYjbKIyJUhTvtzj601sVJOqgWdUxSdR/Ysc= go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.29.0/go.mod h1:BLbf7zbNIONBLPwvFnwNHGj4zge8uTCM/UPIVW1Mq2I= -go.opentelemetry.io/otel/metric v1.34.0 h1:+eTR3U0MyfWjRDhmFMxe2SsW64QrZ84AOhvqS7Y+PoQ= -go.opentelemetry.io/otel/metric v1.34.0/go.mod h1:CEDrp0fy2D0MvkXE+dPV7cMi8tWZwX3dmaIhwPOaqHE= -go.opentelemetry.io/otel/sdk v1.34.0 h1:95zS4k/2GOy069d321O8jWgYsW3MzVV+KuSPKp7Wr1A= -go.opentelemetry.io/otel/sdk v1.34.0/go.mod h1:0e/pNiaMAqaykJGKbi+tSjWfNNHMTxoC9qANsCzbyxU= -go.opentelemetry.io/otel/sdk/metric v1.34.0 h1:5CeK9ujjbFVL5c1PhLuStg1wxA7vQv7ce1EK0Gyvahk= -go.opentelemetry.io/otel/sdk/metric v1.34.0/go.mod h1:jQ/r8Ze28zRKoNRdkjCZxfs6YvBTG1+YIqyFVFYec5w= -go.opentelemetry.io/otel/trace v1.34.0 h1:+ouXS2V8Rd4hp4580a8q23bg0azF2nI8cqLYnC8mh/k= -go.opentelemetry.io/otel/trace v1.34.0/go.mod h1:Svm7lSjQD7kG7KJ/MUHPVXSDGz2OX4h0M2jHBhmSfRE= +go.opentelemetry.io/otel/metric v1.37.0 h1:mvwbQS5m0tbmqML4NqK+e3aDiO02vsf/WgbsdpcPoZE= +go.opentelemetry.io/otel/metric v1.37.0/go.mod h1:04wGrZurHYKOc+RKeye86GwKiTb9FKm1WHtO+4EVr2E= +go.opentelemetry.io/otel/sdk v1.37.0 h1:ItB0QUqnjesGRvNcmAcU0LyvkVyGJ2xftD29bWdDvKI= +go.opentelemetry.io/otel/sdk v1.37.0/go.mod h1:VredYzxUvuo2q3WRcDnKDjbdvmO0sCzOvVAiY+yUkAg= +go.opentelemetry.io/otel/sdk/metric v1.37.0 h1:90lI228XrB9jCMuSdA0673aubgRobVZFhbjxHHspCPc= +go.opentelemetry.io/otel/sdk/metric v1.37.0/go.mod h1:cNen4ZWfiD37l5NhS+Keb5RXVWZWpRE+9WyVCpbo5ps= +go.opentelemetry.io/otel/trace v1.37.0 h1:HLdcFNbRQBE2imdSEgm/kwqmQj1Or1l/7bW6mxVK7z4= +go.opentelemetry.io/otel/trace v1.37.0/go.mod h1:TlgrlQ+PtQO5XFerSPUYG0JSgGyryXewPGyayAWSBS0= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.opentelemetry.io/proto/otlp v0.15.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= @@ -1615,8 +1615,12 @@ go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9E go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= go.uber.org/zap v1.18.1/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= -golang.org/x/arch v0.15.0 h1:QtOrQd0bTUnhNVNndMpLHNWrDmYzZ2KDqSrEymqInZw= -golang.org/x/arch v0.15.0/go.mod h1:JmwW7aLIoRUKgaTzhkiEFxvcEiQGyOg9BMonBJUS7EE= +go.yaml.in/yaml/v2 v2.4.2 h1:DzmwEr2rDGHl7lsFgAHxmNz/1NlQ7xLIrlN2h5d1eGI= +go.yaml.in/yaml/v2 v2.4.2/go.mod h1:081UH+NErpNdqlCXm3TtEran0rJZGxAYx9hb/ELlsPU= +go.yaml.in/yaml/v3 v3.0.3 h1:bXOww4E/J3f66rav3pX3m8w6jDE4knZjGOw8b5Y6iNE= +go.yaml.in/yaml/v3 v3.0.3/go.mod h1:tBHosrYAkRZjRAOREWbDnBXUf08JOwYq++0QNwQiWzI= +golang.org/x/arch v0.17.0 h1:4O3dfLzd+lQewptAHqjewQZQDyEdejz3VwgeYwkZneU= +golang.org/x/arch v0.17.0/go.mod h1:bdwinDaKcfZUGpH09BB7ZmOfhalA8lQdzl62l8gGWsk= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= @@ -1634,8 +1638,8 @@ golang.org/x/crypto v0.13.0/go.mod 
h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliY golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= golang.org/x/crypto v0.32.0/go.mod h1:ZnnJkOaASj8g0AjIduWNlq2NRxL0PlBrbKVyZ6V/Ugc= -golang.org/x/crypto v0.37.0 h1:kJNSjF/Xp7kU0iB2Z+9viTPMW4EqqsrywMXLJOOsXSE= -golang.org/x/crypto v0.37.0/go.mod h1:vg+k43peMZ0pUMhYmVAWysMK35e6ioLh3wB8ZCAfbVc= +golang.org/x/crypto v0.39.0 h1:SHs+kF4LP+f+p14esP5jAoDpHU8Gu/v9lFRK6IT5imM= +golang.org/x/crypto v0.39.0/go.mod h1:L+Xg3Wf6HoL4Bn4238Z6ft6KfEpN0tJGo53AAPC632U= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -1773,8 +1777,8 @@ golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk= golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= golang.org/x/net v0.34.0/go.mod h1:di0qlW3YNM5oh6GqDGQr92MyTozJPmybPK4Ev/Gm31k= -golang.org/x/net v0.39.0 h1:ZCu7HMWDxpXpaiKdhzIfaltL9Lp31x/3fCP11bc6/fY= -golang.org/x/net v0.39.0/go.mod h1:X7NRbYVEA+ewNkCNyJ513WmMdQ3BineSwVtN2zD/d+E= +golang.org/x/net v0.41.0 h1:vBTly1HeNPEn3wtREYfy4GZ/NECgw2Cnl+nK6Nz3uvw= +golang.org/x/net v0.41.0/go.mod h1:B/K4NNqkfmg07DQYrbwvSluqCJOOXwUjeb/5lOisjbA= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -1804,8 +1808,8 @@ golang.org/x/oauth2 v0.4.0/go.mod h1:RznEsdpjGAINPTOF0UH/t+xJ75L18YO3Ho6Pyn+uRec golang.org/x/oauth2 v0.5.0/go.mod h1:9/XBHVqLaWO3/BRHs5jbpYCnOZVjj5V0ndyaAM7KB4I= golang.org/x/oauth2 v0.6.0/go.mod h1:ycmewcwgD4Rpr3eZJLSB4Kyyljb3qDh40vJ8STE5HKw= golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4= -golang.org/x/oauth2 v0.27.0 h1:da9Vo7/tDv5RH/7nZDz1eMGS/q1Vv1N/7FCrBhI9I3M= -golang.org/x/oauth2 v0.27.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8= +golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI= +golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1826,8 +1830,8 @@ golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= -golang.org/x/sync v0.13.0 h1:AauUjRAJ9OSnvULf/ARrrVywoJDy0YS2AwQ98I37610= -golang.org/x/sync v0.13.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= +golang.org/x/sync v0.15.0 h1:KWH3jNZsfyT6xfAfKiz6MRNmd46ByHDYaZ7KSkCtdW8= +golang.org/x/sync v0.15.0/go.mod 
h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1939,8 +1943,8 @@ golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.32.0 h1:s77OFDvIQeibCmezSnk/q6iAfkdiQaJi4VzroCFrN20= -golang.org/x/sys v0.32.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw= +golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= golang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2/go.mod h1:TeRTkGYfJXctD9OcfyVLyj2J3IxLnKwHJR8f4D8a3YE= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= @@ -1955,8 +1959,8 @@ golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU= golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY= golang.org/x/term v0.28.0/go.mod h1:Sw/lC2IAUZ92udQNf3WodGtn4k/XoLyZoh8v/8uiwek= -golang.org/x/term v0.31.0 h1:erwDkOK1Msy6offm1mOgvspSkslFnIGsFnxOKoufg3o= -golang.org/x/term v0.31.0/go.mod h1:R4BeIy7D95HzImkxGkTW1UQTtP54tio2RyHz7PwK0aw= +golang.org/x/term v0.32.0 h1:DR4lr0TjUs3epypdhTOkMmuF5CDFJ/8pOnbzMZPQ7bg= +golang.org/x/term v0.32.0/go.mod h1:uZG1FhGx848Sqfsq4/DlJr3xGGsYMu/L5GW4abiaEPQ= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1977,8 +1981,8 @@ golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= -golang.org/x/text v0.24.0 h1:dd5Bzh4yt5KYA8f9CJHCP4FB4D51c2c6JvN37xJJkJ0= -golang.org/x/text v0.24.0/go.mod h1:L8rBsPeo2pSS+xqN0d5u2ikmjtmoJbDBT1b7nHvFCdU= +golang.org/x/text v0.26.0 h1:P42AVeLghgTYr4+xUnTRKDMqpar+PtX7KWuNQL21L8M= +golang.org/x/text v0.26.0/go.mod h1:QK15LZJUUQVJxhz7wXgxSy/CJaTFjd0G+YLonydOVQA= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -2073,6 +2077,8 @@ gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJ gonum.org/v1/gonum v0.8.2/go.mod h1:oe/vMfY3deqTw+1EZJhuvEW2iwGF1bW9wwu7XCu0+v0= gonum.org/v1/gonum v0.9.3/go.mod 
h1:TZumC3NeyVQskjXqmyWt4S3bINhy7B4eYwW69EbyX+0= gonum.org/v1/gonum v0.11.0/go.mod h1:fSG4YDCxxUZQJ7rKsQrj0gMOg00Il0Z96/qMA4bVQhA= +gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk= +gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E= gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc= gonum.org/v1/plot v0.9.0/go.mod h1:3Pcqqmp6RHvJI72kgb8fThyUnav364FOsdDo2aGW5lY= @@ -2284,10 +2290,10 @@ google.golang.org/genproto v0.0.0-20230331144136-dcfb400f0633/go.mod h1:UUQDJDOl google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1/go.mod h1:nKE/iIaLqn2bQwXBg8f1g2Ylh6r5MN5CmZvuzZCgsCU= google.golang.org/genproto v0.0.0-20241118233622-e639e219e697 h1:ToEetK57OidYuqD4Q5w+vfEnPvPpuTwedCNVohYJfNk= google.golang.org/genproto v0.0.0-20241118233622-e639e219e697/go.mod h1:JJrvXBWRZaFMxBufik1a4RpFw4HhgVtBBWQeQgUj2cc= -google.golang.org/genproto/googleapis/api v0.0.0-20250414145226-207652e42e2e h1:UdXH7Kzbj+Vzastr5nVfccbmFsmYNygVLSPk1pEfDoY= -google.golang.org/genproto/googleapis/api v0.0.0-20250414145226-207652e42e2e/go.mod h1:085qFyf2+XaZlRdCgKNCIZ3afY2p4HHZdoIRpId8F4A= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250422160041-2d3770c4ea7f h1:N/PrbTw4kdkqNRzVfWPrBekzLuarFREcbFOiOLkXon4= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250422160041-2d3770c4ea7f/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A= +google.golang.org/genproto/googleapis/api v0.0.0-20250707201910-8d1bb00bc6a7 h1:FiusG7LWj+4byqhbvmB+Q93B/mOxJLN2DTozDuZm4EU= +google.golang.org/genproto/googleapis/api v0.0.0-20250707201910-8d1bb00bc6a7/go.mod h1:kXqgZtrWaf6qS3jZOCnCH7WYfrvFjkC51bM8fz3RsCA= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250707201910-8d1bb00bc6a7 h1:pFyd6EwwL2TqFf8emdthzeX+gZE1ElRq3iM8pui4KBY= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250707201910-8d1bb00bc6a7/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM= @@ -2335,8 +2341,8 @@ google.golang.org/grpc v1.52.3/go.mod h1:pu6fVzoFb+NBYNAvQL08ic+lvB2IojljRYuun5v google.golang.org/grpc v1.53.0/go.mod h1:OnIrk0ipVdj4N5d9IUoFUx72/VlD7+jUsHwZgwSMQpw= google.golang.org/grpc v1.54.0/go.mod h1:PUSEXI6iWghWaB6lXM4knEgpJNu2qUcKfDtNci3EC2g= google.golang.org/grpc v1.56.3/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s= -google.golang.org/grpc v1.72.0 h1:S7UkcVa60b5AAQTaO6ZKamFp1zMZSU0fGDK2WZLbBnM= -google.golang.org/grpc v1.72.0/go.mod h1:wH5Aktxcg25y1I3w7H69nHfXdOG3UiadoBtjh3izSDM= +google.golang.org/grpc v1.75.0 h1:+TW+dqTd2Biwe6KKfhE5JpiYIBWq865PhKGSXiivqt4= +google.golang.org/grpc v1.75.0/go.mod h1:JtPAzKiq4v1xcAB2hydNlWI2RnF85XXcV0mhKXr2ecQ= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= @@ -2356,8 +2362,8 @@ google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqw google.golang.org/protobuf v1.29.1/go.mod 
h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= -google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY= -google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY= +google.golang.org/protobuf v1.36.8 h1:xHScyCOEuuwZEc6UtSOvPbAT4zRh0xcNRYekJwfqyMc= +google.golang.org/protobuf v1.36.8/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -2446,6 +2452,6 @@ rsc.io/qr v0.2.0/go.mod h1:IF+uZjkb9fqyeF/4tlBoynqmQxUoPfWEKh921coOuXs= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= -sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= -sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= +sigs.k8s.io/yaml v1.6.0 h1:G8fkbMSAFqgEFgh4b1wmtzDnioxFCUgTZhlbj5P9QYs= +sigs.k8s.io/yaml v1.6.0/go.mod h1:796bPqUfzR/0jLAl6XjHl3Ck7MiyVv8dbTdyT3/pMf4= sourcegraph.com/sourcegraph/appdash v0.0.0-20190731080439-ebfcffb1b5c0/go.mod h1:hI742Nqp5OhwiqlzhgfbWU4mW4yO10fP+LoT9WOswdU= diff --git a/simapp/simd/cmd/root.go b/simapp/simd/cmd/root.go index 223d3ef46bb..ad91a51c7c4 100644 --- a/simapp/simd/cmd/root.go +++ b/simapp/simd/cmd/root.go @@ -34,7 +34,6 @@ import ( "github.com/cosmos/cosmos-sdk/x/auth/tx" txmodule "github.com/cosmos/cosmos-sdk/x/auth/tx/config" "github.com/cosmos/cosmos-sdk/x/auth/types" - "github.com/cosmos/cosmos-sdk/x/crisis" genutilcli "github.com/cosmos/cosmos-sdk/x/genutil/client/cli" cmtcfg "github.com/cometbft/cometbft/config" @@ -226,7 +225,7 @@ func initRootCmd(rootCmd *cobra.Command, encodingConfig params.EncodingConfig, b server.QueryBlockResultsCmd(), ) - server.AddCommands(rootCmd, simapp.DefaultNodeHome, newApp, appExport, addModuleInitFlags) + server.AddCommands(rootCmd, simapp.DefaultNodeHome, newApp, appExport, func(_ *cobra.Command) {}) // add keybase, auxiliary RPC, query, genesis, and tx child commands rootCmd.AddCommand( @@ -238,10 +237,6 @@ func initRootCmd(rootCmd *cobra.Command, encodingConfig params.EncodingConfig, b ) } -func addModuleInitFlags(startCmd *cobra.Command) { - crisis.AddModuleInitFlags(startCmd) -} - func queryCommand() *cobra.Command { cmd := &cobra.Command{ Use: "query", diff --git a/simapp/upgrades.go b/simapp/upgrades.go index b71ab1d002b..1f28e067571 100644 --- a/simapp/upgrades.go +++ b/simapp/upgrades.go @@ -5,31 +5,11 @@ import ( circuittypes "cosmossdk.io/x/circuit/types" upgradetypes "cosmossdk.io/x/upgrade/types" - consensusparamtypes "github.com/cosmos/cosmos-sdk/x/consensus/types" - crisistypes "github.com/cosmos/cosmos-sdk/x/crisis/types" - "github.com/cosmos/ibc-go/simapp/upgrades" ) // registerUpgradeHandlers registers all supported upgrade handlers func (app *SimApp) registerUpgradeHandlers() { - app.UpgradeKeeper.SetUpgradeHandler( - upgrades.V7, - upgrades.CreateV7UpgradeHandler( - app.ModuleManager, - app.configurator, - app.appCodec, - *app.IBCKeeper.ClientKeeper, - 
app.ConsensusParamsKeeper, - app.ParamsKeeper, - ), - ) - - app.UpgradeKeeper.SetUpgradeHandler( - upgrades.V7_1, - upgrades.CreateV7LocalhostUpgradeHandler(app.ModuleManager, app.configurator, *app.IBCKeeper.ClientKeeper), - ) - app.UpgradeKeeper.SetUpgradeHandler( upgrades.V8, upgrades.CreateDefaultUpgradeHandler( @@ -59,18 +39,6 @@ func (app *SimApp) registerUpgradeHandlers() { panic(err) } - if upgradeInfo.Name == upgrades.V7 && !app.UpgradeKeeper.IsSkipHeight(upgradeInfo.Height) { - storeUpgrades := storetypes.StoreUpgrades{ - Added: []string{ - consensusparamtypes.StoreKey, - crisistypes.StoreKey, - }, - } - - // configure store loader that checks if version == upgradeHeight and applies store upgrades - app.SetStoreLoader(upgradetypes.UpgradeStoreLoader(upgradeInfo.Height, &storeUpgrades)) - } - if upgradeInfo.Name == upgrades.V8 && !app.UpgradeKeeper.IsSkipHeight(upgradeInfo.Height) { storeUpgrades := storetypes.StoreUpgrades{ Added: []string{ diff --git a/simapp/upgrades/upgrades.go b/simapp/upgrades/upgrades.go index d4f8c4bc71d..a9f7efc0818 100644 --- a/simapp/upgrades/upgrades.go +++ b/simapp/upgrades/upgrades.go @@ -5,24 +5,10 @@ import ( upgradetypes "cosmossdk.io/x/upgrade/types" - "github.com/cosmos/cosmos-sdk/baseapp" - "github.com/cosmos/cosmos-sdk/codec" - sdk "github.com/cosmos/cosmos-sdk/types" "github.com/cosmos/cosmos-sdk/types/module" - consensusparamskeeper "github.com/cosmos/cosmos-sdk/x/consensus/keeper" - paramskeeper "github.com/cosmos/cosmos-sdk/x/params/keeper" - paramstypes "github.com/cosmos/cosmos-sdk/x/params/types" - - clientkeeper "github.com/cosmos/ibc-go/v10/modules/core/02-client/keeper" - "github.com/cosmos/ibc-go/v10/modules/core/exported" - ibctmmigrations "github.com/cosmos/ibc-go/v10/modules/light-clients/07-tendermint/migrations" ) const ( - // V7 defines the upgrade name for the ibc-go/v7 upgrade handler. - V7 = "v7" - // V7_1 defines the upgrade name for the ibc-go/v7.1 upgrade handler. - V7_1 = "v7.1" // V8 defines the upgrade name for the ibc-go/v8 upgrade handler. V8 = "v8" // V8_1 defines the upgrade name for the ibc-go/v8.1 upgrade handler. @@ -41,46 +27,3 @@ func CreateDefaultUpgradeHandler( return mm.RunMigrations(ctx, configurator, vm) } } - -// CreateV7UpgradeHandler creates an upgrade handler for the ibc-go/v7 SimApp upgrade. -func CreateV7UpgradeHandler( - mm *module.Manager, - configurator module.Configurator, - cdc codec.BinaryCodec, - clientKeeper clientkeeper.Keeper, - consensusParamsKeeper consensusparamskeeper.Keeper, - paramsKeeper paramskeeper.Keeper, -) upgradetypes.UpgradeHandler { - return func(goCtx context.Context, _ upgradetypes.Plan, vm module.VersionMap) (module.VersionMap, error) { - ctx := sdk.UnwrapSDKContext(goCtx) - // OPTIONAL: prune expired tendermint consensus states to save storage space - if _, err := ibctmmigrations.PruneExpiredConsensusStates(ctx, cdc, &clientKeeper); err != nil { - return nil, err - } - - legacyBaseAppSubspace := paramsKeeper.Subspace(baseapp.Paramspace).WithKeyTable(paramstypes.ConsensusParamsKeyTable()) - err := baseapp.MigrateParams(ctx, legacyBaseAppSubspace, consensusParamsKeeper.ParamsStore) - if err != nil { - panic(err) - } - - return mm.RunMigrations(goCtx, configurator, vm) - } -} - -// CreateV7LocalhostUpgradeHandler creates an upgrade handler for the ibc-go/v7.1 SimApp upgrade. 
-func CreateV7LocalhostUpgradeHandler( - mm *module.Manager, - configurator module.Configurator, - clientKeeper clientkeeper.Keeper, -) upgradetypes.UpgradeHandler { - return func(goCtx context.Context, _ upgradetypes.Plan, vm module.VersionMap) (module.VersionMap, error) { - ctx := sdk.UnwrapSDKContext(goCtx) - // explicitly update the IBC 02-client params, adding the localhost client type - params := clientKeeper.GetParams(ctx) - params.AllowedClients = append(params.AllowedClients, exported.Localhost) - clientKeeper.SetParams(ctx, params) - - return mm.RunMigrations(goCtx, configurator, vm) - } -} diff --git a/testing/README.md b/testing/README.md index e767b5ed81a..d9e6ea6827f 100644 --- a/testing/README.md +++ b/testing/README.md @@ -150,7 +150,7 @@ func TestKeeperTestSuite(t *testing.T) { } // SetupTest creates a coordinator with 2 test chains. -func (suite *KeeperTestSuite) SetupTest() { +func (s *KeeperTestSuite) SetupTest() { suite.coordinator = ibctesting.NewCoordinator(suite.T(), 2) // initializes 2 test chains suite.chainA = suite.coordinator.GetChain(ibctesting.GetChainID(1)) // convenience and readability suite.chainB = suite.coordinator.GetChain(ibctesting.GetChainID(2)) // convenience and readability diff --git a/testing/chain.go b/testing/chain.go index c5f92fbba44..53ad018393c 100644 --- a/testing/chain.go +++ b/testing/chain.go @@ -212,53 +212,53 @@ func NewTestChain(t *testing.T, coord *Coordinator, chainID string) *TestChain { } // GetContext returns the current context for the application. -func (chain *TestChain) GetContext() sdk.Context { - return chain.App.GetBaseApp().NewUncachedContext(false, chain.ProposedHeader) +func (c *TestChain) GetContext() sdk.Context { + return c.App.GetBaseApp().NewUncachedContext(false, c.ProposedHeader) } // GetSimApp returns the SimApp to allow usage ofnon-interface fields. // CONTRACT: This function should not be called by third parties implementing // their own SimApp. -func (chain *TestChain) GetSimApp() *simapp.SimApp { - app, ok := chain.App.(*simapp.SimApp) - require.True(chain.TB, ok) +func (c *TestChain) GetSimApp() *simapp.SimApp { + app, ok := c.App.(*simapp.SimApp) + require.True(c.TB, ok) return app } // QueryProof performs an abci query with the given key and returns the proto encoded merkle proof // for the query and the height at which the proof will succeed on a tendermint verifier. -func (chain *TestChain) QueryProof(key []byte) ([]byte, clienttypes.Height) { - return chain.QueryProofAtHeight(key, chain.App.LastBlockHeight()) +func (c *TestChain) QueryProof(key []byte) ([]byte, clienttypes.Height) { + return c.QueryProofAtHeight(key, c.App.LastBlockHeight()) } // QueryProofAtHeight performs an abci query with the given key and returns the proto encoded merkle proof // for the query and the height at which the proof will succeed on a tendermint verifier. Only the IBC // store is supported -func (chain *TestChain) QueryProofAtHeight(key []byte, height int64) ([]byte, clienttypes.Height) { - return chain.QueryProofForStore(exported.StoreKey, key, height) +func (c *TestChain) QueryProofAtHeight(key []byte, height int64) ([]byte, clienttypes.Height) { + return c.QueryProofForStore(exported.StoreKey, key, height) } // QueryProofForStore performs an abci query with the given key and returns the proto encoded merkle proof // for the query and the height at which the proof will succeed on a tendermint verifier. 
-func (chain *TestChain) QueryProofForStore(storeKey string, key []byte, height int64) ([]byte, clienttypes.Height) { - res, err := chain.App.Query( - chain.GetContext().Context(), +func (c *TestChain) QueryProofForStore(storeKey string, key []byte, height int64) ([]byte, clienttypes.Height) { + res, err := c.App.Query( + c.GetContext().Context(), &abci.RequestQuery{ Path: fmt.Sprintf("store/%s/key", storeKey), Height: height - 1, Data: key, Prove: true, }) - require.NoError(chain.TB, err) + require.NoError(c.TB, err) merkleProof, err := commitmenttypes.ConvertProofs(res.ProofOps) - require.NoError(chain.TB, err) + require.NoError(c.TB, err) - proof, err := chain.App.AppCodec().Marshal(&merkleProof) - require.NoError(chain.TB, err) + proof, err := c.App.AppCodec().Marshal(&merkleProof) + require.NoError(c.TB, err) - revision := clienttypes.ParseChainID(chain.ChainID) + revision := clienttypes.ParseChainID(c.ChainID) // proof height + 1 is returned as the proof created corresponds to the height the proof // was created in the IAVL tree. Tendermint and subsequently the clients that rely on it @@ -268,24 +268,24 @@ func (chain *TestChain) QueryProofForStore(storeKey string, key []byte, height i // QueryUpgradeProof performs an abci query with the given key and returns the proto encoded merkle proof // for the query and the height at which the proof will succeed on a tendermint verifier. -func (chain *TestChain) QueryUpgradeProof(key []byte, height uint64) ([]byte, clienttypes.Height) { - res, err := chain.App.Query( - chain.GetContext().Context(), +func (c *TestChain) QueryUpgradeProof(key []byte, height uint64) ([]byte, clienttypes.Height) { + res, err := c.App.Query( + c.GetContext().Context(), &abci.RequestQuery{ Path: "store/upgrade/key", Height: int64(height - 1), Data: key, Prove: true, }) - require.NoError(chain.TB, err) + require.NoError(c.TB, err) merkleProof, err := commitmenttypes.ConvertProofs(res.ProofOps) - require.NoError(chain.TB, err) + require.NoError(c.TB, err) - proof, err := chain.App.AppCodec().Marshal(&merkleProof) - require.NoError(chain.TB, err) + proof, err := c.App.AppCodec().Marshal(&merkleProof) + require.NoError(c.TB, err) - revision := clienttypes.ParseChainID(chain.ChainID) + revision := clienttypes.ParseChainID(c.ChainID) // proof height + 1 is returned as the proof created corresponds to the height the proof // was created in the IAVL tree. Tendermint and subsequently the clients that rely on it @@ -295,11 +295,11 @@ func (chain *TestChain) QueryUpgradeProof(key []byte, height uint64) ([]byte, cl // QueryConsensusStateProof performs an abci query for a consensus state // stored on the given clientID. The proof and consensusHeight are returned. -func (chain *TestChain) QueryConsensusStateProof(clientID string) ([]byte, clienttypes.Height) { - consensusHeight, ok := chain.GetClientLatestHeight(clientID).(clienttypes.Height) - require.True(chain.TB, ok) +func (c *TestChain) QueryConsensusStateProof(clientID string) ([]byte, clienttypes.Height) { + consensusHeight, ok := c.GetClientLatestHeight(clientID).(clienttypes.Height) + require.True(c.TB, ok) consensusKey := host.FullConsensusStateKey(clientID, consensusHeight) - consensusProof, _ := chain.QueryProof(consensusKey) + consensusProof, _ := c.QueryProof(consensusKey) return consensusProof, consensusHeight } @@ -310,77 +310,77 @@ func (chain *TestChain) QueryConsensusStateProof(clientID string) ([]byte, clien // of the next block being created. 
This follows the Tendermint protocol of applying valset changes // returned on block `n` to the validators of block `n+2`. // It calls BeginBlock with the new block created before returning. -func (chain *TestChain) NextBlock() { - res, err := chain.App.FinalizeBlock(&abci.RequestFinalizeBlock{ - Height: chain.ProposedHeader.Height, - Time: chain.ProposedHeader.GetTime(), - NextValidatorsHash: chain.NextVals.Hash(), +func (c *TestChain) NextBlock() { + res, err := c.App.FinalizeBlock(&abci.RequestFinalizeBlock{ + Height: c.ProposedHeader.Height, + Time: c.ProposedHeader.GetTime(), + NextValidatorsHash: c.NextVals.Hash(), }) - require.NoError(chain.TB, err) - chain.commitBlock(res) + require.NoError(c.TB, err) + c.commitBlock(res) } -func (chain *TestChain) commitBlock(res *abci.ResponseFinalizeBlock) { - _, err := chain.App.Commit() - require.NoError(chain.TB, err) +func (c *TestChain) commitBlock(res *abci.ResponseFinalizeBlock) { + _, err := c.App.Commit() + require.NoError(c.TB, err) // set the last header to the current header // use nil trusted fields - chain.LatestCommittedHeader = chain.CurrentTMClientHeader() + c.LatestCommittedHeader = c.CurrentTMClientHeader() // set the trusted validator set to the next validator set // The latest trusted validator set is the next validator set // associated with the header being committed in storage. This will // allow for header updates to be proved against these validators. - chain.TrustedValidators[uint64(chain.ProposedHeader.Height)] = chain.NextVals + c.TrustedValidators[uint64(c.ProposedHeader.Height)] = c.NextVals // val set changes returned from previous block get applied to the next validators // of this block. See tendermint spec for details. - chain.Vals = chain.NextVals - chain.NextVals = ApplyValSetChanges(chain, chain.Vals, res.ValidatorUpdates) + c.Vals = c.NextVals + c.NextVals = ApplyValSetChanges(c, c.Vals, res.ValidatorUpdates) // increment the proposer priority of validators - chain.Vals.IncrementProposerPriority(1) + c.Vals.IncrementProposerPriority(1) // increment the current header - chain.ProposedHeader = cmtproto.Header{ - ChainID: chain.ChainID, - Height: chain.App.LastBlockHeight() + 1, - AppHash: chain.App.LastCommitID().Hash, + c.ProposedHeader = cmtproto.Header{ + ChainID: c.ChainID, + Height: c.App.LastBlockHeight() + 1, + AppHash: c.App.LastCommitID().Hash, // NOTE: the time is increased by the coordinator to maintain time synchrony amongst // chains. - Time: chain.ProposedHeader.Time, - ValidatorsHash: chain.Vals.Hash(), - NextValidatorsHash: chain.NextVals.Hash(), - ProposerAddress: chain.Vals.Proposer.Address, + Time: c.ProposedHeader.Time, + ValidatorsHash: c.Vals.Hash(), + NextValidatorsHash: c.NextVals.Hash(), + ProposerAddress: c.Vals.Proposer.Address, } } // sendMsgs delivers a transaction through the application without returning the result. -func (chain *TestChain) sendMsgs(msgs ...sdk.Msg) error { - _, err := chain.SendMsgs(msgs...) +func (c *TestChain) sendMsgs(msgs ...sdk.Msg) error { + _, err := c.SendMsgs(msgs...) return err } // SendMsgs delivers a transaction through the application using a predefined sender. // It updates the senders sequence number and updates the TestChain's headers. // It returns the result and error if one occurred. 
-func (chain *TestChain) SendMsgs(msgs ...sdk.Msg) (*abci.ExecTxResult, error) { +func (c *TestChain) SendMsgs(msgs ...sdk.Msg) (*abci.ExecTxResult, error) { senderAccount := SenderAccount{ - SenderPrivKey: chain.SenderPrivKey, - SenderAccount: chain.SenderAccount, + SenderPrivKey: c.SenderPrivKey, + SenderAccount: c.SenderAccount, } - return chain.SendMsgsWithSender(senderAccount, msgs...) + return c.SendMsgsWithSender(senderAccount, msgs...) } // SendMsgsWithSender delivers a transaction through the application using the provided sender. -func (chain *TestChain) SendMsgsWithSender(sender SenderAccount, msgs ...sdk.Msg) (*abci.ExecTxResult, error) { - if chain.SendMsgsOverride != nil { - return chain.SendMsgsOverride(msgs...) +func (c *TestChain) SendMsgsWithSender(sender SenderAccount, msgs ...sdk.Msg) (*abci.ExecTxResult, error) { + if c.SendMsgsOverride != nil { + return c.SendMsgsOverride(msgs...) } // ensure the chain has the latest time - chain.Coordinator.UpdateTimeForChain(chain) + c.Coordinator.UpdateTimeForChain(c) // increment acc sequence regardless of success or failure tx execution defer func() { @@ -391,83 +391,83 @@ func (chain *TestChain) SendMsgsWithSender(sender SenderAccount, msgs ...sdk.Msg }() resp, err := simapp.SignAndDeliver( - chain.TB, - chain.TxConfig, - chain.App.GetBaseApp(), + c.TB, + c.TxConfig, + c.App.GetBaseApp(), msgs, - chain.ChainID, + c.ChainID, []uint64{sender.SenderAccount.GetAccountNumber()}, []uint64{sender.SenderAccount.GetSequence()}, true, - chain.ProposedHeader.GetTime(), - chain.NextVals.Hash(), + c.ProposedHeader.GetTime(), + c.NextVals.Hash(), sender.SenderPrivKey, ) if err != nil { return nil, err } - chain.commitBlock(resp) + c.commitBlock(resp) - require.Len(chain.TB, resp.TxResults, 1) + require.Len(c.TB, resp.TxResults, 1) txResult := resp.TxResults[0] if txResult.Code != 0 { return txResult, fmt.Errorf("%s/%d: %q", txResult.Codespace, txResult.Code, txResult.Log) } - chain.Coordinator.IncrementTime() + c.Coordinator.IncrementTime() return txResult, nil } // GetClientState retrieves the client state for the provided clientID. The client is // expected to exist otherwise testing will fail. -func (chain *TestChain) GetClientState(clientID string) exported.ClientState { - clientState, found := chain.App.GetIBCKeeper().ClientKeeper.GetClientState(chain.GetContext(), clientID) - require.True(chain.TB, found) +func (c *TestChain) GetClientState(clientID string) exported.ClientState { + clientState, found := c.App.GetIBCKeeper().ClientKeeper.GetClientState(c.GetContext(), clientID) + require.True(c.TB, found) return clientState } // GetConsensusState retrieves the consensus state for the provided clientID and height. // It will return a success boolean depending on if consensus state exists or not. -func (chain *TestChain) GetConsensusState(clientID string, height exported.Height) (exported.ConsensusState, bool) { - return chain.App.GetIBCKeeper().ClientKeeper.GetClientConsensusState(chain.GetContext(), clientID, height) +func (c *TestChain) GetConsensusState(clientID string, height exported.Height) (exported.ConsensusState, bool) { + return c.App.GetIBCKeeper().ClientKeeper.GetClientConsensusState(c.GetContext(), clientID, height) } // GetAcknowledgement retrieves an acknowledgement for the provided packet. If the // acknowledgement does not exist then testing will fail. 
-func (chain *TestChain) GetAcknowledgement(packet channeltypes.Packet) []byte { - ack, found := chain.App.GetIBCKeeper().ChannelKeeper.GetPacketAcknowledgement(chain.GetContext(), packet.GetDestPort(), packet.GetDestChannel(), packet.GetSequence()) - require.True(chain.TB, found) +func (c *TestChain) GetAcknowledgement(packet channeltypes.Packet) []byte { + ack, found := c.App.GetIBCKeeper().ChannelKeeper.GetPacketAcknowledgement(c.GetContext(), packet.GetDestPort(), packet.GetDestChannel(), packet.GetSequence()) + require.True(c.TB, found) return ack } // GetPrefix returns the prefix for used by a chain in connection creation -func (chain *TestChain) GetPrefix() commitmenttypes.MerklePrefix { - return commitmenttypes.NewMerklePrefix(chain.App.GetIBCKeeper().ConnectionKeeper.GetCommitmentPrefix().Bytes()) +func (c *TestChain) GetPrefix() commitmenttypes.MerklePrefix { + return commitmenttypes.NewMerklePrefix(c.App.GetIBCKeeper().ConnectionKeeper.GetCommitmentPrefix().Bytes()) } // ExpireClient fast forwards the chain's block time by the provided amount of time which will // expire any clients with a trusting period less than or equal to this amount of time. -func (chain *TestChain) ExpireClient(amount time.Duration) { - chain.Coordinator.IncrementTimeBy(amount) +func (c *TestChain) ExpireClient(amount time.Duration) { + c.Coordinator.IncrementTimeBy(amount) } // CurrentTMClientHeader creates a TM header using the current header parameters // on the chain. The trusted fields in the header are set to nil. -func (chain *TestChain) CurrentTMClientHeader() *ibctm.Header { - return chain.CreateTMClientHeader( - chain.ChainID, - chain.ProposedHeader.Height, +func (c *TestChain) CurrentTMClientHeader() *ibctm.Header { + return c.CreateTMClientHeader( + c.ChainID, + c.ProposedHeader.Height, clienttypes.Height{}, - chain.ProposedHeader.Time, - chain.Vals, - chain.NextVals, + c.ProposedHeader.Time, + c.Vals, + c.NextVals, nil, - chain.Signers, + c.Signers, ) } @@ -502,12 +502,12 @@ func CommitHeader(proposedHeader cmttypes.Header, valSet *cmttypes.ValidatorSet, // CreateTMClientHeader creates a TM header to update the TM client. Args are passed in to allow // caller flexibility to use params that differ from the chain. 
-func (chain *TestChain) CreateTMClientHeader(chainID string, blockHeight int64, trustedHeight clienttypes.Height, timestamp time.Time, cmtValSet, nextVals, cmtTrustedVals *cmttypes.ValidatorSet, signers map[string]cmttypes.PrivValidator) *ibctm.Header { +func (c *TestChain) CreateTMClientHeader(chainID string, blockHeight int64, trustedHeight clienttypes.Height, timestamp time.Time, cmtValSet, nextVals, cmtTrustedVals *cmttypes.ValidatorSet, signers map[string]cmttypes.PrivValidator) *ibctm.Header { var ( valSet *cmtproto.ValidatorSet trustedVals *cmtproto.ValidatorSet ) - require.NotNil(chain.TB, cmtValSet) + require.NotNil(c.TB, cmtValSet) proposedHeader := cmttypes.Header{ Version: cmtprotoversion.Consensus{Block: cmtversion.BlockProtocol, App: 2}, @@ -515,29 +515,29 @@ func (chain *TestChain) CreateTMClientHeader(chainID string, blockHeight int64, Height: blockHeight, Time: timestamp, LastBlockID: MakeBlockID(make([]byte, tmhash.Size), 10_000, make([]byte, tmhash.Size)), - LastCommitHash: chain.App.LastCommitID().Hash, + LastCommitHash: c.App.LastCommitID().Hash, DataHash: unusedHash, ValidatorsHash: cmtValSet.Hash(), NextValidatorsHash: nextVals.Hash(), ConsensusHash: unusedHash, - AppHash: chain.ProposedHeader.AppHash, + AppHash: c.ProposedHeader.AppHash, LastResultsHash: unusedHash, EvidenceHash: unusedHash, ProposerAddress: cmtValSet.Proposer.Address, //nolint:staticcheck } signedHeader, err := CommitHeader(proposedHeader, cmtValSet, signers) - require.NoError(chain.TB, err) + require.NoError(c.TB, err) if cmtValSet != nil { //nolint:staticcheck valSet, err = cmtValSet.ToProto() - require.NoError(chain.TB, err) + require.NoError(c.TB, err) valSet.TotalVotingPower = cmtValSet.TotalVotingPower() } if cmtTrustedVals != nil { trustedVals, err = cmtTrustedVals.ToProto() - require.NoError(chain.TB, err) + require.NoError(c.TB, err) trustedVals.TotalVotingPower = cmtTrustedVals.TotalVotingPower() } @@ -564,45 +564,45 @@ func MakeBlockID(hash []byte, partSetSize uint32, partSetHash []byte) cmttypes.B // GetClientLatestHeight returns the latest height for the client state with the given client identifier. // If an invalid client identifier is provided then a zero value height will be returned and testing will fail. -func (chain *TestChain) GetClientLatestHeight(clientID string) exported.Height { - latestHeight := chain.App.GetIBCKeeper().ClientKeeper.GetClientLatestHeight(chain.GetContext(), clientID) - require.False(chain.TB, latestHeight.IsZero()) +func (c *TestChain) GetClientLatestHeight(clientID string) exported.Height { + latestHeight := c.App.GetIBCKeeper().ClientKeeper.GetClientLatestHeight(c.GetContext(), clientID) + require.False(c.TB, latestHeight.IsZero()) return latestHeight } // GetTimeoutHeight is a convenience function which returns a IBC packet timeout height // to be used for testing. It returns the current IBC height + 100 blocks -func (chain *TestChain) GetTimeoutHeight() clienttypes.Height { - return clienttypes.NewHeight(clienttypes.ParseChainID(chain.ChainID), uint64(chain.GetContext().BlockHeight())+100) +func (c *TestChain) GetTimeoutHeight() clienttypes.Height { + return clienttypes.NewHeight(clienttypes.ParseChainID(c.ChainID), uint64(c.GetContext().BlockHeight())+100) } // GetTimeoutTimestamp is a convenience function which returns a IBC packet timeout timestamp // to be used for testing. It returns the current block timestamp + default timestamp delta (1 hour). 
-func (chain *TestChain) GetTimeoutTimestamp() uint64 { - return uint64(chain.GetContext().BlockTime().UnixNano()) + DefaultTimeoutTimestampDelta +func (c *TestChain) GetTimeoutTimestamp() uint64 { + return uint64(c.GetContext().BlockTime().UnixNano()) + DefaultTimeoutTimestampDelta } // GetTimeoutTimestampSecs is a convenience function which returns a IBC packet timeout timestamp in seconds // to be used for testing. It returns the current block timestamp + default timestamp delta (1 hour). -func (chain *TestChain) GetTimeoutTimestampSecs() uint64 { - return uint64(chain.GetContext().BlockTime().Unix()) + uint64(time.Hour.Seconds()) +func (c *TestChain) GetTimeoutTimestampSecs() uint64 { + return uint64(c.GetContext().BlockTime().Unix()) + uint64(time.Hour.Seconds()) } // DeleteKey deletes the specified key from the ibc store. -func (chain *TestChain) DeleteKey(key []byte) { - storeKey := chain.GetSimApp().GetKey(exported.StoreKey) - kvStore := chain.GetContext().KVStore(storeKey) +func (c *TestChain) DeleteKey(key []byte) { + storeKey := c.GetSimApp().GetKey(exported.StoreKey) + kvStore := c.GetContext().KVStore(storeKey) kvStore.Delete(key) } // IBCClientHeader will construct a 07-tendermint Header to update the light client // on the counterparty chain. The trustedHeight must be passed in as a non-zero height. -func (chain *TestChain) IBCClientHeader(header *ibctm.Header, trustedHeight clienttypes.Height) (*ibctm.Header, error) { +func (c *TestChain) IBCClientHeader(header *ibctm.Header, trustedHeight clienttypes.Height) (*ibctm.Header, error) { if trustedHeight.IsZero() { return nil, errorsmod.Wrap(ibctm.ErrInvalidHeaderHeight, "trustedHeight must be a non-zero height") } - cmtTrustedVals, ok := chain.TrustedValidators[trustedHeight.RevisionHeight] + cmtTrustedVals, ok := c.TrustedValidators[trustedHeight.RevisionHeight] if !ok { return nil, fmt.Errorf("unable to find trusted validators at height %d", trustedHeight.RevisionHeight) } @@ -620,8 +620,8 @@ func (chain *TestChain) IBCClientHeader(header *ibctm.Header, trustedHeight clie } // GetSenderAccount returns the sender account associated with the provided private key. -func (chain *TestChain) GetSenderAccount(privKey cryptotypes.PrivKey) SenderAccount { - account := chain.GetSimApp().AccountKeeper.GetAccount(chain.GetContext(), sdk.AccAddress(privKey.PubKey().Address())) +func (c *TestChain) GetSenderAccount(privKey cryptotypes.PrivKey) SenderAccount { + account := c.GetSimApp().AccountKeeper.GetAccount(c.GetContext(), sdk.AccAddress(privKey.PubKey().Address())) return SenderAccount{ SenderPrivKey: privKey, diff --git a/testing/chain_test.go b/testing/chain_test.go index eb8bd7bce22..f99b221dd69 100644 --- a/testing/chain_test.go +++ b/testing/chain_test.go @@ -70,7 +70,7 @@ func TestJailProposerValidator(t *testing.T) { require.NoError(t, err) // check that the jailing has taken effect in chain A - require.Equal(t, valsetLen-1, len(chainA.Vals.Validators)) + require.Len(t, chainA.Vals.Validators, valsetLen-1) // check that the valset in chain A has a new proposer require.False(t, propAddr.Equals(sdk.ConsAddress(chainA.Vals.Proposer.Address))) diff --git a/testing/coordinator.go b/testing/coordinator.go index 301c2f4f8d1..b514b632121 100644 --- a/testing/coordinator.go +++ b/testing/coordinator.go @@ -1,7 +1,6 @@ package ibctesting import ( - "fmt" "strconv" "testing" "time" @@ -54,27 +53,34 @@ func NewCustomAppCoordinator(t *testing.T, n int, appCreator AppCreator) *Coordi // by 5 seconds. 
// // CONTRACT: this function must be called after every Commit on any TestChain. -func (coord *Coordinator) IncrementTime() { - coord.IncrementTimeBy(TimeIncrement) +func (c *Coordinator) IncrementTime() { + c.IncrementTimeBy(TimeIncrement) } // IncrementTimeBy iterates through all the TestChain's and increments their current header time // by specified time. -func (coord *Coordinator) IncrementTimeBy(increment time.Duration) { - coord.CurrentTime = coord.CurrentTime.Add(increment).UTC() - coord.UpdateTime() +func (c *Coordinator) IncrementTimeBy(increment time.Duration) { + c.CurrentTime = c.CurrentTime.Add(increment).UTC() + c.UpdateTime() +} + +// SetTime sets the coordinator's current time to the specified time and updates +// the proposed header time for all chains. +func (c *Coordinator) SetTime(t time.Time) { + c.CurrentTime = t.UTC() + c.UpdateTime() } // UpdateTime updates all clocks for the TestChains to the current global time. -func (coord *Coordinator) UpdateTime() { - for _, chain := range coord.Chains { - coord.UpdateTimeForChain(chain) +func (c *Coordinator) UpdateTime() { + for _, chain := range c.Chains { + c.UpdateTimeForChain(chain) } } // UpdateTimeForChain updates the clock for a specific chain. -func (coord *Coordinator) UpdateTimeForChain(chain *TestChain) { - chain.ProposedHeader.Time = coord.CurrentTime.UTC() +func (c *Coordinator) UpdateTimeForChain(chain *TestChain) { + chain.ProposedHeader.Time = c.CurrentTime.UTC() } // Setup constructs a TM client, connection, and channel on both chains provided. It will @@ -131,9 +137,9 @@ func (*Coordinator) CreateTransferChannels(path *Path) { // GetChain returns the TestChain using the given chainID and returns an error if it does // not exist. -func (coord *Coordinator) GetChain(chainID string) *TestChain { - chain, found := coord.Chains[chainID] - require.True(coord.T, found, fmt.Sprintf("%s chain does not exist", chainID)) +func (c *Coordinator) GetChain(chainID string) *TestChain { + chain, found := c.Chains[chainID] + require.True(c.T, found, "%s chain does not exist", chainID) return chain } @@ -145,17 +151,17 @@ func GetChainID(index int) string { // CommitBlock commits a block on the provided indexes and then increments the global time. // // CONTRACT: the passed in list of indexes must not contain duplicates -func (coord *Coordinator) CommitBlock(chains ...*TestChain) { +func (c *Coordinator) CommitBlock(chains ...*TestChain) { for _, chain := range chains { chain.NextBlock() } - coord.IncrementTime() + c.IncrementTime() } // CommitNBlocks commits n blocks to state and updates the block height by 1 for each commit. 
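SetTime is the new absolute-time counterpart to IncrementTimeBy. A brief sketch of how a test might use it, not part of the patch, with an arbitrary fixed timestamp:

package ibctesting_test

import (
	"testing"
	"time"

	"github.com/stretchr/testify/require"

	ibctesting "github.com/cosmos/ibc-go/v10/testing"
)

func TestCoordinatorSetTime(t *testing.T) {
	coordinator := ibctesting.NewCoordinator(t, 2)
	chainA := coordinator.GetChain(ibctesting.GetChainID(1))

	// jump every chain's proposed header time to a fixed point in time
	newTime := time.Date(2030, 1, 1, 0, 0, 0, 0, time.UTC)
	coordinator.SetTime(newTime)
	require.True(t, chainA.ProposedHeader.Time.Equal(newTime))

	// relative adjustments continue from the new baseline
	coordinator.IncrementTimeBy(2 * time.Hour)
	require.True(t, chainA.ProposedHeader.Time.Equal(newTime.Add(2*time.Hour)))
}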
-func (coord *Coordinator) CommitNBlocks(chain *TestChain, n uint64) { - for i := uint64(0); i < n; i++ { +func (c *Coordinator) CommitNBlocks(chain *TestChain, n uint64) { + for range n { chain.NextBlock() - coord.IncrementTime() + c.IncrementTime() } } diff --git a/testing/coordinator_test.go b/testing/coordinator_test.go new file mode 100644 index 00000000000..b03f43bbc0e --- /dev/null +++ b/testing/coordinator_test.go @@ -0,0 +1,37 @@ +package ibctesting_test + +import ( + "testing" + + testifysuite "github.com/stretchr/testify/suite" + + ibctesting "github.com/cosmos/ibc-go/v10/testing" +) + +type CoordinatorTestSuite struct { + testifysuite.Suite + + coordinator *ibctesting.Coordinator + + chainA *ibctesting.TestChain + chainB *ibctesting.TestChain +} + +func (s *CoordinatorTestSuite) SetupTest() { + s.coordinator = ibctesting.NewCoordinator(s.T(), 2) + + s.chainA = s.coordinator.GetChain(ibctesting.GetChainID(1)) + s.chainB = s.coordinator.GetChain(ibctesting.GetChainID(2)) +} + +func TestCoordinatorTestSuite(t *testing.T) { + testifysuite.Run(t, new(CoordinatorTestSuite)) +} + +func (s *CoordinatorTestSuite) TestChainCodecRootResolveNotSet() { + resolved, err := s.chainA.Codec.InterfaceRegistry().Resolve("/") + s.Require().Error(err, "Root typeUrl should not be resolvable: %T", resolved) + + resolved, err = s.chainB.Codec.InterfaceRegistry().Resolve("/") + s.Require().Error(err, "Root typeUrl should not be resolvable: %T", resolved) +} diff --git a/testing/endpoint.go b/testing/endpoint.go index bf1f909b842..4a4e9819b79 100644 --- a/testing/endpoint.go +++ b/testing/endpoint.go @@ -72,109 +72,104 @@ func NewDefaultEndpoint(chain *TestChain) *Endpoint { // QueryProof queries proof associated with this endpoint using the latest client state // height on the counterparty chain. -func (endpoint *Endpoint) QueryProof(key []byte) ([]byte, clienttypes.Height) { +func (ep *Endpoint) QueryProof(key []byte) ([]byte, clienttypes.Height) { // obtain the counterparty client height. - latestCounterpartyHeight := endpoint.Counterparty.GetClientLatestHeight() + latestCounterpartyHeight := ep.Counterparty.GetClientLatestHeight() // query proof on the counterparty using the latest height of the IBC client - return endpoint.QueryProofAtHeight(key, latestCounterpartyHeight.GetRevisionHeight()) + return ep.QueryProofAtHeight(key, latestCounterpartyHeight.GetRevisionHeight()) } // QueryProofAtHeight queries proof associated with this endpoint using the proof height // provided -func (endpoint *Endpoint) QueryProofAtHeight(key []byte, height uint64) ([]byte, clienttypes.Height) { +func (ep *Endpoint) QueryProofAtHeight(key []byte, height uint64) ([]byte, clienttypes.Height) { // query proof on the counterparty using the latest height of the IBC client - return endpoint.Chain.QueryProofAtHeight(key, int64(height)) + return ep.Chain.QueryProofAtHeight(key, int64(height)) } -// CreateClient creates an IBC client on the endpoint. It will update the +// CreateClient creates an IBC client on the ep. It will update the // clientID for the endpoint if the message is successfully executed. // NOTE: a solo machine client will be created with an empty diversifier. 
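One detail of the QueryProof helpers above worth spelling out: the proof is generated on the endpoint's own chain, but at the height most recently recorded by the counterparty's client, which is exactly what the handshake messages need. A hedged sketch, not part of the patch; NewPath, Path.Setup and the 24-host key helpers are assumed from the wider codebase:

package ibctesting_test

import (
	"testing"

	"github.com/stretchr/testify/require"

	host "github.com/cosmos/ibc-go/v10/modules/core/24-host"
	ibctesting "github.com/cosmos/ibc-go/v10/testing"
)

func TestQueryConnectionProof(t *testing.T) {
	coordinator := ibctesting.NewCoordinator(t, 2)
	chainA := coordinator.GetChain(ibctesting.GetChainID(1))
	chainB := coordinator.GetChain(ibctesting.GetChainID(2))

	path := ibctesting.NewPath(chainA, chainB)
	path.Setup()

	// proof that chainB's connection end exists, generated at the height
	// stored in chainA's client for chainB
	connectionKey := host.ConnectionKey(path.EndpointB.ConnectionID)
	proof, proofHeight := path.EndpointB.QueryProof(connectionKey)

	require.NotEmpty(t, proof)
	require.False(t, proofHeight.IsZero())
}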
-func (endpoint *Endpoint) CreateClient() (err error) { +func (ep *Endpoint) CreateClient() error { // ensure counterparty has committed state - endpoint.Counterparty.Chain.NextBlock() + ep.Counterparty.Chain.NextBlock() var ( clientState exported.ClientState consensusState exported.ConsensusState ) - switch endpoint.ClientConfig.GetClientType() { + switch ep.ClientConfig.GetClientType() { case exported.Tendermint: - tmConfig, ok := endpoint.ClientConfig.(*TendermintConfig) - require.True(endpoint.Chain.TB, ok) + tmConfig, ok := ep.ClientConfig.(*TendermintConfig) + require.True(ep.Chain.TB, ok) - height, ok := endpoint.Counterparty.Chain.LatestCommittedHeader.GetHeight().(clienttypes.Height) - require.True(endpoint.Chain.TB, ok) + height, ok := ep.Counterparty.Chain.LatestCommittedHeader.GetHeight().(clienttypes.Height) + require.True(ep.Chain.TB, ok) clientState = ibctm.NewClientState( - endpoint.Counterparty.Chain.ChainID, tmConfig.TrustLevel, tmConfig.TrustingPeriod, tmConfig.UnbondingPeriod, tmConfig.MaxClockDrift, + ep.Counterparty.Chain.ChainID, tmConfig.TrustLevel, tmConfig.TrustingPeriod, tmConfig.UnbondingPeriod, tmConfig.MaxClockDrift, height, commitmenttypes.GetSDKSpecs(), UpgradePath) - consensusState = endpoint.Counterparty.Chain.LatestCommittedHeader.ConsensusState() + consensusState = ep.Counterparty.Chain.LatestCommittedHeader.ConsensusState() case exported.Solomachine: // TODO - // solo := NewSolomachine(endpoint.Chain.TB, endpoint.Chain.Codec, clientID, "", 1) + // solo := NewSolomachine(ep.Chain.TB, ep.Chain.Codec, clientID, "", 1) // clientState = solo.ClientState() // consensusState = solo.ConsensusState() default: - err = fmt.Errorf("client type %s is not supported", endpoint.ClientConfig.GetClientType()) - } - - if err != nil { - return err + return fmt.Errorf("client type %s is not supported", ep.ClientConfig.GetClientType()) } msg, err := clienttypes.NewMsgCreateClient( - clientState, consensusState, endpoint.Chain.SenderAccount.GetAddress().String(), + clientState, consensusState, ep.Chain.SenderAccount.GetAddress().String(), ) - require.NoError(endpoint.Chain.TB, err) + require.NoError(ep.Chain.TB, err) - res, err := endpoint.Chain.SendMsgs(msg) + res, err := ep.Chain.SendMsgs(msg) if err != nil { return err } - endpoint.ClientID, err = ParseClientIDFromEvents(res.Events) - require.NoError(endpoint.Chain.TB, err) + ep.ClientID, err = ParseClientIDFromEvents(res.Events) + require.NoError(ep.Chain.TB, err) return nil } -// UpdateClient updates the IBC client associated with the endpoint. -func (endpoint *Endpoint) UpdateClient() (err error) { +// UpdateClient updates the IBC client associated with the ep. 
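CreateClient and UpdateClient are the primitives behind Path.SetupClients; calling them directly gives a test full control over when headers are submitted. A sketch under the default TendermintConfig, not part of the patch:

package ibctesting_test

import (
	"testing"

	"github.com/stretchr/testify/require"

	ibctesting "github.com/cosmos/ibc-go/v10/testing"
)

func TestManualClientLifecycle(t *testing.T) {
	coordinator := ibctesting.NewCoordinator(t, 2)
	chainA := coordinator.GetChain(ibctesting.GetChainID(1))
	chainB := coordinator.GetChain(ibctesting.GetChainID(2))

	path := ibctesting.NewPath(chainA, chainB)

	// create an 07-tendermint client tracking chainB on chainA; the new
	// client ID is parsed from the events and stored on the endpoint
	require.NoError(t, path.EndpointA.CreateClient())
	require.NotEmpty(t, path.EndpointA.ClientID)

	heightBefore := path.EndpointA.GetClientLatestHeight()

	// commit a fresh block on chainB, then submit its header to chainA
	coordinator.CommitBlock(chainB)
	require.NoError(t, path.EndpointA.UpdateClient())

	require.Greater(t,
		path.EndpointA.GetClientLatestHeight().GetRevisionHeight(),
		heightBefore.GetRevisionHeight(),
	)
}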
+func (ep *Endpoint) UpdateClient() error { // ensure counterparty has committed state - endpoint.Chain.Coordinator.CommitBlock(endpoint.Counterparty.Chain) + ep.Chain.Coordinator.CommitBlock(ep.Counterparty.Chain) var header exported.ClientMessage - - switch endpoint.ClientConfig.GetClientType() { + switch ep.ClientConfig.GetClientType() { case exported.Tendermint: - trustedHeight, ok := endpoint.GetClientLatestHeight().(clienttypes.Height) - require.True(endpoint.Chain.TB, ok) - header, err = endpoint.Counterparty.Chain.IBCClientHeader(endpoint.Counterparty.Chain.LatestCommittedHeader, trustedHeight) + trustedHeight, ok := ep.GetClientLatestHeight().(clienttypes.Height) + require.True(ep.Chain.TB, ok) + var err error + header, err = ep.Counterparty.Chain.IBCClientHeader(ep.Counterparty.Chain.LatestCommittedHeader, trustedHeight) + if err != nil { + return err + } default: - err = fmt.Errorf("client type %s is not supported", endpoint.ClientConfig.GetClientType()) - } - - if err != nil { - return err + return fmt.Errorf("client type %s is not supported", ep.ClientConfig.GetClientType()) } msg, err := clienttypes.NewMsgUpdateClient( - endpoint.ClientID, header, - endpoint.Chain.SenderAccount.GetAddress().String(), + ep.ClientID, header, + ep.Chain.SenderAccount.GetAddress().String(), ) - require.NoError(endpoint.Chain.TB, err) + require.NoError(ep.Chain.TB, err) - return endpoint.Chain.sendMsgs(msg) + return ep.Chain.sendMsgs(msg) } -// FreezeClient freezes the IBC client associated with the endpoint. -func (endpoint *Endpoint) FreezeClient() { - clientState := endpoint.Chain.GetClientState(endpoint.ClientID) +// FreezeClient freezes the IBC client associated with the ep. +func (ep *Endpoint) FreezeClient() { + clientState := ep.Chain.GetClientState(ep.ClientID) tmClientState, ok := clientState.(*ibctm.ClientState) - require.True(endpoint.Chain.TB, ok) + require.True(ep.Chain.TB, ok) tmClientState.FrozenHeight = clienttypes.NewHeight(0, 1) - endpoint.Chain.App.GetIBCKeeper().ClientKeeper.SetClientState(endpoint.Chain.GetContext(), endpoint.ClientID, tmClientState) + ep.Chain.App.GetIBCKeeper().ClientKeeper.SetClientState(ep.Chain.GetContext(), ep.ClientID, tmClientState) } // UpgradeChain will upgrade a chain's chainID to the next revision number. 
@@ -182,14 +177,14 @@ func (endpoint *Endpoint) FreezeClient() { // TODO: implement actual upgrade chain functionality via scheduling an upgrade // and upgrading the client via MsgUpgradeClient // see reference https://github.com/cosmos/ibc-go/pull/1169 -func (endpoint *Endpoint) UpgradeChain() error { - if strings.TrimSpace(endpoint.Counterparty.ClientID) == "" { +func (ep *Endpoint) UpgradeChain() error { + if strings.TrimSpace(ep.Counterparty.ClientID) == "" { return errors.New("cannot upgrade chain if there is no counterparty client") } - clientState := endpoint.Counterparty.GetClientState() + clientState := ep.Counterparty.GetClientState() tmClientState, ok := clientState.(*ibctm.ClientState) - require.True(endpoint.Chain.TB, ok) + require.True(ep.Chain.TB, ok) // increment revision number in chainID oldChainID := tmClientState.ChainId @@ -204,120 +199,118 @@ func (endpoint *Endpoint) UpgradeChain() error { } // update chain - baseapp.SetChainID(newChainID)(endpoint.Chain.App.GetBaseApp()) - endpoint.Chain.ChainID = newChainID - endpoint.Chain.ProposedHeader.ChainID = newChainID - endpoint.Chain.NextBlock() // commit changes + baseapp.SetChainID(newChainID)(ep.Chain.App.GetBaseApp()) + ep.Chain.ChainID = newChainID + ep.Chain.ProposedHeader.ChainID = newChainID + ep.Chain.NextBlock() // commit changes // update counterparty client manually tmClientState.ChainId = newChainID tmClientState.LatestHeight = clienttypes.NewHeight(revisionNumber+1, tmClientState.LatestHeight.GetRevisionHeight()+1) - endpoint.Counterparty.SetClientState(clientState) + ep.Counterparty.SetClientState(clientState) tmConsensusState := &ibctm.ConsensusState{ - Timestamp: endpoint.Chain.LatestCommittedHeader.GetTime(), - Root: commitmenttypes.NewMerkleRoot(endpoint.Chain.LatestCommittedHeader.Header.GetAppHash()), - NextValidatorsHash: endpoint.Chain.LatestCommittedHeader.Header.NextValidatorsHash, + Timestamp: ep.Chain.LatestCommittedHeader.GetTime(), + Root: commitmenttypes.NewMerkleRoot(ep.Chain.LatestCommittedHeader.Header.GetAppHash()), + NextValidatorsHash: ep.Chain.LatestCommittedHeader.Header.NextValidatorsHash, } - latestHeight := endpoint.Counterparty.GetClientLatestHeight() + latestHeight := ep.Counterparty.GetClientLatestHeight() - endpoint.Counterparty.SetConsensusState(tmConsensusState, latestHeight) + ep.Counterparty.SetConsensusState(tmConsensusState, latestHeight) // ensure the next update isn't identical to the one set in state - endpoint.Chain.Coordinator.IncrementTime() - endpoint.Chain.NextBlock() + ep.Chain.Coordinator.IncrementTime() + ep.Chain.NextBlock() - return endpoint.Counterparty.UpdateClient() + return ep.Counterparty.UpdateClient() } -// ConnOpenInit will construct and execute a MsgConnectionOpenInit on the associated endpoint. -func (endpoint *Endpoint) ConnOpenInit() error { +// ConnOpenInit will construct and execute a MsgConnectionOpenInit on the associated ep. 
+func (ep *Endpoint) ConnOpenInit() error { msg := connectiontypes.NewMsgConnectionOpenInit( - endpoint.ClientID, - endpoint.Counterparty.ClientID, - endpoint.Counterparty.Chain.GetPrefix(), DefaultOpenInitVersion, endpoint.ConnectionConfig.DelayPeriod, - endpoint.Chain.SenderAccount.GetAddress().String(), + ep.ClientID, + ep.Counterparty.ClientID, + ep.Counterparty.Chain.GetPrefix(), DefaultOpenInitVersion, ep.ConnectionConfig.DelayPeriod, + ep.Chain.SenderAccount.GetAddress().String(), ) - res, err := endpoint.Chain.SendMsgs(msg) + res, err := ep.Chain.SendMsgs(msg) if err != nil { return err } - endpoint.ConnectionID, err = ParseConnectionIDFromEvents(res.Events) - require.NoError(endpoint.Chain.TB, err) + ep.ConnectionID, err = ParseConnectionIDFromEvents(res.Events) + require.NoError(ep.Chain.TB, err) return nil } -// ConnOpenTry will construct and execute a MsgConnectionOpenTry on the associated endpoint. -func (endpoint *Endpoint) ConnOpenTry() error { - err := endpoint.UpdateClient() - require.NoError(endpoint.Chain.TB, err) +// ConnOpenTry will construct and execute a MsgConnectionOpenTry on the associated ep. +func (ep *Endpoint) ConnOpenTry() error { + err := ep.UpdateClient() + require.NoError(ep.Chain.TB, err) - initProof, proofHeight := endpoint.QueryConnectionHandshakeProof() + initProof, proofHeight := ep.QueryConnectionHandshakeProof() msg := connectiontypes.NewMsgConnectionOpenTry( - endpoint.ClientID, endpoint.Counterparty.ConnectionID, endpoint.Counterparty.ClientID, - endpoint.Counterparty.Chain.GetPrefix(), []*connectiontypes.Version{ConnectionVersion}, - endpoint.ConnectionConfig.DelayPeriod, initProof, proofHeight, - endpoint.Chain.SenderAccount.GetAddress().String(), + ep.ClientID, ep.Counterparty.ConnectionID, ep.Counterparty.ClientID, + ep.Counterparty.Chain.GetPrefix(), []*connectiontypes.Version{ConnectionVersion}, + ep.ConnectionConfig.DelayPeriod, initProof, proofHeight, + ep.Chain.SenderAccount.GetAddress().String(), ) - res, err := endpoint.Chain.SendMsgs(msg) + res, err := ep.Chain.SendMsgs(msg) if err != nil { return err } - if endpoint.ConnectionID == "" { - endpoint.ConnectionID, err = ParseConnectionIDFromEvents(res.Events) - require.NoError(endpoint.Chain.TB, err) + if ep.ConnectionID == "" { + ep.ConnectionID, err = ParseConnectionIDFromEvents(res.Events) + require.NoError(ep.Chain.TB, err) } return nil } -// ConnOpenAck will construct and execute a MsgConnectionOpenAck on the associated endpoint. -func (endpoint *Endpoint) ConnOpenAck() error { - err := endpoint.UpdateClient() - require.NoError(endpoint.Chain.TB, err) +// ConnOpenAck will construct and execute a MsgConnectionOpenAck on the associated ep. +func (ep *Endpoint) ConnOpenAck() error { + err := ep.UpdateClient() + require.NoError(ep.Chain.TB, err) - tryProof, proofHeight := endpoint.QueryConnectionHandshakeProof() + tryProof, proofHeight := ep.QueryConnectionHandshakeProof() msg := connectiontypes.NewMsgConnectionOpenAck( - endpoint.ConnectionID, endpoint.Counterparty.ConnectionID, // testing doesn't use flexible selection + ep.ConnectionID, ep.Counterparty.ConnectionID, // testing doesn't use flexible selection tryProof, proofHeight, ConnectionVersion, - endpoint.Chain.SenderAccount.GetAddress().String(), + ep.Chain.SenderAccount.GetAddress().String(), ) - return endpoint.Chain.sendMsgs(msg) + return ep.Chain.sendMsgs(msg) } -// ConnOpenConfirm will construct and execute a MsgConnectionOpenConfirm on the associated endpoint. 
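The four ConnOpen* helpers correspond one-to-one to the ICS-03 handshake messages; Path.SetupConnections essentially chains them in this order. A sketch of driving the handshake by hand, not part of the patch, assuming clients already exist on both endpoints:

package ibctesting_test

import (
	"testing"

	"github.com/stretchr/testify/require"

	connectiontypes "github.com/cosmos/ibc-go/v10/modules/core/03-connection/types"
	ibctesting "github.com/cosmos/ibc-go/v10/testing"
)

func TestManualConnectionHandshake(t *testing.T) {
	coordinator := ibctesting.NewCoordinator(t, 2)
	chainA := coordinator.GetChain(ibctesting.GetChainID(1))
	chainB := coordinator.GetChain(ibctesting.GetChainID(2))

	path := ibctesting.NewPath(chainA, chainB)
	path.SetupClients()

	// INIT (A) -> TRY (B) -> ACK (A) -> CONFIRM (B)
	require.NoError(t, path.EndpointA.ConnOpenInit())
	require.NoError(t, path.EndpointB.ConnOpenTry())
	require.NoError(t, path.EndpointA.ConnOpenAck())
	require.NoError(t, path.EndpointB.ConnOpenConfirm())

	// both connection ends should now be open
	require.Equal(t, connectiontypes.OPEN, path.EndpointA.GetConnection().State)
	require.Equal(t, connectiontypes.OPEN, path.EndpointB.GetConnection().State)
}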
-func (endpoint *Endpoint) ConnOpenConfirm() error { - err := endpoint.UpdateClient() - require.NoError(endpoint.Chain.TB, err) +// ConnOpenConfirm will construct and execute a MsgConnectionOpenConfirm on the associated ep. +func (ep *Endpoint) ConnOpenConfirm() error { + err := ep.UpdateClient() + require.NoError(ep.Chain.TB, err) - connectionKey := host.ConnectionKey(endpoint.Counterparty.ConnectionID) - proof, height := endpoint.Counterparty.Chain.QueryProof(connectionKey) + connectionKey := host.ConnectionKey(ep.Counterparty.ConnectionID) + proof, height := ep.Counterparty.Chain.QueryProof(connectionKey) msg := connectiontypes.NewMsgConnectionOpenConfirm( - endpoint.ConnectionID, + ep.ConnectionID, proof, height, - endpoint.Chain.SenderAccount.GetAddress().String(), + ep.Chain.SenderAccount.GetAddress().String(), ) - return endpoint.Chain.sendMsgs(msg) + return ep.Chain.sendMsgs(msg) } // QueryConnectionHandshakeProof returns all the proofs necessary to execute OpenTry or Open Ack of // the connection handshakes. It returns the proof of the counterparty connection and the proof height. -func (endpoint *Endpoint) QueryConnectionHandshakeProof() ( - connectionProof []byte, proofHeight clienttypes.Height, +func (ep *Endpoint) QueryConnectionHandshakeProof() ( + []byte, clienttypes.Height, ) { // query proof for the connection on the counterparty - connectionKey := host.ConnectionKey(endpoint.Counterparty.ConnectionID) - connectionProof, proofHeight = endpoint.Counterparty.QueryProof(connectionKey) - - return connectionProof, proofHeight + connectionKey := host.ConnectionKey(ep.Counterparty.ConnectionID) + return ep.Counterparty.QueryProof(connectionKey) } var sequenceNumber int @@ -325,143 +318,143 @@ var sequenceNumber int // IncrementNextChannelSequence incrementes the value "nextChannelSequence" in the store, // which is used to determine the next channel ID. // This guarantees that we'll have always different IDs while running tests. -func (endpoint *Endpoint) IncrementNextChannelSequence() { - if endpoint.disableUniqueChannelIDs { +func (ep *Endpoint) IncrementNextChannelSequence() { + if ep.disableUniqueChannelIDs { return } sequenceNumber++ - endpoint.Chain.App.GetIBCKeeper().ChannelKeeper.SetNextChannelSequence(endpoint.Chain.GetContext(), uint64(sequenceNumber)) + ep.Chain.App.GetIBCKeeper().ChannelKeeper.SetNextChannelSequence(ep.Chain.GetContext(), uint64(sequenceNumber)) } -// ChanOpenInit will construct and execute a MsgChannelOpenInit on the associated endpoint. -func (endpoint *Endpoint) ChanOpenInit() error { - endpoint.IncrementNextChannelSequence() +// ChanOpenInit will construct and execute a MsgChannelOpenInit on the associated ep. 
+func (ep *Endpoint) ChanOpenInit() error { + ep.IncrementNextChannelSequence() msg := channeltypes.NewMsgChannelOpenInit( - endpoint.ChannelConfig.PortID, - endpoint.ChannelConfig.Version, endpoint.ChannelConfig.Order, []string{endpoint.ConnectionID}, - endpoint.Counterparty.ChannelConfig.PortID, - endpoint.Chain.SenderAccount.GetAddress().String(), + ep.ChannelConfig.PortID, + ep.ChannelConfig.Version, ep.ChannelConfig.Order, []string{ep.ConnectionID}, + ep.Counterparty.ChannelConfig.PortID, + ep.Chain.SenderAccount.GetAddress().String(), ) - res, err := endpoint.Chain.SendMsgs(msg) + res, err := ep.Chain.SendMsgs(msg) if err != nil { return err } - endpoint.ChannelID, err = ParseChannelIDFromEvents(res.Events) - require.NoError(endpoint.Chain.TB, err) + ep.ChannelID, err = ParseChannelIDFromEvents(res.Events) + require.NoError(ep.Chain.TB, err) // update version to selected app version // NOTE: this update must be performed after SendMsgs() - endpoint.ChannelConfig.Version = endpoint.GetChannel().Version - endpoint.Counterparty.ChannelConfig.Version = endpoint.GetChannel().Version + ep.ChannelConfig.Version = ep.GetChannel().Version + ep.Counterparty.ChannelConfig.Version = ep.GetChannel().Version return nil } -// ChanOpenTry will construct and execute a MsgChannelOpenTry on the associated endpoint. -func (endpoint *Endpoint) ChanOpenTry() error { - endpoint.IncrementNextChannelSequence() - err := endpoint.UpdateClient() - require.NoError(endpoint.Chain.TB, err) +// ChanOpenTry will construct and execute a MsgChannelOpenTry on the associated ep. +func (ep *Endpoint) ChanOpenTry() error { + ep.IncrementNextChannelSequence() + err := ep.UpdateClient() + require.NoError(ep.Chain.TB, err) - channelKey := host.ChannelKey(endpoint.Counterparty.ChannelConfig.PortID, endpoint.Counterparty.ChannelID) - proof, height := endpoint.Counterparty.Chain.QueryProof(channelKey) + channelKey := host.ChannelKey(ep.Counterparty.ChannelConfig.PortID, ep.Counterparty.ChannelID) + proof, height := ep.Counterparty.Chain.QueryProof(channelKey) msg := channeltypes.NewMsgChannelOpenTry( - endpoint.ChannelConfig.PortID, - endpoint.ChannelConfig.Version, endpoint.ChannelConfig.Order, []string{endpoint.ConnectionID}, - endpoint.Counterparty.ChannelConfig.PortID, endpoint.Counterparty.ChannelID, endpoint.Counterparty.ChannelConfig.Version, + ep.ChannelConfig.PortID, + ep.ChannelConfig.Version, ep.ChannelConfig.Order, []string{ep.ConnectionID}, + ep.Counterparty.ChannelConfig.PortID, ep.Counterparty.ChannelID, ep.Counterparty.ChannelConfig.Version, proof, height, - endpoint.Chain.SenderAccount.GetAddress().String(), + ep.Chain.SenderAccount.GetAddress().String(), ) - res, err := endpoint.Chain.SendMsgs(msg) + res, err := ep.Chain.SendMsgs(msg) if err != nil { return err } - if endpoint.ChannelID == "" { - endpoint.ChannelID, err = ParseChannelIDFromEvents(res.Events) - require.NoError(endpoint.Chain.TB, err) + if ep.ChannelID == "" { + ep.ChannelID, err = ParseChannelIDFromEvents(res.Events) + require.NoError(ep.Chain.TB, err) } // update version to selected app version // NOTE: this update must be performed after the endpoint channelID is set - endpoint.ChannelConfig.Version = endpoint.GetChannel().Version - endpoint.Counterparty.ChannelConfig.Version = endpoint.GetChannel().Version + ep.ChannelConfig.Version = ep.GetChannel().Version + ep.Counterparty.ChannelConfig.Version = ep.GetChannel().Version return nil } -// ChanOpenAck will construct and execute a MsgChannelOpenAck on the associated endpoint. 
-func (endpoint *Endpoint) ChanOpenAck() error { - err := endpoint.UpdateClient() - require.NoError(endpoint.Chain.TB, err) +// ChanOpenAck will construct and execute a MsgChannelOpenAck on the associated ep. +func (ep *Endpoint) ChanOpenAck() error { + err := ep.UpdateClient() + require.NoError(ep.Chain.TB, err) - channelKey := host.ChannelKey(endpoint.Counterparty.ChannelConfig.PortID, endpoint.Counterparty.ChannelID) - proof, height := endpoint.Counterparty.Chain.QueryProof(channelKey) + channelKey := host.ChannelKey(ep.Counterparty.ChannelConfig.PortID, ep.Counterparty.ChannelID) + proof, height := ep.Counterparty.Chain.QueryProof(channelKey) msg := channeltypes.NewMsgChannelOpenAck( - endpoint.ChannelConfig.PortID, endpoint.ChannelID, - endpoint.Counterparty.ChannelID, endpoint.Counterparty.ChannelConfig.Version, // testing doesn't use flexible selection + ep.ChannelConfig.PortID, ep.ChannelID, + ep.Counterparty.ChannelID, ep.Counterparty.ChannelConfig.Version, // testing doesn't use flexible selection proof, height, - endpoint.Chain.SenderAccount.GetAddress().String(), + ep.Chain.SenderAccount.GetAddress().String(), ) - if err = endpoint.Chain.sendMsgs(msg); err != nil { + if err = ep.Chain.sendMsgs(msg); err != nil { return err } - endpoint.ChannelConfig.Version = endpoint.GetChannel().Version + ep.ChannelConfig.Version = ep.GetChannel().Version return nil } -// ChanOpenConfirm will construct and execute a MsgChannelOpenConfirm on the associated endpoint. -func (endpoint *Endpoint) ChanOpenConfirm() error { - err := endpoint.UpdateClient() - require.NoError(endpoint.Chain.TB, err) +// ChanOpenConfirm will construct and execute a MsgChannelOpenConfirm on the associated ep. +func (ep *Endpoint) ChanOpenConfirm() error { + err := ep.UpdateClient() + require.NoError(ep.Chain.TB, err) - channelKey := host.ChannelKey(endpoint.Counterparty.ChannelConfig.PortID, endpoint.Counterparty.ChannelID) - proof, height := endpoint.Counterparty.Chain.QueryProof(channelKey) + channelKey := host.ChannelKey(ep.Counterparty.ChannelConfig.PortID, ep.Counterparty.ChannelID) + proof, height := ep.Counterparty.Chain.QueryProof(channelKey) msg := channeltypes.NewMsgChannelOpenConfirm( - endpoint.ChannelConfig.PortID, endpoint.ChannelID, + ep.ChannelConfig.PortID, ep.ChannelID, proof, height, - endpoint.Chain.SenderAccount.GetAddress().String(), + ep.Chain.SenderAccount.GetAddress().String(), ) - return endpoint.Chain.sendMsgs(msg) + return ep.Chain.sendMsgs(msg) } -// ChanCloseInit will construct and execute a MsgChannelCloseInit on the associated endpoint. +// ChanCloseInit will construct and execute a MsgChannelCloseInit on the associated ep. // // NOTE: does not work with ibc-transfer module -func (endpoint *Endpoint) ChanCloseInit() error { +func (ep *Endpoint) ChanCloseInit() error { msg := channeltypes.NewMsgChannelCloseInit( - endpoint.ChannelConfig.PortID, endpoint.ChannelID, - endpoint.Chain.SenderAccount.GetAddress().String(), + ep.ChannelConfig.PortID, ep.ChannelID, + ep.Chain.SenderAccount.GetAddress().String(), ) - return endpoint.Chain.sendMsgs(msg) + return ep.Chain.sendMsgs(msg) } // SendPacket sends a packet through the channel keeper using the associated endpoint // The counterparty client is updated so proofs can be sent to the counterparty chain. // The packet sequence generated for the packet to be sent is returned. An error // is returned if one occurs. 
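The ChanOpen* helpers mirror the connection flow one level up; Path.Setup runs the client, connection and channel handshakes back to back. A sketch of the channel portion on its own, not part of the patch, using NewPath's default mock channel configuration:

package ibctesting_test

import (
	"testing"

	"github.com/stretchr/testify/require"

	channeltypes "github.com/cosmos/ibc-go/v10/modules/core/04-channel/types"
	ibctesting "github.com/cosmos/ibc-go/v10/testing"
)

func TestManualChannelHandshake(t *testing.T) {
	coordinator := ibctesting.NewCoordinator(t, 2)
	chainA := coordinator.GetChain(ibctesting.GetChainID(1))
	chainB := coordinator.GetChain(ibctesting.GetChainID(2))

	path := ibctesting.NewPath(chainA, chainB)
	path.SetupConnections()

	require.NoError(t, path.EndpointA.ChanOpenInit())
	require.NoError(t, path.EndpointB.ChanOpenTry())
	require.NoError(t, path.EndpointA.ChanOpenAck())
	require.NoError(t, path.EndpointB.ChanOpenConfirm())

	// the channel end on chainA is open and carries the version the
	// application selected during the handshake
	channel := path.EndpointA.GetChannel()
	require.Equal(t, channeltypes.OPEN, channel.State)
	require.Equal(t, path.EndpointA.ChannelConfig.Version, channel.Version)
}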
-func (endpoint *Endpoint) SendPacket( +func (ep *Endpoint) SendPacket( timeoutHeight clienttypes.Height, timeoutTimestamp uint64, data []byte, ) (uint64, error) { // no need to send message, acting as a module - sequence, err := endpoint.Chain.App.GetIBCKeeper().ChannelKeeper.SendPacket(endpoint.Chain.GetContext(), endpoint.ChannelConfig.PortID, endpoint.ChannelID, timeoutHeight, timeoutTimestamp, data) + sequence, err := ep.Chain.App.GetIBCKeeper().ChannelKeeper.SendPacket(ep.Chain.GetContext(), ep.ChannelConfig.PortID, ep.ChannelID, timeoutHeight, timeoutTimestamp, data) if err != nil { return 0, err } // commit changes since no message was sent - endpoint.Chain.Coordinator.CommitBlock(endpoint.Chain) + ep.Chain.Coordinator.CommitBlock(ep.Chain) - err = endpoint.Counterparty.UpdateClient() + err = ep.Counterparty.UpdateClient() if err != nil { return 0, err } @@ -469,10 +462,10 @@ func (endpoint *Endpoint) SendPacket( return sequence, nil } -// RecvPacket receives a packet on the associated endpoint. +// RecvPacket receives a packet on the associated ep. // The counterparty client is updated. -func (endpoint *Endpoint) RecvPacket(packet channeltypes.Packet) error { - _, err := endpoint.RecvPacketWithResult(packet) +func (ep *Endpoint) RecvPacket(packet channeltypes.Packet) error { + _, err := ep.RecvPacketWithResult(packet) if err != nil { return err } @@ -482,228 +475,228 @@ func (endpoint *Endpoint) RecvPacket(packet channeltypes.Packet) error { // RecvPacketWithResult receives a packet on the associated endpoint and the result // of the transaction is returned. The counterparty client is updated. -func (endpoint *Endpoint) RecvPacketWithResult(packet channeltypes.Packet) (*abci.ExecTxResult, error) { +func (ep *Endpoint) RecvPacketWithResult(packet channeltypes.Packet) (*abci.ExecTxResult, error) { // get proof of packet commitment on source packetKey := host.PacketCommitmentKey(packet.GetSourcePort(), packet.GetSourceChannel(), packet.GetSequence()) - proof, proofHeight := endpoint.Counterparty.Chain.QueryProof(packetKey) + proof, proofHeight := ep.Counterparty.Chain.QueryProof(packetKey) - recvMsg := channeltypes.NewMsgRecvPacket(packet, proof, proofHeight, endpoint.Chain.SenderAccount.GetAddress().String()) + recvMsg := channeltypes.NewMsgRecvPacket(packet, proof, proofHeight, ep.Chain.SenderAccount.GetAddress().String()) // receive on counterparty and update source client - res, err := endpoint.Chain.SendMsgs(recvMsg) + res, err := ep.Chain.SendMsgs(recvMsg) if err != nil { return nil, err } - if err := endpoint.Counterparty.UpdateClient(); err != nil { + if err := ep.Counterparty.UpdateClient(); err != nil { return nil, err } return res, nil } -// WriteAcknowledgement writes an acknowledgement on the channel associated with the endpoint. +// WriteAcknowledgement writes an acknowledgement on the channel associated with the ep. // The counterparty client is updated. 
-func (endpoint *Endpoint) WriteAcknowledgement(ack exported.Acknowledgement, packet exported.PacketI) error { +func (ep *Endpoint) WriteAcknowledgement(ack exported.Acknowledgement, packet exported.PacketI) error { // no need to send message, acting as a handler - err := endpoint.Chain.App.GetIBCKeeper().ChannelKeeper.WriteAcknowledgement(endpoint.Chain.GetContext(), packet, ack) + err := ep.Chain.App.GetIBCKeeper().ChannelKeeper.WriteAcknowledgement(ep.Chain.GetContext(), packet, ack) if err != nil { return err } // commit changes since no message was sent - endpoint.Chain.Coordinator.CommitBlock(endpoint.Chain) + ep.Chain.Coordinator.CommitBlock(ep.Chain) - return endpoint.Counterparty.UpdateClient() + return ep.Counterparty.UpdateClient() } -// AcknowledgePacket sends a MsgAcknowledgement to the channel associated with the endpoint. -func (endpoint *Endpoint) AcknowledgePacket(packet channeltypes.Packet, ack []byte) error { +// AcknowledgePacket sends a MsgAcknowledgement to the channel associated with the ep. +func (ep *Endpoint) AcknowledgePacket(packet channeltypes.Packet, ack []byte) error { // get proof of acknowledgement on counterparty packetKey := host.PacketAcknowledgementKey(packet.GetDestPort(), packet.GetDestChannel(), packet.GetSequence()) - proof, proofHeight := endpoint.Counterparty.QueryProof(packetKey) + proof, proofHeight := ep.Counterparty.QueryProof(packetKey) - ackMsg := channeltypes.NewMsgAcknowledgement(packet, ack, proof, proofHeight, endpoint.Chain.SenderAccount.GetAddress().String()) + ackMsg := channeltypes.NewMsgAcknowledgement(packet, ack, proof, proofHeight, ep.Chain.SenderAccount.GetAddress().String()) - return endpoint.Chain.sendMsgs(ackMsg) + return ep.Chain.sendMsgs(ackMsg) } // AcknowledgePacketWithResult sends a MsgAcknowledgement to the channel associated with the endpoint and returns the result. -func (endpoint *Endpoint) AcknowledgePacketWithResult(packet channeltypes.Packet, ack []byte) (*abci.ExecTxResult, error) { +func (ep *Endpoint) AcknowledgePacketWithResult(packet channeltypes.Packet, ack []byte) (*abci.ExecTxResult, error) { // get proof of acknowledgement on counterparty packetKey := host.PacketAcknowledgementKey(packet.GetDestPort(), packet.GetDestChannel(), packet.GetSequence()) - proof, proofHeight := endpoint.Counterparty.QueryProof(packetKey) + proof, proofHeight := ep.Counterparty.QueryProof(packetKey) - ackMsg := channeltypes.NewMsgAcknowledgement(packet, ack, proof, proofHeight, endpoint.Chain.SenderAccount.GetAddress().String()) + ackMsg := channeltypes.NewMsgAcknowledgement(packet, ack, proof, proofHeight, ep.Chain.SenderAccount.GetAddress().String()) - return endpoint.Chain.SendMsgs(ackMsg) + return ep.Chain.SendMsgs(ackMsg) } -// TimeoutPacketWithResult sends a MsgTimeout to the channel associated with the endpoint. -func (endpoint *Endpoint) TimeoutPacketWithResult(packet channeltypes.Packet) (*abci.ExecTxResult, error) { +// TimeoutPacketWithResult sends a MsgTimeout to the channel associated with the ep. 
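SendPacket, RecvPacketWithResult, GetAcknowledgement and AcknowledgePacket together cover a full packet round trip on a classic channel. The sketch below is not part of the patch; it assumes the default mock channel from Path.Setup and the mock module's MockPacketData, and it pulls the raw acknowledgement from the transaction events via ParseAckFromEvents (GetAcknowledgement returns the stored acknowledgement commitment, not the raw acknowledgement bytes):

package ibctesting_test

import (
	"testing"

	"github.com/stretchr/testify/require"

	channeltypes "github.com/cosmos/ibc-go/v10/modules/core/04-channel/types"
	ibctesting "github.com/cosmos/ibc-go/v10/testing"
	"github.com/cosmos/ibc-go/v10/testing/mock"
)

func TestMockPacketRoundTrip(t *testing.T) {
	coordinator := ibctesting.NewCoordinator(t, 2)
	chainA := coordinator.GetChain(ibctesting.GetChainID(1))
	chainB := coordinator.GetChain(ibctesting.GetChainID(2))

	path := ibctesting.NewPath(chainA, chainB)
	path.Setup()

	timeoutHeight := chainB.GetTimeoutHeight()
	sequence, err := path.EndpointA.SendPacket(timeoutHeight, 0, mock.MockPacketData)
	require.NoError(t, err)

	packet := channeltypes.NewPacket(
		mock.MockPacketData, sequence,
		path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID,
		path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID,
		timeoutHeight, 0,
	)

	// receive on chainB; the raw acknowledgement is carried in the events
	res, err := path.EndpointB.RecvPacketWithResult(packet)
	require.NoError(t, err)
	ack, err := ibctesting.ParseAckFromEvents(res.Events)
	require.NoError(t, err)

	// the commitment of the written acknowledgement is now in chainB state
	require.NotEmpty(t, chainB.GetAcknowledgement(packet))

	// acknowledge on chainA to complete the round trip
	require.NoError(t, path.EndpointA.AcknowledgePacket(packet, ack))
}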
+func (ep *Endpoint) TimeoutPacketWithResult(packet channeltypes.Packet) (*abci.ExecTxResult, error) { // get proof for timeout based on channel order var packetKey []byte - switch endpoint.ChannelConfig.Order { + switch ep.ChannelConfig.Order { case channeltypes.ORDERED: packetKey = host.NextSequenceRecvKey(packet.GetDestPort(), packet.GetDestChannel()) case channeltypes.UNORDERED: packetKey = host.PacketReceiptKey(packet.GetDestPort(), packet.GetDestChannel(), packet.GetSequence()) default: - return nil, fmt.Errorf("unsupported order type %s", endpoint.ChannelConfig.Order) + return nil, fmt.Errorf("unsupported order type %s", ep.ChannelConfig.Order) } - counterparty := endpoint.Counterparty + counterparty := ep.Counterparty proof, proofHeight := counterparty.QueryProof(packetKey) nextSeqRecv, found := counterparty.Chain.App.GetIBCKeeper().ChannelKeeper.GetNextSequenceRecv(counterparty.Chain.GetContext(), counterparty.ChannelConfig.PortID, counterparty.ChannelID) - require.True(endpoint.Chain.TB, found) + require.True(ep.Chain.TB, found) timeoutMsg := channeltypes.NewMsgTimeout( packet, nextSeqRecv, - proof, proofHeight, endpoint.Chain.SenderAccount.GetAddress().String(), + proof, proofHeight, ep.Chain.SenderAccount.GetAddress().String(), ) - return endpoint.Chain.SendMsgs(timeoutMsg) + return ep.Chain.SendMsgs(timeoutMsg) } -// TimeoutPacket sends a MsgTimeout to the channel associated with the endpoint. -func (endpoint *Endpoint) TimeoutPacket(packet channeltypes.Packet) error { - _, err := endpoint.TimeoutPacketWithResult(packet) +// TimeoutPacket sends a MsgTimeout to the channel associated with the ep. +func (ep *Endpoint) TimeoutPacket(packet channeltypes.Packet) error { + _, err := ep.TimeoutPacketWithResult(packet) return err } -// TimeoutOnClose sends a MsgTimeoutOnClose to the channel associated with the endpoint. -func (endpoint *Endpoint) TimeoutOnClose(packet channeltypes.Packet) error { +// TimeoutOnClose sends a MsgTimeoutOnClose to the channel associated with the ep. 
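TimeoutPacket pairs naturally with the coordinator's time controls: send with a timestamp deadline, let the clock pass it, refresh the client, then submit the timeout. A hedged sketch, not part of the patch, again on the default mock channel:

package ibctesting_test

import (
	"testing"
	"time"

	"github.com/stretchr/testify/require"

	clienttypes "github.com/cosmos/ibc-go/v10/modules/core/02-client/types"
	channeltypes "github.com/cosmos/ibc-go/v10/modules/core/04-channel/types"
	ibctesting "github.com/cosmos/ibc-go/v10/testing"
	"github.com/cosmos/ibc-go/v10/testing/mock"
)

func TestMockPacketTimeout(t *testing.T) {
	coordinator := ibctesting.NewCoordinator(t, 2)
	chainA := coordinator.GetChain(ibctesting.GetChainID(1))
	chainB := coordinator.GetChain(ibctesting.GetChainID(2))

	path := ibctesting.NewPath(chainA, chainB)
	path.Setup()

	// timestamp-only deadline one hour ahead of chainB's current time
	timeoutTimestamp := chainB.GetTimeoutTimestamp()
	sequence, err := path.EndpointA.SendPacket(clienttypes.ZeroHeight(), timeoutTimestamp, mock.MockPacketData)
	require.NoError(t, err)

	// never relay the packet; instead let the deadline pass and give
	// chainA a fresh, post-deadline consensus state for chainB
	coordinator.IncrementTimeBy(2 * time.Hour)
	require.NoError(t, path.EndpointA.UpdateClient())

	packet := channeltypes.NewPacket(
		mock.MockPacketData, sequence,
		path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID,
		path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID,
		clienttypes.ZeroHeight(), timeoutTimestamp,
	)
	require.NoError(t, path.EndpointA.TimeoutPacket(packet))
}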
+func (ep *Endpoint) TimeoutOnClose(packet channeltypes.Packet) error { // get proof for timeout based on channel order var packetKey []byte - switch endpoint.ChannelConfig.Order { + switch ep.ChannelConfig.Order { case channeltypes.ORDERED: packetKey = host.NextSequenceRecvKey(packet.GetDestPort(), packet.GetDestChannel()) case channeltypes.UNORDERED: packetKey = host.PacketReceiptKey(packet.GetDestPort(), packet.GetDestChannel(), packet.GetSequence()) default: - return fmt.Errorf("unsupported order type %s", endpoint.ChannelConfig.Order) + return fmt.Errorf("unsupported order type %s", ep.ChannelConfig.Order) } - proof, proofHeight := endpoint.Counterparty.QueryProof(packetKey) + proof, proofHeight := ep.Counterparty.QueryProof(packetKey) channelKey := host.ChannelKey(packet.GetDestPort(), packet.GetDestChannel()) - closedProof, _ := endpoint.Counterparty.QueryProof(channelKey) + closedProof, _ := ep.Counterparty.QueryProof(channelKey) - nextSeqRecv, found := endpoint.Counterparty.Chain.App.GetIBCKeeper().ChannelKeeper.GetNextSequenceRecv(endpoint.Counterparty.Chain.GetContext(), endpoint.Counterparty.ChannelConfig.PortID, endpoint.Counterparty.ChannelID) - require.True(endpoint.Chain.TB, found) + nextSeqRecv, found := ep.Counterparty.Chain.App.GetIBCKeeper().ChannelKeeper.GetNextSequenceRecv(ep.Counterparty.Chain.GetContext(), ep.Counterparty.ChannelConfig.PortID, ep.Counterparty.ChannelID) + require.True(ep.Chain.TB, found) timeoutOnCloseMsg := channeltypes.NewMsgTimeoutOnClose( packet, nextSeqRecv, - proof, closedProof, proofHeight, endpoint.Chain.SenderAccount.GetAddress().String(), + proof, closedProof, proofHeight, ep.Chain.SenderAccount.GetAddress().String(), ) - return endpoint.Chain.sendMsgs(timeoutOnCloseMsg) + return ep.Chain.sendMsgs(timeoutOnCloseMsg) } // Deprecated: usage of this function should be replaced by `UpdateChannel` // SetChannelState sets a channel state -func (endpoint *Endpoint) SetChannelState(state channeltypes.State) error { - channel := endpoint.GetChannel() +func (ep *Endpoint) SetChannelState(state channeltypes.State) error { + channel := ep.GetChannel() channel.State = state - endpoint.Chain.App.GetIBCKeeper().ChannelKeeper.SetChannel(endpoint.Chain.GetContext(), endpoint.ChannelConfig.PortID, endpoint.ChannelID, channel) + ep.Chain.App.GetIBCKeeper().ChannelKeeper.SetChannel(ep.Chain.GetContext(), ep.ChannelConfig.PortID, ep.ChannelID, channel) - endpoint.Chain.Coordinator.CommitBlock(endpoint.Chain) + ep.Chain.Coordinator.CommitBlock(ep.Chain) - return endpoint.Counterparty.UpdateClient() + return ep.Counterparty.UpdateClient() } -// UpdateChannel updates the channel associated with the given endpoint. It accepts a +// UpdateChannel updates the channel associated with the given ep. It accepts a // closure which takes a channel allowing the caller to modify its fields. -func (endpoint *Endpoint) UpdateChannel(updater func(channel *channeltypes.Channel)) { - channel := endpoint.GetChannel() +func (ep *Endpoint) UpdateChannel(updater func(channel *channeltypes.Channel)) { + channel := ep.GetChannel() updater(&channel) - endpoint.SetChannel(channel) + ep.SetChannel(channel) - endpoint.Chain.Coordinator.CommitBlock(endpoint.Chain) + ep.Chain.Coordinator.CommitBlock(ep.Chain) - err := endpoint.Counterparty.UpdateClient() - require.NoError(endpoint.Chain.TB, err) + err := ep.Counterparty.UpdateClient() + require.NoError(ep.Chain.TB, err) } -// GetClientLatestHeight returns the latest height for the client state for this endpoint. 
+// GetClientLatestHeight returns the latest height for the client state for this ep. // The client state is expected to exist otherwise testing will fail. -func (endpoint *Endpoint) GetClientLatestHeight() exported.Height { - return endpoint.Chain.GetClientLatestHeight(endpoint.ClientID) +func (ep *Endpoint) GetClientLatestHeight() exported.Height { + return ep.Chain.GetClientLatestHeight(ep.ClientID) } -// GetClientState retrieves the client state for this endpoint. The +// GetClientState retrieves the client state for this ep. The // client state is expected to exist otherwise testing will fail. -func (endpoint *Endpoint) GetClientState() exported.ClientState { - return endpoint.Chain.GetClientState(endpoint.ClientID) +func (ep *Endpoint) GetClientState() exported.ClientState { + return ep.Chain.GetClientState(ep.ClientID) } -// SetClientState sets the client state for this endpoint. -func (endpoint *Endpoint) SetClientState(clientState exported.ClientState) { - endpoint.Chain.App.GetIBCKeeper().ClientKeeper.SetClientState(endpoint.Chain.GetContext(), endpoint.ClientID, clientState) +// SetClientState sets the client state for this ep. +func (ep *Endpoint) SetClientState(clientState exported.ClientState) { + ep.Chain.App.GetIBCKeeper().ClientKeeper.SetClientState(ep.Chain.GetContext(), ep.ClientID, clientState) } // GetConsensusState retrieves the Consensus State for this endpoint at the provided height. // The consensus state is expected to exist otherwise testing will fail. -func (endpoint *Endpoint) GetConsensusState(height exported.Height) exported.ConsensusState { - consensusState, found := endpoint.Chain.GetConsensusState(endpoint.ClientID, height) - require.True(endpoint.Chain.TB, found) +func (ep *Endpoint) GetConsensusState(height exported.Height) exported.ConsensusState { + consensusState, found := ep.Chain.GetConsensusState(ep.ClientID, height) + require.True(ep.Chain.TB, found) return consensusState } -// SetConsensusState sets the consensus state for this endpoint. -func (endpoint *Endpoint) SetConsensusState(consensusState exported.ConsensusState, height exported.Height) { - endpoint.Chain.App.GetIBCKeeper().ClientKeeper.SetClientConsensusState(endpoint.Chain.GetContext(), endpoint.ClientID, height, consensusState) +// SetConsensusState sets the consensus state for this ep. +func (ep *Endpoint) SetConsensusState(consensusState exported.ConsensusState, height exported.Height) { + ep.Chain.App.GetIBCKeeper().ClientKeeper.SetClientConsensusState(ep.Chain.GetContext(), ep.ClientID, height, consensusState) } -// GetConnection retrieves an IBC Connection for the endpoint. The +// GetConnection retrieves an IBC Connection for the ep. The // connection is expected to exist otherwise testing will fail. -func (endpoint *Endpoint) GetConnection() connectiontypes.ConnectionEnd { - connection, found := endpoint.Chain.App.GetIBCKeeper().ConnectionKeeper.GetConnection(endpoint.Chain.GetContext(), endpoint.ConnectionID) - require.True(endpoint.Chain.TB, found) +func (ep *Endpoint) GetConnection() connectiontypes.ConnectionEnd { + connection, found := ep.Chain.App.GetIBCKeeper().ConnectionKeeper.GetConnection(ep.Chain.GetContext(), ep.ConnectionID) + require.True(ep.Chain.TB, found) return connection } -// SetConnection sets the connection for this endpoint. 
-func (endpoint *Endpoint) SetConnection(connection connectiontypes.ConnectionEnd) { - endpoint.Chain.App.GetIBCKeeper().ConnectionKeeper.SetConnection(endpoint.Chain.GetContext(), endpoint.ConnectionID, connection) +// SetConnection sets the connection for this ep. +func (ep *Endpoint) SetConnection(connection connectiontypes.ConnectionEnd) { + ep.Chain.App.GetIBCKeeper().ConnectionKeeper.SetConnection(ep.Chain.GetContext(), ep.ConnectionID, connection) } -// GetChannel retrieves an IBC Channel for the endpoint. The channel +// GetChannel retrieves an IBC Channel for the ep. The channel // is expected to exist otherwise testing will fail. -func (endpoint *Endpoint) GetChannel() channeltypes.Channel { - channel, found := endpoint.Chain.App.GetIBCKeeper().ChannelKeeper.GetChannel(endpoint.Chain.GetContext(), endpoint.ChannelConfig.PortID, endpoint.ChannelID) - require.True(endpoint.Chain.TB, found) +func (ep *Endpoint) GetChannel() channeltypes.Channel { + channel, found := ep.Chain.App.GetIBCKeeper().ChannelKeeper.GetChannel(ep.Chain.GetContext(), ep.ChannelConfig.PortID, ep.ChannelID) + require.True(ep.Chain.TB, found) return channel } -// SetChannel sets the channel for this endpoint. -func (endpoint *Endpoint) SetChannel(channel channeltypes.Channel) { - endpoint.Chain.App.GetIBCKeeper().ChannelKeeper.SetChannel(endpoint.Chain.GetContext(), endpoint.ChannelConfig.PortID, endpoint.ChannelID, channel) +// SetChannel sets the channel for this ep. +func (ep *Endpoint) SetChannel(channel channeltypes.Channel) { + ep.Chain.App.GetIBCKeeper().ChannelKeeper.SetChannel(ep.Chain.GetContext(), ep.ChannelConfig.PortID, ep.ChannelID, channel) } // QueryClientStateProof performs and abci query for a client stat associated // with this endpoint and returns the ClientState along with the proof. -func (endpoint *Endpoint) QueryClientStateProof() (exported.ClientState, []byte) { +func (ep *Endpoint) QueryClientStateProof() (exported.ClientState, []byte) { // retrieve client state to provide proof for - clientState := endpoint.GetClientState() + clientState := ep.GetClientState() - clientKey := host.FullClientStateKey(endpoint.ClientID) - clientProof, _ := endpoint.QueryProof(clientKey) + clientKey := host.FullClientStateKey(ep.ClientID) + clientProof, _ := ep.QueryProof(clientKey) return clientState, clientProof } -// UpdateConnection updates the connection associated with the given endpoint. It accepts a +// UpdateConnection updates the connection associated with the given ep. It accepts a // closure which takes a connection allowing the caller to modify the connection fields. -func (endpoint *Endpoint) UpdateConnection(updater func(connection *connectiontypes.ConnectionEnd)) { - connection := endpoint.GetConnection() +func (ep *Endpoint) UpdateConnection(updater func(connection *connectiontypes.ConnectionEnd)) { + connection := ep.GetConnection() updater(&connection) - endpoint.SetConnection(connection) + ep.SetConnection(connection) } diff --git a/testing/endpoint_v2.go b/testing/endpoint_v2.go index c020ccc1712..e49ca4786fa 100644 --- a/testing/endpoint_v2.go +++ b/testing/endpoint_v2.go @@ -10,36 +10,36 @@ import ( hostv2 "github.com/cosmos/ibc-go/v10/modules/core/24-host/v2" ) -// RegisterCounterparty will construct and execute a MsgRegisterCounterparty on the associated endpoint. 
-func (endpoint *Endpoint) RegisterCounterparty() (err error) { - msg := clientv2types.NewMsgRegisterCounterparty(endpoint.ClientID, endpoint.Counterparty.MerklePathPrefix.KeyPath, endpoint.Counterparty.ClientID, endpoint.Chain.SenderAccount.GetAddress().String()) +// RegisterCounterparty will construct and execute a MsgRegisterCounterparty on the associated ep. +func (ep *Endpoint) RegisterCounterparty() error { + msg := clientv2types.NewMsgRegisterCounterparty(ep.ClientID, ep.Counterparty.MerklePathPrefix.KeyPath, ep.Counterparty.ClientID, ep.Chain.SenderAccount.GetAddress().String()) // setup counterparty - _, err = endpoint.Chain.SendMsgs(msg) + _, err := ep.Chain.SendMsgs(msg) return err } // MsgSendPacket sends a packet on the associated endpoint using a predefined sender. The constructed packet is returned. -func (endpoint *Endpoint) MsgSendPacket(timeoutTimestamp uint64, payload channeltypesv2.Payload) (channeltypesv2.Packet, error) { +func (ep *Endpoint) MsgSendPacket(timeoutTimestamp uint64, payloads ...channeltypesv2.Payload) (channeltypesv2.Packet, error) { senderAccount := SenderAccount{ - SenderPrivKey: endpoint.Chain.SenderPrivKey, - SenderAccount: endpoint.Chain.SenderAccount, + SenderPrivKey: ep.Chain.SenderPrivKey, + SenderAccount: ep.Chain.SenderAccount, } - return endpoint.MsgSendPacketWithSender(timeoutTimestamp, payload, senderAccount) + return ep.MsgSendPacketWithSender(timeoutTimestamp, payloads, senderAccount) } // MsgSendPacketWithSender sends a packet on the associated endpoint using the provided sender. The constructed packet is returned. -func (endpoint *Endpoint) MsgSendPacketWithSender(timeoutTimestamp uint64, payload channeltypesv2.Payload, sender SenderAccount) (channeltypesv2.Packet, error) { - msgSendPacket := channeltypesv2.NewMsgSendPacket(endpoint.ClientID, timeoutTimestamp, sender.SenderAccount.GetAddress().String(), payload) +func (ep *Endpoint) MsgSendPacketWithSender(timeoutTimestamp uint64, payloads []channeltypesv2.Payload, sender SenderAccount) (channeltypesv2.Packet, error) { + msgSendPacket := channeltypesv2.NewMsgSendPacket(ep.ClientID, timeoutTimestamp, sender.SenderAccount.GetAddress().String(), payloads...) - res, err := endpoint.Chain.SendMsgsWithSender(sender, msgSendPacket) + res, err := ep.Chain.SendMsgsWithSender(sender, msgSendPacket) if err != nil { return channeltypesv2.Packet{}, err } - if err := endpoint.Counterparty.UpdateClient(); err != nil { + if err := ep.Counterparty.UpdateClient(); err != nil { return channeltypesv2.Packet{}, err } @@ -56,50 +56,98 @@ func (endpoint *Endpoint) MsgSendPacketWithSender(timeoutTimestamp uint64, paylo if err != nil { return channeltypesv2.Packet{}, err } - packet := channeltypesv2.NewPacket(sendResponse.Sequence, endpoint.ClientID, endpoint.Counterparty.ClientID, timeoutTimestamp, payload) + packet := channeltypesv2.NewPacket(sendResponse.Sequence, ep.ClientID, ep.Counterparty.ClientID, timeoutTimestamp, payloads...) + + err = ep.Counterparty.UpdateClient() + if err != nil { + return channeltypesv2.Packet{}, err + } return packet, nil } // MsgRecvPacket sends a MsgRecvPacket on the associated endpoint with the provided packet. 
-func (endpoint *Endpoint) MsgRecvPacket(packet channeltypesv2.Packet) error { +func (ep *Endpoint) MsgRecvPacket(packet channeltypesv2.Packet) error { // get proof of packet commitment from chainA packetKey := hostv2.PacketCommitmentKey(packet.SourceClient, packet.Sequence) - proof, proofHeight := endpoint.Counterparty.QueryProof(packetKey) + proof, proofHeight := ep.Counterparty.QueryProof(packetKey) - msg := channeltypesv2.NewMsgRecvPacket(packet, proof, proofHeight, endpoint.Chain.SenderAccount.GetAddress().String()) + msg := channeltypesv2.NewMsgRecvPacket(packet, proof, proofHeight, ep.Chain.SenderAccount.GetAddress().String()) - if err := endpoint.Chain.sendMsgs(msg); err != nil { + if err := ep.Chain.sendMsgs(msg); err != nil { return err } - return endpoint.Counterparty.UpdateClient() + return ep.Counterparty.UpdateClient() +} + +// MsgRecvPacketWithAck returns the acknowledgement for the given packet by sending a MsgRecvPacket on the associated endpoint. +func (ep *Endpoint) MsgRecvPacketWithAck(packet channeltypesv2.Packet) (channeltypesv2.Acknowledgement, error) { + // get proof of packet commitment from chainA + packetKey := hostv2.PacketCommitmentKey(packet.SourceClient, packet.Sequence) + proof, proofHeight := ep.Counterparty.QueryProof(packetKey) + + msg := channeltypesv2.NewMsgRecvPacket(packet, proof, proofHeight, ep.Chain.SenderAccount.GetAddress().String()) + + res, err := ep.Chain.SendMsgs(msg) + if err != nil { + return channeltypesv2.Acknowledgement{}, err + } + + ackBz, err := ParseAckV2FromEvents(res.Events) + if err != nil { + return channeltypesv2.Acknowledgement{}, err + } + var ack channeltypesv2.Acknowledgement + err = proto.Unmarshal(ackBz, &ack) + if err != nil { + return channeltypesv2.Acknowledgement{}, err + } + + err = ep.Counterparty.UpdateClient() + if err != nil { + return channeltypesv2.Acknowledgement{}, err + } + + return ack, nil } // MsgAcknowledgePacket sends a MsgAcknowledgement on the associated endpoint with the provided packet and ack. -func (endpoint *Endpoint) MsgAcknowledgePacket(packet channeltypesv2.Packet, ack channeltypesv2.Acknowledgement) error { +func (ep *Endpoint) MsgAcknowledgePacket(packet channeltypesv2.Packet, ack channeltypesv2.Acknowledgement) error { packetKey := hostv2.PacketAcknowledgementKey(packet.DestinationClient, packet.Sequence) - proof, proofHeight := endpoint.Counterparty.QueryProof(packetKey) + proof, proofHeight := ep.Counterparty.QueryProof(packetKey) - msg := channeltypesv2.NewMsgAcknowledgement(packet, ack, proof, proofHeight, endpoint.Chain.SenderAccount.GetAddress().String()) + msg := channeltypesv2.NewMsgAcknowledgement(packet, ack, proof, proofHeight, ep.Chain.SenderAccount.GetAddress().String()) - if err := endpoint.Chain.sendMsgs(msg); err != nil { + if err := ep.Chain.sendMsgs(msg); err != nil { return err } - return endpoint.Counterparty.UpdateClient() + return ep.Counterparty.UpdateClient() } // MsgTimeoutPacket sends a MsgTimeout on the associated endpoint with the provided packet. 
-func (endpoint *Endpoint) MsgTimeoutPacket(packet channeltypesv2.Packet) error { +func (ep *Endpoint) MsgTimeoutPacket(packet channeltypesv2.Packet) error { packetKey := hostv2.PacketReceiptKey(packet.DestinationClient, packet.Sequence) - proof, proofHeight := endpoint.Counterparty.QueryProof(packetKey) + proof, proofHeight := ep.Counterparty.QueryProof(packetKey) - msg := channeltypesv2.NewMsgTimeout(packet, proof, proofHeight, endpoint.Chain.SenderAccount.GetAddress().String()) + msg := channeltypesv2.NewMsgTimeout(packet, proof, proofHeight, ep.Chain.SenderAccount.GetAddress().String()) - if err := endpoint.Chain.sendMsgs(msg); err != nil { + if err := ep.Chain.sendMsgs(msg); err != nil { + return err + } + + return ep.Counterparty.UpdateClient() +} + +// RelayPacket relays a packet that was previously sent on the given endpoint. +func (ep *Endpoint) RelayPacket(packet channeltypesv2.Packet) error { + // receive packet on counterparty + ack, err := ep.Counterparty.MsgRecvPacketWithAck(packet) + if err != nil { return err } - return endpoint.Counterparty.UpdateClient() + // acknowledge packet on endpoint + return ep.MsgAcknowledgePacket(packet, ack) } diff --git a/testing/events.go b/testing/events.go index cabe8be2217..572c657f9fa 100644 --- a/testing/events.go +++ b/testing/events.go @@ -150,6 +150,17 @@ func ParseIBCV1Packets(eventType string, events []abci.Event) ([]channeltypes.Pa return packets, nil } +// ParseV2PacketFromEvents parses events emitted from a v2 send packet +// and returns the first EventTypeSendPacket packet found. +// Returns an error if no packet is found. +func ParseV2PacketFromEvents(events []abci.Event) (channeltypesv2.Packet, error) { + packets, err := ParseIBCV2Packets(channeltypesv2.EventTypeSendPacket, events) + if err != nil { + return channeltypesv2.Packet{}, err + } + return packets[0], nil +} + // ParseIBCV2Packets parses events and returns all the v2 packets found. // Returns an error if no v2 packet is found. func ParseIBCV2Packets(eventType string, events []abci.Event) ([]channeltypesv2.Packet, error) { @@ -194,6 +205,9 @@ func ParseIBCV2Packets(eventType string, events []abci.Event) ([]channeltypesv2. case channeltypesv2.AttributeKeySrcClient: packet.SourceClient = attr.Value + + default: + // Ignore unknown attributes } } packets = append(packets, packet) @@ -223,6 +237,23 @@ func ParseAckFromEvents(events []abci.Event) ([]byte, error) { return nil, errors.New("acknowledgement event attribute not found") } +// ParseAckV2FromEvents parses events emitted from a MsgRecvPacket and returns the +// acknowledgement for v2 packets.
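RelayPacket simply composes MsgRecvPacketWithAck on the counterparty with MsgAcknowledgePacket on the sending endpoint. The sketch below is not part of the patch and is deliberately hedged: it assumes a Path helper that wires up IBC v2 clients and counterparties (SetupV2 here) and that the mock v2 application is routed under the port used in the payload, so the "mock" port identifier is a placeholder.

package ibctesting_test

import (
	"testing"

	"github.com/stretchr/testify/require"

	transfertypes "github.com/cosmos/ibc-go/v10/modules/apps/transfer/types"
	channeltypesv2 "github.com/cosmos/ibc-go/v10/modules/core/04-channel/v2/types"
	ibctesting "github.com/cosmos/ibc-go/v10/testing"
	mockv1 "github.com/cosmos/ibc-go/v10/testing/mock"
)

func TestV2PacketRoundTrip(t *testing.T) {
	coordinator := ibctesting.NewCoordinator(t, 2)
	chainA := coordinator.GetChain(ibctesting.GetChainID(1))
	chainB := coordinator.GetChain(ibctesting.GetChainID(2))

	path := ibctesting.NewPath(chainA, chainB)
	path.SetupV2() // assumed helper: clients plus RegisterCounterparty on both ends

	const mockPort = "mock" // placeholder: must match the mock v2 app's registered route

	payload := channeltypesv2.Payload{
		SourcePort:      mockPort,
		DestinationPort: mockPort,
		Version:         mockv1.Version,
		Encoding:        transfertypes.EncodingProtobuf,
		Value:           mockv1.MockPacketData, // treated as a successful recv by the mock v2 app
	}

	// IBC v2 timeouts are expressed in seconds
	timeoutTimestamp := chainB.GetTimeoutTimestampSecs()
	packet, err := path.EndpointA.MsgSendPacket(timeoutTimestamp, payload)
	require.NoError(t, err)

	// receive on chainB, parse the written ack, acknowledge on chainA
	require.NoError(t, path.EndpointA.RelayPacket(packet))
}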
+func ParseAckV2FromEvents(events []abci.Event) ([]byte, error) { + for _, ev := range events { + if ev.Type == channeltypesv2.EventTypeWriteAck { + if attribute, found := attributeByKey(ev.Attributes, channeltypesv2.AttributeKeyEncodedAckHex); found { + value, err := hex.DecodeString(attribute.Value) + if err != nil { + return nil, err + } + return value, nil + } + } + } + return nil, errors.New("acknowledgement event attribute not found") +} + // ParseProposalIDFromEvents parses events emitted from MsgSubmitProposal and returns proposalID func ParseProposalIDFromEvents(events []abci.Event) (uint64, error) { for _, event := range events { diff --git a/testing/mock/address_codec.go b/testing/mock/address_codec.go new file mode 100644 index 00000000000..0565fa59196 --- /dev/null +++ b/testing/mock/address_codec.go @@ -0,0 +1,27 @@ +package mock + +import ( + "errors" + + sdk "github.com/cosmos/cosmos-sdk/types" +) + +type TestAddressCodec struct{} + +func (t TestAddressCodec) StringToBytes(text string) ([]byte, error) { + hexBytes, err := sdk.AccAddressFromHexUnsafe(text) + if err == nil { + return hexBytes, nil + } + + bech32Bytes, err := sdk.AccAddressFromBech32(text) + if err == nil { + return bech32Bytes, nil + } + + return nil, errors.New("invalid address format") +} + +func (t TestAddressCodec) BytesToString(bz []byte) (string, error) { + return sdk.AccAddress(bz).String(), nil +} diff --git a/testing/mock/ibc_module.go b/testing/mock/ibc_module.go index f685e304ad7..975cebd7cd2 100644 --- a/testing/mock/ibc_module.go +++ b/testing/mock/ibc_module.go @@ -31,9 +31,9 @@ type IBCModule struct { } // NewIBCModule creates a new IBCModule given the underlying mock IBC application and scopedKeeper. -func NewIBCModule(appModule *AppModule, app *IBCApp) IBCModule { +func NewIBCModule(appModule *AppModule, app *IBCApp) *IBCModule { appModule.ibcApps = append(appModule.ibcApps, app) - return IBCModule{ + return &IBCModule{ appModule: appModule, IBCApp: app, } @@ -150,3 +150,9 @@ func (IBCModule) UnmarshalPacketData(ctx sdk.Context, portID string, channelID s } return nil, "", MockApplicationCallbackError } + +func (*IBCModule) SetICS4Wrapper(wrapper porttypes.ICS4Wrapper) { + if wrapper == nil { + panic("ICS4Wrapper cannot be nil") + } +} diff --git a/testing/mock/middleware.go b/testing/mock/middleware.go index 69fc3ad9bbb..e8344833bc2 100644 --- a/testing/mock/middleware.go +++ b/testing/mock/middleware.go @@ -155,3 +155,15 @@ func (BlockUpgradeMiddleware) WriteAcknowledgement( func (BlockUpgradeMiddleware) GetAppVersion(ctx sdk.Context, portID, channelID string) (string, bool) { return Version, true } + +// SetICS4Wrapper sets the ICS4Wrapper. This function may be used after +// the module's initialization to set the middleware which is above this +// module in the IBC application stack. +func (BlockUpgradeMiddleware) SetICS4Wrapper(wrapper porttypes.ICS4Wrapper) { + panic("unused") +} + +// SetUnderlyingApplication sets the underlying application of the middleware. 
+func (BlockUpgradeMiddleware) SetUnderlyingApplication(app porttypes.IBCModule) { + panic("unused") +} diff --git a/testing/mock/v2/ibc_module.go b/testing/mock/v2/ibc_module.go index 5e53f7e397b..33ead1f02ae 100644 --- a/testing/mock/v2/ibc_module.go +++ b/testing/mock/v2/ibc_module.go @@ -50,6 +50,9 @@ func (im IBCModule) OnRecvPacket(ctx sdk.Context, sourceChannel string, destinat if bytes.Equal(payload.Value, mockv1.MockPacketData) { return MockRecvPacketResult } + if bytes.Equal(payload.Value, mockv1.MockAsyncPacketData) { + return channeltypesv2.RecvPacketResult{Status: channeltypesv2.PacketStatus_Async} + } return channeltypesv2.RecvPacketResult{Status: channeltypesv2.PacketStatus_Failure} } diff --git a/testing/mock/v2/mock.go b/testing/mock/v2/mock.go index 2e120dd79a0..3a3b3e989e4 100644 --- a/testing/mock/v2/mock.go +++ b/testing/mock/v2/mock.go @@ -34,3 +34,13 @@ func NewErrorMockPayload(sourcePort, destPort string) channeltypesv2.Payload { Version: mockv1.Version, } } + +func NewAsyncMockPayload(sourcePort, destPort string) channeltypesv2.Payload { + return channeltypesv2.Payload{ + SourcePort: sourcePort, + DestinationPort: destPort, + Encoding: transfertypes.EncodingProtobuf, + Value: mockv1.MockAsyncPacketData, + Version: mockv1.Version, + } +} diff --git a/testing/simapp/app.go b/testing/simapp/app.go index ae21ddb29ac..c65d894a895 100644 --- a/testing/simapp/app.go +++ b/testing/simapp/app.go @@ -66,14 +66,9 @@ import ( govclient "github.com/cosmos/cosmos-sdk/x/gov/client" govkeeper "github.com/cosmos/cosmos-sdk/x/gov/keeper" govtypes "github.com/cosmos/cosmos-sdk/x/gov/types" - "github.com/cosmos/cosmos-sdk/x/group" "github.com/cosmos/cosmos-sdk/x/mint" mintkeeper "github.com/cosmos/cosmos-sdk/x/mint/keeper" minttypes "github.com/cosmos/cosmos-sdk/x/mint/types" - "github.com/cosmos/cosmos-sdk/x/params" - paramsclient "github.com/cosmos/cosmos-sdk/x/params/client" - paramskeeper "github.com/cosmos/cosmos-sdk/x/params/keeper" - paramstypes "github.com/cosmos/cosmos-sdk/x/params/types" "github.com/cosmos/cosmos-sdk/x/slashing" slashingkeeper "github.com/cosmos/cosmos-sdk/x/slashing/keeper" slashingtypes "github.com/cosmos/cosmos-sdk/x/slashing/types" @@ -91,13 +86,17 @@ import ( icahostkeeper "github.com/cosmos/ibc-go/v10/modules/apps/27-interchain-accounts/host/keeper" icahosttypes "github.com/cosmos/ibc-go/v10/modules/apps/27-interchain-accounts/host/types" icatypes "github.com/cosmos/ibc-go/v10/modules/apps/27-interchain-accounts/types" + packetforward "github.com/cosmos/ibc-go/v10/modules/apps/packet-forward-middleware" + packetforwardkeeper "github.com/cosmos/ibc-go/v10/modules/apps/packet-forward-middleware/keeper" + packetforwardtypes "github.com/cosmos/ibc-go/v10/modules/apps/packet-forward-middleware/types" + ratelimiting "github.com/cosmos/ibc-go/v10/modules/apps/rate-limiting" + ratelimitkeeper "github.com/cosmos/ibc-go/v10/modules/apps/rate-limiting/keeper" + ratelimittypes "github.com/cosmos/ibc-go/v10/modules/apps/rate-limiting/types" "github.com/cosmos/ibc-go/v10/modules/apps/transfer" ibctransferkeeper "github.com/cosmos/ibc-go/v10/modules/apps/transfer/keeper" ibctransfertypes "github.com/cosmos/ibc-go/v10/modules/apps/transfer/types" transferv2 "github.com/cosmos/ibc-go/v10/modules/apps/transfer/v2" ibc "github.com/cosmos/ibc-go/v10/modules/core" - ibcclienttypes "github.com/cosmos/ibc-go/v10/modules/core/02-client/types" - ibcconnectiontypes "github.com/cosmos/ibc-go/v10/modules/core/03-connection/types" porttypes 
"github.com/cosmos/ibc-go/v10/modules/core/05-port/types" ibcapi "github.com/cosmos/ibc-go/v10/modules/core/api" ibcexported "github.com/cosmos/ibc-go/v10/modules/core/exported" @@ -141,7 +140,6 @@ type SimApp struct { // keys to access the substores keys map[string]*storetypes.KVStoreKey - tkeys map[string]*storetypes.TransientStoreKey memKeys map[string]*storetypes.MemoryStoreKey // keepers @@ -153,18 +151,19 @@ type SimApp struct { DistrKeeper distrkeeper.Keeper GovKeeper govkeeper.Keeper UpgradeKeeper *upgradekeeper.Keeper - ParamsKeeper paramskeeper.Keeper AuthzKeeper authzkeeper.Keeper IBCKeeper *ibckeeper.Keeper // IBC Keeper must be a pointer in the app, so we can SetRouter on it correctly - ICAControllerKeeper icacontrollerkeeper.Keeper - ICAHostKeeper icahostkeeper.Keeper - TransferKeeper ibctransferkeeper.Keeper + ICAControllerKeeper *icacontrollerkeeper.Keeper + ICAHostKeeper *icahostkeeper.Keeper + TransferKeeper *ibctransferkeeper.Keeper ConsensusParamsKeeper consensusparamkeeper.Keeper + PFMKeeper *packetforwardkeeper.Keeper + RateLimitKeeper *ratelimitkeeper.Keeper // make IBC modules public for test purposes // these modules are never directly routed to by the IBC Router - IBCMockModule ibcmock.IBCModule - ICAAuthModule ibcmock.IBCModule + IBCMockModule *ibcmock.IBCModule + ICAAuthModule *ibcmock.IBCModule MockModuleV2A mockv2.IBCModule MockModuleV2B mockv2.IBCModule @@ -251,9 +250,10 @@ func NewSimApp( keys := storetypes.NewKVStoreKeys( authtypes.StoreKey, banktypes.StoreKey, stakingtypes.StoreKey, minttypes.StoreKey, distrtypes.StoreKey, slashingtypes.StoreKey, - govtypes.StoreKey, group.StoreKey, paramstypes.StoreKey, ibcexported.StoreKey, upgradetypes.StoreKey, - ibctransfertypes.StoreKey, icacontrollertypes.StoreKey, icahosttypes.StoreKey, + govtypes.StoreKey, ibcexported.StoreKey, upgradetypes.StoreKey, + packetforwardtypes.StoreKey, ibctransfertypes.StoreKey, icacontrollertypes.StoreKey, icahosttypes.StoreKey, authzkeeper.StoreKey, consensusparamtypes.StoreKey, + ratelimittypes.StoreKey, ) // register streaming services @@ -261,7 +261,6 @@ func NewSimApp( panic(err) } - tkeys := storetypes.NewTransientStoreKeys(paramstypes.TStoreKey) memKeys := storetypes.NewMemoryStoreKeys(ibcmock.MemStoreKey) app := &SimApp{ @@ -271,12 +270,9 @@ func NewSimApp( txConfig: txConfig, interfaceRegistry: interfaceRegistry, keys: keys, - tkeys: tkeys, memKeys: memKeys, } - app.ParamsKeeper = initParamsKeeper(appCodec, legacyAmino, keys[paramstypes.StoreKey], tkeys[paramstypes.TStoreKey]) - // set the BaseApp's parameter store app.ConsensusParamsKeeper = consensusparamkeeper.NewKeeper(appCodec, runtime.NewKVStoreService(keys[consensusparamtypes.StoreKey]), authtypes.NewModuleAddress(govtypes.ModuleName).String(), runtime.EventService{}) bApp.SetParamStore(app.ConsensusParamsKeeper.ParamsStore) @@ -323,7 +319,7 @@ func NewSimApp( app.UpgradeKeeper = upgradekeeper.NewKeeper(skipUpgradeHeights, runtime.NewKVStoreService(keys[upgradetypes.StoreKey]), appCodec, homePath, app.BaseApp, authtypes.NewModuleAddress(govtypes.ModuleName).String()) app.IBCKeeper = ibckeeper.NewKeeper( - appCodec, runtime.NewKVStoreService(keys[ibcexported.StoreKey]), app.GetSubspace(ibcexported.ModuleName), app.UpgradeKeeper, authtypes.NewModuleAddress(govtypes.ModuleName).String(), + appCodec, runtime.NewKVStoreService(keys[ibcexported.StoreKey]), app.UpgradeKeeper, authtypes.NewModuleAddress(govtypes.ModuleName).String(), ) govConfig := govtypes.DefaultConfig() @@ -344,8 +340,7 @@ func NewSimApp( // ICA Controller 
keeper app.ICAControllerKeeper = icacontrollerkeeper.NewKeeper( - appCodec, runtime.NewKVStoreService(keys[icacontrollertypes.StoreKey]), app.GetSubspace(icacontrollertypes.SubModuleName), - app.IBCKeeper.ChannelKeeper, + appCodec, runtime.NewKVStoreService(keys[icacontrollertypes.StoreKey]), app.IBCKeeper.ChannelKeeper, app.MsgServiceRouter(), authtypes.NewModuleAddress(govtypes.ModuleName).String(), @@ -353,8 +348,7 @@ func NewSimApp( // ICA Host keeper app.ICAHostKeeper = icahostkeeper.NewKeeper( - appCodec, runtime.NewKVStoreService(keys[icahosttypes.StoreKey]), app.GetSubspace(icahosttypes.SubModuleName), - app.IBCKeeper.ChannelKeeper, + appCodec, runtime.NewKVStoreService(keys[icahosttypes.StoreKey]), app.IBCKeeper.ChannelKeeper, app.AccountKeeper, app.MsgServiceRouter(), app.GRPCQueryRouter(), authtypes.NewModuleAddress(govtypes.ModuleName).String(), @@ -368,14 +362,17 @@ func NewSimApp( // Create Transfer Keeper app.TransferKeeper = ibctransferkeeper.NewKeeper( - appCodec, runtime.NewKVStoreService(keys[ibctransfertypes.StoreKey]), app.GetSubspace(ibctransfertypes.ModuleName), - app.IBCKeeper.ChannelKeeper, + appCodec, app.AccountKeeper.AddressCodec(), + runtime.NewKVStoreService(keys[ibctransfertypes.StoreKey]), app.IBCKeeper.ChannelKeeper, app.MsgServiceRouter(), app.AccountKeeper, app.BankKeeper, authtypes.NewModuleAddress(govtypes.ModuleName).String(), ) + app.RateLimitKeeper = ratelimitkeeper.NewKeeper(appCodec, app.AccountKeeper.AddressCodec(), runtime.NewKVStoreService(keys[ratelimittypes.StoreKey]), app.IBCKeeper.ChannelKeeper, app.IBCKeeper.ClientKeeper, app.BankKeeper, authtypes.NewModuleAddress(govtypes.ModuleName).String()) + app.PFMKeeper = packetforwardkeeper.NewKeeper(appCodec, app.AccountKeeper.AddressCodec(), runtime.NewKVStoreService(keys[packetforwardtypes.StoreKey]), app.TransferKeeper, app.IBCKeeper.ChannelKeeper, app.BankKeeper, authtypes.NewModuleAddress(govtypes.ModuleName).String()) + // Mock Module Stack // Mock Module setup for testing IBC and also acts as the interchain accounts authentication module @@ -403,10 +400,17 @@ func NewSimApp( // channel.RecvPacket -> transfer.OnRecvPacket // create IBC module from bottom to top of stack - var transferStack porttypes.IBCModule = transfer.NewIBCModule(app.TransferKeeper) + // - Rate Limit + // - Packet Forward Middleware + // - Transfer + transferStack := porttypes.NewIBCStackBuilder(app.IBCKeeper.ChannelKeeper) + transferApp := transfer.NewIBCModule(app.TransferKeeper) + transferStack.Base(transferApp). + Next(packetforward.NewIBCMiddleware(app.PFMKeeper, 0, packetforwardkeeper.DefaultForwardTransferPacketTimeoutTimestamp)). 
+ Next(ratelimiting.NewIBCMiddleware(app.RateLimitKeeper)) // Add transfer stack to IBC Router - ibcRouter.AddRoute(ibctransfertypes.ModuleName, transferStack) + ibcRouter.AddRoute(ibctransfertypes.ModuleName, transferStack.Build()) // Create Interchain Accounts Stack // SendPacket, since it is originating from the application to core IBC: @@ -416,7 +420,7 @@ func NewSimApp( var icaControllerStack porttypes.IBCModule icaControllerStack = ibcmock.NewIBCModule(&mockModule, ibcmock.NewIBCApp("")) var ok bool - app.ICAAuthModule, ok = icaControllerStack.(ibcmock.IBCModule) + app.ICAAuthModule, ok = icaControllerStack.(*ibcmock.IBCModule) if !ok { panic(fmt.Errorf("cannot convert %T into %T", icaControllerStack, app.ICAAuthModule)) } @@ -467,24 +471,25 @@ func NewSimApp( app.AccountKeeper, app.StakingKeeper, app, txConfig, ), - auth.NewAppModule(appCodec, app.AccountKeeper, authsims.RandomGenesisAccounts, app.GetSubspace(authtypes.ModuleName)), + auth.NewAppModule(appCodec, app.AccountKeeper, authsims.RandomGenesisAccounts, nil), vesting.NewAppModule(app.AccountKeeper, app.BankKeeper), - bank.NewAppModule(appCodec, app.BankKeeper, app.AccountKeeper, app.GetSubspace(banktypes.ModuleName)), - gov.NewAppModule(appCodec, &app.GovKeeper, app.AccountKeeper, app.BankKeeper, app.GetSubspace(govtypes.ModuleName)), - mint.NewAppModule(appCodec, app.MintKeeper, app.AccountKeeper, nil, app.GetSubspace(minttypes.ModuleName)), - slashing.NewAppModule(appCodec, app.SlashingKeeper, app.AccountKeeper, app.BankKeeper, app.StakingKeeper, app.GetSubspace(slashingtypes.ModuleName), app.interfaceRegistry), - distr.NewAppModule(appCodec, app.DistrKeeper, app.AccountKeeper, app.BankKeeper, app.StakingKeeper, app.GetSubspace(distrtypes.ModuleName)), - staking.NewAppModule(appCodec, app.StakingKeeper, app.AccountKeeper, app.BankKeeper, app.GetSubspace(stakingtypes.ModuleName)), + bank.NewAppModule(appCodec, app.BankKeeper, app.AccountKeeper, nil), + gov.NewAppModule(appCodec, &app.GovKeeper, app.AccountKeeper, app.BankKeeper, nil), + mint.NewAppModule(appCodec, app.MintKeeper, app.AccountKeeper, nil, nil), + slashing.NewAppModule(appCodec, app.SlashingKeeper, app.AccountKeeper, app.BankKeeper, app.StakingKeeper, nil, app.interfaceRegistry), + distr.NewAppModule(appCodec, app.DistrKeeper, app.AccountKeeper, app.BankKeeper, app.StakingKeeper, nil), + staking.NewAppModule(appCodec, app.StakingKeeper, app.AccountKeeper, app.BankKeeper, nil), upgrade.NewAppModule(app.UpgradeKeeper, app.AccountKeeper.AddressCodec()), - params.NewAppModule(app.ParamsKeeper), authzmodule.NewAppModule(appCodec, app.AuthzKeeper, app.AccountKeeper, app.BankKeeper, app.interfaceRegistry), consensus.NewAppModule(appCodec, app.ConsensusParamsKeeper), // IBC modules ibc.NewAppModule(app.IBCKeeper), transfer.NewAppModule(app.TransferKeeper), - ica.NewAppModule(&app.ICAControllerKeeper, &app.ICAHostKeeper), + ratelimiting.NewAppModule(app.RateLimitKeeper), + ica.NewAppModule(app.ICAControllerKeeper, app.ICAHostKeeper), mockModule, + packetforward.NewAppModule(app.PFMKeeper), // IBC light clients ibctm.NewAppModule(tmLightClientModule), @@ -499,11 +504,7 @@ func NewSimApp( app.ModuleManager, map[string]module.AppModuleBasic{ genutiltypes.ModuleName: genutil.NewAppModuleBasic(genutiltypes.DefaultMessageValidator), - govtypes.ModuleName: gov.NewAppModuleBasic( - []govclient.ProposalHandler{ - paramsclient.ProposalHandler, - }, - ), + govtypes.ModuleName: gov.NewAppModuleBasic([]govclient.ProposalHandler{}), }) 
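For readability, the transfer stack wiring introduced in the testing/simapp/app.go hunks above can be read as the following sketch. It is illustrative only and not part of the diff; the identifiers and the porttypes.NewIBCStackBuilder/Base/Next/Build API are taken from those hunks, and the layering order (each Next call appears to add a middleware above the previous element) is inferred from them.

// Sketch: assemble the transfer stack bottom-up and route it under the transfer module name.
transferStack := porttypes.NewIBCStackBuilder(app.IBCKeeper.ChannelKeeper)
transferStack.
	Base(transfer.NewIBCModule(app.TransferKeeper)). // base application: ICS-20 transfer
	Next(packetforward.NewIBCMiddleware(app.PFMKeeper, 0, packetforwardkeeper.DefaultForwardTransferPacketTimeoutTimestamp)). // packet-forward middleware
	Next(ratelimiting.NewIBCMiddleware(app.RateLimitKeeper)) // rate-limiting middleware on top
ibcRouter.AddRoute(ibctransfertypes.ModuleName, transferStack.Build())

This builder pattern replaces the single assignment (var transferStack porttypes.IBCModule = transfer.NewIBCModule(app.TransferKeeper)) shown as removed in the same hunk.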
app.BasicModuleManager.RegisterLegacyAminoCodec(legacyAmino) app.BasicModuleManager.RegisterInterfaces(interfaceRegistry) @@ -525,9 +526,11 @@ func NewSimApp( stakingtypes.ModuleName, ibcexported.ModuleName, ibctransfertypes.ModuleName, + packetforwardtypes.ModuleName, genutiltypes.ModuleName, authz.ModuleName, icatypes.ModuleName, + ratelimittypes.ModuleName, ibcmock.ModuleName, ) app.ModuleManager.SetOrderEndBlockers( @@ -535,10 +538,10 @@ func NewSimApp( stakingtypes.ModuleName, ibcexported.ModuleName, ibctransfertypes.ModuleName, + packetforwardtypes.ModuleName, genutiltypes.ModuleName, icatypes.ModuleName, ibcmock.ModuleName, - group.ModuleName, ) // NOTE: The genutils module must occur after staking so that pools are @@ -548,9 +551,9 @@ func NewSimApp( authtypes.ModuleName, banktypes.ModuleName, distrtypes.ModuleName, stakingtypes.ModuleName, slashingtypes.ModuleName, govtypes.ModuleName, minttypes.ModuleName, - ibcexported.ModuleName, genutiltypes.ModuleName, authz.ModuleName, ibctransfertypes.ModuleName, - icatypes.ModuleName, ibcmock.ModuleName, paramstypes.ModuleName, upgradetypes.ModuleName, - vestingtypes.ModuleName, group.ModuleName, consensusparamtypes.ModuleName, + ibcexported.ModuleName, genutiltypes.ModuleName, authz.ModuleName, ibctransfertypes.ModuleName, ratelimittypes.ModuleName, + packetforwardtypes.ModuleName, icatypes.ModuleName, ibcmock.ModuleName, upgradetypes.ModuleName, + vestingtypes.ModuleName, consensusparamtypes.ModuleName, } app.ModuleManager.SetOrderInitGenesis(genesisModuleOrder...) app.ModuleManager.SetOrderExportGenesis(genesisModuleOrder...) @@ -572,7 +575,7 @@ func NewSimApp( // NOTE: this is not required apps that don't use the simulator for fuzz testing // transactions overrideModules := map[string]module.AppModuleSimulation{ - authtypes.ModuleName: auth.NewAppModule(app.appCodec, app.AccountKeeper, authsims.RandomGenesisAccounts, app.GetSubspace(authtypes.ModuleName)), + authtypes.ModuleName: auth.NewAppModule(app.appCodec, app.AccountKeeper, authsims.RandomGenesisAccounts, nil), } app.simulationManager = module.NewSimulationManagerFromAppModules(app.ModuleManager.Modules, overrideModules) @@ -580,7 +583,6 @@ func NewSimApp( // initialize stores app.MountKVStores(keys) - app.MountTransientStores(tkeys) app.MountMemoryStores(memKeys) // initialize BaseApp @@ -754,14 +756,6 @@ func (app *SimApp) GetStoreKeys() []storetypes.StoreKey { return keys } -// GetSubspace returns a param subspace for a given module name. -// -// NOTE: This is solely to be used for testing purposes. 
-func (app *SimApp) GetSubspace(moduleName string) paramstypes.Subspace { - subspace, _ := app.ParamsKeeper.GetSubspace(moduleName) - return subspace -} - // SimulationManager implements the SimulationApp interface func (app *SimApp) SimulationManager() *module.SimulationManager { return app.simulationManager @@ -833,21 +827,6 @@ func BlockedAddresses() map[string]bool { return modAccAddrs } -// initParamsKeeper init params keeper and its subspaces -func initParamsKeeper(appCodec codec.BinaryCodec, legacyAmino *codec.LegacyAmino, key, tkey storetypes.StoreKey) paramskeeper.Keeper { - paramsKeeper := paramskeeper.NewKeeper(appCodec, legacyAmino, key, tkey) - - // register the key tables for legacy param subspaces - keyTable := ibcclienttypes.ParamKeyTable() - keyTable.RegisterParamSet(&ibcconnectiontypes.Params{}) - paramsKeeper.Subspace(ibcexported.ModuleName).WithKeyTable(keyTable) - paramsKeeper.Subspace(ibctransfertypes.ModuleName).WithKeyTable(ibctransfertypes.ParamKeyTable()) - paramsKeeper.Subspace(icacontrollertypes.SubModuleName).WithKeyTable(icacontrollertypes.ParamKeyTable()) - paramsKeeper.Subspace(icahosttypes.SubModuleName).WithKeyTable(icahosttypes.ParamKeyTable()) - - return paramsKeeper -} - // IBC TestingApp functions // GetBaseApp implements the TestingApp interface. diff --git a/testing/solomachine.go b/testing/solomachine.go index 85d0b8bd2e5..e43c84326d6 100644 --- a/testing/solomachine.go +++ b/testing/solomachine.go @@ -83,7 +83,7 @@ func GenerateKeys(t *testing.T, n uint64) ([]cryptotypes.PrivKey, []cryptotypes. privKeys := make([]cryptotypes.PrivKey, n) pubKeys := make([]cryptotypes.PubKey, n) - for i := uint64(0); i < n; i++ { + for i := range n { privKeys[i] = secp256k1.GenPrivKey() pubKeys[i] = privKeys[i].PubKey() } diff --git a/testing/utils.go b/testing/utils.go index ca90ade4cc1..77288f64223 100644 --- a/testing/utils.go +++ b/testing/utils.go @@ -7,6 +7,7 @@ import ( "strings" "testing" + "github.com/cosmos/gogoproto/proto" "github.com/stretchr/testify/require" "github.com/cosmos/cosmos-sdk/codec" @@ -65,7 +66,7 @@ func GenerateString(length uint) string { } // UnmarshalMsgResponses parse out msg responses from a transaction result -func UnmarshalMsgResponses(cdc codec.Codec, data []byte, msgs ...codec.ProtoMarshaler) error { +func UnmarshalMsgResponses(cdc codec.Codec, data []byte, msgs ...proto.Message) error { var txMsgData sdk.TxMsgData if err := cdc.Unmarshal(data, &txMsgData); err != nil { return err