diff --git a/.changes/added/3106.md b/.changes/added/3106.md new file mode 100644 index 00000000000..e9d038e7f85 --- /dev/null +++ b/.changes/added/3106.md @@ -0,0 +1 @@ +Add adapter for storing blocks on AWS S3 bucket \ No newline at end of file diff --git a/.github/actions/slack-notify-template/action.yml b/.github/actions/slack-notify-template/action.yml new file mode 100644 index 00000000000..4988191d3f6 --- /dev/null +++ b/.github/actions/slack-notify-template/action.yml @@ -0,0 +1,25 @@ +name: Notify Slack on Failure +description: Sends notification to Slack if job fails + +inputs: + github_token: + description: Github Token Secret + required: true + slack_webhook: + description: Slack webhook URL + required: true + +runs: + using: composite + steps: + - name: Notify if Job Fails + uses: ravsamhq/notify-slack-action@v2 + with: + status: ${{ job.status }} + token: ${{ inputs.github_token }} + notification_title: '{workflow} has {status_message}' + message_format: '{emoji} *{workflow}* {status_message} in <{repo_url}|{repo}> : <{run_url}|View Run Results>' + footer: '' + notify_when: failure + env: + SLACK_WEBHOOK_URL: ${{ inputs.slack_webhook }} diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 703996769c9..e9e675e835f 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -80,7 +80,7 @@ jobs: run: cargo install cargo-sort - name: Run Cargo.toml sort check run: cargo sort -w --check - - uses: FuelLabs/.github/.github/actions/slack-notify-template@master + - uses: ./.github/actions/slack-notify-template if: always() && github.ref == 'refs/heads/master' with: github_token: ${{ secrets.GITHUB_TOKEN }} @@ -93,7 +93,7 @@ jobs: # ensure openssl hasn't crept into the dep tree - name: Check if openssl is included run: ./.github/workflows/scripts/verify_openssl.sh - - uses: FuelLabs/.github/.github/actions/slack-notify-template@master + - uses: ./.github/actions/slack-notify-template if: always() && github.ref == 
'refs/heads/master' with: github_token: ${{ secrets.GITHUB_TOKEN }} @@ -174,8 +174,6 @@ jobs: uses: davidB/rust-cargo-make@v1 with: version: "0.36.4" - - name: Install Protoc - uses: arduino/setup-protoc@v3 - uses: rui314/setup-mold@v1 - uses: buildjet/cache@v3 with: @@ -194,12 +192,45 @@ jobs: continue-on-error: true - name: ${{ matrix.command }} ${{ matrix.args }} run: ${{ matrix.env }} cargo ${{ matrix.command }} ${{ matrix.args }} - - uses: FuelLabs/.github/.github/actions/slack-notify-template@master + - uses: ./.github/actions/slack-notify-template if: always() && github.ref == 'refs/heads/master' with: github_token: ${{ secrets.GITHUB_TOKEN }} slack_webhook: ${{ secrets.SLACK_WEBHOOK_NOTIFY_BUILD }} + rpc-s3-integration-tests: + name: RPC S3 Integration Tests (w/LocalStack) + needs: + - lint-toml-files + - prevent-openssl + - rustfmt + - check-changelog + runs-on: buildjet-4vcpu-ubuntu-2204 + timeout-minutes: 45 + services: + localstack: + image: localstack/localstack:latest + ports: + - 4566:4566 + env: + SERVICES: s3 + DEBUG: 1 + + env: + AWS_ACCESS_KEY_ID: test + AWS_SECRET_ACCESS_KEY: test + AWS_REGION: us-east-1 + RUSTFLAGS: -D warnings + + steps: + - uses: actions/checkout@v4 + - name: Install Rust + uses: dtolnay/rust-toolchain@master + with: + toolchain: ${{ env.RUST_VERSION }} + - name: Run RPC Integration Tests + run: cargo test --package fuel-core-tests --test integration_tests rpc_s3 --features rpc -- --test-threads=1 + publish-crates-check: runs-on: buildjet-4vcpu-ubuntu-2204 steps: @@ -239,7 +270,6 @@ jobs: - uses: dtolnay/rust-toolchain@master with: toolchain: ${{ env.RUST_VERSION }} - - uses: arduino/setup-protoc@v3 - uses: rui314/setup-mold@v1 - uses: buildjet/cache@v3 with: @@ -258,6 +288,7 @@ jobs: - cargo-verifications - publish-crates-check - cargo-test-kms + - rpc-s3-integration-tests runs-on: ubuntu-latest steps: - run: echo "pass" @@ -302,7 +333,7 @@ jobs: publish-delay: 60000 registry-token: ${{ secrets.CARGO_REGISTRY_TOKEN }} - - 
uses: FuelLabs/.github/.github/actions/slack-notify-template@master + - uses: ./.github/actions/slack-notify-template if: always() with: github_token: ${{ secrets.GITHUB_TOKEN }} @@ -448,7 +479,7 @@ jobs: asset_name: ${{ env.ZIP_FILE_NAME }} asset_content_type: application/gzip - - uses: FuelLabs/.github/.github/actions/slack-notify-template@master + - uses: ./.github/actions/slack-notify-template if: always() && (github.ref == 'refs/heads/master' || github.ref_type == 'tag') && matrix.job.os != 'macos-latest' with: github_token: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/docker-images.yml b/.github/workflows/docker-images.yml index 78f58d37ffc..eb2e6322614 100644 --- a/.github/workflows/docker-images.yml +++ b/.github/workflows/docker-images.yml @@ -290,7 +290,7 @@ jobs: cache-from: type=registry,ref=${{ env.REGISTRY_URL }}-build-cache-debug:latest cache-to: type=registry,ref=${{ env.REGISTRY_URL }}-build-cache-debug:latest,mode=max,image-manifest=true,oci-mediatypes=true - - uses: FuelLabs/.github/.github/actions/slack-notify-template@master + - uses: ./.github/actions/slack-notify-template if: always() && (github.ref == 'refs/heads/master' || github.ref_type == 'tag') with: github_token: ${{ secrets.GITHUB_TOKEN }} @@ -390,7 +390,7 @@ jobs: cache-from: type=registry,ref=${{ env.REGISTRY_URL }}-build-cache-e2e:latest cache-to: type=registry,ref=${{ env.REGISTRY_URL }}-build-cache-e2e:latest,mode=max,image-manifest=true,oci-mediatypes=true - - uses: FuelLabs/.github/.github/actions/slack-notify-template@master + - uses: ./.github/actions/slack-notify-template if: always() && (github.ref == 'refs/heads/master' || github.ref_type == 'tag') with: github_token: ${{ secrets.GITHUB_TOKEN }} diff --git a/Cargo.lock b/Cargo.lock index 25f29e6290b..f5f167dfded 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -442,7 +442,7 @@ dependencies = [ "async-trait", "auto_impl", "either", - "elliptic-curve", + "elliptic-curve 0.13.8", "k256", "thiserror 2.0.17", ] @@ 
-1290,9 +1290,9 @@ checksum = "c08606f8c3cbf4ce6ec8e28fb0014a2c086708fe954eaa885384a6165172e7e8" [[package]] name = "aws-config" -version = "1.8.8" +version = "1.8.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37cf2b6af2a95a20e266782b4f76f1a5e12bf412a9db2de9c1e9123b9d8c0ad8" +checksum = "a0149602eeaf915158e14029ba0c78dedb8c08d554b024d54c8f239aab46511d" dependencies = [ "aws-credential-types", "aws-runtime", @@ -1320,9 +1320,9 @@ dependencies = [ [[package]] name = "aws-credential-types" -version = "1.2.8" +version = "1.2.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "faf26925f4a5b59eb76722b63c2892b1d70d06fa053c72e4a100ec308c1d47bc" +checksum = "b01c9521fa01558f750d183c8c68c81b0155b9d193a4ba7f84c36bd1b6d04a06" dependencies = [ "aws-smithy-async", "aws-smithy-runtime-api", @@ -1355,13 +1355,14 @@ dependencies = [ [[package]] name = "aws-runtime" -version = "1.5.12" +version = "1.5.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bfa006bb32360ed90ac51203feafb9d02e3d21046e1fd3a450a404b90ea73e5d" +checksum = "7ce527fb7e53ba9626fc47824f25e256250556c40d8f81d27dd92aa38239d632" dependencies = [ "aws-credential-types", "aws-sigv4", "aws-smithy-async", + "aws-smithy-eventstream", "aws-smithy-http", "aws-smithy-runtime", "aws-smithy-runtime-api", @@ -1379,9 +1380,9 @@ dependencies = [ [[package]] name = "aws-sdk-kms" -version = "1.90.0" +version = "1.96.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b08a2b564e660ad69be524f569fea985380b15eea28694b8fd9f6206a437702b" +checksum = "995d40070271994fb774137aa603c10e7d29c4567a9605c6b801dff199c3d221" dependencies = [ "aws-credential-types", "aws-runtime", @@ -1399,11 +1400,45 @@ dependencies = [ "tracing", ] +[[package]] +name = "aws-sdk-s3" +version = "1.111.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"55c660aeffc79b575971b67cd479af02d486f2c97e936d7dea2866bee0dac8ff" +dependencies = [ + "aws-credential-types", + "aws-runtime", + "aws-sigv4", + "aws-smithy-async", + "aws-smithy-checksums", + "aws-smithy-eventstream", + "aws-smithy-http", + "aws-smithy-json", + "aws-smithy-runtime", + "aws-smithy-runtime-api", + "aws-smithy-types", + "aws-smithy-xml", + "aws-types", + "bytes", + "fastrand", + "hex", + "hmac", + "http 0.2.12", + "http 1.3.1", + "http-body 0.4.6", + "lru 0.12.5", + "percent-encoding", + "regex-lite", + "sha2 0.10.9", + "tracing", + "url", +] + [[package]] name = "aws-sdk-sso" -version = "1.86.0" +version = "1.90.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4a0abbfab841446cce6e87af853a3ba2cc1bc9afcd3f3550dd556c43d434c86d" +checksum = "4f18e53542c522459e757f81e274783a78f8c81acdfc8d1522ee8a18b5fb1c66" dependencies = [ "aws-credential-types", "aws-runtime", @@ -1423,9 +1458,9 @@ dependencies = [ [[package]] name = "aws-sdk-ssooidc" -version = "1.88.0" +version = "1.92.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a68d675582afea0e94d38b6ca9c5aaae4ca14f1d36faa6edb19b42e687e70d7" +checksum = "532f4d866012ffa724a4385c82e8dd0e59f0ca0e600f3f22d4c03b6824b34e4a" dependencies = [ "aws-credential-types", "aws-runtime", @@ -1445,9 +1480,9 @@ dependencies = [ [[package]] name = "aws-sdk-sts" -version = "1.88.0" +version = "1.94.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d30990923f4f675523c51eb1c0dec9b752fb267b36a61e83cbc219c9d86da715" +checksum = "1be6fbbfa1a57724788853a623378223fe828fc4c09b146c992f0c95b6256174" dependencies = [ "aws-credential-types", "aws-runtime", @@ -1468,24 +1503,30 @@ dependencies = [ [[package]] name = "aws-sigv4" -version = "1.3.5" +version = "1.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bffc03068fbb9c8dd5ce1c6fb240678a5cffb86fb2b7b1985c999c4b83c8df68" +checksum = 
"c35452ec3f001e1f2f6db107b6373f1f48f05ec63ba2c5c9fa91f07dad32af11" dependencies = [ "aws-credential-types", + "aws-smithy-eventstream", "aws-smithy-http", "aws-smithy-runtime-api", "aws-smithy-types", "bytes", + "crypto-bigint 0.5.5", "form_urlencoded", "hex", "hmac", "http 0.2.12", "http 1.3.1", + "p256 0.11.1", "percent-encoding", + "ring 0.17.14", "sha2 0.10.9", + "subtle", "time", "tracing", + "zeroize", ] [[package]] @@ -1499,17 +1540,50 @@ dependencies = [ "tokio", ] +[[package]] +name = "aws-smithy-checksums" +version = "0.63.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bb9a26b2831e728924ec0089e92697a78a2f9cdcf90d81e8cfcc6a6c85080369" +dependencies = [ + "aws-smithy-http", + "aws-smithy-types", + "bytes", + "crc-fast", + "hex", + "http 0.2.12", + "http-body 0.4.6", + "md-5", + "pin-project-lite", + "sha1", + "sha2 0.10.9", + "tracing", +] + +[[package]] +name = "aws-smithy-eventstream" +version = "0.60.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e29a304f8319781a39808847efb39561351b1bb76e933da7aa90232673638658" +dependencies = [ + "aws-smithy-types", + "bytes", + "crc32fast", +] + [[package]] name = "aws-smithy-http" -version = "0.62.4" +version = "0.62.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3feafd437c763db26aa04e0cc7591185d0961e64c61885bece0fb9d50ceac671" +checksum = "445d5d720c99eed0b4aa674ed00d835d9b1427dd73e04adaf2f94c6b2d6f9fca" dependencies = [ + "aws-smithy-eventstream", "aws-smithy-runtime-api", "aws-smithy-types", "bytes", "bytes-utils", "futures-core", + "futures-util", "http 0.2.12", "http 1.3.1", "http-body 0.4.6", @@ -1521,28 +1595,34 @@ dependencies = [ [[package]] name = "aws-smithy-http-client" -version = "1.1.3" +version = "1.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1053b5e587e6fa40ce5a79ea27957b04ba660baa02b28b7436f64850152234f1" +checksum = 
"623254723e8dfd535f566ee7b2381645f8981da086b5c4aa26c0c41582bb1d2c" dependencies = [ "aws-smithy-async", + "aws-smithy-protocol-test", "aws-smithy-runtime-api", "aws-smithy-types", + "bytes", "h2 0.3.27", "h2 0.4.12", "http 0.2.12", "http 1.3.1", "http-body 0.4.6", + "http-body 1.0.1", "hyper 0.14.32", "hyper 1.7.0", "hyper-rustls 0.24.2", "hyper-rustls 0.27.7", "hyper-util", + "indexmap 2.12.0", "pin-project-lite", "rustls 0.21.12", "rustls 0.23.33", "rustls-native-certs 0.8.2", "rustls-pki-types", + "serde", + "serde_json", "tokio", "tokio-rustls 0.26.4", "tower 0.5.2", @@ -1551,13 +1631,25 @@ dependencies = [ [[package]] name = "aws-smithy-json" -version = "0.61.6" +version = "0.61.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cff418fc8ec5cadf8173b10125f05c2e7e1d46771406187b2c878557d4503390" +checksum = "2db31f727935fc63c6eeae8b37b438847639ec330a9161ece694efba257e0c54" dependencies = [ "aws-smithy-types", ] +[[package]] +name = "aws-smithy-mocks" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ea6c97048c104438d40390bd5211267ea783fa0880a35017ff42730b6dfc9136" +dependencies = [ + "aws-smithy-http-client", + "aws-smithy-runtime-api", + "aws-smithy-types", + "http 1.3.1", +] + [[package]] name = "aws-smithy-observability" version = "0.1.4" @@ -1567,6 +1659,25 @@ dependencies = [ "aws-smithy-runtime-api", ] +[[package]] +name = "aws-smithy-protocol-test" +version = "0.63.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fa808d23a8edf0da73f6812d06d8c0a48d70f05d2d3696362982aad11ee475b7" +dependencies = [ + "assert-json-diff", + "aws-smithy-runtime-api", + "base64-simd", + "cbor-diag", + "ciborium", + "http 0.2.12", + "pretty_assertions", + "regex-lite", + "roxmltree", + "serde_json", + "thiserror 2.0.17", +] + [[package]] name = "aws-smithy-query" version = "0.60.8" @@ -1579,9 +1690,9 @@ dependencies = [ [[package]] name = "aws-smithy-runtime" -version = 
"1.9.3" +version = "1.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "40ab99739082da5347660c556689256438defae3bcefd66c52b095905730e404" +checksum = "0bbe9d018d646b96c7be063dd07987849862b0e6d07c778aad7d93d1be6c1ef0" dependencies = [ "aws-smithy-async", "aws-smithy-http", @@ -1599,13 +1710,14 @@ dependencies = [ "pin-utils", "tokio", "tracing", + "tracing-subscriber", ] [[package]] name = "aws-smithy-runtime-api" -version = "1.9.1" +version = "1.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3683c5b152d2ad753607179ed71988e8cfd52964443b4f74fd8e552d0bbfeb46" +checksum = "ec7204f9fd94749a7c53b26da1b961b4ac36bf070ef1e0b94bb09f79d4f6c193" dependencies = [ "aws-smithy-async", "aws-smithy-types", @@ -1620,9 +1732,9 @@ dependencies = [ [[package]] name = "aws-smithy-types" -version = "1.3.3" +version = "1.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f5b3a7486f6690ba25952cabf1e7d75e34d69eaff5081904a47bc79074d6457" +checksum = "25f535879a207fce0db74b679cfc3e91a3159c8144d717d55f5832aea9eef46e" dependencies = [ "base64-simd", "bytes", @@ -1646,18 +1758,18 @@ dependencies = [ [[package]] name = "aws-smithy-xml" -version = "0.60.11" +version = "0.60.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e9c34127e8c624bc2999f3b657e749c1393bedc9cd97b92a804db8ced4d2e163" +checksum = "eab77cdd036b11056d2a30a7af7b775789fb024bf216acc13884c6c97752ae56" dependencies = [ "xmlparser", ] [[package]] name = "aws-types" -version = "1.3.9" +version = "1.3.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2fd329bf0e901ff3f60425691410c69094dc2a1f34b331f37bfc4e9ac1565a1" +checksum = "d79fb68e3d7fe5d4833ea34dc87d2e97d26d3086cb3da660bb6b1f76d98680b6" dependencies = [ "aws-credential-types", "aws-smithy-async", @@ -1871,6 +1983,12 @@ version = "0.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"4cbbc9d0964165b47557570cce6c952866c2678457aca742aafc9fb771d30270" +[[package]] +name = "base16ct" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "349a06037c7bf932dd7e7d1f653678b2038b9ad46a74102f1fc7bd7872678cce" + [[package]] name = "base16ct" version = "0.2.0" @@ -2187,6 +2305,25 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" +[[package]] +name = "cbor-diag" +version = "0.1.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc245b6ecd09b23901a4fbad1ad975701fd5061ceaef6afa93a2d70605a64429" +dependencies = [ + "bs58", + "chrono", + "data-encoding", + "half", + "nom", + "num-bigint", + "num-rational", + "num-traits", + "separator", + "url", + "uuid", +] + [[package]] name = "cc" version = "1.2.41" @@ -2605,13 +2742,13 @@ checksum = "1394c263335da09e8ba8c4b2c675d804e3e0deb44cce0866a5f838d3ddd43d02" dependencies = [ "bip32", "cosmos-sdk-proto", - "ecdsa", + "ecdsa 0.16.9", "eyre", "k256", "rand_core 0.6.4", "serde", "serde_json", - "signature", + "signature 2.2.0", "subtle-encoding", "tendermint 0.40.4", "thiserror 1.0.69", @@ -2788,6 +2925,19 @@ version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "19d374276b40fb8bbdee95aef7c7fa6b5316ec764510eb64b8dd0e2ed0d7e7f5" +[[package]] +name = "crc-fast" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6bf62af4cc77d8fe1c22dde4e721d87f2f54056139d8c412e1366b740305f56f" +dependencies = [ + "crc", + "digest 0.10.7", + "libc", + "rand 0.9.2", + "regex", +] + [[package]] name = "crc32fast" version = "1.5.0" @@ -2906,6 +3056,18 @@ version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "460fbee9c2c2f33933d720630a6a0bac33ba7053db5344fac858d4b8952d77d5" +[[package]] +name = "crypto-bigint" +version = "0.4.9" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ef2b4b23cddf68b89b8f8069890e8c270d54e2d5fe1b143820234805e4cb17ef" +dependencies = [ + "generic-array", + "rand_core 0.6.4", + "subtle", + "zeroize", +] + [[package]] name = "crypto-bigint" version = "0.5.5" @@ -3174,6 +3336,16 @@ dependencies = [ "uuid", ] +[[package]] +name = "der" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f1a467a65c5e759bce6e65eaf91cc29f466cdc57cb65777bd646872a8a1fd4de" +dependencies = [ + "const-oid", + "zeroize", +] + [[package]] name = "der" version = "0.7.10" @@ -3410,19 +3582,31 @@ version = "1.0.20" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d0881ea181b1df73ff77ffaaf9c7544ecc11e82fba9b5f27b262a3c73a332555" +[[package]] +name = "ecdsa" +version = "0.14.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "413301934810f597c1d19ca71c8710e99a3f1ba28a0d2ebc01551a2daeea3c5c" +dependencies = [ + "der 0.6.1", + "elliptic-curve 0.12.3", + "rfc6979 0.3.1", + "signature 1.6.4", +] + [[package]] name = "ecdsa" version = "0.16.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ee27f32b5c5292967d2d4a9d7f1e0b0aed2c15daded5a60300e4abb9d8020bca" dependencies = [ - "der", + "der 0.7.10", "digest 0.10.7", - "elliptic-curve", - "rfc6979", + "elliptic-curve 0.13.8", + "rfc6979 0.4.0", "serdect", - "signature", - "spki", + "signature 2.2.0", + "spki 0.7.3", ] [[package]] @@ -3431,9 +3615,9 @@ version = "2.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "115531babc129696a58c64a4fef0a8bf9e9698629fb97e9e40767d235cfbcd53" dependencies = [ - "pkcs8", + "pkcs8 0.10.2", "serde", - "signature", + "signature 2.2.0", ] [[package]] @@ -3485,21 +3669,41 @@ dependencies = [ "serde", ] +[[package]] +name = "elliptic-curve" +version = "0.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"e7bb888ab5300a19b8e5bceef25ac745ad065f3c9f7efc6de1b91958110891d3" +dependencies = [ + "base16ct 0.1.1", + "crypto-bigint 0.4.9", + "der 0.6.1", + "digest 0.10.7", + "ff 0.12.1", + "generic-array", + "group 0.12.1", + "pkcs8 0.9.0", + "rand_core 0.6.4", + "sec1 0.3.0", + "subtle", + "zeroize", +] + [[package]] name = "elliptic-curve" version = "0.13.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b5e6043086bf7973472e0c7dff2142ea0b680d30e18d9cc40f267efbf222bd47" dependencies = [ - "base16ct", - "crypto-bigint", + "base16ct 0.2.0", + "crypto-bigint 0.5.5", "digest 0.10.7", - "ff", + "ff 0.13.1", "generic-array", - "group", - "pkcs8", + "group 0.13.0", + "pkcs8 0.10.2", "rand_core 0.6.4", - "sec1", + "sec1 0.7.3", "serdect", "subtle", "zeroize", @@ -3705,6 +3909,16 @@ dependencies = [ "bytes", ] +[[package]] +name = "ff" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d013fc25338cc558c5c2cfbad646908fb23591e2404481826742b651c9af7160" +dependencies = [ + "rand_core 0.6.4", + "subtle", +] + [[package]] name = "ff" version = "0.13.1" @@ -3769,6 +3983,16 @@ version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0ce7134b9999ecaf8bcd65542e436736ef32ddca1b3e06094cb6ec5755203b80" +[[package]] +name = "flate2" +version = "1.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bfe33edd8e85a12a67454e37f8c75e730830d83e313556ab9ebf9ee7fbeb3bfb" +dependencies = [ + "crc32fast", + "miniz_oxide", +] + [[package]] name = "flex-error" version = "0.4.4" @@ -3970,7 +4194,7 @@ dependencies = [ "hex", "itertools 0.12.1", "num_enum", - "p256", + "p256 0.13.2", "postcard", "primitive-types", "quanta", @@ -4037,15 +4261,17 @@ version = "0.47.1" dependencies = [ "anyhow", "async-trait", - "bytes", + "aws-config", + "aws-sdk-s3", + "aws-smithy-mocks", "enum-iterator", + "flate2", + "fuel-core-protobuf", "fuel-core-services", 
"fuel-core-storage", "fuel-core-types 0.47.1", "futures", - "log", "num_enum", - "postcard", "proptest", "prost 0.14.1", "rand 0.8.5", @@ -4056,10 +4282,7 @@ dependencies = [ "tokio", "tokio-stream", "tonic 0.14.2", - "tonic-prost", - "tonic-prost-build", "tracing", - "tracing-subscriber", ] [[package]] @@ -4406,6 +4629,18 @@ dependencies = [ "tracing", ] +[[package]] +name = "fuel-core-protobuf" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a616726038fbe445bd3294d2700afa8487e38fbc6abc86a8af12be4b596db598" +dependencies = [ + "prost 0.14.1", + "serde", + "tonic 0.14.2", + "tonic-prost", +] + [[package]] name = "fuel-core-provider" version = "0.47.1" @@ -4558,11 +4793,11 @@ dependencies = [ "alloy-provider", "alloy-rpc-types-eth", "anyhow", - "async-trait", "aws-config", "aws-sdk-kms", + "aws-sdk-s3", "clap", - "cynic", + "flate2", "fuel-core", "fuel-core-benches", "fuel-core-bin", @@ -4582,7 +4817,6 @@ dependencies = [ "fuel-core-types 0.47.1", "fuel-core-upgradable-executor", "futures", - "hex", "hyper 0.14.32", "insta", "itertools 0.12.1", @@ -4591,12 +4825,12 @@ dependencies = [ "pretty_assertions", "primitive-types", "proptest", + "prost 0.14.1", "rand 0.8.5", "regex", "reqwest 0.12.24", "rstest", "serde_json", - "spki", "tempfile", "test-case", "test-helpers", @@ -4751,11 +4985,11 @@ version = "0.56.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "33548590131674e8f272a3e056be4dbaa1de7cb364eab2b17987cd5c0dc31cb0" dependencies = [ - "ecdsa", + "ecdsa 0.16.9", "ed25519-dalek", "fuel-types 0.56.0", "k256", - "p256", + "p256 0.13.2", "serde", "sha2 0.10.9", "zeroize", @@ -4770,11 +5004,11 @@ dependencies = [ "base64ct", "coins-bip32", "coins-bip39", - "ecdsa", + "ecdsa 0.16.9", "ed25519-dalek", "fuel-types 0.65.0", "k256", - "p256", + "p256 0.13.2", "rand 0.8.5", "secp256k1 0.30.0", "serde", @@ -4854,7 +5088,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" 
checksum = "4cbdd607c9c70921cc016becde659e5062ae460b7bb3f525a1dd65f8209c0083" dependencies = [ "prost 0.12.6", - "prost-types 0.12.6", + "prost-types", "regex", "tonic 0.11.0", ] @@ -5237,13 +5471,24 @@ dependencies = [ "wasm-bindgen", ] +[[package]] +name = "group" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5dfbfb3a6cfbd390d5c9564ab283a0349b9b9fcd46a706c1eb10e0db70bfbac7" +dependencies = [ + "ff 0.12.1", + "rand_core 0.6.4", + "subtle", +] + [[package]] name = "group" version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f0f9ef7462f7c099f518d754361858f86d8a07af53ba9af0fe635bbccb151a63" dependencies = [ - "ff", + "ff 0.13.1", "rand_core 0.6.4", "subtle", ] @@ -6143,12 +6388,12 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f6e3919bbaa2945715f0bb6d3934a173d1e9a59ac23767fbaaef277265a7411b" dependencies = [ "cfg-if", - "ecdsa", - "elliptic-curve", + "ecdsa 0.16.9", + "elliptic-curve 0.13.8", "once_cell", "serdect", "sha2 0.10.9", - "signature", + "signature 2.2.0", ] [[package]] @@ -6963,6 +7208,16 @@ version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "47e1ffaa40ddd1f3ed91f717a33c8c0ee23fff369e3aa8772b9605cc1d22f4c3" +[[package]] +name = "md-5" +version = "0.10.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d89e7ee0cfbedfc4da3340218492196241d89eefb6dab27de5df917a6d2e78cf" +dependencies = [ + "cfg-if", + "digest 0.10.7", +] + [[package]] name = "memchr" version = "2.7.6" @@ -7006,6 +7261,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1fa76a2c86f704bdb222d66965fb3d63269ce38518b83cb0575fca855ebb6316" dependencies = [ "adler2", + "simd-adler32", ] [[package]] @@ -7140,12 +7396,6 @@ dependencies = [ "unsigned-varint 0.8.0", ] -[[package]] -name = "multimap" -version = "0.10.1" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d87ecb2933e8aeadb3e3a02b828fed80a7528047e68b4f424523a0981a3a084" - [[package]] name = "multistream-select" version = "0.13.0" @@ -7536,14 +7786,25 @@ version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1a80800c0488c3a21695ea981a54918fbb37abf04f4d0720c453632255e2ff0e" +[[package]] +name = "p256" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "51f44edd08f51e2ade572f141051021c5af22677e42b7dd28a88155151c33594" +dependencies = [ + "ecdsa 0.14.8", + "elliptic-curve 0.12.3", + "sha2 0.10.9", +] + [[package]] name = "p256" version = "0.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c9863ad85fa8f4460f9c48cb909d38a0d689dba1f6f6988a5e3e0d31071bcd4b" dependencies = [ - "ecdsa", - "elliptic-curve", + "ecdsa 0.16.9", + "elliptic-curve 0.13.8", "primeorder", "sha2 0.10.9", ] @@ -7785,14 +8046,24 @@ dependencies = [ "futures-io", ] +[[package]] +name = "pkcs8" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9eca2c590a5f85da82668fa685c09ce2888b9430e83299debf1f34b65fd4a4ba" +dependencies = [ + "der 0.6.1", + "spki 0.6.0", +] + [[package]] name = "pkcs8" version = "0.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f950b2377845cebe5cf8b5165cb3cc1a5e0fa5cfa3e1f7f55707d8fd82e0a7b7" dependencies = [ - "der", - "spki", + "der 0.7.10", + "spki 0.7.3", ] [[package]] @@ -7996,7 +8267,7 @@ version = "0.13.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "353e1ca18966c16d9deb1c69278edbc5f194139612772bd9537af60ac231e1e6" dependencies = [ - "elliptic-curve", + "elliptic-curve 0.13.8", ] [[package]] @@ -8146,28 +8417,6 @@ dependencies = [ "prost-derive 0.14.1", ] -[[package]] -name = "prost-build" -version = "0.14.1" -source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "ac6c3320f9abac597dcbc668774ef006702672474aad53c6d596b62e487b40b1" -dependencies = [ - "heck 0.5.0", - "itertools 0.14.0", - "log", - "multimap", - "once_cell", - "petgraph", - "prettyplease", - "prost 0.14.1", - "prost-types 0.14.1", - "pulldown-cmark", - "pulldown-cmark-to-cmark", - "regex", - "syn 2.0.107", - "tempfile", -] - [[package]] name = "prost-derive" version = "0.11.9" @@ -8229,15 +8478,6 @@ dependencies = [ "prost 0.12.6", ] -[[package]] -name = "prost-types" -version = "0.14.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9b4db3d6da204ed77bb26ba83b6122a73aeb2e87e25fbf7ad2e84c4ccbf8f72" -dependencies = [ - "prost 0.14.1", -] - [[package]] name = "psl-types" version = "2.0.11" @@ -8254,26 +8494,6 @@ dependencies = [ "psl-types", ] -[[package]] -name = "pulldown-cmark" -version = "0.13.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e8bbe1a966bd2f362681a44f6edce3c2310ac21e4d5067a6e7ec396297a6ea0" -dependencies = [ - "bitflags 2.9.4", - "memchr", - "unicase", -] - -[[package]] -name = "pulldown-cmark-to-cmark" -version = "21.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5b6a0769a491a08b31ea5c62494a8f144ee0987d86d670a8af4df1e1b7cde75" -dependencies = [ - "pulldown-cmark", -] - [[package]] name = "pulley-interpreter" version = "38.0.4" @@ -8763,6 +8983,17 @@ version = "0.7.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6b3789b30bd25ba102de4beabd95d21ac45b69b1be7d14522bab988c526d6799" +[[package]] +name = "rfc6979" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7743f17af12fa0b03b803ba12cd6a8d9483a587e89c69445e3909655c0b9fabb" +dependencies = [ + "crypto-bigint 0.4.9", + "hmac", + "zeroize", +] + [[package]] name = "rfc6979" version = "0.4.0" @@ -8846,6 +9077,15 @@ dependencies = [ "librocksdb-sys", ] +[[package]] +name = "roxmltree" +version = "0.14.1" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "921904a62e410e37e215c40381b7117f830d9d89ba60ab5236170541dd25646b" +dependencies = [ + "xmlparser", +] + [[package]] name = "rstest" version = "0.15.0" @@ -9199,16 +9439,30 @@ dependencies = [ "untrusted 0.9.0", ] +[[package]] +name = "sec1" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3be24c1842290c45df0a7bf069e0c268a747ad05a192f2fd7dcfdbc1cba40928" +dependencies = [ + "base16ct 0.1.1", + "der 0.6.1", + "generic-array", + "pkcs8 0.9.0", + "subtle", + "zeroize", +] + [[package]] name = "sec1" version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d3e97a565f76233a6003f9f5c54be1d9c5bdfa3eccfb189469f11ec4901c47dc" dependencies = [ - "base16ct", - "der", + "base16ct 0.2.0", + "der 0.7.10", "generic-array", - "pkcs8", + "pkcs8 0.10.2", "serdect", "subtle", "zeroize", @@ -9322,6 +9576,12 @@ dependencies = [ "pest", ] +[[package]] +name = "separator" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f97841a747eef040fcd2e7b3b9a220a7205926e60488e673d9e4926d27772ce5" + [[package]] name = "seq-macro" version = "0.3.6" @@ -9374,6 +9634,7 @@ version = "1.0.145" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "402a6f66d8c709116cf22f558eab210f5a50187f702eb4d7e5ef38d9a7f1c79c" dependencies = [ + "indexmap 2.12.0", "itoa", "memchr", "ryu", @@ -9463,7 +9724,7 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a84f14a19e9a014bb9f4512488d9829a68e04ecabffb0f9904cd1ace94598177" dependencies = [ - "base16ct", + "base16ct 0.2.0", "serde", ] @@ -9567,6 +9828,16 @@ dependencies = [ "libc", ] +[[package]] +name = "signature" +version = "1.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "74233d3b3b2f6d4b006dc19dee745e73e2a6bfb6f93607cd3b02bd5b00797d7c" +dependencies = [ 
+ "digest 0.10.7", + "rand_core 0.6.4", +] + [[package]] name = "signature" version = "2.2.0" @@ -9577,6 +9848,12 @@ dependencies = [ "rand_core 0.6.4", ] +[[package]] +name = "simd-adler32" +version = "0.3.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d66dc143e6b11c1eddc06d5c423cfc97062865baf299914ab64caa38182078fe" + [[package]] name = "similar" version = "2.7.0" @@ -9682,6 +9959,16 @@ dependencies = [ "lock_api", ] +[[package]] +name = "spki" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "67cf02bbac7a337dc36e4f5a693db6c21e7863f45070f7064577eb4367a3212b" +dependencies = [ + "base64ct", + "der 0.6.1", +] + [[package]] name = "spki" version = "0.7.3" @@ -9689,7 +9976,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d91ed6c858b01f942cd56b37a94b3e0a1798290327d1236e4d9cf4eaca44d29d" dependencies = [ "base64ct", - "der", + "der 0.7.10", ] [[package]] @@ -10051,13 +10338,13 @@ dependencies = [ "num-traits", "once_cell", "prost 0.12.6", - "prost-types 0.12.6", + "prost-types", "serde", "serde_bytes", "serde_json", "serde_repr", "sha2 0.10.9", - "signature", + "signature 2.2.0", "subtle", "subtle-encoding", "tendermint-proto 0.36.0", @@ -10087,7 +10374,7 @@ dependencies = [ "serde_json", "serde_repr", "sha2 0.10.9", - "signature", + "signature 2.2.0", "subtle", "subtle-encoding", "tendermint-proto 0.40.4", @@ -10118,7 +10405,7 @@ dependencies = [ "bytes", "flex-error", "prost 0.12.6", - "prost-types 0.12.6", + "prost-types", "serde", "serde_bytes", "subtle-encoding", @@ -10709,18 +10996,6 @@ dependencies = [ "tracing", ] -[[package]] -name = "tonic-build" -version = "0.14.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c40aaccc9f9eccf2cd82ebc111adc13030d23e887244bc9cfa5d1d636049de3" -dependencies = [ - "prettyplease", - "proc-macro2", - "quote", - "syn 2.0.107", -] - [[package]] name = "tonic-prost" version = "0.14.2" 
@@ -10732,22 +11007,6 @@ dependencies = [ "tonic 0.14.2", ] -[[package]] -name = "tonic-prost-build" -version = "0.14.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b4a16cba4043dc3ff43fcb3f96b4c5c154c64cbd18ca8dce2ab2c6a451d058a2" -dependencies = [ - "prettyplease", - "proc-macro2", - "prost-build", - "prost-types 0.14.1", - "quote", - "syn 2.0.107", - "tempfile", - "tonic-build", -] - [[package]] name = "tower" version = "0.4.13" @@ -11001,12 +11260,6 @@ version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "eaea85b334db583fe3274d12b4cd1880032beab409c0d774be044d4480ab9a94" -[[package]] -name = "unicase" -version = "2.8.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75b844d17643ee918803943289730bec8aac480150456169e647ed0b576ba539" - [[package]] name = "unicode-ident" version = "1.0.19" diff --git a/Cargo.toml b/Cargo.toml index 96bd7d0ef0c..4ddeed4aedb 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -75,7 +75,12 @@ async-graphql = { version = "=7.0.15", features = [ ], default-features = false } async-graphql-value = { version = "=7.0.15" } async-trait = "0.1" -aws-sdk-kms = "1.37" + +# Fuel dependencies +aws-config = { version = "1.8.11", features = ["behavior-version-latest"] } +aws-sdk-kms = "1.96" +aws-sdk-s3 = "1" +aws-smithy-mocks = "0.2.0" axum = "0.5" bytes = "1.5.0" clap = "4.4" @@ -91,6 +96,7 @@ educe = { version = "0.6", default-features = false, features = [ ] } enum-iterator = "1.2" enum_dispatch = "0.3.13" +flate2 = "1.1.5" fuel-core = { version = "0.47.1", path = "./crates/fuel-core", default-features = false } fuel-core-bin = { version = "0.47.1", path = "./bin/fuel-core" } # Workspace members @@ -110,6 +116,7 @@ fuel-core-p2p = { version = "0.47.1", path = "./crates/services/p2p" } fuel-core-parallel-executor = { version = "0.47.1", path = "./crates/services/parallel-executor" } fuel-core-poa = { version = "0.47.1", path = 
"./crates/services/consensus_module/poa" } fuel-core-producer = { version = "0.47.1", path = "./crates/services/producer" } +fuel-core-protobuf = { version = "0.4.0" } fuel-core-provider = { version = "0.47.1", path = "./crates/provider" } fuel-core-relayer = { version = "0.47.1", path = "./crates/services/relayer" } fuel-core-services = { version = "0.47.1", path = "./crates/services" } @@ -124,8 +131,6 @@ fuel-core-types = { version = "0.47.1", path = "./crates/types", default-feature fuel-core-upgradable-executor = { version = "0.47.1", path = "./crates/services/upgradable-executor" } fuel-core-wasm-executor = { version = "0.47.1", path = "./crates/services/upgradable-executor/wasm-executor", default-features = false } fuel-gas-price-algorithm = { version = "0.47.1", path = "crates/fuel-gas-price-algorithm" } - -# Fuel dependencies fuel-vm-private = { version = "0.65.0", package = "fuel-vm", default-features = false } futures = "0.3" hex = { version = "0.4", features = ["serde"] } diff --git a/benches/Cargo.toml b/benches/Cargo.toml index 4baceaa0968..3d635e3fd6c 100644 --- a/benches/Cargo.toml +++ b/benches/Cargo.toml @@ -16,6 +16,7 @@ fault-proving = [ "fuel-core-database/fault-proving", "fuel-core-sync/fault-proving", ] +rpc = ["fuel-core/rpc"] [dependencies] anyhow = { workspace = true } diff --git a/benches/benches/block_target_gas.rs b/benches/benches/block_target_gas.rs index b363f7d3233..b809e208af0 100644 --- a/benches/benches/block_target_gas.rs +++ b/benches/benches/block_target_gas.rs @@ -401,6 +401,7 @@ fn service_with_many_contracts( Default::default(), Default::default(), Default::default(), + #[cfg(feature = "rpc")] Default::default(), ), config.clone(), diff --git a/bin/fuel-core/Cargo.toml b/bin/fuel-core/Cargo.toml index d94b550976c..8aaea4c79d2 100644 --- a/bin/fuel-core/Cargo.toml +++ b/bin/fuel-core/Cargo.toml @@ -48,10 +48,10 @@ fault-proving = [ [dependencies] anyhow = { workspace = true } -aws-config = { version = "1.1.7", features = [ 
+aws-config = { workspace = true, features = [ "behavior-version-latest", ], optional = true } -aws-sdk-kms = { version = "1.37.0", optional = true } +aws-sdk-kms = { workspace = true, optional = true } clap = { workspace = true, features = ["derive", "env", "string"] } const_format = { version = "0.2", optional = true } dirs = "4.0" diff --git a/bin/fuel-core/src/cli/rollback.rs b/bin/fuel-core/src/cli/rollback.rs index cdc99f092fd..afeaf945718 100644 --- a/bin/fuel-core/src/cli/rollback.rs +++ b/bin/fuel-core/src/cli/rollback.rs @@ -59,7 +59,7 @@ pub async fn exec(command: Command) -> anyhow::Result<()> { use crate::cli::ShutdownListener; let path = command.database_path.as_path(); - let db = CombinedDatabase::open( + let mut db = CombinedDatabase::open( path, StateRewindPolicy::RewindFullRange, DatabaseConfig { diff --git a/bin/fuel-core/src/cli/run.rs b/bin/fuel-core/src/cli/run.rs index dbd8eb5d9cf..78f473786f1 100644 --- a/bin/fuel-core/src/cli/run.rs +++ b/bin/fuel-core/src/cli/run.rs @@ -107,6 +107,7 @@ use std::num::NonZeroUsize; #[cfg(feature = "p2p")] mod p2p; +#[cfg(feature = "rpc")] mod rpc; #[cfg(feature = "shared-sequencer")] @@ -293,7 +294,8 @@ pub struct Command { pub p2p_args: p2p::P2PArgs, #[clap(flatten)] - pub rpc_args: rpc::RpcArgs, + #[cfg(feature = "rpc")] + pub rpc_args: Option, #[cfg_attr(feature = "p2p", clap(flatten))] #[cfg(feature = "p2p")] @@ -374,6 +376,7 @@ impl Command { relayer_args, #[cfg(feature = "p2p")] p2p_args, + #[cfg(feature = "rpc")] rpc_args, #[cfg(feature = "p2p")] sync_args, @@ -457,7 +460,8 @@ impl Command { .echo_delegation_interval, }; - let rpc_config = rpc_args.into_config(); + #[cfg(feature = "rpc")] + let rpc_config = rpc_args.map(|args| args.into_config()); let trigger: Trigger = poa_trigger.into(); diff --git a/bin/fuel-core/src/cli/run/rpc.rs b/bin/fuel-core/src/cli/run/rpc.rs index 324cc8daee5..88f1fc64849 100644 --- a/bin/fuel-core/src/cli/run/rpc.rs +++ b/bin/fuel-core/src/cli/run/rpc.rs @@ -1,4 +1,8 @@ 
-use clap::Args; +use clap::{ + Args, + Subcommand, +}; +use fuel_core_types::fuel_types::BlockHeight; use std::net; #[derive(Debug, Clone, Args)] @@ -10,12 +14,72 @@ pub struct RpcArgs { /// The port to bind the RPC service to #[clap(long = "rpc_port", default_value = "4001", env)] pub rpc_port: u16, + + #[command(subcommand)] + pub storage_method: Option, + + #[clap(long = "api_buffer_size", default_value = "1000", env)] + pub api_buffer_size: usize, +} + +#[derive(Debug, Clone, Subcommand)] +pub enum StorageMethod { + Local, + S3 { + #[clap(long = "bucket", env)] + bucket: String, + #[clap(long = "endpoint_url", env)] + endpoint_url: Option, + #[clap(long = "requester_pays", env, default_value = "false")] + requester_pays: bool, + }, + S3NoPublish { + #[clap(long = "bucket", env)] + bucket: String, + #[clap(long = "endpoint_url", env)] + endpoint_url: Option, + #[clap(long = "requester_pays", env, default_value = "false")] + requester_pays: bool, + }, } impl RpcArgs { pub fn into_config(self) -> fuel_core_block_aggregator_api::integration::Config { fuel_core_block_aggregator_api::integration::Config { addr: net::SocketAddr::new(self.rpc_ip, self.rpc_port), + sync_from: Some(BlockHeight::from(0)), + storage_method: self.storage_method.map(Into::into).unwrap_or_default(), + api_buffer_size: self.api_buffer_size, + } + } +} + +impl From for fuel_core_block_aggregator_api::integration::StorageMethod { + fn from(storage_method: StorageMethod) -> Self { + match storage_method { + StorageMethod::Local => { + fuel_core_block_aggregator_api::integration::StorageMethod::Local + } + StorageMethod::S3 { + bucket, + endpoint_url, + requester_pays, + } => fuel_core_block_aggregator_api::integration::StorageMethod::S3 { + bucket, + endpoint_url, + requester_pays, + }, + StorageMethod::S3NoPublish { + bucket, + endpoint_url, + requester_pays, + } => { + fuel_core_block_aggregator_api::integration::StorageMethod::S3NoPublish { + bucket, + endpoint_url, + requester_pays, + } + } 
} } } diff --git a/bin/fuel-core/src/lib.rs b/bin/fuel-core/src/lib.rs index 704f0f3fa5b..67b5193783f 100644 --- a/bin/fuel-core/src/lib.rs +++ b/bin/fuel-core/src/lib.rs @@ -6,4 +6,5 @@ pub mod cli; pub use fuel_core::service::FuelService; +use fuel_core_block_aggregator_api as _; use tikv_jemallocator as _; // Used only by the binary diff --git a/crates/fuel-core/Cargo.toml b/crates/fuel-core/Cargo.toml index 808d8b5920c..222cd43d17f 100644 --- a/crates/fuel-core/Cargo.toml +++ b/crates/fuel-core/Cargo.toml @@ -16,7 +16,7 @@ default = ["rocksdb", "serde"] serde = ["dep:serde_with"] p2p = ["dep:fuel-core-p2p", "dep:fuel-core-sync"] relayer = ["dep:fuel-core-relayer"] -rpc = ["fuel-core/rpc"] +rpc = ["fuel-core/rpc", "dep:fuel-core-block-aggregator-api"] shared-sequencer = ["dep:fuel-core-shared-sequencer", "dep:cosmrs"] rocksdb = ["dep:rocksdb", "dep:tempfile", "dep:num_cpus"] backup = ["rocksdb", "fuel-core-database/backup"] @@ -42,6 +42,7 @@ fault-proving = [ "fuel-core-executor/fault-proving", "fuel-core-storage/fault-proving", "fuel-core-chain-config/fault-proving", + "fuel-core-block-aggregator-api?/fault-proving", "fuel-core-database/fault-proving", "fuel-core-sync?/fault-proving", "fuel-core-importer/fault-proving", @@ -58,9 +59,9 @@ async-trait = { workspace = true } axum = { workspace = true } clap = { workspace = true, features = ["derive"] } cosmrs = { version = "0.21", optional = true } -derive_more = { version = "0.99" } +derive_more = { workspace = true } enum-iterator = { workspace = true } -fuel-core-block-aggregator-api = { workspace = true } +fuel-core-block-aggregator-api = { workspace = true, optional = true } fuel-core-chain-config = { workspace = true, features = ["std"] } fuel-core-compression-service = { workspace = true } fuel-core-consensus-module = { workspace = true } @@ -126,7 +127,7 @@ fuel-core-executor = { workspace = true, features = [ fuel-core-services = { path = "./../services", features = ["test-helpers"] } fuel-core-storage = 
{ path = "./../storage", features = ["test-helpers"] } fuel-core-trace = { path = "./../trace" } -fuel-core-types = { path = "./../types", features = ["test-helpers"] } +fuel-core-types = { path = "./../types", features = ["test-helpers", "random"] } fuel-core-upgradable-executor = { workspace = true, features = [ "test-helpers", ] } diff --git a/crates/fuel-core/src/combined_database.rs b/crates/fuel-core/src/combined_database.rs index 6d6b128ff74..464e4d62707 100644 --- a/crates/fuel-core/src/combined_database.rs +++ b/crates/fuel-core/src/combined_database.rs @@ -3,6 +3,8 @@ use crate::state::{ historical_rocksdb::StateRewindPolicy, rocks_db::DatabaseConfig, }; +#[cfg(feature = "rpc")] +use anyhow::anyhow; use crate::{ database::{ @@ -10,7 +12,6 @@ use crate::{ GenesisDatabase, Result as DatabaseResult, database_description::{ - block_aggregator::BlockAggregatorDatabase, compression::CompressionDatabase, gas_price::GasPriceDatabase, off_chain::OffChain, @@ -20,6 +21,11 @@ use crate::{ }, service::DbType, }; + +#[cfg(feature = "rpc")] +use crate::database::database_description::block_aggregator::BlockAggregatorDatabase; +#[cfg(feature = "rpc")] +use fuel_core_block_aggregator_api::db::table::LatestBlock; #[cfg(feature = "test-helpers")] use fuel_core_chain_config::{ StateConfig, @@ -37,6 +43,11 @@ use fuel_core_storage::tables::{ ContractsState, Messages, }; +#[cfg(feature = "rpc")] +use fuel_core_storage::{ + Error as StorageError, + StorageAsRef, +}; use fuel_core_types::{ blockchain::primitives::DaBlockHeight, fuel_types::BlockHeight, @@ -61,7 +72,8 @@ pub struct CombinedDatabase { relayer: Database, gas_price: Database, compression: Database, - block_aggregation: Database, + #[cfg(feature = "rpc")] + block_aggregation_storage: Database, } impl CombinedDatabase { @@ -71,7 +83,9 @@ impl CombinedDatabase { relayer: Database, gas_price: Database, compression: Database, - block_aggregation: Database, + #[cfg(feature = "rpc")] block_aggregation_storage: Database< + 
BlockAggregatorDatabase, + >, ) -> Self { Self { on_chain, @@ -79,7 +93,8 @@ impl CombinedDatabase { relayer, gas_price, compression, - block_aggregation, + #[cfg(feature = "rpc")] + block_aggregation_storage, } } @@ -90,6 +105,8 @@ impl CombinedDatabase { crate::state::rocks_db::RocksDb::::prune(path)?; crate::state::rocks_db::RocksDb::::prune(path)?; crate::state::rocks_db::RocksDb::::prune(path)?; + #[cfg(feature = "rpc")] + crate::state::rocks_db::RocksDb::::prune(path)?; Ok(()) } @@ -133,6 +150,12 @@ impl CombinedDatabase { crate::state::rocks_db::RocksDb::::backup(db_dir, temp_dir) .trace_err("Failed to backup compression database")?; + #[cfg(feature = "rpc")] + crate::state::rocks_db::RocksDb::::backup( + db_dir, temp_dir, + ) + .trace_err("Failed to backup block aggregation storage database")?; + Ok(()) } @@ -187,6 +210,13 @@ impl CombinedDatabase { ) .trace_err("Failed to restore compression database")?; + #[cfg(feature = "rpc")] + crate::state::rocks_db::RocksDb::::restore( + temp_restore_dir, + backup_dir, + ) + .trace_err("Failed to restore block aggregation storage database")?; + Ok(()) } @@ -244,7 +274,8 @@ impl CombinedDatabase { ..database_config }, )?; - let block_aggregation = Database::open_rocksdb( + #[cfg(feature = "rpc")] + let block_aggregation_storage = Database::open_rocksdb( path, state_rewind_policy, DatabaseConfig { @@ -259,7 +290,8 @@ impl CombinedDatabase { relayer, gas_price, compression, - block_aggregation, + #[cfg(feature = "rpc")] + block_aggregation_storage, }) } @@ -275,7 +307,8 @@ impl CombinedDatabase { relayer: Default::default(), gas_price: Default::default(), compression: Default::default(), - block_aggregation: Default::default(), + #[cfg(feature = "rpc")] + block_aggregation_storage: Default::default(), }) } @@ -321,6 +354,7 @@ impl CombinedDatabase { Database::in_memory(), Database::in_memory(), Database::in_memory(), + #[cfg(feature = "rpc")] Database::in_memory(), ) } @@ -331,6 +365,8 @@ impl CombinedDatabase { 
self.relayer.check_version()?; self.gas_price.check_version()?; self.compression.check_version()?; + #[cfg(feature = "rpc")] + self.block_aggregation_storage.check_version()?; Ok(()) } @@ -342,8 +378,16 @@ impl CombinedDatabase { &self.compression } - pub fn block_aggregation(&self) -> &Database { - &self.block_aggregation + #[cfg(feature = "rpc")] + pub fn block_aggregation_storage(&self) -> &Database { + &self.block_aggregation_storage + } + + #[cfg(feature = "rpc")] + pub fn block_aggregation_storage_mut( + &mut self, + ) -> &mut Database { + &mut self.block_aggregation_storage } #[cfg(any(feature = "test-helpers", test))] @@ -424,7 +468,7 @@ impl CombinedDatabase { /// Rollbacks the state of the blockchain to a specific block height. pub fn rollback_to( - &self, + &mut self, target_block_height: BlockHeight, shutdown_listener: &mut S, ) -> anyhow::Result<()> @@ -452,12 +496,42 @@ impl CombinedDatabase { let compression_db_rolled_back = is_equal_or_none(compression_db_height, target_block_height); - if on_chain_height == target_block_height - && off_chain_height == target_block_height - && gas_price_rolled_back - && compression_db_rolled_back + #[cfg(feature = "rpc")] { - break; + let block_aggregation_storage_height = self + .block_aggregation_storage() + .storage_as_ref::() + .get(&()) + .map_err(|e: StorageError| anyhow!(e))? 
+ .map(|b| b.height()); + let block_aggregation_storage_rolled_back = is_equal_or_less_than_or_none( + block_aggregation_storage_height, + target_block_height, + ); + + if !block_aggregation_storage_rolled_back { + self.block_aggregation_storage_mut() + .rollback_to(target_block_height)?; + } + if on_chain_height == target_block_height + && off_chain_height == target_block_height + && gas_price_rolled_back + && compression_db_rolled_back + && block_aggregation_storage_rolled_back + { + break; + } + } + + #[cfg(not(feature = "rpc"))] + { + if on_chain_height == target_block_height + && off_chain_height == target_block_height + && gas_price_rolled_back + && compression_db_rolled_back + { + break; + } } if on_chain_height < target_block_height { @@ -615,6 +689,8 @@ impl CombinedDatabase { self.relayer.shutdown(); self.gas_price.shutdown(); self.compression.shutdown(); + #[cfg(feature = "rpc")] + self.block_aggregation_storage.shutdown(); } } @@ -646,6 +722,11 @@ fn is_equal_or_none(maybe_left: Option, right: T) -> bool { maybe_left.map(|left| left == right).unwrap_or(true) } +#[cfg(feature = "rpc")] +fn is_equal_or_less_than_or_none(maybe_left: Option, right: T) -> bool { + maybe_left.map(|left| left <= right).unwrap_or(true) +} + #[allow(non_snake_case)] #[cfg(feature = "backup")] #[cfg(test)] diff --git a/crates/fuel-core/src/database.rs b/crates/fuel-core/src/database.rs index 7e8f6ec7061..6149787a893 100644 --- a/crates/fuel-core/src/database.rs +++ b/crates/fuel-core/src/database.rs @@ -69,6 +69,8 @@ use std::{ pub type Result = core::result::Result; // TODO: Extract `Database` and all belongs into `fuel-core-database`. 
+#[cfg(feature = "rpc")] +use crate::database::database_description::block_aggregator::BlockAggregatorDatabase; #[cfg(feature = "rocksdb")] use crate::state::{ historical_rocksdb::{ @@ -84,12 +86,25 @@ use crate::state::{ }; use crate::{ database::database_description::{ - block_aggregator::BlockAggregatorDatabase, gas_price::GasPriceDatabase, indexation_availability, }, state::HeightType, }; + +#[cfg(feature = "rpc")] +use anyhow::anyhow; +#[cfg(feature = "rpc")] +use fuel_core_block_aggregator_api::db::table::{ + Blocks, + LatestBlock, + Mode, +}; +#[cfg(feature = "rpc")] +use fuel_core_storage::{ + StorageAsRef, + transactional::WriteTransaction, +}; #[cfg(feature = "rocksdb")] use std::path::Path; @@ -442,12 +457,46 @@ impl Modifiable for Database { } } +#[cfg(feature = "rpc")] impl Modifiable for Database { fn commit_changes(&mut self, changes: Changes) -> StorageResult<()> { + // Does not need to be monotonically increasing because + // storage values are modified in parallel from different heights commit_changes_with_height_update(self, changes, |_iter| Ok(Vec::new())) } } +#[cfg(feature = "rpc")] +impl Database { + pub fn rollback_to(&mut self, block_height: BlockHeight) -> StorageResult<()> { + let mut tx = self.write_transaction(); + let mode = tx + .storage_as_ref::() + .get(&())? 
+ .map(|m| m.into_owned()); + let new = match mode { + None => None, + Some(Mode::Local(_)) => Some(Mode::new_local(block_height)), + Some(Mode::S3(_)) => Some(Mode::new_s3(block_height)), + }; + if let Some(Mode::Local(_)) = mode { + let remove_heights = tx + .iter_all_keys::(Some(IterDirection::Reverse)) + .flatten() + .take_while(|height| height > &block_height) + .collect::>(); + for height in remove_heights { + tx.storage_as_mut::().remove(&height)?; + } + } + if let Some(new) = new { + tx.storage_as_mut::().insert(&(), &new)?; + tx.commit().map_err(|e: StorageError| anyhow!(e))?; + } + Ok(()) + } +} + #[cfg(feature = "relayer")] impl Modifiable for Database { fn commit_changes(&mut self, changes: Changes) -> StorageResult<()> { diff --git a/crates/fuel-core/src/database/database_description.rs b/crates/fuel-core/src/database/database_description.rs index e991c2bc7f1..9a300158fd4 100644 --- a/crates/fuel-core/src/database/database_description.rs +++ b/crates/fuel-core/src/database/database_description.rs @@ -13,6 +13,7 @@ pub mod off_chain; pub mod on_chain; pub mod relayer; +#[cfg(feature = "rpc")] pub mod block_aggregator; pub trait DatabaseHeight: PartialEq + Default + Debug + Copy + Send + Sync { diff --git a/crates/fuel-core/src/database/database_description/block_aggregator.rs b/crates/fuel-core/src/database/database_description/block_aggregator.rs index 2d55678552f..2357e3d5cf9 100644 --- a/crates/fuel-core/src/database/database_description/block_aggregator.rs +++ b/crates/fuel-core/src/database/database_description/block_aggregator.rs @@ -1,5 +1,5 @@ use crate::database::database_description::DatabaseDescription; -use fuel_core_block_aggregator_api::db::storage_db::table::Column; +use fuel_core_block_aggregator_api::db::table::Column; use fuel_core_types::fuel_types::BlockHeight; #[derive(Clone, Copy, Debug)] @@ -14,7 +14,7 @@ impl DatabaseDescription for BlockAggregatorDatabase { } fn name() -> String { - "block_aggregator".to_string() + 
"block_aggregator_storage".to_string() } fn metadata_column() -> Self::Column { diff --git a/crates/fuel-core/src/p2p_test_helpers.rs b/crates/fuel-core/src/p2p_test_helpers.rs index 508764d3c76..71daef2fa21 100644 --- a/crates/fuel-core/src/p2p_test_helpers.rs +++ b/crates/fuel-core/src/p2p_test_helpers.rs @@ -1,7 +1,5 @@ //! # Helpers for creating networks of nodes -#[cfg(feature = "rpc")] -use crate::service::config::free_local_addr; use crate::{ chain_config::{ CoinConfig, @@ -94,6 +92,8 @@ pub struct CustomizeConfig { max_functional_peers_connected: Option, max_discovery_peers_connected: Option, subscribe_to_transactions: Option, + #[cfg(feature = "rpc")] + rpc_config: Option, } impl CustomizeConfig { @@ -103,6 +103,8 @@ impl CustomizeConfig { max_functional_peers_connected: None, max_discovery_peers_connected: None, subscribe_to_transactions: None, + #[cfg(feature = "rpc")] + rpc_config: None, } } @@ -502,7 +504,7 @@ pub fn make_config( node_config.name = name.clone(); #[cfg(feature = "rpc")] { - node_config.rpc_config.addr = free_local_addr(); + node_config.rpc_config = config_overrides.rpc_config; } if let Some(min_gas_price) = config_overrides.min_exec_gas_price { diff --git a/crates/fuel-core/src/query/message.rs b/crates/fuel-core/src/query/message.rs index ce8da628f6f..106bedcdb0c 100644 --- a/crates/fuel-core/src/query/message.rs +++ b/crates/fuel-core/src/query/message.rs @@ -313,7 +313,7 @@ fn message_receipts_proof( return Err(anyhow::anyhow!( "Unable to generate the Merkle proof for the message from its receipts" ) - .into()); + .into()) }; // Return the proof. 
diff --git a/crates/fuel-core/src/schema/tx/assemble_tx.rs b/crates/fuel-core/src/schema/tx/assemble_tx.rs index c73f67cd6c6..84033001ee5 100644 --- a/crates/fuel-core/src/schema/tx/assemble_tx.rs +++ b/crates/fuel-core/src/schema/tx/assemble_tx.rs @@ -637,10 +637,10 @@ where if *amount == 0 { self.tx.outputs_mut().pop(); } else { - break; + break } } else { - break; + break } } } @@ -852,7 +852,7 @@ where } if contracts_not_in_inputs.is_empty() { - break; + break } for contract_id in contracts_not_in_inputs { @@ -913,13 +913,13 @@ where for input in self.tx.inputs() { if input_is_spendable_as_fee(input) { let Some(amount) = input.amount() else { - continue; + continue }; let Some(asset_id) = input.asset_id(&base_asset_id) else { - continue; + continue }; let Some(owner) = input.input_owner() else { - continue; + continue }; if asset_id == &base_asset_id && &fee_payer_account.owner() == owner { @@ -949,7 +949,7 @@ where let need_to_cover = final_fee.saturating_add(self.base_asset_reserved); if need_to_cover <= total_base_asset { - break; + break } let remaining_input_slots = self.remaining_input_slots()?; @@ -1021,7 +1021,7 @@ where for item in items { let key = extractor(item); if !duplicates.insert(key) { - return true + return true; } } diff --git a/crates/fuel-core/src/service.rs b/crates/fuel-core/src/service.rs index 2075361ebc6..367972feae3 100644 --- a/crates/fuel-core/src/service.rs +++ b/crates/fuel-core/src/service.rs @@ -193,6 +193,7 @@ impl FuelService { Default::default(), Default::default(), Default::default(), + #[cfg(feature = "rpc")] Default::default(), ); Self::from_combined_database(combined_database, config).await @@ -544,9 +545,12 @@ mod tests { let mut i = 0; loop { let mut shutdown = ShutdownListener::spawn(); + #[cfg(not(feature = "rpc"))] + let config = Config::local_node(); + #[cfg(feature = "rpc")] + let config = Config::local_node_with_rpc(); let service = - FuelService::new(Default::default(), Config::local_node(), &mut shutdown) - 
.unwrap(); + FuelService::new(Default::default(), config, &mut shutdown).unwrap(); service.start_and_await().await.unwrap(); sleep(Duration::from_secs(1)); for service in service.sub_services() { diff --git a/crates/fuel-core/src/service/adapters.rs b/crates/fuel-core/src/service/adapters.rs index d697a8d344b..1abb981ffa7 100644 --- a/crates/fuel-core/src/service/adapters.rs +++ b/crates/fuel-core/src/service/adapters.rs @@ -80,12 +80,15 @@ pub mod fuel_gas_price_provider; pub mod gas_price_adapters; pub mod graphql_api; pub mod import_result_provider; + #[cfg(feature = "p2p")] pub mod p2p; pub mod producer; pub mod ready_signal; #[cfg(feature = "relayer")] pub mod relayer; +#[cfg(feature = "rpc")] +pub mod rpc; #[cfg(feature = "shared-sequencer")] pub mod shared_sequencer; #[cfg(feature = "p2p")] diff --git a/crates/fuel-core/src/service/adapters/rpc.rs b/crates/fuel-core/src/service/adapters/rpc.rs new file mode 100644 index 00000000000..13395b56c46 --- /dev/null +++ b/crates/fuel-core/src/service/adapters/rpc.rs @@ -0,0 +1,53 @@ +use crate::{ + database::{ + Database, + database_description::off_chain::OffChain, + }, + fuel_core_graphql_api::storage::transactions::TransactionStatuses, +}; +use fuel_core_block_aggregator_api::{ + blocks::importer_and_db_source::sync_service::TxReceipts, + result::{ + Error as RPCError, + Result as RPCResult, + }, +}; +use fuel_core_storage::StorageInspect; +use fuel_core_types::{ + fuel_tx::{ + Receipt, + TxId, + }, + services::transaction_status::TransactionExecutionStatus, +}; + +pub struct ReceiptSource { + off_chain: Database, +} + +impl ReceiptSource { + pub fn new(off_chain: Database) -> Self { + Self { off_chain } + } +} + +impl TxReceipts for ReceiptSource { + async fn get_receipts(&self, tx_id: &TxId) -> RPCResult> { + let tx_status = + StorageInspect::::get(&self.off_chain, tx_id) + .map_err(RPCError::receipt_error)?; + if let Some(status) = tx_status { + match status.into_owned() { + TransactionExecutionStatus::Success 
{ receipts, .. } => { + Ok(receipts.to_vec()) + } + _ => Ok(Vec::new()), + } + } else { + Ok(Vec::new()) + } + } +} + +#[cfg(test)] +mod tests; diff --git a/crates/fuel-core/src/service/adapters/rpc/tests.rs b/crates/fuel-core/src/service/adapters/rpc/tests.rs new file mode 100644 index 00000000000..d3065e89eab --- /dev/null +++ b/crates/fuel-core/src/service/adapters/rpc/tests.rs @@ -0,0 +1,46 @@ +#![allow(non_snake_case)] + +use super::*; +use fuel_core_storage::{ + StorageMutate, + transactional::WriteTransaction, +}; +use rand::{ + Rng, + SeedableRng, + prelude::StdRng, +}; +use std::sync::Arc; + +#[tokio::test] +async fn get_receipt__gets_the_receipt_for_expected_tx() { + let mut rng = StdRng::seed_from_u64(9999); + + // given + let mut db = Database::in_memory(); + let tx_id = rng.r#gen(); + let expected = vec![Receipt::Return { + id: rng.r#gen(), + val: 987, + pc: 123, + is: 456, + }]; + let status = TransactionExecutionStatus::Success { + block_height: Default::default(), + time: fuel_core_types::tai64::Tai64(123u64), + result: None, + receipts: Arc::new(expected.clone()), + total_gas: 0, + total_fee: 0, + }; + let mut tx = db.write_transaction(); + StorageMutate::::insert(&mut tx, &tx_id, &status).unwrap(); + tx.commit().unwrap(); + let receipt_source = ReceiptSource::new(db); + + // when + let actual = receipt_source.get_receipts(&tx_id).await.unwrap(); + + // then + assert_eq!(actual, expected); +} diff --git a/crates/fuel-core/src/service/config.rs b/crates/fuel-core/src/service/config.rs index e2d0299bd58..9464d83ce0b 100644 --- a/crates/fuel-core/src/service/config.rs +++ b/crates/fuel-core/src/service/config.rs @@ -1,8 +1,21 @@ +use crate::{ + combined_database::CombinedDatabaseConfig, + graphql_api::ServiceConfig as GraphQLConfig, +}; use clap::ValueEnum; -#[cfg(feature = "test-helpers")] -use std::net::{ - SocketAddr, - TcpListener, +use fuel_core_chain_config::SnapshotReader; +pub use fuel_core_consensus_module::RelayerConsensusConfig; +pub use 
fuel_core_importer; +pub use fuel_core_poa::Trigger; +use fuel_core_tx_status_manager::config::Config as TxStatusManagerConfig; +use fuel_core_txpool::config::Config as TxPoolConfig; +use fuel_core_types::{ + blockchain::header::StateTransitionBytecodeVersion, + fuel_types::{ + AssetId, + ChainId, + }, + signer::SignMode, }; use std::{ num::{ @@ -18,40 +31,32 @@ use strum_macros::{ EnumVariantNames, }; -use fuel_core_chain_config::SnapshotReader; -#[cfg(feature = "test-helpers")] -use fuel_core_chain_config::{ - ChainConfig, - StateConfig, -}; -pub use fuel_core_consensus_module::RelayerConsensusConfig; -pub use fuel_core_importer; +#[cfg(feature = "parallel-executor")] +use std::num::NonZeroUsize; + +#[cfg(feature = "relayer")] +use fuel_core_relayer::Config as RelayerConfig; + #[cfg(feature = "p2p")] use fuel_core_p2p::config::{ Config as P2PConfig, NotInitialized, }; -pub use fuel_core_poa::Trigger; -#[cfg(feature = "relayer")] -use fuel_core_relayer::Config as RelayerConfig; -use fuel_core_tx_status_manager::config::Config as TxStatusManagerConfig; -use fuel_core_txpool::config::Config as TxPoolConfig; -use fuel_core_types::{ - blockchain::header::StateTransitionBytecodeVersion, - signer::SignMode, -}; -use crate::{ - combined_database::CombinedDatabaseConfig, - graphql_api::ServiceConfig as GraphQLConfig, +#[cfg(feature = "rpc")] +use fuel_core_block_aggregator_api::integration::StorageMethod; +#[cfg(feature = "test-helpers")] +use fuel_core_chain_config::{ + ChainConfig, + StateConfig, }; - -use fuel_core_types::fuel_types::{ - AssetId, - ChainId, +#[cfg(feature = "rpc")] +use fuel_core_types::fuel_types::BlockHeight; +#[cfg(feature = "test-helpers")] +use std::net::{ + SocketAddr, + TcpListener, }; -#[cfg(feature = "parallel-executor")] -use std::num::NonZeroUsize; #[derive(Clone, Debug)] pub struct Config { @@ -82,7 +87,7 @@ pub struct Config { pub block_producer: fuel_core_producer::Config, pub gas_price_config: GasPriceConfig, #[cfg(feature = "rpc")] - 
pub rpc_config: fuel_core_block_aggregator_api::integration::Config, + pub rpc_config: Option, pub da_compression: DaCompressionMode, pub block_importer: fuel_core_importer::Config, #[cfg(feature = "relayer")] @@ -121,6 +126,34 @@ impl Config { Self::local_node_with_state_config(StateConfig::local_testnet()) } + #[cfg(feature = "test-helpers")] + #[cfg(feature = "rpc")] + pub fn local_node_with_rpc() -> Self { + let mut config = Self::local_node_with_state_config(StateConfig::local_testnet()); + let rpc_config = fuel_core_block_aggregator_api::integration::Config { + addr: free_local_addr(), + sync_from: Some(BlockHeight::new(0)), + storage_method: StorageMethod::Local, + api_buffer_size: 100, + }; + config.rpc_config = Some(rpc_config); + config + } + + #[cfg(feature = "test-helpers")] + #[cfg(feature = "rpc")] + pub fn local_node_with_rpc_and_storage_method(storage_method: StorageMethod) -> Self { + let mut config = Self::local_node_with_state_config(StateConfig::local_testnet()); + let rpc_config = fuel_core_block_aggregator_api::integration::Config { + addr: free_local_addr(), + sync_from: Some(BlockHeight::new(0)), + storage_method, + api_buffer_size: 100, + }; + config.rpc_config = Some(rpc_config); + config + } + #[cfg(feature = "test-helpers")] pub fn local_node_with_state_config(state_config: StateConfig) -> Self { Self::local_node_with_configs(ChainConfig::local_testnet(), state_config) @@ -170,9 +203,7 @@ impl Config { const MAX_TXS_TTL: Duration = Duration::from_secs(60 * 100000000); #[cfg(feature = "rpc")] - let rpc_config = fuel_core_block_aggregator_api::integration::Config { - addr: free_local_addr(), - }; + let rpc_config = None; Self { graphql_config: GraphQLConfig { diff --git a/crates/fuel-core/src/service/sub_services.rs b/crates/fuel-core/src/service/sub_services.rs index 412ba2b4b56..e3a2df0aff4 100644 --- a/crates/fuel-core/src/service/sub_services.rs +++ b/crates/fuel-core/src/service/sub_services.rs @@ -1,40 +1,5 @@ 
#![allow(clippy::let_unit_value)] -#[cfg(feature = "relayer")] -use crate::relayer::Config as RelayerConfig; -#[cfg(feature = "p2p")] -use crate::service::adapters::consensus_module::poa::pre_confirmation_signature::{ - key_generator::Ed25519KeyGenerator, - trigger::TimeBasedTrigger, - tx_receiver::PreconfirmationsReceiver, -}; -#[cfg(feature = "rpc")] -use fuel_core_block_aggregator_api::{ - blocks::importer_and_db_source::serializer_adapter::SerializerAdapter, - db::storage_db::StorageDB, -}; -use fuel_core_compression_service::service::new_service as new_compression_service; -use fuel_core_gas_price_service::v1::{ - algorithm::AlgorithmV1, - da_source_service::block_committer_costs::{ - BlockCommitterDaBlockCosts, - BlockCommitterHttpApi, - }, - metadata::V1AlgorithmConfig, - service::SharedData, - uninitialized_task::new_gas_price_service_v1, -}; -use fuel_core_poa::Trigger; -use fuel_core_storage::{ - self, - transactional::AtomicView, -}; -#[cfg(feature = "relayer")] -use fuel_core_types::blockchain::primitives::DaBlockHeight; -use fuel_core_types::signer::SignMode; -use std::sync::Arc; -use tokio::sync::Mutex; - use super::{ DbType, adapters::{ @@ -49,6 +14,16 @@ use super::{ config::DaCompressionMode, genesis::create_genesis_block, }; +#[cfg(feature = "rpc")] +use crate::database::database_description::on_chain::OnChain; +#[cfg(feature = "relayer")] +use crate::relayer::Config as RelayerConfig; +#[cfg(feature = "p2p")] +use crate::service::adapters::consensus_module::poa::pre_confirmation_signature::{ + key_generator::Ed25519KeyGenerator, + trigger::TimeBasedTrigger, + tx_receiver::PreconfirmationsReceiver, +}; use crate::{ combined_database::CombinedDatabase, database::Database, @@ -87,6 +62,47 @@ use crate::{ }, }, }; +use fuel_core_compression_service::service::new_service as new_compression_service; +use fuel_core_gas_price_service::v1::{ + algorithm::AlgorithmV1, + da_source_service::block_committer_costs::{ + BlockCommitterDaBlockCosts, + 
BlockCommitterHttpApi, + }, + metadata::V1AlgorithmConfig, + service::SharedData, + uninitialized_task::new_gas_price_service_v1, +}; +use fuel_core_poa::Trigger; +use fuel_core_storage::{ + self, + transactional::AtomicView, +}; +#[cfg(feature = "relayer")] +use fuel_core_types::blockchain::primitives::DaBlockHeight; +use fuel_core_types::signer::SignMode; +#[cfg(feature = "rpc")] +use rpc::*; +use std::sync::Arc; +use tokio::sync::Mutex; + +#[cfg(feature = "rpc")] +mod rpc { + pub use crate::{ + database::database_description::block_aggregator::BlockAggregatorDatabase, + service::adapters::rpc::ReceiptSource, + }; + pub use fuel_core_block_aggregator_api::{ + api::protobuf_adapter::ProtobufAPI, + blocks::importer_and_db_source::{ + ImporterAndDbSource, + serializer_adapter::SerializerAdapter, + }, + integration::UninitializedTask, + }; + pub use fuel_core_services::ServiceRunner; + pub use fuel_core_types::fuel_types::BlockHeight; +} pub type PoAService = fuel_core_poa::Service< BlockProducerAdapter, @@ -459,20 +475,15 @@ pub fn init_sub_services( }; #[cfg(feature = "rpc")] - let block_aggregator_rpc = { - let block_aggregator_config = config.rpc_config.clone(); - let db = database.block_aggregation().clone(); - let db_adapter = StorageDB::new(db); - let serializer = SerializerAdapter; - let onchain_db = database.on_chain().clone(); - let importer = importer_adapter.events_shared_result(); - fuel_core_block_aggregator_api::integration::new_service( - &block_aggregator_config, - db_adapter, - serializer, - onchain_db, - importer, - ) + let block_aggregator_rpc = if let Some(config) = config.rpc_config.as_ref() { + Some(init_rpc_server( + config, + &database, + &importer_adapter, + genesis_block_height, + )?) 
+ } else { + None }; let graph_ql = fuel_core_graphql_api::api_service::new_service( @@ -540,7 +551,9 @@ pub fn init_sub_services( services.push(Box::new(graphql_worker)); services.push(Box::new(tx_status_manager)); #[cfg(feature = "rpc")] - services.push(Box::new(block_aggregator_rpc)); + if let Some(block_aggregator_rpc) = block_aggregator_rpc { + services.push(Box::new(block_aggregator_rpc)); + } if let Some(compression_service) = compression_service { services.push(Box::new(compression_service)); @@ -553,3 +566,34 @@ pub fn init_sub_services( Ok((services, shared)) } + +#[allow(clippy::type_complexity)] +#[cfg(feature = "rpc")] +fn init_rpc_server( + config: &fuel_core_block_aggregator_api::integration::Config, + database: &CombinedDatabase, + importer_adapter: &BlockImporterAdapter, + genesis_height: BlockHeight, +) -> anyhow::Result< + ServiceRunner< + UninitializedTask< + ProtobufAPI, + ImporterAndDbSource, ReceiptSource>, + Database, + >, + >, +> { + let receipts = ReceiptSource::new(database.off_chain().clone()); + let serializer = SerializerAdapter; + let onchain_db = database.on_chain().clone(); + let importer = importer_adapter.events_shared_result(); + fuel_core_block_aggregator_api::integration::new_service( + database.block_aggregation_storage().clone(), + serializer, + onchain_db, + receipts, + importer, + config.clone(), + genesis_height, + ) +} diff --git a/crates/services/block_aggregator_api/Cargo.toml b/crates/services/block_aggregator_api/Cargo.toml index 03342654df9..294c41e218f 100644 --- a/crates/services/block_aggregator_api/Cargo.toml +++ b/crates/services/block_aggregator_api/Cargo.toml @@ -8,7 +8,6 @@ homepage = { workspace = true } license = { workspace = true } repository = { workspace = true } rust-version = { workspace = true } -build = "build.rs" [features] fault-proving = ["fuel-core-types/fault-proving"] @@ -16,15 +15,16 @@ fault-proving = ["fuel-core-types/fault-proving"] [dependencies] anyhow = { workspace = true } async-trait 
= { workspace = true } -bytes = { workspace = true, features = ["serde"] } +aws-config = { workspace = true } +aws-sdk-s3 = { workspace = true } enum-iterator = { workspace = true } +flate2 = { workspace = true } +fuel-core-protobuf = { workspace = true } fuel-core-services = { workspace = true } fuel-core-storage = { workspace = true, features = ["std"] } fuel-core-types = { workspace = true, features = ["std"] } futures = { workspace = true } -log = "0.4.27" num_enum = { workspace = true } -postcard = { workspace = true } prost = { workspace = true, features = ["derive"] } rand = { workspace = true } serde = { workspace = true, features = ["derive"] } @@ -34,16 +34,13 @@ thiserror = { workspace = true } tokio = { workspace = true } tokio-stream = { workspace = true } tonic = { workspace = true } -tonic-prost = { workspace = true } tracing = { workspace = true } -[build-dependencies] -tonic-prost-build = { workspace = true } - [dev-dependencies] +aws-sdk-s3 = { workspace = true, features = ["test-util"] } +aws-smithy-mocks = { workspace = true } fuel-core-services = { workspace = true, features = ["test-helpers"] } fuel-core-storage = { workspace = true, features = ["test-helpers"] } fuel-core-types = { workspace = true, features = ["std", "test-helpers"] } proptest = { workspace = true } tokio-stream = { workspace = true } -tracing-subscriber = { workspace = true } diff --git a/crates/services/block_aggregator_api/build.rs b/crates/services/block_aggregator_api/build.rs deleted file mode 100644 index 190a1538000..00000000000 --- a/crates/services/block_aggregator_api/build.rs +++ /dev/null @@ -1,7 +0,0 @@ -fn main() -> Result<(), Box> { - tonic_prost_build::configure() - .type_attribute(".", "#[derive(serde::Serialize,serde::Deserialize)]") - .type_attribute(".", "#[allow(clippy::large_enum_variant)]") - .compile_protos(&["proto/api.proto"], &["proto/"])?; - Ok(()) -} diff --git a/crates/services/block_aggregator_api/proto/api.proto 
b/crates/services/block_aggregator_api/proto/api.proto deleted file mode 100644 index b478c8b69b2..00000000000 --- a/crates/services/block_aggregator_api/proto/api.proto +++ /dev/null @@ -1,677 +0,0 @@ -syntax = "proto3"; - -package blockaggregator; - -message BlockHeightRequest {} - -message BlockHeightResponse { - uint32 height = 1; -} - -message BlockRangeRequest { - uint32 start = 1; - uint32 end = 2; -} - -message Block { - oneof versioned_block { - V1Block v1 = 1; - } -} - -message V1Block { - Header header = 1; - repeated Transaction transactions = 2; -} - -message Header { - oneof versioned_header { - V1Header v1 = 1; - V2Header v2 = 2; - } -} - -// pub struct BlockHeaderV1 { -// /// The application header. -// pub(crate) application: ApplicationHeader, -// /// The consensus header. -// pub(crate) consensus: ConsensusHeader, -// /// The header metadata calculated during creation. -// /// The field is pub(crate) to enforce the use of the [`PartialBlockHeader::generate`] method. -// #[cfg_attr(feature = "serde", serde(skip))] -// #[educe(PartialEq(ignore))] -// pub(crate) metadata: Option, -//} -// pub struct ApplicationHeader { -// /// The layer 1 height of messages and events to include since the last layer 1 block number. -// /// This is not meant to represent the layer 1 block this was committed to. Validators will need -// /// to have some rules in place to ensure the block number was chosen in a reasonable way. For -// /// example, they should verify that the block number satisfies the finality requirements of the -// /// layer 1 chain. They should also verify that the block number isn't too stale and is increasing. -// /// Some similar concerns are noted in this issue: -// pub da_height: DaBlockHeight, -// /// The version of the consensus parameters used to execute this block. -// pub consensus_parameters_version: ConsensusParametersVersion, -// /// The version of the state transition bytecode used to execute this block. 
-// pub state_transition_bytecode_version: StateTransitionBytecodeVersion, -// /// Generated application fields. -// pub generated: Generated, -//} -// pub struct GeneratedApplicationFieldsV1 { -// /// Number of transactions in this block. -// pub transactions_count: u16, -// /// Number of message receipts in this block. -// pub message_receipt_count: u32, -// /// Merkle root of transactions. -// pub transactions_root: Bytes32, -// /// Merkle root of message receipts in this block. -// pub message_outbox_root: Bytes32, -// /// Root hash of all imported events from L1 -// pub event_inbox_root: Bytes32, -//} -// pub struct ConsensusHeader { -// /// Merkle root of all previous block header hashes. -// pub prev_root: Bytes32, -// /// Fuel block height. -// pub height: BlockHeight, -// /// The block producer time. -// pub time: Tai64, -// /// generated consensus fields. -// pub generated: Generated, -//} -// pub struct GeneratedConsensusFields { -// /// Hash of the application header. -// pub application_hash: Bytes32, -//} -// pub struct BlockHeaderMetadata { -// /// Hash of the header. -// id: BlockId, -//} -message V1Header { - uint64 da_height = 1; - uint32 consensus_parameters_version = 2; - uint32 state_transition_bytecode_version = 3; - uint32 transactions_count = 4; - uint32 message_receipt_count = 5; - bytes transactions_root = 6; - bytes message_outbox_root = 7; - bytes event_inbox_root = 8; - bytes prev_root = 9; - uint32 height = 10; - uint64 time = 11; - bytes application_hash = 12; - optional bytes block_id = 13; -} - -// pub struct GeneratedApplicationFieldsV2 { -// /// Number of transactions in this block. -// pub transactions_count: u16, -// /// Number of message receipts in this block. -// pub message_receipt_count: u32, -// /// Merkle root of transactions. -// pub transactions_root: Bytes32, -// /// Merkle root of message receipts in this block. 
-// pub message_outbox_root: Bytes32, -// /// Root hash of all imported events from L1 -// pub event_inbox_root: Bytes32, -// /// TxID commitment -// pub tx_id_commitment: Bytes32, -//} -message V2Header { - uint64 da_height = 1; - uint32 consensus_parameters_version = 2; - uint32 state_transition_bytecode_version = 3; - uint32 transactions_count = 4; - uint32 message_receipt_count = 5; - bytes transactions_root = 6; - bytes message_outbox_root = 7; - bytes event_inbox_root = 8; - bytes tx_id_commitment = 9; - bytes prev_root = 10; - uint32 height = 11; - uint64 time = 12; - bytes application_hash = 13; - optional bytes block_id = 14; -} - -message Transaction { - oneof variant { - ScriptTransaction script = 1; - CreateTransaction create = 2; - MintTransaction mint = 3; - UpgradeTransaction upgrade = 4; - UploadTransaction upload = 5; - BlobTransaction blob = 6; - } -} - -// pub struct ChargeableTransaction -//where -// Body: BodyConstraints, -//{ -// pub(crate) body: Body, -// pub(crate) policies: Policies, -// pub(crate) inputs: Vec, -// pub(crate) outputs: Vec, -// pub(crate) witnesses: Vec, -// #[serde(skip)] -// #[cfg_attr(feature = "da-compression", compress(skip))] -// #[educe(PartialEq(ignore))] -// #[educe(Hash(ignore))] -// #[canonical(skip)] -// pub(crate) metadata: Option>, -//} -// pub struct ScriptBody { -// pub(crate) script_gas_limit: Word, -// #[cfg_attr(feature = "da-compression", compress(skip))] -// pub(crate) receipts_root: Bytes32, -// pub(crate) script: ScriptCode, -// #[educe(Debug(method(fmt_truncated_hex::<16>)))] -// pub(crate) script_data: Vec, -//} -// #[derive(Default, Debug, Clone, PartialEq, Eq, Hash)] -//pub struct ScriptMetadata { -// pub script_data_offset: usize, -//} -message ScriptTransaction { - uint64 script_gas_limit = 1; - bytes receipts_root = 2; - bytes script = 3; - bytes script_data = 4; - Policies policies = 5; - repeated Input inputs = 6; - repeated Output outputs = 7; - repeated bytes witnesses = 8; - ScriptMetadata 
metadata = 9; -} - -message CreateTransaction { - uint32 bytecode_witness_index = 1; - bytes salt = 2; - repeated StorageSlot storage_slots = 3; - Policies policies = 4; - repeated Input inputs = 5; - repeated Output outputs = 6; - repeated bytes witnesses = 7; - CreateMetadata metadata = 8; -} - -message MintTransaction { - TxPointer tx_pointer = 1; - ContractInput input_contract = 2; - ContractOutput output_contract = 3; - uint64 mint_amount = 4; - bytes mint_asset_id = 5; - uint64 gas_price = 6; - MintMetadata metadata = 7; -} - -message UpgradeTransaction { - UpgradePurpose purpose = 1; - Policies policies = 2; - repeated Input inputs = 3; - repeated Output outputs = 4; - repeated bytes witnesses = 5; - UpgradeMetadata metadata = 6; -} - -message UploadTransaction { - bytes root = 1; - uint32 witness_index = 2; - uint32 subsection_index = 3; - uint32 subsections_number = 4; - repeated bytes proof_set = 5; - Policies policies = 6; - repeated Input inputs = 7; - repeated Output outputs = 8; - repeated bytes witnesses = 9; - UploadMetadata metadata = 10; -} - -message BlobTransaction { - bytes blob_id = 1; - uint32 witness_index = 2; - Policies policies = 3; - repeated Input inputs = 4; - repeated Output outputs = 5; - repeated bytes witnesses = 6; - BlobMetadata metadata = 7; -} - -// pub struct Policies { -// /// A bitmask that indicates what policies are set. -// bits: PoliciesBits, -// /// The array of policy values. 
-// values: [Word; POLICIES_NUMBER], -//} -message Policies { - uint32 bits = 1; - repeated uint64 values = 2; -} - -// pub enum Input { -// CoinSigned(CoinSigned), -// CoinPredicate(CoinPredicate), -// Contract(Contract), -// MessageCoinSigned(MessageCoinSigned), -// MessageCoinPredicate(MessageCoinPredicate), -// MessageDataSigned(MessageDataSigned), -// MessageDataPredicate(MessageDataPredicate), -//} -message Input { - oneof variant { - CoinSignedInput coin_signed = 1; - CoinPredicateInput coin_predicate = 2; - ContractInput contract = 3; - MessageCoinSignedInput message_coin_signed = 4; - MessageCoinPredicateInput message_coin_predicate = 5; - MessageDataSignedInput message_data_signed = 6; - MessageDataPredicateInput message_data_predicate = 7; - } -} - -// pub struct Coin -//where -// Specification: CoinSpecification, -//{ -// pub utxo_id: UtxoId, -// #[cfg_attr(feature = "da-compression", compress(skip))] -// pub owner: Address, -// #[cfg_attr(feature = "da-compression", compress(skip))] -// pub amount: Word, -// #[cfg_attr(feature = "da-compression", compress(skip))] -// pub asset_id: AssetId, -// #[cfg_attr(feature = "da-compression", compress(skip))] -// pub tx_pointer: TxPointer, -// #[educe(Debug(method(fmt_as_field)))] -// pub witness_index: Specification::Witness, -// /// Exact amount of gas used by the predicate. -// /// If the predicate consumes different amount of gas, -// /// it's considered to be false. 
-// #[educe(Debug(method(fmt_as_field)))] -// pub predicate_gas_used: Specification::PredicateGasUsed, -// #[educe(Debug(method(fmt_as_field)))] -// pub predicate: Specification::Predicate, -// #[educe(Debug(method(fmt_as_field)))] -// pub predicate_data: Specification::PredicateData, -//} -// impl CoinSpecification for Signed { -// type Predicate = Empty; -// type PredicateData = Empty>; -// type PredicateGasUsed = Empty; -// type Witness = u16; -//} -message CoinSignedInput { - UtxoId utxo_id = 1; - bytes owner = 2; - uint64 amount = 3; - bytes asset_id = 4; - TxPointer tx_pointer = 5; - uint32 witness_index = 6; - uint64 predicate_gas_used = 7; - bytes predicate = 8; - bytes predicate_data = 9; -} - -//impl CoinSpecification for Predicate { -// type Predicate = PredicateCode; -// type PredicateData = Vec; -// type PredicateGasUsed = Word; -// type Witness = Empty; -//} -message CoinPredicateInput { - UtxoId utxo_id = 1; - bytes owner = 2; - uint64 amount = 3; - bytes asset_id = 4; - TxPointer tx_pointer = 5; - uint32 witness_index = 6; - uint64 predicate_gas_used = 7; - bytes predicate = 8; - bytes predicate_data = 9; -} - -// pub struct Contract { -// #[cfg_attr(feature = "da-compression", compress(skip))] -// pub utxo_id: UtxoId, -// #[cfg_attr(feature = "da-compression", compress(skip))] -// pub balance_root: Bytes32, -// #[cfg_attr(feature = "da-compression", compress(skip))] -// pub state_root: Bytes32, -// /// Pointer to transaction that last modified the contract state. -// #[cfg_attr(feature = "da-compression", compress(skip))] -// pub tx_pointer: TxPointer, -// pub contract_id: ContractId, -//} -message ContractInput { - UtxoId utxo_id = 1; - bytes balance_root = 2; - bytes state_root = 3; - TxPointer tx_pointer = 4; - bytes contract_id = 5; -} - -// pub struct Message -//where -// Specification: MessageSpecification, -//{ -// /// The sender from the L1 chain. 
-// #[cfg_attr(feature = "da-compression", compress(skip))] -// pub sender: Address, -// /// The receiver on the `Fuel` chain. -// #[cfg_attr(feature = "da-compression", compress(skip))] -// pub recipient: Address, -// #[cfg_attr(feature = "da-compression", compress(skip))] -// pub amount: Word, -// // Unique identifier of the message -// pub nonce: Nonce, -// #[educe(Debug(method(fmt_as_field)))] -// pub witness_index: Specification::Witness, -// /// Exact amount of gas used by the predicate. -// /// If the predicate consumes different amount of gas, -// /// it's considered to be false. -// #[educe(Debug(method(fmt_as_field)))] -// pub predicate_gas_used: Specification::PredicateGasUsed, -// #[cfg_attr(feature = "da-compression", compress(skip))] -// #[educe(Debug(method(fmt_as_field)))] -// pub data: Specification::Data, -// #[educe(Debug(method(fmt_as_field)))] -// pub predicate: Specification::Predicate, -// #[educe(Debug(method(fmt_as_field)))] -// pub predicate_data: Specification::PredicateData, -//} -// pub struct MessageCoin(core::marker::PhantomData); -// -// impl MessageSpecification for MessageCoin { -// type Data = Empty>; -// type Predicate = Empty; -// type PredicateData = Empty>; -// type PredicateGasUsed = Empty; -// type Witness = u16; -// } -message MessageCoinSignedInput { - bytes sender = 1; - bytes recipient = 2; - uint64 amount = 3; - bytes nonce = 4; - uint32 witness_index = 5; - uint64 predicate_gas_used = 6; - bytes data = 7; - bytes predicate = 8; - bytes predicate_data = 9; -} - -// impl MessageSpecification for MessageCoin { -// type Data = Empty>; -// type Predicate = PredicateCode; -// type PredicateData = Vec; -// type PredicateGasUsed = Word; -// type Witness = Empty; -// } -message MessageCoinPredicateInput { - bytes sender = 1; - bytes recipient = 2; - uint64 amount = 3; - bytes nonce = 4; - uint32 witness_index = 5; - uint64 predicate_gas_used = 6; - bytes data = 7; - bytes predicate = 8; - bytes predicate_data = 9; -} - -// pub 
type MessageDataSigned = Message>; -message MessageDataSignedInput { - bytes sender = 1; - bytes recipient = 2; - uint64 amount = 3; - bytes nonce = 4; - uint32 witness_index = 5; - uint64 predicate_gas_used = 6; - bytes data = 7; - bytes predicate = 8; - bytes predicate_data = 9; -} - -// pub type MessageDataPredicate = -// Message>; -message MessageDataPredicateInput { - bytes sender = 1; - bytes recipient = 2; - uint64 amount = 3; - bytes nonce = 4; - uint32 witness_index = 5; - uint64 predicate_gas_used = 6; - bytes data = 7; - bytes predicate = 8; - bytes predicate_data = 9; -} - -// pub enum Output { -// Coin { -// to: Address, -// amount: Word, -// asset_id: AssetId, -// }, -// -// Contract(Contract), -// -// Change { -// to: Address, -// #[cfg_attr(feature = "da-compression", compress(skip))] -// amount: Word, -// asset_id: AssetId, -// }, -// -// Variable { -// #[cfg_attr(feature = "da-compression", compress(skip))] -// to: Address, -// #[cfg_attr(feature = "da-compression", compress(skip))] -// amount: Word, -// #[cfg_attr(feature = "da-compression", compress(skip))] -// asset_id: AssetId, -// }, -// -// ContractCreated { -// contract_id: ContractId, -// state_root: Bytes32, -// }, -//} -message Output { - oneof variant { - CoinOutput coin = 1; - ContractOutput contract = 2; - ChangeOutput change = 3; - VariableOutput variable = 4; - ContractCreatedOutput contract_created = 5; - } -} -message CoinOutput { - bytes to = 1; - uint64 amount = 2; - bytes asset_id = 3; -} -message ContractOutput { - uint32 input_index = 1; - bytes balance_root = 2; - bytes state_root = 3; -} -message ChangeOutput { - bytes to = 1; - uint64 amount = 2; - bytes asset_id = 3; -} -message VariableOutput { - bytes to = 1; - uint64 amount = 2; - bytes asset_id = 3; -} -message ContractCreatedOutput { - bytes contract_id = 1; - bytes state_root = 2; -} - -// pub struct UtxoId { -// /// transaction id -// tx_id: TxId, -// /// output index -// output_index: u16, -//} -message UtxoId { - 
bytes tx_id = 1; - uint32 output_index = 2; -} - -message TxPointer { - uint32 block_height = 1; - uint32 tx_index = 2; -} - -message StorageSlot { - bytes key = 1; - bytes value = 2; -} - - -// #[derive(Debug, Clone, PartialEq, Eq, Hash)] -//pub struct ChargeableMetadata { -// pub common: CommonMetadata, -// pub body: Body, -//} -// pub struct ScriptBody { -// pub(crate) script_gas_limit: Word, -// #[cfg_attr(feature = "da-compression", compress(skip))] -// pub(crate) receipts_root: Bytes32, -// pub(crate) script: ScriptCode, -// #[educe(Debug(method(fmt_truncated_hex::<16>)))] -// pub(crate) script_data: Vec, -//} -// #[derive(Debug, Clone, PartialEq, Eq, Hash)] -//pub struct CommonMetadata { -// pub id: Bytes32, -// pub inputs_offset: usize, -// pub inputs_offset_at: Vec, -// pub inputs_predicate_offset_at: Vec>, -// pub outputs_offset: usize, -// pub outputs_offset_at: Vec, -// pub witnesses_offset: usize, -// pub witnesses_offset_at: Vec, -//} - -message ScriptMetadata { - bytes id = 1; - uint32 inputs_offset = 2; - repeated uint32 inputs_offset_at = 3; - repeated PredicateOffset inputs_predicate_offset_at = 4; - uint32 outputs_offset = 5; - repeated uint32 outputs_offset_at = 6; - uint32 witnesses_offset = 7; - repeated uint32 witnesses_offset_at = 8; - uint64 script_gas_limit = 9; - bytes receipts_root = 10; - bytes script = 11; - bytes script_data = 12; -} - -message CreateMetadata { - bytes id = 1; - uint32 inputs_offset = 2; - repeated uint32 inputs_offset_at = 3; - repeated PredicateOffset inputs_predicate_offset_at = 4; - uint32 outputs_offset = 5; - repeated uint32 outputs_offset_at = 6; - uint32 witnesses_offset = 7; - repeated uint32 witnesses_offset_at = 8; - bytes contract_id = 9; - bytes contract_root = 10; - bytes state_root = 11; -} - -message MintMetadata { - bytes id = 1; -} - -message UpgradePurpose { - oneof variant { - UpgradeConsensusParameters consensus_parameters = 1; - UpgradeStateTransition state_transition = 2; - } -} - -message 
UpgradeConsensusParameters { - uint32 witness_index = 1; - bytes checksum = 2; -} - -message UpgradeStateTransition { - bytes root = 1; -} - -message UpgradeMetadata { - bytes id = 1; - uint32 inputs_offset = 2; - repeated uint32 inputs_offset_at = 3; - repeated PredicateOffset inputs_predicate_offset_at = 4; - uint32 outputs_offset = 5; - repeated uint32 outputs_offset_at = 6; - uint32 witnesses_offset = 7; - repeated uint32 witnesses_offset_at = 8; - oneof variant { - UpgradeConsensusParametersMetadata consensus_parameters = 9; - UpgradeStateTransitionMetadata state_transition = 10; - } -} - -message UpgradeConsensusParametersMetadata { - bytes consensus_parameters = 1; - bytes calculated_checksum = 2; -} - -message UpgradeStateTransitionMetadata {} - -message UploadMetadata { - bytes id = 1; - uint32 inputs_offset = 2; - repeated uint32 inputs_offset_at = 3; - repeated PredicateOffset inputs_predicate_offset_at = 4; - uint32 outputs_offset = 5; - repeated uint32 outputs_offset_at = 6; - uint32 witnesses_offset = 7; - repeated uint32 witnesses_offset_at = 8; -} - -message BlobMetadata { - bytes id = 1; - uint32 inputs_offset = 2; - repeated uint32 inputs_offset_at = 3; - repeated PredicateOffset inputs_predicate_offset_at = 4; - uint32 outputs_offset = 5; - repeated uint32 outputs_offset_at = 6; - uint32 witnesses_offset = 7; - repeated uint32 witnesses_offset_at = 8; -} - -message PredicateOffset { - optional InnerPredicateOffset offset = 1; -} - -message InnerPredicateOffset { - uint32 offset = 1; - uint32 length = 2; -} - - -message BlockResponse { - oneof payload { - Block literal = 1; - string remote = 2; - } -} - -message NewBlockSubscriptionRequest {} - -service BlockAggregator { - rpc GetBlockHeight (BlockHeightRequest) returns (BlockHeightResponse); - rpc GetBlockRange (BlockRangeRequest) returns (stream BlockResponse); - rpc NewBlockSubscription (NewBlockSubscriptionRequest) returns (stream BlockResponse); -} diff --git 
a/crates/services/block_aggregator_api/src/api.rs b/crates/services/block_aggregator_api/src/api.rs index 4beb51c47f3..0e07b31b0bb 100644 --- a/crates/services/block_aggregator_api/src/api.rs +++ b/crates/services/block_aggregator_api/src/api.rs @@ -25,11 +25,11 @@ pub enum BlockAggregatorQuery { response: tokio::sync::oneshot::Sender, }, GetCurrentHeight { - response: tokio::sync::oneshot::Sender, + response: tokio::sync::oneshot::Sender>, }, // TODO: Do we need a way to unsubscribe or can we just see that the receiver is dropped? NewBlockSubscription { - response: tokio::sync::mpsc::Sender, + response: tokio::sync::mpsc::Sender<(BlockHeight, Block)>, }, } @@ -68,13 +68,15 @@ impl BlockAggregatorQuery { (query, receiver) } - pub fn get_current_height() -> (Self, tokio::sync::oneshot::Receiver) { + pub fn get_current_height() + -> (Self, tokio::sync::oneshot::Receiver>) { let (sender, receiver) = tokio::sync::oneshot::channel(); let query = Self::GetCurrentHeight { response: sender }; (query, receiver) } - pub fn new_block_subscription() -> (Self, tokio::sync::mpsc::Receiver) { + pub fn new_block_subscription() + -> (Self, tokio::sync::mpsc::Receiver<(BlockHeight, B)>) { const ARBITRARY_CHANNEL_SIZE: usize = 10; let (sender, receiver) = tokio::sync::mpsc::channel(ARBITRARY_CHANNEL_SIZE); let query = Self::NewBlockSubscription { response: sender }; diff --git a/crates/services/block_aggregator_api/src/api/protobuf_adapter.rs b/crates/services/block_aggregator_api/src/api/protobuf_adapter.rs index c944e199917..e771b662fd4 100644 --- a/crates/services/block_aggregator_api/src/api/protobuf_adapter.rs +++ b/crates/services/block_aggregator_api/src/api/protobuf_adapter.rs @@ -3,7 +3,10 @@ use crate::{ BlockAggregatorApi, BlockAggregatorQuery, }, - block_range_response::BlockRangeResponse, + block_range_response::{ + BlockRangeResponse, + BoxStream, + }, protobuf_types::{ Block as ProtoBlock, BlockHeightRequest as ProtoBlockHeightRequest, @@ -11,21 +14,37 @@ use crate::{ 
BlockRangeRequest as ProtoBlockRangeRequest, BlockResponse as ProtoBlockResponse, NewBlockSubscriptionRequest as ProtoNewBlockSubscriptionRequest, + RemoteBlockResponse as ProtoRemoteBlockResponse, + RemoteS3Bucket as ProtoRemoteS3Bucket, block_aggregator_server::{ BlockAggregator, BlockAggregatorServer as ProtoBlockAggregatorServer, }, block_response as proto_block_response, + remote_block_response::Location as ProtoRemoteLocation, }, result::{ Error, Result, }, }; +use anyhow::anyhow; use async_trait::async_trait; +use fuel_core_services::{ + RunnableService, + RunnableTask, + Service, + ServiceRunner, + StateWatcher, + TaskNextAction, + try_or_stop, +}; use futures::StreamExt; use tokio_stream::wrappers::ReceiverStream; -use tonic::Status; +use tonic::{ + Status, + transport::server::Router, +}; #[cfg(test)] mod tests; @@ -47,7 +66,7 @@ impl Server { #[async_trait] impl BlockAggregator for Server { - async fn get_block_height( + async fn get_synced_block_height( &self, request: tonic::Request, ) -> Result, tonic::Status> { @@ -60,7 +79,7 @@ impl BlockAggregator for Server { let res = receiver.await; match res { Ok(height) => Ok(tonic::Response::new(ProtoBlockHeightResponse { - height: *height, + height: height.map(|inner| *inner), })), Err(e) => Err(tonic::Status::internal(format!( "Failed to receive height: {}", @@ -68,13 +87,12 @@ impl BlockAggregator for Server { ))), } } - type GetBlockRangeStream = ReceiverStream>; + type GetBlockRangeStream = BoxStream>; async fn get_block_range( &self, request: tonic::Request, ) -> Result, tonic::Status> { - const ARB_LITERAL_BLOCK_BUFFER_SIZE: usize = 100; let req = request.into_inner(); let (response, receiver) = tokio::sync::oneshot::channel(); let query = BlockAggregatorQuery::GetBlockRange { @@ -90,27 +108,42 @@ impl BlockAggregator for Server { match res { Ok(block_range_response) => match block_range_response { BlockRangeResponse::Literal(inner) => { - let (tx, rx) = tokio::sync::mpsc::channel::< - Result, - 
>(ARB_LITERAL_BLOCK_BUFFER_SIZE); - - tokio::spawn(async move { - let mut s = inner; - while let Some(pb) = s.next().await { + let stream = inner + .map(|(height, res)| { let response = ProtoBlockResponse { - payload: Some(proto_block_response::Payload::Literal(pb)), + height: *height, + payload: Some(proto_block_response::Payload::Literal( + res, + )), }; - if tx.send(Ok(response)).await.is_err() { - break; - } - } - }); - - Ok(tonic::Response::new(ReceiverStream::new(rx))) + Ok(response) + }) + .boxed(); + Ok(tonic::Response::new(stream)) } - BlockRangeResponse::Remote(_) => { - tracing::error!("Remote block range not implemented"); - todo!() + BlockRangeResponse::S3(inner) => { + let stream = inner + .map(|(height, res)| { + let s3 = ProtoRemoteS3Bucket { + bucket: res.bucket, + key: res.key, + requester_pays: res.requester_pays, + endpoint: res.aws_endpoint, + }; + let location = ProtoRemoteLocation::S3(s3); + let proto_response = ProtoRemoteBlockResponse { + location: Some(location), + }; + let response = ProtoBlockResponse { + height: *height, + payload: Some(proto_block_response::Payload::Remote( + proto_response, + )), + }; + Ok(response) + }) + .boxed(); + Ok(tonic::Response::new(stream)) } }, Err(e) => Err(tonic::Status::internal(format!( @@ -127,7 +160,7 @@ impl BlockAggregator for Server { request: tonic::Request, ) -> Result, tonic::Status> { const ARB_CHANNEL_SIZE: usize = 100; - tracing::warn!("get_block_range: {:?}", request); + tracing::debug!("get_block_range: {:?}", request); let (response, mut receiver) = tokio::sync::mpsc::channel(ARB_CHANNEL_SIZE); let query = BlockAggregatorQuery::NewBlockSubscription { response }; self.query_sender @@ -137,8 +170,9 @@ impl BlockAggregator for Server { let (task_sender, task_receiver) = tokio::sync::mpsc::channel(ARB_CHANNEL_SIZE); tokio::spawn(async move { - while let Some(nb) = receiver.recv().await { + while let Some((height, nb)) = receiver.recv().await { let response = ProtoBlockResponse { + height: 
*height, payload: Some(proto_block_response::Payload::Literal(nb)), }; if task_sender.send(Ok(response)).await.is_err() { @@ -152,41 +186,97 @@ impl BlockAggregator for Server { } pub struct ProtobufAPI { - _server_task_handle: tokio::task::JoinHandle<()>, - shutdown_sender: Option>, + _server_service: ServiceRunner, query_receiver: tokio::sync::mpsc::Receiver>, } -impl ProtobufAPI { - pub fn new(url: String) -> Self { - let (query_sender, query_receiver) = tokio::sync::mpsc::channel::< - BlockAggregatorQuery, - >(100); - let server = Server::new(query_sender); - let addr = url.parse().unwrap(); - let (shutdown_sender, shutdown_receiver) = tokio::sync::oneshot::channel::<()>(); - let _server_task_handle = tokio::spawn(async move { - let service = tonic::transport::Server::builder() - .add_service(ProtoBlockAggregatorServer::new(server)); - tokio::select! { - res = service.serve(addr) => { +pub struct ServerTask { + addr: std::net::SocketAddr, + query_sender: + tokio::sync::mpsc::Sender>, + router: Option, +} +#[async_trait::async_trait] +impl RunnableService for ServerTask { + const NAME: &'static str = "ProtobufServerTask"; + type SharedData = (); + type Task = Self; + type TaskParams = (); + + fn shared_data(&self) -> Self::SharedData {} + + async fn into_task( + mut self, + _state_watcher: &StateWatcher, + _params: Self::TaskParams, + ) -> anyhow::Result { + self.start_router()?; + Ok(self) + } +} + +impl ServerTask { + fn start_router(&mut self) -> anyhow::Result<()> { + let server = Server::new(self.query_sender.clone()); + let router = tonic::transport::Server::builder() + .add_service(ProtoBlockAggregatorServer::new(server)); + self.router = Some(router); + Ok(()) + } + + fn get_router(&mut self) -> anyhow::Result { + self.router + .take() + .ok_or_else(|| anyhow!("Router has not been initialized yet")) + } +} + +impl RunnableTask for ServerTask { + async fn run(&mut self, watcher: &mut StateWatcher) -> TaskNextAction { + let router_res = self.get_router(); 
+ let router = try_or_stop!(router_res, |e| tracing::error!( + "Failed to get router, has not been started: {:?}", + e + )); + tokio::select! { + res = router.serve(self.addr) => { if let Err(e) = res { tracing::error!("BlockAggregator tonic server error: {}", e); + TaskNextAction::Stop } else { tracing::info!("BlockAggregator tonic server stopped"); + TaskNextAction::Stop } }, - _ = shutdown_receiver => { - tracing::info!("Shutting down BlockAggregator tonic server"); - }, + _ = watcher.while_started() => { + TaskNextAction::Stop } + } + } + + async fn shutdown(self) -> anyhow::Result<()> { + Ok(()) + } +} + +impl ProtobufAPI { + pub fn new(url: String, buffer_size: usize) -> Result { + let (query_sender, query_receiver) = tokio::sync::mpsc::channel::< + BlockAggregatorQuery, + >(buffer_size); + let addr = url.parse().unwrap(); + let _server_service = ServiceRunner::new(ServerTask { + addr, + query_sender, + router: None, }); - Self { - _server_task_handle, - shutdown_sender: Some(shutdown_sender), + _server_service.start().map_err(Error::Api)?; + let api = Self { + _server_service, query_receiver, - } + }; + Ok(api) } } @@ -205,11 +295,3 @@ impl BlockAggregatorApi for ProtobufAPI { Ok(query) } } - -impl Drop for ProtobufAPI { - fn drop(&mut self) { - if let Some(shutdown_sender) = self.shutdown_sender.take() { - let _ = shutdown_sender.send(()); - } - } -} diff --git a/crates/services/block_aggregator_api/src/api/protobuf_adapter/tests.rs b/crates/services/block_aggregator_api/src/api/protobuf_adapter/tests.rs index 7807ac02180..86c52b650c8 100644 --- a/crates/services/block_aggregator_api/src/api/protobuf_adapter/tests.rs +++ b/crates/services/block_aggregator_api/src/api/protobuf_adapter/tests.rs @@ -6,7 +6,10 @@ use crate::{ BlockAggregatorQuery, protobuf_adapter::ProtobufAPI, }, - block_range_response::BlockRangeResponse, + block_range_response::{ + BlockRangeResponse, + RemoteS3Response, + }, blocks::importer_and_db_source::{ BlockSerializer, 
serializer_adapter::SerializerAdapter, @@ -23,6 +26,7 @@ use crate::{ block_response::Payload, }, }; +use fuel_core_protobuf::remote_block_response::Location; use fuel_core_types::{ blockchain::block::Block as FuelBlock, fuel_types::BlockHeight, @@ -31,19 +35,18 @@ use futures::{ StreamExt, TryStreamExt, }; -use std::net::TcpListener; fn free_local_addr() -> String { - let listener = TcpListener::bind("[::1]:0").unwrap(); - let addr = listener.local_addr().unwrap(); // OS picks a free port - format!("[::1]:{}", addr.port()) + let listener = std::net::TcpListener::bind("127.0.0.1:0").unwrap(); + let addr = listener.local_addr().unwrap(); + format!("127.0.0.1:{}", addr.port()) } #[tokio::test] async fn await_query__get_current_height__client_receives_expected_value() { // given let path = free_local_addr(); - let mut api = ProtobufAPI::new(path.to_string()); + let mut api = ProtobufAPI::new(path.to_string(), 100).unwrap(); tokio::time::sleep(std::time::Duration::from_millis(100)).await; // call get current height endpoint with client @@ -54,7 +57,7 @@ async fn await_query__get_current_height__client_receives_expected_value() { let handle = tokio::spawn(async move { tracing::info!("querying with client"); client - .get_block_height(BlockHeightRequest {}) + .get_synced_block_height(BlockHeightRequest {}) .await .expect("could not get height") }); @@ -66,21 +69,21 @@ async fn await_query__get_current_height__client_receives_expected_value() { // then // return response through query's channel if let BlockAggregatorQuery::GetCurrentHeight { response } = query { - response.send(BlockHeight::new(42)).unwrap(); + response.send(Some(BlockHeight::new(42))).unwrap(); } else { panic!("expected GetCurrentHeight query"); } let res = handle.await.unwrap(); // assert client received expected value - assert_eq!(res.into_inner().height, 42); + assert_eq!(res.into_inner().height, Some(42)); } #[tokio::test] -async fn await_query__get_block_range__client_receives_expected_value() { 
+async fn await_query__get_block_range__client_receives_expected_value__literal() { // given let path = free_local_addr(); - let mut api = ProtobufAPI::new(path.to_string()); + let mut api = ProtobufAPI::new(path.to_string(), 100).unwrap(); tokio::time::sleep(std::time::Duration::from_millis(100)).await; // call get current height endpoint with client @@ -105,15 +108,16 @@ async fn await_query__get_block_range__client_receives_expected_value() { let serializer_adapter = SerializerAdapter; let fuel_block_1 = FuelBlock::default(); let mut fuel_block_2 = FuelBlock::default(); - let block_height_2 = fuel_block_1.header().height().succ().unwrap(); + let block_height_1 = fuel_block_1.header().height(); + let block_height_2 = block_height_1.succ().unwrap(); fuel_block_2.header_mut().set_block_height(block_height_2); let block1 = serializer_adapter - .serialize_block(&fuel_block_1) + .serialize_block(&fuel_block_1, &[]) .expect("could not serialize block"); let block2 = serializer_adapter - .serialize_block(&fuel_block_2) + .serialize_block(&fuel_block_2, &[]) .expect("could not serialize block"); - let list = vec![block1, block2]; + let list = vec![(*block_height_1, block1), (block_height_2, block2)]; // return response through query's channel if let BlockAggregatorQuery::GetBlockRange { first, @@ -133,7 +137,7 @@ async fn await_query__get_block_range__client_receives_expected_value() { tracing::info!("awaiting query"); let response = handle.await.unwrap(); let expected = list; - let actual: Vec = response + let actual: Vec<(BlockHeight, ProtoBlock)> = response .into_inner() .try_collect::>() .await @@ -141,7 +145,94 @@ async fn await_query__get_block_range__client_receives_expected_value() { .into_iter() .map(|b| { if let Some(Payload::Literal(inner)) = b.payload { - inner + (BlockHeight::new(b.height), inner) + } else { + panic!("unexpected response type") + } + }) + .collect(); + + assert_eq!(expected, actual); +} +#[tokio::test] +async fn 
await_query__get_block_range__client_receives_expected_value__remote() { + // given + let path = free_local_addr(); + let mut api = ProtobufAPI::new(path.to_string(), 100).unwrap(); + tokio::time::sleep(std::time::Duration::from_millis(100)).await; + + // call get current height endpoint with client + let url = format!("http://{}", path); + let mut client = ProtoBlockAggregatorClient::connect(url.to_string()) + .await + .expect("could not connect to server"); + let request = BlockRangeRequest { start: 0, end: 1 }; + let handle = tokio::spawn(async move { + tracing::info!("querying with client"); + client + .get_block_range(request) + .await + .expect("could not get height") + }); + + // when + tracing::info!("awaiting query"); + let query = api.await_query().await.unwrap(); + + // then + let list: Vec<_> = [(BlockHeight::new(1), "1"), (BlockHeight::new(2), "2")] + .iter() + .map(|(height, key)| { + let bucket = "test-bucket".to_string(); + let key = key.to_string(); + let res = RemoteS3Response { + bucket, + key, + requester_pays: false, + aws_endpoint: None, + }; + (*height, res) + }) + .collect(); + // return response through query's channel + if let BlockAggregatorQuery::GetBlockRange { + first, + last, + response, + } = query + { + assert_eq!(first, BlockHeight::new(0)); + assert_eq!(last, BlockHeight::new(1)); + tracing::info!("correct query received, sending response"); + let stream = tokio_stream::iter(list.clone()).boxed(); + let range = BlockRangeResponse::S3(stream); + response.send(range).unwrap(); + } else { + panic!("expected GetBlockRange query"); + } + tracing::info!("awaiting query"); + let response = handle.await.unwrap(); + let expected = list; + let actual: Vec<(BlockHeight, RemoteS3Response)> = response + .into_inner() + .try_collect::>() + .await + .unwrap() + .into_iter() + .map(|b| { + if let Some(Payload::Remote(inner)) = b.payload { + let height = BlockHeight::new(b.height); + let location = inner.location.unwrap(); + let Location::S3(s3) = 
location else { + panic!("unexpected location type") + }; + let res = RemoteS3Response { + bucket: s3.bucket, + key: s3.key, + requester_pays: false, + aws_endpoint: None, + }; + (height, res) } else { panic!("unexpected response type") } @@ -155,7 +246,7 @@ async fn await_query__get_block_range__client_receives_expected_value() { async fn await_query__new_block_stream__client_receives_expected_value() { // given let path = free_local_addr(); - let mut api = ProtobufAPI::new(path.to_string()); + let mut api = ProtobufAPI::new(path.to_string(), 100).unwrap(); tokio::time::sleep(std::time::Duration::from_millis(100)).await; // call get current height endpoint with client @@ -185,16 +276,16 @@ async fn await_query__new_block_stream__client_receives_expected_value() { let mut fuel_block_2 = FuelBlock::default(); fuel_block_2.header_mut().set_block_height(height2); let block1 = serializer_adapter - .serialize_block(&fuel_block_1) + .serialize_block(&fuel_block_1, &[]) .expect("could not serialize block"); let block2 = serializer_adapter - .serialize_block(&fuel_block_2) + .serialize_block(&fuel_block_2, &[]) .expect("could not serialize block"); - let list = vec![block1, block2]; + let list = vec![(height1, block1), (height2, block2)]; if let BlockAggregatorQuery::NewBlockSubscription { response } = query { tracing::info!("correct query received, sending response"); - for block in list.clone() { - response.send(block).await.unwrap(); + for (height, block) in list.clone() { + response.send((height, block)).await.unwrap(); } } else { panic!("expected GetBlockRange query"); @@ -202,7 +293,7 @@ async fn await_query__new_block_stream__client_receives_expected_value() { tracing::info!("awaiting query"); let response = handle.await.unwrap(); let expected = list; - let actual: Vec = response + let actual: Vec<(BlockHeight, ProtoBlock)> = response .into_inner() .try_collect::>() .await @@ -210,7 +301,7 @@ async fn await_query__new_block_stream__client_receives_expected_value() { 
.into_iter() .map(|b| { if let Some(Payload::Literal(inner)) = b.payload { - inner + (BlockHeight::new(b.height), inner) } else { panic!("unexpected response type") } diff --git a/crates/services/block_aggregator_api/src/block_aggregator.rs b/crates/services/block_aggregator_api/src/block_aggregator.rs index 4fde80d22b7..48009d6cfa0 100644 --- a/crates/services/block_aggregator_api/src/block_aggregator.rs +++ b/crates/services/block_aggregator_api/src/block_aggregator.rs @@ -85,7 +85,7 @@ where async fn handle_get_current_height_query( &mut self, - response: tokio::sync::oneshot::Sender, + response: tokio::sync::oneshot::Sender>, ) -> TaskNextAction { let res = self.database.get_current_height().await; let height = try_or_stop!(res, |e| { @@ -100,7 +100,7 @@ where async fn handle_new_block_subscription( &mut self, - response: tokio::sync::mpsc::Sender, + response: tokio::sync::mpsc::Sender<(BlockHeight, Blocks::Block)>, ) -> TaskNextAction { self.new_block_subscriptions.push(response); TaskNextAction::Continue @@ -117,14 +117,14 @@ where let event = try_or_stop!(res, |e| { tracing::error!("Error receiving block from source: {e:?}"); }); - let (id, block) = match event { - BlockSourceEvent::NewBlock(id, block) => { + match &event { + BlockSourceEvent::NewBlock(height, block) => { self.new_block_subscriptions.retain_mut(|sub| { - let send_res = sub.try_send(block.clone()); + let send_res = sub.try_send((*height, block.clone())); match send_res { Ok(_) => true, Err(tokio::sync::mpsc::error::TrySendError::Full(_)) => { - tracing::error!("Error sending new block to subscriber due to full channel: {id:?}"); + tracing::error!("Error sending new block to subscriber due to full channel: {height:?}"); true }, Err(tokio::sync::mpsc::error::TrySendError::Closed(_)) => { @@ -133,11 +133,13 @@ where }, } }); - (id, block) } - BlockSourceEvent::OldBlock(id, block) => (id, block), + BlockSourceEvent::OldBlock(_id, _block) => { + // Do nothing + // Only stream new blocks + } }; - 
let res = self.database.store_block(id, block).await; + let res = self.database.store_block(event).await; try_or_stop!(res, |e| { tracing::error!("Error storing block in database: {e:?}"); }); diff --git a/crates/services/block_aggregator_api/src/block_range_response.rs b/crates/services/block_aggregator_api/src/block_range_response.rs index 24e78af6ff4..76d05465906 100644 --- a/crates/services/block_aggregator_api/src/block_range_response.rs +++ b/crates/services/block_aggregator_api/src/block_range_response.rs @@ -1,14 +1,23 @@ use crate::protobuf_types::Block as ProtoBlock; use fuel_core_services::stream::Stream; +use fuel_core_types::fuel_types::BlockHeight; pub type BoxStream = core::pin::Pin + Send + 'static>>; /// The response to a block range query, either as a literal stream of blocks or as a remote URL pub enum BlockRangeResponse { /// A literal stream of blocks - Literal(BoxStream), + Literal(BoxStream<(BlockHeight, ProtoBlock)>), /// A remote URL where the blocks can be fetched - Remote(String), + S3(BoxStream<(BlockHeight, RemoteS3Response)>), +} + +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct RemoteS3Response { + pub bucket: String, + pub key: String, + pub requester_pays: bool, + pub aws_endpoint: Option, } #[cfg(test)] @@ -16,9 +25,7 @@ impl std::fmt::Debug for BlockRangeResponse { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { BlockRangeResponse::Literal(_) => f.debug_struct("Literal").finish(), - BlockRangeResponse::Remote(url) => { - f.debug_struct("Remote").field("url", url).finish() - } + BlockRangeResponse::S3(_url) => f.debug_struct("Remote").finish(), } } } diff --git a/crates/services/block_aggregator_api/src/blocks.rs b/crates/services/block_aggregator_api/src/blocks.rs index fb8dc76a9c1..cc846995477 100644 --- a/crates/services/block_aggregator_api/src/blocks.rs +++ b/crates/services/block_aggregator_api/src/blocks.rs @@ -1,6 +1,8 @@ use crate::result::Result; -use bytes::Bytes; -use 
fuel_core_types::fuel_types::BlockHeight; +use fuel_core_types::fuel_types::{ + BlockHeight, + bytes::Bytes, +}; use std::fmt::Debug; pub mod importer_and_db_source; @@ -17,26 +19,36 @@ pub trait BlockSource: Send + Sync { fn drain(&mut self) -> impl Future> + Send; } -#[derive(Debug, Eq, PartialEq, Hash)] +#[derive(Clone, Debug, Eq, PartialEq, Hash)] pub enum BlockSourceEvent { NewBlock(BlockHeight, B), OldBlock(BlockHeight, B), } +impl BlockSourceEvent { + pub fn into_inner(self) -> (BlockHeight, B) { + match self { + Self::NewBlock(height, block) | Self::OldBlock(height, block) => { + (height, block) + } + } + } +} + #[derive(Clone, Debug, PartialEq, Eq, Hash, serde::Serialize, serde::Deserialize)] -pub struct Block { +pub struct BlockBytes { bytes: Bytes, } -impl Block { +impl BlockBytes { pub fn new(bytes: Bytes) -> Self { Self { bytes } } #[cfg(test)] pub fn arb_size(rng: &mut Rng, size: usize) -> Self { - let bytes: Bytes = (0..size).map(|_| rng.r#gen()).collect(); - Self::new(bytes) + let bytes: Vec = (0..size).map(|_| rng.r#gen::()).collect(); + Self::new(bytes.into()) } #[cfg(test)] @@ -50,7 +62,7 @@ impl Block { } } -impl From> for Block { +impl From> for BlockBytes { fn from(value: Vec) -> Self { let bytes = Bytes::from(value); Self::new(bytes) diff --git a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source.rs b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source.rs index 892b2b40120..d6dfbc78fa7 100644 --- a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source.rs +++ b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source.rs @@ -16,6 +16,7 @@ use fuel_core_services::{ stream::BoxStream, }; use fuel_core_storage::{ + Error as StorageError, StorageInspect, tables::FuelBlocks, }; @@ -25,8 +26,12 @@ use fuel_core_types::{ services::block_importer::SharedImportResult, }; -use crate::blocks::importer_and_db_source::sync_service::SyncTask; +use 
crate::blocks::importer_and_db_source::sync_service::{ + SyncTask, + TxReceipts, +}; use fuel_core_storage::tables::Transactions; +use fuel_core_types::fuel_tx::Receipt as FuelReceipt; pub mod importer_service; pub mod sync_service; @@ -37,59 +42,60 @@ pub mod serializer_adapter; pub trait BlockSerializer { type Block; - fn serialize_block(&self, block: &FuelBlock) -> Result; + fn serialize_block( + &self, + block: &FuelBlock, + receipts: &[FuelReceipt], + ) -> Result; } -pub struct ImporterAndDbSource +/// A block source that combines an importer and a database sync task. +/// Old blocks will be synced from a target database and new blocks will be received from +/// the importer +pub struct ImporterAndDbSource where Serializer: BlockSerializer + Send + Sync + 'static, ::Block: Send + Sync + 'static, DB: Send + Sync + 'static, - DB: StorageInspect, - DB: StorageInspect, - E: std::fmt::Debug + Send, + DB: StorageInspect, + DB: StorageInspect, + Receipts: TxReceipts, { importer_task: ServiceRunner>, - sync_task: ServiceRunner>, + sync_task: ServiceRunner>, /// Receive blocks from the importer and sync tasks receiver: tokio::sync::mpsc::Receiver>, - - _error_marker: std::marker::PhantomData, } -impl ImporterAndDbSource +impl ImporterAndDbSource where Serializer: BlockSerializer + Clone + Send + Sync + 'static, ::Block: Send + Sync + 'static, - DB: StorageInspect + Send + Sync, - DB: StorageInspect + Send + 'static, - E: std::fmt::Debug + Send, + DB: StorageInspect + Send + Sync, + DB: StorageInspect + Send + 'static, + Receipts: TxReceipts, { pub fn new( importer: BoxStream, serializer: Serializer, - database: DB, + db: DB, + receipts: Receipts, db_starting_height: BlockHeight, - db_ending_height: Option, + db_ending_height: BlockHeight, ) -> Self { const ARB_CHANNEL_SIZE: usize = 100; let (block_return, receiver) = tokio::sync::mpsc::channel(ARB_CHANNEL_SIZE); - let (new_end_sender, new_end_receiver) = tokio::sync::oneshot::channel(); - let importer_task = 
ImporterTask::new( - importer, - serializer.clone(), - block_return.clone(), - Some(new_end_sender), - ); + let importer_task = + ImporterTask::new(importer, serializer.clone(), block_return.clone()); let importer_runner = ServiceRunner::new(importer_task); importer_runner.start().unwrap(); let sync_task = SyncTask::new( serializer, block_return, - database, + db, + receipts, db_starting_height, db_ending_height, - new_end_receiver, ); let sync_runner = ServiceRunner::new(sync_task); sync_runner.start().unwrap(); @@ -97,19 +103,19 @@ where importer_task: importer_runner, sync_task: sync_runner, receiver, - _error_marker: std::marker::PhantomData, } } } -impl BlockSource for ImporterAndDbSource +impl BlockSource + for ImporterAndDbSource where Serializer: BlockSerializer + Send + Sync + 'static, ::Block: Send + Sync + 'static, - DB: Send + Sync, - DB: StorageInspect, - DB: StorageInspect, - E: std::fmt::Debug + Send + Sync, + DB: Send + Sync + 'static, + DB: StorageInspect, + DB: StorageInspect, + Receipts: TxReceipts, { type Block = Serializer::Block; @@ -129,6 +135,9 @@ where } async fn drain(&mut self) -> Result<()> { + self.importer_task.stop(); + self.sync_task.stop(); + self.receiver.close(); Ok(()) } } diff --git a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/importer_service.rs b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/importer_service.rs index 74151e2a0c7..99721b06ad2 100644 --- a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/importer_service.rs +++ b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/importer_service.rs @@ -11,10 +11,7 @@ use fuel_core_services::{ try_or_continue, try_or_stop, }; -use fuel_core_types::{ - fuel_types::BlockHeight, - services::block_importer::SharedImportResult, -}; +use fuel_core_types::services::block_importer::SharedImportResult; use futures::StreamExt; use tokio::sync::mpsc::Sender; @@ -22,7 +19,6 @@ pub struct 
ImporterTask { importer: BoxStream, serializer: Serializer, block_return_sender: Sender>, - new_end_sender: Option>, } impl ImporterTask @@ -34,13 +30,11 @@ where importer: BoxStream, serializer: Serializer, block_return: Sender>, - new_end_sender: Option>, ) -> Self { Self { importer, serializer, block_return_sender: block_return, - new_end_sender, } } } @@ -75,25 +69,15 @@ where match maybe_import_result { Some(import_result) => { let height = import_result.sealed_block.entity.header().height(); - if let Some(sender) = self.new_end_sender.take() { - match sender.send(*height) { - Ok(_) => { - tracing::debug!( - "sent new end height to sync task: {:?}", - height - ); - } - Err(e) => { - tracing::error!( - "failed to send new end height to sync task: {:?}", - e - ); - } - } - } + let receipts = import_result + .tx_status + .iter() + .flat_map(|status| status.result.receipts()) + .map(Clone::clone) + .collect::>(); let res = self .serializer - .serialize_block(&import_result.sealed_block.entity); + .serialize_block(&import_result.sealed_block.entity, &receipts); let block = try_or_continue!(res); let event = BlockSourceEvent::NewBlock(*height, block); let res = self.block_return_sender.send(event).await; diff --git a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/serializer_adapter.rs b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/serializer_adapter.rs index fa7e7db2d8f..e24932de4b3 100644 --- a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/serializer_adapter.rs +++ b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/serializer_adapter.rs @@ -1,122 +1,21 @@ -#[cfg(feature = "fault-proving")] -use crate::protobuf_types::V2Header as ProtoV2Header; use crate::{ blocks::importer_and_db_source::BlockSerializer, protobuf_types::{ - BlobTransaction as ProtoBlobTx, Block as ProtoBlock, - ChangeOutput as ProtoChangeOutput, - CoinOutput as ProtoCoinOutput, - CoinPredicateInput as 
ProtoCoinPredicateInput, - CoinSignedInput as ProtoCoinSignedInput, - ContractCreatedOutput as ProtoContractCreatedOutput, - ContractInput as ProtoContractInput, - ContractOutput as ProtoContractOutput, - CreateTransaction as ProtoCreateTx, - Header as ProtoHeader, - Input as ProtoInput, - MessageCoinPredicateInput as ProtoMessageCoinPredicateInput, - MessageCoinSignedInput as ProtoMessageCoinSignedInput, - MessageDataPredicateInput as ProtoMessageDataPredicateInput, - MessageDataSignedInput as ProtoMessageDataSignedInput, - MintTransaction as ProtoMintTx, - Output as ProtoOutput, - Policies as ProtoPolicies, - ScriptTransaction as ProtoScriptTx, - StorageSlot as ProtoStorageSlot, - Transaction as ProtoTransaction, - TxPointer as ProtoTxPointer, - UpgradeConsensusParameters as ProtoUpgradeConsensusParameters, - UpgradePurpose as ProtoUpgradePurpose, - UpgradeStateTransition as ProtoUpgradeStateTransition, - UpgradeTransaction as ProtoUpgradeTx, - UploadTransaction as ProtoUploadTx, - UtxoId as ProtoUtxoId, V1Block as ProtoV1Block, - V1Header as ProtoV1Header, - VariableOutput as ProtoVariableOutput, block::VersionedBlock as ProtoVersionedBlock, - header::VersionedHeader as ProtoVersionedHeader, - input::Variant as ProtoInputVariant, - output::Variant as ProtoOutputVariant, - transaction::Variant as ProtoTransactionVariant, - upgrade_purpose::Variant as ProtoUpgradePurposeVariant, - }, - result::{ - Error, - Result, }, }; -use anyhow::anyhow; #[cfg(feature = "fault-proving")] -use fuel_core_types::{ - blockchain::header::BlockHeaderV2, - fuel_types::ChainId, -}; +use fuel_core_types::fuel_types::ChainId; use fuel_core_types::{ blockchain::{ block::Block as FuelBlock, - header::{ - ApplicationHeader, - BlockHeader, - BlockHeaderV1, - ConsensusHeader, - GeneratedConsensusFields, - PartialBlockHeader, - }, - primitives::{ - BlockId, - DaBlockHeight, - Empty, - }, - }, - fuel_tx::{ - Address, - BlobBody, - Bytes32, - Input, - Output, - StorageSlot, - Transaction as 
FuelTransaction, - TxPointer, - UpgradePurpose, - UploadBody, - UtxoId, - Witness, - field::{ - BlobId as _, - BytecodeRoot as _, - BytecodeWitnessIndex as _, - InputContract as _, - Inputs, - MintAmount as _, - MintAssetId as _, - MintGasPrice as _, - OutputContract as _, - Outputs, - Policies as _, - ProofSet as _, - ReceiptsRoot as _, - Salt as _, - Script as _, - ScriptData as _, - ScriptGasLimit as _, - StorageSlots as _, - SubsectionIndex as _, - SubsectionsNumber as _, - TxPointer as TxPointerField, - UpgradePurpose as UpgradePurposeField, - Witnesses as _, - }, - policies::{ - Policies as FuelPolicies, - PoliciesBits, - PolicyType, - }, }, - tai64, }; +use fuel_core_types::fuel_tx::Receipt as FuelReceipt; +use crate::blocks::importer_and_db_source::serializer_adapter::fuel_to_proto_conversions::{proto_header_from_header, proto_receipt_from_receipt, proto_tx_from_tx}; #[derive(Clone)] pub struct SerializerAdapter; @@ -124,15 +23,22 @@ pub struct SerializerAdapter; impl BlockSerializer for SerializerAdapter { type Block = ProtoBlock; - fn serialize_block(&self, block: &FuelBlock) -> crate::result::Result { - // TODO: Should this be owned to begin with? 
- let (header, txs) = block.clone().into_inner(); - let proto_header = proto_header_from_header(header); + fn serialize_block( + &self, + block: &FuelBlock, + receipts: &[FuelReceipt], + ) -> crate::result::Result { + let proto_header = proto_header_from_header(block.header()); match &block { FuelBlock::V1(_) => { let proto_v1_block = ProtoV1Block { header: Some(proto_header), - transactions: txs.into_iter().map(proto_tx_from_tx).collect(), + transactions: block + .transactions() + .iter() + .map(proto_tx_from_tx) + .collect(), + receipts: receipts.iter().map(proto_receipt_from_receipt).collect(), }; Ok(ProtoBlock { versioned_block: Some(ProtoVersionedBlock::V1(proto_v1_block)), @@ -142,1352 +48,8 @@ impl BlockSerializer for SerializerAdapter { } } -fn proto_header_from_header(header: BlockHeader) -> ProtoHeader { - let block_id = header.id(); - let consensus = *header.consensus(); - let versioned_header = match header { - BlockHeader::V1(header) => { - let proto_v1_header = - proto_v1_header_from_v1_header(consensus, block_id, header); - ProtoVersionedHeader::V1(proto_v1_header) - } - #[cfg(feature = "fault-proving")] - BlockHeader::V2(header) => { - let proto_v2_header = - proto_v2_header_from_v2_header(consensus, block_id, header); - ProtoVersionedHeader::V2(proto_v2_header) - } - }; - - ProtoHeader { - versioned_header: Some(versioned_header), - } -} - -fn proto_v1_header_from_v1_header( - consensus: ConsensusHeader, - block_id: BlockId, - header: BlockHeaderV1, -) -> ProtoV1Header { - let application = header.application(); - let generated = application.generated; - - ProtoV1Header { - da_height: application.da_height.0, - consensus_parameters_version: application.consensus_parameters_version, - state_transition_bytecode_version: application.state_transition_bytecode_version, - transactions_count: u32::from(generated.transactions_count), - message_receipt_count: generated.message_receipt_count, - transactions_root: bytes32_to_vec(&generated.transactions_root), 
- message_outbox_root: bytes32_to_vec(&generated.message_outbox_root), - event_inbox_root: bytes32_to_vec(&generated.event_inbox_root), - prev_root: bytes32_to_vec(&consensus.prev_root), - height: u32::from(consensus.height), - time: consensus.time.0, - application_hash: bytes32_to_vec(&consensus.generated.application_hash), - block_id: Some(block_id.as_slice().to_vec()), - } -} - -#[cfg(feature = "fault-proving")] -fn proto_v2_header_from_v2_header( - consensus: ConsensusHeader, - block_id: BlockId, - header: BlockHeaderV2, -) -> ProtoV2Header { - let application = *header.application(); - let generated = application.generated; - - ProtoV2Header { - da_height: application.da_height.0, - consensus_parameters_version: application.consensus_parameters_version, - state_transition_bytecode_version: application.state_transition_bytecode_version, - transactions_count: u32::from(generated.transactions_count), - message_receipt_count: generated.message_receipt_count, - transactions_root: bytes32_to_vec(&generated.transactions_root), - message_outbox_root: bytes32_to_vec(&generated.message_outbox_root), - event_inbox_root: bytes32_to_vec(&generated.event_inbox_root), - tx_id_commitment: bytes32_to_vec(&generated.tx_id_commitment), - prev_root: bytes32_to_vec(&consensus.prev_root), - height: u32::from(consensus.height), - time: consensus.time.0, - application_hash: bytes32_to_vec(&consensus.generated.application_hash), - block_id: Some(block_id.as_slice().to_vec()), - } -} - -fn proto_tx_from_tx(tx: FuelTransaction) -> ProtoTransaction { - match tx { - FuelTransaction::Script(script) => { - let proto_script = ProtoScriptTx { - script_gas_limit: *script.script_gas_limit(), - receipts_root: bytes32_to_vec(script.receipts_root()), - script: script.script().clone(), - script_data: script.script_data().clone(), - policies: Some(proto_policies_from_policies(script.policies())), - inputs: script - .inputs() - .iter() - .cloned() - .map(proto_input_from_input) - .collect(), - 
outputs: script - .outputs() - .iter() - .cloned() - .map(proto_output_from_output) - .collect(), - witnesses: script - .witnesses() - .iter() - .map(|witness| witness.as_ref().to_vec()) - .collect(), - metadata: None, - }; - - ProtoTransaction { - variant: Some(ProtoTransactionVariant::Script(proto_script)), - } - } - FuelTransaction::Create(create) => { - let proto_create = ProtoCreateTx { - bytecode_witness_index: u32::from(*create.bytecode_witness_index()), - salt: create.salt().as_ref().to_vec(), - storage_slots: create - .storage_slots() - .iter() - .map(proto_storage_slot_from_storage_slot) - .collect(), - policies: Some(proto_policies_from_policies(create.policies())), - inputs: create - .inputs() - .iter() - .cloned() - .map(proto_input_from_input) - .collect(), - outputs: create - .outputs() - .iter() - .cloned() - .map(proto_output_from_output) - .collect(), - witnesses: create - .witnesses() - .iter() - .map(|witness| witness.as_ref().to_vec()) - .collect(), - metadata: None, - }; - - ProtoTransaction { - variant: Some(ProtoTransactionVariant::Create(proto_create)), - } - } - FuelTransaction::Mint(mint) => { - let proto_mint = ProtoMintTx { - tx_pointer: Some(proto_tx_pointer(mint.tx_pointer())), - input_contract: Some(proto_contract_input_from_contract( - mint.input_contract(), - )), - output_contract: Some(proto_contract_output_from_contract( - mint.output_contract(), - )), - mint_amount: *mint.mint_amount(), - mint_asset_id: mint.mint_asset_id().as_ref().to_vec(), - gas_price: *mint.gas_price(), - metadata: None, - }; - - ProtoTransaction { - variant: Some(ProtoTransactionVariant::Mint(proto_mint)), - } - } - FuelTransaction::Upgrade(upgrade) => { - let proto_upgrade = ProtoUpgradeTx { - purpose: Some(proto_upgrade_purpose(upgrade.upgrade_purpose())), - policies: Some(proto_policies_from_policies(upgrade.policies())), - inputs: upgrade - .inputs() - .iter() - .cloned() - .map(proto_input_from_input) - .collect(), - outputs: upgrade - .outputs() - 
.iter() - .cloned() - .map(proto_output_from_output) - .collect(), - witnesses: upgrade - .witnesses() - .iter() - .map(|witness| witness.as_ref().to_vec()) - .collect(), - metadata: None, - }; - - ProtoTransaction { - variant: Some(ProtoTransactionVariant::Upgrade(proto_upgrade)), - } - } - FuelTransaction::Upload(upload) => { - let proto_upload = ProtoUploadTx { - root: bytes32_to_vec(upload.bytecode_root()), - witness_index: u32::from(*upload.bytecode_witness_index()), - subsection_index: u32::from(*upload.subsection_index()), - subsections_number: u32::from(*upload.subsections_number()), - proof_set: upload.proof_set().iter().map(bytes32_to_vec).collect(), - policies: Some(proto_policies_from_policies(upload.policies())), - inputs: upload - .inputs() - .iter() - .cloned() - .map(proto_input_from_input) - .collect(), - outputs: upload - .outputs() - .iter() - .cloned() - .map(proto_output_from_output) - .collect(), - witnesses: upload - .witnesses() - .iter() - .map(|witness| witness.as_ref().to_vec()) - .collect(), - metadata: None, - }; - - ProtoTransaction { - variant: Some(ProtoTransactionVariant::Upload(proto_upload)), - } - } - FuelTransaction::Blob(blob) => { - let proto_blob = ProtoBlobTx { - blob_id: blob.blob_id().as_ref().to_vec(), - witness_index: u32::from(*blob.bytecode_witness_index()), - policies: Some(proto_policies_from_policies(blob.policies())), - inputs: blob - .inputs() - .iter() - .cloned() - .map(proto_input_from_input) - .collect(), - outputs: blob - .outputs() - .iter() - .cloned() - .map(proto_output_from_output) - .collect(), - witnesses: blob - .witnesses() - .iter() - .map(|witness| witness.as_ref().to_vec()) - .collect(), - metadata: None, - }; - - ProtoTransaction { - variant: Some(ProtoTransactionVariant::Blob(proto_blob)), - } - } - } -} - -fn proto_input_from_input(input: Input) -> ProtoInput { - match input { - Input::CoinSigned(coin_signed) => ProtoInput { - variant: Some(ProtoInputVariant::CoinSigned(ProtoCoinSignedInput { - 
utxo_id: Some(proto_utxo_id_from_utxo_id(&coin_signed.utxo_id)), - owner: coin_signed.owner.as_ref().to_vec(), - amount: coin_signed.amount, - asset_id: coin_signed.asset_id.as_ref().to_vec(), - tx_pointer: Some(proto_tx_pointer(&coin_signed.tx_pointer)), - witness_index: coin_signed.witness_index.into(), - predicate_gas_used: 0, - predicate: vec![], - predicate_data: vec![], - })), - }, - Input::CoinPredicate(coin_predicate) => ProtoInput { - variant: Some(ProtoInputVariant::CoinPredicate(ProtoCoinPredicateInput { - utxo_id: Some(proto_utxo_id_from_utxo_id(&coin_predicate.utxo_id)), - owner: coin_predicate.owner.as_ref().to_vec(), - amount: coin_predicate.amount, - asset_id: coin_predicate.asset_id.as_ref().to_vec(), - tx_pointer: Some(proto_tx_pointer(&coin_predicate.tx_pointer)), - witness_index: 0, - predicate_gas_used: coin_predicate.predicate_gas_used, - predicate: coin_predicate.predicate.as_ref().to_vec(), - predicate_data: coin_predicate.predicate_data.as_ref().to_vec(), - })), - }, - Input::Contract(contract) => ProtoInput { - variant: Some(ProtoInputVariant::Contract(ProtoContractInput { - utxo_id: Some(proto_utxo_id_from_utxo_id(&contract.utxo_id)), - balance_root: bytes32_to_vec(&contract.balance_root), - state_root: bytes32_to_vec(&contract.state_root), - tx_pointer: Some(proto_tx_pointer(&contract.tx_pointer)), - contract_id: contract.contract_id.as_ref().to_vec(), - })), - }, - Input::MessageCoinSigned(message) => ProtoInput { - variant: Some(ProtoInputVariant::MessageCoinSigned( - ProtoMessageCoinSignedInput { - sender: message.sender.as_ref().to_vec(), - recipient: message.recipient.as_ref().to_vec(), - amount: message.amount, - nonce: message.nonce.as_ref().to_vec(), - witness_index: message.witness_index.into(), - predicate_gas_used: 0, - data: Vec::new(), - predicate: Vec::new(), - predicate_data: Vec::new(), - }, - )), - }, - Input::MessageCoinPredicate(message) => ProtoInput { - variant: Some(ProtoInputVariant::MessageCoinPredicate( - 
ProtoMessageCoinPredicateInput { - sender: message.sender.as_ref().to_vec(), - recipient: message.recipient.as_ref().to_vec(), - amount: message.amount, - nonce: message.nonce.as_ref().to_vec(), - witness_index: 0, - predicate_gas_used: message.predicate_gas_used, - data: Vec::new(), - predicate: message.predicate.as_ref().to_vec(), - predicate_data: message.predicate_data.as_ref().to_vec(), - }, - )), - }, - Input::MessageDataSigned(message) => ProtoInput { - variant: Some(ProtoInputVariant::MessageDataSigned( - ProtoMessageDataSignedInput { - sender: message.sender.as_ref().to_vec(), - recipient: message.recipient.as_ref().to_vec(), - amount: message.amount, - nonce: message.nonce.as_ref().to_vec(), - witness_index: message.witness_index.into(), - predicate_gas_used: 0, - data: message.data.as_ref().to_vec(), - predicate: Vec::new(), - predicate_data: Vec::new(), - }, - )), - }, - Input::MessageDataPredicate(message) => ProtoInput { - variant: Some(ProtoInputVariant::MessageDataPredicate( - ProtoMessageDataPredicateInput { - sender: message.sender.as_ref().to_vec(), - recipient: message.recipient.as_ref().to_vec(), - amount: message.amount, - nonce: message.nonce.as_ref().to_vec(), - witness_index: 0, - predicate_gas_used: message.predicate_gas_used, - data: message.data.as_ref().to_vec(), - predicate: message.predicate.as_ref().to_vec(), - predicate_data: message.predicate_data.as_ref().to_vec(), - }, - )), - }, - } -} - -fn proto_utxo_id_from_utxo_id(utxo_id: &UtxoId) -> ProtoUtxoId { - ProtoUtxoId { - tx_id: utxo_id.tx_id().as_ref().to_vec(), - output_index: utxo_id.output_index().into(), - } -} - -fn proto_tx_pointer(tx_pointer: &TxPointer) -> ProtoTxPointer { - ProtoTxPointer { - block_height: tx_pointer.block_height().into(), - tx_index: tx_pointer.tx_index().into(), - } -} - -fn proto_storage_slot_from_storage_slot(slot: &StorageSlot) -> ProtoStorageSlot { - ProtoStorageSlot { - key: slot.key().as_ref().to_vec(), - value: slot.value().as_ref().to_vec(), - 
} -} - -fn proto_contract_input_from_contract( - contract: &fuel_core_types::fuel_tx::input::contract::Contract, -) -> ProtoContractInput { - ProtoContractInput { - utxo_id: Some(proto_utxo_id_from_utxo_id(&contract.utxo_id)), - balance_root: bytes32_to_vec(&contract.balance_root), - state_root: bytes32_to_vec(&contract.state_root), - tx_pointer: Some(proto_tx_pointer(&contract.tx_pointer)), - contract_id: contract.contract_id.as_ref().to_vec(), - } -} - -fn proto_contract_output_from_contract( - contract: &fuel_core_types::fuel_tx::output::contract::Contract, -) -> ProtoContractOutput { - ProtoContractOutput { - input_index: u32::from(contract.input_index), - balance_root: bytes32_to_vec(&contract.balance_root), - state_root: bytes32_to_vec(&contract.state_root), - } -} - -fn proto_output_from_output(output: Output) -> ProtoOutput { - let variant = match output { - Output::Coin { - to, - amount, - asset_id, - } => ProtoOutputVariant::Coin(ProtoCoinOutput { - to: to.as_ref().to_vec(), - amount, - asset_id: asset_id.as_ref().to_vec(), - }), - Output::Contract(contract) => { - ProtoOutputVariant::Contract(proto_contract_output_from_contract(&contract)) - } - Output::Change { - to, - amount, - asset_id, - } => ProtoOutputVariant::Change(ProtoChangeOutput { - to: to.as_ref().to_vec(), - amount, - asset_id: asset_id.as_ref().to_vec(), - }), - Output::Variable { - to, - amount, - asset_id, - } => ProtoOutputVariant::Variable(ProtoVariableOutput { - to: to.as_ref().to_vec(), - amount, - asset_id: asset_id.as_ref().to_vec(), - }), - Output::ContractCreated { - contract_id, - state_root, - } => ProtoOutputVariant::ContractCreated(ProtoContractCreatedOutput { - contract_id: contract_id.as_ref().to_vec(), - state_root: bytes32_to_vec(&state_root), - }), - }; - - ProtoOutput { - variant: Some(variant), - } -} - -fn proto_upgrade_purpose(purpose: &UpgradePurpose) -> ProtoUpgradePurpose { - let variant = match purpose { - UpgradePurpose::ConsensusParameters { - witness_index, - 
checksum, - } => ProtoUpgradePurposeVariant::ConsensusParameters( - ProtoUpgradeConsensusParameters { - witness_index: u32::from(*witness_index), - checksum: checksum.as_ref().to_vec(), - }, - ), - UpgradePurpose::StateTransition { root } => { - ProtoUpgradePurposeVariant::StateTransition(ProtoUpgradeStateTransition { - root: root.as_ref().to_vec(), - }) - } - }; - - ProtoUpgradePurpose { - variant: Some(variant), - } -} - -fn proto_policies_from_policies( - policies: &fuel_core_types::fuel_tx::policies::Policies, -) -> ProtoPolicies { - let mut values = [0u64; 6]; - if policies.is_set(PolicyType::Tip) { - values[0] = policies.get(PolicyType::Tip).unwrap_or_default(); - } - if policies.is_set(PolicyType::WitnessLimit) { - let value = policies.get(PolicyType::WitnessLimit).unwrap_or_default(); - values[1] = value; - } - if policies.is_set(PolicyType::Maturity) { - let value = policies.get(PolicyType::Maturity).unwrap_or_default(); - values[2] = value; - } - if policies.is_set(PolicyType::MaxFee) { - values[3] = policies.get(PolicyType::MaxFee).unwrap_or_default(); - } - if policies.is_set(PolicyType::Expiration) { - values[4] = policies.get(PolicyType::Expiration).unwrap_or_default(); - } - if policies.is_set(PolicyType::Owner) { - values[5] = policies.get(PolicyType::Owner).unwrap_or_default(); - } - let bits = policies.bits(); - ProtoPolicies { - bits, - values: values.to_vec(), - } -} - -fn tx_pointer_from_proto(proto: &ProtoTxPointer) -> Result { - let block_height = proto.block_height.into(); - #[allow(clippy::useless_conversion)] - let tx_index = proto.tx_index.try_into().map_err(|e| { - Error::Serialization(anyhow!("Could not convert tx_index to target type: {}", e)) - })?; - Ok(TxPointer::new(block_height, tx_index)) -} - -fn storage_slot_from_proto(proto: &ProtoStorageSlot) -> Result { - let key = Bytes32::try_from(proto.key.as_slice()).map_err(|e| { - Error::Serialization(anyhow!( - "Could not convert storage slot key to Bytes32: {}", - e - )) - })?; - let 
value = Bytes32::try_from(proto.value.as_slice()).map_err(|e| { - Error::Serialization(anyhow!( - "Could not convert storage slot value to Bytes32: {}", - e - )) - })?; - Ok(StorageSlot::new(key, value)) -} - -fn contract_input_from_proto( - proto: &ProtoContractInput, -) -> Result { - let utxo_proto = proto.utxo_id.as_ref().ok_or_else(|| { - Error::Serialization(anyhow!("Missing utxo_id on contract input")) - })?; - let utxo_id = utxo_id_from_proto(utxo_proto)?; - let balance_root = Bytes32::try_from(proto.balance_root.as_slice()).map_err(|e| { - Error::Serialization(anyhow!("Could not convert balance_root to Bytes32: {}", e)) - })?; - let state_root = Bytes32::try_from(proto.state_root.as_slice()).map_err(|e| { - Error::Serialization(anyhow!("Could not convert state_root to Bytes32: {}", e)) - })?; - let tx_pointer_proto = proto.tx_pointer.as_ref().ok_or_else(|| { - Error::Serialization(anyhow!("Missing tx_pointer on contract input")) - })?; - let tx_pointer = tx_pointer_from_proto(tx_pointer_proto)?; - let contract_id = - fuel_core_types::fuel_types::ContractId::try_from(proto.contract_id.as_slice()) - .map_err(|e| Error::Serialization(anyhow!(e)))?; - - Ok(fuel_core_types::fuel_tx::input::contract::Contract { - utxo_id, - balance_root, - state_root, - tx_pointer, - contract_id, - }) -} - -fn contract_output_from_proto( - proto: &ProtoContractOutput, -) -> Result { - let input_index = u16::try_from(proto.input_index).map_err(|e| { - Error::Serialization(anyhow!( - "Could not convert contract output input_index to u16: {}", - e - )) - })?; - let balance_root = Bytes32::try_from(proto.balance_root.as_slice()).map_err(|e| { - Error::Serialization(anyhow!( - "Could not convert contract output balance_root to Bytes32: {}", - e - )) - })?; - let state_root = Bytes32::try_from(proto.state_root.as_slice()).map_err(|e| { - Error::Serialization(anyhow!( - "Could not convert contract output state_root to Bytes32: {}", - e - )) - })?; - - 
Ok(fuel_core_types::fuel_tx::output::contract::Contract { - input_index, - balance_root, - state_root, - }) -} - -fn output_from_proto_output(proto_output: &ProtoOutput) -> Result { - match proto_output - .variant - .as_ref() - .ok_or_else(|| Error::Serialization(anyhow!("Missing output variant")))? - { - ProtoOutputVariant::Coin(coin) => { - let to = Address::try_from(coin.to.as_slice()) - .map_err(|e| Error::Serialization(anyhow!(e)))?; - let asset_id = - fuel_core_types::fuel_types::AssetId::try_from(coin.asset_id.as_slice()) - .map_err(|e| Error::Serialization(anyhow!(e)))?; - Ok(Output::coin(to, coin.amount, asset_id)) - } - ProtoOutputVariant::Contract(contract) => { - let contract = contract_output_from_proto(contract)?; - Ok(Output::Contract(contract)) - } - ProtoOutputVariant::Change(change) => { - let to = Address::try_from(change.to.as_slice()) - .map_err(|e| Error::Serialization(anyhow!(e)))?; - let asset_id = fuel_core_types::fuel_types::AssetId::try_from( - change.asset_id.as_slice(), - ) - .map_err(|e| Error::Serialization(anyhow!(e)))?; - Ok(Output::change(to, change.amount, asset_id)) - } - ProtoOutputVariant::Variable(variable) => { - let to = Address::try_from(variable.to.as_slice()) - .map_err(|e| Error::Serialization(anyhow!(e)))?; - let asset_id = fuel_core_types::fuel_types::AssetId::try_from( - variable.asset_id.as_slice(), - ) - .map_err(|e| Error::Serialization(anyhow!(e)))?; - Ok(Output::variable(to, variable.amount, asset_id)) - } - ProtoOutputVariant::ContractCreated(contract_created) => { - let contract_id = fuel_core_types::fuel_types::ContractId::try_from( - contract_created.contract_id.as_slice(), - ) - .map_err(|e| Error::Serialization(anyhow!(e)))?; - let state_root = Bytes32::try_from(contract_created.state_root.as_slice()) - .map_err(|e| { - Error::Serialization(anyhow!( - "Could not convert state_root to Bytes32: {}", - e - )) - })?; - Ok(Output::contract_created(contract_id, state_root)) - } - } -} - -fn 
upgrade_purpose_from_proto(proto: &ProtoUpgradePurpose) -> Result { - match proto - .variant - .as_ref() - .ok_or_else(|| Error::Serialization(anyhow!("Missing upgrade purpose variant")))? - { - ProtoUpgradePurposeVariant::ConsensusParameters(consensus) => { - let witness_index = u16::try_from(consensus.witness_index).map_err(|e| { - Error::Serialization(anyhow!( - "Could not convert witness_index to u16: {}", - e - )) - })?; - let checksum = - Bytes32::try_from(consensus.checksum.as_slice()).map_err(|e| { - Error::Serialization(anyhow!( - "Could not convert checksum to Bytes32: {}", - e - )) - })?; - Ok(UpgradePurpose::ConsensusParameters { - witness_index, - checksum, - }) - } - ProtoUpgradePurposeVariant::StateTransition(state) => { - let root = Bytes32::try_from(state.root.as_slice()).map_err(|e| { - Error::Serialization(anyhow!( - "Could not convert state transition root to Bytes32: {}", - e - )) - })?; - Ok(UpgradePurpose::StateTransition { root }) - } - } -} - -fn utxo_id_from_proto(proto_utxo: &ProtoUtxoId) -> Result { - let tx_id = Bytes32::try_from(proto_utxo.tx_id.as_slice()).map_err(|e| { - Error::Serialization(anyhow!("Could not convert tx_id to Bytes32: {}", e)) - })?; - let output_index = u16::try_from(proto_utxo.output_index).map_err(|e| { - Error::Serialization(anyhow!("Could not convert output_index to u16: {}", e)) - })?; - Ok(UtxoId::new(tx_id, output_index)) -} - -fn bytes32_to_vec(bytes: &fuel_core_types::fuel_types::Bytes32) -> Vec { - bytes.as_ref().to_vec() -} - -pub fn fuel_block_from_protobuf( - proto_block: ProtoBlock, - msg_ids: &[fuel_core_types::fuel_tx::MessageId], - event_inbox_root: Bytes32, -) -> Result { - let versioned_block = proto_block - .versioned_block - .ok_or_else(|| anyhow::anyhow!("Missing protobuf versioned_block")) - .map_err(Error::Serialization)?; - let partial_header = match &versioned_block { - ProtoVersionedBlock::V1(v1_block) => { - let proto_header = v1_block - .header - .clone() - .ok_or_else(|| 
anyhow::anyhow!("Missing protobuf header")) - .map_err(Error::Serialization)?; - partial_header_from_proto_header(proto_header)? - } - }; - let txs = match versioned_block { - ProtoVersionedBlock::V1(v1_inner) => v1_inner - .transactions - .iter() - .map(tx_from_proto_tx) - .collect::>()?, - }; - FuelBlock::new( - partial_header, - txs, - msg_ids, - event_inbox_root, - #[cfg(feature = "fault-proving")] - &ChainId::default(), - ) - .map_err(|e| anyhow!(e)) - .map_err(Error::Serialization) -} - -pub fn partial_header_from_proto_header( - proto_header: ProtoHeader, -) -> Result { - let partial_header = PartialBlockHeader { - consensus: proto_header_to_empty_consensus_header(&proto_header)?, - application: proto_header_to_empty_application_header(&proto_header)?, - }; - Ok(partial_header) -} - -pub fn tx_from_proto_tx(proto_tx: &ProtoTransaction) -> Result { - let variant = proto_tx - .variant - .as_ref() - .ok_or_else(|| Error::Serialization(anyhow!("Missing transaction variant")))?; - - match variant { - ProtoTransactionVariant::Script(proto_script) => { - let policies = proto_script - .policies - .clone() - .map(policies_from_proto_policies) - .unwrap_or_default(); - let inputs = proto_script - .inputs - .iter() - .map(input_from_proto_input) - .collect::>>()?; - let outputs = proto_script - .outputs - .iter() - .map(output_from_proto_output) - .collect::>>()?; - let witnesses = proto_script - .witnesses - .iter() - .map(|w| Ok(Witness::from(w.clone()))) - .collect::>>()?; - let mut script_tx = FuelTransaction::script( - proto_script.script_gas_limit, - proto_script.script.clone(), - proto_script.script_data.clone(), - policies, - inputs, - outputs, - witnesses, - ); - *script_tx.receipts_root_mut() = Bytes32::try_from( - proto_script.receipts_root.as_slice(), - ) - .map_err(|e| { - Error::Serialization(anyhow!( - "Could not convert receipts_root to Bytes32: {}", - e - )) - })?; - - Ok(FuelTransaction::Script(script_tx)) - } - 
ProtoTransactionVariant::Create(proto_create) => { - let policies = proto_create - .policies - .clone() - .map(policies_from_proto_policies) - .unwrap_or_default(); - let inputs = proto_create - .inputs - .iter() - .map(input_from_proto_input) - .collect::>>()?; - let outputs = proto_create - .outputs - .iter() - .map(output_from_proto_output) - .collect::>>()?; - let witnesses = proto_create - .witnesses - .iter() - .map(|w| Ok(Witness::from(w.clone()))) - .collect::>>()?; - let storage_slots = proto_create - .storage_slots - .iter() - .map(storage_slot_from_proto) - .collect::>>()?; - let salt = - fuel_core_types::fuel_types::Salt::try_from(proto_create.salt.as_slice()) - .map_err(|e| Error::Serialization(anyhow!(e)))?; - let bytecode_witness_index = - u16::try_from(proto_create.bytecode_witness_index).map_err(|e| { - Error::Serialization(anyhow!( - "Could not convert bytecode_witness_index to u16: {}", - e - )) - })?; - - let create_tx = FuelTransaction::create( - bytecode_witness_index, - policies, - salt, - storage_slots, - inputs, - outputs, - witnesses, - ); - - Ok(FuelTransaction::Create(create_tx)) - } - ProtoTransactionVariant::Mint(proto_mint) => { - let tx_pointer_proto = proto_mint.tx_pointer.as_ref().ok_or_else(|| { - Error::Serialization(anyhow!("Missing tx_pointer on mint transaction")) - })?; - let tx_pointer = tx_pointer_from_proto(tx_pointer_proto)?; - let input_contract_proto = - proto_mint.input_contract.as_ref().ok_or_else(|| { - Error::Serialization(anyhow!( - "Missing input_contract on mint transaction" - )) - })?; - let input_contract = contract_input_from_proto(input_contract_proto)?; - let output_contract_proto = - proto_mint.output_contract.as_ref().ok_or_else(|| { - Error::Serialization(anyhow!( - "Missing output_contract on mint transaction" - )) - })?; - let output_contract = contract_output_from_proto(output_contract_proto)?; - let mint_asset_id = fuel_core_types::fuel_types::AssetId::try_from( - proto_mint.mint_asset_id.as_slice(), 
- ) - .map_err(|e| Error::Serialization(anyhow!(e)))?; - - let mint_tx = FuelTransaction::mint( - tx_pointer, - input_contract, - output_contract, - proto_mint.mint_amount, - mint_asset_id, - proto_mint.gas_price, - ); - - Ok(FuelTransaction::Mint(mint_tx)) - } - ProtoTransactionVariant::Upgrade(proto_upgrade) => { - let purpose_proto = proto_upgrade.purpose.as_ref().ok_or_else(|| { - Error::Serialization(anyhow!("Missing purpose on upgrade transaction")) - })?; - let upgrade_purpose = upgrade_purpose_from_proto(purpose_proto)?; - let policies = proto_upgrade - .policies - .clone() - .map(policies_from_proto_policies) - .unwrap_or_default(); - let inputs = proto_upgrade - .inputs - .iter() - .map(input_from_proto_input) - .collect::>>()?; - let outputs = proto_upgrade - .outputs - .iter() - .map(output_from_proto_output) - .collect::>>()?; - let witnesses = proto_upgrade - .witnesses - .iter() - .map(|w| Ok(Witness::from(w.clone()))) - .collect::>>()?; - - let upgrade_tx = FuelTransaction::upgrade( - upgrade_purpose, - policies, - inputs, - outputs, - witnesses, - ); - - Ok(FuelTransaction::Upgrade(upgrade_tx)) - } - ProtoTransactionVariant::Upload(proto_upload) => { - let policies = proto_upload - .policies - .clone() - .map(policies_from_proto_policies) - .unwrap_or_default(); - let inputs = proto_upload - .inputs - .iter() - .map(input_from_proto_input) - .collect::>>()?; - let outputs = proto_upload - .outputs - .iter() - .map(output_from_proto_output) - .collect::>>()?; - let witnesses = proto_upload - .witnesses - .iter() - .map(|w| Ok(Witness::from(w.clone()))) - .collect::>>()?; - let root = Bytes32::try_from(proto_upload.root.as_slice()).map_err(|e| { - Error::Serialization(anyhow!( - "Could not convert upload root to Bytes32: {}", - e - )) - })?; - let witness_index = - u16::try_from(proto_upload.witness_index).map_err(|e| { - Error::Serialization(anyhow!( - "Could not convert witness_index to u16: {}", - e - )) - })?; - let subsection_index = - 
u16::try_from(proto_upload.subsection_index).map_err(|e| { - Error::Serialization(anyhow!( - "Could not convert subsection_index to u16: {}", - e - )) - })?; - let subsections_number = u16::try_from(proto_upload.subsections_number) - .map_err(|e| { - Error::Serialization(anyhow!( - "Could not convert subsections_number to u16: {}", - e - )) - })?; - let proof_set = proto_upload - .proof_set - .iter() - .map(|entry| { - Bytes32::try_from(entry.as_slice()).map_err(|e| { - Error::Serialization(anyhow!( - "Could not convert proof_set entry to Bytes32: {}", - e - )) - }) - }) - .collect::>>()?; - - let body = UploadBody { - root, - witness_index, - subsection_index, - subsections_number, - proof_set, - }; - - let upload_tx = - FuelTransaction::upload(body, policies, inputs, outputs, witnesses); - - Ok(FuelTransaction::Upload(upload_tx)) - } - ProtoTransactionVariant::Blob(proto_blob) => { - let policies = proto_blob - .policies - .clone() - .map(policies_from_proto_policies) - .unwrap_or_default(); - let inputs = proto_blob - .inputs - .iter() - .map(input_from_proto_input) - .collect::>>()?; - let outputs = proto_blob - .outputs - .iter() - .map(output_from_proto_output) - .collect::>>()?; - let witnesses = proto_blob - .witnesses - .iter() - .map(|w| Ok(Witness::from(w.clone()))) - .collect::>>()?; - let blob_id = fuel_core_types::fuel_types::BlobId::try_from( - proto_blob.blob_id.as_slice(), - ) - .map_err(|e| Error::Serialization(anyhow!(e)))?; - let witness_index = u16::try_from(proto_blob.witness_index).map_err(|e| { - Error::Serialization(anyhow!( - "Could not convert blob witness_index to u16: {}", - e - )) - })?; - let body = BlobBody { - id: blob_id, - witness_index, - }; - - let blob_tx = - FuelTransaction::blob(body, policies, inputs, outputs, witnesses); - - Ok(FuelTransaction::Blob(blob_tx)) - } - } -} - -fn input_from_proto_input(proto_input: &ProtoInput) -> Result { - let variant = proto_input - .variant - .as_ref() - .ok_or_else(|| 
Error::Serialization(anyhow!("Missing input variant")))?; - - match variant { - ProtoInputVariant::CoinSigned(proto_coin_signed) => { - let utxo_proto = proto_coin_signed - .utxo_id - .as_ref() - .ok_or_else(|| Error::Serialization(anyhow!("Missing utxo_id")))?; - let utxo_id = utxo_id_from_proto(utxo_proto)?; - let owner = - Address::try_from(proto_coin_signed.owner.as_slice()).map_err(|e| { - Error::Serialization(anyhow!( - "Could not convert owner to Address: {}", - e - )) - })?; - let asset_id = fuel_core_types::fuel_types::AssetId::try_from( - proto_coin_signed.asset_id.as_slice(), - ) - .map_err(|e| Error::Serialization(anyhow!(e)))?; - let tx_pointer_proto = proto_coin_signed - .tx_pointer - .as_ref() - .ok_or_else(|| Error::Serialization(anyhow!("Missing tx_pointer")))?; - let tx_pointer = tx_pointer_from_proto(tx_pointer_proto)?; - let witness_index = - u16::try_from(proto_coin_signed.witness_index).map_err(|e| { - Error::Serialization(anyhow!( - "Could not convert witness_index to u16: {}", - e - )) - })?; - - Ok(Input::coin_signed( - utxo_id, - owner, - proto_coin_signed.amount, - asset_id, - tx_pointer, - witness_index, - )) - } - ProtoInputVariant::CoinPredicate(proto_coin_predicate) => { - let utxo_proto = proto_coin_predicate - .utxo_id - .as_ref() - .ok_or_else(|| Error::Serialization(anyhow!("Missing utxo_id")))?; - let utxo_id = utxo_id_from_proto(utxo_proto)?; - let owner = Address::try_from(proto_coin_predicate.owner.as_slice()) - .map_err(|e| { - Error::Serialization(anyhow!( - "Could not convert owner to Address: {}", - e - )) - })?; - let asset_id = fuel_core_types::fuel_types::AssetId::try_from( - proto_coin_predicate.asset_id.as_slice(), - ) - .map_err(|e| Error::Serialization(anyhow!(e)))?; - let tx_pointer_proto = proto_coin_predicate - .tx_pointer - .as_ref() - .ok_or_else(|| Error::Serialization(anyhow!("Missing tx_pointer")))?; - let tx_pointer = tx_pointer_from_proto(tx_pointer_proto)?; - - Ok(Input::coin_predicate( - utxo_id, - 
owner, - proto_coin_predicate.amount, - asset_id, - tx_pointer, - proto_coin_predicate.predicate_gas_used, - proto_coin_predicate.predicate.clone(), - proto_coin_predicate.predicate_data.clone(), - )) - } - ProtoInputVariant::Contract(proto_contract) => { - let contract = contract_input_from_proto(proto_contract)?; - Ok(Input::Contract(contract)) - } - ProtoInputVariant::MessageCoinSigned(proto_message) => { - let sender = - Address::try_from(proto_message.sender.as_slice()).map_err(|e| { - Error::Serialization(anyhow!( - "Could not convert sender to Address: {}", - e - )) - })?; - let recipient = Address::try_from(proto_message.recipient.as_slice()) - .map_err(|e| { - Error::Serialization(anyhow!( - "Could not convert recipient to Address: {}", - e - )) - })?; - let nonce = fuel_core_types::fuel_types::Nonce::try_from( - proto_message.nonce.as_slice(), - ) - .map_err(|e| Error::Serialization(anyhow!(e)))?; - let witness_index = - u16::try_from(proto_message.witness_index).map_err(|e| { - Error::Serialization(anyhow!( - "Could not convert witness_index to u16: {}", - e - )) - })?; - - Ok(Input::message_coin_signed( - sender, - recipient, - proto_message.amount, - nonce, - witness_index, - )) - } - ProtoInputVariant::MessageCoinPredicate(proto_message) => { - let sender = - Address::try_from(proto_message.sender.as_slice()).map_err(|e| { - Error::Serialization(anyhow!( - "Could not convert sender to Address: {}", - e - )) - })?; - let recipient = Address::try_from(proto_message.recipient.as_slice()) - .map_err(|e| { - Error::Serialization(anyhow!( - "Could not convert recipient to Address: {}", - e - )) - })?; - let nonce = fuel_core_types::fuel_types::Nonce::try_from( - proto_message.nonce.as_slice(), - ) - .map_err(|e| Error::Serialization(anyhow!(e)))?; - - Ok(Input::message_coin_predicate( - sender, - recipient, - proto_message.amount, - nonce, - proto_message.predicate_gas_used, - proto_message.predicate.clone(), - proto_message.predicate_data.clone(), - )) - } 
- ProtoInputVariant::MessageDataSigned(proto_message) => { - let sender = - Address::try_from(proto_message.sender.as_slice()).map_err(|e| { - Error::Serialization(anyhow!( - "Could not convert sender to Address: {}", - e - )) - })?; - let recipient = Address::try_from(proto_message.recipient.as_slice()) - .map_err(|e| { - Error::Serialization(anyhow!( - "Could not convert recipient to Address: {}", - e - )) - })?; - let nonce = fuel_core_types::fuel_types::Nonce::try_from( - proto_message.nonce.as_slice(), - ) - .map_err(|e| Error::Serialization(anyhow!(e)))?; - let witness_index = - u16::try_from(proto_message.witness_index).map_err(|e| { - Error::Serialization(anyhow!( - "Could not convert witness_index to u16: {}", - e - )) - })?; - - Ok(Input::message_data_signed( - sender, - recipient, - proto_message.amount, - nonce, - witness_index, - proto_message.data.clone(), - )) - } - ProtoInputVariant::MessageDataPredicate(proto_message) => { - let sender = - Address::try_from(proto_message.sender.as_slice()).map_err(|e| { - Error::Serialization(anyhow!( - "Could not convert sender to Address: {}", - e - )) - })?; - let recipient = Address::try_from(proto_message.recipient.as_slice()) - .map_err(|e| { - Error::Serialization(anyhow!( - "Could not convert recipient to Address: {}", - e - )) - })?; - let nonce = fuel_core_types::fuel_types::Nonce::try_from( - proto_message.nonce.as_slice(), - ) - .map_err(|e| Error::Serialization(anyhow!(e)))?; - - Ok(Input::message_data_predicate( - sender, - recipient, - proto_message.amount, - nonce, - proto_message.predicate_gas_used, - proto_message.data.clone(), - proto_message.predicate.clone(), - proto_message.predicate_data.clone(), - )) - } - } -} - -fn policies_from_proto_policies(proto_policies: ProtoPolicies) -> FuelPolicies { - let ProtoPolicies { bits, values } = proto_policies; - let mut policies = FuelPolicies::default(); - let bits = - PoliciesBits::from_bits(bits).expect("Should be able to create from `u32`"); - if 
bits.contains(PoliciesBits::Tip) - && let Some(tip) = values.first() - { - policies.set(PolicyType::Tip, Some(*tip)); - } - if bits.contains(PoliciesBits::WitnessLimit) - && let Some(witness_limit) = values.get(1) - { - policies.set(PolicyType::WitnessLimit, Some(*witness_limit)); - } - if bits.contains(PoliciesBits::Maturity) - && let Some(maturity) = values.get(2) - { - policies.set(PolicyType::Maturity, Some(*maturity)); - } - if bits.contains(PoliciesBits::MaxFee) - && let Some(max_fee) = values.get(3) - { - policies.set(PolicyType::MaxFee, Some(*max_fee)); - } - if bits.contains(PoliciesBits::Expiration) - && let Some(expiration) = values.get(4) - { - policies.set(PolicyType::Expiration, Some(*expiration)); - } - if bits.contains(PoliciesBits::Owner) - && let Some(owner) = values.get(5) - { - policies.set(PolicyType::Owner, Some(*owner)); - } - policies -} - -pub fn proto_header_to_empty_application_header( - proto_header: &ProtoHeader, -) -> Result> { - match proto_header.versioned_header.clone() { - Some(ProtoVersionedHeader::V1(header)) => { - let app_header = ApplicationHeader { - da_height: DaBlockHeight::from(header.da_height), - consensus_parameters_version: header.consensus_parameters_version, - state_transition_bytecode_version: header - .state_transition_bytecode_version, - generated: Empty {}, - }; - Ok(app_header) - } - Some(ProtoVersionedHeader::V2(header)) => { - if cfg!(feature = "fault-proving") { - let app_header = ApplicationHeader { - da_height: DaBlockHeight::from(header.da_height), - consensus_parameters_version: header.consensus_parameters_version, - state_transition_bytecode_version: header - .state_transition_bytecode_version, - generated: Empty {}, - }; - Ok(app_header) - } else { - Err(anyhow!("V2 headers require the 'fault-proving' feature")) - .map_err(Error::Serialization) - } - } - None => Err(anyhow!("Missing protobuf versioned_header")) - .map_err(Error::Serialization), - } -} - -/// Alias the consensus header into an empty one. 
-pub fn proto_header_to_empty_consensus_header( - proto_header: &ProtoHeader, -) -> Result> { - match proto_header.versioned_header.clone() { - Some(ProtoVersionedHeader::V1(header)) => { - let consensus_header = ConsensusHeader { - prev_root: *Bytes32::from_bytes_ref_checked(&header.prev_root).ok_or( - Error::Serialization(anyhow!("Could create `Bytes32` from bytes")), - )?, - height: header.height.into(), - time: tai64::Tai64(header.time), - generated: Empty {}, - }; - Ok(consensus_header) - } - Some(ProtoVersionedHeader::V2(header)) => { - if cfg!(feature = "fault-proving") { - let consensus_header = ConsensusHeader { - prev_root: *Bytes32::from_bytes_ref_checked(&header.prev_root) - .ok_or(Error::Serialization(anyhow!( - "Could create `Bytes32` from bytes" - )))?, - height: header.height.into(), - time: tai64::Tai64(header.time), - generated: Empty {}, - }; - Ok(consensus_header) - } else { - Err(anyhow!("V2 headers require the 'fault-proving' feature")) - .map_err(Error::Serialization) - } - } - None => Err(anyhow!("Missing protobuf versioned_header")) - .map_err(Error::Serialization), - } -} +pub mod fuel_to_proto_conversions; +pub mod proto_to_fuel_conversions; // TODO: Add coverage for V2 Block stuff // https://github.com/FuelLabs/fuel-core/issues/3139 @@ -1496,26 +58,30 @@ pub fn proto_header_to_empty_consensus_header( #[cfg(test)] mod tests { use super::*; - use fuel_core_types::test_helpers::arb_block; + use fuel_core_types::test_helpers::{arb_block, arb_receipts}; use proptest::prelude::*; + use crate::blocks::importer_and_db_source::serializer_adapter::proto_to_fuel_conversions::fuel_block_from_protobuf; proptest! { #![proptest_config(ProptestConfig { - cases: 100, .. ProptestConfig::default() + cases: 1, .. 
ProptestConfig::default() })] #[test] - fn serialize_block__roundtrip((block, msg_ids, event_inbox_root) in arb_block()) { + fn serialize_block__roundtrip( + (block, msg_ids, event_inbox_root) in arb_block(), + receipts in arb_receipts()) + { // given let serializer = SerializerAdapter; // when - let proto_block = serializer.serialize_block(&block).unwrap(); + let proto_block = serializer.serialize_block(&block, &receipts).unwrap(); // then - let deserialized_block = fuel_block_from_protobuf(proto_block, &msg_ids, event_inbox_root).unwrap(); + let (deserialized_block, deserialized_receipts) = fuel_block_from_protobuf(proto_block, &msg_ids, event_inbox_root).unwrap(); assert_eq!(block, deserialized_block); - - } + assert_eq!(receipts, deserialized_receipts); + } } #[test] diff --git a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/serializer_adapter/fuel_to_proto_conversions.rs b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/serializer_adapter/fuel_to_proto_conversions.rs new file mode 100644 index 00000000000..66c3a84ce29 --- /dev/null +++ b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/serializer_adapter/fuel_to_proto_conversions.rs @@ -0,0 +1,855 @@ +#[cfg(feature = "fault-proving")] +use crate::protobuf_types::V2Header as ProtoV2Header; +use crate::{ + blocks::importer_and_db_source::serializer_adapter::proto_to_fuel_conversions::bytes32_to_vec, + protobuf_types::{ + BlobTransaction as ProtoBlobTx, + ChangeOutput as ProtoChangeOutput, + CoinOutput as ProtoCoinOutput, + CoinPredicateInput as ProtoCoinPredicateInput, + CoinSignedInput as ProtoCoinSignedInput, + ContractCreatedOutput as ProtoContractCreatedOutput, + ContractInput as ProtoContractInput, + ContractOutput as ProtoContractOutput, + CreateTransaction as ProtoCreateTx, + Header as ProtoHeader, + Input as ProtoInput, + MessageCoinPredicateInput as ProtoMessageCoinPredicateInput, + MessageCoinSignedInput as 
ProtoMessageCoinSignedInput, + MessageDataPredicateInput as ProtoMessageDataPredicateInput, + MessageDataSignedInput as ProtoMessageDataSignedInput, + MintTransaction as ProtoMintTx, + Output as ProtoOutput, + Policies as ProtoPolicies, + Receipt as ProtoReceipt, + ScriptTransaction as ProtoScriptTx, + StorageSlot as ProtoStorageSlot, + Transaction as ProtoTransaction, + TxPointer as ProtoTxPointer, + UpgradeConsensusParameters as ProtoUpgradeConsensusParameters, + UpgradePurpose as ProtoUpgradePurpose, + UpgradeStateTransition as ProtoUpgradeStateTransition, + UpgradeTransaction as ProtoUpgradeTx, + UploadTransaction as ProtoUploadTx, + UtxoId as ProtoUtxoId, + V1Header as ProtoV1Header, + VariableOutput as ProtoVariableOutput, + header::VersionedHeader as ProtoVersionedHeader, + input::Variant as ProtoInputVariant, + output::Variant as ProtoOutputVariant, + receipt::Variant as ProtoReceiptVariant, + script_execution_result::Variant as ProtoScriptExecutionResultVariant, + transaction::Variant as ProtoTransactionVariant, + upgrade_purpose::Variant as ProtoUpgradePurposeVariant, + }, +}; + +#[cfg(feature = "fault-proving")] +use fuel_core_types::blockchain::header::BlockHeaderV2; +use fuel_core_types::{ + blockchain::{ + header::{ + BlockHeader, + BlockHeaderV1, + ConsensusHeader, + GeneratedConsensusFields, + }, + primitives::BlockId, + }, + fuel_asm::PanicInstruction, + fuel_tx::{ + Input, + Output, + Receipt as FuelReceipt, + ScriptExecutionResult, + StorageSlot, + Transaction as FuelTransaction, + TxPointer, + UpgradePurpose, + UtxoId, + field::{ + BlobId as _, + BytecodeRoot as _, + BytecodeWitnessIndex as _, + InputContract as _, + Inputs, + MintAmount as _, + MintAssetId as _, + MintGasPrice as _, + OutputContract as _, + Outputs, + Policies as _, + ProofSet as _, + ReceiptsRoot as _, + Salt as _, + Script as _, + ScriptData as _, + ScriptGasLimit as _, + StorageSlots as _, + SubsectionIndex as _, + SubsectionsNumber as _, + TxPointer as TxPointerField, + 
UpgradePurpose as UpgradePurposeField, + Witnesses as _, + }, + policies::PolicyType, + }, +}; + +pub fn proto_header_from_header(header: &BlockHeader) -> ProtoHeader { + let block_id = header.id(); + let consensus = header.consensus(); + let versioned_header = match header { + BlockHeader::V1(header) => { + let proto_v1_header = + proto_v1_header_from_v1_header(consensus, &block_id, header); + ProtoVersionedHeader::V1(proto_v1_header) + } + #[cfg(feature = "fault-proving")] + BlockHeader::V2(header) => { + let proto_v2_header = + proto_v2_header_from_v2_header(consensus, &block_id, header); + ProtoVersionedHeader::V2(proto_v2_header) + } + }; + + ProtoHeader { + versioned_header: Some(versioned_header), + } +} + +fn proto_v1_header_from_v1_header( + consensus: &ConsensusHeader, + block_id: &BlockId, + header: &BlockHeaderV1, +) -> ProtoV1Header { + let application = header.application(); + let generated = application.generated; + + ProtoV1Header { + da_height: application.da_height.0, + consensus_parameters_version: application.consensus_parameters_version, + state_transition_bytecode_version: application.state_transition_bytecode_version, + transactions_count: u32::from(generated.transactions_count), + message_receipt_count: generated.message_receipt_count, + transactions_root: bytes32_to_vec(&generated.transactions_root), + message_outbox_root: bytes32_to_vec(&generated.message_outbox_root), + event_inbox_root: bytes32_to_vec(&generated.event_inbox_root), + prev_root: bytes32_to_vec(&consensus.prev_root), + height: u32::from(consensus.height), + time: consensus.time.0, + application_hash: bytes32_to_vec(&consensus.generated.application_hash), + block_id: Some(block_id.as_slice().to_vec()), + } +} + +#[cfg(feature = "fault-proving")] +fn proto_v2_header_from_v2_header( + consensus: &ConsensusHeader, + block_id: &BlockId, + header: &BlockHeaderV2, +) -> ProtoV2Header { + let application = *header.application(); + let generated = application.generated; + + 
ProtoV2Header { + da_height: application.da_height.0, + consensus_parameters_version: application.consensus_parameters_version, + state_transition_bytecode_version: application.state_transition_bytecode_version, + transactions_count: u32::from(generated.transactions_count), + message_receipt_count: generated.message_receipt_count, + transactions_root: bytes32_to_vec(&generated.transactions_root), + message_outbox_root: bytes32_to_vec(&generated.message_outbox_root), + event_inbox_root: bytes32_to_vec(&generated.event_inbox_root), + tx_id_commitment: bytes32_to_vec(&generated.tx_id_commitment), + prev_root: bytes32_to_vec(&consensus.prev_root), + height: u32::from(consensus.height), + time: consensus.time.0, + application_hash: bytes32_to_vec(&consensus.generated.application_hash), + block_id: Some(block_id.as_slice().to_vec()), + } +} + +pub fn proto_tx_from_tx(tx: &FuelTransaction) -> ProtoTransaction { + match tx { + FuelTransaction::Script(script) => { + let proto_script = ProtoScriptTx { + script_gas_limit: *script.script_gas_limit(), + receipts_root: bytes32_to_vec(script.receipts_root()), + script: script.script().clone(), + script_data: script.script_data().clone(), + policies: Some(proto_policies_from_policies(script.policies())), + inputs: script.inputs().iter().map(proto_input_from_input).collect(), + outputs: script + .outputs() + .iter() + .map(proto_output_from_output) + .collect(), + witnesses: script + .witnesses() + .iter() + .map(|witness| witness.as_ref().to_vec()) + .collect(), + metadata: None, + }; + + ProtoTransaction { + variant: Some(ProtoTransactionVariant::Script(proto_script)), + } + } + FuelTransaction::Create(create) => { + let proto_create = ProtoCreateTx { + bytecode_witness_index: u32::from(*create.bytecode_witness_index()), + salt: create.salt().as_ref().to_vec(), + storage_slots: create + .storage_slots() + .iter() + .map(proto_storage_slot_from_storage_slot) + .collect(), + policies: 
Some(proto_policies_from_policies(create.policies())), + inputs: create.inputs().iter().map(proto_input_from_input).collect(), + outputs: create + .outputs() + .iter() + .map(proto_output_from_output) + .collect(), + witnesses: create + .witnesses() + .iter() + .map(|witness| witness.as_ref().to_vec()) + .collect(), + metadata: None, + }; + + ProtoTransaction { + variant: Some(ProtoTransactionVariant::Create(proto_create)), + } + } + FuelTransaction::Mint(mint) => { + let proto_mint = ProtoMintTx { + tx_pointer: Some(proto_tx_pointer(mint.tx_pointer())), + input_contract: Some(proto_contract_input_from_contract( + mint.input_contract(), + )), + output_contract: Some(proto_contract_output_from_contract( + mint.output_contract(), + )), + mint_amount: *mint.mint_amount(), + mint_asset_id: mint.mint_asset_id().as_ref().to_vec(), + gas_price: *mint.gas_price(), + metadata: None, + }; + + ProtoTransaction { + variant: Some(ProtoTransactionVariant::Mint(proto_mint)), + } + } + FuelTransaction::Upgrade(upgrade) => { + let proto_upgrade = ProtoUpgradeTx { + purpose: Some(proto_upgrade_purpose(upgrade.upgrade_purpose())), + policies: Some(proto_policies_from_policies(upgrade.policies())), + inputs: upgrade + .inputs() + .iter() + .map(proto_input_from_input) + .collect(), + outputs: upgrade + .outputs() + .iter() + .map(proto_output_from_output) + .collect(), + witnesses: upgrade + .witnesses() + .iter() + .map(|witness| witness.as_ref().to_vec()) + .collect(), + metadata: None, + }; + + ProtoTransaction { + variant: Some(ProtoTransactionVariant::Upgrade(proto_upgrade)), + } + } + FuelTransaction::Upload(upload) => { + let proto_upload = ProtoUploadTx { + root: bytes32_to_vec(upload.bytecode_root()), + witness_index: u32::from(*upload.bytecode_witness_index()), + subsection_index: u32::from(*upload.subsection_index()), + subsections_number: u32::from(*upload.subsections_number()), + proof_set: upload.proof_set().iter().map(bytes32_to_vec).collect(), + policies: 
Some(proto_policies_from_policies(upload.policies())), + inputs: upload.inputs().iter().map(proto_input_from_input).collect(), + outputs: upload + .outputs() + .iter() + .map(proto_output_from_output) + .collect(), + witnesses: upload + .witnesses() + .iter() + .map(|witness| witness.as_ref().to_vec()) + .collect(), + metadata: None, + }; + + ProtoTransaction { + variant: Some(ProtoTransactionVariant::Upload(proto_upload)), + } + } + FuelTransaction::Blob(blob) => { + let proto_blob = ProtoBlobTx { + blob_id: blob.blob_id().as_ref().to_vec(), + witness_index: u32::from(*blob.bytecode_witness_index()), + policies: Some(proto_policies_from_policies(blob.policies())), + inputs: blob.inputs().iter().map(proto_input_from_input).collect(), + outputs: blob + .outputs() + .iter() + .map(proto_output_from_output) + .collect(), + witnesses: blob + .witnesses() + .iter() + .map(|witness| witness.as_ref().to_vec()) + .collect(), + metadata: None, + }; + + ProtoTransaction { + variant: Some(ProtoTransactionVariant::Blob(proto_blob)), + } + } + } +} + +fn proto_input_from_input(input: &Input) -> ProtoInput { + match input { + Input::CoinSigned(coin_signed) => ProtoInput { + variant: Some(ProtoInputVariant::CoinSigned(ProtoCoinSignedInput { + utxo_id: Some(proto_utxo_id_from_utxo_id(&coin_signed.utxo_id)), + owner: coin_signed.owner.as_ref().to_vec(), + amount: coin_signed.amount, + asset_id: coin_signed.asset_id.as_ref().to_vec(), + tx_pointer: Some(proto_tx_pointer(&coin_signed.tx_pointer)), + witness_index: coin_signed.witness_index.into(), + predicate_gas_used: 0, + predicate: vec![], + predicate_data: vec![], + })), + }, + Input::CoinPredicate(coin_predicate) => ProtoInput { + variant: Some(ProtoInputVariant::CoinPredicate(ProtoCoinPredicateInput { + utxo_id: Some(proto_utxo_id_from_utxo_id(&coin_predicate.utxo_id)), + owner: coin_predicate.owner.as_ref().to_vec(), + amount: coin_predicate.amount, + asset_id: coin_predicate.asset_id.as_ref().to_vec(), + tx_pointer: 
Some(proto_tx_pointer(&coin_predicate.tx_pointer)), + witness_index: 0, + predicate_gas_used: coin_predicate.predicate_gas_used, + predicate: coin_predicate.predicate.as_ref().to_vec(), + predicate_data: coin_predicate.predicate_data.as_ref().to_vec(), + })), + }, + Input::Contract(contract) => ProtoInput { + variant: Some(ProtoInputVariant::Contract(ProtoContractInput { + utxo_id: Some(proto_utxo_id_from_utxo_id(&contract.utxo_id)), + balance_root: bytes32_to_vec(&contract.balance_root), + state_root: bytes32_to_vec(&contract.state_root), + tx_pointer: Some(proto_tx_pointer(&contract.tx_pointer)), + contract_id: contract.contract_id.as_ref().to_vec(), + })), + }, + Input::MessageCoinSigned(message) => ProtoInput { + variant: Some(ProtoInputVariant::MessageCoinSigned( + ProtoMessageCoinSignedInput { + sender: message.sender.as_ref().to_vec(), + recipient: message.recipient.as_ref().to_vec(), + amount: message.amount, + nonce: message.nonce.as_ref().to_vec(), + witness_index: message.witness_index.into(), + predicate_gas_used: 0, + data: Vec::new(), + predicate: Vec::new(), + predicate_data: Vec::new(), + }, + )), + }, + Input::MessageCoinPredicate(message) => ProtoInput { + variant: Some(ProtoInputVariant::MessageCoinPredicate( + ProtoMessageCoinPredicateInput { + sender: message.sender.as_ref().to_vec(), + recipient: message.recipient.as_ref().to_vec(), + amount: message.amount, + nonce: message.nonce.as_ref().to_vec(), + witness_index: 0, + predicate_gas_used: message.predicate_gas_used, + data: Vec::new(), + predicate: message.predicate.as_ref().to_vec(), + predicate_data: message.predicate_data.as_ref().to_vec(), + }, + )), + }, + Input::MessageDataSigned(message) => ProtoInput { + variant: Some(ProtoInputVariant::MessageDataSigned( + ProtoMessageDataSignedInput { + sender: message.sender.as_ref().to_vec(), + recipient: message.recipient.as_ref().to_vec(), + amount: message.amount, + nonce: message.nonce.as_ref().to_vec(), + witness_index: 
message.witness_index.into(), + predicate_gas_used: 0, + data: message.data.as_ref().to_vec(), + predicate: Vec::new(), + predicate_data: Vec::new(), + }, + )), + }, + Input::MessageDataPredicate(message) => ProtoInput { + variant: Some(ProtoInputVariant::MessageDataPredicate( + ProtoMessageDataPredicateInput { + sender: message.sender.as_ref().to_vec(), + recipient: message.recipient.as_ref().to_vec(), + amount: message.amount, + nonce: message.nonce.as_ref().to_vec(), + witness_index: 0, + predicate_gas_used: message.predicate_gas_used, + data: message.data.as_ref().to_vec(), + predicate: message.predicate.as_ref().to_vec(), + predicate_data: message.predicate_data.as_ref().to_vec(), + }, + )), + }, + } +} + +fn proto_utxo_id_from_utxo_id(utxo_id: &UtxoId) -> ProtoUtxoId { + ProtoUtxoId { + tx_id: utxo_id.tx_id().as_ref().to_vec(), + output_index: utxo_id.output_index().into(), + } +} + +fn proto_tx_pointer(tx_pointer: &TxPointer) -> ProtoTxPointer { + ProtoTxPointer { + block_height: tx_pointer.block_height().into(), + tx_index: tx_pointer.tx_index().into(), + } +} + +fn proto_storage_slot_from_storage_slot(slot: &StorageSlot) -> ProtoStorageSlot { + ProtoStorageSlot { + key: slot.key().as_ref().to_vec(), + value: slot.value().as_ref().to_vec(), + } +} + +fn proto_contract_input_from_contract( + contract: &fuel_core_types::fuel_tx::input::contract::Contract, +) -> ProtoContractInput { + ProtoContractInput { + utxo_id: Some(proto_utxo_id_from_utxo_id(&contract.utxo_id)), + balance_root: bytes32_to_vec(&contract.balance_root), + state_root: bytes32_to_vec(&contract.state_root), + tx_pointer: Some(proto_tx_pointer(&contract.tx_pointer)), + contract_id: contract.contract_id.as_ref().to_vec(), + } +} + +fn proto_contract_output_from_contract( + contract: &fuel_core_types::fuel_tx::output::contract::Contract, +) -> ProtoContractOutput { + ProtoContractOutput { + input_index: u32::from(contract.input_index), + balance_root: bytes32_to_vec(&contract.balance_root), + 
state_root: bytes32_to_vec(&contract.state_root), + } +} + +fn proto_output_from_output(output: &Output) -> ProtoOutput { + let variant = match output { + Output::Coin { + to, + amount, + asset_id, + } => ProtoOutputVariant::Coin(ProtoCoinOutput { + to: to.as_ref().to_vec(), + amount: *amount, + asset_id: asset_id.as_ref().to_vec(), + }), + Output::Contract(contract) => { + ProtoOutputVariant::Contract(proto_contract_output_from_contract(contract)) + } + Output::Change { + to, + amount, + asset_id, + } => ProtoOutputVariant::Change(ProtoChangeOutput { + to: to.as_ref().to_vec(), + amount: *amount, + asset_id: asset_id.as_ref().to_vec(), + }), + Output::Variable { + to, + amount, + asset_id, + } => ProtoOutputVariant::Variable(ProtoVariableOutput { + to: to.as_ref().to_vec(), + amount: *amount, + asset_id: asset_id.as_ref().to_vec(), + }), + Output::ContractCreated { + contract_id, + state_root, + } => ProtoOutputVariant::ContractCreated(ProtoContractCreatedOutput { + contract_id: contract_id.as_ref().to_vec(), + state_root: bytes32_to_vec(state_root), + }), + }; + + ProtoOutput { + variant: Some(variant), + } +} + +fn proto_upgrade_purpose(purpose: &UpgradePurpose) -> ProtoUpgradePurpose { + let variant = match purpose { + UpgradePurpose::ConsensusParameters { + witness_index, + checksum, + } => ProtoUpgradePurposeVariant::ConsensusParameters( + ProtoUpgradeConsensusParameters { + witness_index: u32::from(*witness_index), + checksum: checksum.as_ref().to_vec(), + }, + ), + UpgradePurpose::StateTransition { root } => { + ProtoUpgradePurposeVariant::StateTransition(ProtoUpgradeStateTransition { + root: root.as_ref().to_vec(), + }) + } + }; + + ProtoUpgradePurpose { + variant: Some(variant), + } +} + +fn proto_policies_from_policies( + policies: &fuel_core_types::fuel_tx::policies::Policies, +) -> ProtoPolicies { + let mut values = [0u64; 6]; + let mut truncated_len = 0; + if let Some(value) = policies.get(PolicyType::Tip) { + values[0] = value; + truncated_len = 1; + 
} + if let Some(value) = policies.get(PolicyType::WitnessLimit) { + values[1] = value; + truncated_len = 2; + } + if let Some(value) = policies.get(PolicyType::Maturity) { + values[2] = value; + truncated_len = 3; + } + if let Some(value) = policies.get(PolicyType::MaxFee) { + values[3] = value; + truncated_len = 4; + } + if let Some(value) = policies.get(PolicyType::Expiration) { + values[4] = value; + truncated_len = 5; + } + if let Some(value) = policies.get(PolicyType::Owner) { + values[5] = value; + truncated_len = 6; + } + let bits = policies.bits(); + values[..truncated_len].to_vec(); + ProtoPolicies { + bits, + values: values.to_vec(), + } +} + +fn proto_script_execution_result( + result: &ScriptExecutionResult, +) -> crate::protobuf_types::ScriptExecutionResult { + use crate::protobuf_types::{ + ScriptExecutionResult as ProtoScriptExecutionResult, + ScriptExecutionResultGenericFailure as ProtoScriptExecutionResultGenericFailure, + ScriptExecutionResultPanic as ProtoScriptExecutionResultPanic, + ScriptExecutionResultRevert as ProtoScriptExecutionResultRevert, + ScriptExecutionResultSuccess as ProtoScriptExecutionResultSuccess, + }; + + let variant = match result { + ScriptExecutionResult::Success => ProtoScriptExecutionResultVariant::Success( + ProtoScriptExecutionResultSuccess {}, + ), + ScriptExecutionResult::Revert => { + ProtoScriptExecutionResultVariant::Revert(ProtoScriptExecutionResultRevert {}) + } + ScriptExecutionResult::Panic => { + ProtoScriptExecutionResultVariant::Panic(ProtoScriptExecutionResultPanic {}) + } + ScriptExecutionResult::GenericFailure(code) => { + ProtoScriptExecutionResultVariant::GenericFailure( + ProtoScriptExecutionResultGenericFailure { code: *code }, + ) + } + }; + + ProtoScriptExecutionResult { + variant: Some(variant), + } +} + +fn proto_panic_instruction( + panic_instruction: &PanicInstruction, +) -> crate::protobuf_types::PanicInstruction { + use crate::protobuf_types::PanicReason as ProtoPanicReason; + + let 
reason_value = *panic_instruction.reason() as u8; + let reason = ProtoPanicReason::try_from(i32::from(reason_value)) + .unwrap_or(ProtoPanicReason::Unknown); + + crate::protobuf_types::PanicInstruction { + reason: reason as i32, + instruction: *panic_instruction.instruction(), + } +} + +pub fn proto_receipt_from_receipt(receipt: &FuelReceipt) -> ProtoReceipt { + match receipt { + FuelReceipt::Call { + id, + to, + amount, + asset_id, + gas, + param1, + param2, + pc, + is, + } => ProtoReceipt { + variant: Some(ProtoReceiptVariant::Call( + crate::protobuf_types::CallReceipt { + id: id.as_ref().to_vec(), + to: to.as_ref().to_vec(), + amount: *amount, + asset_id: asset_id.as_ref().to_vec(), + gas: *gas, + param1: *param1, + param2: *param2, + pc: *pc, + is: *is, + }, + )), + }, + FuelReceipt::Return { id, val, pc, is } => ProtoReceipt { + variant: Some(ProtoReceiptVariant::ReturnReceipt( + crate::protobuf_types::ReturnReceipt { + id: id.as_ref().to_vec(), + val: *val, + pc: *pc, + is: *is, + }, + )), + }, + FuelReceipt::ReturnData { + id, + ptr, + len, + digest, + pc, + is, + data, + } => ProtoReceipt { + variant: Some(ProtoReceiptVariant::ReturnData( + crate::protobuf_types::ReturnDataReceipt { + id: id.as_ref().to_vec(), + ptr: *ptr, + len: *len, + digest: digest.as_ref().to_vec(), + pc: *pc, + is: *is, + data: data.as_ref().map(|b| b.to_vec()), + }, + )), + }, + FuelReceipt::Panic { + id, + reason, + pc, + is, + contract_id, + } => ProtoReceipt { + variant: Some(ProtoReceiptVariant::Panic( + crate::protobuf_types::PanicReceipt { + id: id.as_ref().to_vec(), + reason: Some(proto_panic_instruction(reason)), + pc: *pc, + is: *is, + contract_id: contract_id.as_ref().map(|cid| cid.as_ref().to_vec()), + }, + )), + }, + FuelReceipt::Revert { id, ra, pc, is } => ProtoReceipt { + variant: Some(ProtoReceiptVariant::Revert( + crate::protobuf_types::RevertReceipt { + id: id.as_ref().to_vec(), + ra: *ra, + pc: *pc, + is: *is, + }, + )), + }, + FuelReceipt::Log { + id, + ra, + rb, 
+ rc, + rd, + pc, + is, + } => ProtoReceipt { + variant: Some(ProtoReceiptVariant::Log( + crate::protobuf_types::LogReceipt { + id: id.as_ref().to_vec(), + ra: *ra, + rb: *rb, + rc: *rc, + rd: *rd, + pc: *pc, + is: *is, + }, + )), + }, + FuelReceipt::LogData { + id, + ra, + rb, + ptr, + len, + digest, + pc, + is, + data, + } => ProtoReceipt { + variant: Some(ProtoReceiptVariant::LogData( + crate::protobuf_types::LogDataReceipt { + id: id.as_ref().to_vec(), + ra: *ra, + rb: *rb, + ptr: *ptr, + len: *len, + digest: digest.as_ref().to_vec(), + pc: *pc, + is: *is, + data: data.as_ref().map(|b| b.to_vec()), + }, + )), + }, + FuelReceipt::Transfer { + id, + to, + amount, + asset_id, + pc, + is, + } => ProtoReceipt { + variant: Some(ProtoReceiptVariant::Transfer( + crate::protobuf_types::TransferReceipt { + id: id.as_ref().to_vec(), + to: to.as_ref().to_vec(), + amount: *amount, + asset_id: asset_id.as_ref().to_vec(), + pc: *pc, + is: *is, + }, + )), + }, + FuelReceipt::TransferOut { + id, + to, + amount, + asset_id, + pc, + is, + } => ProtoReceipt { + variant: Some(ProtoReceiptVariant::TransferOut( + crate::protobuf_types::TransferOutReceipt { + id: id.as_ref().to_vec(), + to: to.as_ref().to_vec(), + amount: *amount, + asset_id: asset_id.as_ref().to_vec(), + pc: *pc, + is: *is, + }, + )), + }, + FuelReceipt::ScriptResult { result, gas_used } => ProtoReceipt { + variant: Some(ProtoReceiptVariant::ScriptResult( + crate::protobuf_types::ScriptResultReceipt { + result: Some(proto_script_execution_result(result)), + gas_used: *gas_used, + }, + )), + }, + FuelReceipt::MessageOut { + sender, + recipient, + amount, + nonce, + len, + digest, + data, + } => ProtoReceipt { + variant: Some(ProtoReceiptVariant::MessageOut( + crate::protobuf_types::MessageOutReceipt { + sender: sender.as_ref().to_vec(), + recipient: recipient.as_ref().to_vec(), + amount: *amount, + nonce: nonce.as_ref().to_vec(), + len: *len, + digest: digest.as_ref().to_vec(), + data: data.as_ref().map(|b| 
b.to_vec()), + }, + )), + }, + FuelReceipt::Mint { + sub_id, + contract_id, + val, + pc, + is, + } => ProtoReceipt { + variant: Some(ProtoReceiptVariant::Mint( + crate::protobuf_types::MintReceipt { + sub_id: sub_id.as_ref().to_vec(), + contract_id: contract_id.as_ref().to_vec(), + val: *val, + pc: *pc, + is: *is, + }, + )), + }, + FuelReceipt::Burn { + sub_id, + contract_id, + val, + pc, + is, + } => ProtoReceipt { + variant: Some(ProtoReceiptVariant::Burn( + crate::protobuf_types::BurnReceipt { + sub_id: sub_id.as_ref().to_vec(), + contract_id: contract_id.as_ref().to_vec(), + val: *val, + pc: *pc, + is: *is, + }, + )), + }, + } +} diff --git a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/serializer_adapter/proto_to_fuel_conversions.rs b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/serializer_adapter/proto_to_fuel_conversions.rs new file mode 100644 index 00000000000..bf715b0e30e --- /dev/null +++ b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/serializer_adapter/proto_to_fuel_conversions.rs @@ -0,0 +1,1169 @@ +#[cfg(feature = "fault-proving")] +use crate::blocks::importer_and_db_source::serializer_adapter::ChainId; +use crate::{ + protobuf_types::{ + Block as ProtoBlock, + ContractInput as ProtoContractInput, + ContractOutput as ProtoContractOutput, + Header as ProtoHeader, + Input as ProtoInput, + Output as ProtoOutput, + PanicInstruction as ProtoPanicInstruction, + Policies as ProtoPolicies, + StorageSlot as ProtoStorageSlot, + Transaction as ProtoTransaction, + TxPointer as ProtoTxPointer, + UpgradePurpose as ProtoUpgradePurpose, + UtxoId as ProtoUtxoId, + block::VersionedBlock as ProtoVersionedBlock, + header::VersionedHeader as ProtoVersionedHeader, + input::Variant as ProtoInputVariant, + output::Variant as ProtoOutputVariant, + receipt::Variant as ProtoReceiptVariant, + script_execution_result::Variant as ProtoScriptExecutionResultVariant, + transaction::Variant as 
ProtoTransactionVariant, + upgrade_purpose::Variant as ProtoUpgradePurposeVariant, + }, + result::Error, +}; +use anyhow::anyhow; +use fuel_core_types::{ + blockchain::{ + block::Block as FuelBlock, + header::{ + ApplicationHeader, + ConsensusHeader, + PartialBlockHeader, + }, + primitives::{ + DaBlockHeight, + Empty, + }, + }, + fuel_asm::{ + PanicInstruction, + PanicReason, + }, + fuel_tx::{ + Address, + BlobBody, + Bytes32, + Input, + Output, + Receipt as FuelReceipt, + ScriptExecutionResult, + StorageSlot, + Transaction as FuelTransaction, + TxPointer, + UpgradePurpose, + UploadBody, + UtxoId, + Witness, + field::ReceiptsRoot as _, + policies::{ + Policies as FuelPolicies, + PoliciesBits, + PolicyType, + }, + }, + fuel_types::{ + AssetId, + ContractId, + Nonce, + SubAssetId, + }, + tai64, +}; + +fn tx_pointer_from_proto(proto: &ProtoTxPointer) -> crate::result::Result { + let block_height = proto.block_height.into(); + #[allow(clippy::useless_conversion)] + let tx_index = proto.tx_index.try_into().map_err(|e| { + Error::Serialization(anyhow!("Could not convert tx_index to target type: {}", e)) + })?; + Ok(TxPointer::new(block_height, tx_index)) +} + +fn storage_slot_from_proto( + proto: &ProtoStorageSlot, +) -> crate::result::Result { + let key = Bytes32::try_from(proto.key.as_slice()).map_err(|e| { + Error::Serialization(anyhow!( + "Could not convert storage slot key to Bytes32: {}", + e + )) + })?; + let value = Bytes32::try_from(proto.value.as_slice()).map_err(|e| { + Error::Serialization(anyhow!( + "Could not convert storage slot value to Bytes32: {}", + e + )) + })?; + Ok(StorageSlot::new(key, value)) +} + +fn contract_input_from_proto( + proto: &ProtoContractInput, +) -> crate::result::Result { + let utxo_proto = proto.utxo_id.as_ref().ok_or_else(|| { + Error::Serialization(anyhow!("Missing utxo_id on contract input")) + })?; + let utxo_id = utxo_id_from_proto(utxo_proto)?; + let balance_root = Bytes32::try_from(proto.balance_root.as_slice()).map_err(|e| 
{ + Error::Serialization(anyhow!("Could not convert balance_root to Bytes32: {}", e)) + })?; + let state_root = Bytes32::try_from(proto.state_root.as_slice()).map_err(|e| { + Error::Serialization(anyhow!("Could not convert state_root to Bytes32: {}", e)) + })?; + let tx_pointer_proto = proto.tx_pointer.as_ref().ok_or_else(|| { + Error::Serialization(anyhow!("Missing tx_pointer on contract input")) + })?; + let tx_pointer = tx_pointer_from_proto(tx_pointer_proto)?; + let contract_id = + fuel_core_types::fuel_types::ContractId::try_from(proto.contract_id.as_slice()) + .map_err(|e| Error::Serialization(anyhow!(e)))?; + + Ok(fuel_core_types::fuel_tx::input::contract::Contract { + utxo_id, + balance_root, + state_root, + tx_pointer, + contract_id, + }) +} + +fn contract_output_from_proto( + proto: &ProtoContractOutput, +) -> crate::result::Result { + let input_index = u16::try_from(proto.input_index).map_err(|e| { + Error::Serialization(anyhow!( + "Could not convert contract output input_index to u16: {}", + e + )) + })?; + let balance_root = Bytes32::try_from(proto.balance_root.as_slice()).map_err(|e| { + Error::Serialization(anyhow!( + "Could not convert contract output balance_root to Bytes32: {}", + e + )) + })?; + let state_root = Bytes32::try_from(proto.state_root.as_slice()).map_err(|e| { + Error::Serialization(anyhow!( + "Could not convert contract output state_root to Bytes32: {}", + e + )) + })?; + + Ok(fuel_core_types::fuel_tx::output::contract::Contract { + input_index, + balance_root, + state_root, + }) +} + +fn output_from_proto_output(proto_output: &ProtoOutput) -> crate::result::Result { + match proto_output + .variant + .as_ref() + .ok_or_else(|| Error::Serialization(anyhow!("Missing output variant")))? 
+ { + ProtoOutputVariant::Coin(coin) => { + let to = Address::try_from(coin.to.as_slice()) + .map_err(|e| Error::Serialization(anyhow!(e)))?; + let asset_id = + fuel_core_types::fuel_types::AssetId::try_from(coin.asset_id.as_slice()) + .map_err(|e| Error::Serialization(anyhow!(e)))?; + Ok(Output::coin(to, coin.amount, asset_id)) + } + ProtoOutputVariant::Contract(contract) => { + let contract = contract_output_from_proto(contract)?; + Ok(Output::Contract(contract)) + } + ProtoOutputVariant::Change(change) => { + let to = Address::try_from(change.to.as_slice()) + .map_err(|e| Error::Serialization(anyhow!(e)))?; + let asset_id = fuel_core_types::fuel_types::AssetId::try_from( + change.asset_id.as_slice(), + ) + .map_err(|e| Error::Serialization(anyhow!(e)))?; + Ok(Output::change(to, change.amount, asset_id)) + } + ProtoOutputVariant::Variable(variable) => { + let to = Address::try_from(variable.to.as_slice()) + .map_err(|e| Error::Serialization(anyhow!(e)))?; + let asset_id = fuel_core_types::fuel_types::AssetId::try_from( + variable.asset_id.as_slice(), + ) + .map_err(|e| Error::Serialization(anyhow!(e)))?; + Ok(Output::variable(to, variable.amount, asset_id)) + } + ProtoOutputVariant::ContractCreated(contract_created) => { + let contract_id = fuel_core_types::fuel_types::ContractId::try_from( + contract_created.contract_id.as_slice(), + ) + .map_err(|e| Error::Serialization(anyhow!(e)))?; + let state_root = Bytes32::try_from(contract_created.state_root.as_slice()) + .map_err(|e| { + Error::Serialization(anyhow!( + "Could not convert state_root to Bytes32: {}", + e + )) + })?; + Ok(Output::contract_created(contract_id, state_root)) + } + } +} + +fn upgrade_purpose_from_proto( + proto: &ProtoUpgradePurpose, +) -> crate::result::Result { + match proto + .variant + .as_ref() + .ok_or_else(|| Error::Serialization(anyhow!("Missing upgrade purpose variant")))? 
+ { + ProtoUpgradePurposeVariant::ConsensusParameters(consensus) => { + let witness_index = u16::try_from(consensus.witness_index).map_err(|e| { + Error::Serialization(anyhow!( + "Could not convert witness_index to u16: {}", + e + )) + })?; + let checksum = + Bytes32::try_from(consensus.checksum.as_slice()).map_err(|e| { + Error::Serialization(anyhow!( + "Could not convert checksum to Bytes32: {}", + e + )) + })?; + Ok(UpgradePurpose::ConsensusParameters { + witness_index, + checksum, + }) + } + ProtoUpgradePurposeVariant::StateTransition(state) => { + let root = Bytes32::try_from(state.root.as_slice()).map_err(|e| { + Error::Serialization(anyhow!( + "Could not convert state transition root to Bytes32: {}", + e + )) + })?; + Ok(UpgradePurpose::StateTransition { root }) + } + } +} + +fn utxo_id_from_proto(proto_utxo: &ProtoUtxoId) -> crate::result::Result { + let tx_id = Bytes32::try_from(proto_utxo.tx_id.as_slice()).map_err(|e| { + Error::Serialization(anyhow!("Could not convert tx_id to Bytes32: {}", e)) + })?; + let output_index = u16::try_from(proto_utxo.output_index).map_err(|e| { + Error::Serialization(anyhow!("Could not convert output_index to u16: {}", e)) + })?; + Ok(UtxoId::new(tx_id, output_index)) +} + +pub fn bytes32_to_vec(bytes: &fuel_core_types::fuel_types::Bytes32) -> Vec { + bytes.as_ref().to_vec() +} + +fn script_execution_result_from_proto( + proto: &crate::protobuf_types::ScriptExecutionResult, +) -> crate::result::Result { + let variant = proto.variant.as_ref().ok_or_else(|| { + Error::Serialization(anyhow!("Missing script execution result variant")) + })?; + + let result = match variant { + ProtoScriptExecutionResultVariant::Success(_) => ScriptExecutionResult::Success, + ProtoScriptExecutionResultVariant::Revert(_) => ScriptExecutionResult::Revert, + ProtoScriptExecutionResultVariant::Panic(_) => ScriptExecutionResult::Panic, + ProtoScriptExecutionResultVariant::GenericFailure(failure) => { + 
ScriptExecutionResult::GenericFailure(failure.code) + } + }; + + Ok(result) +} + +fn panic_instruction_from_proto(proto: &ProtoPanicInstruction) -> PanicInstruction { + use crate::protobuf_types::PanicReason as ProtoPanicReason; + + let reason_proto = + ProtoPanicReason::try_from(proto.reason).unwrap_or(ProtoPanicReason::Unknown); + let reason = PanicReason::from(reason_proto as u8); + PanicInstruction::error(reason, proto.instruction) +} + +fn receipt_from_proto( + proto_receipt: &crate::protobuf_types::Receipt, +) -> crate::result::Result { + let variant = proto_receipt + .variant + .as_ref() + .ok_or_else(|| Error::Serialization(anyhow!("Missing receipt variant")))?; + + let receipt = match variant { + ProtoReceiptVariant::Call(call) => { + let id = ContractId::try_from(call.id.as_slice()) + .map_err(|e| Error::Serialization(anyhow!(e)))?; + let to = ContractId::try_from(call.to.as_slice()) + .map_err(|e| Error::Serialization(anyhow!(e)))?; + let asset_id = AssetId::try_from(call.asset_id.as_slice()) + .map_err(|e| Error::Serialization(anyhow!(e)))?; + Ok(FuelReceipt::call( + id, + to, + call.amount, + asset_id, + call.gas, + call.param1, + call.param2, + call.pc, + call.is, + )) + } + ProtoReceiptVariant::ReturnReceipt(ret) => { + let id = ContractId::try_from(ret.id.as_slice()) + .map_err(|e| Error::Serialization(anyhow!(e)))?; + Ok(FuelReceipt::ret(id, ret.val, ret.pc, ret.is)) + } + ProtoReceiptVariant::ReturnData(rd) => { + let id = ContractId::try_from(rd.id.as_slice()) + .map_err(|e| Error::Serialization(anyhow!(e)))?; + let digest = Bytes32::try_from(rd.digest.as_slice()).map_err(|e| { + Error::Serialization(anyhow!( + "Could not convert return data digest to Bytes32: {}", + e + )) + })?; + Ok(FuelReceipt::return_data_with_len( + id, + rd.ptr, + rd.len, + digest, + rd.pc, + rd.is, + rd.data.clone(), + )) + } + ProtoReceiptVariant::Panic(panic_receipt) => { + let id = ContractId::try_from(panic_receipt.id.as_slice()) + .map_err(|e| 
Error::Serialization(anyhow!(e)))?; + let reason_proto = panic_receipt + .reason + .as_ref() + .ok_or_else(|| Error::Serialization(anyhow!("Missing panic reason")))?; + let reason = panic_instruction_from_proto(reason_proto); + let contract_id = panic_receipt + .contract_id + .as_ref() + .map(|cid| { + ContractId::try_from(cid.as_slice()) + .map_err(|e| Error::Serialization(anyhow!(e))) + }) + .transpose()?; + Ok( + FuelReceipt::panic(id, reason, panic_receipt.pc, panic_receipt.is) + .with_panic_contract_id(contract_id), + ) + } + ProtoReceiptVariant::Revert(revert) => { + let id = ContractId::try_from(revert.id.as_slice()) + .map_err(|e| Error::Serialization(anyhow!(e)))?; + Ok(FuelReceipt::revert(id, revert.ra, revert.pc, revert.is)) + } + ProtoReceiptVariant::Log(log) => { + let id = ContractId::try_from(log.id.as_slice()) + .map_err(|e| Error::Serialization(anyhow!(e)))?; + Ok(FuelReceipt::log( + id, log.ra, log.rb, log.rc, log.rd, log.pc, log.is, + )) + } + ProtoReceiptVariant::LogData(log) => { + let id = ContractId::try_from(log.id.as_slice()) + .map_err(|e| Error::Serialization(anyhow!(e)))?; + let digest = Bytes32::try_from(log.digest.as_slice()).map_err(|e| { + Error::Serialization(anyhow!( + "Could not convert log data digest to Bytes32: {}", + e + )) + })?; + Ok(FuelReceipt::log_data_with_len( + id, + log.ra, + log.rb, + log.ptr, + log.len, + digest, + log.pc, + log.is, + log.data.clone(), + )) + } + ProtoReceiptVariant::Transfer(transfer) => { + let id = ContractId::try_from(transfer.id.as_slice()) + .map_err(|e| Error::Serialization(anyhow!(e)))?; + let to = ContractId::try_from(transfer.to.as_slice()) + .map_err(|e| Error::Serialization(anyhow!(e)))?; + let asset_id = AssetId::try_from(transfer.asset_id.as_slice()) + .map_err(|e| Error::Serialization(anyhow!(e)))?; + Ok(FuelReceipt::transfer( + id, + to, + transfer.amount, + asset_id, + transfer.pc, + transfer.is, + )) + } + ProtoReceiptVariant::TransferOut(transfer) => { + let id = 
ContractId::try_from(transfer.id.as_slice()) + .map_err(|e| Error::Serialization(anyhow!(e)))?; + let to = Address::try_from(transfer.to.as_slice()) + .map_err(|e| Error::Serialization(anyhow!(e)))?; + let asset_id = AssetId::try_from(transfer.asset_id.as_slice()) + .map_err(|e| Error::Serialization(anyhow!(e)))?; + Ok(FuelReceipt::transfer_out( + id, + to, + transfer.amount, + asset_id, + transfer.pc, + transfer.is, + )) + } + ProtoReceiptVariant::ScriptResult(result) => { + let script_result = result.result.as_ref().ok_or_else(|| { + Error::Serialization(anyhow!("Missing script result payload")) + })?; + let execution_result = script_execution_result_from_proto(script_result)?; + Ok(FuelReceipt::script_result( + execution_result, + result.gas_used, + )) + } + ProtoReceiptVariant::MessageOut(msg) => { + let sender = Address::try_from(msg.sender.as_slice()) + .map_err(|e| Error::Serialization(anyhow!(e)))?; + let recipient = Address::try_from(msg.recipient.as_slice()) + .map_err(|e| Error::Serialization(anyhow!(e)))?; + let nonce = Nonce::try_from(msg.nonce.as_slice()) + .map_err(|e| Error::Serialization(anyhow!(e)))?; + let digest = Bytes32::try_from(msg.digest.as_slice()).map_err(|e| { + Error::Serialization(anyhow!( + "Could not convert message digest to Bytes32: {}", + e + )) + })?; + Ok(FuelReceipt::message_out_with_len( + sender, + recipient, + msg.amount, + nonce, + msg.len, + digest, + msg.data.clone(), + )) + } + ProtoReceiptVariant::Mint(mint) => { + let sub_id = SubAssetId::try_from(mint.sub_id.as_slice()) + .map_err(|e| Error::Serialization(anyhow!(e)))?; + let contract_id = ContractId::try_from(mint.contract_id.as_slice()) + .map_err(|e| Error::Serialization(anyhow!(e)))?; + Ok(FuelReceipt::mint( + sub_id, + contract_id, + mint.val, + mint.pc, + mint.is, + )) + } + ProtoReceiptVariant::Burn(burn) => { + let sub_id = SubAssetId::try_from(burn.sub_id.as_slice()) + .map_err(|e| Error::Serialization(anyhow!(e)))?; + let contract_id = 
ContractId::try_from(burn.contract_id.as_slice()) + .map_err(|e| Error::Serialization(anyhow!(e)))?; + Ok(FuelReceipt::burn( + sub_id, + contract_id, + burn.val, + burn.pc, + burn.is, + )) + } + }?; + + Ok(receipt) +} + +pub fn fuel_block_from_protobuf( + proto_block: ProtoBlock, + msg_ids: &[fuel_core_types::fuel_tx::MessageId], + event_inbox_root: Bytes32, +) -> crate::result::Result<(FuelBlock, Vec)> { + let versioned_block = proto_block + .versioned_block + .ok_or_else(|| anyhow::anyhow!("Missing protobuf versioned_block")) + .map_err(Error::Serialization)?; + let (partial_header, txs, receipts) = match versioned_block { + ProtoVersionedBlock::V1(v1_inner) => { + let proto_header = v1_inner + .header + .clone() + .ok_or_else(|| anyhow::anyhow!("Missing protobuf header")) + .map_err(Error::Serialization)?; + let partial_header = partial_header_from_proto_header(&proto_header)?; + let txs = v1_inner + .transactions + .iter() + .map(tx_from_proto_tx) + .collect::>()?; + let receipts = v1_inner + .receipts + .iter() + .map(receipt_from_proto) + .collect::>()?; + (partial_header, txs, receipts) + } + }; + let block = FuelBlock::new( + partial_header, + txs, + msg_ids, + event_inbox_root, + #[cfg(feature = "fault-proving")] + &ChainId::default(), + ) + .map_err(|e| anyhow!(e)) + .map_err(Error::Serialization)?; + Ok((block, receipts)) +} + +pub fn partial_header_from_proto_header( + proto_header: &ProtoHeader, +) -> crate::result::Result { + let partial_header = PartialBlockHeader { + consensus: proto_header_to_empty_consensus_header(proto_header)?, + application: proto_header_to_empty_application_header(proto_header)?, + }; + Ok(partial_header) +} + +pub fn tx_from_proto_tx( + proto_tx: &ProtoTransaction, +) -> crate::result::Result { + let variant = proto_tx + .variant + .as_ref() + .ok_or_else(|| Error::Serialization(anyhow!("Missing transaction variant")))?; + + match variant { + ProtoTransactionVariant::Script(proto_script) => { + let policies = proto_script + 
.policies + .clone() + .map(|p| policies_from_proto_policies(&p)) + .unwrap_or_default(); + let inputs = proto_script + .inputs + .iter() + .map(input_from_proto_input) + .collect::>>()?; + let outputs = proto_script + .outputs + .iter() + .map(output_from_proto_output) + .collect::>>()?; + let witnesses = proto_script + .witnesses + .iter() + .map(|w| Ok(Witness::from(w.clone()))) + .collect::>>()?; + let mut script_tx = FuelTransaction::script( + proto_script.script_gas_limit, + proto_script.script.clone(), + proto_script.script_data.clone(), + policies, + inputs, + outputs, + witnesses, + ); + *script_tx.receipts_root_mut() = Bytes32::try_from( + proto_script.receipts_root.as_slice(), + ) + .map_err(|e| { + Error::Serialization(anyhow!( + "Could not convert receipts_root to Bytes32: {}", + e + )) + })?; + + Ok(FuelTransaction::Script(script_tx)) + } + ProtoTransactionVariant::Create(proto_create) => { + let policies = proto_create + .policies + .clone() + .map(|p| policies_from_proto_policies(&p)) + .unwrap_or_default(); + let inputs = proto_create + .inputs + .iter() + .map(input_from_proto_input) + .collect::>>()?; + let outputs = proto_create + .outputs + .iter() + .map(output_from_proto_output) + .collect::>>()?; + let witnesses = proto_create + .witnesses + .iter() + .map(|w| Ok(Witness::from(w.clone()))) + .collect::>>()?; + let storage_slots = proto_create + .storage_slots + .iter() + .map(storage_slot_from_proto) + .collect::>>()?; + let salt = + fuel_core_types::fuel_types::Salt::try_from(proto_create.salt.as_slice()) + .map_err(|e| Error::Serialization(anyhow!(e)))?; + let bytecode_witness_index = + u16::try_from(proto_create.bytecode_witness_index).map_err(|e| { + Error::Serialization(anyhow!( + "Could not convert bytecode_witness_index to u16: {}", + e + )) + })?; + + let create_tx = FuelTransaction::create( + bytecode_witness_index, + policies, + salt, + storage_slots, + inputs, + outputs, + witnesses, + ); + + Ok(FuelTransaction::Create(create_tx)) 
+ } + ProtoTransactionVariant::Mint(proto_mint) => { + let tx_pointer_proto = proto_mint.tx_pointer.as_ref().ok_or_else(|| { + Error::Serialization(anyhow!("Missing tx_pointer on mint transaction")) + })?; + let tx_pointer = tx_pointer_from_proto(tx_pointer_proto)?; + let input_contract_proto = + proto_mint.input_contract.as_ref().ok_or_else(|| { + Error::Serialization(anyhow!( + "Missing input_contract on mint transaction" + )) + })?; + let input_contract = contract_input_from_proto(input_contract_proto)?; + let output_contract_proto = + proto_mint.output_contract.as_ref().ok_or_else(|| { + Error::Serialization(anyhow!( + "Missing output_contract on mint transaction" + )) + })?; + let output_contract = contract_output_from_proto(output_contract_proto)?; + let mint_asset_id = fuel_core_types::fuel_types::AssetId::try_from( + proto_mint.mint_asset_id.as_slice(), + ) + .map_err(|e| Error::Serialization(anyhow!(e)))?; + + let mint_tx = FuelTransaction::mint( + tx_pointer, + input_contract, + output_contract, + proto_mint.mint_amount, + mint_asset_id, + proto_mint.gas_price, + ); + + Ok(FuelTransaction::Mint(mint_tx)) + } + ProtoTransactionVariant::Upgrade(proto_upgrade) => { + let purpose_proto = proto_upgrade.purpose.as_ref().ok_or_else(|| { + Error::Serialization(anyhow!("Missing purpose on upgrade transaction")) + })?; + let upgrade_purpose = upgrade_purpose_from_proto(purpose_proto)?; + let policies = proto_upgrade + .policies + .clone() + .map(|p| policies_from_proto_policies(&p)) + .unwrap_or_default(); + let inputs = proto_upgrade + .inputs + .iter() + .map(input_from_proto_input) + .collect::>>()?; + let outputs = proto_upgrade + .outputs + .iter() + .map(output_from_proto_output) + .collect::>>()?; + let witnesses = proto_upgrade + .witnesses + .iter() + .map(|w| Ok(Witness::from(w.clone()))) + .collect::>>()?; + + let upgrade_tx = FuelTransaction::upgrade( + upgrade_purpose, + policies, + inputs, + outputs, + witnesses, + ); + + 
Ok(FuelTransaction::Upgrade(upgrade_tx)) + } + ProtoTransactionVariant::Upload(proto_upload) => { + let policies = proto_upload + .policies + .clone() + .map(|p| policies_from_proto_policies(&p)) + .unwrap_or_default(); + let inputs = proto_upload + .inputs + .iter() + .map(input_from_proto_input) + .collect::>>()?; + let outputs = proto_upload + .outputs + .iter() + .map(output_from_proto_output) + .collect::>>()?; + let witnesses = proto_upload + .witnesses + .iter() + .map(|w| Ok(Witness::from(w.clone()))) + .collect::>>()?; + let root = Bytes32::try_from(proto_upload.root.as_slice()).map_err(|e| { + Error::Serialization(anyhow!( + "Could not convert upload root to Bytes32: {}", + e + )) + })?; + let witness_index = + u16::try_from(proto_upload.witness_index).map_err(|e| { + Error::Serialization(anyhow!( + "Could not convert witness_index to u16: {}", + e + )) + })?; + let subsection_index = + u16::try_from(proto_upload.subsection_index).map_err(|e| { + Error::Serialization(anyhow!( + "Could not convert subsection_index to u16: {}", + e + )) + })?; + let subsections_number = u16::try_from(proto_upload.subsections_number) + .map_err(|e| { + Error::Serialization(anyhow!( + "Could not convert subsections_number to u16: {}", + e + )) + })?; + let proof_set = proto_upload + .proof_set + .iter() + .map(|entry| { + Bytes32::try_from(entry.as_slice()).map_err(|e| { + Error::Serialization(anyhow!( + "Could not convert proof_set entry to Bytes32: {}", + e + )) + }) + }) + .collect::>>()?; + + let body = UploadBody { + root, + witness_index, + subsection_index, + subsections_number, + proof_set, + }; + + let upload_tx = + FuelTransaction::upload(body, policies, inputs, outputs, witnesses); + + Ok(FuelTransaction::Upload(upload_tx)) + } + ProtoTransactionVariant::Blob(proto_blob) => { + let policies = proto_blob + .policies + .clone() + .map(|p| policies_from_proto_policies(&p)) + .unwrap_or_default(); + let inputs = proto_blob + .inputs + .iter() + 
.map(input_from_proto_input) + .collect::>>()?; + let outputs = proto_blob + .outputs + .iter() + .map(output_from_proto_output) + .collect::>>()?; + let witnesses = proto_blob + .witnesses + .iter() + .map(|w| Ok(Witness::from(w.clone()))) + .collect::>>()?; + let blob_id = fuel_core_types::fuel_types::BlobId::try_from( + proto_blob.blob_id.as_slice(), + ) + .map_err(|e| Error::Serialization(anyhow!(e)))?; + let witness_index = u16::try_from(proto_blob.witness_index).map_err(|e| { + Error::Serialization(anyhow!( + "Could not convert blob witness_index to u16: {}", + e + )) + })?; + let body = BlobBody { + id: blob_id, + witness_index, + }; + + let blob_tx = + FuelTransaction::blob(body, policies, inputs, outputs, witnesses); + + Ok(FuelTransaction::Blob(blob_tx)) + } + } +} + +fn input_from_proto_input(proto_input: &ProtoInput) -> crate::result::Result { + let variant = proto_input + .variant + .as_ref() + .ok_or_else(|| Error::Serialization(anyhow!("Missing input variant")))?; + + match variant { + ProtoInputVariant::CoinSigned(proto_coin_signed) => { + let utxo_proto = proto_coin_signed + .utxo_id + .as_ref() + .ok_or_else(|| Error::Serialization(anyhow!("Missing utxo_id")))?; + let utxo_id = utxo_id_from_proto(utxo_proto)?; + let owner = + Address::try_from(proto_coin_signed.owner.as_slice()).map_err(|e| { + Error::Serialization(anyhow!( + "Could not convert owner to Address: {}", + e + )) + })?; + let asset_id = fuel_core_types::fuel_types::AssetId::try_from( + proto_coin_signed.asset_id.as_slice(), + ) + .map_err(|e| Error::Serialization(anyhow!(e)))?; + let tx_pointer_proto = proto_coin_signed + .tx_pointer + .as_ref() + .ok_or_else(|| Error::Serialization(anyhow!("Missing tx_pointer")))?; + let tx_pointer = tx_pointer_from_proto(tx_pointer_proto)?; + let witness_index = + u16::try_from(proto_coin_signed.witness_index).map_err(|e| { + Error::Serialization(anyhow!( + "Could not convert witness_index to u16: {}", + e + )) + })?; + + Ok(Input::coin_signed( + 
utxo_id, + owner, + proto_coin_signed.amount, + asset_id, + tx_pointer, + witness_index, + )) + } + ProtoInputVariant::CoinPredicate(proto_coin_predicate) => { + let utxo_proto = proto_coin_predicate + .utxo_id + .as_ref() + .ok_or_else(|| Error::Serialization(anyhow!("Missing utxo_id")))?; + let utxo_id = utxo_id_from_proto(utxo_proto)?; + let owner = Address::try_from(proto_coin_predicate.owner.as_slice()) + .map_err(|e| { + Error::Serialization(anyhow!( + "Could not convert owner to Address: {}", + e + )) + })?; + let asset_id = fuel_core_types::fuel_types::AssetId::try_from( + proto_coin_predicate.asset_id.as_slice(), + ) + .map_err(|e| Error::Serialization(anyhow!(e)))?; + let tx_pointer_proto = proto_coin_predicate + .tx_pointer + .as_ref() + .ok_or_else(|| Error::Serialization(anyhow!("Missing tx_pointer")))?; + let tx_pointer = tx_pointer_from_proto(tx_pointer_proto)?; + + Ok(Input::coin_predicate( + utxo_id, + owner, + proto_coin_predicate.amount, + asset_id, + tx_pointer, + proto_coin_predicate.predicate_gas_used, + proto_coin_predicate.predicate.clone(), + proto_coin_predicate.predicate_data.clone(), + )) + } + ProtoInputVariant::Contract(proto_contract) => { + let contract = contract_input_from_proto(proto_contract)?; + Ok(Input::Contract(contract)) + } + ProtoInputVariant::MessageCoinSigned(proto_message) => { + let sender = + Address::try_from(proto_message.sender.as_slice()).map_err(|e| { + Error::Serialization(anyhow!( + "Could not convert sender to Address: {}", + e + )) + })?; + let recipient = Address::try_from(proto_message.recipient.as_slice()) + .map_err(|e| { + Error::Serialization(anyhow!( + "Could not convert recipient to Address: {}", + e + )) + })?; + let nonce = fuel_core_types::fuel_types::Nonce::try_from( + proto_message.nonce.as_slice(), + ) + .map_err(|e| Error::Serialization(anyhow!(e)))?; + let witness_index = + u16::try_from(proto_message.witness_index).map_err(|e| { + Error::Serialization(anyhow!( + "Could not convert 
witness_index to u16: {}", + e + )) + })?; + + Ok(Input::message_coin_signed( + sender, + recipient, + proto_message.amount, + nonce, + witness_index, + )) + } + ProtoInputVariant::MessageCoinPredicate(proto_message) => { + let sender = + Address::try_from(proto_message.sender.as_slice()).map_err(|e| { + Error::Serialization(anyhow!( + "Could not convert sender to Address: {}", + e + )) + })?; + let recipient = Address::try_from(proto_message.recipient.as_slice()) + .map_err(|e| { + Error::Serialization(anyhow!( + "Could not convert recipient to Address: {}", + e + )) + })?; + let nonce = fuel_core_types::fuel_types::Nonce::try_from( + proto_message.nonce.as_slice(), + ) + .map_err(|e| Error::Serialization(anyhow!(e)))?; + + Ok(Input::message_coin_predicate( + sender, + recipient, + proto_message.amount, + nonce, + proto_message.predicate_gas_used, + proto_message.predicate.clone(), + proto_message.predicate_data.clone(), + )) + } + ProtoInputVariant::MessageDataSigned(proto_message) => { + let sender = + Address::try_from(proto_message.sender.as_slice()).map_err(|e| { + Error::Serialization(anyhow!( + "Could not convert sender to Address: {}", + e + )) + })?; + let recipient = Address::try_from(proto_message.recipient.as_slice()) + .map_err(|e| { + Error::Serialization(anyhow!( + "Could not convert recipient to Address: {}", + e + )) + })?; + let nonce = fuel_core_types::fuel_types::Nonce::try_from( + proto_message.nonce.as_slice(), + ) + .map_err(|e| Error::Serialization(anyhow!(e)))?; + let witness_index = + u16::try_from(proto_message.witness_index).map_err(|e| { + Error::Serialization(anyhow!( + "Could not convert witness_index to u16: {}", + e + )) + })?; + + Ok(Input::message_data_signed( + sender, + recipient, + proto_message.amount, + nonce, + witness_index, + proto_message.data.clone(), + )) + } + ProtoInputVariant::MessageDataPredicate(proto_message) => { + let sender = + Address::try_from(proto_message.sender.as_slice()).map_err(|e| { + 
Error::Serialization(anyhow!( + "Could not convert sender to Address: {}", + e + )) + })?; + let recipient = Address::try_from(proto_message.recipient.as_slice()) + .map_err(|e| { + Error::Serialization(anyhow!( + "Could not convert recipient to Address: {}", + e + )) + })?; + let nonce = fuel_core_types::fuel_types::Nonce::try_from( + proto_message.nonce.as_slice(), + ) + .map_err(|e| Error::Serialization(anyhow!(e)))?; + + Ok(Input::message_data_predicate( + sender, + recipient, + proto_message.amount, + nonce, + proto_message.predicate_gas_used, + proto_message.data.clone(), + proto_message.predicate.clone(), + proto_message.predicate_data.clone(), + )) + } + } +} + +fn policies_from_proto_policies(proto_policies: &ProtoPolicies) -> FuelPolicies { + let ProtoPolicies { bits, values } = proto_policies; + let mut policies = FuelPolicies::default(); + let bits = + PoliciesBits::from_bits(*bits).expect("Should be able to create from `u32`"); + if bits.contains(PoliciesBits::Tip) + && let Some(tip) = values.first() + { + policies.set(PolicyType::Tip, Some(*tip)); + } + if bits.contains(PoliciesBits::WitnessLimit) + && let Some(witness_limit) = values.get(1) + { + policies.set(PolicyType::WitnessLimit, Some(*witness_limit)); + } + if bits.contains(PoliciesBits::Maturity) + && let Some(maturity) = values.get(2) + { + policies.set(PolicyType::Maturity, Some(*maturity)); + } + if bits.contains(PoliciesBits::MaxFee) + && let Some(max_fee) = values.get(3) + { + policies.set(PolicyType::MaxFee, Some(*max_fee)); + } + if bits.contains(PoliciesBits::Expiration) + && let Some(expiration) = values.get(4) + { + policies.set(PolicyType::Expiration, Some(*expiration)); + } + if bits.contains(PoliciesBits::Owner) + && let Some(owner) = values.get(5) + { + policies.set(PolicyType::Owner, Some(*owner)); + } + policies +} + +pub fn proto_header_to_empty_application_header( + proto_header: &ProtoHeader, +) -> crate::result::Result> { + match proto_header.versioned_header.clone() { + 
Some(ProtoVersionedHeader::V1(header)) => { + let app_header = ApplicationHeader { + da_height: DaBlockHeight::from(header.da_height), + consensus_parameters_version: header.consensus_parameters_version, + state_transition_bytecode_version: header + .state_transition_bytecode_version, + generated: Empty {}, + }; + Ok(app_header) + } + Some(ProtoVersionedHeader::V2(header)) => { + if cfg!(feature = "fault-proving") { + let app_header = ApplicationHeader { + da_height: DaBlockHeight::from(header.da_height), + consensus_parameters_version: header.consensus_parameters_version, + state_transition_bytecode_version: header + .state_transition_bytecode_version, + generated: Empty {}, + }; + Ok(app_header) + } else { + Err(anyhow!("V2 headers require the 'fault-proving' feature")) + .map_err(Error::Serialization) + } + } + None => Err(anyhow!("Missing protobuf versioned_header")) + .map_err(Error::Serialization), + } +} + +/// Alias the consensus header into an empty one. +pub fn proto_header_to_empty_consensus_header( + proto_header: &ProtoHeader, +) -> crate::result::Result> { + match proto_header.versioned_header.clone() { + Some(ProtoVersionedHeader::V1(header)) => { + let consensus_header = ConsensusHeader { + prev_root: *Bytes32::from_bytes_ref_checked(&header.prev_root).ok_or( + Error::Serialization(anyhow!("Could create `Bytes32` from bytes")), + )?, + height: header.height.into(), + time: tai64::Tai64(header.time), + generated: Empty {}, + }; + Ok(consensus_header) + } + Some(ProtoVersionedHeader::V2(header)) => { + if cfg!(feature = "fault-proving") { + let consensus_header = ConsensusHeader { + prev_root: *Bytes32::from_bytes_ref_checked(&header.prev_root) + .ok_or(Error::Serialization(anyhow!( + "Could create `Bytes32` from bytes" + )))?, + height: header.height.into(), + time: tai64::Tai64(header.time), + generated: Empty {}, + }; + Ok(consensus_header) + } else { + Err(anyhow!("V2 headers require the 'fault-proving' feature")) + .map_err(Error::Serialization) 
+ } + } + None => Err(anyhow!("Missing protobuf versioned_header")) + .map_err(Error::Serialization), + } +} diff --git a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/sync_service.rs b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/sync_service.rs index be8b6b19e94..ef275bbc85d 100644 --- a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/sync_service.rs +++ b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/sync_service.rs @@ -1,6 +1,12 @@ -use crate::blocks::{ - BlockSourceEvent, - importer_and_db_source::BlockSerializer, +use crate::{ + blocks::{ + BlockSourceEvent, + importer_and_db_source::BlockSerializer, + }, + result::{ + Error, + Result, + }, }; use fuel_core_services::{ RunnableService, @@ -12,6 +18,7 @@ use fuel_core_services::{ }; use fuel_core_storage::{ self, + Error as StorageError, StorageInspect, tables::{ FuelBlocks, @@ -19,75 +26,88 @@ use fuel_core_storage::{ }, }; use fuel_core_types::{ + blockchain::block::Block as FuelBlock, fuel_tx::{ + Receipt, Transaction, TxId, }, fuel_types::BlockHeight, }; -use std::time::Duration; +use futures::{ + StreamExt, + TryStreamExt, + stream::FuturesOrdered, +}; use tokio::sync::mpsc::Sender; -pub struct SyncTask { +pub struct SyncTask { serializer: Serializer, block_return_sender: Sender>, db: DB, + receipts: Receipts, next_height: BlockHeight, - maybe_stop_height: Option, - new_ending_height: tokio::sync::oneshot::Receiver, + // exclusive, does not ask for this block + stop_height: BlockHeight, +} + +pub trait TxReceipts: 'static + Send + Sync { + fn get_receipts( + &self, + tx_id: &TxId, + ) -> impl Future>> + Send; } -impl SyncTask +impl SyncTask where Serializer: BlockSerializer + Send, - DB: StorageInspect + Send + 'static, - DB: StorageInspect + Send + 'static, - E: std::fmt::Debug + Send, + DB: Send + Sync + 'static, + DB: StorageInspect, + DB: StorageInspect, + Receipts: TxReceipts, { pub fn new( serializer: 
Serializer, block_return: Sender>, db: DB, + receipts: Receipts, db_starting_height: BlockHeight, - db_ending_height: Option, - new_ending_height: tokio::sync::oneshot::Receiver, + // does not ask for this block (exclusive) + db_ending_height: BlockHeight, ) -> Self { Self { serializer, block_return_sender: block_return, db, + receipts, next_height: db_starting_height, - maybe_stop_height: db_ending_height, - new_ending_height, - } - } - - async fn maybe_update_stop_height(&mut self) { - if let Ok(last_height) = self.new_ending_height.try_recv() { - tracing::info!("updating last height to {}", last_height); - self.maybe_stop_height = Some(last_height); + stop_height: db_ending_height, } } - fn get_block( + async fn get_block_and_receipts( &self, height: &BlockHeight, - ) -> Result, E> { - let maybe_block = StorageInspect::::get(&self.db, height)?; + ) -> Result)>> { + let maybe_block = StorageInspect::::get(&self.db, height) + .map_err(Error::block_source_error)?; if let Some(block) = maybe_block { let tx_ids = block.transactions(); let txs = self.get_txs(tx_ids)?; + let receipts = self.get_receipts(tx_ids).await?; let block = block.into_owned().uncompress(txs); - Ok(Some(block)) + Ok(Some((block, receipts))) } else { Ok(None) } } - fn get_txs(&self, tx_ids: &[TxId]) -> Result, E> { + fn get_txs(&self, tx_ids: &[TxId]) -> Result> { let mut txs = Vec::new(); for tx_id in tx_ids { - match StorageInspect::::get(&self.db, tx_id)? { + match StorageInspect::::get(&self.db, tx_id) + .map_err(Error::block_source_error)? + { Some(tx) => { tracing::debug!("found tx id: {:?}", tx_id); txs.push(tx.into_owned()); @@ -100,51 +120,56 @@ where Ok(txs) } - // For now just have arbitrary 10 ms sleep to avoid busy looping. - // This could be more complicated with increasing backoff times, etc. 
- async fn go_to_sleep_before_continuing(&self) { - tokio::time::sleep(Duration::from_millis(10)).await; + async fn get_receipts(&self, tx_ids: &[TxId]) -> Result> { + let receipt_futs = tx_ids.iter().map(|tx_id| self.receipts.get_receipts(tx_id)); + FuturesOrdered::from_iter(receipt_futs) + .then(|res| async move { res.map_err(Error::block_source_error) }) + .try_concat() + .await } } -impl RunnableTask for SyncTask +impl RunnableTask + for SyncTask where Serializer: BlockSerializer + Send + Sync, Serializer::Block: Send + Sync + 'static, DB: Send + Sync + 'static, - DB: StorageInspect + Send + 'static, - DB: StorageInspect + Send + 'static, - E: std::fmt::Debug + Send, + DB: StorageInspect, + DB: StorageInspect, + Receipts: TxReceipts, { async fn run(&mut self, _watcher: &mut StateWatcher) -> TaskNextAction { - self.maybe_update_stop_height().await; - if let Some(last_height) = self.maybe_stop_height - && self.next_height >= last_height - { + if self.next_height >= self.stop_height { tracing::info!( - "reached end height {}, putting task into hibernation", - last_height + "reached stop height {}, putting task into hibernation", + self.stop_height ); - futures::future::pending().await + let _ = _watcher.while_started().await; + return TaskNextAction::Stop } let next_height = self.next_height; - let res = self.get_block(&next_height); - let maybe_block = try_or_stop!(res, |e| { + let res = self.get_block_and_receipts(&next_height).await; + let maybe_block_and_receipts = try_or_stop!(res, |e| { tracing::error!("error fetching block at height {}: {:?}", next_height, e); }); - if let Some(block) = maybe_block { - let res = self.serializer.serialize_block(&block); + if let Some((block, receipts)) = maybe_block_and_receipts { + tracing::debug!( + "found block at height {:?}, sending to return channel", + next_height + ); + let res = self.serializer.serialize_block(&block, &receipts); let block = try_or_continue!(res); let event = 
BlockSourceEvent::OldBlock(BlockHeight::from(*next_height), block); let res = self.block_return_sender.send(event).await; try_or_continue!(res); self.next_height = BlockHeight::from((*next_height).saturating_add(1)); + TaskNextAction::Continue } else { - tracing::warn!("no block found at height {:?}, retrying", next_height); - self.go_to_sleep_before_continuing().await; + tracing::error!("no block found at height {:?}, retrying", next_height); + TaskNextAction::Stop } - TaskNextAction::Continue } async fn shutdown(self) -> anyhow::Result<()> { @@ -153,14 +178,15 @@ where } #[async_trait::async_trait] -impl RunnableService for SyncTask +impl RunnableService + for SyncTask where Serializer: BlockSerializer + Send + Sync + 'static, ::Block: Send + Sync + 'static, DB: Send + Sync + 'static, - DB: StorageInspect + Send + 'static, - DB: StorageInspect + Send + 'static, - E: std::fmt::Debug + Send, + DB: StorageInspect, + DB: StorageInspect, + Receipts: TxReceipts, { const NAME: &'static str = "BlockSourceSyncTask"; type SharedData = (); diff --git a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/tests.rs b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/tests.rs index 64d0256dbae..9f2570d546e 100644 --- a/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/tests.rs +++ b/crates/services/block_aggregator_api/src/blocks/importer_and_db_source/tests.rs @@ -1,8 +1,6 @@ #![allow(non_snake_case)] use super::*; -use crate::blocks::Block; -use ::postcard::to_allocvec; use fuel_core_services::stream::{ IntoBoxStream, pending, @@ -18,12 +16,17 @@ use fuel_core_storage::{ }, }; use futures::StreamExt; -use std::collections::HashSet; +use std::collections::HashMap; +use crate::blocks::importer_and_db_source::{ + serializer_adapter::SerializerAdapter, + sync_service::TxReceipts, +}; use fuel_core_types::{ blockchain::SealedBlock, fuel_tx::{ Transaction, + TxId, UniqueIdentifier, }, fuel_types::ChainId, @@ -31,26 +34,28 
@@ use fuel_core_types::{ }; use std::sync::Arc; -#[derive(Clone)] -pub struct MockSerializer; - -impl BlockSerializer for MockSerializer { - type Block = Block; +fn onchain_db() -> StorageTransaction> { + InMemoryStorage::default().into_transaction() +} - fn serialize_block(&self, block: &FuelBlock) -> Result { - let bytes_vec = to_allocvec(block).map_err(|e| { - Error::BlockSource(anyhow!("failed to serialize block: {}", e)) - })?; - Ok(Block::from(bytes_vec)) - } +struct MockTxReceiptsSource { + receipts_map: HashMap>, } -fn database() -> StorageTransaction> { - InMemoryStorage::default().into_transaction() +impl MockTxReceiptsSource { + fn new(receipts: &[(TxId, Vec)]) -> Self { + let receipts_map = receipts.iter().cloned().collect(); + Self { receipts_map } + } } -fn stream_with_pending(items: Vec) -> BoxStream { - tokio_stream::iter(items).chain(pending()).into_boxed() +impl TxReceipts for MockTxReceiptsSource { + async fn get_receipts(&self, tx_id: &TxId) -> Result> { + let receipts = self.receipts_map.get(tx_id).cloned().ok_or_else(|| { + Error::BlockSource(anyhow!("no receipts found for a tx with id {}", tx_id)) + })?; + Ok(receipts) + } } #[tokio::test] @@ -69,22 +74,26 @@ async fn next_block__gets_new_block_from_importer() { ); let blocks: Vec = vec![import_result]; let block_stream = tokio_stream::iter(blocks).chain(pending()).into_boxed(); - let serializer = MockSerializer; - let db = database(); + let serializer = SerializerAdapter; + let db = onchain_db(); + let receipt_source = MockTxReceiptsSource::new(&[]); let db_starting_height = BlockHeight::from(0u32); + // we don't need to sync anything, so we can use the same height for both + let db_ending_height = db_starting_height; let mut adapter = ImporterAndDbSource::new( block_stream, serializer.clone(), db, + receipt_source, db_starting_height, - None, + db_ending_height, ); // when let actual = adapter.next_block().await.unwrap(); // then - let serialized = 
serializer.serialize_block(&block.entity).unwrap(); + let serialized = serializer.serialize_block(&block.entity, &[]).unwrap(); let expected = BlockSourceEvent::NewBlock(*height, serialized); assert_eq!(expected, actual); } @@ -97,6 +106,25 @@ fn arbitrary_block_with_txs(height: BlockHeight) -> FuelBlock { block } +fn arbitrary_receipts() -> Vec { + let one = FuelReceipt::Mint { + sub_id: Default::default(), + contract_id: Default::default(), + val: 100, + pc: 0, + is: 0, + }; + let two = FuelReceipt::Transfer { + id: Default::default(), + to: Default::default(), + amount: 50, + asset_id: Default::default(), + pc: 0, + is: 0, + }; + vec![one, two] +} + #[tokio::test] async fn next_block__can_get_block_from_db() { // given @@ -104,14 +132,16 @@ async fn next_block__can_get_block_from_db() { let height1 = BlockHeight::from(0u32); let height2 = BlockHeight::from(1u32); let block = arbitrary_block_with_txs(height1); + let receipts = arbitrary_receipts(); let height = block.header().height(); - let serializer = MockSerializer; - let mut db = database(); - let mut tx = db.write_transaction(); + let serializer = SerializerAdapter; + let mut onchain_db = onchain_db(); + let mut tx = onchain_db.write_transaction(); let compressed_block = block.compress(&chain_id); tx.storage_as_mut::() .insert(height, &compressed_block) .unwrap(); + let tx_id = block.transactions()[0].id(&chain_id); tx.storage_as_mut::() .insert( &block.transactions()[0].id(&chain_id), @@ -119,13 +149,15 @@ async fn next_block__can_get_block_from_db() { ) .unwrap(); tx.commit().unwrap(); + let receipt_source = MockTxReceiptsSource::new(&[(tx_id, receipts.clone())]); let block_stream = tokio_stream::pending().into_boxed(); let db_starting_height = *height; - let db_ending_height = Some(height2); + let db_ending_height = height2; let mut adapter = ImporterAndDbSource::new( block_stream, serializer.clone(), - db, + onchain_db, + receipt_source, db_starting_height, db_ending_height, ); @@ -134,107 +166,7 @@ 
async fn next_block__can_get_block_from_db() { let actual = adapter.next_block().await.unwrap(); // then - let serialized = serializer.serialize_block(&block).unwrap(); + let serialized = serializer.serialize_block(&block, &receipts).unwrap(); let expected = BlockSourceEvent::OldBlock(*height, serialized); assert_eq!(expected, actual); } - -#[tokio::test] -async fn next_block__will_sync_blocks_from_db_after_receiving_height_from_new_end() { - // given - let chain_id = ChainId::default(); - let height1 = BlockHeight::from(0u32); - let height2 = BlockHeight::from(1u32); - let height3 = BlockHeight::from(2u32); - let block1 = arbitrary_block_with_txs(height1); - let block2 = arbitrary_block_with_txs(height2); - let serializer = MockSerializer; - let mut db = database(); - let mut tx = db.write_transaction(); - let compressed_block = block1.compress(&chain_id); - tx.storage_as_mut::() - .insert(&height1, &compressed_block) - .unwrap(); - tx.storage_as_mut::() - .insert( - &block1.transactions()[0].id(&chain_id), - &block1.transactions()[0], - ) - .unwrap(); - tx.commit().unwrap(); - let mut tx = db.write_transaction(); - let compressed_block = block2.compress(&chain_id); - tx.storage_as_mut::() - .insert(&height2, &compressed_block) - .unwrap(); - tx.storage_as_mut::() - .insert( - &block2.transactions()[0].id(&chain_id), - &block2.transactions()[0], - ) - .unwrap(); - tx.commit().unwrap(); - - // Add the imported block to db as well as streaming - let block3 = arbitrary_block_with_txs(height3); - let mut tx = db.write_transaction(); - let compressed_block = block3.compress(&chain_id); - tx.storage_as_mut::() - .insert(&height3, &compressed_block) - .unwrap(); - tx.storage_as_mut::() - .insert( - &block3.transactions()[0].id(&chain_id), - &block3.transactions()[0], - ) - .unwrap(); - tx.commit().unwrap(); - - let sealed_block = SealedBlock { - entity: block3.clone(), - consensus: Default::default(), - }; - let import_result = Arc::new( - ImportResult { - sealed_block, 
- tx_status: vec![], - events: vec![], - source: Default::default(), - } - .wrap(), - ); - let blocks: Vec = vec![import_result]; - let block_stream = stream_with_pending(blocks); - let db_starting_height = height1; - let mut adapter = ImporterAndDbSource::new( - block_stream, - serializer.clone(), - db, - db_starting_height, - None, - ); - - // when - let actual1 = adapter.next_block().await.unwrap(); - let actual2 = adapter.next_block().await.unwrap(); - let actual3 = adapter.next_block().await.unwrap(); - - // then - let actual = vec![actual1, actual2, actual3] - .into_iter() - .collect::>(); - // should receive the - let expected = vec![ - BlockSourceEvent::OldBlock(height1, serializer.serialize_block(&block1).unwrap()), - BlockSourceEvent::OldBlock(height2, serializer.serialize_block(&block2).unwrap()), - BlockSourceEvent::NewBlock(height3, serializer.serialize_block(&block3).unwrap()), - ]; - let expected: HashSet<_> = expected.into_iter().collect(); - let length = actual.len(); - let expected_length = expected.len(); - for event in &actual { - tracing::debug!("actual event: {:?}", event); - } - assert_eq!(length, expected_length); - assert_eq!(expected, actual); -} diff --git a/crates/services/block_aggregator_api/src/db.rs b/crates/services/block_aggregator_api/src/db.rs index d664bd13932..7e326bdc737 100644 --- a/crates/services/block_aggregator_api/src/db.rs +++ b/crates/services/block_aggregator_api/src/db.rs @@ -1,8 +1,15 @@ -use crate::result::Result; +use crate::{ + blocks::BlockSourceEvent, + result::Result, +}; use fuel_core_types::fuel_types::BlockHeight; +pub mod remote_cache; pub mod storage_db; +pub mod storage_or_remote_db; +pub mod table; + /// The definition of the block aggregator database. 
pub trait BlockAggregatorDB: Send + Sync { type Block; @@ -12,8 +19,7 @@ pub trait BlockAggregatorDB: Send + Sync { /// Stores a block with the given ID fn store_block( &mut self, - height: BlockHeight, - block: Self::Block, + block: BlockSourceEvent, ) -> impl Future> + Send; /// Retrieves a range of blocks from the database @@ -26,5 +32,7 @@ pub trait BlockAggregatorDB: Send + Sync { /// Retrieves the current height of the aggregated blocks If there is a break in the blocks, /// i.e. the blocks are being aggregated out of order, return the height of the last /// contiguous block - fn get_current_height(&self) -> impl Future> + Send; + fn get_current_height( + &self, + ) -> impl Future>> + Send; } diff --git a/crates/services/block_aggregator_api/src/db/remote_cache.rs b/crates/services/block_aggregator_api/src/db/remote_cache.rs new file mode 100644 index 00000000000..ae0e4b03173 --- /dev/null +++ b/crates/services/block_aggregator_api/src/db/remote_cache.rs @@ -0,0 +1,256 @@ +use crate::{ + block_range_response::BlockRangeResponse, + blocks::BlockSourceEvent, + db::{ + BlockAggregatorDB, + table::{ + LatestBlock, + Mode, + }, + }, + protobuf_types::Block as ProtoBlock, + result::Error, +}; +use anyhow::anyhow; +use aws_sdk_s3::{ + self, + Client, + primitives::ByteStream, +}; +use flate2::{ + Compression, + write::GzEncoder, +}; +use fuel_core_storage::{ + Error as StorageError, + StorageAsMut, + StorageAsRef, + StorageInspect, + StorageMutate, + transactional::{ + Modifiable, + StorageTransaction, + WriteTransaction, + }, +}; +use fuel_core_types::fuel_types::BlockHeight; +use prost::Message; +use std::io::Write; + +#[allow(non_snake_case)] +#[cfg(test)] +mod tests; + +#[allow(unused)] +pub struct RemoteCache { + // aws configuration + aws_bucket: String, + requester_pays: bool, + aws_endpoint: Option, + client: Client, + publishes_blocks: bool, + + // track consistency between runs + local_persisted: S, + sync_from: BlockHeight, + highest_new_height: Option, + 
orphaned_new_height: Option, + synced: bool, +} + +impl RemoteCache { + #[allow(clippy::too_many_arguments)] + pub async fn new( + aws_bucket: String, + requester_pays: bool, + aws_endpoint: Option, + client: Client, + local_persisted: S, + sync_from: BlockHeight, + publish: bool, + ) -> RemoteCache { + RemoteCache { + aws_bucket, + requester_pays, + aws_endpoint, + client, + publishes_blocks: publish, + local_persisted, + sync_from, + highest_new_height: None, + orphaned_new_height: None, + synced: false, + } + } + + fn stream_blocks( + &self, + first: BlockHeight, + last: BlockHeight, + ) -> crate::result::Result { + let bucket = self.aws_bucket.clone(); + let requester_pays = self.requester_pays; + let aws_endpoint = self.aws_endpoint.clone(); + let stream = futures::stream::iter((*first..=*last).map(move |height| { + let block_height = BlockHeight::new(height); + let key = block_height_to_key(&block_height); + let res = crate::block_range_response::RemoteS3Response { + bucket: bucket.clone(), + key: key.clone(), + requester_pays, + aws_endpoint: aws_endpoint.clone(), + }; + (block_height, res) + })); + Ok(BlockRangeResponse::S3(Box::pin(stream))) + } +} + +impl BlockAggregatorDB for RemoteCache +where + S: Send + Sync, + S: Modifiable, + S: StorageInspect, + for<'b> StorageTransaction<&'b mut S>: + StorageMutate, +{ + type Block = ProtoBlock; + type BlockRangeResponse = BlockRangeResponse; + + async fn store_block( + &mut self, + block_event: BlockSourceEvent, + ) -> crate::result::Result<()> { + let (height, block) = block_event.clone().into_inner(); + let key = block_height_to_key(&height); + let mut buf = Vec::new(); + block.encode(&mut buf).map_err(Error::db_error)?; + let zipped = gzip_bytes(&buf)?; + let body = ByteStream::from(zipped); + if self.publishes_blocks { + let req = self + .client + .put_object() + .bucket(&self.aws_bucket) + .key(&key) + .body(body) + .content_encoding("gzip") + .content_type("application/grpc-web"); + let _ = 
req.send().await.map_err(Error::db_error)?; + } + match block_event { + BlockSourceEvent::NewBlock(new_height, _) => { + tracing::debug!("New block: {:?}", new_height); + self.highest_new_height = Some(new_height); + if self.synced { + tracing::debug!("Updating latest block to {:?}", new_height); + let mut tx = self.local_persisted.write_transaction(); + tx.storage_as_mut::() + .insert(&(), &Mode::new_s3(new_height)) + .map_err(|e| Error::DB(anyhow!(e)))?; + tx.commit().map_err(|e| Error::DB(anyhow!(e)))?; + } else if new_height == self.sync_from + || self.height_is_next_height(new_height)? + { + tracing::debug!("Updating latest block to {:?}", new_height); + self.synced = true; + self.highest_new_height = Some(new_height); + self.orphaned_new_height = None; + let mut tx = self.local_persisted.write_transaction(); + tx.storage_as_mut::() + .insert(&(), &Mode::new_s3(new_height)) + .map_err(|e| Error::DB(anyhow!(e)))?; + tx.commit().map_err(|e| Error::DB(anyhow!(e)))?; + } else if self.orphaned_new_height.is_none() { + tracing::info!("Marking block as orphaned: {:?}", new_height); + self.orphaned_new_height = Some(new_height); + } + } + BlockSourceEvent::OldBlock(height, _) => { + tracing::debug!("Old block: {:?}", height); + let mut tx = self.local_persisted.write_transaction(); + let latest_height = if height.succ() == self.orphaned_new_height { + tracing::debug!("Marking block as synced: {:?}", height); + self.orphaned_new_height = None; + self.synced = true; + self.highest_new_height.unwrap_or(height) + } else { + tracing::debug!("Updating latest block to {:?}", height); + height + }; + tx.storage_as_mut::() + .insert(&(), &Mode::new_s3(latest_height)) + .map_err(|e| Error::DB(anyhow!(e)))?; + tx.commit().map_err(|e| Error::DB(anyhow!(e)))?; + } + } + Ok(()) + } + + async fn get_block_range( + &self, + first: BlockHeight, + last: BlockHeight, + ) -> crate::result::Result { + let current_height = self + .get_current_height() + .await? 
+ .unwrap_or(BlockHeight::new(0)); + if last > current_height { + Err(Error::db_error(anyhow!( + "Requested block height {} is greater than current synced height {}", + last, + current_height + ))) + } else { + self.stream_blocks(first, last) + } + } + + async fn get_current_height(&self) -> crate::result::Result> { + tracing::debug!("Getting current height from local cache"); + let height = self + .local_persisted + .storage_as_ref::() + .get(&()) + .map_err(|e| Error::DB(anyhow!(e)))?; + + Ok(height.map(|b| b.height())) + } +} + +impl RemoteCache +where + S: Send + Sync, + S: StorageInspect, + for<'b> StorageTransaction<&'b mut S>: + StorageMutate, +{ + fn height_is_next_height(&self, height: BlockHeight) -> crate::result::Result { + let maybe_latest_height = self + .local_persisted + .storage_as_ref::() + .get(&()) + .map_err(|e| Error::DB(anyhow!(e)))? + .map(|m| m.height()); + if let Some(latest_height) = maybe_latest_height { + Ok(latest_height.succ() == Some(height)) + } else { + Ok(false) + } + } +} + +pub fn block_height_to_key(height: &BlockHeight) -> String { + let raw: [u8; 4] = height.to_bytes(); + format!( + "{:02}/{:02}/{:02}/{:02}", + &raw[0], &raw[1], &raw[2], &raw[3] + ) +} + +pub fn gzip_bytes(data: &[u8]) -> crate::result::Result> { + let mut encoder = GzEncoder::new(Vec::new(), Compression::default()); + encoder.write_all(data).map_err(Error::db_error)?; + encoder.finish().map_err(Error::db_error) +} diff --git a/crates/services/block_aggregator_api/src/db/remote_cache/tests.rs b/crates/services/block_aggregator_api/src/db/remote_cache/tests.rs new file mode 100644 index 00000000000..ec444a2bb70 --- /dev/null +++ b/crates/services/block_aggregator_api/src/db/remote_cache/tests.rs @@ -0,0 +1,248 @@ +use super::*; +use crate::{ + block_range_response::RemoteS3Response, + blocks::importer_and_db_source::{ + BlockSerializer, + serializer_adapter::SerializerAdapter, + }, + db::table::{ + Column, + Mode, + }, +}; +use 
aws_sdk_s3::operation::put_object::PutObjectOutput; +use aws_smithy_mocks::{ + Rule, + mock, + mock_client, +}; +use fuel_core_storage::{ + structured_storage::test::InMemoryStorage, + transactional::{ + IntoTransaction, + StorageTransaction, + }, +}; +use fuel_core_types::blockchain::block::Block as FuelBlock; +use futures::StreamExt; +use std::iter; + +fn database() -> StorageTransaction> { + InMemoryStorage::default().into_transaction() +} + +fn arb_proto_block() -> ProtoBlock { + let block = FuelBlock::default(); + let serializer = SerializerAdapter; + serializer.serialize_block(&block, &[]).unwrap() +} +fn put_happy_rule() -> Rule { + mock!(Client::put_object) + .match_requests(|req| req.bucket() == Some("test-bucket")) + .sequence() + .output(|| PutObjectOutput::builder().build()) + .build() +} + +#[tokio::test] +async fn store_block__happy_path() { + // given + let client = mock_client!(aws_sdk_s3, [&put_happy_rule()]); + let aws_bucket = "test-bucket".to_string(); + let storage = database(); + let sync_from = BlockHeight::new(0); + let mut adapter = + RemoteCache::new(aws_bucket, false, None, client, storage, sync_from, true).await; + let block_height = BlockHeight::new(123); + let block = arb_proto_block(); + let block = BlockSourceEvent::OldBlock(block_height, block); + + // when + let res = adapter.store_block(block).await; + + // then + assert!(res.is_ok()); +} + +#[tokio::test] +async fn get_block_range__happy_path() { + // given + let client = mock_client!(aws_sdk_s3, []); + let aws_bucket = "test-bucket".to_string(); + let storage = database(); + let sync_from = BlockHeight::new(0); + let adapter = RemoteCache::new( + aws_bucket.clone(), + false, + None, + client, + storage, + sync_from, + true, + ) + .await; + let start = BlockHeight::new(999); + let end = BlockHeight::new(1003); + + // when + let addresses = adapter.get_block_range(start, end).await.unwrap(); + + // then + let actual = match addresses { + BlockRangeResponse::Literal(_) => { + 
panic!("Expected remote response, got literal"); + } + BlockRangeResponse::S3(stream) => stream.collect::>().await, + }; + let expected = (999..=1003) + .map(|height| { + let key = block_height_to_key(&BlockHeight::new(height)); + let res = RemoteS3Response { + bucket: aws_bucket.clone(), + key, + requester_pays: false, + aws_endpoint: None, + }; + (BlockHeight::new(height), res) + }) + .collect::>(); + assert_eq!(actual, expected); +} + +#[tokio::test] +async fn get_current_height__returns_highest_continuous_block() { + // given + let client = mock_client!(aws_sdk_s3, [&put_happy_rule()]); + let aws_bucket = "test-bucket".to_string(); + let storage = database(); + let sync_from = BlockHeight::new(0); + let mut adapter = + RemoteCache::new(aws_bucket, false, None, client, storage, sync_from, true).await; + + let expected = BlockHeight::new(123); + let block = arb_proto_block(); + let block = BlockSourceEvent::OldBlock(expected, block); + adapter.store_block(block).await.unwrap(); + + // when + let actual = adapter.get_current_height().await.unwrap().unwrap(); + + // then + assert_eq!(expected, actual); +} + +#[tokio::test] +async fn store_block__does_not_update_the_highest_continuous_block_if_not_contiguous() { + // given + let mut storage = database(); + let mut tx = storage.write_transaction(); + let starting_height = BlockHeight::from(1u32); + tx.storage_as_mut::() + .insert(&(), &Mode::new_s3(starting_height)) + .unwrap(); + tx.commit().unwrap(); + let client = mock_client!(aws_sdk_s3, [&put_happy_rule()]); + let aws_bucket = "test-bucket".to_string(); + let sync_from = BlockHeight::new(0); + let mut adapter = + RemoteCache::new(aws_bucket, false, None, client, storage, sync_from, true).await; + + let expected = BlockHeight::new(3); + let block = arb_proto_block(); + let block = BlockSourceEvent::NewBlock(expected, block); + adapter.store_block(block).await.unwrap(); + + // when + let expected = starting_height; + let actual = 
adapter.get_current_height().await.unwrap().unwrap(); + assert_eq!(expected, actual); +} + +#[tokio::test] +async fn store_block__updates_the_highest_continuous_block_if_filling_a_gap() { + let rules: Vec<_> = iter::repeat_with(put_happy_rule).take(10).collect(); + let client = mock_client!(aws_sdk_s3, rules.iter()); + let aws_bucket = "test-bucket".to_string(); + + // given + let db = database(); + let sync_from = BlockHeight::new(0); + let mut adapter = + RemoteCache::new(aws_bucket, false, None, client, db, sync_from, true).await; + + for height in 2..=10u32 { + let height = BlockHeight::from(height); + let block = arb_proto_block(); + let block = BlockSourceEvent::NewBlock(height, block.clone()); + adapter.store_block(block).await.unwrap(); + } + // when + let height = BlockHeight::from(1u32); + let some_block = arb_proto_block(); + let block = BlockSourceEvent::OldBlock(height, some_block.clone()); + adapter.store_block(block).await.unwrap(); + + // then + let expected = BlockHeight::from(10u32); + let actual = adapter.get_current_height().await.unwrap().unwrap(); + assert_eq!(expected, actual); + + assert!(adapter.synced) +} + +#[tokio::test] +async fn store_block__new_block_updates_the_highest_continuous_block_if_synced() { + let rules: Vec<_> = iter::repeat_with(put_happy_rule).take(10).collect(); + let client = mock_client!(aws_sdk_s3, rules.iter()); + let aws_bucket = "test-bucket".to_string(); + + // given + let db = database(); + let sync_from = BlockHeight::new(0); + let mut adapter = + RemoteCache::new(aws_bucket, false, None, client, db, sync_from, true).await; + + let height = BlockHeight::from(0u32); + let some_block = arb_proto_block(); + let block = BlockSourceEvent::OldBlock(height, some_block.clone()); + adapter.store_block(block).await.unwrap(); + + // when + let height = BlockHeight::from(1u32); + let some_block = arb_proto_block(); + let block = BlockSourceEvent::NewBlock(height, some_block.clone()); + 
adapter.store_block(block).await.unwrap(); + + // then + let expected = BlockHeight::from(1u32); + let actual = adapter.get_current_height().await.unwrap().unwrap(); + assert_eq!(expected, actual); + + assert!(adapter.synced) +} + +#[tokio::test] +async fn store_block__new_block_comes_first() { + let rules: Vec<_> = iter::repeat_with(put_happy_rule).take(10).collect(); + let client = mock_client!(aws_sdk_s3, rules.iter()); + let aws_bucket = "test-bucket".to_string(); + + // given + let db = database(); + let sync_from = BlockHeight::new(0); + let mut adapter = + RemoteCache::new(aws_bucket, false, None, client, db, sync_from, true).await; + + // when + let height = BlockHeight::from(0u32); + let some_block = arb_proto_block(); + let block = BlockSourceEvent::NewBlock(height, some_block.clone()); + adapter.store_block(block).await.unwrap(); + + // then + let expected = BlockHeight::from(0u32); + let actual = adapter.get_current_height().await.unwrap().unwrap(); + assert_eq!(expected, actual); + + assert!(adapter.synced); +} diff --git a/crates/services/block_aggregator_api/src/db/storage_db.rs b/crates/services/block_aggregator_api/src/db/storage_db.rs index 7aeac0a91d1..8f57c925eb2 100644 --- a/crates/services/block_aggregator_api/src/db/storage_db.rs +++ b/crates/services/block_aggregator_api/src/db/storage_db.rs @@ -1,8 +1,14 @@ use crate::{ block_range_response::BlockRangeResponse, + blocks::BlockSourceEvent, db::{ BlockAggregatorDB, - storage_db::table::Column, + table::{ + Blocks, + Column, + LatestBlock, + Mode, + }, }, protobuf_types::Block as ProtoBlock, result::{ @@ -29,78 +35,44 @@ use fuel_core_storage::{ }; use fuel_core_types::fuel_types::BlockHeight; use std::{ - cmp::Ordering, - collections::BTreeSet, pin::Pin, task::{ Context, Poll, }, }; -use table::Blocks; -pub mod table; #[cfg(test)] mod tests; pub struct StorageDB { - highest_contiguous_block: BlockHeight, - orphaned_heights: BTreeSet, + highest_new_height: Option, + orphaned_new_height: 
Option, + synced: bool, + sync_from: BlockHeight, storage: S, } impl StorageDB { - pub fn new(storage: S) -> Self { - let height = BlockHeight::new(0); - Self::new_with_height(storage, height) - } - - pub fn new_with_height(storage: S, highest_contiguous_block: BlockHeight) -> Self { - let orphaned_heights = BTreeSet::new(); + pub fn new(storage: S, sync_from: BlockHeight) -> Self { Self { - highest_contiguous_block, - orphaned_heights, + highest_new_height: None, + orphaned_new_height: None, + synced: false, + sync_from, storage, } } - - fn update_highest_contiguous_block(&mut self, height: BlockHeight) { - let next_height = self.next_height(); - match height.cmp(&next_height) { - Ordering::Equal => { - self.highest_contiguous_block = height; - while let Some(next_height) = self.orphaned_heights.first() { - if next_height == &self.next_height() { - self.highest_contiguous_block = *next_height; - let _ = self.orphaned_heights.pop_first(); - } else { - break; - } - } - } - Ordering::Greater => { - self.orphaned_heights.insert(height); - } - Ordering::Less => { - tracing::warn!( - "Received block at height {:?}, but the syncing is already at height {:?}. 
Ignoring block.", - height, - self.highest_contiguous_block - ); - } - } - } - fn next_height(&self) -> BlockHeight { - let last_height = *self.highest_contiguous_block; - BlockHeight::new(last_height.saturating_add(1)) - } } impl BlockAggregatorDB for StorageDB where S: Modifiable + std::fmt::Debug, S: KeyValueInspect, + S: StorageInspect, for<'b> StorageTransaction<&'b mut S>: StorageMutate, + for<'b> StorageTransaction<&'b mut S>: + StorageMutate, S: AtomicView, T: Unpin + Send + Sync + KeyValueInspect + 'static + std::fmt::Debug, StorageTransaction: StorageInspect, @@ -110,15 +82,55 @@ where async fn store_block( &mut self, - height: BlockHeight, - block: ProtoBlock, + block_event: BlockSourceEvent, ) -> Result<()> { - self.update_highest_contiguous_block(height); + let (height, block) = block_event.clone().into_inner(); let mut tx = self.storage.write_transaction(); tx.storage_as_mut::() .insert(&height, &block) .map_err(|e| Error::DB(anyhow!(e)))?; tx.commit().map_err(|e| Error::DB(anyhow!(e)))?; + + match block_event { + BlockSourceEvent::NewBlock(new_height, _) => { + tracing::debug!("New block: {:?}", new_height); + self.highest_new_height = Some(new_height); + if self.synced { + let mut tx = self.storage.write_transaction(); + tx.storage_as_mut::() + .insert(&(), &Mode::Local(new_height)) + .map_err(|e| Error::DB(anyhow!(e)))?; + tx.commit().map_err(|e| Error::DB(anyhow!(e)))?; + } else if new_height == self.sync_from + || self.height_is_next_height(new_height)? 
+ { + let mut tx = self.storage.write_transaction(); + self.synced = true; + self.highest_new_height = Some(new_height); + tx.storage_as_mut::() + .insert(&(), &Mode::Local(new_height)) + .map_err(|e| Error::DB(anyhow!(e)))?; + tx.commit().map_err(|e| Error::DB(anyhow!(e)))?; + } else if self.orphaned_new_height.is_none() { + self.orphaned_new_height = Some(new_height); + } + } + BlockSourceEvent::OldBlock(height, _) => { + tracing::debug!("Old block: {:?}", height); + let latest_height = if height.succ() == self.orphaned_new_height { + self.orphaned_new_height = None; + self.synced = true; + self.highest_new_height.unwrap_or(height) + } else { + height + }; + let mut tx = self.storage.write_transaction(); + tx.storage_as_mut::() + .insert(&(), &Mode::Local(latest_height)) + .map_err(|e| Error::DB(anyhow!(e)))?; + tx.commit().map_err(|e| Error::DB(anyhow!(e)))?; + } + } Ok(()) } @@ -135,11 +147,41 @@ where Ok(BlockRangeResponse::Literal(Box::pin(stream))) } - async fn get_current_height(&self) -> Result { - Ok(self.highest_contiguous_block) + async fn get_current_height(&self) -> Result> { + let height = self + .storage + .storage_as_ref::() + .get(&()) + .map_err(|e| Error::DB(anyhow!(e)))?; + + Ok(height.map(|b| b.height())) } } +impl StorageDB +where + S: Modifiable + std::fmt::Debug, + S: KeyValueInspect, + S: StorageInspect, + for<'b> StorageTransaction<&'b mut S>: + StorageMutate, + S: AtomicView, + T: Unpin + Send + Sync + KeyValueInspect + 'static + std::fmt::Debug, +{ + fn height_is_next_height(&self, height: BlockHeight) -> Result { + let maybe_latest_height = self + .storage + .storage_as_ref::() + .get(&()) + .map_err(|e| Error::DB(anyhow!(e)))? 
+ .map(|m| m.height()); + if let Some(latest_height) = maybe_latest_height { + Ok(latest_height.succ() == Some(height)) + } else { + Ok(false) + } + } +} pub struct StorageStream { inner: S, next: Option, @@ -161,7 +203,7 @@ where S: Unpin + ReadTransaction + std::fmt::Debug, for<'a> StorageTransaction<&'a S>: StorageInspect, { - type Item = ProtoBlock; + type Item = (BlockHeight, ProtoBlock); fn poll_next( self: Pin<&mut Self>, @@ -187,7 +229,7 @@ where None }; this.next = next; - Poll::Ready(Some(block.into_owned())) + Poll::Ready(Some((height, block.into_owned()))) } Ok(None) => { tracing::debug!("No block at height: {:?}", height); diff --git a/crates/services/block_aggregator_api/src/db/storage_db/table.rs b/crates/services/block_aggregator_api/src/db/storage_db/table.rs deleted file mode 100644 index be11785c7af..00000000000 --- a/crates/services/block_aggregator_api/src/db/storage_db/table.rs +++ /dev/null @@ -1,64 +0,0 @@ -use crate::protobuf_types::Block as ProtoBlock; -use fuel_core_storage::{ - Mappable, - blueprint::plain::Plain, - codec::postcard::Postcard, - kv_store::StorageColumn, - structured_storage::TableWithBlueprint, -}; -use fuel_core_types::fuel_types::BlockHeight; - -#[repr(u32)] -#[derive( - Copy, - Clone, - Debug, - strum_macros::EnumCount, - strum_macros::IntoStaticStr, - PartialEq, - Eq, - enum_iterator::Sequence, - Hash, - num_enum::TryFromPrimitive, -)] -pub enum Column { - Metadata = 0, - Blocks = 1, -} - -impl Column { - pub const COUNT: usize = ::COUNT; - - pub fn as_u32(&self) -> u32 { - *self as u32 - } -} - -impl StorageColumn for Column { - fn name(&self) -> String { - let str: &str = self.into(); - str.to_string() - } - - fn id(&self) -> u32 { - self.as_u32() - } -} - -pub struct Blocks; - -impl Mappable for Blocks { - type Key = Self::OwnedKey; - type OwnedKey = BlockHeight; - type Value = Self::OwnedValue; - type OwnedValue = ProtoBlock; -} - -impl TableWithBlueprint for Blocks { - type Blueprint = Plain; - type Column = 
Column; - - fn column() -> Self::Column { - Column::Blocks - } -} diff --git a/crates/services/block_aggregator_api/src/db/storage_db/tests.rs b/crates/services/block_aggregator_api/src/db/storage_db/tests.rs index 593839e406a..0b116b4a246 100644 --- a/crates/services/block_aggregator_api/src/db/storage_db/tests.rs +++ b/crates/services/block_aggregator_api/src/db/storage_db/tests.rs @@ -6,7 +6,10 @@ use crate::{ BlockSerializer, serializer_adapter::SerializerAdapter, }, - db::storage_db::table::Column, + db::table::{ + Column, + Mode, + }, }; use fuel_core_storage::{ StorageAsRef, @@ -28,19 +31,22 @@ fn proto_block_with_height(height: BlockHeight) -> ProtoBlock { let serializer_adapter = SerializerAdapter; let mut default_block = FuelBlock::::default(); default_block.header_mut().set_block_height(height); - serializer_adapter.serialize_block(&default_block).unwrap() + serializer_adapter + .serialize_block(&default_block, &[]) + .unwrap() } #[tokio::test] async fn store_block__adds_to_storage() { // given let db = database(); - let mut adapter = StorageDB::new(db); + let mut adapter = StorageDB::new(db, BlockHeight::from(0u32)); let height = BlockHeight::from(1u32); let expected = proto_block_with_height(height); + let block = BlockSourceEvent::OldBlock(height, expected.clone()); // when - adapter.store_block(height, expected.clone()).await.unwrap(); + adapter.store_block(block).await.unwrap(); // then let actual = adapter @@ -78,7 +84,7 @@ async fn get_block__can_get_expected_range() { tx.commit().unwrap(); let db = db.commit().unwrap(); let tx = db.into_transaction(); - let adapter = StorageDB::new(tx); + let adapter = StorageDB::new(tx, BlockHeight::from(0u32)); // when let BlockRangeResponse::Literal(stream) = @@ -89,41 +95,48 @@ async fn get_block__can_get_expected_range() { let actual = stream.collect::>().await; // then - assert_eq!(actual, vec![expected_2, expected_3]); + assert_eq!(actual, vec![(height_2, expected_2), (height_3, expected_3)]); } 
#[tokio::test] async fn store_block__updates_the_highest_continuous_block_if_contiguous() { // given let db = database(); - let mut adapter = StorageDB::new_with_height(db, BlockHeight::from(0u32)); + let mut adapter = StorageDB::new(db, BlockHeight::from(0u32)); let height = BlockHeight::from(1u32); let expected = proto_block_with_height(height); + let block = BlockSourceEvent::OldBlock(height, expected.clone()); // when - adapter.store_block(height, expected.clone()).await.unwrap(); + adapter.store_block(block).await.unwrap(); // then let expected = height; - let actual = adapter.get_current_height().await.unwrap(); + let actual = adapter.get_current_height().await.unwrap().unwrap(); assert_eq!(expected, actual); } #[tokio::test] async fn store_block__does_not_update_the_highest_continuous_block_if_not_contiguous() { // given - let db = database(); - let starting_height = BlockHeight::from(0u32); - let mut adapter = StorageDB::new_with_height(db, starting_height); - let height = BlockHeight::from(2u32); - let expected = proto_block_with_height(height); + let mut db = database(); + let mut tx = db.write_transaction(); + let starting_height = BlockHeight::from(1u32); + tx.storage_as_mut::() + .insert(&(), &Mode::Local(starting_height)) + .unwrap(); + tx.commit().unwrap(); + let mut adapter = StorageDB::new(db, BlockHeight::from(0u32)); + let height = BlockHeight::from(3u32); + let proto = proto_block_with_height(height); + let block = BlockSourceEvent::NewBlock(height, proto.clone()); // when - adapter.store_block(height, expected.clone()).await.unwrap(); + adapter.store_block(block).await.unwrap(); // then let expected = starting_height; - let actual = adapter.get_current_height().await.unwrap(); + let actual = adapter.get_current_height().await.unwrap().unwrap(); assert_eq!(expected, actual); } @@ -131,30 +144,66 @@ async fn store_block__does_not_update_the_highest_continuous_block_if_not_contig async fn 
store_block__updates_the_highest_continuous_block_if_filling_a_gap() { // given let db = database(); - let starting_height = BlockHeight::from(0u32); - let mut adapter = StorageDB::new_with_height(db, starting_height); + let mut adapter = StorageDB::new(db, BlockHeight::from(0u32)); - let mut orphaned_height = None; for height in 2..=10u32 { let height = BlockHeight::from(height); - orphaned_height = Some(height); let block = proto_block_with_height(height); - adapter.store_block(height, block).await.unwrap(); + let block = BlockSourceEvent::NewBlock(height, block.clone()); + adapter.store_block(block).await.unwrap(); } - let expected = starting_height; - let actual = adapter.get_current_height().await.unwrap(); + // when + let height = BlockHeight::from(1u32); + let some_block = proto_block_with_height(height); + let block = BlockSourceEvent::OldBlock(height, some_block.clone()); + adapter.store_block(block).await.unwrap(); + + // then + let expected = BlockHeight::from(10u32); + let actual = adapter.get_current_height().await.unwrap().unwrap(); assert_eq!(expected, actual); +} +#[tokio::test] +async fn store_block__new_block_updates_the_highest_continuous_block_if_synced() { + // given + let db = database(); + let mut adapter = StorageDB::new(db, BlockHeight::from(0u32)); + + let height = BlockHeight::from(0u32); + let some_block = proto_block_with_height(height); + let block = BlockSourceEvent::OldBlock(height, some_block.clone()); + adapter.store_block(block).await.unwrap(); // when let height = BlockHeight::from(1u32); let some_block = proto_block_with_height(height); - adapter - .store_block(height, some_block.clone()) - .await - .unwrap(); + let block = BlockSourceEvent::NewBlock(height, some_block.clone()); + adapter.store_block(block).await.unwrap(); // then - let expected = orphaned_height.unwrap(); - let actual = adapter.get_current_height().await.unwrap(); + let expected = BlockHeight::from(1u32); + let actual = 
adapter.get_current_height().await.unwrap().unwrap(); assert_eq!(expected, actual); + + assert!(adapter.synced) +} + +#[tokio::test] +async fn store_block__new_block_comes_first() { + // given + let db = database(); + let mut adapter = StorageDB::new(db, BlockHeight::from(0u32)); + + // when + let height = BlockHeight::from(0u32); + let some_block = proto_block_with_height(height); + let block = BlockSourceEvent::NewBlock(height, some_block.clone()); + adapter.store_block(block).await.unwrap(); + + // then + let expected = BlockHeight::from(0u32); + let actual = adapter.get_current_height().await.unwrap().unwrap(); + assert_eq!(expected, actual); + + assert!(adapter.synced); } diff --git a/crates/services/block_aggregator_api/src/db/storage_or_remote_db.rs b/crates/services/block_aggregator_api/src/db/storage_or_remote_db.rs new file mode 100644 index 00000000000..b501e801678 --- /dev/null +++ b/crates/services/block_aggregator_api/src/db/storage_or_remote_db.rs @@ -0,0 +1,139 @@ +use crate::{ + block_range_response::BlockRangeResponse, + blocks::BlockSourceEvent, + db::{ + BlockAggregatorDB, + remote_cache::RemoteCache, + storage_db::StorageDB, + table::{ + Blocks, + Column, + LatestBlock, + }, + }, + result::Result, +}; +use aws_config::{ + BehaviorVersion, + default_provider::credentials::DefaultCredentialsChain, +}; + +use fuel_core_storage::{ + Error as StorageError, + StorageInspect, + StorageMutate, + kv_store::KeyValueInspect, + transactional::{ + AtomicView, + Modifiable, + StorageTransaction, + }, +}; +use fuel_core_types::fuel_types::BlockHeight; + +/// A union of a storage and a remote cache for the block aggregator. 
This allows both to be +/// supported in production depending on the configuration +pub enum StorageOrRemoteDB { + Remote(RemoteCache), + Storage(StorageDB), +} + +impl StorageOrRemoteDB { + pub fn new_storage(storage: S, sync_from: BlockHeight) -> Self { + StorageOrRemoteDB::Storage(StorageDB::new(storage, sync_from)) + } + + #[allow(clippy::too_many_arguments)] + pub async fn new_s3( + storage: S, + aws_bucket: &str, + requester_pays: bool, + aws_endpoint_url: Option, + sync_from: BlockHeight, + publish: bool, + ) -> Self { + let credentials = DefaultCredentialsChain::builder().build().await; + let sdk_config = aws_config::defaults(BehaviorVersion::latest()) + .credentials_provider(credentials) + .load() + .await; + let mut config_builder = aws_sdk_s3::config::Builder::from(&sdk_config); + if let Some(endpoint) = &aws_endpoint_url { + config_builder.set_endpoint_url(Some(endpoint.to_string())); + } + let config = config_builder.force_path_style(true).build(); + let client = aws_sdk_s3::Client::from_conf(config); + let remote_cache = RemoteCache::new( + aws_bucket.to_string(), + requester_pays, + aws_endpoint_url, + client, + storage, + sync_from, + publish, + ) + .await; + StorageOrRemoteDB::Remote(remote_cache) + } +} + +impl BlockAggregatorDB for StorageOrRemoteDB +where + // Storage Constraints + S: Modifiable + std::fmt::Debug, + S: KeyValueInspect, + S: StorageInspect, + for<'b> StorageTransaction<&'b mut S>: StorageMutate, + for<'b> StorageTransaction<&'b mut S>: + StorageMutate, + S: AtomicView, + T: Unpin + Send + Sync + KeyValueInspect + 'static + std::fmt::Debug, + StorageTransaction: StorageInspect, + // Remote Constraints + S: Send + Sync, + S: Modifiable, + S: StorageInspect, + for<'b> StorageTransaction<&'b mut S>: + StorageMutate, +{ + type Block = crate::protobuf_types::Block; + type BlockRangeResponse = BlockRangeResponse; + + async fn store_block(&mut self, block: BlockSourceEvent) -> Result<()> { + match self { + 
StorageOrRemoteDB::Remote(remote_db) => remote_db.store_block(block).await?, + StorageOrRemoteDB::Storage(storage_db) => { + storage_db.store_block(block).await? + } + } + Ok(()) + } + + async fn get_block_range( + &self, + first: BlockHeight, + last: BlockHeight, + ) -> Result { + let range_response = match self { + StorageOrRemoteDB::Remote(remote_db) => { + remote_db.get_block_range(first, last).await? + } + StorageOrRemoteDB::Storage(storage_db) => { + storage_db.get_block_range(first, last).await? + } + }; + Ok(range_response) + } + + async fn get_current_height(&self) -> Result> { + let height = match self { + StorageOrRemoteDB::Remote(remote_db) => { + remote_db.get_current_height().await? + } + StorageOrRemoteDB::Storage(storage_db) => { + storage_db.get_current_height().await? + } + }; + Ok(height) + } +} diff --git a/crates/services/block_aggregator_api/src/db/table.rs b/crates/services/block_aggregator_api/src/db/table.rs new file mode 100644 index 00000000000..1221c59c2c1 --- /dev/null +++ b/crates/services/block_aggregator_api/src/db/table.rs @@ -0,0 +1,144 @@ +use crate::protobuf_types::Block as ProtoBlock; +use fuel_core_storage::{ + Mappable, + blueprint::plain::Plain, + codec::{ + Decode, + Encode, + }, + kv_store::StorageColumn, + structured_storage::TableWithBlueprint, +}; +use fuel_core_types::fuel_types::BlockHeight; +use std::borrow::Cow; + +#[repr(u32)] +#[derive( + Copy, + Clone, + Debug, + strum_macros::EnumCount, + strum_macros::IntoStaticStr, + PartialEq, + Eq, + enum_iterator::Sequence, + Hash, + num_enum::TryFromPrimitive, +)] +pub enum Column { + Metadata = 0, + Blocks = 1, + LatestBlock = 2, +} + +impl Column { + pub const COUNT: usize = ::COUNT; + + pub fn as_u32(&self) -> u32 { + *self as u32 + } +} + +impl StorageColumn for Column { + fn name(&self) -> String { + let str: &str = self.into(); + str.to_string() + } + + fn id(&self) -> u32 { + self.as_u32() + } +} + +pub struct Blocks; + +impl Mappable for Blocks { + type Key = 
Self::OwnedKey; + type OwnedKey = BlockHeight; + type Value = Self::OwnedValue; + type OwnedValue = ProtoBlock; +} + +impl TableWithBlueprint for Blocks { + type Blueprint = Plain, ProtoBufCodec>; + type Column = Column; + + fn column() -> Self::Column { + Column::Blocks + } +} + +pub struct LatestBlock; + +#[derive(Clone, Debug, PartialEq, serde::Serialize, serde::Deserialize)] +pub enum Mode { + Local(BlockHeight), + S3(BlockHeight), +} + +impl Mode { + pub fn new_s3(height: BlockHeight) -> Self { + Self::S3(height) + } + + pub fn new_local(height: BlockHeight) -> Self { + Self::Local(height) + } + + pub fn height(&self) -> BlockHeight { + match self { + Self::Local(height) => *height, + Self::S3(height) => *height, + } + } +} + +impl Mappable for LatestBlock { + type Key = Self::OwnedKey; + type OwnedKey = (); + type Value = Self::OwnedValue; + type OwnedValue = Mode; +} + +impl TableWithBlueprint for LatestBlock { + type Blueprint = Plain; + type Column = Column; + fn column() -> Self::Column { + Column::LatestBlock + } +} + +use fuel_core_storage::codec::{ + postcard::Postcard, + primitive::Primitive, +}; +use prost::Message; + +pub struct ProtoBufCodec; + +impl Encode for ProtoBufCodec +where + T: Sized + Message, +{ + type Encoder<'a> + = Cow<'a, [u8]> + where + T: 'a; + + fn encode(value: &T) -> Self::Encoder<'_> { + let mut buffer = Vec::new(); + value.encode(&mut buffer).expect( + "It should be impossible to fail unless serialization is not implemented, which is not true for our types.", + ); + buffer.into() + } +} + +impl Decode for ProtoBufCodec +where + T: Message + Default, +{ + fn decode(bytes: &[u8]) -> anyhow::Result { + Ok(T::decode(bytes)?) 
+ } +} diff --git a/crates/services/block_aggregator_api/src/lib.rs b/crates/services/block_aggregator_api/src/lib.rs index e3e9057d7d7..0a231687117 100644 --- a/crates/services/block_aggregator_api/src/lib.rs +++ b/crates/services/block_aggregator_api/src/lib.rs @@ -4,14 +4,12 @@ use crate::{ db::BlockAggregatorDB, }; use fuel_core_services::{ - RunnableService, RunnableTask, StateWatcher, TaskNextAction, }; use fuel_core_types::fuel_types::BlockHeight; use protobuf_types::Block as ProtoBlock; -use std::fmt::Debug; pub mod api; pub mod blocks; @@ -20,8 +18,12 @@ pub mod result; pub mod block_range_response; +pub mod block_aggregator; pub mod protobuf_types; +#[cfg(test)] +mod tests; + pub mod integration { use crate::{ BlockAggregator, @@ -29,85 +31,297 @@ pub mod integration { BlockAggregatorApi, protobuf_adapter::ProtobufAPI, }, - blocks::importer_and_db_source::{ - BlockSerializer, - ImporterAndDbSource, + block_range_response::BlockRangeResponse, + blocks::{ + BlockSource, + importer_and_db_source::{ + BlockSerializer, + ImporterAndDbSource, + sync_service::TxReceipts, + }, + }, + db::{ + storage_or_remote_db::StorageOrRemoteDB, + table::{ + Column, + LatestBlock, + Mode, + }, }, - db::BlockAggregatorDB, protobuf_types::Block as ProtoBlock, }; + use anyhow::bail; use fuel_core_services::{ + RunnableService, ServiceRunner, + StateWatcher, stream::BoxStream, }; use fuel_core_storage::{ + Error as StorageError, + StorageAsRef, StorageInspect, + StorageMutate, + kv_store::KeyValueInspect, tables::{ FuelBlocks, Transactions, }, + transactional::{ + AtomicView, + HistoricalView, + Modifiable, + StorageTransaction, + }, }; use fuel_core_types::{ fuel_types::BlockHeight, services::block_importer::SharedImportResult, }; - use std::net::SocketAddr; + use std::{ + fmt::Debug, + net::SocketAddr, + }; #[derive(Clone, Debug)] pub struct Config { pub addr: SocketAddr, + pub api_buffer_size: usize, + pub sync_from: Option, + pub storage_method: StorageMethod, + } + + 
#[derive(Clone, Debug, Default)] + pub enum StorageMethod { + // Stores blocks in local DB + #[default] + Local, + // Publishes blocks to S3 bucket + S3 { + bucket: String, + endpoint_url: Option, + requester_pays: bool, + }, + // Assumes another node is publishing blocks to S3 bucket, but relaying details + S3NoPublish { + bucket: String, + endpoint_url: Option, + requester_pays: bool, + }, + } + + pub struct UninitializedTask { + api: API, + block_source: Blocks, + storage: S, + config: Config, + genesis_block_height: BlockHeight, + } + + #[async_trait::async_trait] + impl RunnableService for UninitializedTask + where + Api: BlockAggregatorApi< + Block = ProtoBlock, + BlockRangeResponse = BlockRangeResponse, + >, + Blocks: BlockSource, + // Storage Constraints + S: Modifiable + Debug, + S: KeyValueInspect, + S: StorageInspect, + for<'b> StorageTransaction<&'b mut S>: + StorageMutate, + for<'b> StorageTransaction<&'b mut S>: + StorageMutate, + S: AtomicView, + T: Unpin + Send + Sync + KeyValueInspect + 'static + Debug, + StorageTransaction: + StorageInspect, + // Remote Constraints + S: Send + Sync, + S: Modifiable, + S: StorageInspect, + for<'b> StorageTransaction<&'b mut S>: + StorageMutate, + { + const NAME: &'static str = "BlockAggregatorService"; + type SharedData = (); + type Task = BlockAggregator, Blocks, Blocks::Block>; + type TaskParams = (); + + fn shared_data(&self) -> Self::SharedData {} + + async fn into_task( + self, + _state_watcher: &StateWatcher, + _params: Self::TaskParams, + ) -> anyhow::Result { + let UninitializedTask { + api, + block_source, + storage, + config, + genesis_block_height, + } = self; + let sync_from = config.sync_from.unwrap_or(genesis_block_height); + let db_adapter = match config.storage_method { + StorageMethod::Local => { + let mode = storage.storage_as_ref::().get(&())?; + let maybe_sync_from_height = match mode + .clone() + .map(|c| c.into_owned()) + { + Some(Mode::S3(_)) => { + bail!( + "Database is configured in S3 
mode, but Local storage method was requested. If you would like to run in S3 mode, then please use a clean DB" + ); + } + _ => mode.map(|m| m.height()), + }; + let sync_from_height = maybe_sync_from_height.unwrap_or(sync_from); + StorageOrRemoteDB::new_storage(storage, sync_from_height) + } + StorageMethod::S3 { + bucket, + endpoint_url, + requester_pays, + } => { + let mode = storage.storage_as_ref::().get(&())?; + let maybe_sync_from_height = match mode + .clone() + .map(|c| c.into_owned()) + { + Some(Mode::Local(_)) => { + bail!( + "Database is configured in S3 mode, but Local storage method was requested. If you would like to run in S3 mode, then please use a clean DB" + ); + } + _ => mode.map(|m| m.height()), + }; + let sync_from_height = maybe_sync_from_height.unwrap_or(sync_from); + + let publish = true; + + StorageOrRemoteDB::new_s3( + storage, + &bucket, + requester_pays, + endpoint_url.clone(), + sync_from_height, + publish, + ) + .await + } + + StorageMethod::S3NoPublish { + bucket, + endpoint_url, + requester_pays, + } => { + let mode = storage.storage_as_ref::().get(&())?; + let maybe_sync_from_height = match mode + .clone() + .map(|c| c.into_owned()) + { + Some(Mode::Local(_)) => { + bail!( + "Database is configured in S3 mode, but Local storage method was requested. 
If you would like to run in S3 mode, then please use a clean DB" + ); + } + _ => mode.map(|m| m.height()), + }; + let sync_from_height = maybe_sync_from_height.unwrap_or(sync_from); + + let publish = false; + + StorageOrRemoteDB::new_s3( + storage, + &bucket, + requester_pays, + endpoint_url.clone(), + sync_from_height, + publish, + ) + .await + } + }; + Ok(BlockAggregator::new(api, db_adapter, block_source)) + } } #[allow(clippy::type_complexity)] - pub fn new_service( - config: &Config, + pub fn new_service( db: DB, serializer: S, onchain_db: OnchainDB, + receipts: Receipts, importer: BoxStream, - ) -> ServiceRunner< - BlockAggregator< - ProtobufAPI, - DB, - ImporterAndDbSource, - ProtoBlock, + config: Config, + genesis_block_height: BlockHeight, + ) -> anyhow::Result< + ServiceRunner< + UninitializedTask< + ProtobufAPI, + ImporterAndDbSource, + DB, + >, >, > where - DB: BlockAggregatorDB< - BlockRangeResponse = ::BlockRangeResponse, - Block = ProtoBlock, - >, - S: BlockSerializer + Clone + Send + Sync + 'static, + S: BlockSerializer + Clone + Send + Sync + 'static, OnchainDB: Send + Sync, - OnchainDB: StorageInspect, - OnchainDB: StorageInspect, - E: std::fmt::Debug + Send + Sync, + OnchainDB: StorageInspect, + OnchainDB: StorageInspect, + OnchainDB: HistoricalView, + Receipts: TxReceipts, + // Storage Constraints + DB: Modifiable + Debug, + DB: KeyValueInspect, + DB: StorageInspect, + for<'b> StorageTransaction<&'b mut DB>: + StorageMutate, + for<'b> StorageTransaction<&'b mut DB>: + StorageMutate, + DB: AtomicView, + T: Unpin + Send + Sync + KeyValueInspect + 'static + Debug, + StorageTransaction: + StorageInspect, + // Remote Constraints + DB: Send + Sync, + DB: Modifiable, + DB: StorageInspect, + for<'b> StorageTransaction<&'b mut DB>: + StorageMutate, { let addr = config.addr.to_string(); - let api = ProtobufAPI::new(addr); - let db_starting_height = BlockHeight::from(0); - let db_ending_height = None; + let api_buffer_size = config.api_buffer_size; + let 
api = ProtobufAPI::new(addr, api_buffer_size) + .map_err(|e| anyhow::anyhow!("Error creating API: {e}"))?; + let db_ending_height = onchain_db + .latest_height() + .and_then(BlockHeight::succ) + .unwrap_or(BlockHeight::from(0)); + let sync_from_height = config.sync_from.unwrap_or(genesis_block_height); let block_source = ImporterAndDbSource::new( importer, serializer, onchain_db, - db_starting_height, + receipts, + sync_from_height, db_ending_height, ); - let block_aggregator = BlockAggregator { - query: api, - database: db, + let uninitialized_task = UninitializedTask { + api, block_source, - new_block_subscriptions: Vec::new(), + storage: db, + config, + genesis_block_height, }; - ServiceRunner::new(block_aggregator) + let runner = ServiceRunner::new(uninitialized_task); + Ok(runner) } } -#[cfg(test)] -mod tests; - -pub mod block_aggregator; // TODO: this doesn't need to limited to the blocks, // but we can change the name later @@ -117,7 +331,7 @@ pub struct BlockAggregator { query: Api, database: DB, block_source: Blocks, - new_block_subscriptions: Vec>, + new_block_subscriptions: Vec>, } pub struct NewBlock { @@ -160,30 +374,3 @@ where Ok(()) } } - -#[async_trait::async_trait] -impl RunnableService - for BlockAggregator -where - Api: - BlockAggregatorApi + Send, - DB: BlockAggregatorDB + Send, - Blocks: BlockSource, - BlockRange: Send, - ::Block: Clone + Debug + Send, -{ - const NAME: &'static str = "BlockAggregatorService"; - type SharedData = (); - type Task = Self; - type TaskParams = (); - - fn shared_data(&self) -> Self::SharedData {} - - async fn into_task( - self, - _state_watcher: &StateWatcher, - _params: Self::TaskParams, - ) -> anyhow::Result { - Ok(self) - } -} diff --git a/crates/services/block_aggregator_api/src/protobuf_types.rs b/crates/services/block_aggregator_api/src/protobuf_types.rs index 648ac0e278d..256ed21a634 100644 --- a/crates/services/block_aggregator_api/src/protobuf_types.rs +++ 
b/crates/services/block_aggregator_api/src/protobuf_types.rs @@ -1 +1 @@ -tonic::include_proto!("blockaggregator"); +pub use fuel_core_protobuf::*; diff --git a/crates/services/block_aggregator_api/src/result.rs b/crates/services/block_aggregator_api/src/result.rs index ab91f71ece0..3a49b0b58ff 100644 --- a/crates/services/block_aggregator_api/src/result.rs +++ b/crates/services/block_aggregator_api/src/result.rs @@ -9,6 +9,22 @@ pub enum Error { DB(anyhow::Error), #[error("Serialization error: {0}")] Serialization(anyhow::Error), + #[error("Receipt error: {0}")] + Receipt(anyhow::Error), +} + +impl Error { + pub fn db_error>(err: T) -> Self { + Error::DB(err.into()) + } + + pub fn block_source_error>(err: T) -> Self { + Error::BlockSource(err.into()) + } + + pub fn receipt_error>(err: T) -> Self { + Error::Receipt(err.into()) + } } pub type Result = core::result::Result; diff --git a/crates/services/block_aggregator_api/src/tests.rs b/crates/services/block_aggregator_api/src/tests.rs index d8b9a8744e5..dc00e5a0efe 100644 --- a/crates/services/block_aggregator_api/src/tests.rs +++ b/crates/services/block_aggregator_api/src/tests.rs @@ -4,7 +4,7 @@ use super::*; use crate::{ api::BlockAggregatorQuery, blocks::{ - Block, + BlockBytes, BlockSourceEvent, }, result::{ @@ -34,7 +34,7 @@ use tokio::{ time::error::Elapsed, }; -type BlockRangeResponse = BoxStream; +type BlockRangeResponse = BoxStream; struct FakeApi { receiver: Receiver>, @@ -57,7 +57,7 @@ impl BlockAggregatorApi for FakeApi { } struct FakeDB { - map: Arc>>, + map: Arc>>, } impl FakeDB { @@ -66,20 +66,21 @@ impl FakeDB { Self { map } } - fn add_block(&mut self, height: BlockHeight, block: Block) { + fn add_block(&mut self, height: BlockHeight, block: BlockBytes) { self.map.lock().unwrap().insert(height, block); } - fn clone_inner(&self) -> Arc>> { + fn clone_inner(&self) -> Arc>> { self.map.clone() } } impl BlockAggregatorDB for FakeDB { - type Block = Block; + type Block = BlockBytes; type 
BlockRangeResponse = BlockRangeResponse; - async fn store_block(&mut self, id: BlockHeight, block: Block) -> Result<()> { + async fn store_block(&mut self, block: BlockSourceEvent) -> Result<()> { + let (id, block) = block.into_inner(); self.map.lock().unwrap().insert(id, block); Ok(()) } @@ -88,7 +89,7 @@ impl BlockAggregatorDB for FakeDB { &self, first: BlockHeight, last: BlockHeight, - ) -> Result> { + ) -> Result> { let mut blocks = vec![]; let first: u32 = first.into(); let last: u32 = last.into(); @@ -105,19 +106,19 @@ impl BlockAggregatorDB for FakeDB { Ok(Box::pin(futures::stream::iter(blocks))) } - async fn get_current_height(&self) -> Result { + async fn get_current_height(&self) -> Result> { let map = self.map.lock().unwrap(); - let max_height = map.keys().max().cloned().unwrap_or(BlockHeight::from(0u32)); + let max_height = map.keys().max().cloned(); Ok(max_height) } } struct FakeBlockSource { - blocks: Receiver>, + blocks: Receiver>, } impl FakeBlockSource { - fn new() -> (Self, Sender>) { + fn new() -> (Self, Sender>) { let (_sender, receiver) = tokio::sync::mpsc::channel(1); let _self = Self { blocks: receiver }; (_self, _sender) @@ -125,9 +126,9 @@ impl FakeBlockSource { } impl BlockSource for FakeBlockSource { - type Block = Block; + type Block = BlockBytes; - async fn next_block(&mut self) -> Result> { + async fn next_block(&mut self) -> Result> { self.blocks .recv() .await @@ -145,9 +146,9 @@ async fn run__get_block_range__returns_expected_blocks() { // given let (api, sender) = FakeApi::new(); let mut db = FakeDB::new(); - db.add_block(1.into(), Block::random(&mut rng)); - db.add_block(2.into(), Block::random(&mut rng)); - db.add_block(3.into(), Block::random(&mut rng)); + db.add_block(1.into(), BlockBytes::random(&mut rng)); + db.add_block(2.into(), BlockBytes::random(&mut rng)); + db.add_block(3.into(), BlockBytes::random(&mut rng)); let (source, _block_sender) = FakeBlockSource::new(); @@ -161,7 +162,7 @@ async fn 
run__get_block_range__returns_expected_blocks() { // then let stream = response.await.unwrap(); - let blocks = stream.collect::>().await; + let blocks = stream.collect::>().await; // TODO: Check values assert_eq!(blocks.len(), 2); @@ -180,7 +181,7 @@ async fn run__new_block_gets_added_to_db() { let (source, source_sender) = FakeBlockSource::new(); let mut srv = BlockAggregator::new(api, db, source); - let block = Block::random(&mut rng); + let block = BlockBytes::random(&mut rng); let id = BlockHeight::from(123u32); let mut watcher = StateWatcher::started(); @@ -202,9 +203,9 @@ async fn run__get_current_height__returns_expected_height() { let (api, sender) = FakeApi::new(); let mut db = FakeDB::new(); let expected_height = BlockHeight::from(3u32); - db.add_block(1.into(), Block::random(&mut rng)); - db.add_block(2.into(), Block::random(&mut rng)); - db.add_block(expected_height, Block::random(&mut rng)); + db.add_block(1.into(), BlockBytes::random(&mut rng)); + db.add_block(2.into(), BlockBytes::random(&mut rng)); + db.add_block(expected_height, BlockBytes::random(&mut rng)); let (source, _block_sender) = FakeBlockSource::new(); let mut srv = BlockAggregator::new(api, db, source); @@ -218,7 +219,7 @@ async fn run__get_current_height__returns_expected_height() { // then tokio::time::sleep(tokio::time::Duration::from_millis(100)).await; - let height = response.await.unwrap(); + let height = response.await.unwrap().unwrap(); assert_eq!(expected_height, height); // cleanup @@ -234,7 +235,7 @@ async fn run__new_block_subscription__sends_new_block() { let (source, source_sender) = FakeBlockSource::new(); let mut srv = BlockAggregator::new(api, db, source); - let expected_block = Block::random(&mut rng); + let expected_block = BlockBytes::random(&mut rng); let expected_height = BlockHeight::from(123u32); let mut watcher = StateWatcher::started(); let (query, response) = BlockAggregatorQuery::new_block_subscription(); @@ -248,7 +249,7 @@ async fn 
run__new_block_subscription__sends_new_block() { // then let actual_block = await_response_with_timeout(response).await.unwrap(); - assert_eq!(expected_block, actual_block); + assert_eq!((expected_height, expected_block), actual_block); // cleanup drop(source_sender); @@ -263,7 +264,7 @@ async fn run__new_block_subscription__does_not_send_syncing_blocks() { let (source, source_sender) = FakeBlockSource::new(); let mut srv = BlockAggregator::new(api, db, source); - let block = Block::random(&mut rng); + let block = BlockBytes::random(&mut rng); let height = BlockHeight::from(123u32); let mut watcher = StateWatcher::started(); let (query, response) = BlockAggregatorQuery::new_block_subscription(); diff --git a/crates/services/shared-sequencer/src/service.rs b/crates/services/shared-sequencer/src/service.rs index c81bf1b63be..b807b0a9229 100644 --- a/crates/services/shared-sequencer/src/service.rs +++ b/crates/services/shared-sequencer/src/service.rs @@ -178,7 +178,7 @@ where async fn run(&mut self, watcher: &mut StateWatcher) -> TaskNextAction { if !self.config.enabled { let _ = watcher.while_started().await; - return TaskNextAction::Stop; + return TaskNextAction::Stop } if let Err(err) = self.ensure_account_metadata().await { diff --git a/crates/services/txpool_v2/src/selection_algorithms/ratio_tip_gas.rs b/crates/services/txpool_v2/src/selection_algorithms/ratio_tip_gas.rs index e944959f3f3..05465a0f751 100644 --- a/crates/services/txpool_v2/src/selection_algorithms/ratio_tip_gas.rs +++ b/crates/services/txpool_v2/src/selection_algorithms/ratio_tip_gas.rs @@ -206,7 +206,7 @@ where < constraints.minimal_gas_price; if less_price { - continue; + continue } let not_enough_gas = stored_transaction.transaction.max_gas() > gas_left; @@ -214,7 +214,7 @@ where stored_transaction.transaction.metered_bytes_size() > space_left; if not_enough_gas || too_big_tx { - continue; + continue } gas_left = diff --git a/crates/services/txpool_v2/src/storage/graph.rs 
b/crates/services/txpool_v2/src/storage/graph.rs index d042f445d0d..1a0a741ef28 100644 --- a/crates/services/txpool_v2/src/storage/graph.rs +++ b/crates/services/txpool_v2/src/storage/graph.rs @@ -221,17 +221,17 @@ impl GraphStorage { if to != i_owner { return Err(Error::InputValidation( InputValidationError::NotInsertedIoWrongOwner, - )); + )) } if amount != i_amount { return Err(Error::InputValidation( InputValidationError::NotInsertedIoWrongAmount, - )); + )) } if asset_id != i_asset_id { return Err(Error::InputValidation( InputValidationError::NotInsertedIoWrongAssetId, - )); + )) } } Output::Contract(_) => { @@ -687,10 +687,10 @@ impl Storage for GraphStorage { if extracted_outputs .coin_exists(utxo_id, owner, amount, asset_id) { - continue; + continue } missing_inputs.push(MissingInput::Utxo(*utxo_id)); - continue; + continue } Err(e) => { return Err(InputValidationErrorType::Inconsistency( @@ -746,10 +746,10 @@ impl Storage for GraphStorage { Ok(true) => {} Ok(false) => { if extracted_outputs.contract_exists(contract_id) { - continue; + continue } missing_inputs.push(MissingInput::Contract(*contract_id)); - continue; + continue } Err(e) => { return Err(InputValidationErrorType::Inconsistency( diff --git a/crates/types/Cargo.toml b/crates/types/Cargo.toml index f23b0e2126c..5792af72134 100644 --- a/crates/types/Cargo.toml +++ b/crates/types/Cargo.toml @@ -44,7 +44,7 @@ anyhow = { workspace = true } aws-sdk-kms = { workspace = true, optional = true } bs58 = { version = "0.5", optional = true } -derive_more = { version = "0.99" } +derive_more = { workspace = true } ed25519 = { workspace = true, default-features = false } ed25519-dalek = { workspace = true, default-features = false } educe = { workspace = true, optional = true } @@ -62,7 +62,7 @@ tai64 = { version = "=4.0.0", features = ["serde"] } zeroize = "1.5" [dev-dependencies] -aws-config = { version = "1.1.7", features = ["behavior-version-latest"] } +aws-config = { workspace = true, features = 
["behavior-version-latest"] } fuel-core-types = { path = ".", features = ["test-helpers", "serde"] } postcard = { workspace = true } tokio = { workspace = true, features = ["macros"] } diff --git a/crates/types/src/blockchain/header.rs b/crates/types/src/blockchain/header.rs index 896cab86c3a..6620ee27cdf 100644 --- a/crates/types/src/blockchain/header.rs +++ b/crates/types/src/blockchain/header.rs @@ -79,17 +79,6 @@ impl BlockHeader { } } - /// Get the application portion of the header. - pub fn application_v1( - &self, - ) -> Option<&ApplicationHeader> { - match self { - BlockHeader::V1(header) => Some(header.application()), - #[cfg(feature = "fault-proving")] - BlockHeader::V2(_header) => None, - } - } - /// Get the consensus portion of the header. pub fn consensus(&self) -> &ConsensusHeader { match self { diff --git a/crates/types/src/test_helpers.rs b/crates/types/src/test_helpers.rs index 37e187f6b81..014d8d4b563 100644 --- a/crates/types/src/test_helpers.rs +++ b/crates/types/src/test_helpers.rs @@ -1,12 +1,18 @@ +#[cfg(feature = "fault-proving")] +use crate::fuel_types::ChainId; use crate::{ blockchain::{ block::Block, header::{ - GeneratedConsensusFields, + ApplicationHeader, + BlockHeader, + BlockHeaderV1, + PartialBlockHeader, generate_txns_root, + v1::GeneratedApplicationFieldsV1, }, - primitives::DaBlockHeight, }, + fuel_asm::PanicInstruction, fuel_merkle::binary::root_calculator::MerkleRootCalculator, fuel_tx::{ BlobBody, @@ -18,6 +24,8 @@ use crate::{ Input, MessageId, Output, + Receipt, + ScriptExecutionResult, StorageSlot, Transaction, TransactionBuilder, @@ -38,6 +46,7 @@ use crate::{ BlobId, BlockHeight, Nonce, + SubAssetId, }, fuel_vm::{ Contract, @@ -46,7 +55,6 @@ use crate::{ }; use proptest::prelude::*; use rand::Rng; -use tai64::Tai64; /// Helper function to create a contract creation transaction /// from a given contract bytecode. 
@@ -72,7 +80,6 @@ pub fn create_contract( (tx, contract_id) } -#[allow(unused)] fn arb_txs() -> impl Strategy> { prop::collection::vec(arb_transaction(), 0..10) } @@ -427,6 +434,27 @@ prop_compose! { } } +fn arb_contract_id() -> impl Strategy { + any::<[u8; 32]>().prop_map(ContractId::new) +} + +fn arb_sub_asset_id() -> impl Strategy { + any::<[u8; 32]>().prop_map(SubAssetId::new) +} + +fn arb_panic_instruction() -> impl Strategy { + any::().prop_map(PanicInstruction::from) +} + +fn arb_script_execution_result() -> impl Strategy { + prop_oneof![ + Just(ScriptExecutionResult::Success), + Just(ScriptExecutionResult::Revert), + Just(ScriptExecutionResult::Panic), + any::().prop_map(ScriptExecutionResult::GenericFailure), + ] +} + fn arb_msg_ids() -> impl Strategy> { prop::collection::vec(arb_msg_id(), 0..10usize) } @@ -485,7 +513,7 @@ fn arb_create_transaction() -> impl Strategy { ) .prop_map( |(policies, salt_bytes, storage_slots, inputs, outputs, witnesses)| { - let create = crate::fuel_tx::Transaction::create( + let create = Transaction::create( 0, policies, Salt::from(salt_bytes), @@ -517,7 +545,7 @@ fn arb_mint_transaction() -> impl Strategy { mint_asset_id, gas_price, )| { - let mint = crate::fuel_tx::Transaction::mint( + let mint = Transaction::mint( tx_pointer, input_contract, output_contract, @@ -613,41 +641,21 @@ fn arb_blob_transaction() -> impl Strategy { }) } -prop_compose! { - fn arb_consensus_header()( - prev_root in any::<[u8; 32]>(), - time in any::(), - ) -> crate::blockchain::header::ConsensusHeader { - crate::blockchain::header::ConsensusHeader { - prev_root: prev_root.into(), - height: BlockHeight::new(0), - time: Tai64(time), - generated: GeneratedConsensusFields::default(), - } - } -} - prop_compose! 
{ /// Generate an arbitrary block with a variable number of transactions pub fn arb_block()( txs in arb_txs(), da_height in any::(), - consensus_parameter_version in any::(), + consensus_parameters_version in any::(), state_transition_bytecode_version in any::(), msg_ids in arb_msg_ids(), event_root in any::<[u8; 32]>(), - mut consensus_header in arb_consensus_header(), + chain_id in any::(), ) -> (Block, Vec, Bytes32) { - let mut fuel_block = Block::default(); - - *fuel_block.transactions_mut() = txs; - - fuel_block.header_mut().set_da_height(DaBlockHeight(da_height)); - fuel_block.header_mut().set_consensus_parameters_version(consensus_parameter_version); - fuel_block.header_mut().set_state_transition_bytecode_version(state_transition_bytecode_version); - - let count = fuel_block.transactions().len().try_into().expect("we shouldn't have more than u16::MAX transactions"); - let msg_root = msg_ids + let transactions_count = txs.len().try_into().expect("we shouldn't have more than u16::MAX transactions"); + let message_receipt_count = msg_ids.len().try_into().expect("we shouldn't have more than u32::MAX messages"); + let transactions_root = generate_txns_root(&txs); + let message_outbox_root = msg_ids .iter() .fold(MerkleRootCalculator::new(), |mut tree, id| { tree.push(id.as_ref()); @@ -655,19 +663,177 @@ prop_compose! 
{ }) .root() .into(); - let tx_root = generate_txns_root(fuel_block.transactions()); - let event_root = event_root.into(); - fuel_block.header_mut().set_transactions_count(count); - fuel_block.header_mut().set_message_receipt_count(msg_ids.len().try_into().expect("we shouldn't have more than u32::MAX messages")); - fuel_block.header_mut().set_transaction_root(tx_root); - fuel_block.header_mut().set_message_outbox_root(msg_root); - fuel_block.header_mut().set_event_inbox_root(event_root); - - // Consensus - // TODO: Include V2 Application with V2 Header - let application_hash = fuel_block.header().application_v1().unwrap().hash(); - consensus_header.generated.application_hash = application_hash; - fuel_block.header_mut().set_consensus_header(consensus_header); + let event_root: Bytes32 = event_root.into(); + let header = { + let mut default = BlockHeaderV1::default(); + default.set_application_header(ApplicationHeader { + da_height: da_height.into(), + consensus_parameters_version, + state_transition_bytecode_version, + generated: GeneratedApplicationFieldsV1 { + transactions_count, + message_receipt_count, + transactions_root, + message_outbox_root, + event_inbox_root: event_root, + }, + }); + + BlockHeader::V1(default) + }; + let partial_block_header = PartialBlockHeader::from(&header); + #[cfg(feature = "fault-proving")] + let fuel_block = { + let chain_id = ChainId::new(chain_id); + Block::new(partial_block_header, txs, &msg_ids, event_root, &chain_id).unwrap() + }; + #[cfg(not(feature = "fault-proving"))] + let fuel_block = { + let _ = chain_id; + Block::new(partial_block_header, txs, &msg_ids, event_root).unwrap() + }; (fuel_block, msg_ids, event_root) } } + +fn arb_receipt() -> impl Strategy { + prop_oneof![ + ( + arb_contract_id(), + arb_contract_id(), + any::(), + arb_asset_id(), + any::(), + any::(), + any::(), + any::(), + any::(), + ) + .prop_map( + |(id, to, amount, asset_id, gas, param1, param2, pc, is)| { + Receipt::call(id, to, amount, asset_id, gas, 
param1, param2, pc, is) + }, + ), + (arb_contract_id(), any::(), any::(), any::(),) + .prop_map(|(id, val, pc, is)| Receipt::ret(id, val, pc, is)), + ( + arb_contract_id(), + any::(), + any::(), + any::(), + prop::collection::vec(any::(), 0..64), + ) + .prop_map(|(id, ptr, pc, is, data)| Receipt::return_data( + id, ptr, pc, is, data, + )), + ( + arb_contract_id(), + arb_panic_instruction(), + any::(), + any::(), + prop::option::of(arb_contract_id()), + ) + .prop_map(|(id, reason, pc, is, panic_contract)| { + Receipt::panic(id, reason, pc, is).with_panic_contract_id(panic_contract) + }), + (arb_contract_id(), any::(), any::(), any::(),) + .prop_map(|(id, ra, pc, is)| Receipt::revert(id, ra, pc, is)), + ( + arb_contract_id(), + any::(), + any::(), + any::(), + any::(), + any::(), + any::(), + ) + .prop_map(|(id, ra, rb, rc, rd, pc, is)| { + Receipt::log(id, ra, rb, rc, rd, pc, is) + }), + ( + arb_contract_id(), + any::(), + any::(), + any::(), + any::(), + any::(), + prop::collection::vec(any::(), 0..64), + ) + .prop_map(|(id, ra, rb, ptr, pc, is, data)| { + Receipt::log_data(id, ra, rb, ptr, pc, is, data) + }), + ( + arb_contract_id(), + arb_contract_id(), + any::(), + arb_asset_id(), + any::(), + any::(), + ) + .prop_map(|(id, to, amount, asset_id, pc, is)| { + Receipt::transfer(id, to, amount, asset_id, pc, is) + }), + ( + arb_contract_id(), + arb_address(), + any::(), + arb_asset_id(), + any::(), + any::(), + ) + .prop_map(|(id, to, amount, asset_id, pc, is)| { + Receipt::transfer_out(id, to, amount, asset_id, pc, is) + }), + (arb_script_execution_result(), any::()) + .prop_map(|(result, gas_used)| Receipt::script_result(result, gas_used),), + ( + arb_address(), + arb_address(), + any::(), + arb_nonce(), + prop::collection::vec(any::(), 0..64), + ) + .prop_map(|(sender, recipient, amount, nonce, data)| { + let len = data.len() as u64; + let digest = Output::message_digest(&data); + Receipt::message_out_with_len( + sender, + recipient, + amount, + nonce, + len, + 
digest, + Some(data), + ) + }), + ( + arb_sub_asset_id(), + arb_contract_id(), + any::(), + any::(), + any::(), + ) + .prop_map(|(sub_id, contract_id, val, pc, is)| { + Receipt::mint(sub_id, contract_id, val, pc, is) + }), + ( + arb_sub_asset_id(), + arb_contract_id(), + any::(), + any::(), + any::(), + ) + .prop_map(|(sub_id, contract_id, val, pc, is)| { + Receipt::burn(sub_id, contract_id, val, pc, is) + }), + ] +} + +prop_compose! { + /// generates a list of random receipts + pub fn arb_receipts()( + receipts in prop::collection::vec(arb_receipt(), 0..10), + ) -> Vec { + receipts + } +} diff --git a/tests/Cargo.toml b/tests/Cargo.toml index caf48a62cd8..2cf55a94f58 100644 --- a/tests/Cargo.toml +++ b/tests/Cargo.toml @@ -28,16 +28,16 @@ fault-proving = [ "fuel-core-benches/fault-proving", ] rpc = ["fuel-core/rpc", "fuel-core-bin/rpc"] +local_rpc = [] [dependencies] anyhow = { workspace = true } -async-trait = { workspace = true } -aws-config = { version = "1.1.7", features = [ +aws-config = { workspace = true, features = [ "behavior-version-latest", ], optional = true } -aws-sdk-kms = { version = "1.37.0", optional = true } +aws-sdk-kms = { workspace = true, optional = true } +aws-sdk-s3 = { workspace = true } clap = { workspace = true } -cynic = { workspace = true } fuel-core = { path = "../crates/fuel-core", default-features = false, features = [ "p2p", "relayer", @@ -72,18 +72,17 @@ fuel-core-txpool = { path = "../crates/services/txpool_v2", features = [ fuel-core-types = { path = "../crates/types", features = ["test-helpers"] } fuel-core-upgradable-executor = { path = "../crates/services/upgradable-executor" } futures = { workspace = true } -hex = { workspace = true } hyper = { workspace = true, features = ["server"] } insta = { workspace = true } itertools = { workspace = true } k256 = { version = "0.13.3", features = ["ecdsa-core"] } postcard = { workspace = true } primitive-types = { workspace = true, default-features = false } +prost = { workspace = true 
} rand = { workspace = true } reqwest = { workspace = true } rstest = "0.15" serde_json = { workspace = true } -spki = "0.7.3" tempfile = { workspace = true } test-case = { workspace = true } test-helpers = { path = "./test-helpers" } @@ -99,6 +98,8 @@ url = { workspace = true } alloy-primitives = { workspace = true } alloy-provider = { workspace = true, default-features = false, features = ["reqwest-rustls-tls"] } alloy-rpc-types-eth = { workspace = true } +aws-config = { workspace = true } +flate2 = { workspace = true } fuel-core-executor = { workspace = true, features = ["limited-tx-count"] } pretty_assertions = "1.4" proptest = { workspace = true } diff --git a/tests/tests/lib.rs b/tests/tests/lib.rs index 5e6b7458d9f..05aacb115bd 100644 --- a/tests/tests/lib.rs +++ b/tests/tests/lib.rs @@ -59,7 +59,11 @@ mod relayer; #[cfg(not(feature = "only-p2p"))] mod required_fuel_block_height_extension; #[cfg(feature = "rpc")] +#[cfg(feature = "local_rpc")] mod rpc; +#[cfg(feature = "rpc")] +#[cfg(not(feature = "local_rpc"))] +mod rpc_s3; #[cfg(not(feature = "only-p2p"))] mod snapshot; diff --git a/tests/tests/rpc.rs b/tests/tests/rpc.rs index aa6c564834b..9aea72823e6 100644 --- a/tests/tests/rpc.rs +++ b/tests/tests/rpc.rs @@ -1,4 +1,5 @@ #![allow(non_snake_case)] + use fuel_core::{ database::Database, service::{ @@ -6,24 +7,28 @@ use fuel_core::{ FuelService, }, }; -use fuel_core_block_aggregator_api::protobuf_types::{ - BlockHeightRequest as ProtoBlockHeightRequest, - BlockRangeRequest as ProtoBlockRangeRequest, - NewBlockSubscriptionRequest as ProtoNewBlockSubscriptionRequest, - block::VersionedBlock as ProtoVersionedBlock, - block_aggregator_client::BlockAggregatorClient as ProtoBlockAggregatorClient, - block_response::Payload as ProtoPayload, - header::VersionedHeader as ProtoVersionedHeader, +use fuel_core_block_aggregator_api::{ + blocks::importer_and_db_source::serializer_adapter::proto_to_fuel_conversions::fuel_block_from_protobuf, + protobuf_types::{ + 
BlockHeightRequest as ProtoBlockHeightRequest, + BlockRangeRequest as ProtoBlockRangeRequest, + NewBlockSubscriptionRequest as ProtoNewBlockSubscriptionRequest, + block_aggregator_client::BlockAggregatorClient as ProtoBlockAggregatorClient, + block_response::Payload as ProtoPayload, + }, }; use fuel_core_client::client::FuelClient; -use fuel_core_types::fuel_tx::*; +use fuel_core_types::{ + fuel_tx::*, + fuel_types::BlockHeight, +}; use futures::StreamExt; -use test_helpers::client_ext::ClientExt; +use tokio::time::sleep; #[tokio::test(flavor = "multi_thread")] -async fn get_block_range__can_get_serialized_block_from_rpc() { - let config = Config::local_node(); - let rpc_url = config.rpc_config.addr; +async fn get_block_range__can_get_serialized_block_from_rpc__literal() { + let config = Config::local_node_with_rpc(); + let rpc_url = config.rpc_config.clone().unwrap().addr; let srv = FuelService::from_database(Database::default(), config.clone()) .await @@ -39,16 +44,9 @@ async fn get_block_range__can_get_serialized_block_from_rpc() { .await .expect("could not connect to server"); - let expected_block = graphql_client - .full_block_by_height(1) - .await - .unwrap() - .unwrap(); - let expected_header = expected_block.header; - // when let request = ProtoBlockRangeRequest { start: 1, end: 1 }; - let actual_block = if let Some(ProtoPayload::Literal(block)) = rpc_client + let proto_block = if let Some(ProtoPayload::Literal(block)) = rpc_client .get_block_range(request) .await .unwrap() @@ -63,20 +61,37 @@ async fn get_block_range__can_get_serialized_block_from_rpc() { } else { panic!("expected literal block payload"); }; - let ProtoVersionedBlock::V1(v1_block) = actual_block.versioned_block.unwrap(); - let actual_height = match v1_block.header.unwrap().versioned_header.unwrap() { - ProtoVersionedHeader::V1(v1_header) => v1_header.height, - ProtoVersionedHeader::V2(v2_header) => v2_header.height, - }; + let (actual_block, receipts) = + 
fuel_block_from_protobuf(proto_block, &[], Bytes32::default()).unwrap(); + let actual_height = actual_block.header().height(); + // then - assert_eq!(expected_header.height.0, actual_height); + let expected_height = BlockHeight::new(1); + assert_eq!(&expected_height, actual_height); + + assert!( + matches!( + receipts[1], + Receipt::ScriptResult { + result: ScriptExecutionResult::Success, + .. + } + ), + "should have a script result receipt, received: {:?}", + receipts + ); + assert!( + matches!(receipts[0], Receipt::Return { .. }), + "should have a return receipt, received: {:?}", + receipts + ); } #[tokio::test(flavor = "multi_thread")] async fn get_block_height__can_get_value_from_rpc() { - let config = Config::local_node(); - let rpc_url = config.rpc_config.addr; + let config = Config::local_node_with_rpc(); + let rpc_url = config.rpc_config.clone().unwrap().addr; // given let srv = FuelService::from_database(Database::default(), config.clone()) @@ -94,10 +109,11 @@ async fn get_block_height__can_get_value_from_rpc() { .expect("could not connect to server"); // when + sleep(std::time::Duration::from_secs(1)).await; let request = ProtoBlockHeightRequest {}; - let expected_height = 1; + let expected_height = Some(1); let actual_height = rpc_client - .get_block_height(request) + .get_synced_block_height(request) .await .unwrap() .into_inner() @@ -109,8 +125,8 @@ async fn get_block_height__can_get_value_from_rpc() { #[tokio::test(flavor = "multi_thread")] async fn new_block_subscription__can_get_expect_block() { - let config = Config::local_node(); - let rpc_url = config.rpc_config.addr; + let config = Config::local_node_with_rpc(); + let rpc_url = config.rpc_config.clone().unwrap().addr; let srv = FuelService::from_database(Database::default(), config.clone()) .await @@ -137,20 +153,35 @@ async fn new_block_subscription__can_get_expect_block() { let next = tokio::time::timeout(std::time::Duration::from_secs(1), stream.next()) .await .unwrap(); - let actual_block = 
+ let proto_block = if let Some(ProtoPayload::Literal(block)) = next.unwrap().unwrap().payload { block } else { panic!("expected literal block payload"); }; - let ProtoVersionedBlock::V1(v1_block) = actual_block.versioned_block.unwrap(); - let actual_height = match v1_block.header.unwrap().versioned_header.unwrap() { - ProtoVersionedHeader::V1(v1_header) => v1_header.height, - ProtoVersionedHeader::V2(v2_header) => v2_header.height, - }; + let (actual_block, receipts) = + fuel_block_from_protobuf(proto_block, &[], Bytes32::default()).unwrap(); + let actual_height = actual_block.header().height(); // then - let expected_height = 1; - assert_eq!(expected_height, actual_height); + let expected_height = BlockHeight::new(1); + assert_eq!(&expected_height, actual_height); + + assert!( + matches!( + receipts[1], + Receipt::ScriptResult { + result: ScriptExecutionResult::Success, + .. + } + ), + "should have a script result receipt, received: {:?}", + receipts + ); + assert!( + matches!(receipts[0], Receipt::Return { .. 
}), + "should have a return receipt, received: {:?}", + receipts + ); } diff --git a/tests/tests/rpc_s3.rs b/tests/tests/rpc_s3.rs new file mode 100644 index 00000000000..727fcf34c20 --- /dev/null +++ b/tests/tests/rpc_s3.rs @@ -0,0 +1,449 @@ +#![allow(non_snake_case)] + +use aws_config::{ + BehaviorVersion, + default_provider::credentials::DefaultCredentialsChain, +}; +use aws_sdk_s3::Client; +use flate2::read::GzDecoder; +use fuel_core::{ + database::Database, + service::{ + Config, + FuelService, + }, +}; +use fuel_core_block_aggregator_api::{ + blocks::importer_and_db_source::serializer_adapter::proto_to_fuel_conversions::fuel_block_from_protobuf, + db::remote_cache::block_height_to_key, + integration::StorageMethod, + protobuf_types::{ + Block as ProtoBlock, + BlockHeightRequest as ProtoBlockHeightRequest, + BlockRangeRequest as ProtoBlockRangeRequest, + RemoteBlockResponse as ProtoRemoteBlockResponse, + RemoteS3Bucket, + block_aggregator_client::BlockAggregatorClient as ProtoBlockAggregatorClient, + block_response::Payload as ProtoPayload, + remote_block_response::Location, + }, +}; +use fuel_core_client::client::FuelClient; +use fuel_core_types::{ + fuel_tx::*, + fuel_types::BlockHeight, +}; +use futures::StreamExt; +use prost::bytes::Bytes; +use std::io::Read; +use test_helpers::client_ext::ClientExt; +use tokio::time::sleep; + +const AWS_ENDPOINT_URL: &str = "http://127.0.0.1:4566"; + +macro_rules! 
require_env_var_or_panic { + ($($var:literal),+) => { + $(if std::env::var($var).is_err() { + panic!("missing env var: {}", $var); + })+ + }; +} + +#[tokio::test(flavor = "multi_thread")] +async fn get_block_range__can_get_serialized_block_from_rpc__remote() { + // setup + require_env_var_or_panic!("AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY", "AWS_REGION"); + ensure_bucket_exists().await; + clean_s3_bucket().await; + + // given + let endpoint_url = AWS_ENDPOINT_URL.to_string(); + let storage_method = StorageMethod::S3 { + bucket: "test-bucket".to_string(), + endpoint_url: Some(endpoint_url), + requester_pays: false, + }; + let config = Config::local_node_with_rpc_and_storage_method(storage_method); + let rpc_url = config.rpc_config.clone().unwrap().addr; + + let srv = FuelService::from_database(Database::default(), config.clone()) + .await + .unwrap(); + + let graphql_client = FuelClient::from(srv.bound_address); + + let tx = Transaction::default_test_tx(); + let _ = graphql_client.submit_and_await_commit(&tx).await.unwrap(); + + let rpc_url = format!("http://{}", rpc_url); + let mut rpc_client = ProtoBlockAggregatorClient::connect(rpc_url) + .await + .expect("could not connect to server"); + + let expected_block = graphql_client + .full_block_by_height(1) + .await + .unwrap() + .unwrap(); + let expected_header = expected_block.header; + let expected_height = BlockHeight::new(expected_header.height.0); + + // when + let request = ProtoBlockRangeRequest { start: 1, end: 1 }; + let remote_info = if let Some(ProtoPayload::Remote(remote_info)) = rpc_client + .get_block_range(request) + .await + .unwrap() + .into_inner() + .next() + .await + .unwrap() + .unwrap() + .payload + { + remote_info + } else { + panic!("expected remote block payload"); + }; + + // then + let key = block_height_to_key(&expected_height); + let expected = ProtoRemoteBlockResponse { + location: Some(Location::S3(RemoteS3Bucket { + bucket: "test-bucket".to_string(), + key, + requester_pays: false, 
+ endpoint: Some(AWS_ENDPOINT_URL.to_string()), + })), + }; + assert_eq!(expected, remote_info); + + // cleanup + clean_s3_bucket().await; +} + +#[tokio::test(flavor = "multi_thread")] +async fn get_block_height__can_get_value_from_rpc() { + require_env_var_or_panic!("AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY", "AWS_REGION"); + + // setup + ensure_bucket_exists().await; + clean_s3_bucket().await; + let endpoint_url = AWS_ENDPOINT_URL.to_string(); + let storage_method = StorageMethod::S3 { + bucket: "test-bucket".to_string(), + endpoint_url: Some(endpoint_url), + requester_pays: false, + }; + let config = Config::local_node_with_rpc_and_storage_method(storage_method); + let rpc_url = config.rpc_config.clone().unwrap().addr; + + // given + let srv = FuelService::from_database(Database::default(), config.clone()) + .await + .unwrap(); + + let graphql_client = FuelClient::from(srv.bound_address); + + let tx = Transaction::default_test_tx(); + let _ = graphql_client.submit_and_await_commit(&tx).await.unwrap(); + + let rpc_url = format!("http://{}", rpc_url); + let mut rpc_client = ProtoBlockAggregatorClient::connect(rpc_url) + .await + .expect("could not connect to server"); + + // when + sleep(std::time::Duration::from_secs(1)).await; + let request = ProtoBlockHeightRequest {}; + let expected_height = Some(1); + let actual_height = rpc_client + .get_synced_block_height(request) + .await + .unwrap() + .into_inner() + .height; + + // then + assert_eq!(expected_height, actual_height); + + // cleanup + clean_s3_bucket().await; +} + +async fn aws_client() -> Client { + let credentials = DefaultCredentialsChain::builder().build().await; + let _aws_region = + std::env::var("AWS_REGION").expect("AWS_REGION env var must be set"); + let sdk_config = aws_config::defaults(BehaviorVersion::latest()) + .credentials_provider(credentials) + .endpoint_url(AWS_ENDPOINT_URL) + .load() + .await; + let builder = aws_sdk_s3::config::Builder::from(&sdk_config); + let config = 
builder.force_path_style(true).build(); + Client::from_conf(config) +} + +async fn get_block_from_s3_bucket() -> Bytes { + let client = aws_client().await; + let bucket = "test-bucket".to_string(); + let key = block_height_to_key(&BlockHeight::new(1)); + tracing::info!("getting block from bucket: {} with key {}", bucket, key); + let req = client.get_object().bucket(&bucket).key(&key); + let obj = req.send().await.unwrap(); + let message = format!( + "should be able to get block from bucket: {} with key {}", + bucket, key + ); + obj.body.collect().await.expect(&message).into_bytes() +} + +async fn block_found_in_s3_bucket() -> bool { + let client = aws_client().await; + let bucket = "test-bucket".to_string(); + let key = block_height_to_key(&BlockHeight::new(1)); + tracing::info!( + "checking if block is in bucket: {} with key {}", + bucket, + key + ); + let req = client.get_object().bucket(&bucket).key(&key); + req.send().await.is_ok() +} + +async fn ensure_bucket_exists() { + let client = aws_client().await; + let bucket = "test-bucket"; + let req = client.create_bucket().bucket(bucket); + let expect_message = format!("should be able to create bucket: {}", bucket); + let _ = req.send().await.expect(&expect_message); +} + +async fn clean_s3_bucket() { + let client = aws_client().await; + let bucket = "test-bucket"; + let req = client.list_objects().bucket(bucket); + let objs = req.send().await.unwrap(); + for obj in objs.contents.unwrap_or_default() { + let req = client.delete_object().bucket(bucket).key(obj.key.unwrap()); + let _ = req.send().await.unwrap(); + } +} + +#[tokio::test(flavor = "multi_thread")] +async fn get_block_range__can_get_from_remote_s3_bucket() { + // setup + require_env_var_or_panic!("AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY", "AWS_REGION"); + ensure_bucket_exists().await; + clean_s3_bucket().await; + + // given + let endpoint_url = AWS_ENDPOINT_URL.to_string(); + let storage_method = StorageMethod::S3 { + bucket: "test-bucket".to_string(), 
+ endpoint_url: Some(endpoint_url), + requester_pays: false, + }; + let config = Config::local_node_with_rpc_and_storage_method(storage_method); + let srv = FuelService::from_database(Database::default(), config.clone()) + .await + .unwrap(); + let graphql_client = FuelClient::from(srv.bound_address); + let tx = Transaction::default_test_tx(); + + // when + let _ = graphql_client.submit_and_await_commit(&tx).await.unwrap(); + + sleep(std::time::Duration::from_secs(1)).await; + + // then + let zipped_data = get_block_from_s3_bucket().await; + let data = unzip_bytes(&zipped_data); + let actual_proto: ProtoBlock = prost::Message::decode(data.as_ref()).unwrap(); + let (_, receipts) = + fuel_block_from_protobuf(actual_proto, &[], Bytes32::default()).unwrap(); + assert!( + matches!( + receipts[1], + Receipt::ScriptResult { + result: ScriptExecutionResult::Success, + .. + } + ), + "should have a script result receipt, received: {:?}", + receipts + ); + assert!( + matches!(receipts[0], Receipt::Return { .. 
}), + "should have a return receipt, received: {:?}", + receipts + ); + + // cleanup + clean_s3_bucket().await; + drop(srv); + tracing::info!( + "Successfully ran test: get_block_range__can_get_from_remote_s3_bucket" + ); +} + +fn unzip_bytes(bytes: &[u8]) -> Vec { + let mut decoder = GzDecoder::new(bytes); + let mut output = Vec::new(); + decoder.read_to_end(&mut output).unwrap(); + output +} + +#[tokio::test(flavor = "multi_thread")] +async fn get_block_range__no_publish__can_get_block_info_from_rpc__remote() { + // setup + require_env_var_or_panic!("AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY", "AWS_REGION"); + ensure_bucket_exists().await; + clean_s3_bucket().await; + + // given + let endpoint_url = AWS_ENDPOINT_URL.to_string(); + let storage_method = StorageMethod::S3NoPublish { + bucket: "test-bucket".to_string(), + endpoint_url: Some(endpoint_url), + requester_pays: false, + }; + let config = Config::local_node_with_rpc_and_storage_method(storage_method); + let rpc_url = config.rpc_config.clone().unwrap().addr; + + let srv = FuelService::from_database(Database::default(), config.clone()) + .await + .unwrap(); + + let graphql_client = FuelClient::from(srv.bound_address); + + let tx = Transaction::default_test_tx(); + let _ = graphql_client.submit_and_await_commit(&tx).await.unwrap(); + + let rpc_url = format!("http://{}", rpc_url); + let mut rpc_client = ProtoBlockAggregatorClient::connect(rpc_url) + .await + .expect("could not connect to server"); + + let expected_block = graphql_client + .full_block_by_height(1) + .await + .unwrap() + .unwrap(); + let expected_header = expected_block.header; + let expected_height = BlockHeight::new(expected_header.height.0); + + // when + let request = ProtoBlockRangeRequest { start: 1, end: 1 }; + let remote_info = if let Some(ProtoPayload::Remote(remote_info)) = rpc_client + .get_block_range(request) + .await + .unwrap() + .into_inner() + .next() + .await + .unwrap() + .unwrap() + .payload + { + remote_info + } else { + 
panic!("expected remote block payload"); + }; + + // then + let key = block_height_to_key(&expected_height); + let expected = ProtoRemoteBlockResponse { + location: Some(Location::S3(RemoteS3Bucket { + bucket: "test-bucket".to_string(), + key, + requester_pays: false, + endpoint: Some(AWS_ENDPOINT_URL.to_string()), + })), + }; + assert_eq!(expected, remote_info); + + // cleanup + clean_s3_bucket().await; +} + +#[tokio::test(flavor = "multi_thread")] +async fn get_block_height__no_publish__can_get_value_from_rpc() { + require_env_var_or_panic!("AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY", "AWS_REGION"); + + // setup + ensure_bucket_exists().await; + clean_s3_bucket().await; + let endpoint_url = AWS_ENDPOINT_URL.to_string(); + let storage_method = StorageMethod::S3NoPublish { + bucket: "test-bucket".to_string(), + endpoint_url: Some(endpoint_url), + requester_pays: false, + }; + let config = Config::local_node_with_rpc_and_storage_method(storage_method); + let rpc_url = config.rpc_config.clone().unwrap().addr; + + // given + let srv = FuelService::from_database(Database::default(), config.clone()) + .await + .unwrap(); + + let graphql_client = FuelClient::from(srv.bound_address); + + let tx = Transaction::default_test_tx(); + let _ = graphql_client.submit_and_await_commit(&tx).await.unwrap(); + + let rpc_url = format!("http://{}", rpc_url); + let mut rpc_client = ProtoBlockAggregatorClient::connect(rpc_url) + .await + .expect("could not connect to server"); + + // when + sleep(std::time::Duration::from_secs(1)).await; + let request = ProtoBlockHeightRequest {}; + let expected_height = Some(1); + let actual_height = rpc_client + .get_synced_block_height(request) + .await + .unwrap() + .into_inner() + .height; + + // then + assert_eq!(expected_height, actual_height); + + // cleanup + clean_s3_bucket().await; +} + +#[tokio::test(flavor = "multi_thread")] +async fn get_block_range__no_publish__does_not_publish_to_s3_bucket() { + // setup + 
require_env_var_or_panic!("AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY", "AWS_REGION"); + ensure_bucket_exists().await; + clean_s3_bucket().await; + + // given + let endpoint_url = AWS_ENDPOINT_URL.to_string(); + let storage_method = StorageMethod::S3NoPublish { + bucket: "test-bucket".to_string(), + endpoint_url: Some(endpoint_url), + requester_pays: false, + }; + let config = Config::local_node_with_rpc_and_storage_method(storage_method); + let srv = FuelService::from_database(Database::default(), config.clone()) + .await + .unwrap(); + let graphql_client = FuelClient::from(srv.bound_address); + let tx = Transaction::default_test_tx(); + + // when + let _ = graphql_client.submit_and_await_commit(&tx).await.unwrap(); + + sleep(std::time::Duration::from_secs(1)).await; + + // then + let found_block = block_found_in_s3_bucket().await; + assert!(!found_block); +}