diff --git a/.github/workflows/daily.yaml b/.github/workflows/daily.yaml index 82e1c630df..c04b2b50f1 100644 --- a/.github/workflows/daily.yaml +++ b/.github/workflows/daily.yaml @@ -1,8 +1,8 @@ name: Openmina Daily on: workflow_dispatch: {} - schedule: - - cron: "0 5 * * *" + #schedule: + # - cron: "0 5 * * *" push: branches: [ "test/*daily*" ] diff --git a/.github/workflows/docker.yaml b/.github/workflows/docker.yaml index 084466083d..dc6ba1c467 100644 --- a/.github/workflows/docker.yaml +++ b/.github/workflows/docker.yaml @@ -33,6 +33,11 @@ jobs: - name: Git checkout uses: actions/checkout@v3 + + # This is needed so that we can get the current version with vergen + - name: Fetch tag for current commit + run: | + git fetch --depth=1 origin +refs/tags/*:refs/tags/* - name: Login to Docker Hub uses: docker/login-action@v3 diff --git a/.idea/.gitignore b/.idea/.gitignore index 7abb13d050..193e1a4000 100644 --- a/.idea/.gitignore +++ b/.idea/.gitignore @@ -5,3 +5,4 @@ /httpRequests/ # GitHub Copilot persisted chat sessions /copilot/chatSessions +/vcs.xml \ No newline at end of file diff --git a/CHANGELOG.md b/CHANGELOG.md index 76ab5914e3..820cb65dff 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,49 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ## [Unreleased] +## [0.16.0] - 2025-04-04 + +### Added + +- **GraphQL**: More queries (snark pool, pending snark work, genesis block, ledger status). + +### Changed + +- **GraphQL**: Added more fields to the `daemonStatus` query. + +### Fixed + +- **GraphQL**: Some issues with accounts. +- **Block Producer**: Corner case that caused the won slot search to sometimes be interrupted at epoch bounds. + +## [0.15.0] - 2025-03-13 + +### Added + +- Restored support for snark workers. +- **Archive**: Support for storing blocks to AWS, GCP, and the filesystem. +- **Tooling**: WebRTC traffic sniffer. +- **GraphQL**: + - `sendPayment` mutation. + - `sendDelegation` mutation. + - `pooledUserCommands` query. + - `pooledZkappCommands` query. + - Various other partially implemented queries expanded to ensure compatibility with the OCaml node. + + +### Changed + +- **P2P**: Wait until full validation is complete before broadcasting transactions and completed works. +- **Transition frontier**: Perform the cheap consensus operation first, then the more expensive proof verification. +- **Transaction pool**: Unified libp2p and webrtc logic for the initial phase of handling transactions received from the gossip network. As a result, processing of transactions received during bootstrap is delayed until the initial sync is complete. +- **Transaction pool**: Suspend processing during block production. + +### Fixed + +- **Transition frontier**: Rare race condition in the case of forks during block production that could result in dropping staged ledgers too early. +- **Webnode**: Replaced tokio channels, which had a race condition that could crash the thread on WASM. +- **Transaction pool**: Verify zkApp proofs in a dedicated thread to avoid blocking the state machine. + ## [0.14.0] - 2025-01-31 ### Changed @@ -366,7 +409,9 @@ First public release. - Alpha version of the node which can connect and syncup to the berkeleynet network, and keep applying new blocks to maintain consensus state and ledger up to date. - Web-based frontend for the node. 
-[Unreleased]: https://github.com/openmina/openmina/compare/v0.14.0...develop +[Unreleased]: https://github.com/openmina/openmina/compare/v0.16.0...develop +[0.16.0]: https://github.com/openmina/openmina/compare/v0.15.0...v0.16.0 +[0.15.0]: https://github.com/openmina/openmina/compare/v0.14.0...v0.15.0 [0.14.0]: https://github.com/openmina/openmina/compare/v0.13.0...v0.14.0 [0.13.0]: https://github.com/openmina/openmina/compare/v0.12.0...v0.13.0 [0.12.0]: https://github.com/openmina/openmina/compare/v0.11.0...v0.12.0 diff --git a/Cargo.lock b/Cargo.lock index 849639cdcc..3dfb6e7607 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -131,7 +131,7 @@ dependencies = [ "serde", "serde_json", "thiserror 1.0.60", - "toml", + "toml 0.5.11", "wasm-bindgen", "wasm-bindgen-test", ] @@ -241,7 +241,7 @@ dependencies = [ [[package]] name = "ark-ec" version = "0.3.0" -source = "git+https://github.com/openmina/algebra?rev=aea157a#aea157a8e81ddc9f16bae5123b5dda169e3842c9" +source = "git+https://github.com/openmina/algebra?rev=150ab8d#150ab8d4cf4918f256580c0d17249ddf11e20aeb" dependencies = [ "ark-ff", "ark-serialize 0.3.0", @@ -255,7 +255,7 @@ [[package]] name = "ark-ff" version = "0.3.0" -source = "git+https://github.com/openmina/algebra?rev=aea157a#aea157a8e81ddc9f16bae5123b5dda169e3842c9" +source = "git+https://github.com/openmina/algebra?rev=150ab8d#150ab8d4cf4918f256580c0d17249ddf11e20aeb" dependencies = [ "ark-ff-asm", "ark-ff-macros", @@ -273,7 +273,7 @@ dependencies = [ [[package]] name = "ark-ff-asm" version = "0.3.0" -source = "git+https://github.com/openmina/algebra?rev=aea157a#aea157a8e81ddc9f16bae5123b5dda169e3842c9" +source = "git+https://github.com/openmina/algebra?rev=150ab8d#150ab8d4cf4918f256580c0d17249ddf11e20aeb" dependencies = [ "quote 1.0.35", "syn 1.0.109", @@ -282,7 +282,7 @@ dependencies = [ [[package]] name = "ark-ff-macros" version = "0.3.0" -source = "git+https://github.com/openmina/algebra?rev=aea157a#aea157a8e81ddc9f16bae5123b5dda169e3842c9" +source = "git+https://github.com/openmina/algebra?rev=150ab8d#150ab8d4cf4918f256580c0d17249ddf11e20aeb" dependencies = [ "num-bigint", "num-traits", @@ -293,7 +293,7 @@ dependencies = [ [[package]] name = "ark-poly" version = "0.3.0" -source = "git+https://github.com/openmina/algebra?rev=aea157a#aea157a8e81ddc9f16bae5123b5dda169e3842c9" +source = "git+https://github.com/openmina/algebra?rev=150ab8d#150ab8d4cf4918f256580c0d17249ddf11e20aeb" dependencies = [ "ark-ff", "ark-serialize 0.3.0", @@ -306,7 +306,7 @@ dependencies = [ [[package]] name = "ark-serialize" version = "0.3.0" -source = "git+https://github.com/openmina/algebra?rev=aea157a#aea157a8e81ddc9f16bae5123b5dda169e3842c9" +source = "git+https://github.com/openmina/algebra?rev=150ab8d#150ab8d4cf4918f256580c0d17249ddf11e20aeb" dependencies = [ "ark-serialize-derive", "ark-std 0.3.0", @@ -327,7 +327,7 @@ dependencies = [ [[package]] name = "ark-serialize-derive" version = "0.3.0" -source = "git+https://github.com/openmina/algebra?rev=aea157a#aea157a8e81ddc9f16bae5123b5dda169e3842c9" +source = "git+https://github.com/openmina/algebra?rev=150ab8d#150ab8d4cf4918f256580c0d17249ddf11e20aeb" dependencies = [ "proc-macro2 1.0.93", "quote 1.0.35", @@ -382,7 +382,7 @@ dependencies = [ "asn1-rs-derive 0.4.0", "asn1-rs-impl 0.1.0", "displaydoc", - "nom", + "nom 7.1.3", "num-traits", "rusticata-macros", "thiserror 1.0.60", @@ -398,7 +398,7 @@ dependencies = [ "asn1-rs-derive 0.5.1", "asn1-rs-impl 0.2.0", "displaydoc", - "nom", + "nom 7.1.3", "num-traits", "rusticata-macros", 
"thiserror 1.0.60", @@ -451,6 +451,29 @@ dependencies = [ "syn 2.0.96", ] +[[package]] +name = "async-channel" +version = "1.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "81953c529336010edd6d8e358f886d9581267795c61b19475b71314bffa46d35" +dependencies = [ + "concurrent-queue", + "event-listener 2.5.3", + "futures-core", +] + +[[package]] +name = "async-channel" +version = "2.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "89b47800b0be77592da0afd425cc03468052844aff33b84e33cc696f64e77b6a" +dependencies = [ + "concurrent-queue", + "event-listener-strategy", + "futures-core", + "pin-project-lite", +] + [[package]] name = "async-compression" version = "0.4.18" @@ -464,26 +487,73 @@ dependencies = [ "tokio", ] +[[package]] +name = "async-executor" +version = "1.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "30ca9a001c1e8ba5149f91a74362376cc6bc5b919d92d988668657bd570bdcec" +dependencies = [ + "async-task", + "concurrent-queue", + "fastrand 2.3.0", + "futures-lite 2.6.0", + "slab", +] + +[[package]] +name = "async-global-executor" +version = "2.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05b1b633a2115cd122d73b955eadd9916c18c8f510ec9cd1686404c60ad1c29c" +dependencies = [ + "async-channel 2.3.1", + "async-executor", + "async-io 2.4.0", + "async-lock 3.4.0", + "blocking", + "futures-lite 2.6.0", + "once_cell", +] + [[package]] name = "async-io" version = "1.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0fc5b45d93ef0529756f812ca52e44c221b35341892d3dcc34132ac02f3dd2af" dependencies = [ - "async-lock", + "async-lock 2.8.0", "autocfg", "cfg-if", "concurrent-queue", - "futures-lite", + "futures-lite 1.13.0", "log", "parking", - "polling", + "polling 2.8.0", "rustix 0.37.26", "slab", "socket2 0.4.10", "waker-fn", ] +[[package]] +name = "async-io" +version = "2.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "43a2b323ccce0a1d90b449fd71f2a06ca7faa7c54c2751f06c9bd851fc061059" +dependencies = [ + "async-lock 3.4.0", + "cfg-if", + "concurrent-queue", + "futures-io", + "futures-lite 2.6.0", + "parking", + "polling 3.7.4", + "rustix 0.38.42", + "slab", + "tracing", + "windows-sys 0.59.0", +] + [[package]] name = "async-lock" version = "2.8.0" @@ -493,6 +563,43 @@ dependencies = [ "event-listener 2.5.3", ] +[[package]] +name = "async-lock" +version = "3.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ff6e472cdea888a4bd64f342f09b3f50e1886d32afe8df3d663c01140b811b18" +dependencies = [ + "event-listener 5.3.1", + "event-listener-strategy", + "pin-project-lite", +] + +[[package]] +name = "async-std" +version = "1.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "730294c1c08c2e0f85759590518f6333f0d5a0a766a27d519c1b244c3dfd8a24" +dependencies = [ + "async-channel 1.9.0", + "async-global-executor", + "async-io 2.4.0", + "async-lock 3.4.0", + "crossbeam-utils", + "futures-channel", + "futures-core", + "futures-io", + "futures-lite 2.6.0", + "gloo-timers", + "kv-log-macro", + "log", + "memchr", + "once_cell", + "pin-project-lite", + "pin-utils", + "slab", + "wasm-bindgen-futures", +] + [[package]] name = "async-stream" version = "0.3.6" @@ -515,6 +622,12 @@ dependencies = [ "syn 2.0.96", ] +[[package]] +name = "async-task" +version = "4.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"8b75356056920673b02621b35afd0f7dda9306d03c79a30f5c56c44cf256e3de" + [[package]] name = "async-trait" version = "0.1.74" @@ -603,6 +716,381 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" +[[package]] +name = "aws-config" +version = "1.5.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "50236e4d60fe8458de90a71c0922c761e41755adf091b1b03de1cef537179915" +dependencies = [ + "aws-credential-types", + "aws-runtime", + "aws-sdk-sso", + "aws-sdk-ssooidc", + "aws-sdk-sts", + "aws-smithy-async", + "aws-smithy-http", + "aws-smithy-json", + "aws-smithy-runtime", + "aws-smithy-runtime-api", + "aws-smithy-types", + "aws-types", + "bytes", + "fastrand 2.3.0", + "hex", + "http 0.2.9", + "ring 0.17.8", + "time", + "tokio", + "tracing", + "url", + "zeroize", +] + +[[package]] +name = "aws-credential-types" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "60e8f6b615cb5fc60a98132268508ad104310f0cfb25a1c22eee76efdf9154da" +dependencies = [ + "aws-smithy-async", + "aws-smithy-runtime-api", + "aws-smithy-types", + "zeroize", +] + +[[package]] +name = "aws-runtime" +version = "1.5.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "76dd04d39cc12844c0994f2c9c5a6f5184c22e9188ec1ff723de41910a21dcad" +dependencies = [ + "aws-credential-types", + "aws-sigv4", + "aws-smithy-async", + "aws-smithy-eventstream", + "aws-smithy-http", + "aws-smithy-runtime", + "aws-smithy-runtime-api", + "aws-smithy-types", + "aws-types", + "bytes", + "fastrand 2.3.0", + "http 0.2.9", + "http-body 0.4.5", + "once_cell", + "percent-encoding", + "pin-project-lite", + "tracing", + "uuid", +] + +[[package]] +name = "aws-sdk-s3" +version = "1.74.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f551566d462b47c3e49b330f1b86e69e7dc6e4d4efb1959e28c5c82d22e79f7c" +dependencies = [ + "aws-credential-types", + "aws-runtime", + "aws-sigv4", + "aws-smithy-async", + "aws-smithy-checksums", + "aws-smithy-eventstream", + "aws-smithy-http", + "aws-smithy-json", + "aws-smithy-runtime", + "aws-smithy-runtime-api", + "aws-smithy-types", + "aws-smithy-xml", + "aws-types", + "bytes", + "fastrand 2.3.0", + "hex", + "hmac", + "http 0.2.9", + "http-body 0.4.5", + "lru", + "once_cell", + "percent-encoding", + "regex-lite", + "sha2 0.10.8", + "tracing", + "url", +] + +[[package]] +name = "aws-sdk-sso" +version = "1.58.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "16ff718c9ee45cc1ebd4774a0e086bb80a6ab752b4902edf1c9f56b86ee1f770" +dependencies = [ + "aws-credential-types", + "aws-runtime", + "aws-smithy-async", + "aws-smithy-http", + "aws-smithy-json", + "aws-smithy-runtime", + "aws-smithy-runtime-api", + "aws-smithy-types", + "aws-types", + "bytes", + "http 0.2.9", + "once_cell", + "regex-lite", + "tracing", +] + +[[package]] +name = "aws-sdk-ssooidc" +version = "1.59.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5183e088715cc135d8d396fdd3bc02f018f0da4c511f53cb8d795b6a31c55809" +dependencies = [ + "aws-credential-types", + "aws-runtime", + "aws-smithy-async", + "aws-smithy-http", + "aws-smithy-json", + "aws-smithy-runtime", + "aws-smithy-runtime-api", + "aws-smithy-types", + "aws-types", + "bytes", + "http 0.2.9", + "once_cell", + "regex-lite", + "tracing", +] + +[[package]] +name = "aws-sdk-sts" +version = "1.59.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "c9f944ef032717596639cea4a2118a3a457268ef51bbb5fde9637e54c465da00" +dependencies = [ + "aws-credential-types", + "aws-runtime", + "aws-smithy-async", + "aws-smithy-http", + "aws-smithy-json", + "aws-smithy-query", + "aws-smithy-runtime", + "aws-smithy-runtime-api", + "aws-smithy-types", + "aws-smithy-xml", + "aws-types", + "http 0.2.9", + "once_cell", + "regex-lite", + "tracing", +] + +[[package]] +name = "aws-sigv4" +version = "1.2.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0bc5bbd1e4a2648fd8c5982af03935972c24a2f9846b396de661d351ee3ce837" +dependencies = [ + "aws-credential-types", + "aws-smithy-eventstream", + "aws-smithy-http", + "aws-smithy-runtime-api", + "aws-smithy-types", + "bytes", + "crypto-bigint 0.5.5", + "form_urlencoded", + "hex", + "hmac", + "http 0.2.9", + "http 1.1.0", + "once_cell", + "p256 0.11.1", + "percent-encoding", + "ring 0.17.8", + "sha2 0.10.8", + "subtle", + "time", + "tracing", + "zeroize", +] + +[[package]] +name = "aws-smithy-async" +version = "1.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fa59d1327d8b5053c54bf2eaae63bf629ba9e904434d0835a28ed3c0ed0a614e" +dependencies = [ + "futures-util", + "pin-project-lite", + "tokio", +] + +[[package]] +name = "aws-smithy-checksums" +version = "0.62.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f2f45a1c384d7a393026bc5f5c177105aa9fa68e4749653b985707ac27d77295" +dependencies = [ + "aws-smithy-http", + "aws-smithy-types", + "bytes", + "crc32c", + "crc32fast", + "crc64fast-nvme", + "hex", + "http 0.2.9", + "http-body 0.4.5", + "md-5", + "pin-project-lite", + "sha1", + "sha2 0.10.8", + "tracing", +] + +[[package]] +name = "aws-smithy-eventstream" +version = "0.60.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b18559a41e0c909b77625adf2b8c50de480a8041e5e4a3f5f7d177db70abc5a" +dependencies = [ + "aws-smithy-types", + "bytes", + "crc32fast", +] + +[[package]] +name = "aws-smithy-http" +version = "0.60.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7809c27ad8da6a6a68c454e651d4962479e81472aa19ae99e59f9aba1f9713cc" +dependencies = [ + "aws-smithy-eventstream", + "aws-smithy-runtime-api", + "aws-smithy-types", + "bytes", + "bytes-utils", + "futures-core", + "http 0.2.9", + "http-body 0.4.5", + "once_cell", + "percent-encoding", + "pin-project-lite", + "pin-utils", + "tracing", +] + +[[package]] +name = "aws-smithy-json" +version = "0.61.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "623a51127f24c30776c8b374295f2df78d92517386f77ba30773f15a30ce1422" +dependencies = [ + "aws-smithy-types", +] + +[[package]] +name = "aws-smithy-query" +version = "0.60.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f2fbd61ceb3fe8a1cb7352e42689cec5335833cd9f94103a61e98f9bb61c64bb" +dependencies = [ + "aws-smithy-types", + "urlencoding", +] + +[[package]] +name = "aws-smithy-runtime" +version = "1.7.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d526a12d9ed61fadefda24abe2e682892ba288c2018bcb38b1b4c111d13f6d92" +dependencies = [ + "aws-smithy-async", + "aws-smithy-http", + "aws-smithy-runtime-api", + "aws-smithy-types", + "bytes", + "fastrand 2.3.0", + "h2 0.3.24", + "http 0.2.9", + "http-body 0.4.5", + "http-body 1.0.1", + "httparse", + "hyper 0.14.27", + "hyper-rustls 0.24.2", + "once_cell", + 
"pin-project-lite", + "pin-utils", + "rustls 0.21.12", + "tokio", + "tracing", +] + +[[package]] +name = "aws-smithy-runtime-api" +version = "1.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "92165296a47a812b267b4f41032ff8069ab7ff783696d217f0994a0d7ab585cd" +dependencies = [ + "aws-smithy-async", + "aws-smithy-types", + "bytes", + "http 0.2.9", + "http 1.1.0", + "pin-project-lite", + "tokio", + "tracing", + "zeroize", +] + +[[package]] +name = "aws-smithy-types" +version = "1.2.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c7b8a53819e42f10d0821f56da995e1470b199686a1809168db6ca485665f042" +dependencies = [ + "base64-simd", + "bytes", + "bytes-utils", + "futures-core", + "http 0.2.9", + "http 1.1.0", + "http-body 0.4.5", + "http-body 1.0.1", + "http-body-util", + "itoa", + "num-integer", + "pin-project-lite", + "pin-utils", + "ryu", + "serde", + "time", + "tokio", + "tokio-util", +] + +[[package]] +name = "aws-smithy-xml" +version = "0.60.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ab0b0166827aa700d3dc519f72f8b3a91c35d0b8d042dc5d643a91e6f80648fc" +dependencies = [ + "xmlparser", +] + +[[package]] +name = "aws-types" +version = "1.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dfbd0a668309ec1f66c0f6bda4840dd6d4796ae26d699ebc266d7cc95c6d040f" +dependencies = [ + "aws-credential-types", + "aws-smithy-async", + "aws-smithy-runtime-api", + "aws-smithy-types", + "rustc_version 0.4.0", + "tracing", +] + [[package]] name = "axum" version = "0.7.5" @@ -693,6 +1181,12 @@ version = "0.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4cbbc9d0964165b47557570cce6c952866c2678457aca742aafc9fb771d30270" +[[package]] +name = "base16ct" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "349a06037c7bf932dd7e7d1f653678b2038b9ad46a74102f1fc7bd7872678cce" + [[package]] name = "base16ct" version = "0.2.0" @@ -717,6 +1211,16 @@ version = "0.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9475866fec1451be56a3c2400fd081ff546538961565ccb5b7142cbd22bc7a51" +[[package]] +name = "base64-simd" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "339abbe78e73178762e23bea9dfd08e697eb3f3301cd4be981c0f78ba5859195" +dependencies = [ + "outref", + "vsimd", +] + [[package]] name = "base64ct" version = "1.6.0" @@ -888,6 +1392,19 @@ dependencies = [ "generic-array", ] +[[package]] +name = "blocking" +version = "1.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "703f41c54fc768e63e091340b424302bb1c29ef4aa0c7f10fe849dfb114d29ea" +dependencies = [ + "async-channel 2.3.1", + "async-task", + "futures-io", + "futures-lite 2.6.0", + "piper", +] + [[package]] name = "bs58" version = "0.4.0" @@ -937,6 +1454,16 @@ dependencies = [ "serde", ] +[[package]] +name = "bytes-utils" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7dafe3a8757b027e2be6e4e5601ed563c55989fcf1546e933c66c8eb3a058d35" +dependencies = [ + "bytes", + "either", +] + [[package]] name = "cbc" version = "0.1.2" @@ -946,6 +1473,25 @@ dependencies = [ "cipher 0.4.4", ] +[[package]] +name = "cbindgen" +version = "0.27.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3fce8dd7fcfcbf3a0a87d8f515194b49d6135acab73e18bd380d1d93bb1a15eb" +dependencies = [ + "clap 
4.5.20", + "heck 0.4.1", + "indexmap 2.7.1", + "log", + "proc-macro2 1.0.93", + "quote 1.0.35", + "serde", + "serde_json", + "syn 2.0.96", + "tempfile", + "toml 0.8.20", +] + [[package]] name = "cc" version = "1.2.5" @@ -975,7 +1521,7 @@ version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6fac387a98bb7c37292057cffc56d62ecb629900026402633ae9160df93a8766" dependencies = [ - "nom", + "nom 7.1.3", ] [[package]] @@ -1142,9 +1688,10 @@ checksum = "98cc8fbded0c607b7ba9dd60cd98df59af97e84d24e49c8557331cfc26d301ce" [[package]] name = "cli" -version = "0.14.0" +version = "0.16.0" dependencies = [ "anyhow", + "backtrace", "bytes", "clap 4.5.20", "console", @@ -1353,6 +1900,15 @@ version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9cace84e55f07e7301bae1c519df89cdad8cc3cd868413d3fdbdeca9ff3db484" +[[package]] +name = "crc32c" +version = "0.6.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3a47af21622d091a8f0fb295b88bc886ac74efcc613efc19f5d0b21de5c89e47" +dependencies = [ + "rustc_version 0.4.0", +] + [[package]] name = "crc32fast" version = "1.3.2" @@ -1362,6 +1918,16 @@ dependencies = [ "cfg-if", ] +[[package]] +name = "crc64fast-nvme" +version = "1.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d5e2ee08013e3f228d6d2394116c4549a6df77708442c62d887d83f68ef2ee37" +dependencies = [ + "cbindgen", + "crc", +] + [[package]] name = "critical-section" version = "1.1.2" @@ -1421,7 +1987,19 @@ checksum = "22ec99545bb0ed0ea7bb9b8e1e9122ea386ff8a48c0922e43f36d45ab09e0e80" name = "crunchy" version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a81dae078cea95a014a339291cec439d2f232ebe854a9d672b796c6afafa9b7" +checksum = "7a81dae078cea95a014a339291cec439d2f232ebe854a9d672b796c6afafa9b7" + +[[package]] +name = "crypto-bigint" +version = "0.4.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ef2b4b23cddf68b89b8f8069890e8c270d54e2d5fe1b143820234805e4cb17ef" +dependencies = [ + "generic-array", + "rand_core", + "subtle", + "zeroize", +] [[package]] name = "crypto-bigint" @@ -1680,6 +2258,25 @@ dependencies = [ "openssl-src", ] +[[package]] +name = "dataloader" +version = "0.18.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "43533eb41c886a84d11ed5c72c17315dcdff46a5eb08d3bbfac6a9ef8faa4085" +dependencies = [ + "async-std", +] + +[[package]] +name = "der" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f1a467a65c5e759bce6e65eaf91cc29f466cdc57cb65777bd646872a8a1fd4de" +dependencies = [ + "const-oid", + "zeroize", +] + [[package]] name = "der" version = "0.7.8" @@ -1699,7 +2296,7 @@ checksum = "dbd676fbbab537128ef0278adb5576cf363cff6aa22a7b24effe97347cfab61e" dependencies = [ "asn1-rs 0.5.2", "displaydoc", - "nom", + "nom 7.1.3", "num-bigint", "num-traits", "rusticata-macros", @@ -1713,7 +2310,7 @@ checksum = "5cd0a5c643689626bec213c4d8bd4d96acc8ffdb4ad4bb6bc16abf27d5f4b553" dependencies = [ "asn1-rs 0.6.2", "displaydoc", - "nom", + "nom 7.1.3", "num-bigint", "num-traits", "rusticata-macros", @@ -1903,18 +2500,30 @@ version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dcbb2bf8e87535c23f7a8a321e364ce21462d0ff10cb6407820e8e96dfff6653" +[[package]] +name = "ecdsa" +version = "0.14.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"413301934810f597c1d19ca71c8710e99a3f1ba28a0d2ebc01551a2daeea3c5c" +dependencies = [ + "der 0.6.1", + "elliptic-curve 0.12.3", + "rfc6979 0.3.1", + "signature 1.6.4", +] + [[package]] name = "ecdsa" version = "0.16.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ee27f32b5c5292967d2d4a9d7f1e0b0aed2c15daded5a60300e4abb9d8020bca" dependencies = [ - "der", + "der 0.7.8", "digest 0.10.7", - "elliptic-curve", - "rfc6979", - "signature", - "spki", + "elliptic-curve 0.13.8", + "rfc6979 0.4.0", + "signature 2.1.0", + "spki 0.7.3", ] [[package]] @@ -1923,9 +2532,9 @@ version = "2.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "115531babc129696a58c64a4fef0a8bf9e9698629fb97e9e40767d235cfbcd53" dependencies = [ - "pkcs8", + "pkcs8 0.10.2", "serde", - "signature", + "signature 2.1.0", ] [[package]] @@ -1952,23 +2561,43 @@ dependencies = [ "serde", ] +[[package]] +name = "elliptic-curve" +version = "0.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e7bb888ab5300a19b8e5bceef25ac745ad065f3c9f7efc6de1b91958110891d3" +dependencies = [ + "base16ct 0.1.1", + "crypto-bigint 0.4.9", + "der 0.6.1", + "digest 0.10.7", + "ff 0.12.1", + "generic-array", + "group 0.12.1", + "pkcs8 0.9.0", + "rand_core", + "sec1 0.3.0", + "subtle", + "zeroize", +] + [[package]] name = "elliptic-curve" version = "0.13.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b5e6043086bf7973472e0c7dff2142ea0b680d30e18d9cc40f267efbf222bd47" dependencies = [ - "base16ct", - "crypto-bigint", + "base16ct 0.2.0", + "crypto-bigint 0.5.5", "digest 0.10.7", - "ff", + "ff 0.13.0", "generic-array", - "group", + "group 0.13.0", "hkdf", "pem-rfc7468", - "pkcs8", + "pkcs8 0.10.2", "rand_core", - "sec1", + "sec1 0.7.3", "subtle", "zeroize", ] @@ -2036,9 +2665,9 @@ dependencies = [ [[package]] name = "env_logger" -version = "0.11.3" +version = "0.11.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38b35839ba51819680ba087cd351788c9a3c476841207e0b8cee0b04722343b9" +checksum = "dcaee3d8e3cfc3fd92428d477bc97fc29ec8716d180c0d74c643bb26166660e0" dependencies = [ "anstream", "anstyle", @@ -2053,6 +2682,17 @@ version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" +[[package]] +name = "errno" +version = "0.2.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f639046355ee4f37944e44f60642c6f3a7efa3cf6b78c78a0d989a8ce6c396a1" +dependencies = [ + "errno-dragonfly", + "libc", + "winapi 0.3.9", +] + [[package]] name = "errno" version = "0.3.10" @@ -2063,6 +2703,16 @@ dependencies = [ "windows-sys 0.52.0", ] +[[package]] +name = "errno-dragonfly" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "aa68f1b12764fab894d2755d2518754e71b4fd80ecfb822714a1206c2aab39bf" +dependencies = [ + "cc", + "libc", +] + [[package]] name = "etcetera" version = "0.8.0" @@ -2074,6 +2724,15 @@ dependencies = [ "windows-sys 0.48.0", ] +[[package]] +name = "etherparse" +version = "0.17.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b14e4ac78394e3ea04edbbc412099cf54f2f52ded51efb79c466a282729399d2" +dependencies = [ + "arrayvec", +] + [[package]] name = "event-listener" version = "2.5.3" @@ -2091,6 +2750,16 @@ dependencies = [ "pin-project-lite", ] +[[package]] +name = "event-listener-strategy" +version = "0.5.3" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "3c3e4e0dd3673c1139bf041f3008816d9cf2946bbfac2945c09e523b8d7b05b2" +dependencies = [ + "event-listener 5.3.1", + "pin-project-lite", +] + [[package]] name = "faster-stun" version = "1.0.1" @@ -2121,6 +2790,16 @@ version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "37909eebbb50d72f9059c3b6d82c0463f2ff062c9e95845c43a6c9c0355411be" +[[package]] +name = "ff" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d013fc25338cc558c5c2cfbad646908fb23591e2404481826742b651c9af7160" +dependencies = [ + "rand_core", + "subtle", +] + [[package]] name = "ff" version = "0.13.0" @@ -2185,12 +2864,13 @@ dependencies = [ [[package]] name = "flume" -version = "0.11.0" +version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "55ac459de2512911e4b674ce33cf20befaba382d05b62b008afc1c8b57cbf181" +checksum = "da0e4dd2a88388a1f4ccc7c9ce104604dab68d9f408dc34cd45823d5a9069095" dependencies = [ "futures-core", "futures-sink", + "nanorand", "spin 0.9.8", ] @@ -2200,6 +2880,12 @@ version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" +[[package]] +name = "foldhash" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a0d2fde1f7b3d48b8395d5f2de76c18a528bd6a9cdde438df747bfcba3e05d6f" + [[package]] name = "foreign-types" version = "0.3.2" @@ -2335,6 +3021,19 @@ dependencies = [ "waker-fn", ] +[[package]] +name = "futures-lite" +version = "2.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f5edaec856126859abb19ed65f39e90fea3a9574b9707f13539acf4abf7eb532" +dependencies = [ + "fastrand 2.3.0", + "futures-core", + "futures-io", + "parking", + "pin-project-lite", +] + [[package]] name = "futures-macro" version = "0.3.30" @@ -2574,6 +3273,81 @@ dependencies = [ "web-sys", ] +[[package]] +name = "google-cloud-auth" +version = "0.17.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e57a13fbacc5e9c41ded3ad8d0373175a6b7a6ad430d99e89d314ac121b7ab06" +dependencies = [ + "async-trait", + "base64 0.21.7", + "google-cloud-metadata", + "google-cloud-token", + "home", + "jsonwebtoken", + "reqwest 0.12.12", + "serde", + "serde_json", + "thiserror 1.0.60", + "time", + "tokio", + "tracing", + "urlencoding", +] + +[[package]] +name = "google-cloud-metadata" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d901aeb453fd80e51d64df4ee005014f6cf39f2d736dd64f7239c132d9d39a6a" +dependencies = [ + "reqwest 0.12.12", + "thiserror 1.0.60", + "tokio", +] + +[[package]] +name = "google-cloud-storage" +version = "0.24.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "34a73d9e94d35665909050f02e035d8bdc82e419241b1b027ebf1ea51dc8a470" +dependencies = [ + "anyhow", + "async-stream", + "async-trait", + "base64 0.21.7", + "bytes", + "futures-util", + "google-cloud-auth", + "google-cloud-metadata", + "google-cloud-token", + "hex", + "once_cell", + "percent-encoding", + "pkcs8 0.10.2", + "regex", + "reqwest 0.12.12", + "reqwest-middleware", + "ring 0.17.8", + "serde", + "serde_json", + "sha2 0.10.8", + "thiserror 1.0.60", + "time", + "tokio", + "tracing", + "url", +] + +[[package]] +name = "google-cloud-token" +version = "0.1.2" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f49c12ba8b21d128a2ce8585955246977fbce4415f680ebf9199b6f9d6d725f" +dependencies = [ + "async-trait", +] + [[package]] name = "graphannis-malloc_size_of" version = "2.0.0" @@ -2649,13 +3423,24 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "group" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5dfbfb3a6cfbd390d5c9564ab283a0349b9b9fcd46a706c1eb10e0db70bfbac7" +dependencies = [ + "ff 0.12.1", + "rand_core", + "subtle", +] + [[package]] name = "group" version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f0f9ef7462f7c099f518d754361858f86d8a07af53ba9af0fe635bbccb151a63" dependencies = [ - "ff", + "ff 0.13.0", "rand_core", "subtle", ] @@ -2663,7 +3448,7 @@ dependencies = [ [[package]] name = "groupmap" version = "0.1.0" -source = "git+https://github.com/openmina/proof-systems?rev=dec49a9#dec49a95fd483d8a90b7b602d13bfa8ea9485491" +source = "git+https://github.com/openmina/proof-systems?rev=f461b4b#f461b4bf65fe9677f81033ed7c3c4f997caea3fe" dependencies = [ "ark-ec", "ark-ff", @@ -2682,7 +3467,7 @@ dependencies = [ "futures-sink", "futures-util", "http 0.2.9", - "indexmap 2.0.2", + "indexmap 2.7.1", "slab", "tokio", "tokio-util", @@ -2701,7 +3486,7 @@ dependencies = [ "futures-core", "futures-sink", "http 1.1.0", - "indexmap 2.0.2", + "indexmap 2.7.1", "slab", "tokio", "tokio-util", @@ -2710,7 +3495,7 @@ dependencies = [ [[package]] name = "hash-tool" -version = "0.14.0" +version = "0.16.0" dependencies = [ "bs58 0.5.0", "hex", @@ -2751,6 +3536,17 @@ dependencies = [ "allocator-api2", ] +[[package]] +name = "hashbrown" +version = "0.15.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bf151400ff0baff5465007dd2f3e717f3fe502074ca563069ce3a6629d07b289" +dependencies = [ + "allocator-api2", + "equivalent", + "foldhash", +] + [[package]] name = "hashlink" version = "0.9.1" @@ -2800,7 +3596,7 @@ dependencies = [ [[package]] name = "heartbeats-processor" -version = "0.14.0" +version = "0.16.0" dependencies = [ "anyhow", "base64 0.22.0", @@ -2810,9 +3606,11 @@ dependencies = [ "firestore", "gcloud-sdk", "mina-p2p-messages", + "mina-tree", "openmina-core", "serde", "serde_json", + "snark", "sqlx", "tokio", ] @@ -2853,6 +3651,12 @@ version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d77f7ec81a6d05a3abb01ab6eb7590f6083d08449fe5a1c8b1e620283546ccb7" +[[package]] +name = "hermit-abi" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fbf6a919d6cf397374f7dfeeea91d974c7c0a7221d0d0f4f20d859d329e53fcc" + [[package]] name = "hex" version = "0.4.3" @@ -3031,6 +3835,22 @@ dependencies = [ "want", ] +[[package]] +name = "hyper-rustls" +version = "0.24.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec3efd23720e2049821a693cbc7e65ea87c72f1c58ff2f9522ff332b1491e590" +dependencies = [ + "futures-util", + "http 0.2.9", + "hyper 0.14.27", + "log", + "rustls 0.21.12", + "rustls-native-certs 0.6.3", + "tokio", + "tokio-rustls 0.24.1", +] + [[package]] name = "hyper-rustls" version = "0.27.5" @@ -3042,10 +3862,10 @@ dependencies = [ "hyper 1.5.0", "hyper-util", "rustls 0.23.21", - "rustls-native-certs", + "rustls-native-certs 0.8.1", "rustls-pki-types", "tokio", - "tokio-rustls", + "tokio-rustls 0.26.1", "tower-service", ] @@ -3304,7 +4124,7 @@ version = "3.1.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "bbb892e5777fe09e16f3d44de7802f4daa7267ecbe8c466f19d94e25bb0c303e" dependencies = [ - "async-io", + "async-io 1.13.0", "core-foundation 0.9.3", "fnv", "futures", @@ -3349,12 +4169,12 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.0.2" +version = "2.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8adf3ddd720272c6ea8bf59463c04e0f93d0bbf7c5439b691bca2987e0270897" +checksum = "8c9c992b02b5b4c94ea26e32fe5bccb7aa7d9f390ab5c1221ff895bc7ea8b652" dependencies = [ "equivalent", - "hashbrown 0.14.5", + "hashbrown 0.15.2", "serde", ] @@ -3380,7 +4200,7 @@ dependencies = [ [[package]] name = "interceptor" version = "0.12.0" -source = "git+https://github.com/openmina/webrtc.git?rev=e8705db39af1b198b324a5db6ff57fb213ba75e9#e8705db39af1b198b324a5db6ff57fb213ba75e9" +source = "git+https://github.com/openmina/webrtc.git?rev=aeaa62682b97f6984627bedd6e6811fe17af18eb#aeaa62682b97f6984627bedd6e6811fe17af18eb" dependencies = [ "async-trait", "bytes", @@ -3399,7 +4219,7 @@ dependencies = [ [[package]] name = "internal-tracing" version = "0.1.0" -source = "git+https://github.com/openmina/proof-systems?rev=dec49a9#dec49a95fd483d8a90b7b602d13bfa8ea9485491" +source = "git+https://github.com/openmina/proof-systems?rev=f461b4b#f461b4bf65fe9677f81033ed7c3c4f997caea3fe" [[package]] name = "io-lifetimes" @@ -3522,7 +4342,7 @@ dependencies = [ "auto_enums", "fnv", "futures", - "indexmap 2.0.2", + "indexmap 2.7.1", "juniper_codegen", "serde", "smartstring", @@ -3578,7 +4398,7 @@ dependencies = [ [[package]] name = "kimchi" version = "0.1.0" -source = "git+https://github.com/openmina/proof-systems?rev=dec49a9#dec49a95fd483d8a90b7b602d13bfa8ea9485491" +source = "git+https://github.com/openmina/proof-systems?rev=f461b4b#f461b4bf65fe9677f81033ed7c3c4f997caea3fe" dependencies = [ "ark-ec", "ark-ff", @@ -3611,6 +4431,15 @@ dependencies = [ "turshi", ] +[[package]] +name = "kv-log-macro" +version = "1.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0de8b303297635ad57c9f5059fd9cee7a47f8e8daa09df0fcd07dd39fb22977f" +dependencies = [ + "log", +] + [[package]] name = "lazy_static" version = "1.4.0" @@ -3634,7 +4463,7 @@ checksum = "884e2677b40cc8c339eaefcb701c32ef1fd2493d71118dc0ca4b6a736c93bd67" [[package]] name = "ledger-tool" -version = "0.14.0" +version = "0.16.0" dependencies = [ "anyhow", "mina-curves", @@ -3970,7 +4799,7 @@ dependencies = [ [[package]] name = "libp2p-rpc-behaviour" -version = "0.14.0" +version = "0.16.0" dependencies = [ "libp2p", "log", @@ -4162,17 +4991,20 @@ dependencies = [ [[package]] name = "log" -version = "0.4.21" +version = "0.4.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90ed8c1e510134f979dbc4f070f87d4313098b704861a105fe34231c70a3901c" +checksum = "04cbf5b083de1c7e0222a7a51dbfdba1cbe1c6ab0b15e29fff3f6c077fd9cd9f" +dependencies = [ + "value-bag", +] [[package]] name = "lru" -version = "0.12.0" +version = "0.12.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1efa59af2ddfad1854ae27d75009d538d0998b4b2fd47083e743ac1a10e46c60" +checksum = "234cf4f4a04dc1f57e24b96cc0cd600cf2af460d4161ac5ecdd0af8e1f3b2a38" dependencies = [ - "hashbrown 0.14.5", + "hashbrown 0.15.2", ] [[package]] @@ -4264,7 +5096,7 @@ dependencies = [ [[package]] name = "mina-curves" version = "0.1.0" -source = "git+https://github.com/openmina/proof-systems?rev=dec49a9#dec49a95fd483d8a90b7b602d13bfa8ea9485491" +source = 
"git+https://github.com/openmina/proof-systems?rev=f461b4b#f461b4bf65fe9677f81033ed7c3c4f997caea3fe" dependencies = [ "ark-ec", "ark-ff", @@ -4273,7 +5105,7 @@ dependencies = [ [[package]] name = "mina-hasher" version = "0.1.0" -source = "git+https://github.com/openmina/proof-systems?rev=dec49a9#dec49a95fd483d8a90b7b602d13bfa8ea9485491" +source = "git+https://github.com/openmina/proof-systems?rev=f461b4b#f461b4bf65fe9677f81033ed7c3c4f997caea3fe" dependencies = [ "ark-ff", "bitvec", @@ -4319,7 +5151,7 @@ dependencies = [ "strum_macros 0.26.4", "thiserror 1.0.60", "time", - "toml", + "toml 0.5.11", "wasm-bindgen", "wasm-bindgen-test", "web-sys", @@ -4328,7 +5160,7 @@ dependencies = [ [[package]] name = "mina-poseidon" version = "0.1.0" -source = "git+https://github.com/openmina/proof-systems?rev=dec49a9#dec49a95fd483d8a90b7b602d13bfa8ea9485491" +source = "git+https://github.com/openmina/proof-systems?rev=f461b4b#f461b4bf65fe9677f81033ed7c3c4f997caea3fe" dependencies = [ "ark-ec", "ark-ff", @@ -4345,7 +5177,7 @@ dependencies = [ [[package]] name = "mina-signer" version = "0.1.0" -source = "git+https://github.com/openmina/proof-systems?rev=dec49a9#dec49a95fd483d8a90b7b602d13bfa8ea9485491" +source = "git+https://github.com/openmina/proof-systems?rev=f461b4b#f461b4bf65fe9677f81033ed7c3c4f997caea3fe" dependencies = [ "ark-ec", "ark-ff", @@ -4363,7 +5195,7 @@ dependencies = [ [[package]] name = "mina-transport" -version = "0.14.0" +version = "0.16.0" dependencies = [ "blake2", "hex", @@ -4374,7 +5206,7 @@ dependencies = [ [[package]] name = "mina-tree" -version = "0.14.0" +version = "0.16.0" dependencies = [ "anyhow", "ark-ec", @@ -4581,6 +5413,15 @@ dependencies = [ "unsigned-varint 0.7.2", ] +[[package]] +name = "nanorand" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6a51313c5820b0b02bd422f4b44776fbf47961755c74ce64afc73bfad10226c3" +dependencies = [ + "getrandom", +] + [[package]] name = "native-tls" version = "0.2.11" @@ -4727,7 +5568,7 @@ dependencies = [ [[package]] name = "node" -version = "0.14.0" +version = "0.16.0" dependencies = [ "anyhow", "ark-ff", @@ -4785,6 +5626,15 @@ dependencies = [ "minimal-lexical", ] +[[package]] +name = "nom" +version = "8.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "df9761775871bdef83bee530e60050f7e54b1105350d6884eb0fb4f46c2f9405" +dependencies = [ + "memchr", +] + [[package]] name = "nu-ansi-term" version = "0.39.0" @@ -4980,7 +5830,7 @@ dependencies = [ [[package]] name = "o1-utils" version = "0.1.0" -source = "git+https://github.com/openmina/proof-systems?rev=dec49a9#dec49a95fd483d8a90b7b602d13bfa8ea9485491" +source = "git+https://github.com/openmina/proof-systems?rev=f461b4b#f461b4bf65fe9677f81033ed7c3c4f997caea3fe" dependencies = [ "ark-ec", "ark-ff", @@ -5089,7 +5939,7 @@ checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" [[package]] name = "openmina-archive-breadcrumb-compare" -version = "0.14.0" +version = "0.16.0" dependencies = [ "anyhow", "binprot", @@ -5104,7 +5954,7 @@ dependencies = [ [[package]] name = "openmina-bootstrap-sandbox" -version = "0.14.0" +version = "0.16.0" dependencies = [ "base64 0.22.0", "binprot", @@ -5129,7 +5979,7 @@ dependencies = [ [[package]] name = "openmina-core" -version = "0.14.0" +version = "0.16.0" dependencies = [ "argon2", "ark-ff", @@ -5138,11 +5988,13 @@ dependencies = [ "binprot_derive", "bs58 0.4.0", "crypto_secretbox", + "flume", "graphannis-malloc_size_of", "graphannis-malloc_size_of_derive", 
"hex", "js-sys", "lazy_static", + "libp2p-identity", "md5", "mina-hasher", "mina-p2p-messages", @@ -5169,7 +6021,7 @@ dependencies = [ [[package]] name = "openmina-fuzzer" -version = "0.14.0" +version = "0.16.0" dependencies = [ "lazy_static", "rand", @@ -5180,7 +6032,7 @@ dependencies = [ [[package]] name = "openmina-gossipsub-sandbox" -version = "0.14.0" +version = "0.16.0" dependencies = [ "bs58 0.5.0", "env_logger", @@ -5194,7 +6046,7 @@ dependencies = [ [[package]] name = "openmina-macros" -version = "0.14.0" +version = "0.16.0" dependencies = [ "anyhow", "openmina-core", @@ -5207,7 +6059,7 @@ dependencies = [ [[package]] name = "openmina-node-account" -version = "0.14.0" +version = "0.16.0" dependencies = [ "anyhow", "bs58 0.4.0", @@ -5225,13 +6077,19 @@ dependencies = [ [[package]] name = "openmina-node-common" -version = "0.14.0" +version = "0.16.0" dependencies = [ + "anyhow", "ark-ff", + "aws-config", + "aws-sdk-s3", "binprot", "binprot_derive", + "bitflags 2.8.0", "gloo-timers", "gloo-utils", + "google-cloud-auth", + "google-cloud-storage", "jsonpath-rust", "libp2p-identity", "mina-p2p-messages", @@ -5261,7 +6119,7 @@ dependencies = [ [[package]] name = "openmina-node-invariants" -version = "0.14.0" +version = "0.16.0" dependencies = [ "documented", "lazy_static", @@ -5275,13 +6133,15 @@ dependencies = [ [[package]] name = "openmina-node-native" -version = "0.14.0" +version = "0.16.0" dependencies = [ "anyhow", "bs58 0.4.0", "bytes", + "dataloader", "derive_more", "getrandom", + "hex", "jsonpath-rust", "juniper", "juniper_warp", @@ -5291,6 +6151,7 @@ dependencies = [ "mina-tree", "nix 0.26.4", "node", + "o1-utils", "openmina-core", "openmina-node-common", "openmina-producer-dashboard", @@ -5313,7 +6174,7 @@ dependencies = [ [[package]] name = "openmina-node-testing" -version = "0.14.0" +version = "0.16.0" dependencies = [ "anyhow", "axum", @@ -5359,7 +6220,7 @@ dependencies = [ [[package]] name = "openmina-node-web" -version = "0.14.0" +version = "0.16.0" dependencies = [ "anyhow", "bytes", @@ -5387,7 +6248,7 @@ dependencies = [ [[package]] name = "openmina-producer-dashboard" -version = "0.14.0" +version = "0.16.0" dependencies = [ "bincode", "clap 4.5.20", @@ -5468,27 +6329,44 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "04744f49eae99ab78e0d5c0b603ab218f515ea8cfe5a456d7629ad883a3b6e7d" +[[package]] +name = "outref" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1a80800c0488c3a21695ea981a54918fbb37abf04f4d0720c453632255e2ff0e" + [[package]] name = "overload" version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39" +[[package]] +name = "p256" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "51f44edd08f51e2ade572f141051021c5af22677e42b7dd28a88155151c33594" +dependencies = [ + "ecdsa 0.14.8", + "elliptic-curve 0.12.3", + "sha2 0.10.8", +] + [[package]] name = "p256" version = "0.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c9863ad85fa8f4460f9c48cb909d38a0d689dba1f6f6988a5e3e0d31071bcd4b" dependencies = [ - "ecdsa", - "elliptic-curve", + "ecdsa 0.16.9", + "elliptic-curve 0.13.8", "primeorder", "sha2 0.10.8", ] [[package]] name = "p2p" -version = "0.14.0" +version = "0.16.0" dependencies = [ "aes-gcm 0.10.3", "anyhow", @@ -5502,7 +6380,7 @@ dependencies = [ "cfg-if", "chacha20poly1305 
0.10.1", "clap 4.5.20", - "crypto-bigint", + "crypto-bigint 0.5.5", "curve25519-dalek", "datachannel", "derive_more", @@ -5531,6 +6409,7 @@ dependencies = [ "prost-build", "quick-protobuf", "rand", + "rcgen 0.13.1", "redux", "reqwest 0.11.24", "salsa-simple", @@ -5556,7 +6435,7 @@ dependencies = [ [[package]] name = "p2p-testing" -version = "0.14.0" +version = "0.16.0" dependencies = [ "derive_more", "futures", @@ -5584,8 +6463,8 @@ version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "70786f51bcc69f6a4c0360e063a4cac5419ef7c5cd5b3c99ad70f3be5ba79209" dependencies = [ - "ecdsa", - "elliptic-curve", + "ecdsa 0.16.9", + "elliptic-curve 0.13.8", "primeorder", "sha2 0.10.8", ] @@ -5661,6 +6540,21 @@ version = "1.0.14" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "de3145af08024dea9fa9914f381a17b8fc6034dfb00f3a84013f7ff43f29ed4c" +[[package]] +name = "pcap" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "499125886165f62fbc0c095ead9189b253f48eb1c5fcab49f81a270f2f220652" +dependencies = [ + "bitflags 1.3.2", + "errno 0.2.8", + "libc", + "libloading", + "pkg-config", + "regex", + "windows-sys 0.36.1", +] + [[package]] name = "pem" version = "1.1.1" @@ -5747,7 +6641,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b4c5cc86750666a3ed20bdaf5ca2a0344f9c67674cae0515bec2da16fbaa47db" dependencies = [ "fixedbitset", - "indexmap 2.0.2", + "indexmap 2.7.1", ] [[package]] @@ -5844,15 +6738,36 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" +[[package]] +name = "piper" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "96c8c490f422ef9a4efd2cb5b42b76c8613d7e7dfc1caf667b8a3350a5acc066" +dependencies = [ + "atomic-waker", + "fastrand 2.3.0", + "futures-io", +] + [[package]] name = "pkcs1" version = "0.7.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c8ffb9f10fa047879315e6625af03c164b16962a5368d724ed16323b68ace47f" dependencies = [ - "der", - "pkcs8", - "spki", + "der 0.7.8", + "pkcs8 0.10.2", + "spki 0.7.3", +] + +[[package]] +name = "pkcs8" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9eca2c590a5f85da82668fa685c09ce2888b9430e83299debf1f34b65fd4a4ba" +dependencies = [ + "der 0.6.1", + "spki 0.6.0", ] [[package]] @@ -5861,8 +6776,8 @@ version = "0.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f950b2377845cebe5cf8b5165cb3cc1a5e0fa5cfa3e1f7f55707d8fd82e0a7b7" dependencies = [ - "der", - "spki", + "der 0.7.8", + "spki 0.7.3", ] [[package]] @@ -5893,10 +6808,25 @@ dependencies = [ "windows-sys 0.48.0", ] +[[package]] +name = "polling" +version = "3.7.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a604568c3202727d1507653cb121dbd627a58684eb09a820fd746bee38b4442f" +dependencies = [ + "cfg-if", + "concurrent-queue", + "hermit-abi 0.4.0", + "pin-project-lite", + "rustix 0.38.42", + "tracing", + "windows-sys 0.59.0", +] + [[package]] name = "poly-commitment" version = "0.1.0" -source = "git+https://github.com/openmina/proof-systems?rev=dec49a9#dec49a95fd483d8a90b7b602d13bfa8ea9485491" +source = "git+https://github.com/openmina/proof-systems?rev=f461b4b#f461b4bf65fe9677f81033ed7c3c4f997caea3fe" dependencies = [ "ark-ec", "ark-ff", @@ -5973,7 +6903,7 @@ 
checksum = "280dc24453071f1b63954171985a0b0d30058d287960968b9b2aca264c8d4ee6" [[package]] name = "poseidon" -version = "0.14.0" +version = "0.16.0" dependencies = [ "ark-ff", "mina-curves", @@ -6021,7 +6951,7 @@ version = "0.13.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "353e1ca18966c16d9deb1c69278edbc5f194139612772bd9537af60ac231e1e6" dependencies = [ - "elliptic-curve", + "elliptic-curve 0.13.8", ] [[package]] @@ -6031,7 +6961,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7f4c021e1093a56626774e81216a4ce732a735e5bad4868a03f3ed65ca0c3919" dependencies = [ "once_cell", - "toml_edit", + "toml_edit 0.19.15", ] [[package]] @@ -6523,6 +7453,12 @@ dependencies = [ "regex-syntax 0.8.2", ] +[[package]] +name = "regex-lite" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "53a49587ad06b26609c52e423de037e7f57f20d53535d66e08c695f347df952a" + [[package]] name = "regex-syntax" version = "0.6.29" @@ -6537,7 +7473,7 @@ checksum = "c08c74e62047bb2de4ff487b251e4a92e24f48745648451635cec7d591162d9f" [[package]] name = "replay_dynamic_effects" -version = "0.14.0" +version = "0.16.0" dependencies = [ "node", "openmina-node-invariants", @@ -6602,7 +7538,7 @@ dependencies = [ "http-body 1.0.1", "http-body-util", "hyper 1.5.0", - "hyper-rustls", + "hyper-rustls 0.27.5", "hyper-tls 0.6.0", "hyper-util", "ipnet", @@ -6616,7 +7552,7 @@ dependencies = [ "pin-project-lite", "quinn 0.11.6", "rustls 0.23.21", - "rustls-native-certs", + "rustls-native-certs 0.8.1", "rustls-pemfile 2.2.0", "rustls-pki-types", "serde", @@ -6626,7 +7562,7 @@ dependencies = [ "system-configuration 0.6.1", "tokio", "tokio-native-tls", - "tokio-rustls", + "tokio-rustls 0.26.1", "tokio-util", "tower 0.5.2", "tower-service", @@ -6638,6 +7574,21 @@ dependencies = [ "windows-registry", ] +[[package]] +name = "reqwest-middleware" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d1ccd3b55e711f91a9885a2fa6fbbb2e39db1776420b062efc058c6410f7e5e3" +dependencies = [ + "anyhow", + "async-trait", + "http 1.1.0", + "reqwest 0.12.12", + "serde", + "thiserror 1.0.60", + "tower-service", +] + [[package]] name = "resolv-conf" version = "0.7.0" @@ -6648,6 +7599,17 @@ dependencies = [ "quick-error", ] +[[package]] +name = "rfc6979" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7743f17af12fa0b03b803ba12cd6a8d9483a587e89c69445e3909655c0b9fabb" +dependencies = [ + "crypto-bigint 0.4.9", + "hmac", + "zeroize", +] + [[package]] name = "rfc6979" version = "0.4.0" @@ -6728,10 +7690,10 @@ dependencies = [ "num-integer", "num-traits", "pkcs1", - "pkcs8", + "pkcs8 0.10.2", "rand_core", - "signature", - "spki", + "signature 2.1.0", + "spki 0.7.3", "subtle", "zeroize", ] @@ -6780,7 +7742,7 @@ dependencies = [ [[package]] name = "rtcp" version = "0.11.0" -source = "git+https://github.com/openmina/webrtc.git?rev=e8705db39af1b198b324a5db6ff57fb213ba75e9#e8705db39af1b198b324a5db6ff57fb213ba75e9" +source = "git+https://github.com/openmina/webrtc.git?rev=aeaa62682b97f6984627bedd6e6811fe17af18eb#aeaa62682b97f6984627bedd6e6811fe17af18eb" dependencies = [ "bytes", "thiserror 1.0.60", @@ -6805,7 +7767,7 @@ dependencies = [ [[package]] name = "rtp" version = "0.11.0" -source = "git+https://github.com/openmina/webrtc.git?rev=e8705db39af1b198b324a5db6ff57fb213ba75e9#e8705db39af1b198b324a5db6ff57fb213ba75e9" +source = 
"git+https://github.com/openmina/webrtc.git?rev=aeaa62682b97f6984627bedd6e6811fe17af18eb#aeaa62682b97f6984627bedd6e6811fe17af18eb" dependencies = [ "bytes", "portable-atomic", @@ -6866,7 +7828,7 @@ version = "4.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "faf0c4a6ece9950b9abdb62b1cfcf2a68b3b67a10ba445b3bb85be2a293d0632" dependencies = [ - "nom", + "nom 7.1.3", ] [[package]] @@ -6876,7 +7838,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "84f3f8f960ed3b5a59055428714943298bf3fa2d4a1d53135084e0544829d995" dependencies = [ "bitflags 1.3.2", - "errno", + "errno 0.3.10", "io-lifetimes", "libc", "linux-raw-sys 0.3.8", @@ -6890,7 +7852,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f93dc38ecbab2eb790ff964bb77fa94faf256fd3e73285fd7ba0903b76bedb85" dependencies = [ "bitflags 2.8.0", - "errno", + "errno 0.3.10", "libc", "linux-raw-sys 0.4.14", "windows-sys 0.52.0", @@ -6923,6 +7885,18 @@ dependencies = [ "zeroize", ] +[[package]] +name = "rustls-native-certs" +version = "0.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a9aace74cb666635c918e9c12bc0d348266037aa8eb599b5cba565709a8dff00" +dependencies = [ + "openssl-probe", + "rustls-pemfile 1.0.3", + "schannel", + "security-framework 2.9.2", +] + [[package]] name = "rustls-native-certs" version = "0.8.1" @@ -7036,7 +8010,7 @@ checksum = "1ad4cc8da4ef723ed60bced201181d83791ad433213d8c24efffda1eec85d741" [[package]] name = "salsa-simple" -version = "0.14.0" +version = "0.16.0" dependencies = [ "generic-array", "hex", @@ -7091,7 +8065,7 @@ dependencies = [ [[package]] name = "sdp" version = "0.6.2" -source = "git+https://github.com/openmina/webrtc.git?rev=e8705db39af1b198b324a5db6ff57fb213ba75e9#e8705db39af1b198b324a5db6ff57fb213ba75e9" +source = "git+https://github.com/openmina/webrtc.git?rev=aeaa62682b97f6984627bedd6e6811fe17af18eb#aeaa62682b97f6984627bedd6e6811fe17af18eb" dependencies = [ "rand", "substring", @@ -7099,16 +8073,30 @@ dependencies = [ "url", ] +[[package]] +name = "sec1" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3be24c1842290c45df0a7bf069e0c268a747ad05a192f2fd7dcfdbc1cba40928" +dependencies = [ + "base16ct 0.1.1", + "der 0.6.1", + "generic-array", + "pkcs8 0.9.0", + "subtle", + "zeroize", +] + [[package]] name = "sec1" version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d3e97a565f76233a6003f9f5c54be1d9c5bdfa3eccfb189469f11ec4901c47dc" dependencies = [ - "base16ct", - "der", + "base16ct 0.2.0", + "der 0.7.8", "generic-array", - "pkcs8", + "pkcs8 0.10.2", "subtle", "zeroize", ] @@ -7221,7 +8209,7 @@ version = "1.0.107" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6b420ce6e3d8bd882e9b243c6eed35dbc9a6110c9769e74b584e0d68d1f20c65" dependencies = [ - "indexmap 2.0.2", + "indexmap 2.7.1", "itoa", "ryu", "serde", @@ -7237,6 +8225,15 @@ dependencies = [ "serde", ] +[[package]] +name = "serde_spanned" +version = "0.6.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "87607cb1398ed59d48732e575a4c28a7a8ebf2454b964fe3f224f2afc07909e1" +dependencies = [ + "serde", +] + [[package]] name = "serde_urlencoded" version = "0.7.1" @@ -7269,7 +8266,7 @@ dependencies = [ "chrono", "hex", "indexmap 1.9.3", - "indexmap 2.0.2", + "indexmap 2.7.1", "serde", "serde_derive", "serde_json", @@ -7307,7 +8304,7 @@ version = "0.2.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "a84f14a19e9a014bb9f4512488d9829a68e04ecabffb0f9904cd1ace94598177" dependencies = [ - "base16ct", + "base16ct 0.2.0", "serde", ] @@ -7406,6 +8403,16 @@ dependencies = [ "libc", ] +[[package]] +name = "signature" +version = "1.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "74233d3b3b2f6d4b006dc19dee745e73e2a6bfb6f93607cd3b02bd5b00797d7c" +dependencies = [ + "digest 0.10.7", + "rand_core", +] + [[package]] name = "signature" version = "2.1.0" @@ -7502,7 +8509,7 @@ dependencies = [ [[package]] name = "snark" -version = "0.14.0" +version = "0.16.0" dependencies = [ "ark-ec", "ark-ff", @@ -7585,6 +8592,16 @@ dependencies = [ "lock_api", ] +[[package]] +name = "spki" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "67cf02bbac7a337dc36e4f5a693db6c21e7863f45070f7064577eb4367a3212b" +dependencies = [ + "base64ct", + "der 0.6.1", +] + [[package]] name = "spki" version = "0.7.3" @@ -7592,7 +8609,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d91ed6c858b01f942cd56b37a94b3e0a1798290327d1236e4d9cf4eaca44d29d" dependencies = [ "base64ct", - "der", + "der 0.7.8", ] [[package]] @@ -7602,7 +8619,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ce81b7bd7c4493975347ef60d8c7e8b742d4694f4c49f93e0a12ea263938176c" dependencies = [ "itertools 0.12.0", - "nom", + "nom 7.1.3", "unicode_categories", ] @@ -7641,7 +8658,7 @@ dependencies = [ "hashbrown 0.14.5", "hashlink", "hex", - "indexmap 2.0.2", + "indexmap 2.7.1", "log", "memchr", "native-tls", @@ -7920,7 +8937,7 @@ dependencies = [ [[package]] name = "stun" version = "0.6.0" -source = "git+https://github.com/openmina/webrtc.git?rev=e8705db39af1b198b324a5db6ff57fb213ba75e9#e8705db39af1b198b324a5db6ff57fb213ba75e9" +source = "git+https://github.com/openmina/webrtc.git?rev=aeaa62682b97f6984627bedd6e6811fe17af18eb#aeaa62682b97f6984627bedd6e6811fe17af18eb" dependencies = [ "base64 0.21.7", "crc", @@ -7950,6 +8967,16 @@ version = "2.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292" +[[package]] +name = "sudo" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "88bd84d4c082e18e37fef52c0088e4407dabcef19d23a607fb4b5ee03b7d5b83" +dependencies = [ + "libc", + "log", +] + [[package]] name = "syn" version = "0.15.44" @@ -8297,6 +9324,16 @@ dependencies = [ "tokio", ] +[[package]] +name = "tokio-rustls" +version = "0.24.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c28327cf380ac148141087fbfb9de9d7bd4e84ab5d2c28fbc911d753de8a7081" +dependencies = [ + "rustls 0.21.12", + "tokio", +] + [[package]] name = "tokio-rustls" version = "0.26.1" @@ -8353,11 +9390,26 @@ dependencies = [ "serde", ] +[[package]] +name = "toml" +version = "0.8.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cd87a5cdd6ffab733b2f74bc4fd7ee5fff6634124999ac278c35fc78c6120148" +dependencies = [ + "serde", + "serde_spanned", + "toml_datetime", + "toml_edit 0.22.24", +] + [[package]] name = "toml_datetime" -version = "0.6.5" +version = "0.6.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3550f4e9685620ac18a50ed434eb3aec30db8ba93b0287467bca5826ea25baf1" +checksum = "0dd7358ecb8fc2f8d014bf86f6f638ce72ba252a2c3a2572f2a795f1d23efb41" +dependencies = [ + 
"serde", +] [[package]] name = "toml_edit" @@ -8365,9 +9417,22 @@ version = "0.19.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1b5bb770da30e5cbfde35a2d7b9b8a2c4b8ef89548a7a6aeab5c9a576e3e7421" dependencies = [ - "indexmap 2.0.2", + "indexmap 2.7.1", + "toml_datetime", + "winnow 0.5.40", +] + +[[package]] +name = "toml_edit" +version = "0.22.24" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "17b4795ff5edd201c7cd6dca065ae59972ce77d1b80fa0a84d94950ece7d1474" +dependencies = [ + "indexmap 2.7.1", + "serde", + "serde_spanned", "toml_datetime", - "winnow", + "winnow 0.7.2", ] [[package]] @@ -8391,11 +9456,11 @@ dependencies = [ "percent-encoding", "pin-project 1.1.5", "prost 0.13.4", - "rustls-native-certs", + "rustls-native-certs 0.8.1", "rustls-pemfile 2.2.0", "socket2 0.5.5", "tokio", - "tokio-rustls", + "tokio-rustls 0.26.1", "tokio-stream", "tower 0.4.13", "tower-layer", @@ -8587,7 +9652,7 @@ dependencies = [ [[package]] name = "transaction_fuzzer" -version = "0.14.0" +version = "0.16.0" dependencies = [ "ark-ec", "ark-ff", @@ -8702,7 +9767,7 @@ checksum = "23d5919d7121237af683b7fa982450597b1eaa2643e597aec3b519e4e5ab3d62" [[package]] name = "turn" version = "0.8.0" -source = "git+https://github.com/openmina/webrtc.git?rev=e8705db39af1b198b324a5db6ff57fb213ba75e9#e8705db39af1b198b324a5db6ff57fb213ba75e9" +source = "git+https://github.com/openmina/webrtc.git?rev=aeaa62682b97f6984627bedd6e6811fe17af18eb#aeaa62682b97f6984627bedd6e6811fe17af18eb" dependencies = [ "async-trait", "base64 0.21.7", @@ -8722,7 +9787,7 @@ dependencies = [ [[package]] name = "turshi" version = "0.1.0" -source = "git+https://github.com/openmina/proof-systems?rev=dec49a9#dec49a95fd483d8a90b7b602d13bfa8ea9485491" +source = "git+https://github.com/openmina/proof-systems?rev=f461b4b#f461b4bf65fe9677f81033ed7c3c4f997caea3fe" dependencies = [ "ark-ff", "hex", @@ -8891,6 +9956,12 @@ dependencies = [ "percent-encoding", ] +[[package]] +name = "urlencoding" +version = "2.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "daf8dba3b7eb870caf1ddeed7bc9d2a049f3cfdfae7cb521b087cc33ae4c49da" + [[package]] name = "utf-8" version = "0.7.6" @@ -8930,6 +10001,12 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "830b7e5d4d90034032940e4ace0d9a9a057e7a45cd94e6c007832e39edb82f6d" +[[package]] +name = "value-bag" +version = "1.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3ef4c4aa54d5d05a279399bfa921ec387b7aba77caf7a682ae8d86785b8fdad2" + [[package]] name = "vcpkg" version = "0.2.15" @@ -8968,7 +10045,7 @@ checksum = "6a02e4885ed3bc0f2de90ea6dd45ebcbb66dacffe03547fadbb0eeae2770887d" [[package]] name = "vrf" -version = "0.14.0" +version = "0.16.0" dependencies = [ "anyhow", "ark-ec", @@ -8994,6 +10071,12 @@ dependencies = [ "thiserror 1.0.60", ] +[[package]] +name = "vsimd" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c3082ca00d5a5ef149bb8b555a72ae84c9c59f7250f013ac822ac2e49b19c64" + [[package]] name = "waitgroup" version = "0.1.2" @@ -9214,7 +10297,7 @@ dependencies = [ [[package]] name = "webrtc" version = "0.11.0" -source = "git+https://github.com/openmina/webrtc.git?rev=e8705db39af1b198b324a5db6ff57fb213ba75e9#e8705db39af1b198b324a5db6ff57fb213ba75e9" +source = "git+https://github.com/openmina/webrtc.git?rev=aeaa62682b97f6984627bedd6e6811fe17af18eb#aeaa62682b97f6984627bedd6e6811fe17af18eb" dependencies 
= [ "arc-swap", "async-trait", @@ -9257,7 +10340,7 @@ dependencies = [ [[package]] name = "webrtc-data" version = "0.9.0" -source = "git+https://github.com/openmina/webrtc.git?rev=e8705db39af1b198b324a5db6ff57fb213ba75e9#e8705db39af1b198b324a5db6ff57fb213ba75e9" +source = "git+https://github.com/openmina/webrtc.git?rev=aeaa62682b97f6984627bedd6e6811fe17af18eb#aeaa62682b97f6984627bedd6e6811fe17af18eb" dependencies = [ "bytes", "log", @@ -9271,7 +10354,7 @@ dependencies = [ [[package]] name = "webrtc-dtls" version = "0.10.0" -source = "git+https://github.com/openmina/webrtc.git?rev=e8705db39af1b198b324a5db6ff57fb213ba75e9#e8705db39af1b198b324a5db6ff57fb213ba75e9" +source = "git+https://github.com/openmina/webrtc.git?rev=aeaa62682b97f6984627bedd6e6811fe17af18eb#aeaa62682b97f6984627bedd6e6811fe17af18eb" dependencies = [ "aes 0.8.3", "aes-gcm 0.10.3", @@ -9284,7 +10367,7 @@ dependencies = [ "hkdf", "hmac", "log", - "p256", + "p256 0.13.2", "p384", "portable-atomic", "rand", @@ -9292,7 +10375,7 @@ dependencies = [ "rcgen 0.13.1", "ring 0.17.8", "rustls 0.23.21", - "sec1", + "sec1 0.7.3", "serde", "sha1", "sha2 0.10.8", @@ -9307,7 +10390,7 @@ dependencies = [ [[package]] name = "webrtc-ice" version = "0.11.0" -source = "git+https://github.com/openmina/webrtc.git?rev=e8705db39af1b198b324a5db6ff57fb213ba75e9#e8705db39af1b198b324a5db6ff57fb213ba75e9" +source = "git+https://github.com/openmina/webrtc.git?rev=aeaa62682b97f6984627bedd6e6811fe17af18eb#aeaa62682b97f6984627bedd6e6811fe17af18eb" dependencies = [ "arc-swap", "async-trait", @@ -9331,7 +10414,7 @@ dependencies = [ [[package]] name = "webrtc-mdns" version = "0.7.0" -source = "git+https://github.com/openmina/webrtc.git?rev=e8705db39af1b198b324a5db6ff57fb213ba75e9#e8705db39af1b198b324a5db6ff57fb213ba75e9" +source = "git+https://github.com/openmina/webrtc.git?rev=aeaa62682b97f6984627bedd6e6811fe17af18eb#aeaa62682b97f6984627bedd6e6811fe17af18eb" dependencies = [ "log", "socket2 0.5.5", @@ -9343,7 +10426,7 @@ dependencies = [ [[package]] name = "webrtc-media" version = "0.8.0" -source = "git+https://github.com/openmina/webrtc.git?rev=e8705db39af1b198b324a5db6ff57fb213ba75e9#e8705db39af1b198b324a5db6ff57fb213ba75e9" +source = "git+https://github.com/openmina/webrtc.git?rev=aeaa62682b97f6984627bedd6e6811fe17af18eb#aeaa62682b97f6984627bedd6e6811fe17af18eb" dependencies = [ "byteorder", "bytes", @@ -9355,7 +10438,7 @@ dependencies = [ [[package]] name = "webrtc-sctp" version = "0.10.0" -source = "git+https://github.com/openmina/webrtc.git?rev=e8705db39af1b198b324a5db6ff57fb213ba75e9#e8705db39af1b198b324a5db6ff57fb213ba75e9" +source = "git+https://github.com/openmina/webrtc.git?rev=aeaa62682b97f6984627bedd6e6811fe17af18eb#aeaa62682b97f6984627bedd6e6811fe17af18eb" dependencies = [ "arc-swap", "async-trait", @@ -9379,10 +10462,35 @@ dependencies = [ "url", ] +[[package]] +name = "webrtc-sniffer" +version = "0.16.0" +dependencies = [ + "aes 0.8.3", + "aes-gcm 0.10.3", + "cbc", + "clap 4.5.20", + "ctrlc", + "env_logger", + "etherparse", + "hex", + "hkdf", + "hmac", + "log", + "nom 8.0.0", + "p256 0.13.2", + "p384", + "pcap", + "rand", + "sha2 0.10.8", + "sudo", + "thiserror 2.0.11", +] + [[package]] name = "webrtc-srtp" version = "0.13.0" -source = "git+https://github.com/openmina/webrtc.git?rev=e8705db39af1b198b324a5db6ff57fb213ba75e9#e8705db39af1b198b324a5db6ff57fb213ba75e9" +source = "git+https://github.com/openmina/webrtc.git?rev=aeaa62682b97f6984627bedd6e6811fe17af18eb#aeaa62682b97f6984627bedd6e6811fe17af18eb" dependencies = [ "aead 0.5.2", "aes 
0.8.3", @@ -9404,7 +10512,7 @@ dependencies = [ [[package]] name = "webrtc-util" version = "0.9.0" -source = "git+https://github.com/openmina/webrtc.git?rev=e8705db39af1b198b324a5db6ff57fb213ba75e9#e8705db39af1b198b324a5db6ff57fb213ba75e9" +source = "git+https://github.com/openmina/webrtc.git?rev=aeaa62682b97f6984627bedd6e6811fe17af18eb#aeaa62682b97f6984627bedd6e6811fe17af18eb" dependencies = [ "async-trait", "bitflags 1.3.2", @@ -9532,6 +10640,19 @@ dependencies = [ "windows-targets 0.52.6", ] +[[package]] +name = "windows-sys" +version = "0.36.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ea04155a16a59f9eab786fe12a4a450e75cdb175f9e0d80da1e17db09f55b8d2" +dependencies = [ + "windows_aarch64_msvc 0.36.1", + "windows_i686_gnu 0.36.1", + "windows_i686_msvc 0.36.1", + "windows_x86_64_gnu 0.36.1", + "windows_x86_64_msvc 0.36.1", +] + [[package]] name = "windows-sys" version = "0.45.0" @@ -9559,6 +10680,15 @@ dependencies = [ "windows-targets 0.52.6", ] +[[package]] +name = "windows-sys" +version = "0.59.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b" +dependencies = [ + "windows-targets 0.52.6", +] + [[package]] name = "windows-targets" version = "0.42.2" @@ -9623,6 +10753,12 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" +[[package]] +name = "windows_aarch64_msvc" +version = "0.36.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9bb8c3fd39ade2d67e9874ac4f3db21f0d710bee00fe7cab16949ec184eeaa47" + [[package]] name = "windows_aarch64_msvc" version = "0.42.2" @@ -9641,6 +10777,12 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" +[[package]] +name = "windows_i686_gnu" +version = "0.36.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "180e6ccf01daf4c426b846dfc66db1fc518f074baa793aa7d9b9aaeffad6a3b6" + [[package]] name = "windows_i686_gnu" version = "0.42.2" @@ -9665,6 +10807,12 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" +[[package]] +name = "windows_i686_msvc" +version = "0.36.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e2e7917148b2812d1eeafaeb22a97e4813dfa60a3f8f78ebe204bcc88f12f024" + [[package]] name = "windows_i686_msvc" version = "0.42.2" @@ -9683,6 +10831,12 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" +[[package]] +name = "windows_x86_64_gnu" +version = "0.36.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4dcd171b8776c41b97521e5da127a2d86ad280114807d0b2ab1e462bc764d9e1" + [[package]] name = "windows_x86_64_gnu" version = "0.42.2" @@ -9719,6 +10873,12 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" +[[package]] +name = "windows_x86_64_msvc" +version = "0.36.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c811ca4a8c853ef420abd8592ba53ddbbac90410fab6903b3e79972a631f7680" + [[package]] name = 
"windows_x86_64_msvc" version = "0.42.2" @@ -9746,6 +10906,15 @@ dependencies = [ "memchr", ] +[[package]] +name = "winnow" +version = "0.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "59690dea168f2198d1a3b0cac23b8063efcd11012f10ae4698f284808c8ef603" +dependencies = [ + "memchr", +] + [[package]] name = "winreg" version = "0.50.0" @@ -9799,7 +10968,7 @@ dependencies = [ "data-encoding", "der-parser 8.2.0", "lazy_static", - "nom", + "nom 7.1.3", "oid-registry 0.6.1", "rusticata-macros", "thiserror 1.0.60", @@ -9816,7 +10985,7 @@ dependencies = [ "data-encoding", "der-parser 9.0.0", "lazy_static", - "nom", + "nom 7.1.3", "oid-registry 0.7.1", "ring 0.17.8", "rusticata-macros", @@ -9830,6 +10999,12 @@ version = "0.8.19" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0fcb9cbac069e033553e8bb871be2fbdffcab578eb25bd0f7c508cedc6dcd75a" +[[package]] +name = "xmlparser" +version = "0.13.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "66fee0b777b0f5ac1c69bb06d361268faafa61cd4682ae064a171c16c433e9e4" + [[package]] name = "xmltree" version = "0.10.3" diff --git a/Cargo.toml b/Cargo.toml index 889c49c481..de5acf974b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -31,6 +31,8 @@ members = [ "tools/fuzzing", "tools/archive-breadcrumb-compare", "tools/heartbeats-processor", + "tools/webrtc-sniffer", + "producer-dashboard", "fuzzer", @@ -48,14 +50,14 @@ mina-p2p-messages = { path = "mina-p2p-messages" } poseidon = { path = "poseidon" } ledger = { path = "ledger", package = "mina-tree" } -mina-hasher = { git = "https://github.com/openmina/proof-systems", rev = "dec49a9" } -mina-signer = { git = "https://github.com/openmina/proof-systems", rev = "dec49a9" } -mina-curves = { git = "https://github.com/openmina/proof-systems", rev = "dec49a9" } -# UNCOMMENTED_IN_CI mina-curves = { git = "https://github.com/openmina/proof-systems", rev = "dec49a9", features = [ "32x9" ] } -o1-utils = { git = "https://github.com/openmina/proof-systems", rev = "dec49a9" } -kimchi = { git = "https://github.com/openmina/proof-systems", rev = "dec49a9" } -mina-poseidon = {git = "https://github.com/openmina/proof-systems", rev = "dec49a9" } -poly-commitment = {git = "https://github.com/openmina/proof-systems", rev = "dec49a9" } +mina-hasher = { git = "https://github.com/openmina/proof-systems", rev = "f461b4b" } +mina-signer = { git = "https://github.com/openmina/proof-systems", rev = "f461b4b" } +mina-curves = { git = "https://github.com/openmina/proof-systems", rev = "f461b4b" } +# UNCOMMENTED_IN_CI mina-curves = { git = "https://github.com/openmina/proof-systems", rev = "f461b4b", features = [ "32x9" ] } +o1-utils = { git = "https://github.com/openmina/proof-systems", rev = "f461b4b" } +kimchi = { git = "https://github.com/openmina/proof-systems", rev = "f461b4b" } +mina-poseidon = { git = "https://github.com/openmina/proof-systems", rev = "f461b4b" } +poly-commitment = { git = "https://github.com/openmina/proof-systems", rev = "f461b4b" } libp2p = { git = "https://github.com/openmina/rust-libp2p", rev = "5c44c7d9", default-features = false } vrf = { path = "vrf" } @@ -90,10 +92,10 @@ incremental = false codegen-units = 1 [patch.crates-io] -ark-ff = { git = "https://github.com/openmina/algebra", rev = "aea157a" } # branch: fix-openmina-webnode -ark-ec = { git = "https://github.com/openmina/algebra", rev = "aea157a" } # branch: fix-openmina-webnode -ark-poly = { git = "https://github.com/openmina/algebra", rev = "aea157a" } # branch: 
fix-openmina-webnode -ark-serialize = { git = "https://github.com/openmina/algebra", rev = "aea157a" } # branch: fix-openmina-webnode +ark-ff = { git = "https://github.com/openmina/algebra", rev = "150ab8d" } # branch: fix-openmina-webnode +ark-ec = { git = "https://github.com/openmina/algebra", rev = "150ab8d" } # branch: fix-openmina-webnode +ark-poly = { git = "https://github.com/openmina/algebra", rev = "150ab8d" } # branch: fix-openmina-webnode +ark-serialize = { git = "https://github.com/openmina/algebra", rev = "150ab8d" } # branch: fix-openmina-webnode num-bigint = { git = "https://github.com/openmina/num-bigint", rev = "8bb5ee4" } # branch: on-stack num-rational = { git = "https://github.com/openmina/num-rational", rev = "336f11d" } # branch: on-stack diff --git a/Dockerfile b/Dockerfile index 80d7e72f1f..58a13deafd 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,4 +1,4 @@ -FROM rust:buster AS build +FROM rust:bullseye AS build RUN apt-get update && apt-get install -y protobuf-compiler && apt-get clean RUN rustup default 1.84 && rustup component add rustfmt WORKDIR /openmina @@ -18,7 +18,7 @@ RUN --mount=type=cache,target=/usr/local/cargo/registry \ RUN git clone --depth 1 https://github.com/openmina/circuit-blobs.git \ && rm -rf circuit-blobs/berkeley_rc1 circuit-blobs/*/tests -FROM debian:buster +FROM debian:bullseye RUN apt-get update && apt-get install -y libjemalloc2 libssl1.1 libpq5 curl jq procps && apt-get clean COPY --from=build /openmina/release-bin/openmina /usr/local/bin/ diff --git a/cli/Cargo.toml b/cli/Cargo.toml index defd502f5d..fa45cf9e58 100644 --- a/cli/Cargo.toml +++ b/cli/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "cli" -version = "0.14.0" +version = "0.16.0" edition = "2021" license = "Apache-2.0" @@ -38,6 +38,7 @@ nix = { version = "0.26.2", features = ["signal"] } shellexpand = "3.1.0" dialoguer = "0.10.4" serde_json = "1.0.107" +backtrace = "0.3" [target.'cfg(not(target_family = "wasm"))'.dependencies] redux = { workspace = true, features=["serializable_callbacks"] } diff --git a/cli/replay_dynamic_effects/Cargo.toml b/cli/replay_dynamic_effects/Cargo.toml index e226cf2399..e8524e00d9 100644 --- a/cli/replay_dynamic_effects/Cargo.toml +++ b/cli/replay_dynamic_effects/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "replay_dynamic_effects" -version = "0.14.0" +version = "0.16.0" edition = "2021" license = "Apache-2.0" diff --git a/cli/src/commands/build_info/mod.rs b/cli/src/commands/build_info/mod.rs index 99e1889d75..3e7e52ba3f 100644 --- a/cli/src/commands/build_info/mod.rs +++ b/cli/src/commands/build_info/mod.rs @@ -9,6 +9,7 @@ impl Command { let build_env = BuildEnv::get(); println!( r#" +Version: {} Build time: {} Commit SHA: {} Commit time: {} @@ -16,6 +17,7 @@ Commit branch: {} Rustc channel: {} Rustc version: {} "#, + build_env.version, build_env.time, build_env.git.commit_hash, build_env.git.commit_time, diff --git a/cli/src/commands/node/mod.rs b/cli/src/commands/node/mod.rs index 098316bbc1..31f78b16da 100644 --- a/cli/src/commands/node/mod.rs +++ b/cli/src/commands/node/mod.rs @@ -17,7 +17,7 @@ use node::p2p::identity::SecretKey; use node::service::Recorder; use node::SnarkerStrategy; -use openmina_node_native::{tracing, NodeBuilder}; +use openmina_node_native::{archive::config::ArchiveStorageOptions, tracing, NodeBuilder}; /// Openmina node #[derive(Debug, clap::Args)] @@ -138,9 +138,42 @@ pub struct Node { #[arg(short = 'c', long, env)] pub config: Option, - /// Enable archive mode (seding blocks to the archive process). 
+ /// Enable local precomputed storage. + /// + /// This option requires the following environment variables to be set: + /// - OPENMINA_ARCHIVE_LOCAL_STORAGE_PATH (otherwise the path to the working directory will be used) + #[arg(long, env)] + pub archive_local_storage: bool, + + /// Enable archiver process. + /// + /// This requires the following environment variables to be set: + /// - OPENMINA_ARCHIVE_ADDRESS + #[arg(long, env)] + pub archive_archiver_process: bool, + + /// Enable GCP precomputed storage. + /// + /// This requires the following environment variables to be set: + /// - GCP_CREDENTIALS_JSON + /// - GCP_BUCKET_NAME + /// #[arg(long, env)] - pub archive_address: Option, + pub archive_gcp_storage: bool, + + /// Enable AWS precomputed storage. + /// + /// This requires the following environment variables to be set: + /// - AWS_ACCESS_KEY_ID + /// - AWS_SECRET_ACCESS_KEY + /// - AWS_SESSION_TOKEN + /// - AWS_DEFAULT_REGION + /// - OPENMINA_AWS_BUCKET_NAME + #[arg(long, env)] + pub archive_aws_storage: bool, + + #[arg(long, env)] + pub rng_seed: Option, } impl Node { @@ -186,7 +219,27 @@ impl Node { node::config::DEVNET_CONFIG.clone(), ), }; - let mut node_builder: NodeBuilder = NodeBuilder::new(None, daemon_conf, genesis_conf); + + let custom_rng_seed = match self.rng_seed { + None => None, + Some(v) => match hex::decode(v) + .map_err(anyhow::Error::from) + .and_then(|bytes| { + <[u8; 32]>::try_from(bytes.as_slice()).map_err(anyhow::Error::from) + }) { + Ok(v) => Some(v), + Err(err) => { + node::core::error!( + node::core::log::system_time(); + summary = "bad rng seed", + err = err.to_string(), + ); + return Err(err); + } + }, + }; + let mut node_builder: NodeBuilder = + NodeBuilder::new(custom_rng_seed, daemon_conf, genesis_conf); // let genesis_config = match self.config { // Some(config_path) => GenesisConfig::DaemonJsonFile(config_path).into(), @@ -272,23 +325,44 @@ impl Node { } } - if let Some(address) = self.archive_address { - openmina_core::IS_ARCHIVE - .set(true) - .expect("IS_ARCHIVE already set"); + let archive_storage_options = ArchiveStorageOptions::from_iter( + [ + ( + self.archive_local_storage, + ArchiveStorageOptions::LOCAL_PRECOMPUTED_STORAGE, + ), + ( + self.archive_archiver_process, + ArchiveStorageOptions::ARCHIVER_PROCESS, + ), + ( + self.archive_gcp_storage, + ArchiveStorageOptions::GCP_PRECOMPUTED_STORAGE, + ), + ( + self.archive_aws_storage, + ArchiveStorageOptions::AWS_PRECOMPUTED_STORAGE, + ), + ] + .iter() + .filter(|(enabled, _)| *enabled) + .map(|(_, option)| option.clone()), + ); + + if archive_storage_options.is_enabled() { node::core::info!( summary = "Archive mode enabled", - address = address.to_string() + local_storage = archive_storage_options.uses_local_precomputed_storage(), + archiver_process = archive_storage_options.uses_archiver_process(), + gcp_storage = archive_storage_options.uses_gcp_precomputed_storage(), + aws_storage = archive_storage_options.uses_aws_precomputed_storage(), ); - // Convert URL to SocketAddr - let socket_addrs = address.socket_addrs(|| None).expect("Invalid URL"); - let socket_addr = socket_addrs.first().expect("No socket address found"); - node_builder.archive(*socket_addr); - } else { - openmina_core::IS_ARCHIVE - .set(false) - .expect("IS_ARCHIVE already set"); + archive_storage_options + .validate_env_vars() + .map_err(|e| anyhow::anyhow!(e))?; + + node_builder.archive(archive_storage_options, work_dir.clone()); } if let Some(sec_key) = self.run_snarker { diff --git 
a/cli/src/commands/replay/replay_state_with_input_actions.rs b/cli/src/commands/replay/replay_state_with_input_actions.rs index 637f378e2a..d5271eaa81 100644 --- a/cli/src/commands/replay/replay_state_with_input_actions.rs +++ b/cli/src/commands/replay/replay_state_with_input_actions.rs @@ -10,6 +10,9 @@ pub struct ReplayStateWithInputActions { #[arg(long, default_value = "./target/release/libreplay_dynamic_effects.so")] pub dynamic_effects_lib: String, + #[arg(long)] + pub ignore_mismatch: bool, + /// Verbosity level #[arg(long, short, default_value = "info")] pub verbosity: tracing::Level, @@ -30,13 +33,22 @@ impl ReplayStateWithInputActions { } }; - replay_state_with_input_actions(&dir, dynamic_effects_lib, check_build_env)?; + replay_state_with_input_actions( + &dir, + dynamic_effects_lib, + self.ignore_mismatch, + check_build_env, + )?; Ok(()) } } -pub fn check_build_env(record_env: &BuildEnv, replay_env: &BuildEnv) -> anyhow::Result<()> { +pub fn check_build_env( + record_env: &BuildEnv, + replay_env: &BuildEnv, + ignore_mismatch: bool, +) -> anyhow::Result<()> { let is_git_same = record_env.git.commit_hash == replay_env.git.commit_hash; let is_cargo_same = record_env.cargo == replay_env.cargo; let is_rustc_same = record_env.rustc == replay_env.rustc; @@ -47,7 +59,8 @@ pub fn check_build_env(record_env: &BuildEnv, replay_env: &BuildEnv) -> anyhow:: record_env.git, replay_env.git ); let msg = format!("git build env mismatch!\n{diff}"); - if console::user_attended() { + if ignore_mismatch { + } else if console::user_attended() { use dialoguer::Confirm; let prompt = format!("{msg}\nDo you want to continue?"); diff --git a/cli/src/main.rs b/cli/src/main.rs index 9473803c5f..67b73af73d 100644 --- a/cli/src/main.rs +++ b/cli/src/main.rs @@ -1,3 +1,6 @@ +use backtrace::Backtrace; +use std::panic::PanicHookInfo; + #[cfg(not(target_arch = "wasm32"))] use tikv_jemallocator::Jemalloc; @@ -56,8 +59,72 @@ fn setup_var_from_single_and_only_thread() { } } -fn main() -> anyhow::Result<()> { +/// Mimic default hook: +/// https://github.com/rust-lang/rust/blob/5986ff05d8480da038dd161b3a6aa79ff364a851/library/std/src/panicking.rs#L246 +/// +/// Unlike the default hook, this one allocates. +/// We store (+ display) panics in non-main threads, and display them all when the main thread panics. 
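+///
+/// Installed in `early_setup` below via `std::panic::set_hook`.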
+#[cfg(not(target_family = "wasm"))] +fn new_hook(info: &PanicHookInfo<'_>) { + use std::any::Any; + use std::io::Write; + + fn payload_as_str(payload: &dyn Any) -> &str { + if let Some(&s) = payload.downcast_ref::<&'static str>() { + s + } else if let Some(s) = payload.downcast_ref::() { + s.as_str() + } else { + "Box" + } + } + + static PREVIOUS_PANICS: std::sync::Mutex>> = + const { std::sync::Mutex::new(Vec::new()) }; + + let mut s: Vec = Vec::with_capacity(64 * 1024); + let backtrace = Backtrace::new(); + + let current = std::thread::current(); + let name = current.name().unwrap_or(""); + let location = info.location().unwrap(); + let msg = payload_as_str(info.payload()); + + let _ = writeln!(&mut s, "\nthread '{name}' panicked at {location}:\n{msg}"); + let _ = writeln!(&mut s, "{:#?}", &backtrace); + + eprintln!("{}", String::from_utf8_lossy(&s)); + + if name != "main" { + let Ok(mut previous) = PREVIOUS_PANICS.lock() else { + return; + }; + // Make sure we don't store too many panics + if previous.len() < 256 { + previous.push(s); + eprintln!("Saved panic from thread '{name}'"); + } else { + eprintln!("Panic from thread '{name}' not saved !"); + } + } else { + let Ok(previous) = PREVIOUS_PANICS.lock() else { + return; + }; + eprintln!("\nNumber of panics from others threads: {}", previous.len()); + for panic in previous.iter() { + eprintln!("{}", String::from_utf8_lossy(panic)); + } + } +} + +fn early_setup() { setup_var_from_single_and_only_thread(); + #[cfg(not(target_family = "wasm"))] + std::panic::set_hook(Box::new(new_hook)); +} + +fn main() -> anyhow::Result<()> { + early_setup(); #[cfg(feature = "unsafe-signal-handlers")] unsafe_signal_handlers::setup(); diff --git a/core/Cargo.toml b/core/Cargo.toml index 2ba5c357db..a60bea43b5 100644 --- a/core/Cargo.toml +++ b/core/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "openmina-core" -version = "0.14.0" +version = "0.16.0" edition = "2021" license = "Apache-2.0" @@ -17,6 +17,7 @@ binprot_derive = { git = "https://github.com/openmina/binprot-rs", rev = "400b52 rand = "0.8.0" redux = { workspace = true } tokio = { version = "1.26", features = ["sync"] } +flume = { version = "0.11.1", features = ["async", "spin"] } time = { version = "0.3", features = ["formatting", "macros", "parsing"] } md5 = "0.7.0" multihash = { version = "0.18.1", features = ["blake2b"] } @@ -36,6 +37,10 @@ mina-p2p-messages = { workspace = true } poseidon = { workspace = true } hex = "0.4.3" ark-ff = { workspace = true } +libp2p-identity = { version = "=0.2.7", features = [ + "serde", + "peerid" +] } [target.'cfg(not(target_family = "wasm"))'.dependencies] redux = { workspace = true, features = ["serializable_callbacks"] } diff --git a/core/src/block/block_with_hash.rs b/core/src/block/block_with_hash.rs index 17b0a262fc..14108cc35a 100644 --- a/core/src/block/block_with_hash.rs +++ b/core/src/block/block_with_hash.rs @@ -61,6 +61,10 @@ impl> BlockWithHash { global_slot(self.header()) } + pub fn slot(&self) -> u32 { + slot(self.header()) + } + pub fn global_slot_since_genesis(&self) -> u32 { global_slot_since_genesis(self.header()) } @@ -149,6 +153,10 @@ impl> BlockWithHash { ) -> Box> { self.body().completed_works_iter() } + + pub fn block_stake_winner(&self) -> &v2::NonZeroCurvePoint { + block_stake_winner(self.header()) + } } impl> BlockHeaderWithHash { @@ -176,6 +184,10 @@ impl> BlockHeaderWithHash { global_slot(self.header()) } + pub fn slot(&self) -> u32 { + slot(self.header()) + } + pub fn global_slot_since_genesis(&self) -> u32 { 
global_slot_since_genesis(self.header()) } @@ -227,6 +239,10 @@ impl> BlockHeaderWithHash { pub fn staged_ledger_hashes(&self) -> &v2::MinaBaseStagedLedgerHashStableV1 { staged_ledger_hashes(self.header()) } + + pub fn block_stake_winner(&self) -> &v2::NonZeroCurvePoint { + block_stake_winner(self.header()) + } } fn consensus_state( @@ -243,6 +259,15 @@ fn global_slot(header: &BlockHeader) -> u32 { consensus_state(header).global_slot() } +fn slot(header: &BlockHeader) -> u32 { + let slot_struct = &consensus_state(header).curr_global_slot_since_hard_fork; + slot_struct + .slot_number + .as_u32() + .checked_rem(slot_struct.slots_per_epoch.as_u32()) + .expect("division by zero") +} + fn global_slot_since_genesis(header: &BlockHeader) -> u32 { consensus_state(header).global_slot_since_genesis.as_u32() } @@ -318,3 +343,11 @@ fn staged_ledger_hashes(header: &BlockHeader) -> &v2::MinaBaseStagedLedgerHashSt .blockchain_state .staged_ledger_hash } + +fn block_stake_winner(header: &BlockHeader) -> &v2::NonZeroCurvePoint { + &header + .protocol_state + .body + .consensus_state + .block_stake_winner +} diff --git a/core/src/block/mod.rs b/core/src/block/mod.rs index 4b430e4120..0cef7c0bf7 100644 --- a/core/src/block/mod.rs +++ b/core/src/block/mod.rs @@ -4,6 +4,8 @@ pub use block_with_hash::{BlockHeaderWithHash, BlockWithHash}; mod applied_block; pub use applied_block::AppliedBlock; +pub mod prevalidate; + pub mod genesis; use std::sync::Arc; diff --git a/core/src/block/prevalidate.rs b/core/src/block/prevalidate.rs new file mode 100644 index 0000000000..8442145b2b --- /dev/null +++ b/core/src/block/prevalidate.rs @@ -0,0 +1,120 @@ +use serde::{Deserialize, Serialize}; + +use super::ArcBlockWithHash; +use crate::constants::PROTOCOL_VERSION; + +#[derive(Serialize, Deserialize, Debug, Clone)] +pub enum BlockPrevalidationError { + GenesisNotReady, + ReceivedTooEarly { + current_global_slot: u32, + block_global_slot: u32, + }, + ReceivedTooLate { + current_global_slot: u32, + block_global_slot: u32, + delta: u32, + }, + InvalidGenesisProtocolState, + InvalidProtocolVersion, + MismatchedProtocolVersion, + ConsantsMismatch, + InvalidDeltaBlockChainProof, +} + +impl BlockPrevalidationError { + pub fn is_forever_invalid(&self) -> bool { + !matches!(self, Self::ReceivedTooEarly { .. 
}) + } +} + +pub fn validate_block_timing( + block: &ArcBlockWithHash, + genesis: &ArcBlockWithHash, + cur_global_slot: u32, + allow_block_too_late: bool, +) -> Result<(), BlockPrevalidationError> { + let block_global_slot = block.global_slot(); + let delta = genesis.constants().delta.as_u32(); + + if cur_global_slot < block_global_slot { + return Err(BlockPrevalidationError::ReceivedTooEarly { + current_global_slot: cur_global_slot, + block_global_slot, + }); + } else if !allow_block_too_late && cur_global_slot.saturating_sub(block_global_slot) > delta { + return Err(BlockPrevalidationError::ReceivedTooLate { + current_global_slot: cur_global_slot, + block_global_slot, + delta, + }); + } + + Ok(()) +} + +pub fn validate_genesis_state( + block: &ArcBlockWithHash, + genesis: &ArcBlockWithHash, +) -> Result<(), BlockPrevalidationError> { + if block.header().genesis_state_hash() != genesis.hash() { + return Err(BlockPrevalidationError::InvalidGenesisProtocolState); + } + Ok(()) +} + +pub fn validate_protocol_versions(block: &ArcBlockWithHash) -> Result<(), BlockPrevalidationError> { + let min_transaction_version = 1.into(); + let v = &block.header().current_protocol_version; + let nv = block + .header() + .proposed_protocol_version_opt + .as_ref() + .unwrap_or(v); + + // Our version values are unsigned, so there is no need to check that the + // other parts are not negative. + let valid = + v.transaction >= min_transaction_version && nv.transaction >= min_transaction_version; + if !valid { + return Err(BlockPrevalidationError::InvalidProtocolVersion); + } + + let compatible = + v.transaction == PROTOCOL_VERSION.transaction && v.network == PROTOCOL_VERSION.network; + if !compatible { + return Err(BlockPrevalidationError::MismatchedProtocolVersion); + } + + Ok(()) +} + +pub fn validate_constants( + block: &ArcBlockWithHash, + genesis: &ArcBlockWithHash, +) -> Result<(), BlockPrevalidationError> { + // NOTE: currently these cannot change between blocks, but that + // may not always be true? 
+ if block.constants() != genesis.constants() { + return Err(BlockPrevalidationError::ConsantsMismatch); + } + Ok(()) +} + +pub fn prevalidate_block( + block: &ArcBlockWithHash, + genesis: &ArcBlockWithHash, + cur_global_slot: u32, + allow_block_too_late: bool, +) -> Result<(), BlockPrevalidationError> { + validate_block_timing(block, genesis, cur_global_slot, allow_block_too_late)?; + validate_genesis_state(block, genesis)?; + validate_protocol_versions(block)?; + validate_constants(block, genesis)?; + + // TODO(tizoc): check for InvalidDeltaBlockChainProof + // https://github.com/MinaProtocol/mina/blob/d800da86a764d8d37ffb8964dd8d54d9f522b358/src/lib/mina_block/validation.ml#L369 + // https://github.com/MinaProtocol/mina/blob/d800da86a764d8d37ffb8964dd8d54d9f522b358/src/lib/transition_chain_verifier/transition_chain_verifier.ml + + Ok(()) +} diff --git a/core/src/channels.rs b/core/src/channels.rs index ddc93486ae..bfd032b2ce 100644 --- a/core/src/channels.rs +++ b/core/src/channels.rs @@ -1 +1,211 @@ -pub use tokio::sync::{broadcast, mpsc, oneshot, watch}; +pub use tokio::sync::oneshot; + +pub mod mpsc { + use std::sync::{Arc, Weak}; + + pub use flume::{SendError, TryRecvError, TrySendError}; + + pub type RecvStream = flume::r#async::RecvStream<'static, T>; + + pub struct Sender(flume::Sender); + pub struct Receiver(flume::Receiver); + + pub struct UnboundedSender(flume::Sender, Arc<()>); + pub struct UnboundedReceiver(flume::Receiver); + + pub type TrackedUnboundedSender = UnboundedSender>; + pub type TrackedUnboundedReceiver = UnboundedReceiver>; + + #[allow(dead_code)] + pub struct Tracked(pub T, pub Tracker); + #[allow(dead_code)] + pub struct Tracker(Weak<()>); + + impl std::fmt::Debug for UnboundedSender { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{:?} (len: {})", self.0, self.len()) + } + } + + impl std::fmt::Debug for UnboundedReceiver { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{:?} (len: {})", self.0, self.len()) + } + } + + impl Clone for Sender { + fn clone(&self) -> Self { + Self(self.0.clone()) + } + } + + impl Clone for UnboundedSender { + fn clone(&self) -> Self { + Self(self.0.clone(), self.1.clone()) + } + } + + impl std::ops::Deref for Tracked { + type Target = T; + + fn deref(&self) -> &Self::Target { + &self.0 + } + } + + impl std::ops::DerefMut for Tracked { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.0 + } + } + + impl Sender { + pub async fn send(&self, message: T) -> Result<(), SendError> { + self.0.send_async(message).await + } + + pub fn try_send(&self, message: T) -> Result<(), TrySendError> { + self.0.try_send(message) + } + } + + impl Receiver { + pub fn is_empty(&self) -> bool { + self.0.is_empty() + } + + pub fn len(&self) -> usize { + self.0.len() + } + + pub async fn recv(&mut self) -> Option { + self.0.recv_async().await.ok() + } + + pub fn try_recv(&mut self) -> Result { + self.0.try_recv() + } + } + + impl UnboundedSender { + pub fn is_empty(&self) -> bool { + self.len() == 0 + } + + pub fn len(&self) -> usize { + Arc::weak_count(&self.1) + } + + pub fn send(&self, message: T) -> Result<(), SendError> { + self.0.send(message) + } + } + + impl TrackedUnboundedSender { + pub fn tracked_send(&self, message: T) -> Result<(), SendError> { + let msg = Tracked(message, Tracker(Arc::downgrade(&self.1))); + self.send(msg).map_err(|err| SendError(err.0 .0)) + } + } + + impl UnboundedReceiver { + pub fn is_empty(&self) -> bool { + self.0.is_empty() + } + + pub 
fn len(&self) -> usize { + self.0.len() + } + + pub async fn recv(&mut self) -> Option { + self.0.recv_async().await.ok() + } + + pub fn try_recv(&mut self) -> Result { + self.0.try_recv() + } + + pub fn stream(&self) -> RecvStream { + self.0.clone().into_stream() + } + + pub fn blocking_recv(&mut self) -> Option { + self.0.recv().ok() + } + } + + pub fn channel(bound: usize) -> (Sender, Receiver) { + let (tx, rx) = flume::bounded(bound); + + (Sender(tx), Receiver(rx)) + } + + pub fn unbounded_channel() -> (UnboundedSender, UnboundedReceiver) { + let (tx, rx) = flume::unbounded(); + + (UnboundedSender(tx, Arc::new(())), UnboundedReceiver(rx)) + } + + pub fn tracked_unbounded_channel( + ) -> (UnboundedSender>, UnboundedReceiver>) { + let (tx, rx) = flume::unbounded(); + + (UnboundedSender(tx, Arc::new(())), UnboundedReceiver(rx)) + } +} + +pub mod broadcast { + pub use tokio::sync::broadcast::*; + + #[deprecated(note = "don't use across threads as it can cause panic in WASM")] + #[inline(always)] + pub fn channel(capacity: usize) -> (Sender, Receiver) { + tokio::sync::broadcast::channel(capacity) + } +} + +pub mod watch { + pub use tokio::sync::watch::*; + + #[deprecated(note = "don't use across threads as it can cause panic in WASM")] + #[inline(always)] + pub fn channel(init: T) -> (Sender, Receiver) { + tokio::sync::watch::channel(init) + } +} + +#[allow(dead_code)] +pub struct Aborter(flume::Receiver<()>, flume::Sender<()>); + +#[derive(Clone)] +pub struct Aborted(flume::Sender<()>); + +impl Default for Aborter { + fn default() -> Self { + let (tx, rx) = flume::bounded(0); + Self(rx, tx) + } +} + +impl Aborter { + pub fn listener_count(&self) -> usize { + self.0.sender_count().saturating_sub(1) + } + + /// Simply drops the object. No need to call manually, unless you + /// temporarily have to retain object for some reason. + pub fn abort_mut(&mut self) { + std::mem::take(self); + } + + pub fn aborted(&self) -> Aborted { + Aborted(self.1.clone()) + } +} + +impl Aborted { + pub async fn wait(&self) { + // it returning an error means receiver was dropped + while self.0.send_async(()).await.is_ok() {} + } +} diff --git a/core/src/consensus.rs b/core/src/consensus.rs index bd149eba74..169ac9f752 100644 --- a/core/src/consensus.rs +++ b/core/src/consensus.rs @@ -2,6 +2,7 @@ use mina_p2p_messages::v2::{ self, BlockTimeTimeStableV1, ConsensusProofOfStakeDataConsensusStateValueStableV2 as MinaConsensusState, StateHash, }; +use redux::Timestamp; use serde::{Deserialize, Serialize}; use time::{macros::format_description, OffsetDateTime}; @@ -30,6 +31,15 @@ pub enum ConsensusLongRangeForkDecisionReason { StateHash, } +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct ConsensusTime { + pub start_time: Timestamp, + pub end_time: Timestamp, + pub epoch: u32, + pub global_slot: u32, + pub slot: u32, +} + // TODO(binier): do we need to verify constants? Probably they are verified // using block proof verification, but check just to be sure. 
pub fn is_short_range_fork(a: &MinaConsensusState, b: &MinaConsensusState) -> bool { diff --git a/core/src/lib.rs b/core/src/lib.rs index 967b6276ff..0752c471b7 100644 --- a/core/src/lib.rs +++ b/core/src/lib.rs @@ -17,13 +17,13 @@ pub mod constants; pub mod dummy; pub mod block; +pub mod p2p; pub mod snark; pub mod transaction; pub mod consensus; mod substate; -use std::sync::OnceLock; pub use substate::{Substate, SubstateAccess, SubstateResult}; @@ -36,9 +36,6 @@ pub use chain_id::*; pub mod encrypted_key; pub use encrypted_key::*; -// FIXME(#1043): refactor -pub static IS_ARCHIVE: OnceLock = OnceLock::new(); - mod work_dir { use once_cell::sync::OnceCell; use std::path::PathBuf; diff --git a/core/src/p2p.rs b/core/src/p2p.rs new file mode 100644 index 0000000000..09463b9543 --- /dev/null +++ b/core/src/p2p.rs @@ -0,0 +1,16 @@ +/// TODO: These types and methods should be moved to `p2p` crate, they are here because they are used in `snark` crates callbacks +use serde::{Deserialize, Serialize}; + +#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq, Hash, PartialOrd, Ord, Copy)] +pub struct P2pNetworkPubsubMessageCacheId { + pub source: libp2p_identity::PeerId, + pub seqno: u64, +} + +impl P2pNetworkPubsubMessageCacheId { + pub fn to_raw_bytes(&self) -> Vec { + let mut message_id = self.source.to_base58(); + message_id.push_str(&self.seqno.to_string()); + message_id.into_bytes() + } +} diff --git a/core/src/snark/snark.rs b/core/src/snark/snark.rs index 51b61ebd3d..6ca66e068f 100644 --- a/core/src/snark/snark.rs +++ b/core/src/snark/snark.rs @@ -77,6 +77,16 @@ impl From for Snark { } } +impl From for TransactionSnarkWorkTStableV2 { + fn from(value: Snark) -> Self { + Self { + fee: value.fee, + proofs: value.proofs.as_ref().clone(), + prover: value.snarker, + } + } +} + impl From for Snark { fn from(value: NetworkPoolSnarkPoolDiffVersionedStableV2AddSolvedWork1) -> Self { Self { diff --git a/core/src/snark/snark_job_id.rs b/core/src/snark/snark_job_id.rs index 6cec218b71..9b4edab7ce 100644 --- a/core/src/snark/snark_job_id.rs +++ b/core/src/snark/snark_job_id.rs @@ -145,7 +145,7 @@ mod tests { #[test] fn test_snark_job_id_to_string_from_string() { - let s = "jw9nPCs68UNaKaLZwV6QzdswKWomwQxvTgrpmKWmnFJyswnrn4N:jwhHYWzvJG8esmqtYXbUZy3UGbLSjhKvn1FSxBGL1JDFHqbHMJc->jwiLuRrEqNgASgXEqibGs4VqKwSwiuFEtuPD53v8hiTtVuLfmTr:jwhHYWzvJG8esmqtYXbUZy3UGbLSjhKvn1FSxBGL1JDFHqbHMJc"; + let s = "jw9nPCs68UNaKaLZwV6QzdswKWomwQxvTgrpmKWmnFJyswnrn4N_jwhHYWzvJG8esmqtYXbUZy3UGbLSjhKvn1FSxBGL1JDFHqbHMJc-jwiLuRrEqNgASgXEqibGs4VqKwSwiuFEtuPD53v8hiTtVuLfmTr_jwhHYWzvJG8esmqtYXbUZy3UGbLSjhKvn1FSxBGL1JDFHqbHMJc"; let decoded = SnarkJobId::from_str(s).unwrap(); assert_eq!(decoded.to_string(), s); } diff --git a/core/src/transaction/mod.rs b/core/src/transaction/mod.rs index 4a4a9e5938..636c5ab18f 100644 --- a/core/src/transaction/mod.rs +++ b/core/src/transaction/mod.rs @@ -5,3 +5,36 @@ mod transaction_with_hash; pub use transaction_with_hash::*; pub use mina_p2p_messages::v2::{MinaBaseUserCommandStableV2 as Transaction, TransactionHash}; + +use crate::{p2p::P2pNetworkPubsubMessageCacheId, requests::RpcId}; + +/// TODO: Types and methods bellow, should be moved to `node` crate, they are here because they are used in `snark` crates callbacks +#[derive(serde::Serialize, serde::Deserialize, Debug, Clone, Copy, Default)] +pub enum TransactionPoolMessageSource { + Rpc { + id: RpcId, + }, + Pubsub { + id: P2pNetworkPubsubMessageCacheId, + }, + #[default] + None, +} + +impl TransactionPoolMessageSource { + pub fn rpc(id: 
RpcId) -> Self { + Self::Rpc { id } + } + + pub fn pubsub(id: P2pNetworkPubsubMessageCacheId) -> Self { + Self::Pubsub { id } + } + + pub fn is_sender_local(&self) -> bool { + matches!(self, Self::Rpc { .. }) + } + + pub fn is_libp2p(&self) -> bool { + matches!(self, Self::Pubsub { .. }) + } +} diff --git a/docker-compose.archive.devnet.compare.yml b/docker-compose.archive.devnet.compare.yml index 433df964ed..f43efd5e76 100644 --- a/docker-compose.archive.devnet.compare.yml +++ b/docker-compose.archive.devnet.compare.yml @@ -137,9 +137,11 @@ services: node-openmina: image: adrnagy/openmina:archive-test container_name: node-openmina + environment: + OPENMINA_ARCHIVE_ADDRESS: http://archive-openmina:3087 command: > node - --archive-address http://archive-openmina:3087 + --archive-archiver-process ports: - "3000:3000" depends_on: diff --git a/docker-compose.archive.devnet.yml b/docker-compose.archive.devnet.yml index 6175c5b65c..a94a95bce6 100644 --- a/docker-compose.archive.devnet.yml +++ b/docker-compose.archive.devnet.yml @@ -63,9 +63,11 @@ services: node-openmina: image: openmina/openmina:latest container_name: node-openmina + environment: + - OPENMINA_ARCHIVE_ADDRESS=http://archive-openmina:3086 command: > node - --archive-address http://archive-openmina:3086 + --archive-archiver-process ports: - "127.0.0.1:3000:3000" depends_on: diff --git a/docker-compose.local.producers.yml b/docker-compose.local.producers.yml index 9fff9d1a7f..2fc57ef5c4 100644 --- a/docker-compose.local.producers.yml +++ b/docker-compose.local.producers.yml @@ -1,7 +1,7 @@ services: local-producer-cluster: container_name: local-producer-cluster - image: openmina/openmina:0.14.0 + image: openmina/openmina:0.16.0 environment: - RUST_BACKTRACE=1 entrypoint: ["openmina-node-testing", "scenarios-generate", "--name", "simulation-small-forever-real-time"] @@ -12,7 +12,7 @@ services: frontend: container_name: frontend - image: openmina/frontend:0.14.0 + image: openmina/frontend:0.16.0 environment: OPENMINA_FRONTEND_ENVIRONMENT: block-producers ports: diff --git a/docs/archive-node-guide.md b/docs/archive-node-guide.md index 35f60820f0..0bae18dada 100644 --- a/docs/archive-node-guide.md +++ b/docs/archive-node-guide.md @@ -2,15 +2,61 @@ This guide is intended for setting up archive nodes on **Mina Devnet** only. 
Do not use this guide for Mina Mainnet
+## Archive Mode Configuration
+
+We enable archive mode in openmina by setting one or more of the following flags, each with its associated environment variables:
+
+### Archiver Process (`--archive-archiver-process`)
+
+Stores blocks in a database by receiving them directly from the openmina node.
+
+**Required Environment Variables**:
+- `OPENMINA_ARCHIVE_ADDRESS`: Network address for the archiver service
+
+### Local Storage (`--archive-local-storage`)
+
+Stores blocks in the local filesystem.
+
+**Required Environment Variables**:
+- (None)
+
+**Optional Environment Variables**:
+- `OPENMINA_ARCHIVE_LOCAL_STORAGE_PATH`: Custom path for block storage (default: ~/.openmina/archive-precomputed)
+
+### GCP Storage (`--archive-gcp-storage`)
+
+Uploads blocks to a Google Cloud Platform bucket.
+
+**Required Environment Variables**:
+- `GCP_CREDENTIALS_JSON`: Service account credentials JSON
+- `GCP_BUCKET_NAME`: Target storage bucket name
+
+### AWS Storage (`--archive-aws-storage`)
+
+Uploads blocks to an AWS S3 bucket.
+
+**Required Environment Variables**:
+- `AWS_ACCESS_KEY_ID`: IAM user access key
+- `AWS_SECRET_ACCESS_KEY`: IAM user secret key
+- `AWS_DEFAULT_REGION`: AWS region name
+- `AWS_SESSION_TOKEN`: Temporary session token when using temporary credentials
+- `OPENMINA_AWS_BUCKET_NAME`: Target S3 bucket name
+
+## Redundancy
+
+Archive mode is designed for redundancy: the flags can be combined so that several storage backends run simultaneously (see the example invocation below).
+
 ## Prerequisites

 Ensure Docker and Docker Compose are installed on your system

 - [Docker Installation Guide](./docker-installation.md)

-## Docker compose setup
+## Docker compose setup (with archiver process)
+
+The compose file sets up a PostgreSQL database, the archiver process, and the openmina node. The archiver process stores blocks in the database as it receives them from the openmina node.

-The compose file sets up a PG database, the archiver process and the openmina node. The archiver process is responsible for storing the blocks in the database by receiving the blocks from the openmina node. We start the archive mode in openmina by setting the `--archive-mode` flag to the address fo archiver process. See [docker-compose.archive.devnet.yml](../docker-compose.archive.devnet.yml) for more details.
+See [docker-compose.archive.devnet.yml](../docker-compose.archive.devnet.yml) for more details.
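+For reference, the storage flags described above can also be combined when launching the node binary directly. The following is a hypothetical invocation sketch, not taken from this repository's scripts; the storage path, credentials file, and bucket name are placeholders:
+
+```bash
+# Hypothetical example: keep local precomputed blocks and also upload them to GCP.
+export OPENMINA_ARCHIVE_LOCAL_STORAGE_PATH=/data/archive-precomputed
+export GCP_CREDENTIALS_JSON="$(cat /path/to/service-account.json)"
+export GCP_BUCKET_NAME=my-archive-bucket
+openmina node --archive-local-storage --archive-gcp-storage
+```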
-## Starting the setup +### Starting the setup ```bash docker compose -f docker-compose.archive.devnet.yml up -d diff --git a/frontend/cypress/e2e/block-production/won-slots/side-panel.cy.ts b/frontend/cypress/e2e/block-production/won-slots/side-panel.cy.ts index ae20b18205..9ab26f6571 100644 --- a/frontend/cypress/e2e/block-production/won-slots/side-panel.cy.ts +++ b/frontend/cypress/e2e/block-production/won-slots/side-panel.cy.ts @@ -101,7 +101,6 @@ describe('BLOCK PRODUCTION WON SLOTS SIDE PANEL', () => { .then((state: BlockProductionWonSlotsState) => { expect(state.activeSlot.globalSlot).to.equal(expectedActiveSlot.globalSlot); expect(state.activeSlot.height).to.equal(expectedActiveSlot.height); - console.log(expectedActiveSlot.times); }) .get('mina-block-production-won-slots-side-panel .percentage') .should('have.text', ([ diff --git a/frontend/functions/.gitignore b/frontend/functions/.gitignore index 5d00677939..0f8bf124a0 100644 --- a/frontend/functions/.gitignore +++ b/frontend/functions/.gitignore @@ -2,3 +2,4 @@ node_modules/ *.local coverage/ lib/ +allowed_keys.txt diff --git a/frontend/functions/build.js b/frontend/functions/build.js index 485733fd6b..a78f673dc3 100644 --- a/frontend/functions/build.js +++ b/frontend/functions/build.js @@ -10,7 +10,7 @@ if (fs.existsSync(keysFilePath)) { .map(key => key.trim()) .filter(key => key.length > 0); - const validatorFilePath = path.resolve(__dirname, 'functions/submitterValidator.ts'); + const validatorFilePath = path.resolve(__dirname, 'lib/submitterValidator.js'); let validatorFileContent = fs.readFileSync(validatorFilePath, 'utf-8'); const keysSetString = keys.map(key => `'${key}'`).join(',\n '); diff --git a/frontend/functions/package-lock.json b/frontend/functions/package-lock.json index 63bbe4fb94..ef47b0232e 100644 --- a/frontend/functions/package-lock.json +++ b/frontend/functions/package-lock.json @@ -9,7 +9,7 @@ "blake2": "^5.0.0", "bs58check": "^3.0.1", "firebase-admin": "^12.1.0", - "firebase-functions": "^6.2.0", + "firebase-functions": "^6.3.2", "mina-signer": "^3.0.7" }, "devDependencies": { @@ -2898,9 +2898,9 @@ } }, "node_modules/firebase-functions": { - "version": "6.2.0", - "resolved": "https://registry.npmjs.org/firebase-functions/-/firebase-functions-6.2.0.tgz", - "integrity": "sha512-vfyyVHS8elxplzEQ9To+NaINRPFUsDasQrasTa2eFJBYSPzdhkw6rwLmvwyYw622+ze+g4sDIb14VZym+afqXQ==", + "version": "6.3.2", + "resolved": "https://registry.npmjs.org/firebase-functions/-/firebase-functions-6.3.2.tgz", + "integrity": "sha512-FC3A1/nhqt1ZzxRnj5HZLScQaozAcFSD/vSR8khqSoFNOfxuXgwJS6ZABTB7+v+iMD5z6Mmxw6OfqITUBuI7OQ==", "license": "MIT", "dependencies": { "@types/cors": "^2.8.5", diff --git a/frontend/functions/package.json b/frontend/functions/package.json index 8d76d54827..3c12705b8f 100644 --- a/frontend/functions/package.json +++ b/frontend/functions/package.json @@ -9,7 +9,7 @@ "logs": "firebase functions:log", "test": "jest", "test:watch": "jest --watch", - "build": "node build.js && tsc -p tsconfig.json", + "build": "tsc -p tsconfig.json && node build.js", "build:watch": "tsc --watch" }, "engines": { @@ -21,7 +21,7 @@ "blake2": "^5.0.0", "bs58check": "^3.0.1", "firebase-admin": "^12.1.0", - "firebase-functions": "^6.2.0", + "firebase-functions": "^6.3.2", "mina-signer": "^3.0.7" }, "devDependencies": { diff --git a/frontend/functions/src/index.ts b/frontend/functions/src/index.ts index bcf9751b12..9f303ec135 100644 --- a/frontend/functions/src/index.ts +++ b/frontend/functions/src/index.ts @@ -23,8 +23,9 @@ const minaClient = 
new Client({ network: 'testnet' }); admin.initializeApp(); -// Rate limit duration between heartbeats from the same submitter (15 seconds) -const HEARTBEAT_RATE_LIMIT_MS = 15000; +// Rate limit configuration: sliding window +const WINDOW_SIZE_MS = 60000; // 1 minute window +const MAX_REQUESTS_PER_WINDOW = 6; function validateSignature( data: string, @@ -96,21 +97,36 @@ export const handleValidationAndStore = onCall( const newHeartbeatRef = db.collection('heartbeats').doc(); await db.runTransaction(async (transaction) => { - const doc = await transaction.get(rateLimitRef); + const rateLimitDoc = await transaction.get(rateLimitRef); const now = Date.now(); - const cutoff = now - HEARTBEAT_RATE_LIMIT_MS; + const windowStart = now - WINDOW_SIZE_MS; - if (doc.exists) { - const lastCall = doc.data()?.['lastCall']; - if (lastCall > cutoff) { + if (rateLimitDoc.exists) { + const data = rateLimitDoc.data(); + const previousTimestamps: number[] = data?.timestamps || []; + const currentWindowTimestamps = previousTimestamps.filter(ts => ts > windowStart); + + currentWindowTimestamps.push(now); + + if (currentWindowTimestamps.length > MAX_REQUESTS_PER_WINDOW) { throw new functions.https.HttpsError( 'resource-exhausted', - 'Rate limit exceeded for this public key', + 'Rate limit exceeded', ); } + + transaction.set(rateLimitRef, { + timestamps: currentWindowTimestamps, + lastCall: FieldValue.serverTimestamp(), + }); + } else { + // First request for this public key + transaction.set(rateLimitRef, { + timestamps: [now], + lastCall: FieldValue.serverTimestamp(), + }); } - transaction.set(rateLimitRef, { lastCall: FieldValue.serverTimestamp() }, { merge: true }); transaction.create(newHeartbeatRef, { ...data, createTime: FieldValue.serverTimestamp(), diff --git a/frontend/package-lock.json b/frontend/package-lock.json index 4844afab1b..21897e6c26 100644 --- a/frontend/package-lock.json +++ b/frontend/package-lock.json @@ -1,12 +1,12 @@ { "name": "frontend", - "version": "1.0.95", + "version": "1.0.129", "lockfileVersion": 3, "requires": true, "packages": { "": { "name": "frontend", - "version": "1.0.95", + "version": "1.0.129", "dependencies": { "@angular/animations": "^17.3.12", "@angular/cdk": "^17.3.10", diff --git a/frontend/package.json b/frontend/package.json index 9003e5757a..3949213c4b 100644 --- a/frontend/package.json +++ b/frontend/package.json @@ -1,6 +1,6 @@ { "name": "frontend", - "version": "1.0.103", + "version": "1.0.180", "scripts": { "install:deps": "npm install", "start": "npm install && ng serve --configuration local --open", @@ -91,4 +91,4 @@ "webpack": "^5.88.2", "webpack-bundle-analyzer": "^4.9.0" } -} \ No newline at end of file +} diff --git a/frontend/src/app/app.component.html b/frontend/src/app/app.component.html index 3b43ad7624..6cbe7f5358 100644 --- a/frontend/src/app/app.component.html +++ b/frontend/src/app/app.component.html @@ -1,7 +1,11 @@ @if (showLandingPage$ | async) { - - - + @if (showLeaderboard) { + + + } @else { + + } } @else if (showLoadingWebNodePage$ | async) { } @else if (showLeaderboardPage$ | async) { @@ -28,7 +32,8 @@ class="overflow-hidden" [class.no-toolbar]="hideToolbar" [class.no-submenus]="subMenusLength < 2" - [class.mobile]="menu.isMobile"> + [class.mobile]="menu.isMobile" + [class.uptime]="showLeaderboard"> @if (!isDesktop) { diff --git a/frontend/src/app/app.component.scss b/frontend/src/app/app.component.scss index c661e58c8c..2044b515e5 100644 --- a/frontend/src/app/app.component.scss +++ b/frontend/src/app/app.component.scss @@ -76,8 +76,10 @@ 
mat-sidenav-content { margin-bottom: 4px; border-top-right-radius: 6px; - &.no-toolbar { - height: calc(100% - #{$subMenus} - #{$tabs}); + + &.uptime { + $toolbar: 130px; + height: calc(100% - #{$toolbar} - #{$subMenus} - #{$tabs}); } &.no-submenus { @@ -86,6 +88,14 @@ mat-sidenav-content { &.no-toolbar { height: 100%; } + + &.uptime { + height: calc(100% - #{$toolbar} - #{$subMenus}); + } + } + + &.no-toolbar { + height: calc(100% - #{$subMenus} - #{$tabs}); } } } diff --git a/frontend/src/app/app.component.ts b/frontend/src/app/app.component.ts index 0e5f1f9deb..8377201887 100644 --- a/frontend/src/app/app.component.ts +++ b/frontend/src/app/app.component.ts @@ -28,6 +28,7 @@ export class AppComponent extends StoreDispatcher implements OnInit { readonly showLeaderboardPage$: Observable = this.select$(getMergedRoute).pipe(filter(Boolean), map((route: MergedRoute) => route.url.startsWith(`/${Routes.LEADERBOARD}`))); subMenusLength: number = 0; hideToolbar: boolean = CONFIG.hideToolbar; + showLeaderboard: boolean = CONFIG.showLeaderboard; loaded: boolean; isDesktop: boolean = isDesktop(); @@ -54,27 +55,25 @@ export class AppComponent extends StoreDispatcher implements OnInit { localStorage.setItem('webnodeArgs', args); } } + this.select(getMergedRoute, () => { + this.loaded = true; + this.detect(); + }, filter(Boolean), take(1)); - this.select( - getMergedRoute, - () => this.initAppFunctionalities(), - filter(Boolean), - take(1), - filter((route: MergedRoute) => route.url !== '/' && !route.url.startsWith('/?') && !route.url.startsWith('/leaderboard')), - ); - this.select( - getMergedRoute, - () => { - this.loaded = true; - this.detect(); - }, - filter(Boolean), - take(1), - ); + if (CONFIG.showLeaderboard && CONFIG.showWebNodeLandingPage) { + /* frontend with some landing page */ + this.select(getMergedRoute, () => { + this.initAppFunctionalities(); + }, filter((route: MergedRoute) => route?.url.startsWith('/loading-web-node')), take(1)); + + } else if (!CONFIG.showLeaderboard && !CONFIG.showWebNodeLandingPage) { + /* normal frontend (no landing pages) */ + this.initAppFunctionalities(); + } } goToWebNode(): void { - this.router.navigate([Routes.LOADING_WEB_NODE], { queryParamsHandling: 'merge' }); + // this.router.navigate([Routes.LOADING_WEB_NODE], { queryParamsHandling: 'merge' }); this.initAppFunctionalities(); } diff --git a/frontend/src/app/app.module.ts b/frontend/src/app/app.module.ts index 734d520c3d..ba48aacec3 100644 --- a/frontend/src/app/app.module.ts +++ b/frontend/src/app/app.module.ts @@ -36,13 +36,17 @@ import { ReactiveFormsModule } from '@angular/forms'; import { WebNodeLandingPageComponent } from '@app/layout/web-node-landing-page/web-node-landing-page.component'; import * as Sentry from '@sentry/angular'; import { Router } from '@angular/router'; -import { initializeApp, provideFirebaseApp } from '@angular/fire/app'; +import { getApp, initializeApp, provideFirebaseApp } from '@angular/fire/app'; import { getAnalytics, provideAnalytics, ScreenTrackingService } from '@angular/fire/analytics'; import { getPerformance, providePerformance } from '@angular/fire/performance'; import { BlockProductionPillComponent } from '@app/layout/block-production-pill/block-production-pill.component'; import { MenuTabsComponent } from '@app/layout/menu-tabs/menu-tabs.component'; import { getFirestore, provideFirestore } from '@angular/fire/firestore'; import { LeaderboardModule } from '@leaderboard/leaderboard.module'; +import { UptimePillComponent } from 
'@app/layout/uptime-pill/uptime-pill.component'; +import { provideAppCheck } from '@angular/fire/app-check'; +import { initializeAppCheck, ReCaptchaV3Provider } from 'firebase/app-check'; +import { SETTINGS } from '@angular/fire/compat/firestore'; registerLocaleData(localeFr, 'fr'); registerLocaleData(localeEn, 'en'); @@ -127,6 +131,26 @@ export class AppGlobalErrorhandler implements ErrorHandler { } } +const firebaseProviders = [ + { + provide: SETTINGS, + useValue: { experimentalForceLongPolling: true }, + }, + provideFirebaseApp(() => initializeApp(CONFIG.globalConfig.firebase)), + provideClientHydration(), + provideHttpClient(withFetch()), + provideAnalytics(() => getAnalytics()), + ScreenTrackingService, + // provideAppCheck(() => { + // // TODO get a reCAPTCHA Enterprise here https://console.cloud.google.com/security/recaptcha?project=_ + // const app = getApp(); + // const provider = new ReCaptchaV3Provider('6LfAB-QqAAAAAEu9BO6upFj6Sewd08lf0UtFC16c'); + // return initializeAppCheck(app, { provider, isTokenAutoRefreshEnabled: true }); + // }), + providePerformance(() => getPerformance()), + provideFirestore(() => getFirestore()), +]; + @NgModule({ declarations: [ AppComponent, @@ -166,6 +190,7 @@ export class AppGlobalErrorhandler implements ErrorHandler { BlockProductionPillComponent, MenuTabsComponent, LeaderboardModule, + UptimePillComponent, ], providers: [ THEME_PROVIDER, @@ -175,26 +200,17 @@ export class AppGlobalErrorhandler implements ErrorHandler { { provide: Sentry.TraceService, deps: [Router] }, { provide: APP_INITIALIZER, - useFactory: () => () => {}, + useFactory: () => () => { + }, deps: [Sentry.TraceService], multi: true, }, - provideClientHydration(), - provideHttpClient(withFetch()), - provideFirebaseApp(() => initializeApp(CONFIG.globalConfig.firebase)), - provideAnalytics(() => getAnalytics()), - ScreenTrackingService, - // provideAppCheck(() => { - // // TODO get a reCAPTCHA Enterprise here https://console.cloud.google.com/security/recaptcha?project=_ - // const provider = new ReCaptchaEnterpriseProvider(/* reCAPTCHA Enterprise site key */); - // return initializeAppCheck(undefined, { provider, isTokenAutoRefreshEnabled: true }); - // }), - providePerformance(() => getPerformance()), - provideFirestore(() => getFirestore()), + ...[CONFIG.globalConfig.firebase ? 
firebaseProviders : []], ], bootstrap: [AppComponent], exports: [ MenuComponent, ], }) -export class AppModule {} +export class AppModule { +} diff --git a/frontend/src/app/app.routing.ts b/frontend/src/app/app.routing.ts index 1a5d8eca6d..0bd402243e 100644 --- a/frontend/src/app/app.routing.ts +++ b/frontend/src/app/app.routing.ts @@ -2,6 +2,9 @@ import { NgModule } from '@angular/core'; import { NoPreloading, RouterModule, Routes } from '@angular/router'; import { CONFIG, getFirstFeature } from '@shared/constants/config'; import { WebNodeLandingPageComponent } from '@app/layout/web-node-landing-page/web-node-landing-page.component'; +import { getMergedRoute, MergedRoute } from '@openmina/shared'; +import { filter, take } from 'rxjs'; +import { landingPageGuard } from '@shared/guards/landing-page.guard'; const APP_TITLE: string = 'Open Mina'; @@ -24,6 +27,7 @@ function generateRoutes(): Routes { path: 'dashboard', loadChildren: () => import('@dashboard/dashboard.module').then(m => m.DashboardModule), title: DASHBOARD_TITLE, + canActivate: [landingPageGuard], }, { path: 'nodes', @@ -45,6 +49,7 @@ function generateRoutes(): Routes { path: 'state', loadChildren: () => import('@state/state.module').then(m => m.StateModule), title: STATE_TITLE, + canActivate: [landingPageGuard], }, { path: 'snarks', @@ -55,16 +60,19 @@ function generateRoutes(): Routes { path: 'block-production', loadChildren: () => import('@block-production/block-production.module').then(m => m.BlockProductionModule), title: BLOCK_PRODUCTION_TITLE, + canActivate: [landingPageGuard], }, { path: 'mempool', loadChildren: () => import('@mempool/mempool.module').then(m => m.MempoolModule), title: MEMPOOL_TITLE, + canActivate: [landingPageGuard], }, { path: 'benchmarks', loadChildren: () => import('@benchmarks/benchmarks.module').then(m => m.BenchmarksModule), title: BENCHMARKS_TITLE, + canActivate: [landingPageGuard], }, { path: 'fuzzing', @@ -75,13 +83,15 @@ function generateRoutes(): Routes { path: 'loading-web-node', loadChildren: () => import('@web-node/web-node.module').then(m => m.WebNodeModule), title: WEBNODE_TITLE, + canActivate: [landingPageGuard], }, - { + ]; + if (CONFIG.showLeaderboard) { + routes.push({ path: '', loadChildren: () => import('@leaderboard/leaderboard.module').then(m => m.LeaderboardModule), - }, - ]; - if (CONFIG.showWebNodeLandingPage) { + }); + } else if (CONFIG.showWebNodeLandingPage) { routes.push({ path: '', component: WebNodeLandingPageComponent, diff --git a/frontend/src/app/app.service.ts b/frontend/src/app/app.service.ts index 007658b11e..671b2bce09 100644 --- a/frontend/src/app/app.service.ts +++ b/frontend/src/app/app.service.ts @@ -6,17 +6,24 @@ import { RustService } from '@core/services/rust.service'; import { AppNodeDetails, AppNodeStatus } from '@shared/types/app/app-node-details.type'; import { getNetwork } from '@shared/helpers/mina.helper'; import { getLocalStorage, nanOrElse, ONE_MILLION } from '@openmina/shared'; -import { BlockProductionWonSlotsStatus } from '@shared/types/block-production/won-slots/block-production-won-slots-slot.type'; +import { + BlockProductionWonSlotsStatus +} from '@shared/types/block-production/won-slots/block-production-won-slots-slot.type'; import { AppEnvBuild } from '@shared/types/app/app-env-build.type'; -import { FirestoreService } from '@core/services/firestore.service'; +import { SentryService } from '@core/services/sentry.service'; +import { WebNodeService } from '@core/services/web-node.service'; @Injectable({ providedIn: 'root', }) export class 
AppService { + private previousProducedBlock: BlockProductionAttempt; + constructor(private rust: RustService, - private firestoreService: FirestoreService) { } + private sentryService: SentryService, + private webnodeService: WebNodeService) { + } getActiveNode(nodes: MinaNode[]): Observable { const nodeName = new URL(location.href).searchParams.get('node'); @@ -39,6 +46,7 @@ export class AppService { getActiveNodeDetails(): Observable { return this.rust.get('/status') .pipe( + tap((data: NodeDetailsResponse) => this.notifyPrevBlockChanged(data)), map((data: NodeDetailsResponse): AppNodeDetails => ({ status: this.getStatus(data), blockHeight: data.transition_frontier.best_tip?.height, @@ -54,19 +62,30 @@ export class AppService { producingBlockGlobalSlot: data.current_block_production_attempt?.won_slot.global_slot, producingBlockStatus: data.current_block_production_attempt?.status, } as AppNodeDetails)), - tap((details: any) => { - // undefined not allowed. Firestore does not accept undefined values - // foreach undefined value, we set it to null - Object.keys(details).forEach((key: string) => { - if (details[key] === undefined) { - details[key] = null; - } - }); - // this.firestoreService.addHeartbeat(details); - }), ); } + private notifyPrevBlockChanged(data: NodeDetailsResponse): void { + if (!this.rust.activeNodeIsWebNode) { + return; + } + + const isInProduction = (status: BlockProductionWonSlotsStatus) => + ![ + BlockProductionWonSlotsStatus.Discarded, + BlockProductionWonSlotsStatus.Orphaned, + BlockProductionWonSlotsStatus.Canonical, + ].includes(status) + + if ( + this.previousProducedBlock && data.previous_block_production_attempt + && isInProduction(this.previousProducedBlock.status) !== isInProduction(data.previous_block_production_attempt.status) + ) { + this.sentryService.updateProducedBlock(data.previous_block_production_attempt, this.webnodeService.publicKey); + } + this.previousProducedBlock = data.previous_block_production_attempt; + } + private getStatus(data: NodeDetailsResponse): AppNodeStatus { switch (data.transition_frontier.sync.phase) { case 'Bootstrap': @@ -88,6 +107,7 @@ export interface NodeDetailsResponse { snark_pool: SnarkPool; chain_id: string | undefined; current_block_production_attempt: BlockProductionAttempt; + previous_block_production_attempt: BlockProductionAttempt; } export interface BlockProductionAttempt { diff --git a/frontend/src/app/core/helpers/file-progress.helper.ts b/frontend/src/app/core/helpers/file-progress.helper.ts index 890310def7..fed9a8d31f 100644 --- a/frontend/src/app/core/helpers/file-progress.helper.ts +++ b/frontend/src/app/core/helpers/file-progress.helper.ts @@ -1,7 +1,7 @@ import { BehaviorSubject } from 'rxjs'; import { safelyExecuteInBrowser } from '@openmina/shared'; -const WASM_FILE_SIZE = 31705944; +const WASM_FILE_SIZE = 31556926; class AssetMonitor { readonly downloads: Map = new Map(); diff --git a/frontend/src/app/core/services/firestore.service.ts b/frontend/src/app/core/services/firestore.service.ts index 1073919e4e..9f3f6e047f 100644 --- a/frontend/src/app/core/services/firestore.service.ts +++ b/frontend/src/app/core/services/firestore.service.ts @@ -1,42 +1,28 @@ import { Injectable } from '@angular/core'; -import { - Firestore, - CollectionReference, - collection, - addDoc, - doc, - setDoc, - updateDoc, - deleteDoc, - DocumentData, -} from '@angular/fire/firestore'; import { HttpClient } from '@angular/common/http'; -import { Observable } from 'rxjs'; +import { catchError, Observable, of, tap } from 'rxjs'; 
+import { SentryService } from '@core/services/sentry.service'; @Injectable({ providedIn: 'root', }) export class FirestoreService { - private heartbeatCollection: CollectionReference; private cloudFunctionUrl = 'https://us-central1-webnode-gtm-test.cloudfunctions.net/handleValidationAndStore'; - constructor(private firestore: Firestore, - private http: HttpClient) { - this.heartbeatCollection = collection(this.firestore, 'heartbeat'); - } + constructor(private sentryService: SentryService, + private http: HttpClient) { } addHeartbeat(data: any): Observable { console.log('Posting to cloud function:', data); - return this.http.post(this.cloudFunctionUrl, { data }); - } - - updateHeartbeat(id: string, data: any): Promise { - const docRef = doc(this.heartbeatCollection, id); - return updateDoc(docRef, data); - } - - deleteHeartbeat(id: string): Promise { - const docRef = doc(this.heartbeatCollection, id); - return deleteDoc(docRef); + return this.http.post(this.cloudFunctionUrl, { data }) + .pipe( + // tap(() => { + // this.sentryService.updateHeartbeat(data, data.submitter); + // }), + catchError(error => { + console.error('Error while posting heartbeat', error); + return of(null); + }), + ); } } diff --git a/frontend/src/app/core/services/sentry.service.ts b/frontend/src/app/core/services/sentry.service.ts index 6e4de46daf..2007879b57 100644 --- a/frontend/src/app/core/services/sentry.service.ts +++ b/frontend/src/app/core/services/sentry.service.ts @@ -1,23 +1,31 @@ -import { inject, Injectable } from '@angular/core'; -import { NodesOverviewLedger, NodesOverviewLedgerStepState } from '@shared/types/nodes/dashboard/nodes-overview-ledger.type'; +import { Injectable } from '@angular/core'; +import { + NodesOverviewLedger, + NodesOverviewLedgerStepState +} from '@shared/types/nodes/dashboard/nodes-overview-ledger.type'; import * as Sentry from '@sentry/angular'; -import { NodesOverviewBlock, NodesOverviewNodeBlockStatus } from '@shared/types/nodes/dashboard/nodes-overview-block.type'; +import { + NodesOverviewBlock, + NodesOverviewNodeBlockStatus +} from '@shared/types/nodes/dashboard/nodes-overview-block.type'; import { lastItem, ONE_BILLION } from '@openmina/shared'; -import { RustService } from '@core/services/rust.service'; import { getElapsedTime } from '@shared/helpers/date.helper'; +import { + BlockProductionWonSlotsSlot +} from '@shared/types/block-production/won-slots/block-production-won-slots-slot.type'; +import { BlockProductionAttempt } from '@app/app.service'; @Injectable({ providedIn: 'root', }) export class SentryService { - private readonly rustService: RustService = inject(RustService); private ledgerIsSynced: boolean = false; private blockIsSynced: boolean = false; private ledgerSyncedTime: number; private blockSyncedTime: number; - updateLedgerSyncStatus(ledger: NodesOverviewLedger): void { + updateLedgerSyncStatus(ledger: NodesOverviewLedger, publicKey: string): void { if (this.ledgerIsSynced) { return; } @@ -45,16 +53,19 @@ export class SentryService { const syncedIn = Math.round((ledger.rootStaged.staged.reconstructEnd - ledger.stakingEpoch.snarked.fetchHashesStart) / ONE_BILLION); this.ledgerSyncedTime = syncedIn; - Sentry.captureMessage(`Ledger synced in ${getElapsedTime(syncedIn)}s`, { + Sentry.captureMessage(`Ledger synced in ${getElapsedTime(syncedIn)}`, { level: 'info', - tags: { type: 'webnode', subType: 'sync.ledger' }, + tags: { + type: 'webnode', subType: 'sync.ledger', publicKey, duration: syncedIn + }, contexts: { ledger: syncDetails }, + fingerprint: 
this.fingerprint, }); } } - updateBlockSyncStatus(blocks: NodesOverviewBlock[], startTime: number): void { - if (this.blockIsSynced || !this.rustService.activeNodeIsWebNode) { + updateBlockSyncStatus(blocks: NodesOverviewBlock[], startTime: number, publicKey: string): void { + if (this.blockIsSynced) { return; } @@ -65,19 +76,79 @@ export class SentryService { const bestTipBlock = blocks[0].height; const root = lastItem(blocks).height; this.blockSyncedTime = Math.round((Date.now() - startTime) / 1000); - Sentry.captureMessage(`Last 290 blocks synced in ${getElapsedTime(this.blockSyncedTime)}s`, { + Sentry.captureMessage(`Last 290 blocks synced in ${getElapsedTime(this.blockSyncedTime)}`, { level: 'info', - tags: { type: 'webnode', subType: 'sync.block' }, + tags: { + type: 'webnode', subType: 'sync.block', publicKey, duration: this.blockSyncedTime + }, contexts: { blocks: { bestTipBlock, root } }, + fingerprint: this.fingerprint, }); const syncTotal = this.ledgerSyncedTime + this.blockSyncedTime; setTimeout(() => { Sentry.captureMessage(`Web Node Synced in ${getElapsedTime(syncTotal)}`, { level: 'info', - tags: { type: 'webnode', subType: 'sync.total' }, + tags: { + type: 'webnode', subType: 'sync.total', publicKey, duration: syncTotal + }, + fingerprint: this.fingerprint, }); }, 2000); } } + + updatePeersConnected(seconds: number, publicKey: string): void { + Sentry.captureMessage(`Web Node connected in ${seconds.toFixed(1)}s`, { + level: 'info', + tags: { type: 'webnode', subType: 'sync.peers', publicKey, duration: seconds }, + fingerprint: this.fingerprint, + }); + } + + updateProducedBlock(attempt: BlockProductionAttempt, publicKey: string): void { + const times = { + stagedLedgerDiffCreate: !attempt.times.staged_ledger_diff_create_end || !attempt.times.staged_ledger_diff_create_start + ? 0 : (attempt.times.staged_ledger_diff_create_end - attempt.times.staged_ledger_diff_create_start) / ONE_BILLION, + produced: !attempt.times.produced || !attempt.times.staged_ledger_diff_create_end + ? 0 : (attempt.times.produced - attempt.times.staged_ledger_diff_create_end) / ONE_BILLION, + proofCreate: !attempt.times.proof_create_end || !attempt.times.proof_create_start + ? 0 : (attempt.times.proof_create_end - attempt.times.proof_create_start) / ONE_BILLION, + }; + + Sentry.captureMessage(`Block Production Finished (${attempt.status}) - ` + attempt.block?.height, { + level: 'info', + tags: { type: 'webnode', subType: 'block.production', publicKey, duration: times.stagedLedgerDiffCreate + times.produced + times.proofCreate }, + fingerprint: this.fingerprint, + contexts: { block: this.flattenObject(attempt) }, + }); + } + + updateHeartbeat(data: any, publicKey: string): void { + Sentry.captureMessage('Heartbeat', { + level: 'info', + tags: { type: 'webnode', subType: 'heartbeat', publicKey }, + contexts: { heartbeat: { payload: data.payload, signatureField: data.signature.field, signatureScalar: data.signature.scalar } }, + fingerprint: this.fingerprint, + }); + } + + private flattenObject(obj: Record, prefix: string = ''): Record { + return Object.keys(obj).reduce((acc: Record, key: string) => { + const prefixedKey = prefix ? 
`${prefix}.${key}` : key; + + if (typeof obj[key] === 'object' && obj[key] !== null && !Array.isArray(obj[key])) { + const nestedObj = this.flattenObject(obj[key], prefixedKey); + Object.assign(acc, nestedObj); + } else { + acc[prefixedKey] = obj[key]; + } + + return acc; + }, {}); + } + + private get fingerprint(): string[] { + return [Math.random().toString(36).substring(2, 22)]; // fresh random value per event, so Sentry never groups these messages + } } diff --git a/frontend/src/app/core/services/web-node.service.ts index 549afe48cb..082608a63b 100644 --- a/frontend/src/app/core/services/web-node.service.ts +++ b/frontend/src/app/core/services/web-node.service.ts @@ -10,6 +10,7 @@ import { CONFIG } from '@shared/constants/config'; import firebase from 'firebase/compat'; import FirebaseStorageError = firebase.storage.FirebaseStorageError; import { FirestoreService } from '@core/services/firestore.service'; +import { SentryService } from '@core/services/sentry.service'; export interface PrivateStake { publicKey: string; @@ -37,7 +38,8 @@ export class WebNodeService { noBlockProduction: boolean = false; constructor(private http: HttpClient, - private firestore: FirestoreService) { + private firestore: FirestoreService, + private sentryService: SentryService) { FileProgressHelper.initDownloadProgress(); const basex = base('123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'); safelyExecuteInBrowser(() => { @@ -48,6 +50,10 @@ export class WebNodeService { }); } + get publicKey(): string { + return this.privateStake?.publicKey; + } + hasWebNodeConfig(): boolean { return CONFIG.configs.some(c => c.isWebNode); } @@ -124,7 +130,6 @@ export class WebNodeService { } })(); console.log('webnode config:', !!this.webNodeKeyPair.privateKey, this.webNodeNetwork, urls); - console.log(this.privateStake); let privateKey = this.privateStake ? [this.privateStake.stake, this.privateStake.password] : this.webNodeKeyPair.privateKey; if (this.noBlockProduction) { privateKey = null; @@ -142,9 +147,14 @@ export class WebNodeService { return throwError(() => new Error(error.message)); }), switchMap(() => this.webnode$.asObservable()), + filter(() => CONFIG.globalConfig.heartbeats), switchMap(() => timer(0, 60000)), switchMap(() => this.heartBeat$), switchMap(heartBeat => this.firestore.addHeartbeat(heartBeat)), + catchError(error => { + console.error('Error from heartbeat api:', error); + return of(null); + }), ); } return EMPTY; @@ -178,7 +188,7 @@ export class WebNodeService { // } if (!this.sentryEvents.firstPeerConnected && peers.some((p: any) => p.connection_status === DashboardPeerStatus.CONNECTED)) { const seconds = (Date.now() - this.webNodeStartTime) / 1000; - sendSentryEvent(`WebNode connected in ${seconds.toFixed(1)}s`, 'info'); + this.sentryService.updatePeersConnected(seconds, this.publicKey); this.sentryEvents.firstPeerConnected = true; this.webnodeProgress$.next('Connected'); } diff --git a/frontend/src/app/features/block-production/overview/slot-details/block-production-overview-slot-details.component.html index 66b0aeb7e1..9485b4e681 100644 --- a/frontend/src/app/features/block-production/overview/slot-details/block-production-overview-slot-details.component.html +++ b/frontend/src/app/features/block-production/overview/slot-details/block-production-overview-slot-details.component.html @@ -27,11 +27,11 @@
-
Mina Explorer
+
Minascan
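Minascan addresses every network the same way, as a path segment, so the old mainnet/testnet-subdomain special case from minaexplorer.com disappears in the components below. A sketch of the pattern they now inline (hypothetical helper, assuming a lowercased network name as in the diff):

// Both block and account links in this diff follow the same template.
function minascanUrl(network: string, kind: 'block' | 'account', id: string): string {
  return `https://minascan.io/${network.toLowerCase()}/${kind}/${id}`;
}

// e.g. minascanUrl(this.network, 'block', this.activeSlot.hash)
//      minascanUrl(this.network, 'account', this.card5.publicKey)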
diff --git a/frontend/src/app/features/block-production/overview/slot-details/block-production-overview-slot-details.component.ts b/frontend/src/app/features/block-production/overview/slot-details/block-production-overview-slot-details.component.ts index 788621a3e6..bd8068cd6d 100644 --- a/frontend/src/app/features/block-production/overview/slot-details/block-production-overview-slot-details.component.ts +++ b/frontend/src/app/features/block-production/overview/slot-details/block-production-overview-slot-details.component.ts @@ -18,7 +18,7 @@ import { safelyExecuteInBrowser } from '@openmina/shared'; export class BlockProductionOverviewSlotDetailsComponent extends StoreDispatcher { @Input({ required: true }) activeSlot: BlockProductionOverviewSlot; - private minaExplorer: string; + private network: string; ngOnInit(): void { this.listenToActiveNode(); @@ -26,13 +26,12 @@ export class BlockProductionOverviewSlotDetailsComponent extends StoreDispatcher private listenToActiveNode(): void { this.select(AppSelectors.activeNodeDetails, (node: AppNodeDetails) => { - this.minaExplorer = node.network?.toLowerCase(); + this.network = node.network?.toLowerCase(); }, filter(Boolean)); } - viewInMinaExplorer(): void { - const network = this.minaExplorer !== 'mainnet' ? (this.minaExplorer + '.') : ''; - const url = `https://${network}minaexplorer.com/block/${this.activeSlot.hash}`; + viewInMinaScan(): void { + const url = `https://minascan.io/${this.network}/block/${this.activeSlot.hash}`; safelyExecuteInBrowser(() => window.open(url, '_blank')); } } diff --git a/frontend/src/app/features/block-production/won-slots/block-production-won-slots.effects.ts b/frontend/src/app/features/block-production/won-slots/block-production-won-slots.effects.ts index 68f2412866..71b703704f 100644 --- a/frontend/src/app/features/block-production/won-slots/block-production-won-slots.effects.ts +++ b/frontend/src/app/features/block-production/won-slots/block-production-won-slots.effects.ts @@ -39,7 +39,7 @@ export class BlockProductionWonSlotsEffects extends BaseEffect { ofType(BlockProductionWonSlotsActions.getSlots, BlockProductionWonSlotsActions.close), this.latestActionState(), switchMap(({ action, state }) => - action.type === BlockProductionWonSlotsActions.close.type + action.type.includes('Close') ? EMPTY : this.wonSlotsService.getSlots().pipe( switchMap(({ slots, epoch }) => { diff --git a/frontend/src/app/features/block-production/won-slots/cards/block-production-won-slots-cards.component.html b/frontend/src/app/features/block-production/won-slots/cards/block-production-won-slots-cards.component.html index bb13a67b4d..70316db46c 100644 --- a/frontend/src/app/features/block-production/won-slots/cards/block-production-won-slots-cards.component.html +++ b/frontend/src/app/features/block-production/won-slots/cards/block-production-won-slots-cards.component.html @@ -26,10 +26,13 @@ class="mr-10">
Public Key
-
{{ card5.publicKey | truncateMid: 6: 6 }}
-
- content_copy - Copy +
+ {{ card5.publicKey | truncateMid: 4: 4 }} + content_copy +
+
+ open_in_new + Open in Minascan
diff --git a/frontend/src/app/features/block-production/won-slots/cards/block-production-won-slots-cards.component.ts b/frontend/src/app/features/block-production/won-slots/cards/block-production-won-slots-cards.component.ts index 24b339574b..d83d44af37 100644 --- a/frontend/src/app/features/block-production/won-slots/cards/block-production-won-slots-cards.component.ts +++ b/frontend/src/app/features/block-production/won-slots/cards/block-production-won-slots-cards.component.ts @@ -1,17 +1,14 @@ import { ChangeDetectionStrategy, Component, OnInit } from '@angular/core'; import { StoreDispatcher } from '@shared/base-classes/store-dispatcher.class'; import { BlockProductionWonSlotsSelectors } from '@block-production/won-slots/block-production-won-slots.state'; -import { lastItem, ONE_BILLION, ONE_THOUSAND } from '@openmina/shared'; +import { lastItem, ONE_BILLION, ONE_THOUSAND, safelyExecuteInBrowser } from '@openmina/shared'; import { getTimeDiff } from '@shared/helpers/date.helper'; import { filter } from 'rxjs'; -import { - BlockProductionWonSlotsSlot, - BlockProductionWonSlotsStatus, -} from '@shared/types/block-production/won-slots/block-production-won-slots-slot.type'; -import { - BlockProductionWonSlotsEpoch, -} from '@shared/types/block-production/won-slots/block-production-won-slots-epoch.type'; +import { BlockProductionWonSlotsSlot, BlockProductionWonSlotsStatus, } from '@shared/types/block-production/won-slots/block-production-won-slots-slot.type'; +import { BlockProductionWonSlotsEpoch, } from '@shared/types/block-production/won-slots/block-production-won-slots-epoch.type'; import { BlockProductionWonSlotsActions } from '@block-production/won-slots/block-production-won-slots.actions'; +import { AppSelectors } from '@app/app.state'; +import { AppNodeDetails } from '@shared/types/app/app-node-details.type'; @Component({ selector: 'mina-block-production-won-slots-cards', @@ -28,9 +25,18 @@ export class BlockProductionWonSlotsCardsComponent extends StoreDispatcher imple card4: { epochProgress: string; endIn: string; } = { epochProgress: '-', endIn: null }; card5: { publicKey: string; totalRewards: string } = { publicKey: null, totalRewards: null }; + private network: string; + ngOnInit(): void { this.listenToSlots(); this.listenToEpoch(); + this.listenToActiveNode(); + } + + private listenToActiveNode(): void { + this.select(AppSelectors.activeNodeDetails, (node: AppNodeDetails) => { + this.network = node.network?.toLowerCase(); + }, filter(Boolean)); } private listenToEpoch(): void { @@ -80,4 +86,9 @@ export class BlockProductionWonSlotsCardsComponent extends StoreDispatcher imple toggleSidePanel(): void { this.dispatch2(BlockProductionWonSlotsActions.toggleSidePanel()); } + + openInMinascan(): void { + const url = `https://minascan.io/${this.network}/account/${this.card5.publicKey}`; + safelyExecuteInBrowser(() => window.open(url, '_blank')); + } } diff --git a/frontend/src/app/features/block-production/won-slots/side-panel/block-production-won-slots-side-panel.component.html b/frontend/src/app/features/block-production/won-slots/side-panel/block-production-won-slots-side-panel.component.html index dc6ca90815..505fdcd56b 100644 --- a/frontend/src/app/features/block-production/won-slots/side-panel/block-production-won-slots-side-panel.component.html +++ b/frontend/src/app/features/block-production/won-slots/side-panel/block-production-won-slots-side-panel.component.html @@ -115,12 +115,12 @@
- Mina Explorer + Minascan
diff --git a/frontend/src/app/features/block-production/won-slots/side-panel/block-production-won-slots-side-panel.component.ts b/frontend/src/app/features/block-production/won-slots/side-panel/block-production-won-slots-side-panel.component.ts index 486440ef00..f38f601d7a 100644 --- a/frontend/src/app/features/block-production/won-slots/side-panel/block-production-won-slots-side-panel.component.ts +++ b/frontend/src/app/features/block-production/won-slots/side-panel/block-production-won-slots-side-panel.component.ts @@ -47,7 +47,7 @@ export class BlockProductionWonSlotsSidePanelComponent extends StoreDispatcher i private timer: any; private stopTimer: boolean; private stateWhenReachedZero: { globalSlot: number; status: BlockProductionWonSlotsStatus }; - private minaExplorer: string; + private network: string; @ViewChild('beforeLedger', { read: ViewContainerRef }) private beforeLedger: ViewContainerRef; @ViewChild('ledger', { read: ViewContainerRef }) private ledger: ViewContainerRef; @@ -67,7 +67,7 @@ export class BlockProductionWonSlotsSidePanelComponent extends StoreDispatcher i private listenToActiveNode(): void { this.select(AppSelectors.activeNodeDetails, (node: AppNodeDetails) => { - this.minaExplorer = node.network?.toLowerCase(); + this.network = node.network?.toLowerCase(); }, filter(Boolean)); } @@ -105,9 +105,8 @@ export class BlockProductionWonSlotsSidePanelComponent extends StoreDispatcher i }, filter(Boolean)); } - viewInMinaExplorer(): void { - const network = this.minaExplorer !== 'mainnet' ? (this.minaExplorer + '.') : ''; - const url = `https://${network}minaexplorer.com/block/${this.slot.hash}`; + viewInMinascan(): void { + const url = `https://minascan.io/${this.network}/block/${this.slot.hash}`; safelyExecuteInBrowser(() => window.open(url, '_blank')); } @@ -136,7 +135,6 @@ export class BlockProductionWonSlotsSidePanelComponent extends StoreDispatcher i this.stopTimer = true; this.stateWhenReachedZero = { globalSlot: this.slot.globalSlot, status: this.slot.status }; this.remainingTime = '-'; - this.queryServerOftenToGetTheNewSlotState(); } this.detect(); } else { @@ -176,16 +174,6 @@ export class BlockProductionWonSlotsSidePanelComponent extends StoreDispatcher i } } - private queryServerOftenToGetTheNewSlotState(): void { - const timer = setInterval(() => { - if (!this.stateWhenReachedZero) { - clearInterval(timer); - return; - } - this.dispatch2(BlockProductionWonSlotsActions.getSlots()); - }, 1000); - } - closeSidePanel(): void { this.router.navigate([Routes.BLOCK_PRODUCTION, Routes.WON_SLOTS]); this.dispatch2(BlockProductionWonSlotsActions.toggleSidePanel()); diff --git a/frontend/src/app/features/block-production/won-slots/table/block-production-won-slots-table.component.ts b/frontend/src/app/features/block-production/won-slots/table/block-production-won-slots-table.component.ts index 4f23b56c9c..4147130b31 100644 --- a/frontend/src/app/features/block-production/won-slots/table/block-production-won-slots-table.component.ts +++ b/frontend/src/app/features/block-production/won-slots/table/block-production-won-slots-table.component.ts @@ -10,6 +10,8 @@ import { } from '@shared/types/block-production/won-slots/block-production-won-slots-slot.type'; import { BlockProductionWonSlotsSelectors } from '@block-production/won-slots/block-production-won-slots.state'; import { BlockProductionWonSlotsActions } from '@block-production/won-slots/block-production-won-slots.actions'; +import { SentryService } from '@core/services/sentry.service'; +import { WebNodeService } from 
'@core/services/web-node.service'; @Component({ selector: 'mina-block-production-won-slots-table', @@ -47,13 +49,34 @@ export class BlockProductionWonSlotsTableComponent extends MinaTableRustWrapper< private fromRoute: string; - constructor(private router: Router) { super(); } + constructor(private router: Router, + private sentryService: SentryService, + private webnodeService: WebNodeService) { + super(); + } + + currentlyProducing: BlockProductionWonSlotsSlot; override async ngOnInit(): Promise { await super.ngOnInit(); this.listenToRouteChange(); this.listenToActiveSlotChange(); this.listenToNodesChanges(); + + + // this.select(BlockProductionWonSlotsSelectors.filteredSlots, (slots: BlockProductionWonSlotsSlot[]) => { + // const blockProductionWonSlotsSlot = slots.find(d => d.message.includes('Confirm') || d.message.includes('Producing')); + // + // if (blockProductionWonSlotsSlot?.globalSlot !== this.currentlyProducing?.globalSlot) { + // if (!blockProductionWonSlotsSlot?.globalSlot) { + // const block = slots.find(d => d.globalSlot === this.currentlyProducing?.globalSlot); + // this.sentryService.updateProducedBlock(block, this.webnodeService.publicKey); + // } + // this.currentlyProducing = blockProductionWonSlotsSlot; + // } + // this.detect(); + // }, filter((slots: BlockProductionWonSlotsSlot[]) => slots.length > 0)); + } protected override setupTable(): void { diff --git a/frontend/src/app/features/dashboard/dashboard-blocks-sync/dashboard-blocks-sync.component.html b/frontend/src/app/features/dashboard/dashboard-blocks-sync/dashboard-blocks-sync.component.html index daae651356..b68bab1a95 100644 --- a/frontend/src/app/features/dashboard/dashboard-blocks-sync/dashboard-blocks-sync.component.html +++ b/frontend/src/app/features/dashboard/dashboard-blocks-sync/dashboard-blocks-sync.component.html @@ -21,12 +21,12 @@ labelColor="var(--base-tertiary)" [color]="fetchedPercentage === '100%' ? 'var(--success-primary)' : 'var(--base-primary)'" [value]="fetchedPercentage" - [hint]="(fetched || 0) + '/290 blocks'"> + [hint]="(fetched || 0) + '/' + (lengthWithoutRoot || 0) + ' blocks'"> + [hint]="(applied || 0) + '/' + (lengthWithoutRoot || 0) + ' blocks'"> @if (isDesktop) { ![NodesOverviewNodeBlockStatus.MISSING, NodesOverviewNodeBlockStatus.FETCHING].includes(b.status)).length; this.applied = blocks.filter(b => b.status === NodesOverviewNodeBlockStatus.APPLIED).length; diff --git a/frontend/src/app/features/dashboard/dashboard-ledger/dashboard-ledger.component.html b/frontend/src/app/features/dashboard/dashboard-ledger/dashboard-ledger.component.html index 4bce1f531d..7e4e9379c1 100644 --- a/frontend/src/app/features/dashboard/dashboard-ledger/dashboard-ledger.component.html +++ b/frontend/src/app/features/dashboard/dashboard-ledger/dashboard-ledger.component.html @@ -76,7 +76,12 @@
-
Fetch parts
+
+ Fetch parts + @if (fetchedParts.total) { + {{ fetchedParts.current }}/{{ fetchedParts.total }} + } +
{{ ledgers.rootStaged.staged.fetchPartsDuration ?? ledgers.rootStaged.staged.fetchPassedTime | secDuration: configMap.rootStaged }}
; private overlayRef: OverlayRef; constructor(private overlay: Overlay, private viewContainerRef: ViewContainerRef, - private sentryService: SentryService) { - super(); - } + private sentryService: SentryService, + private webNodeService: WebNodeService) { super() } ngOnInit(): void { this.listenToActiveNode(); @@ -104,17 +122,6 @@ export class DashboardLedgerComponent extends StoreDispatcher implements OnInit, }); } - remainingStakingLedger: number; - private previousStakingLedgerDownloaded: number; - remainingNextLedger: number; - private previousNextLedgerDownloaded: number; - remainingRootSnarkedLedger: number; - private previousRootSnarkedLedgerDownloaded: number; - remainingRootStagedLedgerFetchParts: number; - private previousRootStagedLedgerDownloaded: number; - remainingReconstruct: number = 20; - private reconstructTimer: any; - private listenToNodesChanges(): void { this.select(selectDashboardNodesAndRpcStats, ([nodes, rpcStats]: [NodesOverviewNode[], DashboardRpcStats]) => { if (nodes.length === 0) { @@ -136,6 +143,7 @@ export class DashboardLedgerComponent extends StoreDispatcher implements OnInit, this.rootSnarkedProgress = 0; this.rootStagedProgress = 0; this.totalProgress = 0; + this.fetchedParts = { current: 0, total: 0 }; } else { this.ledgers = nodes[0].ledgers; @@ -208,6 +216,10 @@ export class DashboardLedgerComponent extends StoreDispatcher implements OnInit, this.nextProgress = rpcStats.nextLedger?.fetched / rpcStats.nextLedger?.estimation * 100 || 0; this.rootSnarkedProgress = rpcStats.snarkedRootLedger?.fetched / rpcStats.snarkedRootLedger?.estimation * 100 || 0; + if (rpcStats.stagedRootLedger?.estimation) { + this.fetchedParts = { current: rpcStats.stagedRootLedger?.fetched, total: rpcStats.stagedRootLedger?.estimation }; + } + this.rootStagedProgress = 0; if (this.ledgers.rootStaged.staged.fetchPartsEnd) { this.rootStagedProgress += 50; @@ -232,10 +244,11 @@ export class DashboardLedgerComponent extends StoreDispatcher implements OnInit, } if (this.ledgers.rootStaged.state === NodesOverviewLedgerStepState.SUCCESS) { this.rootStagedProgress = 100; + } this.totalProgress = (this.stakingProgress + this.nextProgress + this.rootSnarkedProgress + this.rootStagedProgress) / 4; - this.sentryService.updateLedgerSyncStatus(this.ledgers); + this.sentryService.updateLedgerSyncStatus(this.ledgers, this.webNodeService.publicKey); } this.detect(); }); diff --git a/frontend/src/app/features/leaderboard/leaderboard-apply/leaderboard-apply.component.html b/frontend/src/app/features/leaderboard/leaderboard-apply/leaderboard-apply.component.html index e5e3da605c..c38242ee36 100644 --- a/frontend/src/app/features/leaderboard/leaderboard-apply/leaderboard-apply.component.html +++ b/frontend/src/app/features/leaderboard/leaderboard-apply/leaderboard-apply.component.html @@ -1,3 +1,3 @@ - - Round 1 Applications Close in 5d 5h 12m - Apply arrow_right_alt + + Applications Close Soon arrow_right_alt diff --git a/frontend/src/app/features/leaderboard/leaderboard-apply/leaderboard-apply.component.scss b/frontend/src/app/features/leaderboard/leaderboard-apply/leaderboard-apply.component.scss index 447df2fdcb..97e0746e11 100644 --- a/frontend/src/app/features/leaderboard/leaderboard-apply/leaderboard-apply.component.scss +++ b/frontend/src/app/features/leaderboard/leaderboard-apply/leaderboard-apply.component.scss @@ -1,5 +1,14 @@ @import 'leaderboard-variables'; +@keyframes bounceLeftToRight { + 0%, 100% { + transform: translateX(0); + } + 50% { + transform: translateX(10px); + } +} + 
.gradient { height: 52px; background: $mina-brand-gradient; @@ -13,4 +22,8 @@ @media (max-width: 767px) { font-size: 3.1vw; } + + &:hover .mina-icon { + animation: bounceLeftToRight .85s infinite ease-out; + } } diff --git a/frontend/src/app/features/leaderboard/leaderboard-details/leaderboard-details.component.scss b/frontend/src/app/features/leaderboard/leaderboard-details/leaderboard-details.component.scss index b5c5a9483d..ca5b5b1ddb 100644 --- a/frontend/src/app/features/leaderboard/leaderboard-details/leaderboard-details.component.scss +++ b/frontend/src/app/features/leaderboard/leaderboard-details/leaderboard-details.component.scss @@ -36,6 +36,6 @@ h1 { margin-bottom: 80px; color: $mina-base-primary; font-size: 80px; - font-weight: 400; + font-weight: 300; line-height: 80px; } diff --git a/frontend/src/app/features/leaderboard/leaderboard-filters/leaderboard-filters.component.scss b/frontend/src/app/features/leaderboard/leaderboard-filters/leaderboard-filters.component.scss index 2ac9d53612..81fe7b5c56 100644 --- a/frontend/src/app/features/leaderboard/leaderboard-filters/leaderboard-filters.component.scss +++ b/frontend/src/app/features/leaderboard/leaderboard-filters/leaderboard-filters.component.scss @@ -18,12 +18,12 @@ padding: 0 24px; border-radius: 44px; font-size: 16px; - transition: all 0.15s ease; background-color: $mina-base-divider; color: $black; cursor: pointer; position: relative; overflow: hidden; + transition: all 0.15s ease; &:hover { box-shadow: 0 2px 4px 0 $black3; @@ -106,7 +106,6 @@ .search input, .search input::placeholder { font-size: 3.3vw; - } } } diff --git a/frontend/src/app/features/leaderboard/leaderboard-footer/leaderboard-footer.component.html b/frontend/src/app/features/leaderboard/leaderboard-footer/leaderboard-footer.component.html index cffccd6d15..8459c0a98f 100644 --- a/frontend/src/app/features/leaderboard/leaderboard-footer/leaderboard-footer.component.html +++ b/frontend/src/app/features/leaderboard/leaderboard-footer/leaderboard-footer.component.html @@ -1,8 +1,8 @@
© 2025 Mina Foundation. All rights reserved.
diff --git a/frontend/src/app/features/leaderboard/leaderboard-header/leaderboard-header.component.html b/frontend/src/app/features/leaderboard/leaderboard-header/leaderboard-header.component.html index 2e93e9e1d0..e9c9c7bcb1 100644 --- a/frontend/src/app/features/leaderboard/leaderboard-header/leaderboard-header.component.html +++ b/frontend/src/app/features/leaderboard/leaderboard-header/leaderboard-header.component.html @@ -6,8 +6,9 @@ [@dropdownAnimation]="isMenuOpen ? 'open' : 'closed'" [class.open]="isMenuOpen" (clickOutside)="closeMenu()"> - Mina Web Node Leaderboard - Round 1 Details + Program Details + Prize Draw & Tie-Break + Process
diff --git a/frontend/src/app/features/leaderboard/leaderboard-impressum/leaderboard-impressum.component.scss b/frontend/src/app/features/leaderboard/leaderboard-impressum/leaderboard-impressum.component.scss index b5c5a9483d..ca5b5b1ddb 100644 --- a/frontend/src/app/features/leaderboard/leaderboard-impressum/leaderboard-impressum.component.scss +++ b/frontend/src/app/features/leaderboard/leaderboard-impressum/leaderboard-impressum.component.scss @@ -36,6 +36,6 @@ h1 { margin-bottom: 80px; color: $mina-base-primary; font-size: 80px; - font-weight: 400; + font-weight: 300; line-height: 80px; } diff --git a/frontend/src/app/features/leaderboard/leaderboard-landing-page/leaderboard-landing-page.component.html b/frontend/src/app/features/leaderboard/leaderboard-landing-page/leaderboard-landing-page.component.html index c5a364c04b..043b0cf0ea 100644 --- a/frontend/src/app/features/leaderboard/leaderboard-landing-page/leaderboard-landing-page.component.html +++ b/frontend/src/app/features/leaderboard/leaderboard-landing-page/leaderboard-landing-page.component.html @@ -1,5 +1,9 @@ - -
+
+ +
+ +
+
@@ -10,10 +14,10 @@
-

We(b) Node
Do You?

+

We Node
Do You?

- Apply to be a node runner + Apply Now

Round 1 is limited to 100 seats

@@ -44,7 +48,7 @@

Node-To-Earn

[style.background-image]="'url(assets/images/landing-page/cta-section-bg.png)'">

Run a web node on Testnet and enter a 1000 MINA lottery

- Start Testing & Earn $500 USD + Apply Now

Apply by DATE. Not selected? You're first in line next time.

@@ -56,13 +60,14 @@

Run a web node on Testnet and enter a 1000 MINA lottery

-

The Mina Web Node, part 1

-

Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore . -

-

Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore .

+

Join the Mina Web Node Testing Program

+

Discover all the details about Mina's Web Node Testing Program. Learn how to apply, participate, and earn rewards in Mina Web Node Testing Round 1.

diff --git a/frontend/src/app/features/leaderboard/leaderboard-landing-page/leaderboard-landing-page.component.scss b/frontend/src/app/features/leaderboard/leaderboard-landing-page/leaderboard-landing-page.component.scss index 0ee7917bcf..63939df9ae 100644 --- a/frontend/src/app/features/leaderboard/leaderboard-landing-page/leaderboard-landing-page.component.scss +++ b/frontend/src/app/features/leaderboard/leaderboard-landing-page/leaderboard-landing-page.component.scss @@ -4,12 +4,26 @@ :host { - padding-top: 52px; background-color: $mina-cta-primary; color: $mina-base-primary; font-family: "IBM Plex Sans", sans-serif; } +.floating-banner { + position: fixed; + bottom: -100%; + left: 20px; + right: 20px; + transition: bottom 0.5s ease; + z-index: 1000; + border-radius: 12px; + box-shadow: 0 2px 4px rgba(0, 0, 0, 0.1); + + &.show { + bottom: 20px; + } +} + main, mina-leaderboard-header, mina-leaderboard-footer { @@ -34,6 +48,7 @@ mina-leaderboard-footer { line-height: 20px; font-weight: 300; transition: .15s ease; + min-width: 240px; &:hover { background-color: $black; @@ -49,6 +64,7 @@ mina-leaderboard-footer { } .overflow-y-scroll { + padding-bottom: 72px; background-color: $mina-cta-primary; &::-webkit-scrollbar-track { diff --git a/frontend/src/app/features/leaderboard/leaderboard-landing-page/leaderboard-landing-page.component.ts b/frontend/src/app/features/leaderboard/leaderboard-landing-page/leaderboard-landing-page.component.ts index afce1668e7..233518cf56 100644 --- a/frontend/src/app/features/leaderboard/leaderboard-landing-page/leaderboard-landing-page.component.ts +++ b/frontend/src/app/features/leaderboard/leaderboard-landing-page/leaderboard-landing-page.component.ts @@ -1,4 +1,7 @@ -import { ChangeDetectionStrategy, Component, OnInit } from '@angular/core'; +import { AfterViewInit, ChangeDetectionStrategy, Component, DestroyRef, ElementRef, ViewChild } from '@angular/core'; +import { ManualDetection } from '@openmina/shared'; +import { debounceTime, fromEvent } from 'rxjs'; +import { takeUntilDestroyed } from '@angular/core/rxjs-interop'; @Component({ selector: 'mina-leaderboard-landing-page', @@ -7,9 +10,34 @@ import { ChangeDetectionStrategy, Component, OnInit } from '@angular/core'; changeDetection: ChangeDetectionStrategy.OnPush, host: { class: 'flex-column h-100 align-center' }, }) -export class LeaderboardLandingPageComponent implements OnInit { +export class LeaderboardLandingPageComponent extends ManualDetection implements AfterViewInit { + showBanner: boolean = false; - ngOnInit(): void { + private readonly SCROLL_THRESHOLD = 100; + @ViewChild('scrollContainer') private scrollContainer!: ElementRef; + + constructor(private destroyRef: DestroyRef) { + super(); } + ngAfterViewInit(): void { + const container = this.scrollContainer.nativeElement; + + fromEvent(container, 'scroll') + .pipe( + debounceTime(100), + takeUntilDestroyed(this.destroyRef), + ) + .subscribe(() => { + const scrollPosition = container.scrollTop; + + if (scrollPosition > this.SCROLL_THRESHOLD && !this.showBanner) { + this.showBanner = true; + this.detect(); + } else if (scrollPosition <= this.SCROLL_THRESHOLD && this.showBanner) { + this.showBanner = false; + this.detect(); + } + }); + } } diff --git a/frontend/src/app/features/leaderboard/leaderboard-page/leaderboard-page.component.html b/frontend/src/app/features/leaderboard/leaderboard-page/leaderboard-page.component.html index dcdc8794c2..cdb1d68b39 100644 --- 
a/frontend/src/app/features/leaderboard/leaderboard-page/leaderboard-page.component.html +++ b/frontend/src/app/features/leaderboard/leaderboard-page/leaderboard-page.component.html @@ -1,11 +1,73 @@ - + + + -
+
+
+ + + @if (canDownloadCSV) { + + }
+ + +
+
+
+ info + Live results are not final because blockchain finality takes time +
+ chevron_right +
+
+
    +
  • New blocks can reorganize the chain, changing past data
  • Network nodes need time to reach consensus
  • Block confirmations become more certain over time
  • Final results will be published after the program ends and complete chain verification
+

To learn more about how uptime is tracked, please refer to the "How Uptime Tracking Works" section in the program details.
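For context, the percentage shown on this page is derived later in this diff (leaderboard.service.ts) as the submitter's score over the current maximum score, where a score point appears to correspond to one five-minute heartbeat window. Roughly:

// Sketch of getUptimePercentage from further down, with the zero guard
// hoisted before the division instead of after it.
function uptimePercentage(score: number, maxScore: number): number {
  if (maxScore === 0) {
    return 0;
  }
  return Number(((score / maxScore) * 100).toFixed(2));
}

// e.g. 144 of 288 five-minute windows: uptimePercentage(144, 288) === 50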

+
+
+
+ + +
+
+ +
+
+
+ + +
+

Download Public Keys that qualify for the following Prizes:

+
+ + + +
+
+
diff --git a/frontend/src/app/features/leaderboard/leaderboard-page/leaderboard-page.component.scss b/frontend/src/app/features/leaderboard/leaderboard-page/leaderboard-page.component.scss index 557f63b4eb..04dc73f796 100644 --- a/frontend/src/app/features/leaderboard/leaderboard-page/leaderboard-page.component.scss +++ b/frontend/src/app/features/leaderboard/leaderboard-page/leaderboard-page.component.scss @@ -1,10 +1,40 @@ @import 'leaderboard-variables'; :host { - padding-top: 52px; background-color: $mina-cta-primary; } +.floating-banner { + position: fixed; + bottom: -100%; + left: 20px; + right: 20px; + transition: bottom 0.5s ease; + z-index: 1000; + border-radius: 12px; + box-shadow: 0 2px 4px rgba(0, 0, 0, 0.1); + + &.show { + bottom: 20px; + } +} + +.overflow-y-scroll { + background-color: $mina-cta-primary; + + &::-webkit-scrollbar-track { + background-color: transparent; + } + + &::-webkit-scrollbar-thumb { + background-color: $white4; + } + + &::-webkit-scrollbar-thumb:hover { + background-color: $mina-base-secondary; + } +} + main, mina-leaderboard-header, mina-leaderboard-footer { @@ -41,3 +71,53 @@ main { background-color: $mina-base-secondary; } } + +.accordion { + color: $mina-base-primary; + font-size: 16px; + font-weight: 600; + background: rgba(248, 214, 17, 0.10); + cursor: pointer; + padding: 16px; +} + +.accordion-content { + font-weight: 400; + line-height: 24px; +} + +p { + font-size: 16px; + line-height: 24px; + color: $mina-base-primary; +} + +.download-btns { + color: $mina-base-primary; + margin-bottom: 80px; + + button { + border-radius: 999px; + font-size: 20px; + gap: 8px; + padding: 0 16px; + font-weight: 300; + transition: all 0.15s ease; + + &:hover { + box-shadow: 0 2px 4px 0 $black3; + } + + &:nth-child(1) { + background-color: $mina-brand-aqua; + } + + &:nth-child(2) { + background-color: $mina-brand-lilac; + } + + &:nth-child(3) { + background-color: $mina-brand-peony; + } + } +} diff --git a/frontend/src/app/features/leaderboard/leaderboard-page/leaderboard-page.component.ts b/frontend/src/app/features/leaderboard/leaderboard-page/leaderboard-page.component.ts index 15e87823ce..550c29022f 100644 --- a/frontend/src/app/features/leaderboard/leaderboard-page/leaderboard-page.component.ts +++ b/frontend/src/app/features/leaderboard/leaderboard-page/leaderboard-page.component.ts @@ -1,8 +1,12 @@ -import { ChangeDetectionStrategy, Component, OnInit } from '@angular/core'; +import { AfterViewInit, ChangeDetectionStrategy, Component, DestroyRef, ElementRef, OnInit, ViewChild } from '@angular/core'; import { StoreDispatcher } from '@shared/base-classes/store-dispatcher.class'; import { LeaderboardActions } from '@leaderboard/leaderboard.actions'; -import { timer } from 'rxjs'; +import { debounceTime, fromEvent, timer } from 'rxjs'; import { untilDestroyed } from '@ngneat/until-destroy'; +import { trigger, state, style, animate, transition } from '@angular/animations'; +import { ManualDetection } from '@openmina/shared'; +import { takeUntilDestroyed } from '@angular/core/rxjs-interop'; +import { LeaderboardService } from '@leaderboard/leaderboard.service'; @Component({ selector: 'mina-leaderboard-page', @@ -10,8 +14,42 @@ import { untilDestroyed } from '@ngneat/until-destroy'; styleUrl: './leaderboard-page.component.scss', changeDetection: ChangeDetectionStrategy.OnPush, host: { class: 'flex-column h-100' }, + animations: [ + trigger('expandCollapse', [ + state('false', style({ + height: '0', + overflow: 'hidden', + opacity: '0', + })), + state('true', 
style({ + height: '*', + opacity: '1', + })), + transition('false <=> true', [ + animate('200ms ease-in-out'), + ]), + ]), + trigger('rotateIcon', [ + state('false', style({ transform: 'rotate(0)' })), + state('true', style({ transform: 'rotate(90deg)' })), + transition('false <=> true', [ + animate('200ms'), + ]), + ]), + ], }) -export class LeaderboardPageComponent extends StoreDispatcher implements OnInit { +export class LeaderboardPageComponent extends StoreDispatcher implements OnInit, AfterViewInit { + isExpanded = false; + showBanner: boolean = false; + canDownloadCSV = localStorage.getItem('download_leaderboard') === 'true'; + + private readonly SCROLL_THRESHOLD = 100; + @ViewChild('scrollContainer') private scrollContainer!: ElementRef; + + constructor(private destroyRef: DestroyRef, + private leaderboardService: LeaderboardService) { + super(); + } ngOnInit(): void { timer(0, 5000) @@ -21,4 +59,40 @@ export class LeaderboardPageComponent extends StoreDispatcher implements OnInit }); } + ngAfterViewInit(): void { + const container = this.scrollContainer.nativeElement; + + fromEvent(container, 'scroll') + .pipe( + debounceTime(100), + takeUntilDestroyed(this.destroyRef), + ) + .subscribe(() => { + const scrollPosition = container.scrollTop; + + if (scrollPosition > this.SCROLL_THRESHOLD && !this.showBanner) { + this.showBanner = true; + this.detect(); + } else if (scrollPosition <= this.SCROLL_THRESHOLD && this.showBanner) { + this.showBanner = false; + this.detect(); + } + }); + } + + downloadUptimeLottery(): void { + this.leaderboardService.downloadUptimeLottery(); + } + + downloadHighestUptime(): void { + this.leaderboardService.downloadHighestUptime(); + } + + downloadMostProducedBlocks(): void { + this.leaderboardService.downloadMostProducedBlocks(); + } + + downloadAll(): void { + this.leaderboardService.downloadAll(); + } } diff --git a/frontend/src/app/features/leaderboard/leaderboard-privacy-policy/leaderboard-privacy-policy.component.scss b/frontend/src/app/features/leaderboard/leaderboard-privacy-policy/leaderboard-privacy-policy.component.scss index b5c5a9483d..ca5b5b1ddb 100644 --- a/frontend/src/app/features/leaderboard/leaderboard-privacy-policy/leaderboard-privacy-policy.component.scss +++ b/frontend/src/app/features/leaderboard/leaderboard-privacy-policy/leaderboard-privacy-policy.component.scss @@ -36,6 +36,6 @@ h1 { margin-bottom: 80px; color: $mina-base-primary; font-size: 80px; - font-weight: 400; + font-weight: 300; line-height: 80px; } diff --git a/frontend/src/app/features/leaderboard/leaderboard-table/leaderboard-table.component.html b/frontend/src/app/features/leaderboard/leaderboard-table/leaderboard-table.component.html index 6c9f2f3686..30826d9f06 100644 --- a/frontend/src/app/features/leaderboard/leaderboard-table/leaderboard-table.component.html +++ b/frontend/src/app/features/leaderboard/leaderboard-table/leaderboard-table.component.html @@ -8,25 +8,29 @@
@if (!isLoading) { @for (row of rows; track $index) { -
+
- circle {{ row.publicKey | truncateMid: (desktop ? 15 : 6): 6 }} - - {{ row.uptimePercentage }}% - @if (row.uptimePercentage > 33.33) { - bookmark_check + + {{ row.uptimePercentage }}% + @if (!row.isWhale && row.uptimePercentage > 33.33) { + circle } @if (row.uptimePrize) { - + circle } {{ row.blocksProduced ?? 0 }} @if (row.blocksPrize) { - + circle }
@@ -40,11 +44,3 @@
}
- - - - - - diff --git a/frontend/src/app/features/leaderboard/leaderboard-table/leaderboard-table.component.scss b/frontend/src/app/features/leaderboard/leaderboard-table/leaderboard-table.component.scss index a4868d0b6c..3a318ddea7 100644 --- a/frontend/src/app/features/leaderboard/leaderboard-table/leaderboard-table.component.scss +++ b/frontend/src/app/features/leaderboard/leaderboard-table/leaderboard-table.component.scss @@ -11,6 +11,10 @@ } .row-wrap { + &.whale { + opacity: 0.3; + } + &.head { max-width: unset; height: 56px; @@ -36,13 +40,20 @@ font-size: 16px; @media (max-width: 480px) { - grid-template-columns: 48% 24% 1fr; + grid-template-columns: 43% 28% 1fr; + } + + @media (max-width: 676px) { + grid-template-columns: 43% 28% 1fr; } span { color: $black; &:not(.mina-icon) { + @media (max-width: 676px) { + font-size: 2.5vw; + } @media (max-width: 480px) { font-size: 3vw; } @@ -50,19 +61,32 @@ } .circle { - color: $black4; + &.blue { + color: $mina-brand-cyan; + } + + &.purple { + color: $mina-access-primary; + } + + &.dark-red { + color: $mina-brand-orchid; + } + } + + .cursor-default { + cursor: default; } .perc { - width: 37px; + width: 58px; + @media (max-width: 676px) { + width: 55px; + } @media (max-width: 480px) { - width: 26px; + width: 48px; } } - - .circle.active { - color: $mina-brand-cyan; - } } .fx-row-vert-cent { diff --git a/frontend/src/app/features/leaderboard/leaderboard-table/leaderboard-table.component.ts b/frontend/src/app/features/leaderboard/leaderboard-table/leaderboard-table.component.ts index a36b6fcc7a..898136a931 100644 --- a/frontend/src/app/features/leaderboard/leaderboard-table/leaderboard-table.component.ts +++ b/frontend/src/app/features/leaderboard/leaderboard-table/leaderboard-table.component.ts @@ -2,7 +2,7 @@ import { ChangeDetectionStrategy, Component, OnInit } from '@angular/core'; import { LeaderboardSelectors } from '@leaderboard/leaderboard.state'; import { HeartbeatSummary } from '@shared/types/leaderboard/heartbeat-summary.type'; import { StoreDispatcher } from '@shared/base-classes/store-dispatcher.class'; -import { isDesktop } from '@openmina/shared'; +import { isDesktop, TooltipPosition } from '@openmina/shared'; import { animate, style, transition, trigger } from '@angular/animations'; @Component({ @@ -47,4 +47,6 @@ export class LeaderboardTableComponent extends StoreDispatcher implements OnInit this.detect(); }); } + + protected readonly TooltipPosition = TooltipPosition; } diff --git a/frontend/src/app/features/leaderboard/leaderboard-terms-and-conditions/leaderboard-terms-and-conditions.component.scss b/frontend/src/app/features/leaderboard/leaderboard-terms-and-conditions/leaderboard-terms-and-conditions.component.scss index b5c5a9483d..ca5b5b1ddb 100644 --- a/frontend/src/app/features/leaderboard/leaderboard-terms-and-conditions/leaderboard-terms-and-conditions.component.scss +++ b/frontend/src/app/features/leaderboard/leaderboard-terms-and-conditions/leaderboard-terms-and-conditions.component.scss @@ -36,6 +36,6 @@ h1 { margin-bottom: 80px; color: $mina-base-primary; font-size: 80px; - font-weight: 400; + font-weight: 300; line-height: 80px; } diff --git a/frontend/src/app/features/leaderboard/leaderboard-title/leaderboard-title.component.html b/frontend/src/app/features/leaderboard/leaderboard-title/leaderboard-title.component.html index 243f8138fe..7ce680fc1d 100644 --- a/frontend/src/app/features/leaderboard/leaderboard-title/leaderboard-title.component.html +++ 
b/frontend/src/app/features/leaderboard/leaderboard-title/leaderboard-title.component.html @@ -1 +1,2 @@ +

Mina Web Node Testing Program

Leaderboard
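One behavioural note ahead of the reducer change below: sorting now demotes the hard-coded "whale" accounts to the bottom of the table regardless of the selected sort column, keeping them ordered by uptime among themselves. Distilled (types as in this diff):

// Sketch of the sortHeartbeats change in leaderboard.reducer.ts.
function demoteWhales(rows: HeartbeatSummary[]): HeartbeatSummary[] {
  const whales = rows
    .filter(r => r.isWhale)
    .sort((a, b) => b.uptimePercentage - a.uptimePercentage);
  return [...rows.filter(r => !r.isWhale), ...whales];
}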

diff --git a/frontend/src/app/features/leaderboard/leaderboard-title/leaderboard-title.component.scss b/frontend/src/app/features/leaderboard/leaderboard-title/leaderboard-title.component.scss
index e4872bf88f..c65ca7f048 100644
--- a/frontend/src/app/features/leaderboard/leaderboard-title/leaderboard-title.component.scss
+++ b/frontend/src/app/features/leaderboard/leaderboard-title/leaderboard-title.component.scss
@@ -11,9 +11,19 @@ h1 {
   font-weight: 300;
   font-size: 80px;
   color: $black6;
-  margin: 80px 0;
+  margin-bottom: 56px;
+  margin-top: 0;

   @media (max-width: 1023px) {
     font-size: 10vw;
   }
 }
+
+h2 {
+  margin-top: 72px;
+  margin-bottom: 16px;
+  color: $mina-base-secondary;
+  font-size: 20px;
+  font-weight: 500;
+  line-height: 28px;
+}
diff --git a/frontend/src/app/features/leaderboard/leaderboard.reducer.ts b/frontend/src/app/features/leaderboard/leaderboard.reducer.ts
index 770d162cb4..bc90f3941e 100644
--- a/frontend/src/app/features/leaderboard/leaderboard.reducer.ts
+++ b/frontend/src/app/features/leaderboard/leaderboard.reducer.ts
@@ -40,7 +40,10 @@ export const leaderboardReducer = createReducer(

 function sortHeartbeats(node: HeartbeatSummary[], tableSort: TableSort): HeartbeatSummary[] {
-  return sort(node, tableSort, []);
+  const data = sort(node, tableSort, []);
+  const whales = data.filter(d => d.isWhale).sort((a, b) => b.uptimePercentage - a.uptimePercentage);
+  const nonWhales = data.filter(d => !d.isWhale);
+  return [...nonWhales, ...whales];
 }

 function filterHeartbeats(summaries: HeartbeatSummary[], filters: any): HeartbeatSummary[] {
diff --git a/frontend/src/app/features/leaderboard/leaderboard.routing.ts b/frontend/src/app/features/leaderboard/leaderboard.routing.ts
index 69b819e42c..1a72308dba 100644
--- a/frontend/src/app/features/leaderboard/leaderboard.routing.ts
+++ b/frontend/src/app/features/leaderboard/leaderboard.routing.ts
@@ -29,7 +29,7 @@ const routes: Routes = [
   },
   {
     path: '**',
-    redirectTo: '',
+    redirectTo: 'leaderboard',
   },
 ];
diff --git a/frontend/src/app/features/leaderboard/leaderboard.service.ts b/frontend/src/app/features/leaderboard/leaderboard.service.ts
index 6c19937356..24a47e6471 100644
--- a/frontend/src/app/features/leaderboard/leaderboard.service.ts
+++ b/frontend/src/app/features/leaderboard/leaderboard.service.ts
@@ -1,7 +1,10 @@
-import { Injectable } from '@angular/core';
+import { Injectable, Optional } from '@angular/core';
 import { combineLatest, map, Observable } from 'rxjs';
 import { HeartbeatSummary } from '@shared/types/leaderboard/heartbeat-summary.type';
-import { collection, collectionData, CollectionReference, Firestore } from '@angular/fire/firestore';
+import { collection, collectionData, CollectionReference, Firestore, getDocs } from '@angular/fire/firestore';
+import { WebNodeService } from '@core/services/web-node.service';
+import { getElapsedTimeInMinsAndHours } from '@shared/helpers/date.helper';
+import { ONE_THOUSAND, toReadableDate } from '@openmina/shared';

 @Injectable({
   providedIn: 'root',
@@ -11,9 +14,14 @@ export class LeaderboardService {
   private scoresCollection: CollectionReference;
   private maxScoreCollection: CollectionReference;

-  constructor(private firestore: Firestore) {
-    this.scoresCollection = collection(this.firestore, 'scores');
-    this.maxScoreCollection = collection(this.firestore, 'maxScore');
+  private maxScoreRightNow: number;
+
+  constructor(@Optional() private firestore: Firestore,
+              private webnodeService: WebNodeService) {
+    if (this.firestore) {
+      this.scoresCollection = collection(this.firestore, 'scores');
+      this.maxScoreCollection = collection(this.firestore, 'maxScore');
+    }
   }

   getHeartbeatsSummaries(): Observable<HeartbeatSummary[]> {
@@ -22,26 +30,318 @@ export class LeaderboardService {
       collectionData(this.maxScoreCollection, { idField: 'id' }),
     ]).pipe(
       map(([scores, maxScore]) => {
-        const maxScoreRightNow = maxScore.find(c => c.id === 'current')['value'];
-
-        const items = scores.map(score => ({
-          publicKey: score['publicKey'],
-          blocksProduced: score['blocksProduced'],
-          isActive: score['lastUpdated'] > Date.now() - 120000,
-          uptimePercentage: Math.floor((score['score'] / maxScoreRightNow) * 100),
-          uptimePrize: false,
-          blocksPrize: false,
-        } as HeartbeatSummary));
-
-        const sortedItemsByUptime = [...items].sort((a, b) => b.uptimePercentage - a.uptimePercentage);
+        // this.printHeartbeats(scores);
+        const maxScoreNow: any = maxScore.find(c => c.id === 'current');
+        this.maxScoreRightNow = maxScoreNow ? maxScoreNow['value'] : 0;
+        const items = scores.map(score => {
+          const isWhale = score['publicKey'].includes('B62qkiqPXFDayJV8JutYvjerERZ35EKrdmdcXh3j1rDUHRs1bJkFFcX') || score['publicKey'].includes('B62qpQT46XiGQs7KhcczifvvYcnx7fbTzKj8a83UcT2BhPEs5mYnzdp');
+          return ({
+            publicKey: score['publicKey'],
+            blocksProduced: score['blocksProduced'],
+            isWhale,
+            uptimePercentage: this.getUptimePercentage(score['score'], this.maxScoreRightNow),
+            uptimePrize: false,
+            blocksPrize: false,
+            score: score['score'],
+            maxScore: this.maxScoreRightNow,
+          } as HeartbeatSummary);
+        });
+
+        const sortedItemsByUptime = [...items].filter(i => !i.isWhale).sort((a, b) => b.uptimePercentage - a.uptimePercentage);
         const fifthPlacePercentageByUptime = sortedItemsByUptime[4]?.uptimePercentage ?? 0;
-        const highestProducedBlocks = Math.max(...items.map(item => item.blocksProduced));
+        const highestProducedBlocks = Math.max(
+          ...items
+            .filter(i => !i.isWhale)
+            .filter(item => item.score > 0.3333 * this.maxScoreRightNow)
+            .map(item => item.blocksProduced),
+        );

         return items.map(item => ({
           ...item,
-          uptimePrize: item.uptimePercentage >= fifthPlacePercentageByUptime,
-          blocksPrize: item.blocksProduced === highestProducedBlocks,
+          uptimePrize: item.isWhale ? false : (item.uptimePercentage >= fifthPlacePercentageByUptime),
+          blocksPrize: item.isWhale ? false : (item.blocksProduced === highestProducedBlocks),
         }));
       }),
     );
   }
+
+  one: any;
+
+  printHeartbeats(heartbeats: any[]): void {
+    if (this.one) {
+      return;
+    }
+    this.one = 1;
+    // Sort the heartbeats by createTime (oldest first)
+    const sortedHeartbeats = [...heartbeats].sort((a, b) => {
+      const timeA = a.createTime.seconds * 1000 + a.createTime.nanoseconds / 1000000;
+      const timeB = b.createTime.seconds * 1000 + b.createTime.nanoseconds / 1000000;
+      return timeA - timeB;
+    });
+
+    // Create an array of {time, publicKey} objects
+    const formattedData = sortedHeartbeats.map(heartbeat => {
+      // Convert seconds and nanoseconds to milliseconds for Date constructor
+      const milliseconds = heartbeat.createTime.seconds * 1000 + heartbeat.createTime.nanoseconds / 1000000;
+      const date = new Date(milliseconds);
+
+      // Get full day name
+      const dayNames = ['Sunday', 'Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday'];
+      const fullDayName = dayNames[date.getUTCDay()];
+
+      // Format in UTC with full day name
+      const utcString = date.toUTCString();
+      const formattedTime = utcString.replace(/^[A-Za-z]{3},/, `${fullDayName},`);
+
+      return {
+        time: formattedTime,
+        'Public Key': heartbeat.submitter
+      };
+    });
+
+    const csvRows = [];
+
+    // Define headers to match the property names exactly
+    const headers = ['Public Key', 'time'];
+    csvRows.push(headers.join(','));
+
+    // Map rows by accessing properties directly
+    formattedData.forEach((row) => {
+      // Make sure to escape any commas within the values
+      const publicKey = `"${row['Public Key']}"`;
+      const time = `"${row['time']}"`;
+
+      // Join the values with a comma to create a CSV row
+      csvRows.push(`${publicKey},${time}`);
+    });
+
+    const csvString = csvRows.join('\n');
+    const blob = new Blob([csvString], { type: 'text/csv' });
+    const url = URL.createObjectURL(blob);
+
+    const link = document.createElement('a');
+    link.href = url;
+    link.download = `All heartbeats ${new Date().toISOString().replace(/:/g, '-')}.csv`;
+    link.click();
+
+    URL.revokeObjectURL(url);
+  }
+
+  getUptime(): Observable<{ uptimePercentage: number, uptimeTime: string }> {
+    const publicKey = this.webnodeService.privateStake?.publicKey?.replace('\n', '');
+
+    return combineLatest([
+      collectionData(this.scoresCollection, { idField: 'id' }),
+      collectionData(this.maxScoreCollection, { idField: 'id' }),
+    ]).pipe(
+      map(([scores, maxScore]) => {
+        const activeEntry = scores.find(score => score['publicKey'] === publicKey);
+
+        if (!activeEntry) {
+          return {
+            uptimePercentage: 0,
+            uptimeTime: '',
+          };
+        }
+
+        return {
+          uptimePercentage: this.getUptimePercentage(activeEntry['score'], maxScore[0]['value']),
+          uptimeTime: getElapsedTimeInMinsAndHours(activeEntry['score'] * 5),
+        };
+      }),
+    );
+  }
+
+  private camelCaseToTitle(camelCase: string): string {
+    return camelCase
+      .replace(/([A-Z])/g, ' $1')
+      .replace(/^./, match => match.toUpperCase());
+  }
+
+  private getUptimePercentage(score: number, maxScore: number): number {
+    let uptimePercentage = Number(((score / maxScore) * 100).toFixed(2));
+    if (maxScore === 0) {
+      uptimePercentage = 0;
+    }
+    return uptimePercentage;
+  }
+
+  async downloadUptimeLottery(): Promise<void> {
+    const querySnapshot = await getDocs(this.scoresCollection);
+    const scoresData: any[] = [];
+
+    querySnapshot.forEach((doc) => {
+      scoresData.push({ id: doc.id, ...doc.data() });
+    });
+
+    const csvRows = [];
+
+    let filteredData = scoresData
+      .map(row => ({
+        publicKey: row.publicKey,
+        score: row.score,
+      }))
+      .filter(row => row.score > 0.3333 * this.maxScoreRightNow);
+    filteredData = [...filteredData].sort((a, b) => b.score - a.score);
+
+    const headers = ['publicKey', 'score'].map(header => this.camelCaseToTitle(header));
+    csvRows.push(headers.join(','));
+
+    filteredData.forEach((row: any) => {
+      const values = headers.map(header => {
+        const key = header.charAt(0).toLowerCase() + header.slice(1); // Convert to corresponding key
+        const escape = ('' + row[key.replace(' ', '')]).replace(/"/g, '\\"');
+        return `"${escape}"`;
+      });
+      csvRows.push(values.join(','));
+    });
+
+    const csvString = csvRows.join('\n');
+    const blob = new Blob([csvString], { type: 'text/csv' });
+    const url = URL.createObjectURL(blob);
+
+    const link = document.createElement('a');
+    link.href = url;
+    link.download = `export_${new Date().toISOString()}.csv`;
+    link.click();
+
+    URL.revokeObjectURL(url);
+  }
+
+  async downloadHighestUptime(): Promise<void> {
+    const querySnapshot = await getDocs(this.scoresCollection);
+    const scoresData: any[] = [];
+
+    querySnapshot.forEach((doc) => {
+      scoresData.push({ id: doc.id, ...doc.data() });
+    });
+
+    const csvRows = [];
+
+    let filteredData = scoresData
+      .map(row => ({
+        publicKey: row.publicKey,
+        score: row.score,
+      }))
+      .filter(row => row.score > 0.3333 * this.maxScoreRightNow);
+    filteredData = [...filteredData].sort((a, b) => b.score - a.score);
+
+    const sortedItemsByUptime = [...filteredData].sort((a, b) => b.score - a.score);
+    const fifthPlaceByUptime = sortedItemsByUptime[4]?.score ?? 0;
+    filteredData = filteredData.filter(row => row.score >= fifthPlaceByUptime);
+
+    // Convert camelCase headers to Title Case with spaces
+    const headers = ['publicKey', 'score'].map(header => this.camelCaseToTitle(header));
+    csvRows.push(headers.join(','));
+
+    filteredData.forEach((row: any) => {
+      const values = headers.map(header => {
+        const key = header.charAt(0).toLowerCase() + header.slice(1); // Convert to corresponding key
+        const escape = ('' + row[key.replace(' ', '')]).replace(/"/g, '\\"');
+        return `"${escape}"`;
+      });
+      csvRows.push(values.join(','));
+    });
+
+    const csvString = csvRows.join('\n');
+    const blob = new Blob([csvString], { type: 'text/csv' });
+    const url = URL.createObjectURL(blob);
+
+    const link = document.createElement('a');
+    link.href = url;
+    link.download = `export_${new Date().toISOString()}.csv`;
+    link.click();
+
+    URL.revokeObjectURL(url);
+  }
+
+  async downloadMostProducedBlocks(): Promise<void> {
+    const querySnapshot = await getDocs(this.scoresCollection);
+    const scoresData: any[] = [];
+
+    querySnapshot.forEach((doc) => {
+      scoresData.push({ id: doc.id, ...doc.data() });
+    });
+
+    const csvRows = [];
+
+    let filteredData = scoresData
+      .filter(row => row.score > 0.3333 * this.maxScoreRightNow)
+      .map(row => ({
+        publicKey: row.publicKey,
+        blocksProduced: row.blocksProduced,
+      }));
+    filteredData = [...filteredData].sort((a, b) => b.blocksProduced - a.blocksProduced);
+
+    const highestProducedBlocks = Math.max(...filteredData.map(row => row.blocksProduced));
+    filteredData = filteredData.filter(row => row.blocksProduced === highestProducedBlocks);
+
+    const headers = ['publicKey', 'blocksProduced'].map(header => this.camelCaseToTitle(header));
+    csvRows.push(headers.join(','));
+
+    filteredData.forEach((row: any) => {
+      const values = headers.map(header => {
+        const key = header.charAt(0).toLowerCase() + header.slice(1); // Convert to corresponding key
+        const escape = ('' + row[key.replace(' ', '')]).replace(/"/g, '\\"');
+        return `"${escape}"`;
+      });
+      csvRows.push(values.join(','));
+    });
+
+    const csvString = csvRows.join('\n');
+    const blob = new Blob([csvString], { type: 'text/csv' });
+    const url = URL.createObjectURL(blob);
+
+    const link = document.createElement('a');
+    link.href = url;
+    link.download = `export_${new Date().toISOString()}.csv`;
+    link.click();
+
+    URL.revokeObjectURL(url);
+  }
+
+  async downloadAll(): Promise<void> {
+    const querySnapshot = await getDocs(this.scoresCollection);
+    const scoresData: any[] = [];
+
+    querySnapshot.forEach((doc) => {
+      scoresData.push({ id: doc.id, ...doc.data() });
+    });
+
+    const csvRows = [];
+
+    let filteredData = scoresData
+      .map(row => ({
+        publicKey: row.publicKey,
+        score: row.score + ' / ' + this.maxScoreRightNow,
+        uptime: this.getUptimePercentage(row.score, this.maxScoreRightNow) + '%',
+        uptimeTime: row.score,
+        producedBlocks: row.blocksProduced,
+        lastUpdated: toReadableDate(row.lastUpdated * ONE_THOUSAND),
+      }));
+    filteredData = [...filteredData].sort((a, b) => b.uptimeTime - a.uptimeTime);
+
+    const headers = ['publicKey', 'score', 'uptime', /*'lastUpdated',*/ 'producedBlocks'].map(header => this.camelCaseToTitle(header));
+    csvRows.push(headers.join(','));
+
+    // Map rows
+    filteredData.forEach((row: any) => {
+      const values = headers.map(header => {
+        const key = header.charAt(0).toLowerCase() + header.slice(1); // Convert to corresponding key
+        const escape = ('' + row[key.replace(' ', '')]).replace(/"/g, '\\"');
+        return `"${escape}"`;
+      });
+      csvRows.push(values.join(','));
+    });
+
+    const csvString = csvRows.join('\n');
+    const blob = new Blob([csvString], { type: 'text/csv' });
+    const url = URL.createObjectURL(blob);
+
+    const link = document.createElement('a');
+    link.href = url;
+    link.download = `export_${new Date().toISOString().replace(/:/g, '-')}.csv`;
+    link.click();
+
+    URL.revokeObjectURL(url);
+  }
 }
diff --git a/frontend/src/app/features/web-node/web-node-file-upload/web-node-file-upload.component.html b/frontend/src/app/features/web-node/web-node-file-upload/web-node-file-upload.component.html
index 0d5208f617..405b46d0ed 100644
--- a/frontend/src/app/features/web-node/web-node-file-upload/web-node-file-upload.component.html
+++ b/frontend/src/app/features/web-node/web-node-file-upload/web-node-file-upload.component.html
@@ -9,7 +9,7 @@

Set Up Your Web Node

@if (!validFiles) { @if (!error) { -
+
@@ -22,7 +22,7 @@

Set Up Your Web Node

Select configuration file (.zip)
- @@ -31,7 +31,7 @@

Set Up Your Web Node

(change)="onFileSelected($event)" accept=".zip">
-
Upload webnode-account-XY.zip we sent you
+
Upload webnode-account-XY.zip we sent you
} @else {
@@ -68,18 +68,19 @@

Set Up Your Web Node

}
-
-
+  @if (!isLeaderboard) {
+
+
+  }
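Note on the prize logic added in leaderboard.service.ts above: the rules are buried inside an RxJS pipeline, so here is a minimal standalone TypeScript sketch of the same selection rules (not part of the diff; Summary is a hypothetical stand-in for HeartbeatSummary, and maxScore is assumed to be the value read from the maxScore/current Firestore document):

// Sketch only: the prize rules implemented by getHeartbeatsSummaries().
interface Summary {
  publicKey: string;
  isWhale: boolean;          // hard-coded high-stake accounts, excluded from prizes
  score: number;             // heartbeat score from Firestore
  uptimePercentage: number;  // score relative to maxScore, in percent
  blocksProduced: number;
}

function awardPrizes(
  items: Summary[],
  maxScore: number, // assumption: the 'current' value from the maxScore collection
): (Summary & { uptimePrize: boolean; blocksPrize: boolean })[] {
  const eligible = items.filter(i => !i.isWhale);

  // Uptime prize: 5th-best uptime or better among eligible nodes (ties included).
  const byUptime = [...eligible].sort((a, b) => b.uptimePercentage - a.uptimePercentage);
  const fifthPlace = byUptime[4]?.uptimePercentage ?? 0;

  // Blocks prize: most blocks among eligible nodes whose score exceeds one third
  // of the current max score. Math.max of an empty list is -Infinity, so nobody
  // wins when no node clears the threshold.
  const qualifying = eligible.filter(i => i.score > 0.3333 * maxScore);
  const mostBlocks = Math.max(...qualifying.map(i => i.blocksProduced));

  return items.map(i => ({
    ...i,
    uptimePrize: !i.isWhale && i.uptimePercentage >= fifthPlace,
    blocksPrize: !i.isWhale && i.blocksProduced === mostBlocks,
  }));
}

The two whale keys are also pushed to the bottom of the table by the sortHeartbeats change in leaderboard.reducer.ts, so exclusion from prizes and exclusion from ranking stay consistent.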
diff --git a/frontend/src/app/features/web-node/web-node-file-upload/web-node-file-upload.component.scss b/frontend/src/app/features/web-node/web-node-file-upload/web-node-file-upload.component.scss index d84b38c533..ca69800201 100644 --- a/frontend/src/app/features/web-node/web-node-file-upload/web-node-file-upload.component.scss +++ b/frontend/src/app/features/web-node/web-node-file-upload/web-node-file-upload.component.scss @@ -8,6 +8,10 @@ } } +::ng-deep body.light .header { + filter: invert(1); +} + .content { height: calc(100% - 56px); max-width: 391px; @@ -42,7 +46,7 @@ } &.special-node-bg { - background-color: $special-node; + background-color: $success-container; } .close-file:hover { diff --git a/frontend/src/app/features/web-node/web-node-file-upload/web-node-file-upload.component.ts b/frontend/src/app/features/web-node/web-node-file-upload/web-node-file-upload.component.ts index b8425a6323..9e2a167307 100644 --- a/frontend/src/app/features/web-node/web-node-file-upload/web-node-file-upload.component.ts +++ b/frontend/src/app/features/web-node/web-node-file-upload/web-node-file-upload.component.ts @@ -30,6 +30,7 @@ export class WebNodeFileUploadComponent extends ManualDetection { validFiles: boolean = false; error: boolean = false; uploadedFileName: string; + isLeaderboard: boolean = CONFIG.showLeaderboard; constructor(private webnodeService: WebNodeService) { super(); } @@ -51,10 +52,10 @@ export class WebNodeFileUploadComponent extends ManualDetection { onFileSelected(event: any): void { this.processZipFile(event.target.files[0]).then(files => { - const publicKey = files.find(f => f.name.includes('.pub'))?.content; + const publicKey = files.find(f => f.name.endsWith('.pub'))?.content; const password = files.find(f => f.name.includes('password'))?.content.replace(/\r?\n|\r/g, ''); - const stake = files.find(f => f.name.includes('stake') && !f.name.includes('.pub'))?.content; - if (this.error || !publicKey || !password || !stake) { + const stake = files.find(f => !f.name.includes('password') && !f.name.endsWith('.pub'))?.content; + if (this.error || !publicKey || !stake) { this.error = true; } else { this.webnodeService.privateStake = { publicKey, password, stake: JSON.parse(stake) }; diff --git a/frontend/src/app/layout/server-status/server-status.component.html b/frontend/src/app/layout/server-status/server-status.component.html index 0198d5a15a..9cc8bd21a3 100644 --- a/frontend/src/app/layout/server-status/server-status.component.html +++ b/frontend/src/app/layout/server-status/server-status.component.html @@ -2,21 +2,25 @@
@if (!switchForbidden && !hideNodeStats && !isMobile) { -
- blur_circular -
{{ details.transactions }} Tx{{ details.transactions | plural }}
-
{{ details.snarks }} SNARK{{ details.snarks | plural }}
-
-
- language -
{{ details.peersConnected }} Peer{{ details.peersConnected | plural }}
-
+ @if (!hideTx) { +
+ blur_circular +
{{ details.transactions }} Tx{{ details.transactions | plural }}
+
{{ details.snarks }} SNARK{{ details.snarks | plural }}
+
+ } + @if (!hidePeers) { +
+ language +
{{ details.peersConnected }} Peer{{ details.peersConnected | plural }}
+
+ } }
; diff --git a/frontend/src/app/layout/toolbar/toolbar.component.html b/frontend/src/app/layout/toolbar/toolbar.component.html index 0bdd8f4ad4..cbe14e39be 100644 --- a/frontend/src/app/layout/toolbar/toolbar.component.html +++ b/frontend/src/app/layout/toolbar/toolbar.component.html @@ -10,10 +10,13 @@ }
-
- @if (!isMobile || (isMobile && errors.length)) { +
+ @if (!isMobile) { } + @if (showUptime) { + + }
@if (haveNextBP && !isAllNodesPage) { diff --git a/frontend/src/app/layout/toolbar/toolbar.component.scss b/frontend/src/app/layout/toolbar/toolbar.component.scss index 71c8f6be53..fab767af7d 100644 --- a/frontend/src/app/layout/toolbar/toolbar.component.scss +++ b/frontend/src/app/layout/toolbar/toolbar.component.scss @@ -4,6 +4,9 @@ height: 40px; @media (max-width: 767px) { height: 96px; + &.uptime { + height: 130px; + } } } @@ -50,6 +53,13 @@ } } } + + .pills-holder { + &.is-mobile { + width: 100%; + flex-direction: column !important; + } + } } @keyframes loading { diff --git a/frontend/src/app/layout/toolbar/toolbar.component.ts b/frontend/src/app/layout/toolbar/toolbar.component.ts index 4c66ec1860..8d07dfdad3 100644 --- a/frontend/src/app/layout/toolbar/toolbar.component.ts +++ b/frontend/src/app/layout/toolbar/toolbar.component.ts @@ -1,5 +1,5 @@ -import { ChangeDetectionStrategy, Component, ElementRef, OnInit, ViewChild } from '@angular/core'; -import { filter, map } from 'rxjs'; +import { ChangeDetectionStrategy, Component, ElementRef, HostBinding, OnInit, ViewChild } from '@angular/core'; +import { catchError, filter, map, of, switchMap, timer } from 'rxjs'; import { AppSelectors } from '@app/app.state'; import { getMergedRoute, hasValue, MergedRoute, removeParamsFromURL, TooltipService } from '@openmina/shared'; import { AppMenu } from '@shared/types/app/app-menu.type'; @@ -10,6 +10,9 @@ import { selectErrorPreviewErrors } from '@error-preview/error-preview.state'; import { MinaError } from '@shared/types/error-preview/mina-error.type'; import { AppNodeStatus } from '@shared/types/app/app-node-details.type'; import { Routes } from '@shared/enums/routes.enum'; +import { CONFIG } from '@shared/constants/config'; +import { LeaderboardService } from '@leaderboard/leaderboard.service'; +import { untilDestroyed } from '@ngneat/until-destroy'; @Component({ selector: 'mina-toolbar', @@ -26,6 +29,9 @@ export class ToolbarComponent extends StoreDispatcher implements OnInit { haveNextBP: boolean; isAllNodesPage: boolean; + @HostBinding('class.uptime') + showUptime: boolean = CONFIG.showLeaderboard; + @ViewChild('loadingRef') private loadingRef: ElementRef; constructor(private tooltipService: TooltipService) { super(); } diff --git a/frontend/src/app/layout/uptime-pill/uptime-pill.component.html b/frontend/src/app/layout/uptime-pill/uptime-pill.component.html new file mode 100644 index 0000000000..bd5c83e924 --- /dev/null +++ b/frontend/src/app/layout/uptime-pill/uptime-pill.component.html @@ -0,0 +1,6 @@ +
+ beenhere +
Uptime {{ uptime.uptimePercentage }}% {{ uptime.uptimeTime }}
+
diff --git a/frontend/src/app/layout/uptime-pill/uptime-pill.component.scss b/frontend/src/app/layout/uptime-pill/uptime-pill.component.scss new file mode 100644 index 0000000000..ab83fde521 --- /dev/null +++ b/frontend/src/app/layout/uptime-pill/uptime-pill.component.scss @@ -0,0 +1,63 @@ +@import 'openmina'; + + +.chip { + gap: 4px; + background-color: $base-surface; + + &::before { + content: ''; + position: absolute; + top: 0; + left: 0; + width: 100%; + height: 100%; + border-radius: 6px; + background-color: $success-container; + } + + div { + color: $success-primary; + } + + span { + color: $success-secondary; + + &.mina-icon { + color: $success-primary; + } + } + + &.aware { + &::before { + background-color: $aware-container; + } + + div { + color: $aware-primary; + } + + span { + color: $aware-secondary; + + &.mina-icon { + display: none; + color: $aware-primary; + } + } + } + + @media (max-width: 767px) { + width: 100%; + margin-bottom: 5px; + font-size: 12px; + + .mina-icon { + display: none; + } + + &.h-sm { + height: 32px !important; + } + } +} diff --git a/frontend/src/app/layout/uptime-pill/uptime-pill.component.ts b/frontend/src/app/layout/uptime-pill/uptime-pill.component.ts new file mode 100644 index 0000000000..b2f8ff0d97 --- /dev/null +++ b/frontend/src/app/layout/uptime-pill/uptime-pill.component.ts @@ -0,0 +1,45 @@ +import { ChangeDetectionStrategy, Component, OnInit } from '@angular/core'; +import { catchError, mergeMap, of, switchMap, timer } from 'rxjs'; +import { UntilDestroy, untilDestroyed } from '@ngneat/until-destroy'; +import { LeaderboardService } from '@leaderboard/leaderboard.service'; +import { ManualDetection, OpenminaEagerSharedModule } from '@openmina/shared'; +import { sendSentryEvent } from '@shared/helpers/webnode.helper'; + +@UntilDestroy() +@Component({ + selector: 'mina-uptime-pill', + standalone: true, + imports: [ + OpenminaEagerSharedModule, + ], + templateUrl: './uptime-pill.component.html', + styleUrl: './uptime-pill.component.scss', + changeDetection: ChangeDetectionStrategy.OnPush, +}) +export class UptimePillComponent extends ManualDetection implements OnInit { + + uptime: { uptimePercentage: number, uptimeTime: string } = { uptimePercentage: 0, uptimeTime: '' }; + + constructor(private leaderboardService: LeaderboardService) { super(); } + + ngOnInit(): void { + this.listenToUptime(); + } + + private listenToUptime(): void { + timer(0, 60000).pipe( + mergeMap(() => this.leaderboardService.getUptime()), + catchError(err => { + sendSentryEvent(err.message); + return of({ + uptimePercentage: 0, + uptimeTime: '', + }); + }), + untilDestroyed(this), + ).subscribe(uptime => { + this.uptime = uptime; + this.detect(); + }); + } +} diff --git a/frontend/src/app/shared/guards/landing-page.guard.ts b/frontend/src/app/shared/guards/landing-page.guard.ts new file mode 100644 index 0000000000..d381a585a7 --- /dev/null +++ b/frontend/src/app/shared/guards/landing-page.guard.ts @@ -0,0 +1,46 @@ +import { CanActivateFn, Router } from '@angular/router'; +import { inject } from '@angular/core'; +import { Store } from '@ngrx/store'; +import { map, take } from 'rxjs/operators'; +import { CONFIG } from '@shared/constants/config'; +import { getMergedRoute } from '@openmina/shared'; +import { Routes } from '@shared/enums/routes.enum'; + +let isFirstLoad = true; + +export const landingPageGuard: CanActivateFn = (route, state) => { + + if (!isFirstLoad || !CONFIG.showWebNodeLandingPage) { + return true; + } + const router = inject(Router); + const store = 
inject(Store); + isFirstLoad = false; + + return store.select(getMergedRoute).pipe( + take(1), + map(route => { + if (!route) return true; + + const startsWith = (path: string) => route.url.startsWith(path); + + if ( + startsWith('/dashboard') || + startsWith('/block-production') || + startsWith('/state') || + startsWith('/mempool') || + startsWith('/loading-web-node') + ) { + return router.createUrlTree([Routes.LOADING_WEB_NODE], { + queryParamsHandling: 'preserve', + }); + } + + if (!startsWith('/') && !startsWith('/?') && !startsWith('/leaderboard')) { + return router.createUrlTree(['']); + } + + return true; + }), + ); +}; diff --git a/frontend/src/app/shared/helpers/date.helper.ts b/frontend/src/app/shared/helpers/date.helper.ts index 2dc15940dd..97243ff87a 100644 --- a/frontend/src/app/shared/helpers/date.helper.ts +++ b/frontend/src/app/shared/helpers/date.helper.ts @@ -98,3 +98,14 @@ export function getElapsedTime(timeInSeconds: number): string { return `${minutes}m ${seconds}s`; } + +export function getElapsedTimeInMinsAndHours(timeInMinutes: number): string { + if (timeInMinutes < 60) { + return `${timeInMinutes}m`; + } + + const hours = Math.floor(timeInMinutes / 60); + const minutes = timeInMinutes % 60; + + return `${hours}h ${minutes}m`; +} diff --git a/frontend/src/app/shared/types/core/environment/mina-env.type.ts b/frontend/src/app/shared/types/core/environment/mina-env.type.ts index febcec9c4b..06d4dd4ab6 100644 --- a/frontend/src/app/shared/types/core/environment/mina-env.type.ts +++ b/frontend/src/app/shared/types/core/environment/mina-env.type.ts @@ -6,6 +6,9 @@ export interface MinaEnv { hideNodeStats?: boolean; canAddNodes?: boolean; showWebNodeLandingPage?: boolean; + showLeaderboard?: boolean; + hidePeersPill?: boolean; + hideTxPill?: boolean; sentry?: { dsn: string; tracingOrigins: string[]; @@ -14,6 +17,7 @@ export interface MinaEnv { features?: FeaturesConfig; graphQL?: string; firebase?: any; + heartbeats?: boolean; }; } diff --git a/frontend/src/app/shared/types/leaderboard/heartbeat-summary.type.ts b/frontend/src/app/shared/types/leaderboard/heartbeat-summary.type.ts index a134981c2f..20165eaa92 100644 --- a/frontend/src/app/shared/types/leaderboard/heartbeat-summary.type.ts +++ b/frontend/src/app/shared/types/leaderboard/heartbeat-summary.type.ts @@ -1,8 +1,10 @@ export interface HeartbeatSummary { publicKey: string; - isActive: boolean; + isWhale: boolean; uptimePercentage: number; blocksProduced: number; uptimePrize: boolean; blocksPrize: boolean; + score: number; + maxScore: number; } diff --git a/frontend/src/assets/environments/leaderboard.js b/frontend/src/assets/environments/leaderboard.js index 904531dd35..fda81bbe3a 100644 --- a/frontend/src/assets/environments/leaderboard.js +++ b/frontend/src/assets/environments/leaderboard.js @@ -6,12 +6,14 @@ export default { production: true, canAddNodes: false, showWebNodeLandingPage: true, + showLeaderboard: true, + hidePeersPill: true, + hideTxPill: true, globalConfig: { features: { 'dashboard': [], 'block-production': ['won-slots'], 'mempool': [], - 'benchmarks': ['wallets'], 'state': ['actions'], }, firebase: { @@ -23,11 +25,12 @@ export default { appId: '1:1016673359357:web:bbd2cbf3f031756aec7594', measurementId: 'G-ENDBL923XT', }, + heartbeats: true, + }, + sentry: { + dsn: 'https://4f225bda3dec1b73074b07923ecc1d1a@o4508216158584832.ingest.de.sentry.io/4508817332109392', + tracingOrigins: ['https://www.openmina.com', 'webnode-gtm-test.firebaseapp.com', 'webnode-gtm-test.firebasestorage.app', 
'webnode.minaprotocol.com'], }, - // sentry: { - // dsn: 'https://69aba72a6290383494290cf285ab13b3@o4508216158584832.ingest.de.sentry.io/4508216160616528', - // tracingOrigins: ['https://www.openmina.com', 'webnode-gtm-test.firebaseapp.com', 'webnode-gtm-test.firebasestorage.app'], - // }, configs: [ { name: 'Web Node', diff --git a/frontend/src/assets/environments/webnode.js b/frontend/src/assets/environments/webnode.js index 14ed05ebc1..99848ea84f 100644 --- a/frontend/src/assets/environments/webnode.js +++ b/frontend/src/assets/environments/webnode.js @@ -10,9 +10,6 @@ export default { features: { 'dashboard': [], 'block-production': ['won-slots'], - 'mempool': [], - 'benchmarks': ['wallets'], - 'state': ['actions'], }, firebase: { 'projectId': 'openminawebnode', diff --git a/frontend/src/assets/styles/leaderboard-variables.scss b/frontend/src/assets/styles/leaderboard-variables.scss index 63c4f54c84..51cef88c4f 100644 --- a/frontend/src/assets/styles/leaderboard-variables.scss +++ b/frontend/src/assets/styles/leaderboard-variables.scss @@ -19,6 +19,7 @@ $mina-brand-cyan: #31cdea; $mina-brand-lilac: #e2dfff; $mina-brand-peony: #f1dceb; $mina-brand-gray: #d9d9d9; +$mina-brand-orchid: #B4448D; $mina-brand-gradient: linear-gradient(272deg, #b6eeff 2.39%, #f7f5ff 25.39%, #d7c4fa 48.39%, #f4c0da 71.4%, #ffc4a4 94.4%); $mina-brand-gradient-reversed: linear-gradient(272deg, #ffc4a4 2.39%, #f4c0da 25.39%, #d7c4fa 48.39%, #f7f5ff 71.4%, #b6eeff 94.4%); diff --git a/frontend/src/environments/environment.prod.ts b/frontend/src/environments/environment.prod.ts index 4325e3ef25..f1f0b8ef10 100644 --- a/frontend/src/environments/environment.prod.ts +++ b/frontend/src/environments/environment.prod.ts @@ -10,5 +10,8 @@ export const environment: Readonly = { hideToolbar: env.hideToolbar, canAddNodes: env.canAddNodes, showWebNodeLandingPage: env.showWebNodeLandingPage, + showLeaderboard: env.showLeaderboard, + hidePeersPill: env.hidePeersPill, + hideTxPill: env.hideTxPill, sentry: env.sentry, }; diff --git a/frontend/src/environments/environment.ts b/frontend/src/environments/environment.ts index 27fb7cddd8..2f12739379 100644 --- a/frontend/src/environments/environment.ts +++ b/frontend/src/environments/environment.ts @@ -4,7 +4,10 @@ export const environment: Readonly = { production: false, identifier: 'Dev FE', canAddNodes: true, - showWebNodeLandingPage: true, + showWebNodeLandingPage: false, + showLeaderboard: false, + hidePeersPill: true, + hideTxPill: true, globalConfig: { features: { dashboard: [], @@ -27,6 +30,7 @@ export const environment: Readonly = { appId: '1:1016673359357:web:bbd2cbf3f031756aec7594', measurementId: 'G-ENDBL923XT', }, + heartbeats: true, graphQL: 'https://adonagy.com/graphql', // graphQL: 'https://api.minascan.io/node/devnet/v1/graphql', // graphQL: 'http://65.109.105.40:5000/graphql', @@ -48,14 +52,14 @@ export const environment: Readonly = { // name: 'Producer-2', // url: 'https://staging-devnet-openmina-bp-2-dashboard.minaprotocol.network', // }, - // { - // name: 'staging-devnet-bp-0', - // url: 'https://staging-devnet-openmina-bp-0.minaprotocol.network', - // }, - // { - // name: 'staging-devnet-bp-1', - // url: 'https://staging-devnet-openmina-bp-1.minaprotocol.network', - // }, + { + name: 'staging-devnet-bp-0', + url: 'https://staging-devnet-openmina-bp-0.minaprotocol.network', + }, + { + name: 'staging-devnet-bp-1', + url: 'https://staging-devnet-openmina-bp-1.minaprotocol.network', + }, // { // name: 'staging-devnet-bp-2', // url: 
'https://staging-devnet-openmina-bp-2.minaprotocol.network', @@ -64,10 +68,10 @@ export const environment: Readonly = { // name: 'staging-devnet-bp-3', // url: 'https://staging-devnet-openmina-bp-3.minaprotocol.network', // }, - { - name: 'Web Node 1', - isWebNode: true, - }, + // { + // name: 'Web Node 1', + // isWebNode: true, + // }, // { // name: 'http://65.109.105.40:3000', // url: 'http://65.109.105.40:3000', @@ -87,46 +91,41 @@ export const environment: Readonly = { // resources: ['memory'], // }, // }, - { - name: 'Docker 11010', - url: 'http://localhost:11010', - }, - { - name: 'Docker 11012', - url: 'http://localhost:11012', - }, - { - name: 'Docker 11014', - url: 'http://localhost:11014', - }, + // { + // name: 'Docker 11010', + // url: 'http://localhost:11010', + // }, + // { + // name: 'Docker 11012', + // url: 'http://localhost:11012', + // }, + // { + // name: 'Docker 11014', + // url: 'http://localhost:11014', + // }, // { // name: 'Producer', // url: 'http://65.109.105.40:3000', - // minaExplorerNetwork: 'devnet', // memoryProfiler: 'http://1.k8.openmina.com:31164', // }, // { // name: 'http://65.109.110.75:3000', // url: 'http://65.109.110.75:3000', - // minaExplorerNetwork: 'devnet', // memoryProfiler: 'http://1.k8.openmina.com:31164', // }, // { // name: 'http://65.109.110.75:11010', // url: 'http://65.109.110.75:11010', - // minaExplorerNetwork: 'devnet', // memoryProfiler: 'http://1.k8.openmina.com:31164', // }, // { // name: 'http://65.109.110.75:11012', // url: 'http://65.109.110.75:11012', - // minaExplorerNetwork: 'devnet', // memoryProfiler: 'http://1.k8.openmina.com:31164', // }, // { // name: 'http://65.109.110.75:11014', // url: 'http://65.109.110.75:11014', - // minaExplorerNetwork: 'devnet', // memoryProfiler: 'http://1.k8.openmina.com:31164', // }, // { diff --git a/frontend/src/index.html b/frontend/src/index.html index e627af2266..65e3742455 100644 --- a/frontend/src/index.html +++ b/frontend/src/index.html @@ -4,7 +4,7 @@ - diff --git a/frontend/src/main.ts b/frontend/src/main.ts index 618cc3ffbe..4463fcabf1 100644 --- a/frontend/src/main.ts +++ b/frontend/src/main.ts @@ -5,6 +5,8 @@ import { CONFIG } from '@shared/constants/config'; import * as Sentry from '@sentry/angular'; import type { ErrorEvent } from '@sentry/types/build/types/event'; +// (window as any).FIREBASE_APPCHECK_DEBUG_TOKEN = 'A4B7AFB5-1534-4D5A-A02E-BCF5847C07F0'; + if (CONFIG.production) { initSentry(); } diff --git a/fuzzer/Cargo.toml b/fuzzer/Cargo.toml index f7ec98841d..17ec2d0678 100644 --- a/fuzzer/Cargo.toml +++ b/fuzzer/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "openmina-fuzzer" -version = "0.14.0" +version = "0.16.0" edition = "2021" license = "Apache-2.0" diff --git a/ledger/Cargo.toml b/ledger/Cargo.toml index b13cb156df..f254c9346e 100644 --- a/ledger/Cargo.toml +++ b/ledger/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "mina-tree" -version = "0.14.0" +version = "0.16.0" edition = "2021" license = "Apache-2.0" diff --git a/ledger/src/account/account.rs b/ledger/src/account/account.rs index d566261a77..1c4fb09b7a 100644 --- a/ledger/src/account/account.rs +++ b/ledger/src/account/account.rs @@ -27,10 +27,7 @@ use crate::{ }, witness::Witness, }, - scan_state::{ - currency::{Balance, Magnitude, Nonce, Slot, TxnVersion}, - transaction_logic::account_min_balance_at_slot, - }, + scan_state::currency::{Amount, Balance, Magnitude, Nonce, Slot, TxnVersion}, zkapps::snark::FlaggedOption, AppendToInputs as _, MerklePath, MyCow, ToInputs, }; @@ -1079,6 +1076,10 @@ impl AccountId { } } 
+    pub fn new_with_default_token(public_key: CompressedPubKey) -> Self {
+        Self::new(public_key, TokenId::default())
+    }
+
     pub fn create(public_key: CompressedPubKey, token_id: TokenId) -> Self {
         Self::new(public_key, token_id)
     }
@@ -1382,21 +1383,8 @@ impl Account {
     pub fn has_locked_tokens(&self, global_slot: Slot) -> bool {
         match self.timing {
             Timing::Untimed => false,
-            Timing::Timed {
-                initial_minimum_balance,
-                cliff_time,
-                cliff_amount,
-                vesting_period,
-                vesting_increment,
-            } => {
-                let curr_min_balance = account_min_balance_at_slot(
-                    global_slot,
-                    cliff_time,
-                    cliff_amount,
-                    vesting_period,
-                    vesting_increment,
-                    initial_minimum_balance,
-                );
+            Timing::Timed { .. } => {
+                let curr_min_balance = self.min_balance_at_slot(global_slot);

                 !curr_min_balance.is_zero()
             }
@@ -1434,26 +1422,68 @@ impl Account {
     pub fn liquid_balance_at_slot(&self, global_slot: Slot) -> Balance {
         match self.timing {
             Timing::Untimed => self.balance,
+            Timing::Timed { .. } => self
+                .balance
+                .sub_amount(self.min_balance_at_slot(global_slot).to_amount())
+                .unwrap(),
+        }
+    }
+
+    pub fn min_balance_at_slot(&self, global_slot: Slot) -> Balance {
+        match self.timing {
+            Timing::Untimed => Balance::zero(),
             Timing::Timed {
                 initial_minimum_balance,
                 cliff_time,
                 cliff_amount,
                 vesting_period,
                 vesting_increment,
-            } => self
-                .balance
-                .sub_amount(
-                    account_min_balance_at_slot(
-                        global_slot,
-                        cliff_time,
-                        cliff_amount,
-                        vesting_period,
-                        vesting_increment,
-                        initial_minimum_balance,
-                    )
-                    .to_amount(),
-                )
-                .unwrap(),
+            } => {
+                if global_slot < cliff_time {
+                    initial_minimum_balance
+                } else if vesting_period.is_zero() {
+                    // If vesting period is zero then everything vests immediately at the cliff
+                    Balance::zero()
+                } else {
+                    match initial_minimum_balance.sub_amount(cliff_amount) {
+                        None => Balance::zero(),
+                        Some(min_balance_past_cliff) => {
+                            // take advantage of fact that global slots are uint32's
+                            let num_periods = (global_slot.as_u32() - cliff_time.as_u32())
+                                / vesting_period.as_u32();
+                            let num_periods: u64 = num_periods.into();
+
+                            let vesting_decrement = {
+                                let vesting_increment = vesting_increment.as_u64();
+
+                                if u64::MAX
+                                    .checked_div(num_periods)
+                                    .map(|res| {
+                                        matches!(
+                                            res.cmp(&vesting_increment),
+                                            std::cmp::Ordering::Less
+                                        )
+                                    })
+                                    .unwrap_or(false)
+                                {
+                                    // The vesting decrement will overflow, use [max_int] instead.
+                                    Amount::from_u64(u64::MAX)
+                                } else {
+                                    Amount::from_u64(
+                                        num_periods.checked_mul(vesting_increment).unwrap(),
+                                    )
+                                }
+                            };
+
+                            match min_balance_past_cliff.sub_amount(vesting_decrement) {
+                                None => Balance::zero(),
+                                Some(amount) => amount,
+                            }
+                        }
+                    }
+                }
+            }
         }
     }
diff --git a/ledger/src/base.rs b/ledger/src/base.rs
index 3df762bbd5..a204982a74 100644
--- a/ledger/src/base.rs
+++ b/ledger/src/base.rs
@@ -300,7 +300,12 @@ impl LedgerIntf for Mask {
     /// public API of this module.
     /// This should *NOT* be used to create a ledger for other purposes.
     fn create_masked(&self) -> Self {
-        let mask = Mask::new_unattached(self.depth() as usize);
+        let mut mask = Mask::new_unattached(self.depth() as usize);
+
+        if self.has_token_owners() {
+            mask.set_token_owners();
+        }
+
         // We don't register the mask here. This is only used in transaction logic,
         // where we don't want to unregister. Transaction logic is also
         // synchronous, so we don't need to worry that our mask will be reparented.
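For reference, the min_balance_at_slot method inlined into account.rs above computes the standard Mina vesting schedule; restated in math form (a reconstruction of the diff's own logic, writing x \mathbin{\dot-} y for saturating subtraction, which the None => Balance::zero() branches implement):

\[
\mathrm{min\_balance}(s) =
\begin{cases}
  \mathrm{imb} & \text{if } s < \mathrm{cliff} \\
  0 & \text{if } s \ge \mathrm{cliff} \text{ and } \mathrm{period} = 0 \\
  (\mathrm{imb} \mathbin{\dot-} \mathrm{cliff\_amount}) \mathbin{\dot-} \left\lfloor \frac{s - \mathrm{cliff}}{\mathrm{period}} \right\rfloor \cdot \mathrm{increment} & \text{otherwise}
\end{cases}
\]

where imb is initial_minimum_balance and s is the global slot. The u64::MAX guard in the code caps the decrement term when the multiplication would overflow, and saturating subtraction then clamps the result to zero, which lets has_locked_tokens and liquid_balance_at_slot both be defined in terms of this single function.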
diff --git a/ledger/src/database/database.rs b/ledger/src/database/database.rs index 7759b85465..00d40ac8d5 100644 --- a/ledger/src/database/database.rs +++ b/ledger/src/database/database.rs @@ -61,6 +61,20 @@ impl Database { Self::create_with_dir(depth, None) } + pub fn create_with_token_owners(depth: u8) -> Self { + let mut db = Self::create_with_dir(depth, None); + db.set_token_owners(); + db + } + + pub fn set_token_owners(&mut self) { + self.with(|this| this.set_token_owners()); + } + + pub fn unset_token_owners(&mut self) { + self.with(|this| this.unset_token_owners()); + } + pub fn root_hash(&mut self) -> Fp { self.with(|this| this.root_hash()) } @@ -105,6 +119,10 @@ impl Database { self.with(|this| this.transfert_hashes(hashes)) } + pub fn has_token_owners(&self) -> bool { + self.with(|this| this.has_token_owners()) + } + #[cfg(test)] pub fn test_matrix(&self) -> HashesMatrix { self.with(|this| this.hashes_matrix.clone()) diff --git a/ledger/src/database/database_impl.rs b/ledger/src/database/database_impl.rs index 369a8ce2c3..3f9b2a7569 100644 --- a/ledger/src/database/database_impl.rs +++ b/ledger/src/database/database_impl.rs @@ -6,7 +6,6 @@ use std::{ use mina_hasher::Fp; use mina_signer::CompressedPubKey; -use openmina_core::IS_ARCHIVE; use crate::{ next_uuid, Account, AccountId, AccountIndex, AccountLegacy, Address, AddressIterator, @@ -279,6 +278,10 @@ impl DatabaseImpl { pub fn transfert_hashes(&mut self, hashes: HashesMatrix) { self.hashes_matrix.transfert_hashes(hashes) } + + pub fn has_token_owners(&self) -> bool { + self.token_owners.is_some() + } } impl DatabaseImpl { @@ -311,11 +314,12 @@ impl DatabaseImpl { } impl DatabaseImpl { + const NACCOUNTS: usize = 10_000; + const NTOKENS: usize = 10; + pub fn create_with_dir(depth: u8, dir_name: Option) -> Self { assert!((1..0xfe).contains(&depth)); - const NACCOUNTS: usize = 10_000; - let uuid = next_uuid(); let path = match dir_name { @@ -339,19 +343,13 @@ impl DatabaseImpl { // std::fs::create_dir_all(&path).ok(); - let token_owners = if IS_ARCHIVE.get().cloned().unwrap_or_default() { - Some(HashMap::with_capacity(NACCOUNTS)) - } else { - None - }; - Self { depth, - accounts: Vec::with_capacity(NACCOUNTS), + accounts: Vec::with_capacity(Self::NACCOUNTS), last_location: None, naccounts: 0, - id_to_addr: HashMap::with_capacity(NACCOUNTS), - token_owners, + id_to_addr: HashMap::with_capacity(Self::NACCOUNTS), + token_owners: None, uuid, directory: path, hashes_matrix: HashesMatrix::new(depth as usize), @@ -363,6 +361,22 @@ impl DatabaseImpl { Self::create_with_dir(depth, None) } + pub fn create_with_token_owners(depth: u8) -> Self { + let mut db = Self::create_with_dir(depth, None); + db.set_token_owners(); + db + } + + pub fn set_token_owners(&mut self) { + if self.token_owners.is_none() { + self.token_owners = Some(HashMap::with_capacity(Self::NTOKENS)); + } + } + + pub fn unset_token_owners(&mut self) { + self.token_owners = None; + } + pub fn root_hash(&mut self) -> Fp { self.emulate_tree_to_get_hash_at(Address::root()) } diff --git a/ledger/src/mask/mask.rs b/ledger/src/mask/mask.rs index 28aacb4b60..8d85785852 100644 --- a/ledger/src/mask/mask.rs +++ b/ledger/src/mask/mask.rs @@ -7,8 +7,6 @@ use std::{ use mina_hasher::Fp; use mina_signer::CompressedPubKey; -use openmina_core::IS_ARCHIVE; - use crate::{ account::{Account, AccountId, TokenId}, address::Address, @@ -99,16 +97,10 @@ impl Mask { pub fn new_unattached(depth: usize) -> Self { let uuid = next_uuid(); - let is_archive = 
IS_ARCHIVE.get().cloned().unwrap_or_default(); - let mask = Self { inner: Arc::new(Mutex::new(MaskImpl::Unattached { owning_account: Default::default(), - token_owners: if is_archive { - Some(Default::default()) - } else { - None - }, + token_owners: Default::default(), id_to_addr: Default::default(), last_location: None, depth: depth as u8, @@ -127,8 +119,26 @@ impl Mask { Self::new_root(Database::create(depth as u8)) } + pub fn create_with_token_owners(depth: usize) -> Self { + Self::new_root(Database::create_with_token_owners(depth as u8)) + } + + pub fn set_token_owners(&mut self) { + self.with(|this| this.set_token_owners()); + } + + // Note: This should be only called on startup + pub fn unset_token_owners(&mut self) { + self.with(|this| this.unset_token_owners()); + } + pub fn make_child(&self) -> Mask { - let new_mask = Mask::new_unattached(self.depth() as usize); + let mut new_mask = Mask::new_unattached(self.depth() as usize); + + if self.has_token_owners() { + new_mask.set_token_owners(); + } + self.register_mask(new_mask) } @@ -286,6 +296,9 @@ impl Mask { pub fn set_raw_inner_hashes(&self, hashes: Vec<(u64, Fp)>) { self.with(|this| this.set_raw_inner_hashes(hashes)) } + pub fn has_token_owners(&self) -> bool { + self.with(|this| this.has_token_owners()) + } /// For tests only, check if the address is in the mask, without checking parent #[cfg(test)] diff --git a/ledger/src/mask/mask_impl.rs b/ledger/src/mask/mask_impl.rs index b929df3ae4..a970e6f263 100644 --- a/ledger/src/mask/mask_impl.rs +++ b/ledger/src/mask/mask_impl.rs @@ -215,6 +215,36 @@ impl MaskImpl { !childs.is_empty() } + pub fn set_token_owners(&mut self) { + match self { + Root { database, .. } => database.set_token_owners(), + Attached { token_owners, .. } | Unattached { token_owners, .. } => { + if token_owners.is_none() { + *token_owners = Some(Default::default()); + } + } + } + } + + pub fn unset_token_owners(&mut self) { + match self { + Root { database, .. } => { + database.unset_token_owners(); + } + Attached { token_owners, .. } | Unattached { token_owners, .. } => { + *token_owners = None; + } + } + } + + pub fn has_token_owners(&self) -> bool { + match self { + Root { database, .. } => database.has_token_owners(), + Attached { token_owners, .. } => token_owners.is_some(), + Unattached { token_owners, .. 
} => token_owners.is_some(), + } + } + /// Make `mask` a child of `self` pub fn register_mask(&mut self, self_mask: Mask, mask: Mask) -> Mask { let childs = self.childs(); diff --git a/ledger/src/mask/mod.rs b/ledger/src/mask/mod.rs index 7397d79d8a..97a75911da 100644 --- a/ledger/src/mask/mod.rs +++ b/ledger/src/mask/mod.rs @@ -41,6 +41,9 @@ pub fn alive_len() -> usize { exec(|list| list.len()) } -pub fn alive_collect() -> Vec { +pub fn alive_collect() -> B +where + B: FromIterator, +{ exec(|list| list.iter().cloned().collect()) } diff --git a/ledger/src/port_ocaml/hash.rs b/ledger/src/port_ocaml/hash.rs index 7f5b55cda9..14c05f661e 100644 --- a/ledger/src/port_ocaml/hash.rs +++ b/ledger/src/port_ocaml/hash.rs @@ -22,6 +22,7 @@ impl Default for JaneStreetHasher { } } +#[allow(clippy::precedence)] fn rotl32(x: u32, n: u32) -> u32 { (x) << n | (x) >> (32 - n) } diff --git a/ledger/src/proofs/block.rs b/ledger/src/proofs/block.rs index 0e9e095ecf..46e4e8ee84 100644 --- a/ledger/src/proofs/block.rs +++ b/ledger/src/proofs/block.rs @@ -1,5 +1,6 @@ use std::{rc::Rc, sync::Arc}; +use anyhow::Context; use ark_ff::fields::arithmetic::InvalidBigInt; use consensus::ConsensusState; use mina_curves::pasta::Fq; @@ -51,7 +52,7 @@ use super::{ }, step::{step, InductiveRule, OptFlag, PreviousProofStatement, StepParams, StepProof}, to_field_elements::ToFieldElements, - transaction::{transaction_snark::checked_hash, Check, ProofError, Prover}, + transaction::{transaction_snark::checked_hash, Check, Prover}, witness::Witness, wrap::WrapProof, }; @@ -195,7 +196,7 @@ impl Check for ProtocolState { fn ledger_proof_opt( proof: Option<&v2::LedgerProofProdStableV2>, next_state: &v2::MinaStateProtocolStateValueStableV2, -) -> Result<(Statement, Arc), InvalidBigInt> { +) -> anyhow::Result<(Statement, Arc)> { match proof { Some(proof) => { let statement: Statement = (&proof.0.statement).try_into()?; @@ -216,7 +217,7 @@ fn ledger_proof_opt( fn checked_hash_protocol_state( state: &ProtocolState, w: &mut Witness, -) -> Result<(Fp, Fp), InvalidBigInt> { +) -> anyhow::Result<(Fp, Fp)> { let ProtocolState { previous_state_hash, body, @@ -499,7 +500,7 @@ mod floating_point { res } - pub fn constant(value: &BigInteger256, precision: usize) -> Result { + pub fn constant(value: &BigInteger256, precision: usize) -> anyhow::Result { Ok(Self { value: (*value).try_into()?, precision, @@ -714,7 +715,7 @@ mod vrf { message: Message, prover_state: &v2::ConsensusStakeProofStableV2, w: &mut Witness, - ) -> Result<(Fp, Box), InvalidBigInt> { + ) -> anyhow::Result<(Fp, Box)> { let private_key = prover_state.producer_private_key.to_field::()?; let private_key = w.exists(field_to_bits::(private_key)); @@ -779,15 +780,12 @@ mod vrf { seed: Fp, prover_state: &v2::ConsensusStakeProofStableV2, w: &mut Witness, - ) -> Result< - ( - Boolean, - Fp, - Box<[bool; VRF_OUTPUT_NBITS]>, - Box, - ), - InvalidBigInt, - > { + ) -> anyhow::Result<( + Boolean, + Fp, + Box<[bool; VRF_OUTPUT_NBITS]>, + Box, + )> { let (winner_addr, winner_addr_bits) = { const LEDGER_DEPTH: usize = 35; assert_eq!(constraint_constants().ledger_depth, LEDGER_DEPTH as u64); @@ -1283,7 +1281,7 @@ pub mod consensus { supply_increase: CheckedSigned>, prover_state: &v2::ConsensusStakeProofStableV2, w: &mut Witness, - ) -> Result<(Boolean, CheckedConsensusState), InvalidBigInt> { + ) -> anyhow::Result<(Boolean, CheckedConsensusState)> { let previous_blockchain_state_ledger_hash = prev_state .body .blockchain_state @@ -1535,7 +1533,7 @@ fn genesis_state_hash_checked( state_hash: Fp, 
state: &ProtocolState, w: &mut Witness, -) -> Result { +) -> anyhow::Result { let is_genesis = is_genesis_state_var(&state.body.consensus_state, w); Ok(w.exists_no_check(match is_genesis { @@ -1608,7 +1606,7 @@ fn protocol_create_var( fn block_main<'a>( params: BlockMainParams<'a>, w: &mut Witness, -) -> Result<(Fp, [PreviousProofStatement<'a>; 2]), InvalidBigInt> { +) -> anyhow::Result<(Fp, [PreviousProofStatement<'a>; 2])> { let BlockMainParams { transition, prev_state, @@ -1835,7 +1833,7 @@ const BLOCK_N_PREVIOUS_PROOFS: usize = 2; pub(super) fn generate_block_proof( params: BlockParams, w: &mut Witness, -) -> Result { +) -> anyhow::Result { let BlockParams { input: v2::ProverExtendBlockchainInputStableV2 { @@ -1855,7 +1853,7 @@ pub(super) fn generate_block_proof( } = params; let (txn_snark_statement, txn_snark_proof) = - ledger_proof_opt(ledger_proof.as_deref(), next_state)?; + ledger_proof_opt(ledger_proof.as_deref(), next_state).context("ledger_proof_opt")?; let prev_state_proof = &chain.proof; let (new_state_hash, previous_proof_statements) = block_main( @@ -1870,10 +1868,12 @@ pub(super) fn generate_block_proof( pending_coinbase, }, w, - )?; + ) + .context("block_main")?; let prev_challenge_polynomial_commitments = - extract_recursion_challenges(&[prev_state_proof, &txn_snark_proof])?; + extract_recursion_challenges(&[prev_state_proof, &txn_snark_proof]) + .context("extract_recursion_challenges")?; let rule = InductiveRule { previous_proof_statements, @@ -1918,7 +1918,8 @@ pub(super) fn generate_block_proof( only_verify_constraints, }, w, - )?; + ) + .context("step")?; if let Some(expected) = expected_step_proof { let proof_json = serde_json::to_vec(&proof.proof).unwrap(); @@ -1943,4 +1944,5 @@ pub(super) fn generate_block_proof( }, &mut w, ) + .context("wrap") } diff --git a/ledger/src/proofs/merge.rs b/ledger/src/proofs/merge.rs index bc89c323e1..a1383ea7a5 100644 --- a/ledger/src/proofs/merge.rs +++ b/ledger/src/proofs/merge.rs @@ -29,7 +29,7 @@ use super::{ step::{ extract_recursion_challenges, InductiveRule, OptFlag, PreviousProofStatement, StepProof, }, - transaction::{PlonkVerificationKeyEvals, ProofError, Prover}, + transaction::{PlonkVerificationKeyEvals, Prover}, util::two_u64_to_field, witness::Witness, wrap::WrapProof, @@ -39,7 +39,7 @@ fn merge_main( statement: &Statement, proofs: &[v2::LedgerProofProdStableV2; 2], w: &mut Witness, -) -> Result<(Statement, Statement), InvalidBigInt> { +) -> anyhow::Result<(Statement, Statement)> { let (s1, s2) = w.exists({ let [p1, p2] = proofs; let (s1, s2) = (&p1.0.statement, &p2.0.statement); @@ -216,7 +216,7 @@ const MERGE_N_PREVIOUS_PROOFS: usize = 2; pub(super) fn generate_merge_proof( params: MergeParams, w: &mut Witness, -) -> Result { +) -> anyhow::Result { let MergeParams { statement, proofs, diff --git a/ledger/src/proofs/mod.rs b/ledger/src/proofs/mod.rs index bab126ec6d..879a2639b5 100644 --- a/ledger/src/proofs/mod.rs +++ b/ledger/src/proofs/mod.rs @@ -46,21 +46,17 @@ pub type ProverProof = kimchi::proof::ProverProof< pub fn generate_tx_proof( params: transaction::TransactionParams, -) -> Result { +) -> anyhow::Result { use {mina_hasher::Fp, witness::Witness}; let mut w: Witness = Witness::new::(); transaction::generate_tx_proof(params, &mut w) } -pub fn generate_merge_proof( - params: merge::MergeParams, -) -> Result { +pub fn generate_merge_proof(params: merge::MergeParams) -> anyhow::Result { use {mina_hasher::Fp, witness::Witness}; let mut w: Witness = Witness::new::(); merge::generate_merge_proof(params, &mut w) } 
-pub fn generate_block_proof( - params: block::BlockParams, -) -> Result { +pub fn generate_block_proof(params: block::BlockParams) -> anyhow::Result { use {mina_hasher::Fp, witness::Witness}; let mut w: Witness = Witness::new::(); block::generate_block_proof(params, &mut w) diff --git a/ledger/src/proofs/prover.rs b/ledger/src/proofs/prover.rs index 4121ac5616..d1f8189761 100644 --- a/ledger/src/proofs/prover.rs +++ b/ledger/src/proofs/prover.rs @@ -1,6 +1,5 @@ use std::{borrow::Cow, str::FromStr}; -use ark_ff::fields::arithmetic::InvalidBigInt; use kimchi::{ poly_commitment::PolyComm, proof::{PointEvaluations, ProofEvaluations, ProverCommitments, RecursionChallenge}, @@ -37,11 +36,12 @@ pub fn make_padded_proof_from_p2p( prev_evals: _, // unused proof, }: &PicklesProofProofsVerified2ReprStableV2, -) -> Result, InvalidBigInt> { - let of_coord = - |(a, b): &(BigInt, BigInt)| Ok(Pallas::of_coordinates(a.to_field()?, b.to_field()?)); +) -> anyhow::Result> { + let of_coord = |(a, b): &(BigInt, BigInt)| -> anyhow::Result<_> { + Ok(Pallas::of_coordinates(a.to_field()?, b.to_field()?)) + }; - let make_poly = |poly: &(BigInt, BigInt)| { + let make_poly = |poly: &(BigInt, BigInt)| -> anyhow::Result<_> { Ok(PolyComm { elems: vec![of_coord(poly)?], }) @@ -66,7 +66,7 @@ pub fn make_padded_proof_from_p2p( let lr: Vec<(Pallas, Pallas)> = lr .iter() .map(|(a, b)| Ok((of_coord(a)?, of_coord(b)?))) - .collect::>()?; + .collect::>()?; let delta: Pallas = of_coord(&bulletproof.delta)?; let z1: Fq = bulletproof.z_1.to_field()?; @@ -83,7 +83,7 @@ pub fn make_padded_proof_from_p2p( // }; // let to_fields = |x: &Vec| x.iter().map(BigInt::to_field).collect(); - let to_pt_eval = |(first, second): &(BigInt, BigInt)| { + let to_pt_eval = |(first, second): &(BigInt, BigInt)| -> anyhow::Result<_> { Ok(PointEvaluations { zeta: vec![first.to_field::()?], zeta_omega: vec![second.to_field::()?], @@ -130,7 +130,7 @@ pub fn make_padded_proof_from_p2p( old_bulletproof_challenges.0[1].0.clone(), ]); - let make_poly = |poly: &(BigInt, BigInt)| { + let make_poly = |poly: &(BigInt, BigInt)| -> anyhow::Result<_> { let point = of_coord(poly)?; Ok(PolyComm { elems: vec![point] }) }; diff --git a/ledger/src/proofs/public_input/prepared_statement.rs b/ledger/src/proofs/public_input/prepared_statement.rs index 19bdf23a7d..f84451e83b 100644 --- a/ledger/src/proofs/public_input/prepared_statement.rs +++ b/ledger/src/proofs/public_input/prepared_statement.rs @@ -1,4 +1,4 @@ -use ark_ff::{fields::arithmetic::InvalidBigInt, BigInteger256, Zero}; +use ark_ff::{BigInteger256, Zero}; use mina_curves::pasta::Fq; use mina_hasher::Fp; use mina_p2p_messages::v2::{ @@ -51,7 +51,7 @@ pub struct PreparedStatement { impl PreparedStatement { /// Implementation of `tock_unpadded_public_input_of_statement` /// https://github.com/MinaProtocol/mina/blob/32a91613c388a71f875581ad72276e762242f802/src/lib/pickles/common.ml#L202 - pub fn to_public_input(&self, npublic_input: usize) -> Result, InvalidBigInt> { + pub fn to_public_input(&self, npublic_input: usize) -> anyhow::Result> { let PreparedStatement { proof_state: ProofState { @@ -172,7 +172,7 @@ impl PreparedStatement { uses_lookup.to_field_elements(&mut fields); if uses_lookup { - fields.push(lookup_value.unwrap()); + fields.push(lookup_value.unwrap_or(Fq::zero())); } else { fields.push(Fq::zero()); } @@ -186,7 +186,7 @@ impl PreparedStatement { &self, hack_feature_flags: OptFlag, npublic_input: usize, - ) -> Result, InvalidBigInt> { + ) -> anyhow::Result> { let PreparedStatement { proof_state: 
ProofState { diff --git a/ledger/src/proofs/step.rs b/ledger/src/proofs/step.rs index acb3613160..39aeaf6053 100644 --- a/ledger/src/proofs/step.rs +++ b/ledger/src/proofs/step.rs @@ -17,6 +17,7 @@ use crate::{ }, verifier::{get_srs, get_srs_mut}, }; +use anyhow::Context; use ark_ff::{fields::arithmetic::InvalidBigInt, BigInteger256, One, Zero}; use ark_poly::{ univariate::DensePolynomial, EvaluationDomain, Radix2EvaluationDomain, UVPolynomial, @@ -52,7 +53,7 @@ use super::{ create_proof, make_group, messages_for_next_wrap_proof_padding, scalar_challenge::to_field_checked, Check, CircuitPlonkVerificationKeyEvals, CreateProofParams, InnerCurve, MessagesForNextStepProof, PlonkVerificationKeyEvals, - ProofError, ProofWithPublic, Prover, ReducedMessagesForNextStepProof, StepStatement, + ProofWithPublic, Prover, ReducedMessagesForNextStepProof, StepStatement, }, unfinalized::{evals_from_p2p, AllEvals, EvalsWithPublicInput, Unfinalized}, util::{extract_bulletproof, two_u64_to_field}, @@ -336,7 +337,6 @@ pub mod step_verifier { MakeScalarsEnvParams, PERMUTS_MINUS_1_ADD_N1, }, }; - use ark_ff::fields::arithmetic::InvalidBigInt; use itertools::Itertools; use kimchi::circuits::wires::PERMUTS; use poly_commitment::{srs::SRS, PolyComm}; @@ -532,7 +532,7 @@ pub mod step_verifier { pub(super) fn finalize_other_proof( params: FinalizeOtherProofParams, w: &mut Witness, - ) -> Result<(Boolean, Vec), InvalidBigInt> { + ) -> anyhow::Result<(Boolean, Vec)> { let FinalizeOtherProofParams { max_proof_verified, feature_flags: _, @@ -1387,7 +1387,7 @@ pub mod step_verifier { fn check_bulletproof( params: CheckBulletProofParams, w: &mut Witness, - ) -> Result<(Boolean, Vec), InvalidBigInt> { + ) -> anyhow::Result<(Boolean, Vec)> { let CheckBulletProofParams { pcs_batch: _, mut sponge, @@ -1550,7 +1550,7 @@ pub mod step_verifier { fn incrementally_verify_proof( params: IncrementallyVerifyProofParams, w: &mut Witness, - ) -> Result<(Fp, (Boolean, Vec)), InvalidBigInt> { + ) -> anyhow::Result<(Fp, (Boolean, Vec))> { let IncrementallyVerifyProofParams { proofs_verified: _, srs, @@ -1707,10 +1707,7 @@ pub mod step_verifier { pub(super) unfinalized: &'a Unfinalized, } - pub(super) fn verify( - params: VerifyParams, - w: &mut Witness, - ) -> Result { + pub(super) fn verify(params: VerifyParams, w: &mut Witness) -> anyhow::Result { let VerifyParams { srs, feature_flags: _, @@ -1800,10 +1797,7 @@ struct VerifyOneParams<'a> { should_verify: CircuitVar, } -fn verify_one( - params: VerifyOneParams, - w: &mut Witness, -) -> Result<(Vec, Boolean), InvalidBigInt> { +fn verify_one(params: VerifyOneParams, w: &mut Witness) -> anyhow::Result<(Vec, Boolean)> { let VerifyOneParams { srs, proof, @@ -1931,7 +1925,7 @@ pub struct ExpandDeferredParams<'a> { pub zk_rows: u64, } -pub fn expand_deferred(params: ExpandDeferredParams) -> Result, InvalidBigInt> { +pub fn expand_deferred(params: ExpandDeferredParams) -> anyhow::Result> { let ExpandDeferredParams { evals, old_bulletproof_challenges, @@ -1948,8 +1942,15 @@ pub fn expand_deferred(params: ExpandDeferredParams) -> Result = - Radix2EvaluationDomain::new(1 << step_domain as u64).unwrap(); + + let Some(num_coeffs) = 1u64.checked_shl(step_domain as u32) else { + return Err(InvalidBigInt)?; + }; + + let Some(domain) = Radix2EvaluationDomain::::new(num_coeffs as usize) else { + return Err(InvalidBigInt)?; + }; + let zetaw = zeta * domain.group_gen; let plonk_minimal = PlonkMinimal:: { @@ -2116,7 +2117,7 @@ struct ExpandProofParams<'a> { zk_rows: u64, } -fn expand_proof(params: 
ExpandProofParams) -> Result { +fn expand_proof(params: ExpandProofParams) -> anyhow::Result { let ExpandProofParams { dlog_vk, dlog_plonk_index, @@ -2226,7 +2227,7 @@ fn expand_proof(params: ExpandProofParams) -> Result>()?, + .collect::>()?, old_bulletproof_challenges: old_bulletproof_challenges.clone(), } .hash(); @@ -2372,7 +2373,7 @@ fn expand_proof(params: ExpandProofParams) -> Result(x.to_field()?, y.to_field()?))) - .collect::, _>>()?; + .collect::, InvalidBigInt>>()?; while challenge_polynomial_commitments.len() < 2 { challenge_polynomial_commitments.insert(0, dummy_ipa_wrap_sg()); @@ -2645,17 +2646,18 @@ impl Check for PerProofWitness { pub fn extract_recursion_challenges( proofs: &[&v2::PicklesProofProofsVerified2ReprStableV2; N], -) -> Result>>, InvalidBigInt> { +) -> anyhow::Result>>> { use poly_commitment::PolyComm; - let comms: [(Fq, Fq); N] = crate::try_array_into_with(proofs, |proof| { - let (a, b) = &proof - .statement - .proof_state - .messages_for_next_wrap_proof - .challenge_polynomial_commitment; - Ok((a.to_field::()?, b.to_field::()?)) - })?; + let comms: [(Fq, Fq); N] = + crate::try_array_into_with(proofs, |proof| -> Result<_, InvalidBigInt> { + let (a, b) = &proof + .statement + .proof_state + .messages_for_next_wrap_proof + .challenge_polynomial_commitment; + Ok((a.to_field::()?, b.to_field::()?)) + })?; let challs = proofs .iter() @@ -2713,7 +2715,7 @@ pub struct StepProof { pub fn step( params: StepParams, w: &mut Witness, -) -> Result { +) -> anyhow::Result { let StepParams { app_state, rule, @@ -2752,7 +2754,8 @@ pub fn step( zk_rows: data.zk_rows, }) }) - .collect::, _>>()? + .collect::, _>>() + .context("expand_proof")? .try_into() .unwrap(); @@ -2847,7 +2850,8 @@ pub fn step( Ok(chals.try_into().unwrap()) // Never fail, we know it's 16 }, ) - .collect::, InvalidBigInt>>()?; + .collect::, anyhow::Error>>() + .context("verify_one")?; std::mem::drop(srs); @@ -2890,7 +2894,8 @@ pub fn step( only_verify_constraints, }, w, - )?; + ) + .context("create_proof")?; let proofs: [&v2::PicklesProofProofsVerified2ReprStableV2; N_PREVIOUS] = rule .previous_proof_statements @@ -2913,7 +2918,8 @@ pub fn step( }, }) }) - .collect::, InvalidBigInt>>()?; + .collect::>>() + .context("prev_evals")?; let challenge_polynomial_commitments = expanded_proofs .iter() @@ -2933,7 +2939,8 @@ pub fn step( messages_for_next_wrap_proof, )) }) - .collect::, InvalidBigInt>>()? // TODO: Refactor when `try_unzip` is stable + .collect::, InvalidBigInt>>() // TODO: Refactor when `try_unzip` is stable + .context("unzip proof state")? .into_iter() .unzip(); diff --git a/ledger/src/proofs/transaction.rs b/ledger/src/proofs/transaction.rs index 9f8a4ba471..5e804cea84 100644 --- a/ledger/src/proofs/transaction.rs +++ b/ledger/src/proofs/transaction.rs @@ -1,5 +1,6 @@ use std::{collections::HashMap, rc::Rc, str::FromStr, sync::Arc}; +use anyhow::Context; use ark_ec::{short_weierstrass_jacobian::GroupProjective, AffineCurve, ProjectiveCurve}; use ark_ff::{fields::arithmetic::InvalidBigInt, BigInteger256, Field, PrimeField}; use kimchi::{ @@ -1295,10 +1296,22 @@ impl InnerCurve { let proj: GroupProjective = ark_ff::UniformRand::rand(&mut rng); let proj: F::Projective = proj.into(); + let proj2 = proj; + LATEST_RANDOM.set(Box::new(move || { + let this = Self { inner: proj2 }; + format!("{:#?}", this.to_affine()) + })); + Self { inner: proj } } } +use std::cell::RefCell; + +thread_local! 
{
+    static LATEST_RANDOM: RefCell<Box<dyn Fn() -> String>> = RefCell::new(Box::new(String::new));
+}
+
 impl InnerCurve {
     // TODO: Remove this
     pub fn rand() -> Self {
@@ -2786,14 +2799,11 @@ pub mod transaction_snark {
         tx: &TransactionUnion,
         sparse_ledger: &SparseLedger,
         w: &mut Witness,
-    ) -> Result<
-        (
-            Fp,
-            CheckedSigned>,
-            CheckedSigned>,
-        ),
-        InvalidBigInt,
-    > {
+    ) -> anyhow::Result<(
+        Fp,
+        CheckedSigned>,
+        CheckedSigned>,
+    )> {
         let TransactionUnion {
             payload,
             signer,
@@ -3591,7 +3601,7 @@ pub mod transaction_snark {
         statement_with_sok: &Statement,
         tx_witness: &v2::TransactionWitnessStableV2,
         w: &mut Witness,
-    ) -> Result<(), InvalidBigInt> {
+    ) -> anyhow::Result<()> {
         let tx: crate::scan_state::transaction_logic::Transaction =
             (&tx_witness.transaction).try_into()?;
         let tx = transaction_union_payload::TransactionUnion::of_transaction(&tx);
@@ -3912,15 +3922,19 @@ fn get_rng() -> rand::rngs::OsRng {
     rand::rngs::OsRng
 }
 
-#[derive(Debug, derive_more::From)]
+#[derive(Debug, thiserror::Error)]
 pub enum ProofError {
-    #[from]
-    ProvingError(kimchi::error::ProverError),
+    #[error("kimchi error: {0:?}")]
+    ProvingError(#[from] kimchi::error::ProverError),
+    #[error("kimchi error with context: {0:?}")]
+    ProvingErrorWithContext(#[from] debug::KimchiProofError),
+    #[error("constraint not satisfied: {0}")]
     ConstraintsNotSatisfied(String),
-    #[from]
-    InvalidBigint(InvalidBigInt),
+    #[error("invalid bigint")]
+    InvalidBigint(#[from] InvalidBigInt),
     /// We still return an error when `only_verify_constraints` is true and
     /// constraints are verified, to short-circuit easily
+    #[error("Constraints ok")]
     ConstraintsOk,
 }
 
@@ -3946,7 +3960,7 @@ impl ProofWithPublic {
 pub(super) fn create_proof(
     params: CreateProofParams,
     w: &Witness,
-) -> Result, ProofError> {
+) -> anyhow::Result> {
     type EFrSponge = mina_poseidon::sponge::DefaultFrSponge;
 
     let CreateProofParams {
@@ -3959,19 +3973,18 @@ pub(super) fn create_proof(
     let prover_index: &ProverIndex = &prover.index;
 
     // public input
-    let public_input = computed_witness[0][0..prover_index.cs.public].to_vec();
+    let public_input = computed_witness[0][..prover_index.cs.public].to_vec();
 
     if only_verify_constraints {
-        let public = &computed_witness[0][0..prover_index.cs.public];
         prover_index
-            .verify(&computed_witness, public)
+            .verify(&computed_witness, &public_input)
             .map_err(|e| {
                 ProofError::ConstraintsNotSatisfied(format!("incorrect witness: {:?}", e))
             })?;
 
         // We still return an error when `only_verify_constraints` is true and
         // constraints are verified, to short-circuit easily
-        return Err(ProofError::ConstraintsOk);
+        return Err(ProofError::ConstraintsOk.into());
     }
 
     // NOTE: Not random in `cfg(test)`
@@ -3984,10 +3997,40 @@ pub(super) fn create_proof(
         computed_witness,
         &[],
         prover_index,
-        prev_challenges,
+        prev_challenges.clone(),
         None,
         &mut rng,
-    )?;
+    )
+    .map_err(|e| {
+        use kimchi::groupmap::GroupMap;
+
+        let prev_challenges_hash = debug::hash_prev_challenge::(&prev_challenges);
+        let witness_primary_hash = debug::hash_slice(&w.primary);
+        let witness_aux_hash = debug::hash_slice(w.aux());
+        let group_map_hash = debug::hash_slice(&group_map.composition());
+
+        dbg!(
+            &prev_challenges_hash,
+            &witness_primary_hash,
+            &witness_aux_hash,
+            &group_map_hash
+        );
+
+        let context = debug::KimchiProofError {
+            inner_error: e.to_string(),
+            witness_primary: w.primary.iter().map(|f| (*f).into()).collect(),
+            witness_aux: w.aux().iter().map(|f| (*f).into()).collect(),
+            // prev_challenges,
+            witness_primary_hash,
+            witness_aux_hash,
+
prev_challenges_hash, + group_map_hash, + latest_random: LATEST_RANDOM.with_borrow(|fun| (fun)()), + }; + + ProofError::ProvingErrorWithContext(context) + }) + .context("create_recursive")?; eprintln!("proof_elapsed={:?}", now.elapsed()); @@ -3997,6 +4040,165 @@ pub(super) fn create_proof( }) } +pub mod debug { + use super::*; + + use mina_p2p_messages::bigint::BigInt; + use mina_p2p_messages::binprot; + use sha2::Digest; + + fn hash_field(state: &mut sha2::Sha256, f: &F) { + for limb in f.montgomery_form_ref() { + state.update(limb.to_le_bytes()); + } + } + + fn hash_field_slice(state: &mut sha2::Sha256, slice: &[F]) { + state.update(slice.len().to_le_bytes()); + for f in slice.iter().flat_map(|f| f.montgomery_form_ref()) { + state.update(f.to_le_bytes()); + } + } + + pub(super) fn hash_slice(slice: &[F]) -> String { + let mut hasher = sha2::Sha256::new(); + hash_field_slice(&mut hasher, slice); + hex::encode(hasher.finalize()) + } + + pub(super) fn hash_prev_challenge( + prevs: &[RecursionChallenge], + ) -> String { + use poly_commitment::commitment::CommitmentCurve; + use sha2::Digest; + let mut hasher = sha2::Sha256::new(); + for RecursionChallenge { chals, comm } in prevs { + hash_field_slice(&mut hasher, chals); + let poly_commitment::PolyComm { elems } = comm; + for elem in elems { + match elem.to_coordinates() { + None => { + hasher.update([0]); + } + Some((c1, c2)) => { + hasher.update([1]); + hash_field(&mut hasher, &c1); + hash_field(&mut hasher, &c2); + } + } + } + } + hex::encode(hasher.finalize()) + } + + #[derive(Clone)] + pub struct KimchiProofError { + pub inner_error: String, + pub witness_primary: Vec, + pub witness_aux: Vec, + // pub prev_challenges: Vec>, + // Store hashes in case there is a de/serialization bug + pub witness_primary_hash: String, + pub witness_aux_hash: String, + pub prev_challenges_hash: String, + pub group_map_hash: String, + pub latest_random: String, + } + + // Manual implementation because String does not implement binprot traits (because unbounded) + impl binprot::BinProtWrite for KimchiProofError { + fn binprot_write(&self, w: &mut W) -> std::io::Result<()> { + let Self { + inner_error, + witness_primary, + witness_aux, + witness_primary_hash, + witness_aux_hash, + prev_challenges_hash, + group_map_hash, + latest_random, + } = self; + let inner_error: &[u8] = inner_error.as_bytes(); + let witness_primary_hash: &[u8] = witness_primary_hash.as_bytes(); + let witness_aux_hash: &[u8] = witness_aux_hash.as_bytes(); + let prev_challenges_hash: &[u8] = prev_challenges_hash.as_bytes(); + let group_map_hash: &[u8] = group_map_hash.as_bytes(); + let latest_random: &[u8] = latest_random.as_bytes(); + binprot::BinProtWrite::binprot_write(&inner_error, w)?; + binprot::BinProtWrite::binprot_write(witness_primary, w)?; + binprot::BinProtWrite::binprot_write(witness_aux, w)?; + binprot::BinProtWrite::binprot_write(&witness_primary_hash, w)?; + binprot::BinProtWrite::binprot_write(&witness_aux_hash, w)?; + binprot::BinProtWrite::binprot_write(&prev_challenges_hash, w)?; + binprot::BinProtWrite::binprot_write(&group_map_hash, w)?; + binprot::BinProtWrite::binprot_write(&latest_random, w)?; + Ok(()) + } + } + // Manual implementation because String does not implement binprot traits (because unbounded) + impl binprot::BinProtRead for KimchiProofError { + fn binprot_read(r: &mut R) -> Result + where + Self: Sized, + { + let to_string = |bytes: Vec| -> String { String::from_utf8(bytes).unwrap() }; + let inner_error: Vec = binprot::BinProtRead::binprot_read(r)?; + 
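`hash_slice` and `hash_prev_challenge` above fingerprint large witnesses so a failure report can carry short, comparable digests instead of megabytes of field elements. A reduced sketch of the same scheme over plain `u64` limbs, assuming the `sha2` and `hex` crates already in this tree:

    use sha2::{Digest, Sha256};

    /// Length-prefixed SHA-256 over little-endian limbs: two slices hash
    /// equal only if they have the same length and the same contents.
    fn fingerprint(limbs: &[u64]) -> String {
        let mut hasher = Sha256::new();
        hasher.update(limbs.len().to_le_bytes());
        for limb in limbs {
            hasher.update(limb.to_le_bytes());
        }
        hex::encode(hasher.finalize())
    }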
let witness_primary: Vec = binprot::BinProtRead::binprot_read(r)?; + let witness_aux: Vec = binprot::BinProtRead::binprot_read(r)?; + let witness_primary_hash: Vec = binprot::BinProtRead::binprot_read(r)?; + let witness_aux_hash: Vec = binprot::BinProtRead::binprot_read(r)?; + let prev_challenges_hash: Vec = binprot::BinProtRead::binprot_read(r)?; + let group_map_hash: Vec = binprot::BinProtRead::binprot_read(r)?; + let latest_random: Vec = binprot::BinProtRead::binprot_read(r)?; + Ok(Self { + inner_error: to_string(inner_error), + witness_primary, + witness_aux, + witness_primary_hash: to_string(witness_primary_hash), + witness_aux_hash: to_string(witness_aux_hash), + prev_challenges_hash: to_string(prev_challenges_hash), + group_map_hash: to_string(group_map_hash), + latest_random: to_string(latest_random), + }) + } + } + + impl core::fmt::Display for KimchiProofError { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.write_fmt(format_args!("{:?}", self)) + } + } + + impl std::error::Error for KimchiProofError {} + + impl core::fmt::Debug for KimchiProofError { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + let Self { + inner_error, + witness_primary, + witness_aux, + witness_primary_hash, + witness_aux_hash, + prev_challenges_hash, + group_map_hash, + latest_random, + } = self; + + // Print witness lengths, not the whole vectors + f.debug_struct("KimchiProofError") + .field("inner_error", inner_error) + .field("witness_primary", &witness_primary.len()) + .field("witness_aux", &witness_aux.len()) + .field("witness_primary_hash", &witness_primary_hash) + .field("witness_aux_hash", &witness_aux_hash) + .field("prev_challenges_hash", &prev_challenges_hash) + .field("group_map_hash", &group_map_hash) + .field("latest_random", &latest_random) + .finish() + } + } +} + #[derive(Clone)] pub struct Prover { /// Constants to each kind of proof @@ -4024,7 +4226,7 @@ pub struct TransactionParams<'a> { pub(super) fn generate_tx_proof( params: TransactionParams, w: &mut Witness, -) -> Result { +) -> anyhow::Result { let TransactionParams { statement, tx_witness, @@ -4752,6 +4954,7 @@ pub(super) mod tests { struct DumpBlockProof { input: Box, key: Vec, + error: Vec, } let rsa_private_key = { @@ -4767,13 +4970,18 @@ pub(super) mod tests { rsa::RsaPrivateKey::from_pkcs1_pem(&string).unwrap() }; - let DumpBlockProof { mut input, key } = { + let DumpBlockProof { + mut input, + key, + error, + } = { let Ok(data) = std::fs::read("/tmp/block_proof.binprot") else { eprintln!("Missing block proof"); return; }; DumpBlockProof::binprot_read(&mut data.as_slice()).unwrap() }; + eprintln!("error was: {}", String::from_utf8_lossy(&error)); let producer_private_key = { let producer_private_key = rsa_private_key.decrypt(Pkcs1v15Encrypt, &key).unwrap(); @@ -4786,6 +4994,7 @@ pub(super) mod tests { let mut file = std::fs::File::create("/tmp/block_proof_with_key.binprot").unwrap(); input.binprot_write(&mut file).unwrap(); file.sync_all().unwrap(); + eprintln!("saved to /tmp/block_proof_with_key.binprot"); } #[test] diff --git a/ledger/src/proofs/unfinalized.rs b/ledger/src/proofs/unfinalized.rs index c80ee529e4..5794a640a0 100644 --- a/ledger/src/proofs/unfinalized.rs +++ b/ledger/src/proofs/unfinalized.rs @@ -177,7 +177,7 @@ impl TryFrom<&v2::PicklesProofProofsVerified2ReprStableV2PrevEv /// Equivalent of `to_kimchi` in OCaml pub fn evals_from_p2p( e: &v2::PicklesWrapWireProofEvaluationsStableV1, -) -> Result>>, InvalidBigInt> { +) -> anyhow::Result>>> { let 
v2::PicklesWrapWireProofEvaluationsStableV1 { w, coefficients, @@ -193,12 +193,13 @@ pub fn evals_from_p2p( use mina_p2p_messages::bigint::BigInt; - let of = |(zeta, zeta_omega): &(BigInt, BigInt)| -> Result>, _> { - Ok(PointEvaluations { - zeta: vec![zeta.to_field()?], - zeta_omega: vec![zeta_omega.to_field()?], - }) - }; + let of = + |(zeta, zeta_omega): &(BigInt, BigInt)| -> Result>, InvalidBigInt> { + Ok(PointEvaluations { + zeta: vec![zeta.to_field()?], + zeta_omega: vec![zeta_omega.to_field()?], + }) + }; use std::array; Ok(ProofEvaluations { diff --git a/ledger/src/proofs/verification.rs b/ledger/src/proofs/verification.rs index 9551ea7fe1..ce7c5a4521 100644 --- a/ledger/src/proofs/verification.rs +++ b/ledger/src/proofs/verification.rs @@ -673,7 +673,7 @@ fn run_checks( fn compute_deferred_values( proof: &PicklesProofProofsVerified2ReprStableV2, -) -> Result, InvalidBigInt> { +) -> anyhow::Result> { let bulletproof_challenges: Vec = proof .statement .proof_state @@ -837,7 +837,7 @@ fn verify_impl( app_state: &AppState, proof: &PicklesProofProofsVerified2ReprStableV2, vk: &VK, -) -> Result +) -> anyhow::Result where AppState: ToFieldElements, { @@ -875,7 +875,7 @@ where fn batch_verify_impl( proofs: &[(&AppState, &PicklesProofProofsVerified2ReprStableV2, &VK)], -) -> Result +) -> anyhow::Result where AppState: ToFieldElements, { @@ -976,7 +976,7 @@ mod on_fail { #[allow(unreachable_code)] fn dump_to_file(data: &D, filename: &str) { - #[cfg(test)] + #[cfg(any(test, feature = "fuzzing"))] { let (_, _) = (data, filename); // avoid unused vars return; diff --git a/ledger/src/proofs/wrap.rs b/ledger/src/proofs/wrap.rs index d59c18efb3..fb3ca859f6 100644 --- a/ledger/src/proofs/wrap.rs +++ b/ledger/src/proofs/wrap.rs @@ -48,7 +48,7 @@ use super::{ step::{step_verifier::PlonkDomain, FeatureFlags}, to_field_elements::{ToFieldElements, ToFieldElementsDebug}, transaction::{ - plonk_curve_ops::scale_fast, Check, PlonkVerificationKeyEvals, ProofError, Prover, + plonk_curve_ops::scale_fast, Check, PlonkVerificationKeyEvals, Prover, ReducedMessagesForNextStepProof, StepProofState, StepStatement, }, unfinalized::{AllEvals, EvalsWithPublicInput}, @@ -540,7 +540,7 @@ fn exists_prev_statement( step_statement: &StepStatement, messages_for_next_step_proof_hash: [u64; 4], w: &mut Witness, -) -> Result<(), InvalidBigInt> { +) -> anyhow::Result<()> { for unfinalized in &step_statement.proof_state.unfinalized_proofs { w.exists_no_check(unfinalized); } @@ -593,7 +593,7 @@ pub struct WrapParams<'a> { pub fn wrap( params: WrapParams, w: &mut Witness, -) -> Result { +) -> anyhow::Result { use crate::proofs::public_input::scalar_challenge::ScalarChallenge; let WrapParams { @@ -2653,7 +2653,7 @@ fn pack_statement( statement: &StepStatementWithHash, messages_for_next_step_proof_hash: &[u64; 4], w: &mut Witness, -) -> Result>, InvalidBigInt> { +) -> anyhow::Result>> { let StepStatementWithHash { proof_state: StepProofState { @@ -2788,7 +2788,7 @@ struct WrapMainParams<'a> { step_prover_index: &'a ProverIndex, } -fn wrap_main(params: WrapMainParams, w: &mut Witness) -> Result<(), InvalidBigInt> { +fn wrap_main(params: WrapMainParams, w: &mut Witness) -> anyhow::Result<()> { let WrapMainParams { step_statement, next_statement, diff --git a/ledger/src/proofs/zkapp.rs b/ledger/src/proofs/zkapp.rs index 7cc88f7c01..9d7e88102d 100644 --- a/ledger/src/proofs/zkapp.rs +++ b/ledger/src/proofs/zkapp.rs @@ -68,7 +68,7 @@ use super::{ }, provers::devnet_circuit_directory, to_field_elements::ToFieldElements, - 
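The `evals_from_p2p` change above annotates the conversion closure with an explicit error type; without it, the `?` inside the closure has nothing to infer the error from once the results are collected. A minimal standalone illustration of the same fix (types are illustrative):

    use std::num::ParseIntError;

    fn parse_pairs(input: &[(&str, &str)]) -> Result<Vec<(u64, u64)>, ParseIntError> {
        // The explicit `-> Result<..., ParseIntError>` annotation is what lets
        // `?` compile here; the closure body alone cannot infer its error type.
        let of = |&(a, b): &(&str, &str)| -> Result<(u64, u64), ParseIntError> {
            Ok((a.parse()?, b.parse()?))
        };
        input.iter().map(of).collect()
    }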
transaction::{dummy_constraints, Check, ProofError, Prover}, + transaction::{dummy_constraints, Check, Prover}, witness::Witness, wrap::WrapProof, }; @@ -1310,7 +1310,7 @@ fn of_zkapp_command_segment_exn( tx_wrap_prover: &Prover, fps_path: Option, fqs_path: Option, -) -> Result +) -> anyhow::Result where StepConstants: ProofConstants, WrapConstants: ProofConstants + ForWrapData, @@ -1659,7 +1659,7 @@ fn of_zkapp_command_segment( tx_wrap_prover: &Prover, opt_signed_path: Option<&str>, proved_path: Option<&str>, -) -> Result { +) -> anyhow::Result { let (step_prover, step_path, wrap_path) = match spec { SegmentBasic::OptSignedOptSigned => (step_opt_signed_opt_signed_prover, None, None), SegmentBasic::OptSigned => { @@ -1697,7 +1697,7 @@ fn of_zkapp_command_segment( ) } -pub fn generate_zkapp_proof(params: ZkappParams) -> Result { +pub fn generate_zkapp_proof(params: ZkappParams) -> anyhow::Result { let ZkappParams { statement, tx_witness, @@ -1792,7 +1792,7 @@ fn merge_zkapp_proofs( message: &SokMessage, merge_step_prover: &Prover, tx_wrap_prover: &Prover, -) -> Result { +) -> anyhow::Result { let merged_statement = prev .statement .clone() diff --git a/ledger/src/scan_state/scan_state.rs b/ledger/src/scan_state/scan_state.rs index fe61a0f11d..5ee75063f8 100644 --- a/ledger/src/scan_state/scan_state.rs +++ b/ledger/src/scan_state/scan_state.rs @@ -59,7 +59,9 @@ use super::{ pub use super::parallel_scan::base::Job as JobValueBase; pub use super::parallel_scan::merge::Job as JobValueMerge; -pub use super::parallel_scan::{JobValue, JobValueWithIndex, SpacePartition}; +pub use super::parallel_scan::{ + AvailableJob as ParallelScanAvailableJob, JobValue, JobValueWithIndex, SpacePartition, +}; // type LedgerProof = LedgerProofProdStableV2; // type LedgerProofWithSokMessage = TransactionSnarkScanStateLedgerProofWithSokMessageStableV2; diff --git a/ledger/src/scan_state/transaction_logic.rs b/ledger/src/scan_state/transaction_logic.rs index 0402a2752d..21207c8b28 100644 --- a/ledger/src/scan_state/transaction_logic.rs +++ b/ledger/src/scan_state/transaction_logic.rs @@ -7710,10 +7710,7 @@ fn validate_timing_with_min_balance_impl( } Timed { initial_minimum_balance, - cliff_time, - cliff_amount, - vesting_period, - vesting_increment, + .. 
} => { let account_balance = account.balance; @@ -7727,14 +7724,7 @@ fn validate_timing_with_min_balance_impl( (true, false, *initial_minimum_balance) } Some(proposed_new_balance) => { - let curr_min_balance = account_min_balance_at_slot( - *txn_global_slot, - *cliff_time, - *cliff_amount, - *vesting_period, - *vesting_increment, - *initial_minimum_balance, - ); + let curr_min_balance = account.min_balance_at_slot(*txn_global_slot); if proposed_new_balance < curr_min_balance { (false, true, curr_min_balance) @@ -7764,54 +7754,6 @@ fn validate_timing_with_min_balance_impl( } } -// TODO: This should be in `account.rs` -pub fn account_min_balance_at_slot( - global_slot: Slot, - cliff_time: Slot, - cliff_amount: Amount, - vesting_period: SlotSpan, - vesting_increment: Amount, - initial_minimum_balance: Balance, -) -> Balance { - if global_slot < cliff_time { - initial_minimum_balance - } else if vesting_period.is_zero() { - // If vesting period is zero then everything vests immediately at the cliff - Balance::zero() - } else { - match initial_minimum_balance.sub_amount(cliff_amount) { - None => Balance::zero(), - Some(min_balance_past_cliff) => { - // take advantage of fact that global slots are uint32's - - let num_periods = - (global_slot.as_u32() - cliff_time.as_u32()) / vesting_period.as_u32(); - let num_periods: u64 = num_periods.into(); - - let vesting_decrement = { - let vesting_increment = vesting_increment.as_u64(); - - if u64::MAX - .checked_div(num_periods) - .map(|res| matches!(res.cmp(&vesting_increment), std::cmp::Ordering::Less)) - .unwrap_or(false) - { - // The vesting decrement will overflow, use [max_int] instead. - Amount::from_u64(u64::MAX) - } else { - Amount::from_u64(num_periods.checked_mul(vesting_increment).unwrap()) - } - }; - - match min_balance_past_cliff.sub_amount(vesting_decrement) { - None => Balance::zero(), - Some(amount) => amount, - } - } - } - } -} - fn sub_amount(balance: Balance, amount: Amount) -> Result { balance .sub_amount(amount) diff --git a/ledger/src/staged_ledger/staged_ledger.rs b/ledger/src/staged_ledger/staged_ledger.rs index ec547f4c51..1076accc92 100644 --- a/ledger/src/staged_ledger/staged_ledger.rs +++ b/ledger/src/staged_ledger/staged_ledger.rs @@ -116,7 +116,6 @@ pub struct StagedLedger { } impl StagedLedger { - #[cfg(feature = "fuzzing")] pub fn ledger_ref(&self) -> &Mask { &self.ledger } diff --git a/macros/Cargo.toml b/macros/Cargo.toml index 7ec7ab3a84..4241bf9df5 100644 --- a/macros/Cargo.toml +++ b/macros/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "openmina-macros" -version = "0.14.0" +version = "0.16.0" edition = "2021" license = "Apache-2.0" authors = [ "Alexander Koptelov " ] diff --git a/macros/src/action_event.rs b/macros/src/action_event.rs index fd6d685532..22fca6e167 100644 --- a/macros/src/action_event.rs +++ b/macros/src/action_event.rs @@ -165,9 +165,8 @@ fn filter_fields(field_spec: &FieldsSpec, fields: &FieldsNamed) -> Result f .iter() .filter(|(name, _)| { - name.as_ref().map_or(true, |name| { - fields.named.iter().any(|n| Some(name) == n.ident.as_ref()) - }) + name.as_ref() + .is_none_or(|name| fields.named.iter().any(|n| Some(name) == n.ident.as_ref())) }) .map(|(_, expr)| Ok(expr.clone())) .collect(), diff --git a/mina-p2p-messages/src/v2/hashing.rs b/mina-p2p-messages/src/v2/hashing.rs index f138383ce6..239063f26a 100644 --- a/mina-p2p-messages/src/v2/hashing.rs +++ b/mina-p2p-messages/src/v2/hashing.rs @@ -115,7 +115,7 @@ impl Serialize for TransactionHash { if serializer.is_human_readable() { 
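The deleted `account_min_balance_at_slot` above now lives on `Account` as `min_balance_at_slot`; the vesting arithmetic itself is unchanged. A simplified sketch over plain `u64` slots and amounts, with one worked value (the saturating behaviour mirrors the removed helper):

    fn min_balance_at_slot(
        slot: u64,
        cliff_time: u64,
        cliff_amount: u64,
        vesting_period: u64,
        vesting_increment: u64,
        initial_min: u64,
    ) -> u64 {
        if slot < cliff_time {
            initial_min
        } else if vesting_period == 0 {
            0 // a zero vesting period means everything vests at the cliff
        } else {
            let past_cliff = initial_min.saturating_sub(cliff_amount);
            let num_periods = (slot - cliff_time) / vesting_period;
            let decrement = num_periods
                .checked_mul(vesting_increment)
                .unwrap_or(u64::MAX); // multiplication overflow clamps to "fully vested"
            past_cliff.saturating_sub(decrement)
        }
    }

    fn main() {
        // Cliff at slot 100 releases 200, then 50 vests per 10-slot period:
        // at slot 135 three full periods have elapsed, 1000 - 200 - 3 * 50 = 650.
        assert_eq!(min_balance_at_slot(135, 100, 200, 10, 50, 1_000), 650);
    }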
serializer.serialize_str(&self.to_string()) } else { - self.0.serialize(serializer) + serde_bytes::serialize(&*self.0, serializer) } } } @@ -129,9 +129,7 @@ impl<'de> serde::Deserialize<'de> for TransactionHash { let b58: String = Deserialize::deserialize(deserializer)?; Ok(b58.parse().map_err(|err| serde::de::Error::custom(err))?) } else { - let v = Vec::deserialize(deserializer)?; - v.try_into() - .map_err(|_| serde::de::Error::custom("transaction hash wrong size")) + serde_bytes::deserialize(deserializer) .map(Arc::new) .map(Self) } diff --git a/mina-p2p-messages/src/v2/manual.rs b/mina-p2p-messages/src/v2/manual.rs index d47ba006c7..1e95f1216d 100644 --- a/mina-p2p-messages/src/v2/manual.rs +++ b/mina-p2p-messages/src/v2/manual.rs @@ -726,7 +726,7 @@ pub type NonZeroCurvePoint = Base58CheckOfBinProt< >; #[derive(Clone, Debug, PartialEq, Serialize, Deserialize, BinProtRead, BinProtWrite)] -pub enum ArchiveTransitionFronntierDiff { +pub enum ArchiveTransitionFrontierDiff { BreadcrumbAdded { block: (MinaBlockBlockStableV2, (Option, StateHash)), accounts_accessed: List<(crate::number::UInt64, MinaBaseAccountBinableArgStableV2)>, @@ -740,9 +740,128 @@ pub enum ArchiveTransitionFronntierDiff { BoostrapOf(()), } +impl ArchiveTransitionFrontierDiff { + pub fn block(&self) -> Option { + match self { + // TODO(adonagy): maybe we should use Arc here instead of cloning + ArchiveTransitionFrontierDiff::BreadcrumbAdded { block, .. } => Some(block.0.clone()), + _ => None, + } + } + + pub fn accounts_accessed( + &self, + ) -> List<(crate::number::UInt64, MinaBaseAccountBinableArgStableV2)> { + match self { + ArchiveTransitionFrontierDiff::BreadcrumbAdded { + accounts_accessed, .. + } => accounts_accessed.clone(), + _ => List::new(), + } + } + + pub fn accounts_created(&self) -> List<(MinaBaseAccountIdStableV2, CurrencyFeeStableV1)> { + match self { + ArchiveTransitionFrontierDiff::BreadcrumbAdded { + accounts_created, .. + } => accounts_created.clone(), + _ => List::new(), + } + } + + pub fn tokens_used( + &self, + ) -> List<(MinaBaseTokenIdStableV2, Option)> { + match self { + ArchiveTransitionFrontierDiff::BreadcrumbAdded { tokens_used, .. 
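The `TransactionHash` serde impls above branch on `is_human_readable`, emitting a base58 string for JSON-like formats and raw bytes via `serde_bytes` for binprot-like ones. A compact sketch of that dual-format pattern, using hex instead of base58 to stay self-contained:

    use serde::{Deserialize, Deserializer, Serialize, Serializer};

    struct Digest32(Vec<u8>);

    impl Serialize for Digest32 {
        fn serialize<S: Serializer>(&self, s: S) -> Result<S::Ok, S::Error> {
            if s.is_human_readable() {
                s.serialize_str(&hex::encode(&self.0)) // text formats get a string
            } else {
                serde_bytes::serialize(&self.0, s) // binary formats get raw bytes
            }
        }
    }

    impl<'de> Deserialize<'de> for Digest32 {
        fn deserialize<D: Deserializer<'de>>(d: D) -> Result<Self, D::Error> {
            if d.is_human_readable() {
                let s = String::deserialize(d)?;
                hex::decode(s).map(Digest32).map_err(serde::de::Error::custom)
            } else {
                serde_bytes::deserialize(d).map(Digest32)
            }
        }
    }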
} => { + tokens_used.clone() + } + _ => List::new(), + } + } +} + #[derive(Clone, Debug, PartialEq, Serialize, Deserialize, BinProtRead, BinProtWrite)] pub enum ArchiveRpc { - SendDiff(ArchiveTransitionFronntierDiff), + SendDiff(ArchiveTransitionFrontierDiff), +} + +#[derive(Clone, Debug, PartialEq, BinProtRead, BinProtWrite)] +pub struct PrecomputedBlockProof(pub MinaBaseProofStableV2); + +impl Serialize for PrecomputedBlockProof { + fn serialize(&self, serializer: S) -> Result + where + S: serde::ser::Serializer, + { + use base64::{engine::general_purpose::URL_SAFE, Engine as _}; + use binprot::BinProtWrite; + let mut buf = Vec::new(); + self.0 + .binprot_write(&mut buf) + .map_err(serde::ser::Error::custom)?; + let base64_data = URL_SAFE.encode(&buf); + serializer.serialize_str(&base64_data) + } +} + +impl<'de> Deserialize<'de> for PrecomputedBlockProof { + fn deserialize(deserializer: D) -> Result + where + D: serde::de::Deserializer<'de>, + { + use base64::{engine::general_purpose::URL_SAFE, Engine as _}; + let base64_data = String::deserialize(deserializer)?; + let binprot_data = URL_SAFE + .decode(&base64_data) + .map_err(serde::de::Error::custom)?; + let mut read = binprot_data.as_slice(); + let proof: MinaBaseProofStableV2 = + binprot::BinProtRead::binprot_read(&mut read).map_err(serde::de::Error::custom)?; + Ok(PrecomputedBlockProof(proof)) + } +} + +impl From for PrecomputedBlockProof { + fn from(value: MinaBaseProofStableV2) -> Self { + Self(value) + } +} + +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, BinProtRead, BinProtWrite)] +pub struct PrecomputedBlock { + pub scheduled_time: BlockTimeTimeStableV1, + pub protocol_state: MinaStateProtocolStateValueStableV2, + pub protocol_state_proof: PrecomputedBlockProof, + pub staged_ledger_diff: StagedLedgerDiffDiffStableV2, + // FIXME: for some reason in OCaml the base58check conversion for the JSON value + // uses version byte = 0x05 (ledger hash) instead of 0x10 (StateHash) and 0x11 (StateBodyHash) + // Note: keeping the proper types here, we should raise an issue in the ocaml repo + pub delta_transition_chain_proof: ( + StateHash, // LedgerHash, // StateHash + List, // List, // StateBodyHash + ), + pub protocol_version: ProtocolVersionStableV2, + #[serde(default)] + pub proposed_protocol_version: Option, + pub accounts_accessed: List<(crate::number::UInt64, MinaBaseAccountBinableArgStableV2)>, + pub accounts_created: List<(MinaBaseAccountIdStableV2, CurrencyFeeStableV1)>, + pub tokens_used: List<(MinaBaseTokenIdStableV2, Option)>, +} + +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, BinProtRead, BinProtWrite)] +pub struct PrecomputedBlockData { + pub version: u32, + pub data: PrecomputedBlock, +} + +impl PrecomputedBlock { + pub fn with_version(&self, version: u32) -> PrecomputedBlockData { + PrecomputedBlockData { + version, + data: self.clone(), + } + } } #[cfg(test)] @@ -1771,7 +1890,7 @@ mod test { fn test_archive_breadcrumb_deserialization() { let breadcrumb_bytes = include_bytes!("../../../tests/files/archive-breadcrumb/3NK56ZbCS31qb8SvCtCCYza4beRDtKgXA2JL6s3evKouG2KkKtiy.bin"); let result = - v2::ArchiveTransitionFronntierDiff::binprot_read(&mut breadcrumb_bytes.as_slice()); + v2::ArchiveTransitionFrontierDiff::binprot_read(&mut breadcrumb_bytes.as_slice()); assert!(result.is_ok()); } diff --git a/node/Cargo.toml b/node/Cargo.toml index 07e3925bbb..5f65aec219 100644 --- a/node/Cargo.toml +++ b/node/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "node" -version = "0.14.0" +version = "0.16.0" edition = 
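`PrecomputedBlockProof` above serializes a binprot-encoded proof as a URL-safe base64 string, which keeps the JSON payload copy-pastable and safe in object-store keys and URLs. The round-trip reduces to the following, assuming the `base64` crate as used in this tree:

    use base64::{engine::general_purpose::URL_SAFE, Engine as _};

    fn encode_blob(bytes: &[u8]) -> String {
        URL_SAFE.encode(bytes)
    }

    fn decode_blob(s: &str) -> Result<Vec<u8>, base64::DecodeError> {
        URL_SAFE.decode(s)
    }

    fn main() {
        let blob = vec![0u8, 255, 16, 32];
        // Encoding then decoding is lossless for arbitrary bytes.
        assert_eq!(decode_blob(&encode_blob(&blob)).unwrap(), blob);
    }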
"2021" license = "Apache-2.0" diff --git a/node/account/Cargo.toml b/node/account/Cargo.toml index f135d8a0d3..04462503ae 100644 --- a/node/account/Cargo.toml +++ b/node/account/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "openmina-node-account" -version = "0.14.0" +version = "0.16.0" edition = "2021" license = "Apache-2.0" diff --git a/node/account/src/public_key.rs b/node/account/src/public_key.rs index e8a8d1784c..781a1bde8f 100644 --- a/node/account/src/public_key.rs +++ b/node/account/src/public_key.rs @@ -78,3 +78,10 @@ impl fmt::Display for AccountPublicKey { write!(f, "{p2p_key}") } } + +// for a simple hashmap or btree map use the hash of the string representation +impl std::hash::Hash for AccountPublicKey { + fn hash(&self, state: &mut H) { + self.to_string().hash(state); + } +} diff --git a/node/common/Cargo.toml b/node/common/Cargo.toml index 54a75e434a..cb21007560 100644 --- a/node/common/Cargo.toml +++ b/node/common/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "openmina-node-common" -version = "0.14.0" +version = "0.16.0" edition = "2021" license = "Apache-2.0" @@ -21,6 +21,8 @@ sha3 = "0.10.8" ark-ff = { workspace = true } binprot = { git = "https://github.com/openmina/binprot-rs", rev = "400b52c" } binprot_derive = { git = "https://github.com/openmina/binprot-rs", rev = "400b52c" } +bitflags = "2.8.0" +anyhow = "1" node = { path = "../../node", features = ["replay"] } openmina-core = { path = "../../core" } @@ -41,6 +43,11 @@ tracing-appender = "0.2.3" libp2p-identity = { version = "=0.2.7", features = ["ed25519", "rand", "serde"] } mio = { version = "1.0.2", features = ["os-poll", "net"] } reqwest = { version = "0.12.8", features = ["blocking", "json"] } +aws-config = { version = "1.1.7", features = ["behavior-version-latest"] } +aws-sdk-s3 = "1.73.0" +google-cloud-storage = "0.24.0" +google-cloud-auth = "0.17.2" + [features] p2p-webrtc = ["node/p2p-webrtc"] diff --git a/node/common/src/service/archive.rs b/node/common/src/service/archive.rs deleted file mode 100644 index 8b10393304..0000000000 --- a/node/common/src/service/archive.rs +++ /dev/null @@ -1,512 +0,0 @@ -use mina_p2p_messages::v2::{self, ArchiveTransitionFronntierDiff}; -use node::core::{channels::mpsc, thread}; -use std::net::SocketAddr; - -use super::NodeService; - -pub struct ArchiveService { - archive_sender: mpsc::UnboundedSender, -} - -const ARCHIVE_SEND_RETRIES: u8 = 5; -const MAX_EVENT_COUNT: u64 = 100; -const RETRY_INTERVAL_MS: u64 = 1000; - -impl ArchiveService { - fn new(archive_sender: mpsc::UnboundedSender) -> Self { - Self { archive_sender } - } - - #[cfg(not(target_arch = "wasm32"))] - fn run( - mut archive_receiver: mpsc::UnboundedReceiver, - address: SocketAddr, - ) { - while let Some(breadcrumb) = archive_receiver.blocking_recv() { - let mut retries = ARCHIVE_SEND_RETRIES; - while retries > 0 { - match rpc::send_diff(address, v2::ArchiveRpc::SendDiff(breadcrumb.clone())) { - Ok(result) => { - if result.should_retry() { - node::core::warn!( - summary = "Archive suddenly closed connection, retrying..." 
- ); - retries -= 1; - std::thread::sleep(std::time::Duration::from_millis(RETRY_INTERVAL_MS)); - } else { - node::core::warn!(summary = "Successfully sent diff to archive"); - break; - } - } - Err(e) => { - node::core::warn!( - summary = "Failed sending diff to archive", - error = e.to_string(), - retries = retries - ); - retries -= 1; - std::thread::sleep(std::time::Duration::from_millis(RETRY_INTERVAL_MS)); - } - } - } - } - } - - // Note: Placeholder for the wasm implementation, if we decide to include an archive mode in the future - #[cfg(target_arch = "wasm32")] - fn run( - mut archive_receiver: mpsc::UnboundedReceiver, - address: SocketAddr, - ) { - unimplemented!() - } - - pub fn start(address: SocketAddr) -> Self { - let (archive_sender, archive_receiver) = - mpsc::unbounded_channel::(); - - thread::Builder::new() - .name("openmina_archive".to_owned()) - .spawn(move || { - Self::run(archive_receiver, address); - }) - .unwrap(); - - Self::new(archive_sender) - } -} - -impl node::transition_frontier::archive::archive_service::ArchiveService for NodeService { - fn send_to_archive(&mut self, data: ArchiveTransitionFronntierDiff) { - if let Some(archive) = self.archive.as_mut() { - if let Err(e) = archive.archive_sender.send(data.clone()) { - node::core::warn!( - summary = "Failed sending diff to archive service", - error = e.to_string() - ); - } - } - } -} - -// We need to replicate the ocaml node's RPC like interface -#[cfg(not(target_arch = "wasm32"))] -mod rpc { - use binprot::BinProtWrite; - use mina_p2p_messages::rpc_kernel::{Message, NeedsLength, Query, RpcMethod}; - use mina_p2p_messages::v2::{self, ArchiveRpc}; - use mio::event::Event; - use mio::net::TcpStream; - use mio::{Events, Interest, Poll, Registry, Token}; - use std::io::{self, Read, Write}; - use std::net::SocketAddr; - - const MAX_RECURSION_DEPTH: u8 = 25; - - // messages - const HEADER_MSG: [u8; 7] = [2, 253, 82, 80, 67, 0, 1]; - const OK_MSG: [u8; 5] = [2, 1, 0, 1, 0]; - // Note: this is the close message that the ocaml node receives - const CLOSE_MSG: [u8; 7] = [2, 254, 167, 7, 0, 1, 0]; - const HEARTBEAT_MSG: [u8; 1] = [0]; - - fn prepend_length(message: &[u8]) -> Vec { - let length = message.len() as u64; - let mut length_bytes = length.to_le_bytes().to_vec(); - length_bytes.append(&mut message.to_vec()); - length_bytes - } - pub enum HandleResult { - MessageSent, - ConnectionClosed, - ConnectionAlive, - MessageWouldBlock, - } - - impl HandleResult { - pub fn should_retry(&self) -> bool { - matches!(self, Self::ConnectionClosed) - } - } - - pub fn send_diff(address: SocketAddr, data: v2::ArchiveRpc) -> io::Result { - let rpc = encode_to_rpc(data)?; - process_rpc(address, &rpc) - } - - fn encode_to_rpc(data: ArchiveRpc) -> io::Result> { - type Method = mina_p2p_messages::rpc::SendArchiveDiffUnversioned; - let mut v = vec![0; 8]; - - if let Err(e) = Message::Query(Query { - tag: Method::NAME.into(), - version: Method::VERSION, - id: 1, - data: NeedsLength(data), - }) - .binprot_write(&mut v) - { - node::core::warn!( - summary = "Failed binprot serializastion", - error = e.to_string() - ); - return Err(e); - } - - let payload_length = (v.len() - 8) as u64; - v[..8].copy_from_slice(&payload_length.to_le_bytes()); - // Bake in the heartbeat message - v.splice(0..0, prepend_length(&HEARTBEAT_MSG).iter().cloned()); - // also add the heartbeat message to the end of the message - v.extend_from_slice(&prepend_length(&HEARTBEAT_MSG)); - - Ok(v) - } - - fn process_rpc(address: SocketAddr, data: &[u8]) -> io::Result { - let 
mut poll = Poll::new()?; - let mut events = Events::with_capacity(128); - let mut event_count = 0; - - // We still need a token even for one connection - const TOKEN: Token = Token(0); - - let mut stream = TcpStream::connect(address)?; - - let mut handshake_received = false; - let mut handshake_sent = false; - let mut message_sent = false; - let mut first_heartbeat_received = false; - poll.registry() - .register(&mut stream, TOKEN, Interest::WRITABLE)?; - - loop { - if let Err(e) = poll.poll(&mut events, None) { - if interrupted(&e) { - continue; - } - return Err(e); - } - - for event in events.iter() { - event_count += 1; - // Failsafe to prevent infinite loops - if event_count > super::MAX_EVENT_COUNT { - return Err(io::Error::new( - io::ErrorKind::Other, - format!("FAILSAFE triggered, event count: {}", event_count), - )); - } - match event.token() { - TOKEN => { - match handle_connection_event( - poll.registry(), - &mut stream, - event, - data, - &mut handshake_received, - &mut handshake_sent, - &mut message_sent, - &mut first_heartbeat_received, - )? { - HandleResult::MessageSent => return Ok(HandleResult::MessageSent), - HandleResult::ConnectionClosed => { - return Ok(HandleResult::ConnectionClosed) - } - HandleResult::MessageWouldBlock => { - // do nothing, wait for the next event - continue; - } - HandleResult::ConnectionAlive => { - // keep swapping between readable and writable until we successfully send the message, then keep in read mode. - if message_sent { - poll.registry().reregister( - &mut stream, - TOKEN, - Interest::READABLE, - )?; - continue; - } - - if event.is_writable() { - poll.registry().reregister( - &mut stream, - TOKEN, - Interest::READABLE, - )?; - } else { - poll.registry().reregister( - &mut stream, - TOKEN, - Interest::WRITABLE, - )?; - } - continue; - } - } - } - _ => unreachable!(), - } - } - } - } - - fn _send_heartbeat(connection: &mut TcpStream) -> io::Result { - match connection.write_all(&HEARTBEAT_MSG) { - Ok(_) => { - connection.flush()?; - Ok(HandleResult::ConnectionAlive) - } - Err(ref err) if would_block(err) => Ok(HandleResult::MessageWouldBlock), - Err(ref err) if interrupted(err) => Ok(HandleResult::MessageWouldBlock), - Err(err) => Err(err), - } - } - - struct RecursionGuard { - count: u8, - max_depth: u8, - } - - impl RecursionGuard { - fn new(max_depth: u8) -> Self { - Self { - count: 0, - max_depth, - } - } - - fn increment(&mut self) -> io::Result<()> { - self.count += 1; - if self.count > self.max_depth { - Err(io::ErrorKind::WriteZero.into()) - } else { - Ok(()) - } - } - } - - fn send_data( - connection: &mut TcpStream, - data: &[u8], - recursion_guard: &mut RecursionGuard, - // closure that can be called when the data is sent - on_success: F, - ) -> io::Result - where - F: FnOnce() -> io::Result, - { - match connection.write(data) { - Ok(n) if n < data.len() => { - recursion_guard.increment()?; - let remaining_data = data[n..].to_vec(); - send_data(connection, &remaining_data, recursion_guard, on_success) - } - Ok(_) => { - connection.flush()?; - on_success() - } - Err(ref err) if would_block(err) => Ok(HandleResult::MessageWouldBlock), - Err(ref err) if interrupted(err) => { - recursion_guard - .increment() - .map_err(|_| io::ErrorKind::Interrupted)?; - send_data(connection, data, recursion_guard, on_success) - } - Err(err) => Err(err), - } - } - - #[allow(clippy::too_many_arguments)] - fn handle_connection_event( - registry: &Registry, - connection: &mut TcpStream, - event: &Event, - data: &[u8], - handshake_received: &mut bool, - 
handshake_sent: &mut bool, - message_sent: &mut bool, - first_heartbeat_received: &mut bool, - ) -> io::Result { - if event.is_writable() { - if !*handshake_sent { - let msg = prepend_length(&HEADER_MSG); - send_data( - connection, - &msg, - &mut RecursionGuard::new(MAX_RECURSION_DEPTH), - || { - *handshake_sent = true; - Ok(HandleResult::ConnectionAlive) - }, - )?; - return Ok(HandleResult::ConnectionAlive); - } - - if *handshake_received && *handshake_sent && !*message_sent && *first_heartbeat_received - { - send_data( - connection, - data, - &mut RecursionGuard::new(MAX_RECURSION_DEPTH), - || { - *message_sent = true; - Ok(HandleResult::ConnectionAlive) - }, - )?; - } - } - - if event.is_readable() { - let mut connection_closed = false; - let mut received_data = vec![0; 4096]; - let mut bytes_read = 0; - - loop { - match connection.read(&mut received_data[bytes_read..]) { - Ok(0) => { - connection_closed = true; - break; - } - Ok(n) => { - bytes_read += n; - if bytes_read == received_data.len() { - received_data.resize(received_data.len() + 1024, 0); - } - } - // Would block "errors" are the OS's way of saying that the - // connection is not actually ready to perform this I/O operation. - Err(ref err) if would_block(err) => break, - Err(ref err) if interrupted(err) => continue, - // Other errors we'll consider fatal. - Err(err) => return Err(err), - } - } - - if connection_closed { - registry.deregister(connection)?; - connection.shutdown(std::net::Shutdown::Both)?; - return Ok(HandleResult::ConnectionClosed); - } - - if bytes_read < 8 { - // malformed message, at least the length should be present - return Ok(HandleResult::ConnectionAlive); - } - - let raw_message = RawMessage::from_bytes(&received_data[..bytes_read]); - let messages = raw_message.parse_raw()?; - - for message in messages { - match message { - ParsedMessage::Header => { - *handshake_received = true; - } - ParsedMessage::Ok | ParsedMessage::Close => { - connection.flush()?; - registry.deregister(connection)?; - connection.shutdown(std::net::Shutdown::Both)?; - return Ok(HandleResult::MessageSent); - } - ParsedMessage::Heartbeat => { - *first_heartbeat_received = true; - } - ParsedMessage::Unknown(msg) => { - registry.deregister(connection)?; - connection.shutdown(std::net::Shutdown::Both)?; - node::core::warn!( - summary = "Received unknown message", - msg = format!("{:?}", msg) - ); - return Ok(HandleResult::ConnectionClosed); - } - } - } - } - - Ok(HandleResult::ConnectionAlive) - } - - fn would_block(err: &io::Error) -> bool { - err.kind() == io::ErrorKind::WouldBlock - } - - fn interrupted(err: &io::Error) -> bool { - err.kind() == io::ErrorKind::Interrupted - } - - enum ParsedMessage { - Heartbeat, - Ok, - Close, - Header, - Unknown(Vec), - } - - struct RawMessage { - length: usize, - data: Vec, - } - - impl RawMessage { - fn from_bytes(bytes: &[u8]) -> Self { - Self { - length: bytes.len(), - data: bytes.to_vec(), - } - } - - fn parse_raw(&self) -> io::Result> { - let mut parsed_bytes: usize = 0; - - // more than one message can be sent in a single packet - let mut messages = Vec::new(); - - while parsed_bytes < self.length { - // first 8 bytes are the length in little endian - let length = u64::from_le_bytes( - self.data[parsed_bytes..parsed_bytes + 8] - .try_into() - .unwrap(), - ) as usize; - parsed_bytes += 8; - - if parsed_bytes + length > self.length { - return Err(io::Error::new( - io::ErrorKind::InvalidData, - "Message length exceeds raw message length", - )); - } - - if length == HEADER_MSG.len() - && 
self.data[parsed_bytes..parsed_bytes + length] == HEADER_MSG - { - messages.push(ParsedMessage::Header); - } else if length == OK_MSG.len() - && self.data[parsed_bytes..parsed_bytes + length] == OK_MSG - { - messages.push(ParsedMessage::Ok); - } else if length == HEARTBEAT_MSG.len() - && self.data[parsed_bytes..parsed_bytes + length] == HEARTBEAT_MSG - { - messages.push(ParsedMessage::Heartbeat); - } else if length == CLOSE_MSG.len() - && self.data[parsed_bytes..parsed_bytes + length] == CLOSE_MSG - { - messages.push(ParsedMessage::Close); - } else { - messages.push(ParsedMessage::Unknown( - self.data[parsed_bytes..parsed_bytes + length].to_vec(), - )); - } - - parsed_bytes += length; - } - Ok(messages) - } - } -} - -// Note: Placeholder for the wasm implementation, if we decide to include an archive mode in the future -#[cfg(target_arch = "wasm32")] -mod rpc {} diff --git a/node/common/src/service/archive/aws.rs b/node/common/src/service/archive/aws.rs new file mode 100644 index 0000000000..2a768de670 --- /dev/null +++ b/node/common/src/service/archive/aws.rs @@ -0,0 +1,39 @@ +use std::env; + +use super::Error; + +pub(crate) struct ArchiveAWSClient { + client: aws_sdk_s3::Client, + bucket_name: String, + bucket_path: String, +} + +impl ArchiveAWSClient { + pub async fn new() -> Result { + let config = aws_config::load_from_env().await; + let bucket_name = env::var("OPENMINA_AWS_BUCKET_NAME").map_err(|_| { + Error::EnvironmentVariableNotSet("OPENMINA_AWS_BUCKET_NAME".to_string()) + })?; + let bucket_path = env::var("OPENMINA_AWS_BUCKET_PATH").map_err(|_| { + Error::EnvironmentVariableNotSet("OPENMINA_AWS_BUCKET_PATH".to_string()) + })?; + Ok(Self { + client: aws_sdk_s3::Client::new(&config), + bucket_name, + bucket_path, + }) + } + + pub async fn upload_block(&self, key: &str, data: &[u8]) -> Result<(), Error> { + self.client + .put_object() + .bucket(self.bucket_name.clone()) + .key(format!("{}/{}", self.bucket_path, key)) + .body(data.to_vec().into()) + .send() + .await + .map_err(|e| Error::UploadError(e.to_string()))?; + + Ok(()) + } +} diff --git a/node/common/src/service/archive/config.rs b/node/common/src/service/archive/config.rs new file mode 100644 index 0000000000..490d55e249 --- /dev/null +++ b/node/common/src/service/archive/config.rs @@ -0,0 +1,110 @@ +use bitflags::bitflags; +use std::env; + +bitflags! 
{ + #[derive(Debug, Clone, Default)] + pub struct ArchiveStorageOptions: u8 { + const ARCHIVER_PROCESS = 0b0001; + const LOCAL_PRECOMPUTED_STORAGE = 0b0010; + const GCP_PRECOMPUTED_STORAGE = 0b0100; + const AWS_PRECOMPUTED_STORAGE = 0b1000; + } +} + +impl ArchiveStorageOptions { + pub fn is_enabled(&self) -> bool { + !self.is_empty() + } + + pub fn requires_precomputed_block(&self) -> bool { + self.uses_aws_precomputed_storage() + || self.uses_gcp_precomputed_storage() + || self.uses_local_precomputed_storage() + } + + pub fn validate_env_vars(&self) -> Result<(), String> { + if self.contains(ArchiveStorageOptions::ARCHIVER_PROCESS) + && env::var("OPENMINA_ARCHIVE_ADDRESS").is_err() + { + return Err( + "OPENMINA_ARCHIVE_ADDRESS is required when ARCHIVER_PROCESS is enabled".to_string(), + ); + } + + if self.uses_aws_precomputed_storage() { + if env::var("AWS_ACCESS_KEY_ID").is_err() { + return Err( + "AWS_ACCESS_KEY_ID is required when AWS_PRECOMPUTED_STORAGE is enabled" + .to_string(), + ); + } + if env::var("AWS_SECRET_ACCESS_KEY").is_err() { + return Err( + "AWS_SECRET_ACCESS_KEY is required when AWS_PRECOMPUTED_STORAGE is enabled" + .to_string(), + ); + } + if env::var("AWS_SESSION_TOKEN").is_err() { + return Err( + "AWS_SESSION_TOKEN is required when AWS_PRECOMPUTED_STORAGE is enabled" + .to_string(), + ); + } + + if env::var("AWS_DEFAULT_REGION").is_err() { + return Err( + "AWS_DEFAULT_REGION is required when AWS_PRECOMPUTED_STORAGE is enabled" + .to_string(), + ); + } + + if env::var("OPENMINA_AWS_BUCKET_NAME").is_err() { + return Err( + "OPENMINA_AWS_BUCKET_NAME is required when AWS_PRECOMPUTED_STORAGE is enabled" + .to_string(), + ); + } + + // if env::var("OPENMINA_AWS_BUCKET_PATH").is_err() { + // return Err( + // "OPENMINA_AWS_BUCKET_PATH is required when AWS_PRECOMPUTED_STORAGE is enabled" + // .to_string(), + // ); + // } + } + + if self.uses_gcp_precomputed_storage() { + if env::var("GCP_CREDENTIALS_JSON").is_err() { + return Err( + "GCP_CREDENTIALS_JSON is required when GCP_PRECOMPUTED_STORAGE is enabled" + .to_string(), + ); + } + + if env::var("GCP_BUCKET_NAME").is_err() { + return Err( + "GCP_BUCKET_NAME is required when GCP_PRECOMPUTED_STORAGE is enabled" + .to_string(), + ); + } + } + + Ok(()) + } + + pub fn uses_local_precomputed_storage(&self) -> bool { + self.contains(ArchiveStorageOptions::LOCAL_PRECOMPUTED_STORAGE) + } + + pub fn uses_archiver_process(&self) -> bool { + self.contains(ArchiveStorageOptions::ARCHIVER_PROCESS) + } + + pub fn uses_gcp_precomputed_storage(&self) -> bool { + self.contains(ArchiveStorageOptions::GCP_PRECOMPUTED_STORAGE) + } + + pub fn uses_aws_precomputed_storage(&self) -> bool { + self.contains(ArchiveStorageOptions::AWS_PRECOMPUTED_STORAGE) + } +} diff --git a/node/common/src/service/archive/gcp.rs b/node/common/src/service/archive/gcp.rs new file mode 100644 index 0000000000..b7a816d8df --- /dev/null +++ b/node/common/src/service/archive/gcp.rs @@ -0,0 +1,54 @@ +use gcs::http::objects::upload as gcs_upload; +use google_cloud_auth::credentials::CredentialsFile as GcpCredentialsFile; +use google_cloud_storage as gcs; + +use super::Error; +use std::env; + +pub(crate) struct ArchiveGCPClient { + client: gcs::client::Client, + bucket_name: String, +} + +impl ArchiveGCPClient { + pub async fn new() -> Result { + let get_env_var = |var: &str| -> Result { + env::var(var).map_err(|_| Error::EnvironmentVariableNotSet(var.to_string())) + }; + + let cred_file = get_env_var("GCP_CREDENTIALS_JSON")?; + let bucket_name = 
get_env_var("GCP_BUCKET_NAME")?; + + let credentials = GcpCredentialsFile::new_from_file(cred_file) + .await + .map_err(|e| Error::UploadError(format!("GCP credentials error: {}", e)))?; + + let config = gcs::client::ClientConfig::default() + .with_credentials(credentials) + .await + .map_err(|e| Error::UploadError(format!("GCP config error: {}", e)))?; + + Ok(ArchiveGCPClient { + client: gcs::client::Client::new(config), + bucket_name, + }) + } + + pub async fn upload_block(&self, key: &str, data: &[u8]) -> Result<(), Error> { + let upload_type = gcs_upload::UploadType::Simple(gcs_upload::Media::new(key.to_string())); + + self.client + .upload_object( + &gcs_upload::UploadObjectRequest { + bucket: self.bucket_name.clone(), + ..Default::default() + }, + data.to_vec(), + &upload_type, + ) + .await + .map_err(|e| Error::UploadError(format!("GCP upload failed: {}", e)))?; + + Ok(()) + } +} diff --git a/node/common/src/service/archive/mod.rs b/node/common/src/service/archive/mod.rs new file mode 100644 index 0000000000..c05d921b11 --- /dev/null +++ b/node/common/src/service/archive/mod.rs @@ -0,0 +1,334 @@ +use mina_p2p_messages::v2::{self}; +use node::core::{channels::mpsc, thread}; +use node::ledger::write::BlockApplyResult; +use std::env; +use std::io::Write; + +use mina_p2p_messages::v2::PrecomputedBlock; +use openmina_core::NetworkConfig; +use std::net::SocketAddr; + +use super::NodeService; + +#[cfg(not(target_arch = "wasm32"))] +pub mod aws; +#[cfg(not(target_arch = "wasm32"))] +pub mod gcp; +#[cfg(not(target_arch = "wasm32"))] +pub mod rpc; + +pub mod config; + +use config::ArchiveStorageOptions; + +const ARCHIVE_SEND_RETRIES: u8 = 5; +const MAX_EVENT_COUNT: u64 = 100; +const RETRY_INTERVAL_MS: u64 = 1000; + +#[derive(Debug, thiserror::Error)] +pub enum Error { + #[error("Environment variable {0} is not set")] + EnvironmentVariableNotSet(String), + #[error("Failed to upload block to AWS: {0}")] + UploadError(String), +} + +pub struct ArchiveService { + archive_sender: mpsc::UnboundedSender, +} + +#[cfg(not(target_arch = "wasm32"))] +struct ArchiveServiceClients { + archiver_address: Option, + aws_client: Option, + gcp_client: Option, + local_path: Option, +} + +#[cfg(not(target_arch = "wasm32"))] +impl ArchiveServiceClients { + async fn new(options: &ArchiveStorageOptions, work_dir: String) -> Result { + let aws_client = if options.uses_aws_precomputed_storage() { + let client = aws::ArchiveAWSClient::new().await?; + Some(client) + } else { + None + }; + + let gcp_client = if options.uses_gcp_precomputed_storage() { + let client = gcp::ArchiveGCPClient::new().await?; + Some(client) + } else { + None + }; + + let local_path = if options.uses_local_precomputed_storage() { + let env_path = env::var("OPENMINA_LOCAL_PRECOMPUTED_STORAGE_PATH"); + let default = format!("{}/archive-precomputed", work_dir); + Some(env_path.unwrap_or(default)) + } else { + None + }; + + let archiver_address = if options.uses_archiver_process() { + let address = std::env::var("OPENMINA_ARCHIVE_ADDRESS") + .expect("OPENMINA_ARCHIVE_ADDRESS is not set"); + let address = reqwest::Url::parse(&address).expect("Invalid URL"); + + // Convert URL to SocketAddr + let socket_addrs = address.socket_addrs(|| None).expect("Invalid URL"); + + let socket_addr = socket_addrs.first().expect("No socket address found"); + + Some(*socket_addr) + } else { + None + }; + + Ok(Self { + archiver_address, + aws_client, + gcp_client, + local_path, + }) + } + + pub async fn send_block(&self, breadcrumb: BlockApplyResult, options: 
&ArchiveStorageOptions) { + if options.uses_archiver_process() { + if let Some(socket_addr) = self.archiver_address { + Self::handle_archiver_process(&breadcrumb, &socket_addr).await; + } else { + node::core::warn!(summary = "Archiver address not set"); + } + } + + if options.requires_precomputed_block() { + let network_name = NetworkConfig::global().name; + let height = breadcrumb.block.height(); + let state_hash = breadcrumb.block.hash(); + + let key = format!("{network_name}-{height}-{state_hash}.json"); + + node::core::info!( + summary = "Uploading precomputed block to archive", + key = key.clone() + ); + + let precomputed_block: PrecomputedBlock = match breadcrumb.try_into() { + Ok(block) => block, + Err(_) => { + node::core::warn!( + summary = "Failed to convert breadcrumb to precomputed block" + ); + return; + } + }; + + let data = match serde_json::to_vec(&precomputed_block) { + Ok(data) => data, + Err(e) => { + node::core::warn!( + summary = "Failed to serialize precomputed block", + error = e.to_string() + ); + return; + } + }; + + if options.uses_local_precomputed_storage() { + if let Some(path) = &self.local_path { + let key_clone = key.clone(); + match write_to_local_storage(path, &key, &data) { + Ok(_) => node::core::info!( + summary = "Successfully wrote precomputed block to local storage", + key = key_clone + ), + Err(e) => node::core::warn!( + summary = "Failed to write precomputed block to local storage", + key = key_clone, + error = e.to_string() + ), + } + } else { + node::core::warn!(summary = "Local precomputed storage path not set"); + } + } + + if options.uses_gcp_precomputed_storage() { + if let Some(client) = &self.gcp_client { + if let Err(e) = client.upload_block(&key, &data).await { + node::core::warn!( + summary = "Failed to upload precomputed block to GCP", + error = e.to_string() + ); + } + } else { + node::core::warn!(summary = "GCP client not initialized"); + } + } + if options.uses_aws_precomputed_storage() { + if let Some(client) = &self.aws_client { + if let Err(e) = client.upload_block(&key, &data).await { + node::core::warn!( + summary = "Failed to upload precomputed block to AWS", + error = e.to_string() + ); + } + } else { + node::core::warn!(summary = "AWS client not initialized"); + } + } + } + } + + async fn handle_archiver_process(breadcrumb: &BlockApplyResult, socket_addr: &SocketAddr) { + let mut retries = ARCHIVE_SEND_RETRIES; + + let archive_transition_frontier_diff: v2::ArchiveTransitionFrontierDiff = + breadcrumb.clone().try_into().unwrap(); + + for _ in 0..ARCHIVE_SEND_RETRIES { + match rpc::send_diff( + *socket_addr, + v2::ArchiveRpc::SendDiff(archive_transition_frontier_diff.clone()), + ) { + Ok(result) if result.should_retry() => { + node::core::warn!(summary = "Archive closed connection, retrying..."); + tokio::time::sleep(tokio::time::Duration::from_millis(RETRY_INTERVAL_MS)).await; + } + Ok(_) => { + node::core::info!(summary = "Successfully sent diff to archive"); + return; + } + Err(e) => { + node::core::warn!( + summary = "Failed sending diff to archive", + error = e.to_string(), + retries = retries + ); + tokio::time::sleep(tokio::time::Duration::from_millis(RETRY_INTERVAL_MS)).await; + } + } + retries -= 1; + } + } +} + +impl ArchiveService { + fn new(archive_sender: mpsc::UnboundedSender) -> Self { + Self { archive_sender } + } + + #[cfg(not(target_arch = "wasm32"))] + async fn run( + mut archive_receiver: mpsc::UnboundedReceiver, + options: ArchiveStorageOptions, + work_dir: String, + ) { + let clients = match 
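`handle_archiver_process` above retries a bounded number of times with a fixed pause between attempts. The shape generalizes to a small helper; a sketch assuming a tokio runtime (names are illustrative):

    use std::time::Duration;

    /// Runs `op` up to `tries` times (must be at least 1), sleeping `delay`
    /// between failed attempts, and returns the last error if all attempts fail.
    async fn with_retries<T, E, F, Fut>(tries: u32, delay: Duration, mut op: F) -> Result<T, E>
    where
        F: FnMut() -> Fut,
        Fut: std::future::Future<Output = Result<T, E>>,
    {
        let mut last_err = None;
        for attempt in 0..tries {
            match op().await {
                Ok(v) => return Ok(v),
                Err(e) => {
                    last_err = Some(e);
                    if attempt + 1 < tries {
                        tokio::time::sleep(delay).await;
                    }
                }
            }
        }
        Err(last_err.expect("tries must be >= 1"))
    }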
ArchiveServiceClients::new(&options, work_dir).await { + Ok(clients) => clients, + Err(e) => { + node::core::error!( + summary = "Failed to initialize archive service clients", + error = e.to_string() + ); + return; + } + }; + + while let Some(breadcrumb) = archive_receiver.recv().await { + clients.send_block(breadcrumb, &options).await; + } + } + + // Note: Placeholder for the wasm implementation, if we decide to include an archive mode in the future + #[cfg(target_arch = "wasm32")] + fn run( + mut archive_receiver: mpsc::UnboundedReceiver, + options: ArchiveStorageOptions, + work_dir: String, + ) { + unimplemented!() + } + + pub fn start(options: ArchiveStorageOptions, work_dir: String) -> Self { + let (archive_sender, archive_receiver) = mpsc::unbounded_channel::(); + + #[cfg(not(target_arch = "wasm32"))] + Self::start_native(archive_receiver, options, work_dir); + + #[cfg(target_arch = "wasm32")] + Self::start_wasm(archive_receiver, options, work_dir); + + Self::new(archive_sender) + } + + #[cfg(not(target_arch = "wasm32"))] + fn start_native( + archive_receiver: mpsc::UnboundedReceiver, + options: ArchiveStorageOptions, + work_dir: String, + ) { + let runtime = tokio::runtime::Builder::new_current_thread() + .enable_all() + .build() + .unwrap(); + + thread::Builder::new() + .name("openmina_archive".to_owned()) + .spawn(move || { + runtime.block_on(Self::run(archive_receiver, options, work_dir)); + }) + .unwrap(); + } + + #[cfg(target_arch = "wasm32")] + fn start_wasm( + archive_receiver: mpsc::UnboundedReceiver, + options: ArchiveStorageOptions, + work_dir: String, + ) { + thread::Builder::new() + .name("openmina_archive".to_owned()) + .spawn(move || { + Self::run(archive_receiver, options, work_dir); + }) + .unwrap(); + } +} + +impl node::transition_frontier::archive::archive_service::ArchiveService for NodeService { + fn send_to_archive(&mut self, data: BlockApplyResult) { + if let Some(archive) = self.archive.as_mut() { + if let Err(e) = archive.archive_sender.send(data) { + node::core::warn!( + summary = "Failed sending diff to archive service", + error = e.to_string() + ); + } + } + } +} + +// Note: Placeholder for the wasm implementation, if we decide to include an archive mode in the future +#[cfg(target_arch = "wasm32")] +mod rpc {} + +fn write_to_local_storage(base_path: &str, key: &str, data: &[u8]) -> Result<(), Error> { + use std::fs::{create_dir_all, File}; + use std::path::Path; + + let path = Path::new(base_path).join(key); + if let Some(parent) = path.parent() { + create_dir_all(parent) + .map_err(|e| Error::UploadError(format!("Directory creation failed: {}", e)))?; + } + + let mut file = File::create(&path) + .map_err(|e| Error::UploadError(format!("File creation failed: {}", e)))?; + + file.write_all(data) + .map_err(|e| Error::UploadError(format!("File write failed: {}", e)))?; + + Ok(()) +} diff --git a/node/common/src/service/archive/rpc.rs b/node/common/src/service/archive/rpc.rs new file mode 100644 index 0000000000..cd31ea593d --- /dev/null +++ b/node/common/src/service/archive/rpc.rs @@ -0,0 +1,412 @@ +use binprot::BinProtWrite; +use mina_p2p_messages::rpc_kernel::{Message, NeedsLength, Query, RpcMethod}; +use mina_p2p_messages::v2::{self, ArchiveRpc}; +use mio::event::Event; +use mio::net::TcpStream; +use mio::{Events, Interest, Poll, Registry, Token}; +use std::io::{self, Read, Write}; +use std::net::SocketAddr; + +const MAX_RECURSION_DEPTH: u8 = 25; + +// messages +const HEADER_MSG: [u8; 7] = [2, 253, 82, 80, 67, 0, 1]; +const OK_MSG: [u8; 5] = [2, 1, 0, 
1, 0];
+// Note: this is the close message that the ocaml node receives
+const CLOSE_MSG: [u8; 7] = [2, 254, 167, 7, 0, 1, 0];
+const HEARTBEAT_MSG: [u8; 1] = [0];
+
+fn prepend_length(message: &[u8]) -> Vec {
+    let length = message.len() as u64;
+    let mut length_bytes = length.to_le_bytes().to_vec();
+    length_bytes.append(&mut message.to_vec());
+    length_bytes
+}
+pub enum HandleResult {
+    MessageSent,
+    ConnectionClosed,
+    ConnectionAlive,
+    MessageWouldBlock,
+}
+
+impl HandleResult {
+    pub fn should_retry(&self) -> bool {
+        matches!(self, Self::ConnectionClosed)
+    }
+}
+
+pub fn send_diff(address: SocketAddr, data: v2::ArchiveRpc) -> io::Result {
+    let rpc = encode_to_rpc(data)?;
+    process_rpc(address, &rpc)
+}
+
+fn encode_to_rpc(data: ArchiveRpc) -> io::Result> {
+    type Method = mina_p2p_messages::rpc::SendArchiveDiffUnversioned;
+    let mut v = vec![0; 8];
+
+    if let Err(e) = Message::Query(Query {
+        tag: Method::NAME.into(),
+        version: Method::VERSION,
+        id: 1,
+        data: NeedsLength(data),
+    })
+    .binprot_write(&mut v)
+    {
+        node::core::warn!(
+            summary = "Failed binprot serialization",
+            error = e.to_string()
+        );
+        return Err(e);
+    }
+
+    let payload_length = (v.len() - 8) as u64;
+    v[..8].copy_from_slice(&payload_length.to_le_bytes());
+    // Bake in the heartbeat message
+    v.splice(0..0, prepend_length(&HEARTBEAT_MSG).iter().cloned());
+    // also add the heartbeat message to the end of the message
+    v.extend_from_slice(&prepend_length(&HEARTBEAT_MSG));
+
+    Ok(v)
+}
+
+fn process_rpc(address: SocketAddr, data: &[u8]) -> io::Result {
+    let mut poll = Poll::new()?;
+    let mut events = Events::with_capacity(128);
+    let mut event_count = 0;
+
+    // We still need a token even for one connection
+    const TOKEN: Token = Token(0);
+
+    let mut stream = TcpStream::connect(address)?;
+
+    let mut handshake_received = false;
+    let mut handshake_sent = false;
+    let mut message_sent = false;
+    let mut first_heartbeat_received = false;
+    poll.registry()
+        .register(&mut stream, TOKEN, Interest::WRITABLE)?;
+
+    loop {
+        if let Err(e) = poll.poll(&mut events, None) {
+            if interrupted(&e) {
+                continue;
+            }
+            return Err(e);
+        }
+
+        for event in events.iter() {
+            event_count += 1;
+            // Failsafe to prevent infinite loops
+            if event_count > super::MAX_EVENT_COUNT {
+                return Err(io::Error::new(
+                    io::ErrorKind::Other,
+                    format!("FAILSAFE triggered, event count: {}", event_count),
+                ));
+            }
+            match event.token() {
+                TOKEN => {
+                    match handle_connection_event(
+                        poll.registry(),
+                        &mut stream,
+                        event,
+                        data,
+                        &mut handshake_received,
+                        &mut handshake_sent,
+                        &mut message_sent,
+                        &mut first_heartbeat_received,
+                    )? {
+                        HandleResult::MessageSent => return Ok(HandleResult::MessageSent),
+                        HandleResult::ConnectionClosed => {
+                            return Ok(HandleResult::ConnectionClosed)
+                        }
+                        HandleResult::MessageWouldBlock => {
+                            // do nothing, wait for the next event
+                            continue;
+                        }
+                        HandleResult::ConnectionAlive => {
+                            // keep swapping between readable and writable until we successfully send the message, then keep in read mode.
+ if message_sent { + poll.registry().reregister( + &mut stream, + TOKEN, + Interest::READABLE, + )?; + continue; + } + + if event.is_writable() { + poll.registry().reregister( + &mut stream, + TOKEN, + Interest::READABLE, + )?; + } else { + poll.registry().reregister( + &mut stream, + TOKEN, + Interest::WRITABLE, + )?; + } + continue; + } + } + } + _ => unreachable!(), + } + } + } +} + +fn _send_heartbeat(connection: &mut TcpStream) -> io::Result<HandleResult> { + match connection.write_all(&HEARTBEAT_MSG) { + Ok(_) => { + connection.flush()?; + Ok(HandleResult::ConnectionAlive) + } + Err(ref err) if would_block(err) => Ok(HandleResult::MessageWouldBlock), + Err(ref err) if interrupted(err) => Ok(HandleResult::MessageWouldBlock), + Err(err) => Err(err), + } +} + +struct RecursionGuard { + count: u8, + max_depth: u8, +} + +impl RecursionGuard { + fn new(max_depth: u8) -> Self { + Self { + count: 0, + max_depth, + } + } + + fn increment(&mut self) -> io::Result<()> { + self.count += 1; + if self.count > self.max_depth { + Err(io::ErrorKind::WriteZero.into()) + } else { + Ok(()) + } + } +} + +fn send_data<F>( + connection: &mut TcpStream, + data: &[u8], + recursion_guard: &mut RecursionGuard, + // closure that is called when the data has been sent + on_success: F, +) -> io::Result<HandleResult> +where + F: FnOnce() -> io::Result<HandleResult>, +{ + match connection.write(data) { + Ok(n) if n < data.len() => { + recursion_guard.increment()?; + let remaining_data = data[n..].to_vec(); + send_data(connection, &remaining_data, recursion_guard, on_success) + } + Ok(_) => { + connection.flush()?; + on_success() + } + Err(ref err) if would_block(err) => Ok(HandleResult::MessageWouldBlock), + Err(ref err) if interrupted(err) => { + recursion_guard + .increment() + .map_err(|_| io::ErrorKind::Interrupted)?; + send_data(connection, data, recursion_guard, on_success) + } + Err(err) => Err(err), + } +} + +#[allow(clippy::too_many_arguments)] +fn handle_connection_event( + registry: &Registry, + connection: &mut TcpStream, + event: &Event, + data: &[u8], + handshake_received: &mut bool, + handshake_sent: &mut bool, + message_sent: &mut bool, + first_heartbeat_received: &mut bool, +) -> io::Result<HandleResult> { + if event.is_writable() { + if !*handshake_sent { + let msg = prepend_length(&HEADER_MSG); + send_data( + connection, + &msg, + &mut RecursionGuard::new(MAX_RECURSION_DEPTH), + || { + *handshake_sent = true; + Ok(HandleResult::ConnectionAlive) + }, + )?; + return Ok(HandleResult::ConnectionAlive); + } + + if *handshake_received && *handshake_sent && !*message_sent && *first_heartbeat_received { + send_data( + connection, + data, + &mut RecursionGuard::new(MAX_RECURSION_DEPTH), + || { + *message_sent = true; + Ok(HandleResult::ConnectionAlive) + }, + )?; + } + } + + if event.is_readable() { + let mut connection_closed = false; + let mut received_data = vec![0; 4096]; + let mut bytes_read = 0; + + loop { + match connection.read(&mut received_data[bytes_read..]) { + Ok(0) => { + connection_closed = true; + break; + } + Ok(n) => { + bytes_read += n; + if bytes_read == received_data.len() { + received_data.resize(received_data.len() + 1024, 0); + } + } + // Would block "errors" are the OS's way of saying that the + // connection is not actually ready to perform this I/O operation. + Err(ref err) if would_block(err) => break, + Err(ref err) if interrupted(err) => continue, + // Other errors we'll consider fatal.
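`send_data` above retries partial and interrupted writes recursively, with `RecursionGuard` capping the depth so a stalled socket cannot recurse forever. An iterative equivalent may be easier to follow; this is a hypothetical helper (not the patch's API) that treats short writes and `Interrupted` the same way, with the retry budget only charged on interrupts:

```rust
use std::io::{self, Write};

// Iterative sketch of the bounded-retry write: keep writing the unsent tail
// until the whole buffer is flushed or the writer reports a real error.
fn write_all_with_budget<W: Write>(w: &mut W, mut data: &[u8], mut budget: u8) -> io::Result<()> {
    while !data.is_empty() {
        match w.write(data) {
            Ok(0) => return Err(io::ErrorKind::WriteZero.into()),
            // Short write: drop the bytes that made it out and go again.
            Ok(n) => data = &data[n..],
            Err(e) if e.kind() == io::ErrorKind::Interrupted => {
                // Spend one unit of the retry budget, mirroring RecursionGuard.
                budget = budget.checked_sub(1).ok_or(io::ErrorKind::Interrupted)?;
            }
            Err(e) => return Err(e),
        }
    }
    w.flush()
}

fn main() -> io::Result<()> {
    let mut sink = Vec::new();
    write_all_with_budget(&mut sink, b"hello archive", 25)?;
    assert_eq!(sink, b"hello archive");
    Ok(())
}
```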
+ Err(err) => return Err(err), + } + } + + if connection_closed { + registry.deregister(connection)?; + connection.shutdown(std::net::Shutdown::Both)?; + return Ok(HandleResult::ConnectionClosed); + } + + if bytes_read < 8 { + // malformed message, at least the length should be present + return Ok(HandleResult::ConnectionAlive); + } + + let raw_message = RawMessage::from_bytes(&received_data[..bytes_read]); + let messages = raw_message.parse_raw()?; + + for message in messages { + match message { + ParsedMessage::Header => { + *handshake_received = true; + } + ParsedMessage::Ok | ParsedMessage::Close => { + connection.flush()?; + registry.deregister(connection)?; + connection.shutdown(std::net::Shutdown::Both)?; + return Ok(HandleResult::MessageSent); + } + ParsedMessage::Heartbeat => { + *first_heartbeat_received = true; + } + ParsedMessage::Unknown(msg) => { + registry.deregister(connection)?; + connection.shutdown(std::net::Shutdown::Both)?; + node::core::warn!( + summary = "Received unknown message", + msg = format!("{:?}", msg) + ); + return Ok(HandleResult::ConnectionClosed); + } + } + } + } + + Ok(HandleResult::ConnectionAlive) +} + +fn would_block(err: &io::Error) -> bool { + err.kind() == io::ErrorKind::WouldBlock +} + +fn interrupted(err: &io::Error) -> bool { + err.kind() == io::ErrorKind::Interrupted +} + +enum ParsedMessage { + Heartbeat, + Ok, + Close, + Header, + Unknown(Vec<u8>), +} + +struct RawMessage { + length: usize, + data: Vec<u8>, +} + +impl RawMessage { + fn from_bytes(bytes: &[u8]) -> Self { + Self { + length: bytes.len(), + data: bytes.to_vec(), + } + } + + fn parse_raw(&self) -> io::Result<Vec<ParsedMessage>> { + let mut parsed_bytes: usize = 0; + + // more than one message can be sent in a single packet + let mut messages = Vec::new(); + + while parsed_bytes < self.length { + // first 8 bytes are the length in little endian + let length = u64::from_le_bytes( + self.data[parsed_bytes..parsed_bytes + 8] + .try_into() + .unwrap(), + ) as usize; + parsed_bytes += 8; + + if parsed_bytes + length > self.length { + return Err(io::Error::new( + io::ErrorKind::InvalidData, + "Message length exceeds raw message length", + )); + } + + if length == HEADER_MSG.len() + && self.data[parsed_bytes..parsed_bytes + length] == HEADER_MSG + { + messages.push(ParsedMessage::Header); + } else if length == OK_MSG.len() + && self.data[parsed_bytes..parsed_bytes + length] == OK_MSG + { + messages.push(ParsedMessage::Ok); + } else if length == HEARTBEAT_MSG.len() + && self.data[parsed_bytes..parsed_bytes + length] == HEARTBEAT_MSG + { + messages.push(ParsedMessage::Heartbeat); + } else if length == CLOSE_MSG.len() + && self.data[parsed_bytes..parsed_bytes + length] == CLOSE_MSG + { + messages.push(ParsedMessage::Close); + } else { + messages.push(ParsedMessage::Unknown( + self.data[parsed_bytes..parsed_bytes + length].to_vec(), + )); + } + + parsed_bytes += length; + } + Ok(messages) + } +} diff --git a/node/common/src/service/block_producer/mod.rs b/node/common/src/service/block_producer/mod.rs index 2212f1efd9..da23afdb26 100644 --- a/node/common/src/service/block_producer/mod.rs +++ b/node/common/src/service/block_producer/mod.rs @@ -3,7 +3,8 @@ mod vrf_evaluator; use std::sync::Arc; use ledger::proofs::{ - block::BlockParams, generate_block_proof, provers::BlockProver, transaction::ProofError, + block::BlockParams, generate_block_proof, provers::BlockProver, + transaction::debug::KimchiProofError, }; use mina_p2p_messages::{ bigint::BigInt, @@ -22,8 +23,8 @@ use crate::EventSender; pub struct
BlockProducerService { provers: Option<BlockProver>, keypair: AccountSecretKey, - vrf_evaluation_sender: mpsc::UnboundedSender<VrfEvaluatorInput>, - prove_sender: mpsc::UnboundedSender<( + vrf_evaluation_sender: mpsc::TrackedUnboundedSender<VrfEvaluatorInput>, + prove_sender: mpsc::TrackedUnboundedSender<( BlockProver, StateHash, Box<ProverExtendBlockchainInputStableV2>, @@ -33,8 +34,8 @@ pub struct BlockProducerService { impl BlockProducerService { pub fn new( keypair: AccountSecretKey, - vrf_evaluation_sender: mpsc::UnboundedSender<VrfEvaluatorInput>, - prove_sender: mpsc::UnboundedSender<( + vrf_evaluation_sender: mpsc::TrackedUnboundedSender<VrfEvaluatorInput>, + prove_sender: mpsc::TrackedUnboundedSender<( BlockProver, StateHash, Box<ProverExtendBlockchainInputStableV2>, @@ -82,26 +83,38 @@ impl BlockProducerService { pub fn keypair(&self) -> AccountSecretKey { self.keypair.clone() } + + pub fn vrf_pending_requests(&self) -> usize { + self.vrf_evaluation_sender.len() + } + + pub fn prove_pending_requests(&self) -> usize { + self.prove_sender.len() + } } fn prover_loop( keypair: AccountSecretKey, event_sender: EventSender, - mut rx: mpsc::UnboundedReceiver<( + mut rx: mpsc::TrackedUnboundedReceiver<( BlockProver, StateHash, Box<ProverExtendBlockchainInputStableV2>, )>, ) { - while let Some((provers, block_hash, mut input)) = rx.blocking_recv() { - let res = prove(provers, &mut input, &keypair, false).map_err(|err| format!("{err:?}")); - if res.is_err() { - if let Err(error) = dump_failed_block_proof_input(block_hash.clone(), input) { + while let Some(msg) = rx.blocking_recv() { + let (provers, block_hash, mut input) = msg.0; + let res = prove(provers, &mut input, &keypair, false); + if let Err(error) = &res { + openmina_core::error!(message = "Block proof failed", error = format!("{error:?}")); + if let Err(error) = dump_failed_block_proof_input(block_hash.clone(), input, error) { openmina_core::error!( - openmina_core::log::system_time(); - message = "Failure when dumping failed block proof inputs", error = format!("{error}")); + message = "Failure when dumping failed block proof inputs", + error = format!("{error}") + ); } } + let res = res.map_err(|err| err.to_string()); let _ = event_sender.send(BlockProducerEvent::BlockProve(block_hash, res).into()); } } @@ -111,7 +124,7 @@ pub fn prove( input: &mut ProverExtendBlockchainInputStableV2, keypair: &AccountSecretKey, only_verify_constraints: bool, -) -> Result<Arc<MinaBaseProofStableV2>, ProofError> { +) -> anyhow::Result<Arc<MinaBaseProofStableV2>> { let height = input .next_state .body @@ -160,7 +173,7 @@ impl node::service::BlockProducerService for crate::NodeService { .as_ref() .expect("prove shouldn't be requested if block producer isn't initialized") .prove_sender - .send((provers, block_hash, input)); + .tracked_send((provers, block_hash, input)); } fn with_producer_keypair<T>(&self, f: impl FnOnce(&AccountSecretKey) -> T) -> Option<T> { @@ -171,7 +184,9 @@ fn dump_failed_block_proof_input( block_hash: StateHash, mut input: Box<ProverExtendBlockchainInputStableV2>, + error: &anyhow::Error, ) -> std::io::Result<()> { + use ledger::proofs::transaction::ProofError; use rsa::Pkcs1v15Encrypt; const PUBLIC_KEY: &str = "-----BEGIN RSA PUBLIC KEY----- @@ -187,6 +202,8 @@ kGqG7QLzSPjAtP/YbUponwaD+t+A0kBg0hV4hhcJOkPeA2NOi04K93bz3HuYCVRe struct DumpBlockProof { input: Box<ProverExtendBlockchainInputStableV2>, key: Vec<u8>, + error: Vec<u8>, + kimchi_error_with_context: Option<KimchiProofError>, } let producer_private_key = { @@ -210,9 +227,16 @@ kGqG7QLzSPjAtP/YbUponwaD+t+A0kBg0hV4hhcJOkPeA2NOi04K93bz3HuYCVRe // IMPORTANT: Make sure that `input` doesn't leak the private key.
input.prover_state.producer_private_key = v2::SignatureLibPrivateKeyStableV1(BigInt::one()); + let error_str = error.to_string(); + let input = DumpBlockProof { input, key: encrypted_producer_private_key, + error: error_str.as_bytes().to_vec(), + kimchi_error_with_context: match error.downcast_ref::<ProofError>() { + Some(ProofError::ProvingErrorWithContext(context)) => Some(context.clone()), + _ => None, + }, }; let debug_dir = openmina_core::get_debug_dir(); diff --git a/node/common/src/service/block_producer/vrf_evaluator.rs b/node/common/src/service/block_producer/vrf_evaluator.rs index e34cc3c29a..11bfa54c72 100644 --- a/node/common/src/service/block_producer/vrf_evaluator.rs +++ b/node/common/src/service/block_producer/vrf_evaluator.rs @@ -5,7 +5,7 @@ use node::{ vrf_evaluator::{VrfEvaluationOutputWithHash, VrfEvaluatorInput}, BlockProducerEvent, }, - core::channels::mpsc::{UnboundedReceiver, UnboundedSender}, + core::channels::mpsc::{TrackedUnboundedReceiver, UnboundedSender}, event_source::Event, }; use vrf::{VrfEvaluationInput, VrfEvaluationOutput}; @@ -14,7 +14,7 @@ use crate::NodeService; pub fn vrf_evaluator( event_sender: UnboundedSender<Event>, - mut vrf_evaluation_receiver: UnboundedReceiver<VrfEvaluatorInput>, + mut vrf_evaluation_receiver: TrackedUnboundedReceiver<VrfEvaluatorInput>, keypair: Keypair, ) { while let Some(vrf_evaluator_input) = vrf_evaluation_receiver.blocking_recv() { @@ -28,7 +28,7 @@ pub fn vrf_evaluator( global_slot, total_currency, staking_ledger_hash: _, - } = &vrf_evaluator_input; + } = &*vrf_evaluator_input; let vrf_result = delegator_table .iter() @@ -73,7 +73,7 @@ impl node::block_producer_effectful::vrf_evaluator_effectful::BlockProducerVrfEv { fn evaluate(&mut self, data: VrfEvaluatorInput) { if let Some(bp) = self.block_producer.as_mut() { - let _ = bp.vrf_evaluation_sender.send(data); + let _ = bp.vrf_evaluation_sender.tracked_send(data); } } } diff --git a/node/common/src/service/builder.rs b/node/common/src/service/builder.rs index 6c2015586a..586ee9d4d0 100644 --- a/node/common/src/service/builder.rs +++ b/node/common/src/service/builder.rs @@ -1,5 +1,3 @@ -use std::net::SocketAddr; - use ledger::proofs::provers::BlockProver; use node::{ account::AccountSecretKey, @@ -25,7 +23,10 @@ use crate::{ EventReceiver, EventSender, NodeService, }; -use super::{archive::ArchiveService, block_producer::BlockProducerService}; +use super::{ + archive::{config::ArchiveStorageOptions, ArchiveService}, + block_producer::BlockProducerService, +}; pub struct NodeServiceCommonBuilder { rng_seed: [u8; 32], @@ -98,8 +99,8 @@ impl NodeServiceCommonBuilder { self } - pub fn archive_init(&mut self, address: SocketAddr) -> &mut Self { - self.archive = Some(ArchiveService::start(address)); + pub fn archive_init(&mut self, options: ArchiveStorageOptions, work_dir: String) -> &mut Self { + self.archive = Some(ArchiveService::start(options, work_dir)); self } @@ -111,6 +112,7 @@ impl NodeServiceCommonBuilder { self.p2p = Some(::init( secret_key.clone(), task_spawner, + self.rng_seed, )); self } @@ -144,6 +146,8 @@ impl NodeServiceCommonBuilder { ), ledger_manager, block_producer: self.block_producer, + // initialized in state machine.
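Several senders in this patch migrate from `mpsc::UnboundedSender` to `mpsc::TrackedUnboundedSender` (VRF evaluation, block proving, WebRTC commands, snark block verification) so that the new `queues()` accessor can report per-service backlog. A rough sketch of the idea follows; names here are illustrative and the real implementation lives in openmina's core channels module and may differ:

```rust
use std::sync::{
    atomic::{AtomicUsize, Ordering},
    Arc,
};
use tokio::sync::mpsc;

// Each message carries a guard whose Drop decrements the counter, so the
// reported length is "sent but not yet fully processed".
struct Guard(Arc<AtomicUsize>);
impl Drop for Guard {
    fn drop(&mut self) {
        self.0.fetch_sub(1, Ordering::Relaxed);
    }
}

struct TrackedSender<T> {
    inner: mpsc::UnboundedSender<(Guard, T)>,
    len: Arc<AtomicUsize>,
}

impl<T> TrackedSender<T> {
    fn tracked_send(&self, msg: T) -> Result<(), mpsc::error::SendError<(Guard, T)>> {
        self.len.fetch_add(1, Ordering::Relaxed);
        self.inner.send((Guard(self.len.clone()), msg))
    }
    fn len(&self) -> usize {
        self.len.load(Ordering::Relaxed)
    }
}

fn tracked_unbounded<T>() -> (TrackedSender<T>, mpsc::UnboundedReceiver<(Guard, T)>) {
    let (tx, rx) = mpsc::unbounded_channel();
    (TrackedSender { inner: tx, len: Arc::new(AtomicUsize::new(0)) }, rx)
}

fn main() {
    let (tx, mut rx) = tracked_unbounded::<u32>();
    tx.tracked_send(7).unwrap();
    assert_eq!(tx.len(), 1);
    let (guard, msg) = rx.blocking_recv().unwrap();
    assert_eq!(msg, 7);
    drop(guard); // processing finished; queue depth drops back to zero
    assert_eq!(tx.len(), 0);
}
```

This also explains why receivers in the diff now destructure `msg.0`: the tracked channel delivers the payload alongside its accounting guard.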
+ snark_worker: None, archive: self.archive, p2p, stats: self.gather_stats.then(Stats::new), diff --git a/node/common/src/service/event_receiver.rs b/node/common/src/service/event_receiver.rs index ad35fb0d95..957c9324ba 100644 --- a/node/common/src/service/event_receiver.rs +++ b/node/common/src/service/event_receiver.rs @@ -9,6 +9,14 @@ pub struct EventReceiver { } impl EventReceiver { + pub fn is_empty(&self) -> bool { + !self.has_next() + } + + pub fn len(&self) -> usize { + self.rx.len() + self.queue.len() + } + /// If `Err(())`, `mpsc::Sender` for this channel was dropped. pub async fn wait_for_events(&mut self) -> Result<(), ()> { if !self.queue.is_empty() { @@ -19,17 +27,8 @@ impl EventReceiver { Ok(()) } - pub fn has_next(&mut self) -> bool { - if self.queue.is_empty() { - if let Some(event) = self.try_next() { - self.queue.push(event); - true - } else { - false - } - } else { - true - } + pub fn has_next(&self) -> bool { + !self.queue.is_empty() || !self.rx.is_empty() } pub fn try_next(&mut self) -> Option<Event> { diff --git a/node/common/src/service/p2p.rs b/node/common/src/service/p2p.rs index 53e696872b..0586619aa3 100644 --- a/node/common/src/service/p2p.rs +++ b/node/common/src/service/p2p.rs @@ -32,7 +32,7 @@ impl webrtc::P2pServiceWebrtc for NodeService { self.event_sender() } - fn cmd_sender(&self) -> &mpsc::UnboundedSender<Cmd> { + fn cmd_sender(&self) -> &mpsc::TrackedUnboundedSender<Cmd> { &self.p2p.webrtc.cmd_sender } diff --git a/node/common/src/service/rpc/mod.rs b/node/common/src/service/rpc/mod.rs index 8d0d906e44..c4e21f3c41 100644 --- a/node/common/src/service/rpc/mod.rs +++ b/node/common/src/service/rpc/mod.rs @@ -9,12 +9,15 @@ pub mod transition_frontier; use node::rpc::{ RpcBestChainResponse, RpcBlockProducerStatsGetResponse, RpcConsensusConstantsGetResponse, - RpcDiscoveryBoostrapStatsResponse, RpcDiscoveryRoutingTableResponse, RpcHealthCheckResponse, - RpcHeartbeatGetResponse, RpcLedgerAccountsResponse, RpcLedgerSlimAccountsResponse, - RpcMessageProgressResponse, RpcPeersGetResponse, RpcReadinessCheckResponse, RpcRequest, - RpcStateGetError, RpcStatusGetResponse, RpcTransactionInjectResponse, - RpcTransactionPoolResponse, RpcTransactionStatusGetResponse, - RpcTransitionFrontierUserCommandsResponse, + RpcConsensusTimeGetResponse, RpcDiscoveryBoostrapStatsResponse, + RpcDiscoveryRoutingTableResponse, RpcGenesisBlockResponse, RpcGetBlockResponse, + RpcHealthCheckResponse, RpcHeartbeatGetResponse, RpcLedgerAccountDelegatorsGetResponse, + RpcLedgerAccountsResponse, RpcLedgerSlimAccountsResponse, RpcLedgerStatusGetResponse, + RpcMessageProgressResponse, RpcPeersGetResponse, RpcPooledUserCommandsResponse, + RpcPooledZkappCommandsResponse, RpcReadinessCheckResponse, RpcRequest, + RpcSnarkPoolCompletedJobsResponse, RpcSnarkPoolPendingJobsGetResponse, RpcStateGetError, + RpcStatusGetResponse, RpcTransactionInjectResponse, RpcTransactionPoolResponse, + RpcTransactionStatusGetResponse, RpcTransitionFrontierUserCommandsResponse, }; use serde::{Deserialize, Serialize}; @@ -271,6 +274,14 @@ impl node::rpc_effectful::RpcService for NodeService { ); rpc_service_impl!(respond_snark_pool_get, RpcSnarkPoolGetResponse); rpc_service_impl!(respond_snark_pool_job_get, RpcSnarkPoolJobGetResponse); + rpc_service_impl!( + respond_snark_pool_completed_jobs_get, + RpcSnarkPoolCompletedJobsResponse + ); + rpc_service_impl!( + respond_snark_pool_pending_jobs_get, + RpcSnarkPoolPendingJobsGetResponse + ); rpc_service_impl!(respond_snarker_job_commit, RpcSnarkerJobCommitResponse); rpc_service_impl!(
respond_snarker_job_spec, @@ -308,6 +319,19 @@ impl node::rpc_effectful::RpcService for NodeService { RpcConsensusConstantsGetResponse ); rpc_service_impl!(respond_transaction_status, RpcTransactionStatusGetResponse); + rpc_service_impl!(respond_block_get, RpcGetBlockResponse); + rpc_service_impl!(respond_pooled_user_commands, RpcPooledUserCommandsResponse); + rpc_service_impl!( + respond_pooled_zkapp_commands, + RpcPooledZkappCommandsResponse + ); + rpc_service_impl!(respond_genesis_block, RpcGenesisBlockResponse); + rpc_service_impl!(respond_consensus_time_get, RpcConsensusTimeGetResponse); + rpc_service_impl!(respond_ledger_status_get, RpcLedgerStatusGetResponse); + rpc_service_impl!( + respond_ledger_account_delegators_get, + RpcLedgerAccountDelegatorsGetResponse + ); } #[cfg(test)] diff --git a/node/common/src/service/service.rs b/node/common/src/service/service.rs index 8cfd81cf75..2daa4fb9b0 100644 --- a/node/common/src/service/service.rs +++ b/node/common/src/service/service.rs @@ -23,6 +23,7 @@ use super::{ p2p::webrtc_with_libp2p::P2pServiceCtx, replay::ReplayerState, rpc::{RpcSender, RpcService}, + snark_worker::SnarkWorker, snarks::SnarkBlockVerifyArgs, EventReceiver, EventSender, }; @@ -38,9 +39,10 @@ pub struct NodeService { pub event_sender: EventSender, pub event_receiver: EventReceiver, - pub snark_block_proof_verify: mpsc::UnboundedSender<SnarkBlockVerifyArgs>, + pub snark_block_proof_verify: mpsc::TrackedUnboundedSender<SnarkBlockVerifyArgs>, pub ledger_manager: LedgerManager, + pub snark_worker: Option<SnarkWorker>, pub block_producer: Option<BlockProducerService>, pub archive: Option<ArchiveService>, pub p2p: P2pServiceCtx, @@ -116,6 +118,7 @@ impl NodeService { event_receiver: mpsc::unbounded_channel().1.into(), snark_block_proof_verify: mpsc::unbounded_channel().0, ledger_manager: LedgerManager::spawn(Default::default()), + snark_worker: None, block_producer: None, archive: None, p2p: P2pServiceCtx::mocked(p2p_sec_key), @@ -142,6 +145,26 @@ impl AsMut for NodeService { impl redux::Service for NodeService {} impl node::Service for NodeService { + fn queues(&mut self) -> node::service::Queues { + node::service::Queues { + events: self.event_receiver.len(), + snark_block_verify: self.snark_block_proof_verify.len(), + ledger: self.ledger_manager.pending_calls(), + vrf_evaluator: self + .block_producer + .as_ref() + .map(|v| v.vrf_pending_requests()), + block_prover: self + .block_producer + .as_ref() + .map(|v| v.prove_pending_requests()), + p2p_webrtc: self.p2p.webrtc.pending_cmds(), + #[cfg(feature = "p2p-libp2p")] + p2p_libp2p: self.p2p.mio.pending_cmds(), + rpc: self.rpc.req_receiver().len(), + } + } + fn stats(&mut self) -> Option<&mut Stats> { self.stats() } @@ -185,9 +208,14 @@ impl node::service::TransitionFrontierGenesisService for NodeService { let res = match config.load() { Err(err) => Err(err.to_string()), Ok((masks, data)) => { - masks - .into_iter() - .for_each(|mask| self.ledger_manager.insert_genesis_ledger(mask)); + let is_archive = self.archive().is_some(); + masks.into_iter().for_each(|mut mask| { + if !is_archive { + // Optimization: We don't need token owners if the node is not an archive + mask.unset_token_owners(); + } + self.ledger_manager.insert_genesis_ledger(mask); + }); Ok(data) } }; diff --git a/node/common/src/service/snark_worker.rs b/node/common/src/service/snark_worker.rs index f14bc94ba0..8c408e593f 100644 --- a/node/common/src/service/snark_worker.rs +++ b/node/common/src/service/snark_worker.rs @@ -1,40 +1,242 @@ +use ledger::proofs::provers::{TransactionProver, ZkappProver}; +use ledger::proofs::zkapp::ZkappParams; +use
ledger::scan_state::scan_state::transaction_snark::SokMessage; use mina_p2p_messages::v2; -use node::external_snark_worker::{ExternalSnarkWorkerError, SnarkWorkSpec}; +use mina_signer::CompressedPubKey; +use node::core::channels::mpsc; +use node::event_source::ExternalSnarkWorkerEvent; +use node::external_snark_worker::{ + ExternalSnarkWorkerError, ExternalSnarkWorkerWorkError, SnarkWorkResult, SnarkWorkSpec, + SnarkWorkSpecError, +}; +use node::snark::TransactionVerifier; use crate::NodeService; -pub struct SnarkWorker {} +use super::EventSender; + +pub struct SnarkWorker { + cmd_sender: mpsc::UnboundedSender<Cmd>, +} + +enum Cmd { + Submit(Box<SnarkWorkSpec>), + Cancel, + Kill, +} impl node::service::ExternalSnarkWorkerService for NodeService { fn start( &mut self, - _public_key: v2::NonZeroCurvePoint, - _fee: v2::CurrencyFeeStableV1, + pub_key: v2::NonZeroCurvePoint, + fee: v2::CurrencyFeeStableV1, + work_verifier: TransactionVerifier, ) -> Result<(), ExternalSnarkWorkerError> { if self.replayer.is_some() { return Ok(()); } - todo!() + let (cmd_sender, cmd_receiver) = mpsc::unbounded_channel(); + // TODO(binier): improve pub key conv + let sok_message = SokMessage::create( + (&fee).into(), + CompressedPubKey::from_address(&pub_key.to_string()).unwrap(), + ); + self.snark_worker = Some(SnarkWorker { cmd_sender }); + let event_sender = self.event_sender().clone(); + + node::core::thread::Builder::new() + .name("snark_worker".to_owned()) + .spawn(move || worker_thread(cmd_receiver, event_sender, sok_message, work_verifier)) + .map(|_| ()) + .map_err(|err| ExternalSnarkWorkerError::Error(err.to_string())) } fn kill(&mut self) -> Result<(), ExternalSnarkWorkerError> { if self.replayer.is_some() { return Ok(()); } - todo!() + + if self + .snark_worker + .as_ref() + .and_then(|s| s.cmd_sender.send(Cmd::Kill).ok()) + .is_none() + { + return Err(ExternalSnarkWorkerError::NotRunning); + } + Ok(()) } - fn submit(&mut self, _spec: SnarkWorkSpec) -> Result<(), ExternalSnarkWorkerError> { + fn submit(&mut self, spec: SnarkWorkSpec) -> Result<(), ExternalSnarkWorkerError> { if self.replayer.is_some() { return Ok(()); } - todo!() + + if self + .snark_worker + .as_ref() + .and_then(|s| s.cmd_sender.send(Cmd::Submit(spec.into())).ok()) + .is_none() + { + return Err(ExternalSnarkWorkerError::NotRunning); + } + Ok(()) } fn cancel(&mut self) -> Result<(), ExternalSnarkWorkerError> { if self.replayer.is_some() { return Ok(()); } - todo!() + + // TODO(binier): for wasm threads, call terminate: + // https://developer.mozilla.org/en-US/docs/Web/API/Worker/terminate + if self + .snark_worker + .as_ref() + .and_then(|s| s.cmd_sender.send(Cmd::Cancel).ok()) + .is_none() + { + return Err(ExternalSnarkWorkerError::NotRunning); + } + Ok(()) + } +} + +fn worker_thread( + mut cmd_receiver: mpsc::UnboundedReceiver<Cmd>, + event_sender: EventSender, + sok_message: SokMessage, + work_verifier: TransactionVerifier, +) { + let _ = event_sender.send(ExternalSnarkWorkerEvent::Started.into()); + let tx_prover = TransactionProver::make(Some(work_verifier.clone())); + let zkapp_prover = ZkappProver::make(Some(work_verifier)); + while let Some(cmd) = cmd_receiver.blocking_recv() { + match cmd { + Cmd::Kill => { + let _ = event_sender.send(ExternalSnarkWorkerEvent::Killed.into()); + return; + } + Cmd::Cancel => { + // can't cancel as it's a blocking thread. Once this + // is moved to another process, kill it.
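The worker thread above drains commands strictly one at a time on a blocking thread, which is why `Cancel` cannot interrupt a proof that is already running; it is only observed between jobs. A toy skeleton of that same loop (std channels, illustrative types, not the patch's API):

```rust
use std::sync::mpsc;
use std::thread;

// Stand-in for the worker's command set.
enum Cmd {
    Submit(String),
    Cancel,
    Kill,
}

// Commands are handled sequentially: a Cancel queued behind a Submit is
// only seen after the (potentially long) proving step returns.
fn worker(rx: mpsc::Receiver<Cmd>) {
    while let Ok(cmd) = rx.recv() {
        match cmd {
            Cmd::Kill => return,
            Cmd::Cancel => println!("cancelled (only takes effect between jobs)"),
            Cmd::Submit(spec) => println!("proving {spec}..."),
        }
    }
}

fn main() {
    let (tx, rx) = mpsc::channel();
    let handle = thread::spawn(move || worker(rx));
    tx.send(Cmd::Submit("job-1".into())).unwrap();
    tx.send(Cmd::Cancel).unwrap();
    tx.send(Cmd::Kill).unwrap();
    handle.join().unwrap();
}
```

Moving the worker into a separate process (or terminating the thread, as the wasm TODO notes) is what would make cancellation preemptive.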
+ let _ = event_sender.send(ExternalSnarkWorkerEvent::WorkCancelled.into()); + } + Cmd::Submit(spec) => { + let event = match prove_spec(&tx_prover, &zkapp_prover, *spec, &sok_message) { + Err(err) => ExternalSnarkWorkerEvent::WorkError(err), + Ok(res) => ExternalSnarkWorkerEvent::WorkResult(res), + }; + + let _ = event_sender.send(event.into()); + } + } + } +} + +fn prove_spec( + tx_prover: &TransactionProver, + zkapp_prover: &ZkappProver, + spec: SnarkWorkSpec, + sok_message: &SokMessage, +) -> Result<SnarkWorkResult, ExternalSnarkWorkerWorkError> { + match spec { + SnarkWorkSpec::One(single) => prove_single(tx_prover, zkapp_prover, single, sok_message) + .map(v2::TransactionSnarkWorkTStableV2Proofs::One), + SnarkWorkSpec::Two((one, two)) => Ok(v2::TransactionSnarkWorkTStableV2Proofs::Two(( + prove_single(tx_prover, zkapp_prover, one, sok_message)?, + prove_single(tx_prover, zkapp_prover, two, sok_message)?, + ))), + } + .map(Into::into) +} + +fn invalid_bigint_err() -> ExternalSnarkWorkerWorkError { + ExternalSnarkWorkerWorkError::WorkSpecError(SnarkWorkSpecError::InvalidBigInt) +} + +fn prove_single( + tx_prover: &TransactionProver, + zkapp_prover: &ZkappProver, + single: v2::SnarkWorkerWorkerRpcsVersionedGetWorkV2TResponseA0Single, + sok_message: &SokMessage, +) -> Result<v2::LedgerProofProdStableV2, ExternalSnarkWorkerWorkError> { + use ledger::proofs::{merge::MergeParams, transaction::TransactionParams}; + + let (snarked_ledger_state, res) = match single { + v2::SnarkWorkerWorkerRpcsVersionedGetWorkV2TResponseA0Single::Transition( + snarked_ledger_state, + witness, + ) => { + if let v2::MinaTransactionTransactionStableV2::Command(cmd) = &witness.transaction { + if matches!(&**cmd, v2::MinaBaseUserCommandStableV2::ZkappCommand(_)) { + return prove_zkapp(zkapp_prover, snarked_ledger_state, witness, sok_message); + } + } + let res = ledger::proofs::generate_tx_proof(TransactionParams { + statement: &snarked_ledger_state.0, + tx_witness: &witness, + message: sok_message, + tx_step_prover: &tx_prover.tx_step_prover, + tx_wrap_prover: &tx_prover.tx_wrap_prover, + only_verify_constraints: false, + expected_step_proof: None, + ocaml_wrap_witness: None, + }); + (snarked_ledger_state.0, res) + } + v2::SnarkWorkerWorkerRpcsVersionedGetWorkV2TResponseA0Single::Merge(data) => { + let (snarked_ledger_state, proof_1, proof_2) = *data; + let res = ledger::proofs::generate_merge_proof(MergeParams { + statement: (&snarked_ledger_state.0) + .try_into() + .map_err(|_| invalid_bigint_err())?, + proofs: &[proof_1, proof_2], + message: sok_message, + step_prover: &tx_prover.merge_step_prover, + wrap_prover: &tx_prover.tx_wrap_prover, + only_verify_constraints: false, + expected_step_proof: None, + ocaml_wrap_witness: None, + }); + (snarked_ledger_state.0, res) + } + }; + res.map_err(|err| ExternalSnarkWorkerWorkError::Error(err.to_string())) + .map(|proof| { + v2::LedgerProofProdStableV2(v2::TransactionSnarkStableV2 { + statement: v2::MinaStateSnarkedLedgerStateWithSokStableV2 { + source: snarked_ledger_state.source, + target: snarked_ledger_state.target, + connecting_ledger_left: snarked_ledger_state.connecting_ledger_left, + connecting_ledger_right: snarked_ledger_state.connecting_ledger_right, + supply_increase: snarked_ledger_state.supply_increase, + fee_excess: snarked_ledger_state.fee_excess, + sok_digest: (&sok_message.digest()).into(), + }, + proof: v2::TransactionSnarkProofStableV2((&proof).into()), + }) + }) +} + +fn prove_zkapp( + zkapp_prover: &ZkappProver, + snarked_ledger_state: v2::MinaStateSnarkedLedgerStateStableV2, + witness: v2::TransactionWitnessStableV2, + sok_message: &SokMessage, +) ->
Result<v2::LedgerProofProdStableV2, ExternalSnarkWorkerWorkError> { + ledger::proofs::generate_zkapp_proof(ZkappParams { + statement: &snarked_ledger_state.0, + tx_witness: &witness, + message: sok_message, + step_opt_signed_opt_signed_prover: &zkapp_prover.step_opt_signed_opt_signed_prover, + step_opt_signed_prover: &zkapp_prover.step_opt_signed_prover, + step_proof_prover: &zkapp_prover.step_proof_prover, + merge_step_prover: &zkapp_prover.merge_step_prover, + tx_wrap_prover: &zkapp_prover.tx_wrap_prover, + opt_signed_path: None, + proved_path: None, + }) + .map(|proof| (&proof).into()) + .map_err(|err| ExternalSnarkWorkerWorkError::Error(err.to_string())) +} diff --git a/node/common/src/service/snarks.rs b/node/common/src/service/snarks.rs index 7d2d496c84..d450973b56 100644 --- a/node/common/src/service/snarks.rs +++ b/node/common/src/service/snarks.rs @@ -37,18 +37,18 @@ pub struct SnarkBlockVerifyArgs { impl NodeService { pub fn snark_block_proof_verifier_spawn( event_sender: EventSender, - ) -> mpsc::UnboundedSender<SnarkBlockVerifyArgs> { - let (tx, mut rx) = mpsc::unbounded_channel(); + ) -> mpsc::TrackedUnboundedSender<SnarkBlockVerifyArgs> { + let (tx, mut rx) = mpsc::tracked_unbounded_channel(); thread::Builder::new() .name("block_proof_verifier".to_owned()) .spawn(move || { - while let Some(SnarkBlockVerifyArgs { - req_id, - verifier_index, - verifier_srs, - block, - }) = rx.blocking_recv() - { + while let Some(msg) = rx.blocking_recv() { + let SnarkBlockVerifyArgs { + req_id, + verifier_index, + verifier_srs, + block, + } = msg.0; eprintln!("verify({}) - start", block.hash_ref()); let header = block.header_ref(); let result = { @@ -90,7 +90,7 @@ impl node::service::SnarkBlockVerifyService for NodeService { verifier_srs, block, }; - let _ = self.snark_block_proof_verify.send(args); + let _ = self.snark_block_proof_verify.tracked_send(args); } } @@ -155,31 +155,33 @@ impl node::service::SnarkUserCommandVerifyService for NodeService { } let tx = self.event_sender().clone(); - let result = { - let (verified, invalid): (Vec<_>, Vec<_>) = ledger::verifier::Verifier - .verify_commands(commands, None) - .into_iter() - .partition(Result::is_ok); - - let verified: Vec<_> = verified.into_iter().map(Result::unwrap).collect(); - let invalid: Vec<_> = invalid.into_iter().map(Result::unwrap_err).collect(); - - if !invalid.is_empty() { - let transaction_pool_errors = invalid + rayon::spawn_fifo(move || { + let result = { + let (verified, invalid): (Vec<_>, Vec<_>) = ledger::verifier::Verifier + .verify_commands(commands, None) .into_iter() - .map(TransactionError::Verifier) - .collect(); - Err(TransactionPoolErrors::BatchedErrors( - transaction_pool_errors, - )) - } else { - Ok(verified) - } - }; + .partition(Result::is_ok); + + let verified: Vec<_> = verified.into_iter().map(Result::unwrap).collect(); + let invalid: Vec<_> = invalid.into_iter().map(Result::unwrap_err).collect(); + + if !invalid.is_empty() { + let transaction_pool_errors = invalid + .into_iter() + .map(TransactionError::Verifier) + .collect(); + Err(TransactionPoolErrors::BatchedErrors( + transaction_pool_errors, + )) + } else { + Ok(verified) + } + }; - let result = result.map_err(|err| err.to_string()); + let result = result.map_err(|err| err.to_string()); - let _ = tx.send(SnarkEvent::UserCommandVerify(req_id, result).into()); + let _ = tx.send(SnarkEvent::UserCommandVerify(req_id, result).into()); + }); } } diff --git a/node/invariants/Cargo.toml b/node/invariants/Cargo.toml index 0710b4df74..89713b43a5 100644 --- a/node/invariants/Cargo.toml +++ b/node/invariants/Cargo.toml @@ -1,6 +1,6 @@ [package] name =
"openmina-node-invariants" -version = "0.14.0" +version = "0.16.0" edition = "2021" license = "Apache-2.0" diff --git a/node/native/Cargo.toml b/node/native/Cargo.toml index 932584918e..4556a5ecbf 100644 --- a/node/native/Cargo.toml +++ b/node/native/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "openmina-node-native" -version = "0.14.0" +version = "0.16.0" edition = "2021" license = "Apache-2.0" @@ -22,6 +22,7 @@ redux = { workspace = true, features=["serializable_callbacks"] } ledger = { workspace = true } mina-p2p-messages = { workspace = true } mina-signer = { workspace = true } +o1-utils = { workspace = true } bytes = "1.4.0" tracing-subscriber = { version = "0.3.17", features = ["json", "env-filter"] } tracing = "0.1.37" @@ -33,6 +34,8 @@ jsonpath-rust = "0.5.0" sha3 = "0.10.8" strum = "0.26.2" strum_macros = "0.26.4" +hex = { version = "0.4.3" } +dataloader = "0.18" openmina-core = { path = "../../core" } openmina-node-common = { path = "../common" } diff --git a/node/native/src/graphql/account.rs b/node/native/src/graphql/account.rs index 1d661b869b..f2d8db6b14 100644 --- a/node/native/src/graphql/account.rs +++ b/node/native/src/graphql/account.rs @@ -1,5 +1,11 @@ -use juniper::{GraphQLInputObject, GraphQLObject}; -use ledger::FpExt; +use std::{collections::HashMap, sync::Arc}; + +use dataloader::non_cached::Loader; +use juniper::{graphql_object, FieldResult, GraphQLInputObject, GraphQLObject}; +use ledger::{ + scan_state::currency::{Balance, Magnitude, Slot}, + Account, AccountId, FpExt, Timing, +}; use mina_p2p_messages::{ string::{TokenSymbol, ZkAppUri}, v2::{ @@ -7,39 +13,211 @@ use mina_p2p_messages::{ ReceiptChainHash, TokenIdKeyHash, }, }; +use mina_signer::CompressedPubKey; +use node::rpc::{AccountQuery, RpcRequest}; +use openmina_node_common::rpc::RpcSender; -use super::ConversionError; +use super::{Context, ConversionError}; -#[derive(GraphQLObject, Debug)] -#[graphql(description = "A Mina account")] -pub struct GraphQLAccount { - pub public_key: String, - pub token_id: String, - pub token: String, - pub token_symbol: String, - pub balance: GraphQLBalance, - pub nonce: String, - pub receipt_chain_hash: String, - // TODO(adonagy): this should be GraphQLAccount recursively - pub delegate_account: Option, - pub voting_for: String, - pub timing: GraphQLTiming, - pub permissions: GraphQLPermissions, +pub(crate) type AccountLoader = + Loader>, AccountBatcher>; + +pub(crate) struct AccountBatcher { + rpc_sender: RpcSender, +} + +impl dataloader::BatchFn>> + for AccountBatcher +{ + async fn load( + &mut self, + keys: &[AccountId], + ) -> HashMap>> { + self.rpc_sender + .oneshot_request::>(RpcRequest::LedgerAccountsGet( + AccountQuery::MultipleIds(keys.to_vec()), + )) + .await + .unwrap_or_default() + .into_iter() + .map(|account| (account.id(), account.try_into().map_err(Arc::new))) + .collect() + } +} + +pub(crate) fn create_account_loader(rpc_sender: RpcSender) -> AccountLoader { + // TODO(adonagy): is 25 enough? + Loader::new(AccountBatcher { rpc_sender }).with_yield_count(25) +} + +#[derive(Debug, Clone)] +pub(crate) struct GraphQLAccount { + inner: Account, + public_key: String, + token_id: String, + token: String, + token_symbol: String, + // balance: GraphQLBalance, + nonce: String, + receipt_chain_hash: String, + // Storing the key for later + delegate_key: Option, + voting_for: String, + timing: GraphQLTiming, + permissions: GraphQLPermissions, // can we flatten? 
// pub zkapp: Option, - pub zkapp_state: Option<Vec<String>>, - pub verification_key: Option<GraphQLVerificationKey>, - pub action_state: Option<Vec<String>>, - pub proved_state: Option<bool>, - pub zkapp_uri: Option<String>, + zkapp_state: Option<Vec<String>>, + verification_key: Option<GraphQLVerificationKey>, + action_state: Option<Vec<String>>, + proved_state: Option<bool>, + zkapp_uri: Option<String>, +} + +impl GraphQLAccount { + fn min_balance(&self, global_slot: Option<u32>) -> Option<Balance> { + global_slot.map(|slot| match self.inner.timing { + Timing::Untimed => Balance::zero(), + Timing::Timed { .. } => self.inner.min_balance_at_slot(Slot::from_u32(slot)), + }) + } + + fn liquid_balance(&self, global_slot: Option<u32>) -> Option<Balance> { + let min_balance = self.min_balance(global_slot); + let total = self.inner.balance; + min_balance.map(|mb| { + if total > mb { + total.checked_sub(&mb).expect("overflow") + } else { + Balance::zero() + } + }) + } +} + +#[graphql_object(context = Context)] +#[graphql(description = "A Mina account")] +impl GraphQLAccount { + fn public_key(&self) -> &str { + &self.public_key + } + + fn token_id(&self) -> &str { + &self.token_id + } + + fn token(&self) -> &str { + &self.token + } + + fn token_symbol(&self) -> &str { + &self.token_symbol + } + + async fn balance(&self, context: &Context) -> GraphQLBalance { + let best_tip = context.get_or_fetch_best_tip().await; + let global_slot = best_tip.as_ref().map(|bt| bt.global_slot()); + + GraphQLBalance { + total: self.inner.balance.as_u64().to_string(), + block_height: best_tip + .as_ref() + .map(|bt| bt.height()) + .unwrap_or_default() + .to_string(), + state_hash: best_tip.as_ref().map(|bt| bt.hash().to_string()), + liquid: self + .liquid_balance(global_slot) + .map(|b| b.as_u64().to_string()), + locked: self + .min_balance(global_slot) + .map(|b| b.as_u64().to_string()), + unknown: self.inner.balance.as_u64().to_string(), + } + } + + fn nonce(&self) -> &str { + &self.nonce + } + + fn receipt_chain_hash(&self) -> &str { + &self.receipt_chain_hash + } + + async fn delegate_account( + &self, + context: &Context, + ) -> FieldResult<Option<Box<GraphQLAccount>>> { + // If we have a delegate key + if let Some(delegate_key) = self.delegate_key.as_ref() { + // A delegate always has the default token id + let delegate_id = AccountId::new_with_default_token(delegate_key.clone()); + // Use the loader to fetch the delegate account + Ok(context.load_account(delegate_id).await.map(Box::new)) + } else { + // No delegate + Ok(None) + } + } + + pub async fn delegators(&self, context: &Context) -> FieldResult<Vec<GraphQLAccount>> { + if let Some(best_tip) = context.get_or_fetch_best_tip().await { + let staking_ledger_hash = best_tip.staking_epoch_ledger_hash(); + + let id = self.inner.id(); + let delegators = context + .fetch_delegators(staking_ledger_hash.clone(), id.clone()) + .await + .unwrap_or_default(); + + Ok(delegators + .into_iter() + .map(GraphQLAccount::try_from) + .collect::<Result<Vec<_>, _>>()?)
+ } else { + Ok(vec![]) + } + } + + fn voting_for(&self) -> &str { + &self.voting_for + } + + fn timing(&self) -> &GraphQLTiming { + &self.timing + } + + fn permissions(&self) -> &GraphQLPermissions { + &self.permissions + } + + fn zkapp_state(&self) -> &Option<Vec<String>> { + &self.zkapp_state + } + + fn verification_key(&self) -> &Option<GraphQLVerificationKey> { + &self.verification_key + } + + fn action_state(&self) -> &Option<Vec<String>> { + &self.action_state + } + + fn proved_state(&self) -> &Option<bool> { + &self.proved_state + } + + fn zkapp_uri(&self) -> &Option<String> { + &self.zkapp_uri + } } -#[derive(GraphQLObject, Debug)] +#[derive(GraphQLObject, Debug, Clone)] pub struct GraphQLDelegateAccount { pub public_key: String, } -#[derive(GraphQLObject, Debug)] +#[derive(GraphQLObject, Debug, Clone)] pub struct GraphQLTiming { // pub is_timed: bool, pub initial_minimum_balance: Option<String>, @@ -49,7 +227,7 @@ pub struct GraphQLTiming { pub vesting_increment: Option<String>, } -#[derive(GraphQLInputObject, Debug)] +#[derive(GraphQLInputObject, Debug, Clone)] pub struct InputGraphQLTiming { // pub is_timed: bool, pub initial_minimum_balance: String, @@ -70,7 +248,8 @@ impl From for GraphQLTiming { } } } + +#[derive(GraphQLObject, Debug, Clone)] pub struct GraphQLPermissions { pub edit_state: String, pub access: String, @@ -87,15 +266,20 @@ pub struct GraphQLPermissions { pub set_timing: String, } -#[derive(GraphQLObject, Debug)] +#[derive(GraphQLObject, Debug, Clone)] pub struct GraphQLSetVerificationKey { pub auth: String, pub txn_version: String, } -#[derive(GraphQLObject, Debug)] +#[derive(GraphQLObject, Debug, Clone)] pub struct GraphQLBalance { pub total: String, + pub block_height: String, + pub state_hash: Option<String>, + pub liquid: Option<String>, + pub locked: Option<String>, + pub unknown: String, } // #[derive(GraphQLObject, Debug)] @@ -109,7 +293,7 @@ pub struct GraphQLBalance { // pub zkapp_uri: String, // } -#[derive(GraphQLObject, Debug)] +#[derive(GraphQLObject, Debug, Clone)] pub struct GraphQLVerificationKey { // pub max_proofs_verified: String, // pub actual_wrap_domain_size: String, @@ -174,15 +358,6 @@ impl From for GraphQLTiming { } } -// TODO(adonagy) -impl From<ledger::scan_state::currency::Balance> for GraphQLBalance { - fn from(value: ledger::scan_state::currency::Balance) -> Self { - Self { - total: value.as_u64().to_string(), - } - } -} - impl TryFrom<Account> for GraphQLAccount { type Error = ConversionError; @@ -204,16 +379,15 @@ impl TryFrom<Account> for GraphQLAccount { .transpose()?; // Transpose Option<Result> to Result<Option> Ok(Self { + inner: value.clone(), public_key: value.public_key.into_address(), token_id: TokenIdKeyHash::from(value.token_id.clone()).to_string(), token: TokenIdKeyHash::from(value.token_id).to_string(), token_symbol: TokenSymbol::from(&value.token_symbol).to_string(), - balance: GraphQLBalance::from(value.balance), + // balance: GraphQLBalance::from(value.balance), nonce: value.nonce.as_u32().to_string(), receipt_chain_hash: ReceiptChainHash::from(value.receipt_chain_hash).to_string(), - delegate_account: value.delegate.map(|d| GraphQLDelegateAccount { - public_key: d.into_address(), - }), + delegate_key: value.delegate, voting_for: value.voting_for.to_base58check_graphql(), timing: GraphQLTiming::from(value.timing), permissions: GraphQLPermissions::from(value.permissions), diff --git a/node/native/src/graphql/block.rs b/node/native/src/graphql/block.rs index 5a28a06b51..e253023a0f 100644 --- a/node/native/src/graphql/block.rs +++ b/node/native/src/graphql/block.rs @@ -1,24 +1,132 @@ -use juniper::GraphQLObject; +use crate::graphql::{ +
account::GraphQLAccount, + zkapp::{GraphQLFailureReason, GraphQLFeePayer, GraphQLZkappCommand}, +}; +use juniper::{graphql_object, FieldResult, GraphQLEnum, GraphQLObject}; +use ledger::AccountId; +use mina_p2p_messages::v2::{ + MinaBaseSignedCommandPayloadBodyStableV2, MinaBaseSignedCommandStableV2, + MinaBaseStakeDelegationStableV2, TransactionSnarkWorkTStableV2, +}; +use mina_signer::CompressedPubKey; +use node::account::AccountPublicKey; use openmina_core::block::AppliedBlock; -use crate::graphql::zkapp::{GraphQLFailureReason, GraphQLFeePayer, GraphQLZkappCommand}; +use super::{zkapp::GraphQLZkapp, Context, ConversionError}; -use super::{zkapp::GraphQLZkapp, ConversionError}; +#[derive(Debug)] +/// Location [src/lib/mina_graphql/types.ml:2095](https://github.com/MinaProtocol/mina/blob/develop/src/lib/mina_graphql/types.ml#L2095-L2151) +pub(crate) struct GraphQLBlock { + creator: String, + creator_account_key: CompressedPubKey, + winner_account_key: CompressedPubKey, + state_hash: String, + /// Experimental: Bigint field-element representation of stateHash + state_hash_field: String, + protocol_state: GraphQLProtocolState, + /// Public key of account that produced this block + /// use creatorAccount field instead + transactions: GraphQLTransactions, + /// Base58Check-encoded hash of the state after this block + /// Count of user command transactions in the block + command_transaction_count: i32, + snark_jobs: Vec<GraphQLSnarkJob>, +} -#[derive(GraphQLObject, Debug)] +#[graphql_object(context = Context)] #[graphql(description = "A Mina block")] -pub struct GraphQLBestChainBlock { - pub protocol_state: GraphQLProtocolState, - pub state_hash: String, - pub transactions: GraphQLTransactions, +impl GraphQLBlock { + fn creator(&self) -> &str { + &self.creator + } + + async fn creator_account(&self, context: &Context) -> FieldResult<Box<GraphQLAccount>> { + let account_id = AccountId::new_with_default_token(self.creator_account_key.clone()); + if let Some(account) = context.load_account(account_id).await { + Ok(Box::new(account)) + } else { + Err(juniper::FieldError::new( + "Failed to load creator account".to_string(), + juniper::Value::null(), + )) + } + } + async fn winner_account(&self, context: &Context) -> FieldResult<Box<GraphQLAccount>> { + let account_id = AccountId::new_with_default_token(self.winner_account_key.clone()); + if let Some(account) = context.load_account(account_id).await { + Ok(Box::new(account)) + } else { + Err(juniper::FieldError::new( + "Failed to load winner account".to_string(), + juniper::Value::null(), + )) + } + } + + async fn state_hash(&self) -> &str { + &self.state_hash + } + + /// Experimental: Bigint field-element representation of stateHash + async fn state_hash_field(&self) -> &str { + &self.state_hash_field + } + + async fn protocol_state(&self) -> &GraphQLProtocolState { + &self.protocol_state + } + + async fn transactions(&self) -> &GraphQLTransactions { + &self.transactions + } + + async fn command_transaction_count(&self) -> i32 { + self.command_transaction_count + } + + async fn snark_jobs(&self) -> &Vec<GraphQLSnarkJob> { + &self.snark_jobs + } +} + +#[derive(GraphQLObject, Debug)] +pub struct GraphQLSnarkJob { + pub fee: String, + pub prover: String, } #[derive(GraphQLObject, Debug)] pub struct GraphQLTransactions { pub zkapp_commands: Vec<GraphQLZkapp>, + pub user_commands: Vec<GraphQLUserCommands>, +} + +#[derive(GraphQLObject, Debug)] +pub struct GraphQLUserCommands { + pub amount: Option<String>, + pub failure_reason: Option<String>, + pub fee: String, + pub fee_token: String, + pub from: String, + pub hash: String, + pub id: String, + pub is_delegation: bool, + pub
kind: GraphQLUserCommandsKind, + pub memo: String, + pub nonce: i32, + pub to: String, + pub token: String, + pub valid_until: String, +} + +#[derive(Clone, Copy, Debug, GraphQLEnum)] +#[allow(non_camel_case_types)] +pub enum GraphQLUserCommandsKind { + PAYMENT, + STAKE_DELEGATION, } -impl TryFrom<AppliedBlock> for GraphQLBestChainBlock { +impl TryFrom<AppliedBlock> for GraphQLBlock { type Error = ConversionError; fn try_from(value: AppliedBlock) -> Result<Self, Self::Error> { let block = value.block; @@ -58,10 +166,28 @@ impl TryFrom for GraphQLBestChainBlock { .into(), }; + let command_transaction_count = block.body().diff().0.commands.len() as i32; + + let snark_jobs = block + .body() + .completed_works_iter() + .map(GraphQLSnarkJob::from) + .collect(); + Ok(Self { + creator_account_key: AccountPublicKey::from(block.producer().clone()) + .try_into() + .map_err(|_| ConversionError::Custom("Invalid public key".to_string()))?, + winner_account_key: AccountPublicKey::from(block.block_stake_winner().clone()) + .try_into() + .map_err(|_| ConversionError::Custom("Invalid public key".to_string()))?, protocol_state, state_hash: block.hash.to_string(), + state_hash_field: block.hash.to_decimal(), + creator: block.producer().to_string(), transactions: block.body().diff().clone().try_into()?, + command_transaction_count, + snark_jobs, }) } } @@ -123,25 +249,25 @@ impl TryFrom for GraphQLTransactions { .1 .map_or_else(Vec::new, |v| v.commands.into_iter().collect::<Vec<_>>()); - let zkapp_commands = value + let commands = value .0 .commands .into_iter() .chain(also_zkapp_commands) - .rev() - .map(|cmd| { - // std::fs::create_dir_all("zkapps").unwrap(); - // let zkapp_path = format!("zkapps/{}", zkapp.hash().unwrap()); - // let path = PathBuf::from(zkapp_path.clone()); - // if !path.exists() { - // let mut buff = Vec::new(); - // zkapp.binprot_write(&mut buff).unwrap(); - // std::fs::write(zkapp_path, buff).unwrap(); - // } - if let MinaBaseUserCommandStableV2::ZkappCommand(zkapp) = cmd.data { + .rev(); + + let mut zkapp_commands = Vec::new(); + let mut user_commands = Vec::new(); + + for command in commands { + match command.data { + MinaBaseUserCommandStableV2::SignedCommand(user_command) => { + user_commands.push(GraphQLUserCommands::try_from(user_command)?); + } + MinaBaseUserCommandStableV2::ZkappCommand(zkapp) => { let failure_reason = if let MinaBaseTransactionStatusStableV2::Failed(failure_collection) = - cmd.status + command.status { let res = failure_collection .0 @@ ... .rev() .collect(); + Some(res) } else { None }; + let account_updates = zkapp .account_updates .clone() .into_iter() .map(|v| v.elt.account_update.try_into()) .collect::<Result<Vec<_>, _>>()?; - Ok(Some(GraphQLZkapp { + + zkapp_commands.push(GraphQLZkapp { hash: zkapp.hash()?.to_string(), failure_reason, id: zkapp.to_base64()?, @@ ... account_updates, fee_payer: GraphQLFeePayer::from(zkapp.fee_payer), }, - })) - } else { - Ok(None) + }); } - }) - .collect::<Result<Vec<_>, Self::Error>>()?
- .into_iter() - .flatten() - .collect::<Vec<_>>(); - Ok(Self { zkapp_commands }) + } + } + + Ok(Self { + zkapp_commands, + user_commands, + }) } } @@ -258,3 +386,62 @@ +impl From<&TransactionSnarkWorkTStableV2> for GraphQLSnarkJob { + fn from(value: &TransactionSnarkWorkTStableV2) -> Self { + Self { + fee: value.fee.to_string(), + prover: value.prover.to_string(), + } + } +} + +impl TryFrom<MinaBaseSignedCommandStableV2> for GraphQLUserCommands { + type Error = ConversionError; + + fn try_from(user_command: MinaBaseSignedCommandStableV2) -> Result<Self, Self::Error> { + let is_delegation = matches!( + user_command.payload.body, + MinaBaseSignedCommandPayloadBodyStableV2::StakeDelegation(_) + ); + let hash = user_command.hash()?.to_string(); + let id = user_command.to_base64()?; + + let fee = user_command.payload.common.fee.to_string(); + let memo = user_command.payload.common.memo.to_base58check(); + let nonce = user_command.payload.common.nonce.as_u32() as i32; + let valid_until = user_command.payload.common.valid_until.as_u32().to_string(); + + let (to, amount, kind) = match user_command.payload.body { + MinaBaseSignedCommandPayloadBodyStableV2::Payment(payment) => ( + payment.receiver_pk.to_string(), + Some(payment.amount.to_string()), + GraphQLUserCommandsKind::PAYMENT, + ), + MinaBaseSignedCommandPayloadBodyStableV2::StakeDelegation( + MinaBaseStakeDelegationStableV2::SetDelegate { new_delegate }, + ) => ( + new_delegate.to_string(), + None, + GraphQLUserCommandsKind::STAKE_DELEGATION, + ), + }; + + Ok(GraphQLUserCommands { + hash, + from: user_command.signer.to_string(), + to, + is_delegation, + amount, + failure_reason: Default::default(), + fee, + fee_token: Default::default(), + id, + kind, + memo, + nonce, + token: Default::default(), + valid_until, + }) + } +} diff --git a/node/native/src/graphql/constants.rs b/node/native/src/graphql/constants.rs index 1551736b21..2067b00356 100644 --- a/node/native/src/graphql/constants.rs +++ b/node/native/src/graphql/constants.rs @@ -1,7 +1,292 @@ use juniper::GraphQLObject; -use openmina_core::{consensus::ConsensusConstants, constants::ConstraintConstants}; +use node::{ + rpc::{ + ConsensusTimeQuery, PeerConnectionStatus, RpcConsensusTimeGetResponse, + RpcNodeStatusNetworkInfo, RpcPeerInfo, RpcRequest, + }, + BuildEnv, +}; +use openmina_core::{ + consensus::{ConsensusConstants, ConsensusTime}, + constants::ConstraintConstants, +}; -use super::ConversionError; +use super::{Context, ConversionError, Error}; + +#[derive(Clone, Debug, Copy)] +pub(crate) struct GraphQLDaemonStatus; + +#[juniper::graphql_object(context = Context)] +impl GraphQLDaemonStatus { + async fn consensus_configuration( + &self, + context: &Context, + ) -> juniper::FieldResult<GraphQLConsensusConfiguration> { + let consensus_constants: ConsensusConstants = context + .rpc_sender + .oneshot_request(RpcRequest::ConsensusConstantsGet) + .await + .ok_or(Error::StateMachineEmptyResponse)?; + Ok(GraphQLConsensusConfiguration::from(consensus_constants)) + } + + async fn peers(&self, context: &Context) -> juniper::FieldResult<Vec<GraphQLRpcPeerInfo>> { + let peers: Vec<RpcPeerInfo> = context + .rpc_sender + .oneshot_request(RpcRequest::PeersGet) + .await + .ok_or(Error::StateMachineEmptyResponse)?; + + let connected_peers = peers + .iter() + .filter(|peer| matches!(peer.connection_status, PeerConnectionStatus::Connected)) + .map(GraphQLRpcPeerInfo::from) + .collect(); + + Ok(connected_peers) + } + + async fn consensus_time_now( + &self, + context: &Context, + ) -> juniper::FieldResult<GraphQLConsensusTime> { + let consensus_time: RpcConsensusTimeGetResponse = context + .rpc_sender + .oneshot_request(RpcRequest::ConsensusTimeGet(ConsensusTimeQuery::Now)) +
.await + .ok_or(Error::StateMachineEmptyResponse)?; + + match consensus_time { + Some(consensus_time) => Ok(GraphQLConsensusTime::from(consensus_time)), + None => Err(juniper::FieldError::new( + "No consensus time found", + juniper::Value::Null, + )), + } + } + + async fn consensus_time_best_tip( + &self, + context: &Context, + ) -> juniper::FieldResult<GraphQLConsensusTime> { + let consensus_time_res: RpcConsensusTimeGetResponse = context + .rpc_sender + .oneshot_request(RpcRequest::ConsensusTimeGet(ConsensusTimeQuery::BestTip)) + .await + .ok_or(Error::StateMachineEmptyResponse)?; + + match consensus_time_res { + Some(consensus_time) => Ok(GraphQLConsensusTime::from(consensus_time)), + None => Err(juniper::FieldError::new( + "No consensus time found", + juniper::Value::Null, + )), + } + } + + async fn consensus_mechanism(&self, _context: &Context) -> juniper::FieldResult<String> { + Ok("proof_of_stake".to_string()) + } + + async fn blockchain_length(&self, context: &Context) -> juniper::FieldResult<Option<i32>> { + let status = context.get_or_fetch_status().await; + + Ok(status.and_then(|status| { + status + .transition_frontier + .best_tip + .map(|block_summary| block_summary.height as i32) + })) + } + + async fn chain_id(&self, context: &Context) -> juniper::FieldResult<Option<String>> { + let status = context.get_or_fetch_status().await; + + Ok(status.and_then(|status| status.chain_id)) + } + + async fn commit_id(&self, _context: &Context) -> juniper::FieldResult<String> { + Ok(BuildEnv::get().git.commit_hash.to_string()) + } + + async fn global_slot_since_genesis_best_tip( + &self, + context: &Context, + ) -> juniper::FieldResult<Option<i32>> { + let best_tip = context.get_or_fetch_best_tip().await; + Ok(best_tip.and_then(|best_tip| { + println!("best_tip OK"); + best_tip.global_slot_since_genesis().try_into().ok() + })) + } + + async fn ledger_merkle_root(&self, context: &Context) -> juniper::FieldResult<Option<String>> { + let best_tip = context.get_or_fetch_best_tip().await; + + Ok(best_tip.map(|best_tip| best_tip.merkle_root_hash().to_string())) + // match best_tip { + // Some(best_tip) => { + // println!("best_tip_ledger_merkle_root {:?}", best_tip.merkle_root_hash()); + // let ledger_status = context + // .get_or_fetch_ledger_status(best_tip.merkle_root_hash()) + // .await; + // Ok(ledger_status + // .map(|ledger_status| ledger_status.best_tip_staged_ledger_hash.to_string())) + // } + // None => Ok(None), + // } + } + + async fn state_hash(&self, context: &Context) -> juniper::FieldResult<Option<String>> { + let best_tip = context.get_or_fetch_best_tip().await; + Ok(best_tip.map(|best_tip| best_tip.hash().to_string())) + } + + async fn num_accounts(&self, context: &Context) -> juniper::FieldResult<Option<i32>> { + let best_tip = context.get_or_fetch_best_tip().await; + + match best_tip { + Some(best_tip) => { + let ledger_status = context + .get_or_fetch_ledger_status(best_tip.merkle_root_hash()) + .await; + Ok(ledger_status.map(|ledger_status| ledger_status.num_accounts as i32)) + } + None => Ok(None), + } + } + + async fn highest_unvalidated_block_length_received( + &self, + context: &Context, + ) -> juniper::FieldResult<Option<i32>> { + let status = context.get_or_fetch_status().await; + Ok(status.and_then(|status| { + status + .transition_frontier + .best_tip + .map(|best_tip| best_tip.height as i32) + .or_else(|| { + status + .transition_frontier + .sync + .target + .map(|target| target.height as i32) + }) + })) + } + + async fn highest_block_length_received( + &self, + context: &Context, + ) -> juniper::FieldResult<Option<i32>> { + let status = context.get_or_fetch_status().await; + Ok(status.and_then(|status| {
+ status + .transition_frontier + .best_tip + .map(|best_tip| best_tip.height as i32) + .or_else(|| { + status + .transition_frontier + .sync + .target + .map(|target| target.height as i32) + }) + })) + } + + async fn addrs_and_ports( + &self, + context: &Context, + ) -> juniper::FieldResult<GraphQLAddrsAndPorts> { + let status = context.get_or_fetch_status().await; + + match status { + Some(status) => Ok(GraphQLAddrsAndPorts::from(&status.network_info)), + None => Ok(Default::default()), + } + } + + async fn block_production_keys(&self, context: &Context) -> juniper::FieldResult<Vec<String>> { + let status = context.get_or_fetch_status().await; + Ok(status.map_or(vec![], |status| { + status + .block_producer + .map_or(vec![], |key| vec![key.to_string()]) + })) + } + + async fn coinbase_receiver(&self, context: &Context) -> juniper::FieldResult<Option<String>> { + let status = context.get_or_fetch_status().await; + Ok(status.and_then(|status| status.coinbase_receiver.map(|key| key.to_string()))) + } +} + +#[derive(GraphQLObject, Clone, Debug)] +pub struct GraphQLAddrsAndPorts { + pub bind_ip: String, + pub external_ip: Option<String>, + pub client_port: Option<i32>, + pub libp2p_port: Option<i32>, +} + +impl Default for GraphQLAddrsAndPorts { + fn default() -> Self { + Self { + bind_ip: "0.0.0.0".to_string(), + external_ip: None, + client_port: None, + libp2p_port: None, + } + } +} + +impl From<&RpcNodeStatusNetworkInfo> for GraphQLAddrsAndPorts { + fn from(network_info: &RpcNodeStatusNetworkInfo) -> Self { + Self { + bind_ip: network_info.bind_ip.clone(), + external_ip: network_info.external_ip.clone(), + client_port: network_info.client_port.map(|port| port.into()), + libp2p_port: network_info.libp2p_port.map(|port| port.into()), + } + } +} + +#[derive(GraphQLObject, Clone, Debug)] +pub struct GraphQLRpcPeerInfo { + pub peer_id: String, + pub best_tip: Option<String>, + pub best_tip_height: Option<String>, + pub best_tip_global_slot: Option<String>, + pub best_tip_timestamp: Option<String>, + pub connection_status: String, + pub connecting_details: Option<String>, + pub address: Option<String>, + pub incoming: bool, + pub is_libp2p: bool, + pub time: String, +} + +impl From<&RpcPeerInfo> for GraphQLRpcPeerInfo { + fn from(peer: &RpcPeerInfo) -> Self { + Self { + peer_id: peer.peer_id.to_string(), + best_tip: peer.best_tip.as_ref().map(|hash| hash.to_string()), + best_tip_height: peer.best_tip_height.map(|height| height.to_string()), + best_tip_global_slot: peer.best_tip_global_slot.map(|slot| slot.to_string()), + best_tip_timestamp: peer + .best_tip_timestamp + .map(|timestamp| timestamp.to_string()), + connection_status: peer.connection_status.to_string(), + connecting_details: peer.connecting_details.clone(), + address: peer.address.clone(), + incoming: peer.incoming, + is_libp2p: peer.is_libp2p, + time: peer.time.to_string(), + } + } +} #[derive(GraphQLObject, Debug)] pub struct GraphQLGenesisConstants { @@ -25,11 +310,6 @@ impl GraphQLGenesisConstants { } } -#[derive(GraphQLObject, Debug)] -pub struct GraphQLDaemonStatus { - pub consensus_configuration: GraphQLConsensusConfiguration, -} - #[derive(GraphQLObject, Debug)] pub struct GraphQLConsensusConfiguration { pub epoch_duration: i32, @@ -48,3 +328,30 @@ impl From<ConsensusConstants> for GraphQLConsensusConfiguration { } } } + +#[derive(GraphQLObject, Debug)] +pub struct GraphQLConsensusTime { + pub start_time: String, + pub end_time: String, + pub epoch: String, + pub global_slot: String, + pub slot: String, +} + +impl From<ConsensusTime> for GraphQLConsensusTime { + fn from(consensus_time: ConsensusTime) -> Self { + let start_time: u64 = consensus_time.start_time.into(); + let
+impl From<ConsensusTime> for GraphQLConsensusTime {
+    fn from(consensus_time: ConsensusTime) -> Self {
+        let start_time: u64 = consensus_time.start_time.into();
+        let end_time: u64 = consensus_time.end_time.into();
+
+        let start_time_ms = start_time.checked_div(1_000_000).expect("division by zero");
+        let end_time_ms = end_time.checked_div(1_000_000).expect("division by zero");
+
+        GraphQLConsensusTime {
+            start_time: start_time_ms.to_string(),
+            end_time: end_time_ms.to_string(),
+            epoch: consensus_time.epoch.to_string(),
+            global_slot: consensus_time.global_slot.to_string(),
+            slot: consensus_time.slot.to_string(),
+        }
+    }
+}
diff --git a/node/native/src/graphql/mod.rs b/node/native/src/graphql/mod.rs
index 067c16aa7b..819bf2d5d6 100644
--- a/node/native/src/graphql/mod.rs
+++ b/node/native/src/graphql/mod.rs
@@ -1,31 +1,51 @@
-use std::str::FromStr;
-
-use juniper::{graphql_value, FieldError};
-use juniper::{EmptySubscription, GraphQLEnum, RootNode};
-use ledger::Account;
-use mina_p2p_messages::v2::MinaBaseSignedCommandStableV2;
-use mina_p2p_messages::v2::MinaBaseUserCommandStableV2;
-use mina_p2p_messages::v2::MinaBaseZkappCommandTStableV1WireStableV1;
-use mina_p2p_messages::v2::TokenIdKeyHash;
-use node::rpc::RpcTransactionInjectResponse;
-use node::rpc::RpcTransactionInjectedCommand;
-use node::rpc::RpcTransactionStatusGetResponse;
+use account::{create_account_loader, AccountLoader, GraphQLAccount};
+use block::{GraphQLBlock, GraphQLSnarkJob, GraphQLUserCommands};
+use juniper::{graphql_value, EmptySubscription, FieldError, GraphQLEnum, RootNode};
+use ledger::{Account, AccountId};
+use mina_p2p_messages::v2::{
+    conv, LedgerHash, MinaBaseSignedCommandStableV2, MinaBaseUserCommandStableV2,
+    MinaBaseZkappCommandTStableV1WireStableV1, TokenIdKeyHash, TransactionHash,
+};
+use mina_signer::CompressedPubKey;
+use node::rpc::RpcSnarkerConfig;
 use node::{
     account::AccountPublicKey,
-    rpc::{AccountQuery, RpcRequest, RpcSyncStatsGetResponse, SyncStatsQuery},
+    ledger::read::LedgerStatus,
+    rpc::{
+        AccountQuery, GetBlockQuery, PooledCommandsQuery, RpcBestChainResponse,
+        RpcGenesisBlockResponse, RpcGetBlockResponse, RpcLedgerAccountDelegatorsGetResponse,
+        RpcLedgerStatusGetResponse, RpcNodeStatus, RpcPooledUserCommandsResponse,
+        RpcPooledZkappCommandsResponse, RpcRequest, RpcSnarkPoolCompletedJobsResponse,
+        RpcSnarkPoolPendingJobsGetResponse, RpcStatusGetResponse, RpcSyncStatsGetResponse,
+        RpcTransactionInjectResponse, RpcTransactionStatusGetResponse, SyncStatsQuery,
+    },
     stats::sync::SyncKind,
+    BuildEnv,
 };
+use o1_utils::field_helpers::FieldHelpersError;
+use openmina_core::{
+    block::AppliedBlock, consensus::ConsensusConstants, constants::constraint_constants,
+    NetworkConfig,
+};
-use openmina_core::block::AppliedBlock;
-use openmina_core::consensus::ConsensusConstants;
-use openmina_core::constants::constraint_constants;
 use openmina_node_common::rpc::RpcSender;
+use snark::{GraphQLPendingSnarkWork, GraphQLSnarkWorker};
+use std::str::FromStr;
+use tokio::sync::OnceCell;
+use transaction::GraphQLTransactionStatus;
 use warp::{Filter, Rejection, Reply};
+use zkapp::GraphQLZkapp;
 
 pub mod account;
 pub mod block;
 pub mod constants;
+pub mod snark;
+pub mod transaction;
+pub mod user_command;
 pub mod zkapp;
 
+/// Base58 encoded public key
+pub type GraphQLPublicKey = String;
+
 #[derive(Debug, thiserror::Error)]
 pub enum Error {
     #[error("Conversion error: {0}")]
@@ -48,10 +68,14 @@ pub enum ConversionError {
     SerdeJson(#[from] serde_json::Error),
     #[error("Base58Check: {0}")]
     Base58Check(#[from] mina_p2p_messages::b58::FromBase58CheckError),
+    #[error("Base58 error: {0}")]
+    Base58(#[from] bs58::decode::Error),
     #[error(transparent)]
     InvalidDecimalNumber(#[from] mina_p2p_messages::bigint::InvalidDecimalNumber),
     #[error("Invalid bigint")]
     InvalidBigInt,
+    #[error("Invalid hex")]
+    InvalidHex,
     #[error(transparent)]
     ParseInt(#[from] std::num::ParseIntError),
     #[error(transparent)]
@@ -64,12 +88,105 @@ pub enum ConversionError {
     InvalidLength,
     #[error("Custom: {0}")]
     Custom(String),
+    #[error(transparent)]
+    FieldHelpers(#[from] FieldHelpersError),
+    #[error("Failed to convert integer to i32")]
+    Integer,
+}
+
+impl From<ConversionError> for Error {
+    fn from(value: ConversionError) -> Self {
+        Error::Conversion(value)
+    }
 }
 
-struct Context(RpcSender);
+/// Context for the GraphQL API
+///
+/// This is used to share state between the GraphQL queries and mutations.
+///
+/// The caching used here is only valid for the lifetime of the context,
+/// i.e. for a single request. That is intentional: several resolvers within
+/// the same request may need the same data, and caching it here keeps the
+/// number of requests to the state machine down.
+pub(crate) struct Context {
+    rpc_sender: RpcSender,
+    account_loader: AccountLoader,
+    // Caches
+    statemachine_status_cache: OnceCell<Option<RpcNodeStatus>>,
+    best_tip_cache: OnceCell<Option<AppliedBlock>>,
+    ledger_status_cache: OnceCell<Option<LedgerStatus>>,
+}
 
 impl juniper::Context for Context {}
 
+impl Context {
+    pub fn new(rpc_sender: RpcSender) -> Self {
+        Self {
+            rpc_sender: rpc_sender.clone(),
+            statemachine_status_cache: OnceCell::new(),
+            best_tip_cache: OnceCell::new(),
+            ledger_status_cache: OnceCell::new(),
+            account_loader: create_account_loader(rpc_sender.clone()),
+        }
+    }
+
+    pub(crate) async fn get_or_fetch_status(&self) -> RpcStatusGetResponse {
+        self.statemachine_status_cache
+            .get_or_init(|| async {
+                self.rpc_sender
+                    .oneshot_request(RpcRequest::StatusGet)
+                    .await
+                    .flatten()
+            })
+            .await
+            .clone()
+    }
+
+    pub(crate) async fn get_or_fetch_best_tip(&self) -> Option<AppliedBlock> {
+        self.best_tip_cache
+            .get_or_init(|| async {
+                self.rpc_sender
+                    .oneshot_request(RpcRequest::BestChain(1))
+                    .await
+                    .and_then(|blocks: RpcBestChainResponse| blocks.first().cloned())
+            })
+            .await
+            .clone()
+    }
+
+    pub(crate) async fn get_or_fetch_ledger_status(
+        &self,
+        ledger_hash: &LedgerHash,
+    ) -> RpcLedgerStatusGetResponse {
+        self.ledger_status_cache
+            .get_or_init(|| async {
+                self.rpc_sender
+                    .oneshot_request(RpcRequest::LedgerStatusGet(ledger_hash.clone()))
+                    .await
+                    .flatten()
+            })
+            .await
+            .clone()
+    }
+
+    pub(crate) async fn load_account(&self, account_id: AccountId) -> Option<GraphQLAccount> {
+        self.account_loader.try_load(account_id).await.ok()?.ok()
+    }
+
+    pub async fn fetch_delegators(
+        &self,
+        ledger_hash: LedgerHash,
+        account_id: AccountId,
+    ) -> RpcLedgerAccountDelegatorsGetResponse {
+        self.rpc_sender
+            .oneshot_request(RpcRequest::LedgerAccountDelegatorsGet(
+                ledger_hash.clone(),
+                account_id.clone(),
+            ))
+            .await
+            .flatten()
+    }
+}
+
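+// Illustrative sketch (not part of the API surface): two resolvers that both
+// need the node status within the same request share one state-machine round
+// trip:
+//
+//     let a = context.get_or_fetch_status().await; // dispatches RpcRequest::StatusGet
+//     let b = context.get_or_fetch_status().await; // served from the OnceCell cache
+//
+// A fresh `Context` is built per request (see `routes` below), so nothing is
+// cached across requests.
+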
 #[derive(Clone, Copy, Debug, GraphQLEnum)]
 #[allow(clippy::upper_case_acronyms)]
 enum SyncStatus {
@@ -146,16 +263,20 @@ struct Query;
 impl Query {
     async fn account(
         public_key: String,
-        token: String,
+        token: Option<String>,
         context: &Context,
     ) -> juniper::FieldResult<GraphQLAccount> {
-        let token_id = TokenIdKeyHash::from_str(&token)?;
         let public_key = AccountPublicKey::from_str(&public_key)?;
+        let req = match token {
+            None => AccountQuery::SinglePublicKey(public_key),
+            Some(token) => {
+                let token_id = TokenIdKeyHash::from_str(&token)?;
+                AccountQuery::PubKeyWithTokenId(public_key, token_id)
+            }
+        };
         let accounts: Vec<Account> = context
-            .0
-            .oneshot_request(RpcRequest::LedgerAccountsGet(
-                AccountQuery::PubKeyWithTokenId(public_key, token_id),
-            ))
+            .rpc_sender
+            .oneshot_request(RpcRequest::LedgerAccountsGet(req))
             .await
             .ok_or(Error::StateMachineEmptyResponse)?;
@@ -168,7 +289,7 @@ impl Query {
     async fn sync_status(context: &Context) -> juniper::FieldResult<SyncStatus> {
         let state: RpcSyncStatsGetResponse = context
-            .0
+            .rpc_sender
             .oneshot_request(RpcRequest::SyncStatsGet(SyncStatsQuery { limit: Some(1) }))
             .await
             .ok_or(Error::StateMachineEmptyResponse)?;
@@ -186,12 +307,13 @@ impl Query {
             Ok(SyncStatus::LISTENING)
         }
     }
+
     async fn best_chain(
         max_length: i32,
         context: &Context,
-    ) -> juniper::FieldResult<Vec<block::GraphQLBlock>> {
+    ) -> juniper::FieldResult<Vec<GraphQLBlock>> {
         let best_chain: Vec<AppliedBlock> = context
-            .0
+            .rpc_sender
             .oneshot_request(RpcRequest::BestChain(max_length as u32))
             .await
             .ok_or(Error::StateMachineEmptyResponse)?;
@@ -203,23 +325,16 @@ impl Query {
     }
 
     async fn daemon_status(
-        context: &Context,
+        _context: &Context,
     ) -> juniper::FieldResult<constants::GraphQLDaemonStatus> {
-        let consensus_constants: ConsensusConstants = context
-            .0
-            .oneshot_request(RpcRequest::ConsensusConstantsGet)
-            .await
-            .ok_or(Error::StateMachineEmptyResponse)?;
-        Ok(constants::GraphQLDaemonStatus {
-            consensus_configuration: consensus_constants.into(),
-        })
+        Ok(constants::GraphQLDaemonStatus)
     }
 
     async fn genesis_constants(
         context: &Context,
     ) -> juniper::FieldResult<constants::GraphQLGenesisConstants> {
         let consensus_constants: ConsensusConstants = context
-            .0
+            .rpc_sender
             .oneshot_request(RpcRequest::ConsensusConstantsGet)
             .await
             .ok_or(Error::StateMachineEmptyResponse)?;
@@ -235,7 +350,7 @@ impl Query {
         payment: Option<String>,
         zkapp_transaction: Option<String>,
         context: &Context,
-    ) -> juniper::FieldResult<String> {
+    ) -> juniper::FieldResult<GraphQLTransactionStatus> {
         if payment.is_some() && zkapp_transaction.is_some() {
             return Err(Error::Custom(
                 "Cannot provide both payment and zkapp transaction".to_string(),
@@ -258,11 +373,250 @@ impl Query {
             .into());
         };
         let res: RpcTransactionStatusGetResponse = context
-            .0
+            .rpc_sender
            .oneshot_request(RpcRequest::TransactionStatusGet(tx))
            .await
            .ok_or(Error::StateMachineEmptyResponse)?;
-        Ok(res.to_string())
+
+        Ok(GraphQLTransactionStatus::from(res))
+    }
+
+    /// Retrieve a block with the given state hash or height, if contained in the transition frontier
+    async fn block(
+        height: Option<i32>,
+        state_hash: Option<String>,
+        context: &Context,
+    ) -> juniper::FieldResult<GraphQLBlock> {
+        let query = match (height, state_hash) {
+            (Some(height), None) => GetBlockQuery::Height(height.try_into().unwrap_or(u32::MAX)),
+            (None, Some(state_hash)) => GetBlockQuery::Hash(state_hash.parse()?),
+            _ => {
+                return Err(Error::Custom(
+                    "Must provide exactly one of state hash, height".to_owned(),
+                )
+                .into());
+            }
+        };
+
+        let res: Option<RpcGetBlockResponse> = context
+            .rpc_sender
+            .oneshot_request(RpcRequest::GetBlock(query.clone()))
+            .await;
+
+        match res {
+            None => Err(Error::Custom("response channel dropped".to_owned()).into()),
+            Some(None) => match query {
+                GetBlockQuery::Hash(hash) => Err(Error::Custom(format!(
+                    "Could not find block with hash: `{}` in transition frontier",
+                    hash
+                ))
+                .into()),
+                GetBlockQuery::Height(height) => Err(Error::Custom(format!(
+                    "Could not find block with height: `{}` in transition frontier",
+                    height
+                ))
+                .into()),
+            },
+            Some(Some(block)) => Ok(GraphQLBlock::try_from(block)?),
+        }
+    }
+
+    /// Retrieve all the scheduled user commands for a specified sender that
+    /// the current daemon sees in its transaction pool. All scheduled
+    /// commands are queried if no sender is specified.
+    ///
+    /// Arguments:
+    /// - `public_key`: base58 encoded [`AccountPublicKey`]
+    /// - `hashes`: list of base58 encoded [`TransactionHash`]es
+    /// - `ids`: list of base64 encoded [`MinaBaseSignedCommandStableV2`]
+    async fn pooled_user_commands(
+        &self,
+        public_key: Option<String>,
+        hashes: Option<Vec<String>>,
+        ids: Option<Vec<String>>,
+        context: &Context,
+    ) -> juniper::FieldResult<Vec<GraphQLUserCommands>> {
+        let query = parse_pooled_commands_query(
+            public_key,
+            hashes,
+            ids,
+            MinaBaseSignedCommandStableV2::from_base64,
+        )?;
+
+        let res: RpcPooledUserCommandsResponse = context
+            .rpc_sender
+            .oneshot_request(RpcRequest::PooledUserCommands(query))
+            .await
+            .ok_or(Error::StateMachineEmptyResponse)?;
+
+        Ok(res
+            .into_iter()
+            .map(GraphQLUserCommands::try_from)
+            .collect::<Result<Vec<_>, _>>()?)
+    }
+
+    /// Retrieve all the scheduled zkApp commands for a specified sender that
+    /// the current daemon sees in its transaction pool. All scheduled
+    /// commands are queried if no sender is specified.
+    ///
+    /// Arguments:
+    /// - `public_key`: base58 encoded [`AccountPublicKey`]
+    /// - `hashes`: list of base58 encoded [`TransactionHash`]es
+    /// - `ids`: list of base64 encoded [`MinaBaseZkappCommandTStableV1WireStableV1`]
+    async fn pooled_zkapp_commands(
+        public_key: Option<String>,
+        hashes: Option<Vec<String>>,
+        ids: Option<Vec<String>>,
+        context: &Context,
+    ) -> juniper::FieldResult<Vec<GraphQLZkapp>> {
+        let query = parse_pooled_commands_query(
+            public_key,
+            hashes,
+            ids,
+            MinaBaseZkappCommandTStableV1WireStableV1::from_base64,
+        )?;
+
+        let res: RpcPooledZkappCommandsResponse = context
+            .rpc_sender
+            .oneshot_request(RpcRequest::PooledZkappCommands(query))
+            .await
+            .ok_or(Error::StateMachineEmptyResponse)?;
+
+        Ok(res
+            .into_iter()
+            .map(GraphQLZkapp::try_from)
+            .collect::<Result<Vec<_>, _>>()?)
+    }
+
+    async fn genesis_block(context: &Context) -> juniper::FieldResult<GraphQLBlock> {
+        let block = context
+            .rpc_sender
+            .oneshot_request::<RpcGenesisBlockResponse>(RpcRequest::GenesisBlockGet)
+            .await
+            .ok_or(Error::StateMachineEmptyResponse)?
+            .ok_or(Error::StateMachineEmptyResponse)?;
+
+        Ok(GraphQLBlock::try_from(AppliedBlock {
+            block,
+            just_emitted_a_proof: false,
+        })?)
+    }
+
+    async fn snark_pool(context: &Context) -> juniper::FieldResult<Vec<GraphQLSnarkJob>> {
+        let jobs: RpcSnarkPoolCompletedJobsResponse = context
+            .rpc_sender
+            .oneshot_request(RpcRequest::SnarkPoolCompletedJobsGet)
+            .await
+            .ok_or(Error::StateMachineEmptyResponse)?;
+
+        Ok(jobs.iter().map(GraphQLSnarkJob::from).collect())
+    }
+
+    async fn pending_snark_work(
+        context: &Context,
+    ) -> juniper::FieldResult<Vec<GraphQLPendingSnarkWork>> {
+        let jobs: RpcSnarkPoolPendingJobsGetResponse = context
+            .rpc_sender
+            .oneshot_request(RpcRequest::SnarkPoolPendingJobsGet)
+            .await
+            .ok_or(Error::StateMachineEmptyResponse)?;
+
+        Ok(jobs
+            .into_iter()
+            .map(GraphQLPendingSnarkWork::try_from)
+            .collect::<Result<Vec<_>, _>>()?)
+    }
+
+    /// The chain-agnostic identifier of the network
+    #[graphql(name = "networkID")]
+    async fn network_id(_context: &Context) -> juniper::FieldResult<String> {
+        let res = format!("mina:{}", NetworkConfig::global().name);
+        Ok(res)
+    }
+
+    /// The version of the node (git commit hash)
+    async fn version(_context: &Context) -> juniper::FieldResult<String> {
+        let res = BuildEnv::get().git.commit_hash;
+        Ok(res)
+    }
+
+    async fn current_snark_worker(
+        &self,
+        context: &Context,
+    ) -> juniper::FieldResult<Option<GraphQLSnarkWorker>> {
+        let config: Option<RpcSnarkerConfig> = context
+            .rpc_sender
+            .oneshot_request(RpcRequest::SnarkerConfig)
+            .await
+            .ok_or(Error::StateMachineEmptyResponse)?;
+
+        let Some(config) = config else {
+            return Ok(None);
+        };
+
+        let account = context
+            .load_account(AccountId {
+                public_key: CompressedPubKey::try_from(&config.public_key)?,
+                token_id: TokenIdKeyHash::default().into(),
+            })
+            .await;
+
+        Ok(Some(GraphQLSnarkWorker {
+            key: config.public_key.to_string(),
+            account,
+            fee: config.fee.to_string(),
+        }))
+    }
+}
+
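+/// Shared helper for the `sendZkapp`, `sendPayment` and `sendDelegation`
+/// mutations: it injects the command into the transaction pool via the state
+/// machine and converts the echoed command into the mutation-specific
+/// response type `R`. A sketch of a call site, assuming `cmd` is an already
+/// signed user command:
+///
+/// ```ignore
+/// let resp: user_command::GraphQLSendPaymentResponse = inject_tx(cmd, context).await?;
+/// ```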
+async fn inject_tx<R>(
+    cmd: MinaBaseUserCommandStableV2,
+    context: &Context,
+) -> juniper::FieldResult<R>
+where
+    R: TryFrom<MinaBaseUserCommandStableV2>,
+{
+    let res: RpcTransactionInjectResponse = context
+        .rpc_sender
+        .oneshot_request(RpcRequest::TransactionInject(vec![cmd]))
+        .await
+        .ok_or(Error::StateMachineEmptyResponse)?;
+
+    match res {
+        RpcTransactionInjectResponse::Success(res) => {
+            let cmd: MinaBaseUserCommandStableV2 = match res.first().cloned() {
+                Some(cmd) => cmd.into(),
+                _ => unreachable!(),
+            };
+            cmd.try_into().map_err(|_| {
+                FieldError::new(
+                    "Failed to convert transaction to the required type".to_string(),
+                    graphql_value!(null),
+                )
+            })
+        }
+        RpcTransactionInjectResponse::Rejected(rejected) => {
+            let error_list = rejected
+                .into_iter()
+                .map(|(_, err)| graphql_value!({ "message": err.to_string() }))
+                .collect::<Vec<_>>();
+
+            Err(FieldError::new(
+                "Transaction rejected",
+                graphql_value!(juniper::Value::List(error_list)),
+            ))
+        }
+        RpcTransactionInjectResponse::Failure(failure) => {
+            let error_list = failure
+                .into_iter()
+                .map(|err| graphql_value!({ "message": err.to_string() }))
+                .collect::<Vec<_>>();
+
+            Err(FieldError::new(
+                "Transaction failed",
+                graphql_value!(juniper::Value::List(error_list)),
+            ))
+        }
+    }
+}
@@ -275,50 +629,71 @@ impl Mutation {
         input: zkapp::SendZkappInput,
         context: &Context,
     ) -> juniper::FieldResult<zkapp::GraphQLSendZkappResponse> {
-        let res: RpcTransactionInjectResponse = context
-            .0
-            .oneshot_request(RpcRequest::TransactionInject(vec![input.try_into()?]))
+        inject_tx(input.try_into()?, context).await
+    }
+
+    async fn send_payment(
+        input: user_command::InputGraphQLPayment,
+        signature: user_command::UserCommandSignature,
+        context: &Context,
+    ) -> juniper::FieldResult<user_command::GraphQLSendPaymentResponse> {
+        // Grab the sender's account to get the inferred nonce
+        let token_id = TokenIdKeyHash::default();
+        let public_key = AccountPublicKey::from_str(&input.from)
+            .map_err(|e| Error::Conversion(ConversionError::Base58Check(e)))?;
+
+        let accounts: Vec<Account> = context
+            .rpc_sender
+            .oneshot_request(RpcRequest::LedgerAccountsGet(
+                AccountQuery::PubKeyWithTokenId(public_key, token_id),
+            ))
             .await
             .ok_or(Error::StateMachineEmptyResponse)?;
-        match res {
-            RpcTransactionInjectResponse::Success(res) => {
-                let zkapp_cmd: MinaBaseUserCommandStableV2 = match res.first().cloned() {
-                    Some(RpcTransactionInjectedCommand::Zkapp(zkapp_cmd)) => zkapp_cmd.into(),
-                    _ => unreachable!(),
-                };
-                Ok(zkapp_cmd.try_into()?)
-            }
-            RpcTransactionInjectResponse::Rejected(rejected) => {
-                let error_list = rejected
-                    .into_iter()
-                    .map(|(_, err)| graphql_value!({ "message": err.to_string() }))
-                    .collect::<Vec<_>>();
-
-                Err(FieldError::new(
-                    "Transaction rejected",
-                    graphql_value!(juniper::Value::List(error_list)),
-                ))
-            }
-            RpcTransactionInjectResponse::Failure(failure) => {
-                let error_list = failure
-                    .into_iter()
-                    .map(|err| graphql_value!({ "message": err.to_string() }))
-                    .collect::<Vec<_>>();
-
-                Err(FieldError::new(
-                    "Transaction failed",
-                    graphql_value!(juniper::Value::List(error_list)),
-                ))
-            }
-        }
+        let inferred_nonce = accounts
+            .first()
+            .ok_or(Error::StateMachineEmptyResponse)?
+            .nonce;
+
+        let command = input
+            .create_user_command(inferred_nonce, signature)
+            .map_err(Error::Conversion)?;
+
+        inject_tx(command, context).await
+    }
+
+    async fn send_delegation(
+        input: user_command::InputGraphQLDelegation,
+        signature: user_command::UserCommandSignature,
+        context: &Context,
+    ) -> juniper::FieldResult<user_command::GraphQLSendDelegationResponse> {
+        // Delegations always use the default (MINA) token
+        let token_id = TokenIdKeyHash::default();
+        let public_key = AccountPublicKey::from_str(&input.from)?;
+
+        // Grab the sender's account to get the inferred nonce
+        let accounts: Vec<Account> = context
+            .rpc_sender
+            .oneshot_request(RpcRequest::LedgerAccountsGet(
+                AccountQuery::PubKeyWithTokenId(public_key, token_id),
+            ))
+            .await
+            .ok_or(Error::StateMachineEmptyResponse)?;
+
+        let inferred_nonce = accounts
+            .first()
+            .ok_or(Error::StateMachineEmptyResponse)?
+            .nonce;
+        let command = input.create_user_command(inferred_nonce, signature)?;
+
+        inject_tx(command, context).await
     }
 }
 
 pub fn routes(
     rpc_sernder: RpcSender,
 ) -> impl Filter<Extract = impl Reply, Error = Rejection> + Clone {
-    let state = warp::any().map(move || Context(rpc_sernder.clone()));
+    let state = warp::any().map(move || Context::new(rpc_sernder.clone()));
     let schema = RootNode::new(Query, Mutation, EmptySubscription::<Context>::new());
     let graphql_filter = juniper_warp::make_graphql_filter(schema, state.boxed());
     let graphiql_filter = juniper_warp::graphiql_filter("/graphql", None);
@@ -362,3 +737,44 @@ pub fn routes(
 //     )))
 //     .or(homepage)
 //     .with(log);
+
+/// Helper function used by [`Query::pooled_user_commands`] and [`Query::pooled_zkapp_commands`] to parse public key, transaction hashes and command ids
+fn parse_pooled_commands_query<T, F>(
+    public_key: Option<String>,
+    hashes: Option<Vec<String>>,
+    ids: Option<Vec<String>>,
+    id_map_fn: F,
+) -> Result<PooledCommandsQuery<T>, ConversionError>
+where
+    F: Fn(&str) -> Result<T, conv::Error>,
+{
+    let public_key = match public_key {
+        Some(public_key) => Some(AccountPublicKey::from_str(&public_key)?),
+        None => None,
+    };
+
+    let hashes = match hashes {
+        Some(hashes) => Some(
+            hashes
+                .into_iter()
+                .map(|tx| TransactionHash::from_str(tx.as_str()))
+                .collect::<Result<Vec<_>, _>>()?,
+        ),
+        None => None,
+    };
+
+    let ids = match ids {
+        Some(ids) => Some(
+            ids.into_iter()
+                .map(|id| id_map_fn(id.as_str()))
+                .collect::<Result<Vec<_>, _>>()?,
+        ),
+        None => None,
+    };
+
+    Ok(PooledCommandsQuery {
+        public_key,
+        hashes,
+        ids,
+    })
+}
diff --git a/node/native/src/graphql/snark.rs b/node/native/src/graphql/snark.rs
new file mode 100644
index 0000000000..4b540d1880
--- /dev/null
+++ b/node/native/src/graphql/snark.rs
@@ -0,0 +1,173 @@
+use juniper::{graphql_object, GraphQLObject};
+use ledger::scan_state::scan_state::{AvailableJobMessage, ParallelScanAvailableJob};
+use mina_p2p_messages::v2::{
+    MinaBaseFeeExcessStableV1, MinaStateBlockchainStateValueStableV2SignedAmount,
+    TransactionSnarkScanStateTransactionWithWitnessStableV2, TransactionSnarkStableV2,
+};
+use node::snark_pool::JobState;
+
+use super::{account::GraphQLAccount, Context, ConversionError, GraphQLPublicKey};
+
+#[derive(GraphQLObject, Debug)]
+#[graphql(description = "Pending snark work")]
+pub struct GraphQLPendingSnarkWork {
+    /// Work bundle with one or two pieces of snark work
+    pub work_bundle: Vec<GraphQLWorkDescription>,
+}
+
+#[derive(GraphQLObject, Debug)]
+pub struct GraphQLWorkDescription {
+    /// Base58Check-encoded hash of the source first-pass ledger
+    pub source_first_pass_ledger_hash: String,
+    /// Base58Check-encoded hash of the target first-pass ledger
+    pub target_first_pass_ledger_hash: String,
+    /// Base58Check-encoded hash of the source second-pass ledger
+    pub source_second_pass_ledger_hash: String,
+    /// Base58Check-encoded hash of the target second-pass ledger
+    pub target_second_pass_ledger_hash: String,
+    /// Total transaction fee that is not accounted for in the transition from source ledger to target ledger
+    pub fee_excess: GraphQLFeeExcesses,
+    /// Increase/Decrease in total supply
+    pub supply_change: GraphQLSupplyChange,
+    /// Increase in total supply
+    pub supply_increase: String,
+    /// Unique identifier for a snark work
+    pub work_id: i32,
+}
+
+#[derive(GraphQLObject, Debug)]
+pub struct GraphQLFeeExcesses {
+    pub fee_token_left: String,
+    pub fee_excess_left: GraphQLFeeExcess,
+    pub fee_token_right: String,
+    pub fee_excess_right: GraphQLFeeExcess,
+}
+
+#[derive(GraphQLObject, Debug)]
+pub struct GraphQLFeeExcess {
+    pub fee_magnitude: String,
+    pub sign: String,
+}
+
+#[derive(GraphQLObject, Debug)]
+pub struct GraphQLSupplyChange {
+    pub amount_magnitude: String,
+    pub sign: String,
+}
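+
+// Descriptive note on the mapping implemented below: a pending job is either a
+// `Base` job (a single transaction with its witness) or a `Merge` of two
+// proofs. In both cases the snark statement (ledger transitions, fee excess,
+// supply change) is what gets exposed as a `GraphQLWorkDescription`.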
+
+impl TryFrom<JobState> for GraphQLPendingSnarkWork {
+    type Error = ConversionError;
+
+    fn try_from(value: JobState) -> Result<Self, Self::Error> {
+        let mut work_bundle = Vec::new();
+
+        for job in value.job.into_iter() {
+            work_bundle.push(GraphQLWorkDescription::try_from(job)?);
+        }
+
+        Ok(Self { work_bundle })
+    }
+}
+
+impl TryFrom<AvailableJobMessage> for GraphQLWorkDescription {
+    type Error = ConversionError;
+
+    fn try_from(value: AvailableJobMessage) -> Result<Self, Self::Error> {
+        match value {
+            ParallelScanAvailableJob::Base(base) => GraphQLWorkDescription::try_from(base),
+            ParallelScanAvailableJob::Merge { left, .. } => {
+                GraphQLWorkDescription::try_from(left.0 .0)
+            }
+        }
+    }
+}
+
+impl TryFrom<MinaBaseFeeExcessStableV1> for GraphQLFeeExcesses {
+    type Error = ConversionError;
+
+    fn try_from(value: MinaBaseFeeExcessStableV1) -> Result<Self, Self::Error> {
+        Ok(Self {
+            fee_token_left: value.0.token.to_string(),
+            fee_excess_left: GraphQLFeeExcess {
+                fee_magnitude: value.0.amount.magnitude.to_string(),
+                sign: value.0.amount.sgn.to_string(),
+            },
+            fee_token_right: value.1.token.to_string(),
+            fee_excess_right: GraphQLFeeExcess {
+                fee_magnitude: value.1.amount.magnitude.to_string(),
+                sign: value.1.amount.sgn.to_string(),
+            },
+        })
+    }
+}
+
+impl TryFrom<MinaStateBlockchainStateValueStableV2SignedAmount> for GraphQLSupplyChange {
+    type Error = ConversionError;
+
+    fn try_from(
+        value: MinaStateBlockchainStateValueStableV2SignedAmount,
+    ) -> Result<Self, Self::Error> {
+        Ok(Self {
+            amount_magnitude: value.magnitude.to_string(),
+            sign: value.sgn.to_string(),
+        })
+    }
+}
+
+impl TryFrom<TransactionSnarkScanStateTransactionWithWitnessStableV2> for GraphQLWorkDescription {
+    type Error = ConversionError;
+
+    fn try_from(
+        value: TransactionSnarkScanStateTransactionWithWitnessStableV2,
+    ) -> Result<Self, Self::Error> {
+        Ok(Self {
+            source_first_pass_ledger_hash: value.statement.source.first_pass_ledger.to_string(),
+            target_first_pass_ledger_hash: value.statement.target.first_pass_ledger.to_string(),
+            source_second_pass_ledger_hash: value.statement.source.second_pass_ledger.to_string(),
+            target_second_pass_ledger_hash: value.statement.target.second_pass_ledger.to_string(),
+            fee_excess: GraphQLFeeExcesses::try_from(value.statement.fee_excess.clone())?,
+            supply_change: GraphQLSupplyChange::try_from(value.statement.supply_increase.clone())?,
+            supply_increase: value.statement.supply_increase.magnitude.to_string(),
+            work_id: 0,
+        })
+    }
+}
+
+impl TryFrom<TransactionSnarkStableV2> for GraphQLWorkDescription {
+    type Error = ConversionError;
+
+    fn try_from(value: TransactionSnarkStableV2) -> Result<Self, Self::Error> {
+        Ok(Self {
+            source_first_pass_ledger_hash: value.statement.source.first_pass_ledger.to_string(),
+            target_first_pass_ledger_hash: value.statement.target.first_pass_ledger.to_string(),
+            source_second_pass_ledger_hash: value.statement.source.second_pass_ledger.to_string(),
+            target_second_pass_ledger_hash: value.statement.target.second_pass_ledger.to_string(),
+            fee_excess: GraphQLFeeExcesses::try_from(value.statement.fee_excess.clone())?,
+            supply_change: GraphQLSupplyChange::try_from(value.statement.supply_increase.clone())?,
+            supply_increase: value.statement.supply_increase.magnitude.to_string(),
+            work_id: 0,
+        })
+    }
+}
+
+pub(crate) struct GraphQLSnarkWorker {
+    pub key: GraphQLPublicKey,
+    pub account: Option<GraphQLAccount>,
+    pub fee: String,
+}
+
+#[graphql_object(context = Context)]
+#[graphql(description = "A snark worker")]
+impl GraphQLSnarkWorker {
+    pub fn key(&self) -> String {
+        self.key.to_string()
+    }
+
+    pub fn fee(&self) -> String {
+        self.fee.to_string()
+    }
+
+    pub fn account(&self) -> Option<GraphQLAccount> {
+        self.account.clone()
+    }
+}
diff --git a/node/native/src/graphql/transaction.rs b/node/native/src/graphql/transaction.rs
new file mode 100644
index 0000000000..ce3813f966
--- /dev/null
+++ b/node/native/src/graphql/transaction.rs
@@ -0,0 +1,20 @@
+use juniper::GraphQLEnum;
+use node::rpc::TransactionStatus;
+
+#[derive(Clone, Copy, Debug, GraphQLEnum)]
+#[allow(non_camel_case_types)]
+pub enum GraphQLTransactionStatus {
+    INCLUDED,
+    PENDING,
+    UNKNOWN,
+}
+
+impl From<TransactionStatus> for GraphQLTransactionStatus {
+    fn from(value: TransactionStatus) -> Self {
+        match value {
+            TransactionStatus::Included => Self::INCLUDED,
+            TransactionStatus::Pending => Self::PENDING,
+            TransactionStatus::Unknown => Self::UNKNOWN,
+        }
+    }
+}
diff --git a/node/native/src/graphql/user_command.rs b/node/native/src/graphql/user_command.rs
new file mode 100644
index 0000000000..26b672c4da
--- /dev/null
+++ b/node/native/src/graphql/user_command.rs
@@ -0,0 +1,374 @@
+use std::str::FromStr;
+
+use juniper::{GraphQLInputObject, GraphQLObject};
+use ledger::scan_state::{
+    currency::{Amount, Fee, Magnitude, Nonce, Slot},
+    transaction_logic::{signed_command, Memo},
+};
+use mina_p2p_messages::{
+    bigint::BigInt,
+    v2::{self, TokenIdKeyHash},
+};
+use mina_signer::CompressedPubKey;
+use node::account::AccountPublicKey;
+use o1_utils::field_helpers::FieldHelpers;
+
+use super::zkapp::GraphQLFailureReason;
+
+// #[derive(GraphQLInputObject, Debug)]
+// pub struct InputGraphQLSendPayment {
+//     pub input: InputGraphQLPayment,
+//     pub signature: UserCommandSignature,
+// }
+
+#[derive(GraphQLInputObject, Debug)]
+pub struct InputGraphQLPayment {
+    pub from: String,
+    pub to: String,
+    pub amount: String,
+    pub valid_until: Option<String>,
+    pub fee: String,
+    pub memo: Option<String>,
+    pub nonce: Option<String>,
+}
+
+#[derive(GraphQLInputObject, Debug)]
+pub struct InputGraphQLDelegation {
+    pub from: String,
+    pub to: String,
+    pub valid_until: Option<String>,
+    pub fee: String,
+    pub memo: Option<String>,
+    pub nonce: Option<String>,
+}
+
+#[derive(GraphQLInputObject, Debug, Clone)]
+pub struct UserCommandSignature {
+    pub field: Option<String>,
+    pub scalar: Option<String>,
+    // Note: either raw_signature or scalar + field must be provided
+    pub raw_signature: Option<String>,
+}
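+
+// Hedged example of the two accepted encodings (the values are placeholders,
+// not real signatures):
+//
+//     // 1. one hex string carrying rx followed by s; split_at(len / 2) below
+//     UserCommandSignature { field: None, scalar: None, raw_signature: Some(rx_hex_plus_s_hex) }
+//     // 2. a decimal field/scalar pair
+//     UserCommandSignature { field: Some(field_decimal), scalar: Some(scalar_decimal), raw_signature: None }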
+
+impl TryFrom<UserCommandSignature> for mina_signer::Signature {
+    type Error = super::ConversionError;
+
+    fn try_from(value: UserCommandSignature) -> Result<Self, Self::Error> {
+        let UserCommandSignature {
+            field,
+            scalar,
+            raw_signature,
+        } = value;
+
+        if let Some(raw_signature) = raw_signature {
+            let sig_parts_len = raw_signature
+                .len()
+                .checked_div(2)
+                .ok_or(super::ConversionError::InvalidLength)?;
+            let (rx_hex, s_hex) = raw_signature.split_at(sig_parts_len);
+
+            let rx_bytes = hex::decode(rx_hex).map_err(|_| super::ConversionError::InvalidHex)?;
+            let s_bytes = hex::decode(s_hex).map_err(|_| super::ConversionError::InvalidHex)?;
+
+            let rx = mina_signer::BaseField::from_bytes(&rx_bytes)?;
+            let s = mina_signer::ScalarField::from_bytes(&s_bytes)?;
+
+            Ok(Self { rx, s })
+        } else if let (Some(field), Some(scalar)) = (field, scalar) {
+            let sig = Self {
+                rx: BigInt::from_decimal(&field)?
+                    .try_into()
+                    .map_err(|_| super::ConversionError::InvalidBigInt)?,
+                s: BigInt::from_decimal(&scalar)?
+                    .try_into()
+                    .map_err(|_| super::ConversionError::InvalidBigInt)?,
+            };
+
+            Ok(sig)
+        } else {
+            Err(super::ConversionError::MissingField(
+                "Either raw_signature or scalar + field must be provided".to_string(),
+            ))
+        }
+    }
+}
+
+impl TryFrom<&UserCommandSignature> for mina_signer::Signature {
+    type Error = super::ConversionError;
+
+    fn try_from(value: &UserCommandSignature) -> Result<Self, Self::Error> {
+        value.clone().try_into()
+    }
+}
+
+impl UserCommandSignature {
+    pub fn validate(&self) -> Result<(), super::Error> {
+        if self.raw_signature.is_some() || (self.scalar.is_some() && self.field.is_some()) {
+            Ok(())
+        } else {
+            Err(super::Error::Custom(
+                "Either raw_signature or scalar + field must be provided".to_string(),
+            ))
+        }
+    }
+}
+
+#[derive(GraphQLObject, Debug)]
+pub struct GraphQLSendPaymentResponse {
+    pub payment: GraphQLUserCommand,
+}
+
+#[derive(GraphQLObject, Debug)]
+pub struct GraphQLSendDelegationResponse {
+    pub delegation: GraphQLUserCommand,
+}
+
+#[derive(GraphQLObject, Debug)]
+pub struct GraphQLUserCommand {
+    pub amount: String,
+    pub fee: String,
+    pub failure_reason: Option<GraphQLFailureReason>,
+    // TODO: add the account type
+    pub fee_payer: String,
+    pub fee_token: String,
+    pub hash: String,
+    pub id: String,
+    pub is_delegation: bool,
+    pub kind: String,
+    pub memo: String,
+    pub nonce: String,
+    // TODO: add the account type
+    pub receiver: String,
+    // TODO: add the account type
+    pub source: String,
+    pub token: String,
+    pub valid_until: String,
+}
+
+impl TryFrom<v2::MinaBaseUserCommandStableV2> for GraphQLSendPaymentResponse {
+    type Error = super::ConversionError;
+    fn try_from(value: v2::MinaBaseUserCommandStableV2) -> Result<Self, Self::Error> {
+        if let v2::MinaBaseUserCommandStableV2::SignedCommand(ref signed_cmd) = value {
+            if let v2::MinaBaseSignedCommandPayloadBodyStableV2::Payment(ref payment) =
+                signed_cmd.payload.body
+            {
+                let res = GraphQLSendPaymentResponse {
+                    payment: GraphQLUserCommand {
+                        amount: payment.amount.to_string(),
+                        fee: signed_cmd.payload.common.fee.to_string(),
+                        failure_reason: None,
+                        fee_payer: signed_cmd.payload.common.fee_payer_pk.to_string(),
+                        fee_token: TokenIdKeyHash::default().to_string(),
+                        hash: signed_cmd.hash()?.to_string(),
+                        id: signed_cmd.to_base64()?,
+                        is_delegation: false,
+                        kind: "PAYMENT".to_string(),
+                        memo: signed_cmd.payload.common.memo.to_base58check(),
+                        nonce: signed_cmd.payload.common.nonce.to_string(),
+                        receiver: payment.receiver_pk.to_string(),
+                        source: signed_cmd.payload.common.fee_payer_pk.to_string(),
+                        token: TokenIdKeyHash::default().to_string(),
+                        valid_until: signed_cmd.payload.common.valid_until.as_u32().to_string(),
+                    },
+                };
+                Ok(res)
+            } else {
+                Err(super::ConversionError::WrongVariant)
+            }
+        } else {
+            Err(super::ConversionError::WrongVariant)
+        }
+    }
+}
+
+impl TryFrom<v2::MinaBaseUserCommandStableV2> for GraphQLSendDelegationResponse {
+    type Error = super::ConversionError;
+    fn try_from(value: v2::MinaBaseUserCommandStableV2) -> Result<Self, Self::Error> {
+        if let v2::MinaBaseUserCommandStableV2::SignedCommand(ref signed_cmd) = value {
+            if let v2::MinaBaseSignedCommandPayloadBodyStableV2::StakeDelegation(ref delegation) =
+                signed_cmd.payload.body
+            {
+                let v2::MinaBaseStakeDelegationStableV2::SetDelegate { new_delegate } = delegation;
+                let res = GraphQLSendDelegationResponse {
+                    delegation: GraphQLUserCommand {
+                        amount: "0".to_string(),
+                        fee: signed_cmd.payload.common.fee.to_string(),
+                        failure_reason: None,
+                        fee_payer: signed_cmd.payload.common.fee_payer_pk.to_string(),
+                        fee_token: TokenIdKeyHash::default().to_string(),
+                        hash: signed_cmd.hash()?.to_string(),
+                        id: signed_cmd.to_base64()?,
+                        is_delegation: true,
+                        kind: "STAKE_DELEGATION".to_string(),
+                        memo: signed_cmd.payload.common.memo.to_base58check(),
+                        nonce: signed_cmd.payload.common.nonce.to_string(),
+                        receiver: new_delegate.to_string(),
+                        source: signed_cmd.payload.common.fee_payer_pk.to_string(),
+                        token: TokenIdKeyHash::default().to_string(),
+                        valid_until: signed_cmd.payload.common.valid_until.as_u32().to_string(),
+                    },
+                };
+                Ok(res)
+            } else {
+                Err(super::ConversionError::WrongVariant)
+            }
+        } else {
+            Err(super::ConversionError::WrongVariant)
+        }
+    }
+}
+
+impl InputGraphQLPayment {
+    pub fn create_user_command(
+        &self,
+        inferred_nonce: Nonce,
+        signature: UserCommandSignature,
+    ) -> Result<v2::MinaBaseUserCommandStableV2, super::ConversionError> {
+        let inferred_nonce = inferred_nonce.incr();
+
+        let nonce = if let Some(nonce) = &self.nonce {
+            let input_nonce = Nonce::from_u32(
+                nonce
+                    .parse::<u32>()
+                    .map_err(|_| super::ConversionError::InvalidBigInt)?,
+            );
+
+            if input_nonce.is_zero() || input_nonce > inferred_nonce {
+                return Err(super::ConversionError::Custom(
+                    "Provided nonce is zero or greater than the inferred nonce".to_string(),
+                ));
+            } else {
+                input_nonce
+            }
+        } else {
+            inferred_nonce
+        };
+
+        let valid_until = if let Some(valid_until) = &self.valid_until {
+            Some(Slot::from_u32(
+                valid_until
+                    .parse::<u32>()
+                    .map_err(|_| super::ConversionError::InvalidBigInt)?,
+            ))
+        } else {
+            None
+        };
+
+        let memo = if let Some(memo) = &self.memo {
+            Memo::from_str(memo)
+                .map_err(|_| super::ConversionError::Custom("Invalid memo".to_string()))?
+        } else {
+            Memo::empty()
+        };
+
+        let from: CompressedPubKey = AccountPublicKey::from_str(&self.from)?
+            .try_into()
+            .map_err(|_| super::ConversionError::InvalidBigInt)?;
+
+        let signature = signature.try_into()?;
+
+        let sc: signed_command::SignedCommand = signed_command::SignedCommand {
+            payload: signed_command::SignedCommandPayload::create(
+                Fee::from_u64(
+                    self.fee
+                        .parse::<u64>()
+                        .map_err(|_| super::ConversionError::InvalidBigInt)?,
+                ),
+                from.clone(),
+                nonce,
+                valid_until,
+                memo,
+                signed_command::Body::Payment(signed_command::PaymentPayload {
+                    receiver_pk: AccountPublicKey::from_str(&self.to)?
+                        .try_into()
+                        .map_err(|_| super::ConversionError::InvalidBigInt)?,
+                    amount: Amount::from_u64(
+                        self.amount
+                            .parse::<u64>()
+                            .map_err(|_| super::ConversionError::InvalidBigInt)?,
+                    ),
+                }),
+            ),
+            signer: from.clone(),
+            signature,
+        };
+
+        Ok(v2::MinaBaseUserCommandStableV2::SignedCommand(sc.into()))
+    }
+}
+
+impl InputGraphQLDelegation {
+    pub fn create_user_command(
+        &self,
+        inferred_nonce: Nonce,
+        signature: UserCommandSignature,
+    ) -> Result<v2::MinaBaseUserCommandStableV2, super::ConversionError> {
+        let inferred_nonce = inferred_nonce.incr();
+
+        let nonce = if let Some(nonce) = &self.nonce {
+            let input_nonce = Nonce::from_u32(
+                nonce
+                    .parse::<u32>()
+                    .map_err(|_| super::ConversionError::InvalidBigInt)?,
+            );
+
+            if input_nonce.is_zero() || input_nonce > inferred_nonce {
+                return Err(super::ConversionError::Custom(
+                    "Provided nonce is zero or greater than the inferred nonce".to_string(),
+                ));
+            } else {
+                input_nonce
+            }
+        } else {
+            inferred_nonce
+        };
+
+        let valid_until = if let Some(valid_until) = &self.valid_until {
+            Some(Slot::from_u32(
+                valid_until
+                    .parse::<u32>()
+                    .map_err(|_| super::ConversionError::InvalidBigInt)?,
+            ))
+        } else {
+            None
+        };
+
+        let memo = if let Some(memo) = &self.memo {
+            Memo::from_str(memo)
+                .map_err(|_| super::ConversionError::Custom("Invalid memo".to_string()))?
+        } else {
+            Memo::empty()
+        };
+
+        let from: CompressedPubKey = AccountPublicKey::from_str(&self.from)?
+            .try_into()
+            .map_err(|_| super::ConversionError::InvalidBigInt)?;
+
+        let signature = signature.try_into()?;
+
+        let sc: signed_command::SignedCommand = signed_command::SignedCommand {
+            payload: signed_command::SignedCommandPayload::create(
+                Fee::from_u64(
+                    self.fee
+                        .parse::<u64>()
+                        .map_err(|_| super::ConversionError::InvalidBigInt)?,
+                ),
+                from.clone(),
+                nonce,
+                valid_until,
+                memo,
+                signed_command::Body::StakeDelegation(
+                    signed_command::StakeDelegationPayload::SetDelegate {
+                        new_delegate: AccountPublicKey::from_str(&self.to)?
+                            .try_into()
+                            .map_err(|_| super::ConversionError::InvalidBigInt)?,
+                    },
+                ),
+            ),
+            signer: from.clone(),
+            signature,
+        };
+
+        Ok(v2::MinaBaseUserCommandStableV2::SignedCommand(sc.into()))
+    }
+}
diff --git a/node/native/src/graphql/zkapp.rs b/node/native/src/graphql/zkapp.rs
index 76e231ce1f..b0e321d741 100644
--- a/node/native/src/graphql/zkapp.rs
+++ b/node/native/src/graphql/zkapp.rs
@@ -103,29 +103,37 @@ pub struct InputGraphQLZkappCommand {
     pub fee_payer: InputGraphQLFeePayer,
 }
 
+impl TryFrom<MinaBaseZkappCommandTStableV1WireStableV1> for GraphQLZkapp {
+    type Error = ConversionError;
+
+    fn try_from(zkapp: MinaBaseZkappCommandTStableV1WireStableV1) -> Result<Self, Self::Error> {
+        let account_updates = zkapp
+            .account_updates
+            .clone()
+            .into_iter()
+            .map(|v| v.elt.account_update.try_into())
+            .collect::<Result<Vec<_>, _>>()?;
+
+        Ok(GraphQLZkapp {
+            hash: zkapp.hash()?.to_string(),
+            failure_reason: None,
+            id: zkapp.to_base64()?,
+            zkapp_command: GraphQLZkappCommand {
+                memo: zkapp.memo.to_base58check(),
+                account_updates,
+                fee_payer: GraphQLFeePayer::from(zkapp.fee_payer),
+            },
+        })
+    }
+}
+
 impl TryFrom<MinaBaseUserCommandStableV2> for GraphQLSendZkappResponse {
     type Error = ConversionError;
     fn try_from(value: MinaBaseUserCommandStableV2) -> Result<Self, Self::Error> {
         if let MinaBaseUserCommandStableV2::ZkappCommand(zkapp) = value {
-            let account_updates = zkapp
-                .account_updates
-                .clone()
-                .into_iter()
-                .map(|v| v.elt.account_update.try_into())
-                .collect::<Result<Vec<_>, _>>()?;
-            let res = GraphQLSendZkappResponse {
-                zkapp: GraphQLZkapp {
-                    hash: zkapp.hash()?.to_string(),
-                    failure_reason: None,
-                    id: zkapp.to_base64()?,
-                    zkapp_command: GraphQLZkappCommand {
-                        memo: zkapp.memo.to_base58check(),
-                        account_updates,
-                        fee_payer: GraphQLFeePayer::from(zkapp.fee_payer),
-                    },
-                },
-            };
-            Ok(res)
+            Ok(GraphQLSendZkappResponse {
+                zkapp: GraphQLZkapp::try_from(zkapp)?,
+            })
         } else {
             Err(ConversionError::WrongVariant)
         }
diff --git a/node/native/src/node/builder.rs b/node/native/src/node/builder.rs
index 3715470d82..1650514d48 100644
--- a/node/native/src/node/builder.rs
+++ b/node/native/src/node/builder.rs
@@ -1,7 +1,7 @@
 use std::{
     fs::File,
     io::{BufRead, BufReader, Read},
-    net::{IpAddr, SocketAddr},
+    net::IpAddr,
     path::Path,
     sync::Arc,
     time::Duration,
@@ -24,7 +24,7 @@ use node::{
     SnarkerStrategy, TransitionFrontierConfig,
 };
 use openmina_core::{consensus::ConsensusConstants, constants::constraint_constants};
-use openmina_node_common::p2p::TaskSpawner;
+use openmina_node_common::{archive::config::ArchiveStorageOptions, p2p::TaskSpawner};
 use rand::Rng;
 
 use crate::NodeServiceBuilder;
@@ -218,9 +218,9 @@ impl NodeBuilder {
         Ok(self.block_producer(key, provers))
     }
 
-    pub fn archive(&mut self, address: SocketAddr) -> &mut Self {
-        self.archive = Some(ArchiveConfig::new(&address.to_string()));
-        self.service.archive_init(address);
+    pub fn archive(&mut self, options: ArchiveStorageOptions, work_dir: String) -> &mut Self {
+        self.archive = Some(ArchiveConfig::new(work_dir.clone()));
+        self.service.archive_init(options, work_dir.clone());
         self
     }
 
@@ -345,6 +345,7 @@ impl
NodeBuilder { snarker: self.snarker, consensus_constants: consensus_consts.clone(), testing_run: false, + client_port: self.http_port, }, p2p: self.p2p, ledger: LedgerConfig {}, diff --git a/node/native/src/replay.rs b/node/native/src/replay.rs index 6c4336d2fc..0dcef6264e 100644 --- a/node/native/src/replay.rs +++ b/node/native/src/replay.rs @@ -1,8 +1,10 @@ use std::cell::RefCell; use node::{ - core::thread, recorder::StateWithInputActionsReader, snark::BlockVerifier, ActionWithMeta, - BuildEnv, Store, + core::thread, + recorder::StateWithInputActionsReader, + snark::{BlockVerifier, TransactionVerifier}, + ActionWithMeta, BuildEnv, Store, }; use crate::NodeService; @@ -10,7 +12,8 @@ use crate::NodeService; pub fn replay_state_with_input_actions( dir: &str, dynamic_effects_lib: Option, - mut check_build_env: impl FnMut(&BuildEnv, &BuildEnv) -> anyhow::Result<()>, + ignore_mismatch: bool, + mut check_build_env: impl FnMut(&BuildEnv, &BuildEnv, bool) -> anyhow::Result<()>, ) -> anyhow::Result { eprintln!("replaying node based on initial state and actions from the dir: {dir}"); let reader = StateWithInputActionsReader::new(dir); @@ -31,6 +34,8 @@ pub fn replay_state_with_input_actions( // index/srs doesn't match deserialized one. state.snark.block_verify.verifier_index = BlockVerifier::make(); state.snark.block_verify.verifier_srs = node::snark::get_srs(); + state.snark.user_command_verify.verifier_index = TransactionVerifier::make(); + state.snark.user_command_verify.verifier_srs = node::snark::get_srs(); state }; @@ -46,7 +51,7 @@ pub fn replay_state_with_input_actions( let store = node.store_mut(); let replay_env = BuildEnv::get(); - check_build_env(&store.state().config.build, &replay_env)?; + check_build_env(&store.state().config.build, &replay_env, ignore_mismatch)?; eprintln!("reading actions from dir: {dir}"); diff --git a/node/native/src/service/builder.rs b/node/native/src/service/builder.rs index 5f58d095a2..f3c1b271b8 100644 --- a/node/native/src/service/builder.rs +++ b/node/native/src/service/builder.rs @@ -1,5 +1,3 @@ -use std::net::SocketAddr; - use ledger::proofs::provers::BlockProver; use node::{ account::AccountSecretKey, core::thread, p2p::identity::SecretKey as P2pSecretKey, @@ -7,7 +5,8 @@ use node::{ }; pub use openmina_node_common::NodeServiceCommonBuildError; use openmina_node_common::{ - p2p::TaskSpawner, rpc::RpcSender, EventSender, NodeServiceCommonBuilder, + archive::config::ArchiveStorageOptions, p2p::TaskSpawner, rpc::RpcSender, EventSender, + NodeServiceCommonBuilder, }; use crate::{http_server, NodeService, P2pTaskSpawner}; @@ -55,8 +54,8 @@ impl NodeServiceBuilder { self } - pub fn archive_init(&mut self, address: SocketAddr) -> &mut Self { - self.common.archive_init(address); + pub fn archive_init(&mut self, options: ArchiveStorageOptions, work_dir: String) -> &mut Self { + self.common.archive_init(options, work_dir); self } diff --git a/node/src/action_kind.rs b/node/src/action_kind.rs index 6a44f7bb37..d01e8d4ccf 100644 --- a/node/src/action_kind.rs +++ b/node/src/action_kind.rs @@ -470,17 +470,26 @@ pub enum ActionKind { P2pPeerRemove, RpcActionStatsGet, RpcBestChain, + RpcBlockGet, RpcBlockProducerStatsGet, RpcConsensusConstantsGet, + RpcConsensusTimeGet, RpcDiscoveryBoostrapStats, RpcDiscoveryRoutingTable, RpcFinish, + RpcGenesisBlock, RpcGlobalStateGet, RpcHealthCheck, RpcHeartbeatGet, + RpcLedgerAccountDelegatorsGetInit, + RpcLedgerAccountDelegatorsGetPending, + RpcLedgerAccountDelegatorsGetSuccess, RpcLedgerAccountsGetInit, 
RpcLedgerAccountsGetPending, RpcLedgerAccountsGetSuccess, + RpcLedgerStatusGetInit, + RpcLedgerStatusGetPending, + RpcLedgerStatusGetSuccess, RpcMessageProgressGet, RpcP2pConnectionIncomingAnswerReady, RpcP2pConnectionIncomingError, @@ -493,13 +502,17 @@ pub enum ActionKind { RpcP2pConnectionOutgoingPending, RpcP2pConnectionOutgoingSuccess, RpcPeersGet, + RpcPooledUserCommands, + RpcPooledZkappCommands, RpcReadinessCheck, RpcScanStateSummaryGetInit, RpcScanStateSummaryGetPending, RpcScanStateSummaryGetSuccess, RpcScanStateSummaryLedgerGetInit, RpcSnarkPoolAvailableJobsGet, + RpcSnarkPoolCompletedJobsGet, RpcSnarkPoolJobGet, + RpcSnarkPoolPendingJobsGet, RpcSnarkerConfigGet, RpcSnarkerJobCommit, RpcSnarkerJobSpec, @@ -516,14 +529,19 @@ pub enum ActionKind { RpcTransitionFrontierUserCommandsGet, RpcEffectfulActionStatsGet, RpcEffectfulBestChain, + RpcEffectfulBlockGet, RpcEffectfulBlockProducerStatsGet, RpcEffectfulConsensusConstantsGet, + RpcEffectfulConsensusTimeGet, RpcEffectfulDiscoveryBoostrapStats, RpcEffectfulDiscoveryRoutingTable, + RpcEffectfulGenesisBlock, RpcEffectfulGlobalStateGet, RpcEffectfulHealthCheck, RpcEffectfulHeartbeatGet, + RpcEffectfulLedgerAccountDelegatorsGetSuccess, RpcEffectfulLedgerAccountsGetSuccess, + RpcEffectfulLedgerStatusGetSuccess, RpcEffectfulMessageProgressGet, RpcEffectfulP2pConnectionIncomingError, RpcEffectfulP2pConnectionIncomingRespond, @@ -531,10 +549,14 @@ pub enum ActionKind { RpcEffectfulP2pConnectionOutgoingError, RpcEffectfulP2pConnectionOutgoingSuccess, RpcEffectfulPeersGet, + RpcEffectfulPooledUserCommands, + RpcEffectfulPooledZkappCommands, RpcEffectfulReadinessCheck, RpcEffectfulScanStateSummaryGetSuccess, RpcEffectfulSnarkPoolAvailableJobsGet, + RpcEffectfulSnarkPoolCompletedJobsGet, RpcEffectfulSnarkPoolJobGet, + RpcEffectfulSnarkPoolPendingJobsGet, RpcEffectfulSnarkerConfigGet, RpcEffectfulSnarkerJobCommit, RpcEffectfulSnarkerJobSpec, @@ -607,6 +629,7 @@ pub enum ActionKind { TransactionPoolCandidateFetchPending, TransactionPoolCandidateFetchSuccess, TransactionPoolCandidateInfoReceived, + TransactionPoolCandidateLibp2pTransactionsReceived, TransactionPoolCandidatePeerPrune, TransactionPoolCandidateVerifyError, TransactionPoolCandidateVerifyNext, @@ -617,7 +640,6 @@ pub enum ActionKind { TransitionFrontierGenesisProvenInject, TransitionFrontierSyncFailed, TransitionFrontierSynced, - TransitionFrontierCandidateBestTipUpdate, TransitionFrontierCandidateBlockChainProofUpdate, TransitionFrontierCandidateBlockPrevalidateError, TransitionFrontierCandidateBlockPrevalidateSuccess, @@ -625,11 +647,8 @@ pub enum ActionKind { TransitionFrontierCandidateBlockSnarkVerifyError, TransitionFrontierCandidateBlockSnarkVerifyPending, TransitionFrontierCandidateBlockSnarkVerifySuccess, - TransitionFrontierCandidateDetectForkRange, - TransitionFrontierCandidateLongRangeForkResolve, TransitionFrontierCandidateP2pBestTipUpdate, TransitionFrontierCandidatePrune, - TransitionFrontierCandidateShortRangeForkResolve, TransitionFrontierCandidateTransitionFrontierSyncTargetUpdate, TransitionFrontierGenesisLedgerLoadInit, TransitionFrontierGenesisLedgerLoadPending, @@ -719,7 +738,7 @@ pub enum ActionKind { } impl ActionKind { - pub const COUNT: u16 = 609; + pub const COUNT: u16 = 628; } impl std::fmt::Display for ActionKind { @@ -1065,6 +1084,8 @@ impl ActionKindGet for RpcAction { Self::ScanStateSummaryGetSuccess { .. } => ActionKind::RpcScanStateSummaryGetSuccess, Self::SnarkPoolAvailableJobsGet { .. 
} => ActionKind::RpcSnarkPoolAvailableJobsGet, Self::SnarkPoolJobGet { .. } => ActionKind::RpcSnarkPoolJobGet, + Self::SnarkPoolCompletedJobsGet { .. } => ActionKind::RpcSnarkPoolCompletedJobsGet, + Self::SnarkPoolPendingJobsGet { .. } => ActionKind::RpcSnarkPoolPendingJobsGet, Self::SnarkerConfigGet { .. } => ActionKind::RpcSnarkerConfigGet, Self::SnarkerJobCommit { .. } => ActionKind::RpcSnarkerJobCommit, Self::SnarkerJobSpec { .. } => ActionKind::RpcSnarkerJobSpec, @@ -1088,6 +1109,23 @@ impl ActionKindGet for RpcAction { Self::BestChain { .. } => ActionKind::RpcBestChain, Self::ConsensusConstantsGet { .. } => ActionKind::RpcConsensusConstantsGet, Self::TransactionStatusGet { .. } => ActionKind::RpcTransactionStatusGet, + Self::BlockGet { .. } => ActionKind::RpcBlockGet, + Self::ConsensusTimeGet { .. } => ActionKind::RpcConsensusTimeGet, + Self::LedgerStatusGetInit { .. } => ActionKind::RpcLedgerStatusGetInit, + Self::LedgerStatusGetPending { .. } => ActionKind::RpcLedgerStatusGetPending, + Self::LedgerStatusGetSuccess { .. } => ActionKind::RpcLedgerStatusGetSuccess, + Self::LedgerAccountDelegatorsGetInit { .. } => { + ActionKind::RpcLedgerAccountDelegatorsGetInit + } + Self::LedgerAccountDelegatorsGetPending { .. } => { + ActionKind::RpcLedgerAccountDelegatorsGetPending + } + Self::LedgerAccountDelegatorsGetSuccess { .. } => { + ActionKind::RpcLedgerAccountDelegatorsGetSuccess + } + Self::PooledUserCommands { .. } => ActionKind::RpcPooledUserCommands, + Self::PooledZkappCommands { .. } => ActionKind::RpcPooledZkappCommands, + Self::GenesisBlock { .. } => ActionKind::RpcGenesisBlock, Self::Finish { .. } => ActionKind::RpcFinish, } } @@ -1126,6 +1164,10 @@ impl ActionKindGet for RpcEffectfulAction { ActionKind::RpcEffectfulSnarkPoolAvailableJobsGet } Self::SnarkPoolJobGet { .. } => ActionKind::RpcEffectfulSnarkPoolJobGet, + Self::SnarkPoolCompletedJobsGet { .. } => { + ActionKind::RpcEffectfulSnarkPoolCompletedJobsGet + } + Self::SnarkPoolPendingJobsGet { .. } => ActionKind::RpcEffectfulSnarkPoolPendingJobsGet, Self::SnarkerConfigGet { .. } => ActionKind::RpcEffectfulSnarkerConfigGet, Self::SnarkerJobCommit { .. } => ActionKind::RpcEffectfulSnarkerJobCommit, Self::SnarkerJobSpec { .. } => ActionKind::RpcEffectfulSnarkerJobSpec, @@ -1153,6 +1195,15 @@ impl ActionKindGet for RpcEffectfulAction { Self::BestChain { .. } => ActionKind::RpcEffectfulBestChain, Self::ConsensusConstantsGet { .. } => ActionKind::RpcEffectfulConsensusConstantsGet, Self::TransactionStatusGet { .. } => ActionKind::RpcEffectfulTransactionStatusGet, + Self::BlockGet { .. } => ActionKind::RpcEffectfulBlockGet, + Self::PooledUserCommands { .. } => ActionKind::RpcEffectfulPooledUserCommands, + Self::PooledZkappCommands { .. } => ActionKind::RpcEffectfulPooledZkappCommands, + Self::GenesisBlock { .. } => ActionKind::RpcEffectfulGenesisBlock, + Self::ConsensusTimeGet { .. } => ActionKind::RpcEffectfulConsensusTimeGet, + Self::LedgerStatusGetSuccess { .. } => ActionKind::RpcEffectfulLedgerStatusGetSuccess, + Self::LedgerAccountDelegatorsGetSuccess { .. } => { + ActionKind::RpcEffectfulLedgerAccountDelegatorsGetSuccess + } } } } @@ -1433,6 +1484,9 @@ impl ActionKindGet for TransitionFrontierGenesisEffectfulAction { impl ActionKindGet for TransitionFrontierCandidateAction { fn kind(&self) -> ActionKind { match self { + Self::P2pBestTipUpdate { .. } => { + ActionKind::TransitionFrontierCandidateP2pBestTipUpdate + } Self::BlockReceived { .. 
} => ActionKind::TransitionFrontierCandidateBlockReceived,
             Self::BlockPrevalidateSuccess { .. } => {
                 ActionKind::TransitionFrontierCandidateBlockPrevalidateSuccess
@@ -1452,20 +1506,9 @@ impl ActionKindGet for TransitionFrontierCandidateAction {
             Self::BlockSnarkVerifyError { .. } => {
                 ActionKind::TransitionFrontierCandidateBlockSnarkVerifyError
             }
-            Self::DetectForkRange { .. } => ActionKind::TransitionFrontierCandidateDetectForkRange,
-            Self::ShortRangeForkResolve { .. } => {
-                ActionKind::TransitionFrontierCandidateShortRangeForkResolve
-            }
-            Self::LongRangeForkResolve { .. } => {
-                ActionKind::TransitionFrontierCandidateLongRangeForkResolve
-            }
-            Self::BestTipUpdate { .. } => ActionKind::TransitionFrontierCandidateBestTipUpdate,
             Self::TransitionFrontierSyncTargetUpdate => {
                 ActionKind::TransitionFrontierCandidateTransitionFrontierSyncTargetUpdate
             }
-            Self::P2pBestTipUpdate { .. } => {
-                ActionKind::TransitionFrontierCandidateP2pBestTipUpdate
-            }
             Self::Prune => ActionKind::TransitionFrontierCandidatePrune,
         }
     }
@@ -1553,6 +1596,9 @@ impl ActionKindGet for TransactionPoolCandidateAction {
             Self::FetchPending { .. } => ActionKind::TransactionPoolCandidateFetchPending,
             Self::FetchError { .. } => ActionKind::TransactionPoolCandidateFetchError,
             Self::FetchSuccess { .. } => ActionKind::TransactionPoolCandidateFetchSuccess,
+            Self::Libp2pTransactionsReceived { .. } => {
+                ActionKind::TransactionPoolCandidateLibp2pTransactionsReceived
+            }
             Self::VerifyNext => ActionKind::TransactionPoolCandidateVerifyNext,
             Self::VerifyPending { .. } => ActionKind::TransactionPoolCandidateVerifyPending,
             Self::VerifyError { .. } => ActionKind::TransactionPoolCandidateVerifyError,
diff --git a/node/src/block_producer/block_producer_actions.rs b/node/src/block_producer/block_producer_actions.rs
index 5dd35f00d0..e1b14e9d80 100644
--- a/node/src/block_producer/block_producer_actions.rs
+++ b/node/src/block_producer/block_producer_actions.rs
@@ -111,7 +111,18 @@ impl redux::EnablingCondition<crate::State> for BlockProducerAction {
                         || proven_block.is_some_and(|b| Arc::ptr_eq(&b.block, &tip.block))
                 })
             };
-            this.current.won_slot_should_produce(time) && has_genesis_proven_if_needed()
+            this.current.won_slot_should_produce(time)
+                && has_genesis_proven_if_needed()
+                // Don't start block production (in particular, staged ledger diff
+                // creation) while a transition frontier sync commit is pending.
+                // When a fork is being committed, there is no guarantee that the
+                // staged ledger of the current best tip (chosen as the parent of
+                // the block being produced) will still exist once the staged
+                // ledger diff creation request reaches the ledger service; we
+                // would then be building on top of a non-existent staged ledger,
+                // causing a failure.
+                && !state.transition_frontier.sync.is_commit_pending()
        })
    }
    BlockProducerAction::WonSlotTransactionsGet => {
diff --git a/node/src/block_producer/block_producer_state.rs b/node/src/block_producer/block_producer_state.rs
index c7d24078c5..9463a50b8b 100644
--- a/node/src/block_producer/block_producer_state.rs
+++ b/node/src/block_producer/block_producer_state.rs
@@ -319,6 +319,10 @@ impl BlockProducerCurrentState {
         &self,
         best_tip: &ArcBlockWithHash,
     ) -> Option<BlockProducerWonSlotDiscardReason> {
+        if matches!(self, Self::WonSlotDiscarded { ..
}) { + return None; + } + let won_slot = self.won_slot()?; if won_slot.global_slot() < best_tip.global_slot() { return Some(BlockProducerWonSlotDiscardReason::BestTipGlobalSlotHigher); diff --git a/node/src/block_producer/vrf_evaluator/block_producer_vrf_evaluator_actions.rs b/node/src/block_producer/vrf_evaluator/block_producer_vrf_evaluator_actions.rs index 6a3def263b..94fbf40997 100644 --- a/node/src/block_producer/vrf_evaluator/block_producer_vrf_evaluator_actions.rs +++ b/node/src/block_producer/vrf_evaluator/block_producer_vrf_evaluator_actions.rs @@ -105,6 +105,7 @@ pub enum BlockProducerVrfEvaluatorAction { /// Epoch evaluation finished. #[action_event(level = info, fields(epoch_number, latest_evaluated_global_slot))] FinishEpochEvaluation { + /// The evaluated epoch number. epoch_number: u32, latest_evaluated_global_slot: u32, }, diff --git a/node/src/block_producer/vrf_evaluator/block_producer_vrf_evaluator_reducer.rs b/node/src/block_producer/vrf_evaluator/block_producer_vrf_evaluator_reducer.rs index 669c00659a..9136b8c45c 100644 --- a/node/src/block_producer/vrf_evaluator/block_producer_vrf_evaluator_reducer.rs +++ b/node/src/block_producer/vrf_evaluator/block_producer_vrf_evaluator_reducer.rs @@ -424,7 +424,7 @@ impl BlockProducerVrfEvaluatorState { time: meta.time(), epoch_number: *epoch_number, }; - state.set_last_evaluated_epoch(); + state.set_last_evaluated_epoch(*epoch_number); } BlockProducerVrfEvaluatorAction::WaitForNextEvaluation => { state.status = diff --git a/node/src/block_producer/vrf_evaluator/block_producer_vrf_evaluator_state.rs b/node/src/block_producer/vrf_evaluator/block_producer_vrf_evaluator_state.rs index 94032af56a..69b4d51000 100644 --- a/node/src/block_producer/vrf_evaluator/block_producer_vrf_evaluator_state.rs +++ b/node/src/block_producer/vrf_evaluator/block_producer_vrf_evaluator_state.rs @@ -277,18 +277,8 @@ impl BlockProducerVrfEvaluatorState { pub fn initialize_evaluator(&mut self, _epoch: u32, _last_height: u32) {} - pub fn set_last_evaluated_epoch(&mut self) { - if let BlockProducerVrfEvaluatorStatus::EpochEvaluationSuccess { epoch_number, .. 
} = - &self.status - { - match self.epoch_context { - EpochContext::Current(_) => self.last_evaluated_epoch = Some(*epoch_number), - EpochContext::Next(_) => { - self.last_evaluated_epoch = Some(epoch_number.checked_add(1).expect("overflow")) - } - EpochContext::Waiting => {} - } - } + pub fn set_last_evaluated_epoch(&mut self, epoch_number: u32) { + self.last_evaluated_epoch = Some(epoch_number); } pub fn initial_slot(&self) -> Option { diff --git a/node/src/config.rs b/node/src/config.rs index e2223dbb74..2dd3713d15 100644 --- a/node/src/config.rs +++ b/node/src/config.rs @@ -35,6 +35,7 @@ pub struct GlobalConfig { pub build: Box, pub snarker: Option, pub consensus_constants: ConsensusConstants, + pub client_port: Option, pub testing_run: bool, } @@ -55,6 +56,7 @@ pub enum SnarkerStrategy { #[derive(Serialize, Deserialize, Debug, Clone)] pub struct BuildEnv { pub time: String, + pub version: String, pub git: GitBuildEnv, pub cargo: CargoBuildEnv, pub rustc: RustCBuildEnv, @@ -90,6 +92,7 @@ impl BuildEnv { pub fn get() -> Self { Self { time: env!("VERGEN_BUILD_TIMESTAMP").to_owned(), + version: env!("VERGEN_GIT_DESCRIBE").to_owned(), git: GitBuildEnv { commit_time: env!("VERGEN_GIT_COMMIT_TIMESTAMP").to_owned(), commit_hash: env!("VERGEN_GIT_SHA").to_owned(), diff --git a/node/src/effects.rs b/node/src/effects.rs index 586f5f85c5..2939bc6d1a 100644 --- a/node/src/effects.rs +++ b/node/src/effects.rs @@ -113,20 +113,17 @@ pub fn effects(store: &mut Store, action: ActionWithMeta) { fn p2p_request_best_tip_if_needed(store: &mut Store) { // TODO(binier): refactor let state = store.state(); - let consensus_best_tip_hash = state.transition_frontier.candidates.best_tip.as_ref(); + let best_candidate = state.transition_frontier.candidates.best_verified(); + let best_candidate_hash = best_candidate.map(|s| s.block.hash()); let best_tip_hash = state.transition_frontier.best_tip().map(|v| &v.hash); let syncing_best_tip_hash = state.transition_frontier.sync.best_tip().map(|v| &v.hash); - if consensus_best_tip_hash.is_some() - && consensus_best_tip_hash != best_tip_hash - && consensus_best_tip_hash != syncing_best_tip_hash - && state - .transition_frontier - .candidates - .best_tip_chain_proof - .is_none() + if best_candidate.is_some() + && best_candidate_hash != best_tip_hash + && best_candidate_hash != syncing_best_tip_hash + && best_candidate.is_some_and(|s| s.chain_proof.is_none()) { - request_best_tip(store, consensus_best_tip_hash.cloned()); + request_best_tip(store, best_candidate_hash.cloned()); } } use mina_p2p_messages::v2::StateHash; diff --git a/node/src/event_source/event.rs b/node/src/event_source/event.rs index edd22bd477..c0c9001ea3 100644 --- a/node/src/event_source/event.rs +++ b/node/src/event_source/event.rs @@ -51,6 +51,8 @@ impl std::fmt::Display for Event { RpcRequest::SnarkPoolJobGet { job_id } => { write!(f, "SnarkPoolJobGet, {job_id}") } + RpcRequest::SnarkPoolCompletedJobsGet => write!(f, "SnarkPoolCompletedJobsGet"), + RpcRequest::SnarkPoolPendingJobsGet => write!(f, "SnarkPoolPendingJobsGet"), RpcRequest::SnarkerConfig => write!(f, "SnarkerConfig"), RpcRequest::SnarkerJobCommit { job_id } => { write!(f, "SnarkerJobCommit, {job_id}") @@ -72,6 +74,15 @@ impl std::fmt::Display for Event { RpcRequest::BestChain(..) => write!(f, "BestChain"), RpcRequest::ConsensusConstantsGet => write!(f, "ConsensusConstantsGet"), RpcRequest::TransactionStatusGet(..) => write!(f, "TransactionStatusGet"), + RpcRequest::GetBlock(..) => write!(f, "GetBlock"), + RpcRequest::PooledUserCommands(..) 
=> write!(f, "PooledUserCommands"), + RpcRequest::PooledZkappCommands(..) => write!(f, "PooledZkappCommands"), + RpcRequest::GenesisBlockGet => write!(f, "GenesisBlock"), + RpcRequest::ConsensusTimeGet(..) => write!(f, "ConsensusTimeGet"), + RpcRequest::LedgerStatusGet(..) => write!(f, "LedgerStatusGet"), + RpcRequest::LedgerAccountDelegatorsGet(..) => { + write!(f, "LedgerAccountDelegatorsGet") + } } } Self::ExternalSnarkWorker(event) => { diff --git a/node/src/event_source/event_source_effects.rs b/node/src/event_source/event_source_effects.rs index 622e67990e..d9d54a0e4a 100644 --- a/node/src/event_source/event_source_effects.rs +++ b/node/src/event_source/event_source_effects.rs @@ -97,7 +97,7 @@ pub fn event_source_effects(store: &mut Store, action: EventSourc MioEvent::IncomingDataDidReceive(addr, result) => { store.dispatch(P2pNetworkSchedulerAction::IncomingDataDidReceive { addr, - result: result.map(From::from), + result, }); } MioEvent::OutgoingDataDidSend(_, _result) => {} @@ -340,6 +340,12 @@ pub fn event_source_effects(store: &mut Store, action: EventSourc RpcRequest::SnarkPoolJobGet { job_id } => { store.dispatch(RpcAction::SnarkPoolJobGet { rpc_id, job_id }); } + RpcRequest::SnarkPoolCompletedJobsGet => { + store.dispatch(RpcAction::SnarkPoolCompletedJobsGet { rpc_id }); + } + RpcRequest::SnarkPoolPendingJobsGet => { + store.dispatch(RpcAction::SnarkPoolPendingJobsGet { rpc_id }); + } RpcRequest::SnarkerConfig => { store.dispatch(RpcAction::SnarkerConfigGet { rpc_id }); } @@ -388,6 +394,34 @@ pub fn event_source_effects(store: &mut Store, action: EventSourc RpcRequest::TransactionStatusGet(tx) => { store.dispatch(RpcAction::TransactionStatusGet { rpc_id, tx }); } + RpcRequest::GetBlock(query) => { + store.dispatch(RpcAction::BlockGet { rpc_id, query }); + } + RpcRequest::PooledUserCommands(query) => { + store.dispatch(RpcAction::PooledUserCommands { rpc_id, query }); + } + RpcRequest::PooledZkappCommands(query) => { + store.dispatch(RpcAction::PooledZkappCommands { rpc_id, query }); + } + RpcRequest::ConsensusTimeGet(query) => { + store.dispatch(RpcAction::ConsensusTimeGet { rpc_id, query }); + } + RpcRequest::GenesisBlockGet => { + store.dispatch(RpcAction::GenesisBlock { rpc_id }); + } + RpcRequest::LedgerStatusGet(ledger_hash) => { + store.dispatch(RpcAction::LedgerStatusGetInit { + rpc_id, + ledger_hash, + }); + } + RpcRequest::LedgerAccountDelegatorsGet(ledger_hash, account_id) => { + store.dispatch(RpcAction::LedgerAccountDelegatorsGetInit { + rpc_id, + ledger_hash, + account_id, + }); + } }, Event::ExternalSnarkWorker(e) => match e { ExternalSnarkWorkerEvent::Started => { diff --git a/node/src/external_snark_worker_effectful/external_snark_worker_effectful_effects.rs b/node/src/external_snark_worker_effectful/external_snark_worker_effectful_effects.rs index e2487f0e67..99d3b83105 100644 --- a/node/src/external_snark_worker_effectful/external_snark_worker_effectful_effects.rs +++ b/node/src/external_snark_worker_effectful/external_snark_worker_effectful_effects.rs @@ -9,7 +9,8 @@ pub fn external_snark_worker_effectful_effects( let (action, _) = action.split(); match action { ExternalSnarkWorkerEffectfulAction::Start { public_key, fee } => { - if let Err(err) = store.service.start(public_key, fee) { + let work_verifier = store.state().snark.work_verify.verifier_index.clone(); + if let Err(err) = store.service.start(public_key, fee, work_verifier) { store.dispatch(ExternalSnarkWorkerAction::Error { error: err, permanent: true, diff --git 
a/node/src/external_snark_worker_effectful/external_snark_worker_service.rs b/node/src/external_snark_worker_effectful/external_snark_worker_service.rs index 730b12b195..2412f6fdb0 100644 --- a/node/src/external_snark_worker_effectful/external_snark_worker_service.rs +++ b/node/src/external_snark_worker_effectful/external_snark_worker_service.rs @@ -1,5 +1,6 @@ use mina_p2p_messages::v2::{CurrencyFeeStableV1, NonZeroCurvePoint}; use serde::{Deserialize, Serialize}; +use snark::TransactionVerifier; use crate::external_snark_worker::{ ExternalSnarkWorkerError, ExternalSnarkWorkerWorkError, SnarkWorkResult, SnarkWorkSpec, @@ -21,6 +22,7 @@ pub trait ExternalSnarkWorkerService { &mut self, public_key: NonZeroCurvePoint, fee: CurrencyFeeStableV1, + work_verifier: TransactionVerifier, ) -> Result<(), ExternalSnarkWorkerError>; /// Submits snark work diff --git a/node/src/ledger/ledger_manager.rs b/node/src/ledger/ledger_manager.rs index 99318272f9..5b407b3113 100644 --- a/node/src/ledger/ledger_manager.rs +++ b/node/src/ledger/ledger_manager.rs @@ -1,7 +1,7 @@ use super::{ - read::{LedgerReadId, LedgerReadRequest, LedgerReadResponse}, + read::{LedgerReadId, LedgerReadRequest, LedgerReadResponse, LedgerStatus}, write::{LedgerWriteRequest, LedgerWriteResponse}, - {LedgerCtx, LedgerService}, + LedgerCtx, LedgerService, }; use crate::{ account::AccountPublicKey, ledger::LedgerAddress, rpc::AccountQuery, @@ -236,9 +236,9 @@ impl LedgerRequest { } LedgerReadRequest::AccountsForRpc(rpc_id, ledger_hash, account_query) => { let res = match &account_query { + AccountQuery::All => ledger_ctx.get_accounts_for_rpc(ledger_hash, None), AccountQuery::SinglePublicKey(public_key) => ledger_ctx .get_accounts_for_rpc(ledger_hash, Some(public_key.clone())), - AccountQuery::All => ledger_ctx.get_accounts_for_rpc(ledger_hash, None), AccountQuery::PubKeyWithTokenId(public_key, token_id_key_hash) => { let id = AccountId { public_key: public_key.clone().try_into().unwrap(), @@ -246,10 +246,27 @@ impl LedgerRequest { }; ledger_ctx.get_accounts(ledger_hash, vec![id]) } + AccountQuery::MultipleIds(ids) => { + ledger_ctx.get_accounts(ledger_hash, ids.clone()) + } }; LedgerReadResponse::AccountsForRpc(rpc_id, res, account_query) } + LedgerReadRequest::GetLedgerStatus(rpc_id, ledger_hash) => { + let res = ledger_ctx.get_num_accounts(ledger_hash).map( + |(num_accounts, ledger_hash)| LedgerStatus { + num_accounts, + best_tip_staged_ledger_hash: ledger_hash, + }, + ); + + LedgerReadResponse::GetLedgerStatus(rpc_id, res) + } + LedgerReadRequest::GetAccountDelegators(rpc_id, ledger_hash, account_id) => { + let res = ledger_ctx.get_account_delegators(&ledger_hash, &account_id); + LedgerReadResponse::GetAccountDelegators(rpc_id, res) + } }, ), LedgerRequest::AccountsSet { @@ -350,17 +367,17 @@ pub struct LedgerManager { } #[derive(Clone)] -pub(super) struct LedgerCaller(mpsc::UnboundedSender); +pub(super) struct LedgerCaller(mpsc::TrackedUnboundedSender); impl LedgerManager { pub fn spawn(mut ledger_ctx: LedgerCtx) -> LedgerManager { - let (sender, mut receiver) = mpsc::unbounded_channel(); + let (sender, mut receiver) = mpsc::tracked_unbounded_channel(); let caller = LedgerCaller(sender); let ledger_caller = caller.clone(); let ledger_manager_loop = move || { - while let Some(LedgerRequestWithChan { request, responder }) = receiver.blocking_recv() - { + while let Some(msg) = receiver.blocking_recv() { + let LedgerRequestWithChan { request, responder } = msg.0; let response = request.handle(&mut ledger_ctx, &ledger_caller, 
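// `responder.is_some()` tells the handler whether the caller is blocked on a
// synchronous reply channel, or whether the response should instead be
// delivered back to the state machine as an event.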
responder.is_some()); match (response, responder) { (LedgerResponse::Write(resp), None) => { @@ -395,6 +412,10 @@ impl LedgerManager { } } + pub fn pending_calls(&self) -> usize { + self.caller.0.len() + } + pub(super) fn call(&self, request: LedgerRequest) { self.caller.call(request) } @@ -458,7 +479,7 @@ impl LedgerManager { impl LedgerCaller { pub fn call(&self, request: LedgerRequest) { self.0 - .send(LedgerRequestWithChan { + .tracked_send(LedgerRequestWithChan { request, responder: None, }) @@ -471,7 +492,7 @@ impl LedgerCaller { ) -> Result { let (responder, receiver) = std::sync::mpsc::sync_channel(0); self.0 - .send(LedgerRequestWithChan { + .tracked_send(LedgerRequestWithChan { request, responder: Some(responder), }) diff --git a/node/src/ledger/ledger_reducer.rs b/node/src/ledger/ledger_reducer.rs index 1e8cbb0504..7bdc1b00df 100644 --- a/node/src/ledger/ledger_reducer.rs +++ b/node/src/ledger/ledger_reducer.rs @@ -17,6 +17,16 @@ impl LedgerState { } = action { if let Ok(state) = state_context.get_substate_mut() { + if result.alive_masks > 294 { + // TODO(binier): should be a bug condition, but can't be + // because we get false positive during testing, since + // multiple nodes/ledger run in the same process. + openmina_core::log::warn!( + meta.time(); + "ledger mask leak: more than 294 ledger masks ({}) detected!", + result.alive_masks + ); + } state.alive_masks = result.alive_masks; } } diff --git a/node/src/ledger/ledger_service.rs b/node/src/ledger/ledger_service.rs index f8e34c15d3..29b7f574da 100644 --- a/node/src/ledger/ledger_service.rs +++ b/node/src/ledger/ledger_service.rs @@ -1,10 +1,7 @@ use super::{ ledger_empty_hash_at_depth, - read::LedgerReadResponse, - read::{LedgerReadId, LedgerReadRequest}, - write::CommitResult, - write::LedgerWriteRequest, - write::LedgerWriteResponse, + read::{LedgerReadId, LedgerReadRequest, LedgerReadResponse}, + write::{CommitResult, LedgerWriteRequest, LedgerWriteResponse, LedgersToKeep}, LedgerAddress, LedgerEvent, LEDGER_DEPTH, }; use crate::{ @@ -65,8 +62,8 @@ use mina_p2p_messages::{ }; use mina_signer::CompressedPubKey; use openmina_core::{ - block::AppliedBlock, - block::ArcBlockWithHash, + block::{AppliedBlock, ArcBlockWithHash}, + bug_condition, constants::constraint_constants, snark::{Snark, SnarkJobId}, thread, @@ -137,18 +134,18 @@ impl StagedLedgersStorage { fn retain(&mut self, fun: F) where - F: Fn(&LedgerHash, &[Arc]) -> bool, + F: Fn(&MinaBaseStagedLedgerHashStableV1) -> bool, { - self.by_merkle_root_hash - .retain(|merkle_root_hash, staged_ledger_hashes| { - let retain = fun(merkle_root_hash, staged_ledger_hashes.as_slice()); - if !retain { - for staged_ledger_hash in staged_ledger_hashes { - self.staged_ledgers.remove(staged_ledger_hash); - } + self.by_merkle_root_hash.retain(|_, staged_ledger_hashes| { + staged_ledger_hashes.retain(|hash| { + if fun(hash) { + return true; } - retain + self.staged_ledgers.remove(hash); + false }); + !staged_ledger_hashes.is_empty() + }); } fn extend(&mut self, iterator: I) @@ -525,6 +522,23 @@ impl LedgerCtx { Ok(()) } + pub fn get_account_delegators( + &self, + ledger_hash: &LedgerHash, + account_id: &AccountId, + ) -> Option> { + let (mask, _) = self.mask(ledger_hash)?; + let mut accounts = Vec::new(); + + mask.iter(|account| { + if account.delegate == Some(account_id.public_key.clone()) { + accounts.push(account.clone()); + } + }); + + Some(accounts) + } + #[allow(clippy::type_complexity)] pub fn producers_with_delegates bool>( &self, @@ -857,7 +871,7 @@ impl LedgerCtx { pub fn 
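// `commit` below prunes every ledger mask that is not referenced by
// `ledgers_to_keep` when the transition frontier moves its root, and reports
// `alive_masks` in `CommitResult` so the reducer can warn when masks leak.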
commit( &mut self, - ledgers_to_keep: BTreeSet, + ledgers_to_keep: LedgersToKeep, root_snarked_ledger_updates: TransitionFrontierRootSnarkedLedgerUpdates, needed_protocol_states: BTreeMap, new_root: &ArcBlockWithHash, @@ -902,13 +916,13 @@ impl LedgerCtx { ); self.staged_ledgers - .retain(|hash, _| ledgers_to_keep.contains(hash)); + .retain(|hash| ledgers_to_keep.contains(hash)); self.staged_ledgers.extend( self.sync .staged_ledgers .take() .into_iter() - .filter(|(hash, _)| ledgers_to_keep.contains(&hash.non_snark.ledger_hash)), + .filter(|(hash, _)| ledgers_to_keep.contains(&**hash)), ); for ledger_hash in [ @@ -973,6 +987,8 @@ impl LedgerCtx { .unwrap_or_default(), ); + // self.check_alive_masks(); + CommitResult { alive_masks: ::ledger::mask::alive_len(), available_jobs, @@ -980,6 +996,36 @@ impl LedgerCtx { } } + #[allow(dead_code)] + fn check_alive_masks(&mut self) { + let mut alive: BTreeSet<_> = ::ledger::mask::alive_collect(); + let staged_ledgers = self + .staged_ledgers + .staged_ledgers + .iter() + .map(|(hash, ledger)| (&hash.non_snark.ledger_hash, ledger.ledger_ref())); + + let alive_ledgers = self + .snarked_ledgers + .iter() + .chain(staged_ledgers) + .map(|(hash, mask)| { + let uuid = mask.get_uuid(); + if !alive.remove(&uuid) { + bug_condition!("mask not found among alive masks! uuid: {uuid}, hash: {hash}"); + } + (uuid, hash) + }) + .collect::>(); + openmina_core::debug!(redux::Timestamp::global_now(); "alive_ledgers_after_commit: {alive_ledgers:#?}"); + + if !alive.is_empty() { + bug_condition!( + "masks alive which are no longer part of the ledger service: {alive:#?}" + ); + } + } + pub fn get_num_accounts( &mut self, ledger_hash: v2::LedgerHash, diff --git a/node/src/ledger/read/ledger_read_reducer.rs b/node/src/ledger/read/ledger_read_reducer.rs index f49b4583f6..43d164a2b6 100644 --- a/node/src/ledger/read/ledger_read_reducer.rs +++ b/node/src/ledger/read/ledger_read_reducer.rs @@ -199,6 +199,18 @@ impl LedgerReadState { account_query, }); } + (_, LedgerReadResponse::GetLedgerStatus(rpc_id, resp)) => { + dispatcher.push(RpcAction::LedgerStatusGetSuccess { + rpc_id, + response: resp.clone(), + }); + } + (_, LedgerReadResponse::GetAccountDelegators(rpc_id, resp)) => { + dispatcher.push(RpcAction::LedgerAccountDelegatorsGetSuccess { + rpc_id, + response: resp.clone(), + }); + } } } diff --git a/node/src/ledger/read/mod.rs b/node/src/ledger/read/mod.rs index 87937fa621..f892fbd7b8 100644 --- a/node/src/ledger/read/mod.rs +++ b/node/src/ledger/read/mod.rs @@ -34,6 +34,8 @@ pub enum LedgerReadKind { GetStagedLedgerAuxAndPendingCoinbases, ScanStateSummary, AccountsForRpc, + GetLedgerStatus, + GetAccountDelegators, } #[derive(Serialize, Deserialize, Debug, PartialEq, Clone)] @@ -49,6 +51,14 @@ pub enum LedgerReadRequest { // rpcs ScanStateSummary(v2::MinaBaseStagedLedgerHashStableV1), AccountsForRpc(RpcId, v2::LedgerHash, AccountQuery), + GetLedgerStatus(RpcId, v2::LedgerHash), + GetAccountDelegators(RpcId, v2::LedgerHash, AccountId), +} + +#[derive(Serialize, Deserialize, Debug, PartialEq, Clone)] +pub struct LedgerStatus { + pub num_accounts: u64, + pub best_tip_staged_ledger_hash: v2::LedgerHash, } #[derive(Serialize, Deserialize, Debug, Clone)] @@ -64,6 +74,8 @@ pub enum LedgerReadResponse { // rpcs ScanStateSummary(Result>, String>), AccountsForRpc(RpcId, Vec, AccountQuery), + GetLedgerStatus(RpcId, Option), + GetAccountDelegators(RpcId, Option>), } #[derive(Serialize, Deserialize, Debug, Clone)] @@ -85,6 +97,8 @@ impl LedgerReadRequest { } Self::ScanStateSummary(..) 
=> LedgerReadKind::ScanStateSummary, Self::AccountsForRpc(..) => LedgerReadKind::AccountsForRpc, + Self::GetLedgerStatus(..) => LedgerReadKind::GetLedgerStatus, + Self::GetAccountDelegators(..) => LedgerReadKind::GetAccountDelegators, } } @@ -103,6 +117,8 @@ impl LedgerReadRequest { Self::ScanStateSummary(..) => 100, // TODO(adonagy): not sure Self::AccountsForRpc(..) => 10, + Self::GetLedgerStatus(..) => 1, + Self::GetAccountDelegators(..) => 10, }; cost.max(1) } @@ -121,6 +137,8 @@ impl LedgerReadResponse { } Self::ScanStateSummary(..) => LedgerReadKind::ScanStateSummary, Self::AccountsForRpc(..) => LedgerReadKind::AccountsForRpc, + Self::GetLedgerStatus(..) => LedgerReadKind::GetLedgerStatus, + Self::GetAccountDelegators(..) => LedgerReadKind::GetAccountDelegators, } } } @@ -145,5 +163,13 @@ pub enum LedgerReadInitCallback { callback: Callback<(bool, P2pRpcId, PeerId)>, args: (bool, P2pRpcId, PeerId), }, + RpcLedgerStatusGetPending { + callback: Callback>, + args: RequestId, + }, + RpcLedgerAccountDelegatorsGetPending { + callback: Callback>, + args: RequestId, + }, None, } diff --git a/node/src/ledger/write/ledger_write_reducer.rs b/node/src/ledger/write/ledger_write_reducer.rs index b8019024f4..5584d36f79 100644 --- a/node/src/ledger/write/ledger_write_reducer.rs +++ b/node/src/ledger/write/ledger_write_reducer.rs @@ -1,3 +1,4 @@ +use openmina_core::bug_condition; use redux::Dispatcher; use crate::{ @@ -120,7 +121,9 @@ impl LedgerWriteState { && global_slot_since_genesis == expected_global_slot { match result { - Err(err) => todo!("handle staged ledger diff creation err: {err}"), + Err(err) => { + bug_condition!("StagedLedgerDiffCreate error: {err}"); + } Ok(output) => { dispatcher.push(BlockProducerAction::StagedLedgerDiffCreateSuccess { output, diff --git a/node/src/ledger/write/ledger_write_state.rs b/node/src/ledger/write/ledger_write_state.rs index af3298ba84..225ca67633 100644 --- a/node/src/ledger/write/ledger_write_state.rs +++ b/node/src/ledger/write/ledger_write_state.rs @@ -39,6 +39,10 @@ impl LedgerWriteState { }) .flatten() } + + pub fn is_busy(&self) -> bool { + self.pending_requests().peekable().peek().is_some() + } } impl Default for LedgerWriteState { diff --git a/node/src/ledger/write/mod.rs b/node/src/ledger/write/mod.rs index 749f57c090..192c817c95 100644 --- a/node/src/ledger/write/mod.rs +++ b/node/src/ledger/write/mod.rs @@ -54,7 +54,7 @@ pub enum LedgerWriteRequest { skip_verification: bool, }, Commit { - ledgers_to_keep: BTreeSet, + ledgers_to_keep: LedgersToKeep, root_snarked_ledger_updates: TransitionFrontierRootSnarkedLedgerUpdates, needed_protocol_states: BTreeMap, new_root: AppliedBlock, @@ -98,7 +98,15 @@ pub struct BlockApplyResultArchive { pub sender_receipt_chains_from_parent_ledger: Vec<(AccountId, v2::ReceiptChainHash)>, } -impl TryFrom<&BlockApplyResult> for v2::ArchiveTransitionFronntierDiff { +impl TryFrom for v2::ArchiveTransitionFrontierDiff { + type Error = String; + + fn try_from(value: BlockApplyResult) -> Result { + (&value).try_into() + } +} + +impl TryFrom<&BlockApplyResult> for v2::ArchiveTransitionFrontierDiff { type Error = String; fn try_from(value: &BlockApplyResult) -> Result { @@ -162,6 +170,108 @@ impl TryFrom<&BlockApplyResult> for v2::ArchiveTransitionFronntierDiff { } } +impl TryFrom<&BlockApplyResult> for v2::PrecomputedBlock { + type Error = String; + + fn try_from(value: &BlockApplyResult) -> Result { + let archive_transition_frontier_diff: v2::ArchiveTransitionFrontierDiff = + value.try_into()?; + + let res = Self { + 
scheduled_time: value + .block + .header() + .protocol_state + .body + .blockchain_state + .timestamp, + protocol_state: value.block.header().protocol_state.clone(), + protocol_state_proof: value + .block + .header() + .protocol_state_proof + .as_ref() + .clone() + .into(), + staged_ledger_diff: value.block.body().staged_ledger_diff.clone(), + delta_transition_chain_proof: value.block.header().delta_block_chain_proof.clone(), + protocol_version: value.block.header().current_protocol_version.clone(), + proposed_protocol_version: None, + accounts_accessed: archive_transition_frontier_diff.accounts_accessed(), + accounts_created: archive_transition_frontier_diff.accounts_created(), + tokens_used: archive_transition_frontier_diff.tokens_used(), + }; + + Ok(res) + } +} + +#[derive(Serialize, Deserialize, Debug, Ord, PartialOrd, Eq, PartialEq, Default, Clone)] +pub struct LedgersToKeep { + snarked: BTreeSet<v2::LedgerHash>, + staged: BTreeSet<Arc<v2::MinaBaseStagedLedgerHashStableV1>>, +} + +impl LedgersToKeep { + pub fn new() -> Self { + Self::default() + } + + pub fn contains<'a, T>(&self, key: T) -> bool + where + T: 'a + Into<LedgerToKeep<'a>>, + { + match key.into() { + LedgerToKeep::Snarked(hash) => self.snarked.contains(hash), + LedgerToKeep::Staged(hash) => self.staged.contains(hash), + } + } + + pub fn add_snarked(&mut self, hash: v2::LedgerHash) -> bool { + self.snarked.insert(hash) + } + + pub fn add_staged(&mut self, hash: Arc<v2::MinaBaseStagedLedgerHashStableV1>) -> bool { + self.staged.insert(hash) + } +} + +impl<'a> FromIterator<&'a ArcBlockWithHash> for LedgersToKeep { + fn from_iter<T: IntoIterator<Item = &'a ArcBlockWithHash>>(iter: T) -> Self { + let mut res = Self::new(); + let best_tip = iter.into_iter().fold(None, |best_tip, block| { + res.add_snarked(block.snarked_ledger_hash().clone()); + res.add_staged(Arc::new(block.staged_ledger_hashes().clone())); + match best_tip { + None => Some(block), + Some(tip) if tip.height() < block.height() => Some(block), + old => old, + } + }); + + if let Some(best_tip) = best_tip { + res.add_snarked(best_tip.staking_epoch_ledger_hash().clone()); + res.add_snarked(best_tip.next_epoch_ledger_hash().clone()); + } + + res + } +} + +#[derive(derive_more::From)] +pub enum LedgerToKeep<'a> { + Snarked(&'a v2::LedgerHash), + Staged(&'a v2::MinaBaseStagedLedgerHashStableV1), +} + +impl TryFrom<BlockApplyResult> for v2::PrecomputedBlock { + type Error = String; + + fn try_from(value: BlockApplyResult) -> Result<Self, Self::Error> { + (&value).try_into() + } +} + #[derive(Serialize, Deserialize, Debug, Default, Clone)] pub struct CommitResult { pub alive_masks: usize, diff --git a/node/src/ledger_effectful/ledger_effectful_effects.rs b/node/src/ledger_effectful/ledger_effectful_effects.rs index ff3c20e831..7ccdca0358 100644 --- a/node/src/ledger_effectful/ledger_effectful_effects.rs +++ b/node/src/ledger_effectful/ledger_effectful_effects.rs @@ -43,6 +43,12 @@ pub fn ledger_effectful_effects( LedgerReadInitCallback::P2pChannelsResponsePending { callback, args } => { store.dispatch_callback(callback, args); } + LedgerReadInitCallback::RpcLedgerStatusGetPending { callback, args } => { + store.dispatch_callback(callback, args); + } + LedgerReadInitCallback::RpcLedgerAccountDelegatorsGetPending { callback, args } => { + store.dispatch_callback(callback, args); + } LedgerReadInitCallback::None => {} } } diff --git a/node/src/logger/logger_effects.rs b/node/src/logger/logger_effects.rs index 0ee13da989..51078446a1 100644 --- a/node/src/logger/logger_effects.rs +++ b/node/src/logger/logger_effects.rs @@ -9,6 +9,7 @@ use crate::p2p::connection::P2pConnectionAction; use crate::p2p::network::P2pNetworkAction; use crate::p2p::P2pAction; use
crate::snark::SnarkAction; +use crate::transition_frontier::candidate::TransitionFrontierCandidateAction; use crate::{ Action, ActionWithMetaRef, BlockProducerAction, Service, Store, TransitionFrontierAction, }; @@ -119,6 +120,18 @@ pub fn logger_effects(store: &Store, action: ActionWithMetaRef<'_ Action::Snark(SnarkAction::WorkVerify(a)) => a.action_event(&context), Action::Snark(SnarkAction::UserCommandVerify(a)) => a.action_event(&context), Action::TransitionFrontier(a) => match a { + TransitionFrontierAction::Candidate( + TransitionFrontierCandidateAction::BlockReceived { block, chain_proof }, + ) => { + openmina_core::action_info!( + context, + kind = action.kind().to_string(), + summary = "candidate block received", + block_hash = block.hash().to_string(), + block_height = block.height(), + has_chain_proof = chain_proof.is_some(), + ); + } TransitionFrontierAction::Synced { .. } => { let tip = store.state().transition_frontier.best_tip().unwrap(); diff --git a/node/src/p2p/callbacks/p2p_callbacks_reducer.rs b/node/src/p2p/callbacks/p2p_callbacks_reducer.rs index 06ba7640c6..87f7e5412f 100644 --- a/node/src/p2p/callbacks/p2p_callbacks_reducer.rs +++ b/node/src/p2p/callbacks/p2p_callbacks_reducer.rs @@ -3,7 +3,11 @@ use mina_p2p_messages::{ gossip::GossipNetMessageV2, v2::{MinaLedgerSyncLedgerAnswerStableV2, StateHash}, }; -use openmina_core::{block::BlockWithHash, bug_condition, log, transaction::TransactionWithHash}; +use openmina_core::{ + block::{prevalidate::BlockPrevalidationError, BlockWithHash}, + bug_condition, log, + transaction::TransactionWithHash, +}; use p2p::{ channels::{ best_tip::P2pChannelsBestTipAction, @@ -18,7 +22,6 @@ use redux::{ActionMeta, ActionWithMeta, Dispatcher}; use crate::{ p2p_ready, snark_pool::candidate::SnarkPoolCandidateAction, - state::BlockPrevalidationError, transaction_pool::candidate::TransactionPoolCandidateAction, transition_frontier::candidate::{allow_block_too_late, TransitionFrontierCandidateAction}, transition_frontier::sync::{ diff --git a/node/src/recorder/recorder.rs b/node/src/recorder/recorder.rs index 081c88b011..b9895fb6a6 100644 --- a/node/src/recorder/recorder.rs +++ b/node/src/recorder/recorder.rs @@ -109,6 +109,8 @@ impl Recorder { let mut writer = BufWriter::new(file); let encoded = data.encode().unwrap(); + // RecordedActionWithMeta::decode(&encoded) + // .expect(&format!("failed to decode encoded message: {:?}", data)); writer .write_all(&(encoded.len() as u64).to_be_bytes()) .unwrap(); @@ -158,7 +160,7 @@ fn graceful_shutdown(only_i: Option) { let files_iter = files .iter_mut() .enumerate() - .filter(|(i, _)| only_i.map_or(true, |only_i| only_i == *i)) + .filter(|(i, _)| only_i.is_none_or(|only_i| only_i == *i)) .filter_map(|(i, v)| Some((i, v.take()?))); for (i, file) in files_iter { diff --git a/node/src/rpc/heartbeat.rs b/node/src/rpc/heartbeat.rs index 1be92a6ac4..768033ffce 100644 --- a/node/src/rpc/heartbeat.rs +++ b/node/src/rpc/heartbeat.rs @@ -97,8 +97,16 @@ pub struct NodeHeartbeat { pub status: NodeStatus, pub node_timestamp: Timestamp, pub peer_id: PeerId, - // binprot+base64 encoded block - pub last_produced_block: Option, + // binprot+base64 encoded block header + pub last_produced_block_info: Option, +} + +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +pub struct ProducedBlockInfo { + pub height: u32, + pub global_slot: u32, + pub hash: String, + pub base64_encoded_header: String, } #[derive(Serialize, Deserialize, Debug, Clone)] @@ -283,7 +291,7 @@ pub(crate) mod tests { peer_id: 
"2bEgBrPTzL8wov2D4Kz34WVLCxR4uCarsBmHYXWKQA5wvBQzd9H" .parse() .unwrap(), - last_produced_block: None, + last_produced_block_info: None, } } } diff --git a/node/src/rpc/mod.rs b/node/src/rpc/mod.rs index 629874f092..dbac579702 100644 --- a/node/src/rpc/mod.rs +++ b/node/src/rpc/mod.rs @@ -5,17 +5,19 @@ use std::str::FromStr; use ark_ff::fields::arithmetic::InvalidBigInt; use ledger::scan_state::currency::{Amount, Balance, Fee, Nonce, Slot}; use ledger::scan_state::transaction_logic::signed_command::SignedCommandPayload; -use ledger::scan_state::transaction_logic::{self, signed_command, valid, Memo}; +use ledger::scan_state::transaction_logic::{signed_command, valid, Memo}; use ledger::transaction_pool::{diff, ValidCommandWithHash}; -use ledger::Account; +use ledger::{Account, AccountId}; use mina_p2p_messages::bigint::BigInt; use mina_p2p_messages::v2::{ - MinaBaseSignedCommandPayloadBodyStableV2, MinaBaseTransactionStatusStableV2, - MinaBaseUserCommandStableV2, MinaTransactionTransactionStableV2, + LedgerHash, MinaBaseSignedCommandPayloadBodyStableV2, MinaBaseSignedCommandStableV2, + MinaBaseTransactionStatusStableV2, MinaBaseUserCommandStableV2, + MinaBaseZkappCommandTStableV1WireStableV1, MinaTransactionTransactionStableV2, SnarkWorkerWorkerRpcsVersionedGetWorkV2TResponse, StateHash, TransactionHash, + TransactionSnarkWorkTStableV2, }; -use openmina_core::block::AppliedBlock; -use openmina_core::consensus::ConsensusConstants; +use openmina_core::block::{AppliedBlock, ArcBlockWithHash}; +use openmina_core::consensus::{ConsensusConstants, ConsensusTime}; use openmina_node_account::AccountPublicKey; use p2p::bootstrap::P2pNetworkKadBootstrapStats; pub use rpc_state::*; @@ -29,7 +31,7 @@ pub use rpc_reducer::collect_rpc_peers_info; mod rpc_impls; mod heartbeat; -pub use heartbeat::{NodeHeartbeat, SignedNodeHeartbeat}; +pub use heartbeat::{NodeHeartbeat, ProducedBlockInfo, SignedNodeHeartbeat}; pub use openmina_core::requests::{RpcId, RpcIdType}; @@ -43,12 +45,13 @@ use serde::{Deserialize, Serialize}; use crate::external_snark_worker::{ ExternalSnarkWorkerError, ExternalSnarkWorkerWorkError, SnarkWorkSpecError, }; -use crate::ledger::read::{LedgerReadId, LedgerReadKind}; +use crate::ledger::read::{LedgerReadId, LedgerReadKind, LedgerStatus}; use crate::ledger::write::LedgerWriteKind; use crate::p2p::connection::incoming::P2pConnectionIncomingInitOpts; use crate::p2p::connection::outgoing::P2pConnectionOutgoingInitOpts; use crate::p2p::PeerId; -use crate::snark_pool::{JobCommitment, JobSummary}; +use crate::service::Queues; +use crate::snark_pool::{JobCommitment, JobState, JobSummary}; use crate::stats::actions::{ActionStatsForBlock, ActionStatsSnapshot}; use crate::stats::block_producer::{ BlockProductionAttempt, BlockProductionAttemptWonSlot, VrfEvaluatorStats, @@ -70,6 +73,8 @@ pub enum RpcRequest { ScanStateSummaryGet(RpcScanStateSummaryGetQuery), SnarkPoolGet, SnarkPoolJobGet { job_id: SnarkJobId }, + SnarkPoolCompletedJobsGet, + SnarkPoolPendingJobsGet, SnarkerConfig, SnarkerJobCommit { job_id: SnarkJobId }, SnarkerJobSpec { job_id: SnarkJobId }, @@ -85,6 +90,19 @@ pub enum RpcRequest { BestChain(MaxLength), ConsensusConstantsGet, TransactionStatusGet(MinaBaseUserCommandStableV2), + GetBlock(GetBlockQuery), + PooledUserCommands(PooledUserCommandsQuery), + PooledZkappCommands(PooledZkappsCommandsQuery), + GenesisBlockGet, + ConsensusTimeGet(ConsensusTimeQuery), + LedgerStatusGet(LedgerHash), + LedgerAccountDelegatorsGet(LedgerHash, AccountId), +} + +#[derive(Serialize, Deserialize, 
Debug, Clone)] +pub enum ConsensusTimeQuery { + Now, + BestTip, } pub type MaxLength = u32; @@ -157,7 +175,7 @@ pub enum ActionStatsResponse { ForBlock(ActionStatsForBlock), } -#[derive(Serialize, Deserialize, Debug, Clone)] +#[derive(Serialize, Deserialize, Debug, Clone, strum_macros::Display)] pub enum PeerConnectionStatus { Disconnecting, Disconnected, @@ -356,6 +374,8 @@ pub type RpcPeersGetResponse = Vec; pub type RpcP2pConnectionOutgoingResponse = Result<(), String>; pub type RpcScanStateSummaryGetResponse = Result; pub type RpcSnarkPoolGetResponse = Vec; +pub type RpcSnarkPoolCompletedJobsResponse = Vec; +pub type RpcSnarkPoolPendingJobsGetResponse = Vec; pub type RpcSnarkPoolJobGetResponse = Option; pub type RpcSnarkerConfigGetResponse = Option; pub type RpcTransactionPoolResponse = Vec; @@ -365,6 +385,12 @@ pub type RpcTransitionFrontierUserCommandsResponse = Vec; pub type RpcConsensusConstantsGetResponse = ConsensusConstants; pub type RpcTransactionStatusGetResponse = TransactionStatus; +pub type RpcPooledUserCommandsResponse = Vec; +pub type RpcPooledZkappCommandsResponse = Vec; +pub type RpcGenesisBlockResponse = Option; +pub type RpcConsensusTimeGetResponse = Option; +pub type RpcLedgerStatusGetResponse = Option; +pub type RpcLedgerAccountDelegatorsGetResponse = Option>; #[derive(Serialize, Deserialize, Debug, Clone, strum_macros::Display)] #[strum(serialize_all = "SCREAMING_SNAKE_CASE")] @@ -390,15 +416,16 @@ pub struct RpcTransactionInjectedPayment { pub nonce: Nonce, } -#[derive(Serialize, Deserialize, Debug, Clone)] -pub enum RpcTransactionInjectedCommand { - Payment(RpcTransactionInjectedPayment), - Delegation, - Zkapp(valid::UserCommand), -} +// TODO(adonagy): remove this, not needed anymore +// #[derive(Serialize, Deserialize, Debug, Clone)] +// pub enum RpcTransactionInjectedCommand { +// Payment(valid::UserCommand), +// Delegation(valid::UserCommand), +// Zkapp(valid::UserCommand), +// } -pub type RpcTransactionInjectSuccess = Vec; -pub type RpcTransactionInjectRejected = Vec<(RpcTransactionInjectedCommand, diff::Error)>; +pub type RpcTransactionInjectSuccess = Vec; +pub type RpcTransactionInjectRejected = Vec<(valid::UserCommand, diff::Error)>; /// Errors pub type RpcTransactionInjectFailure = Vec; @@ -410,36 +437,56 @@ pub enum RpcTransactionInjectResponse { Failure(RpcTransactionInjectFailure), } -impl From for RpcTransactionInjectedCommand { - fn from(value: ValidCommandWithHash) -> Self { - match value.data { - transaction_logic::valid::UserCommand::SignedCommand(signedcmd) => { - match signedcmd.payload.body { - transaction_logic::signed_command::Body::Payment(ref payment) => { - Self::Payment(RpcTransactionInjectedPayment { - amount: payment.amount, - fee: signedcmd.fee(), - // fee_token: signedcmd.fee_token(), - from: signedcmd.fee_payer_pk().clone().into(), - to: payment.receiver_pk.clone().into(), - hash: value.hash.to_string(), - is_delegation: false, - // memo: signedcmd.payload.common.memo.clone(), - memo: signedcmd.payload.common.memo.to_string(), - nonce: signedcmd.nonce(), - }) - } - transaction_logic::signed_command::Body::StakeDelegation(_) => { - todo!("inject stake delegation") - } - } - } - transaction_logic::valid::UserCommand::ZkAppCommand(_) => { - Self::Zkapp(value.data.clone()) - } - } - } -} +// impl From for RpcTransactionInjectedCommand { +// fn from(value: ValidCommandWithHash) -> Self { +// match value.data { +// transaction_logic::valid::UserCommand::SignedCommand(ref signedcmd) => { +// match signedcmd.payload.body { +// 
transaction_logic::signed_command::Body::Payment(_) => { +// Self::Payment(value.data.clone()) +// } +// transaction_logic::signed_command::Body::StakeDelegation(_) => { +// Self::Delegation(value.data.clone()) +// } +// } +// } +// transaction_logic::valid::UserCommand::ZkAppCommand(_) => { +// Self::Zkapp(value.data.clone()) +// } +// } +// } +// } + +// impl From for RpcTransactionInjectedCommand { +// fn from(value: ValidCommandWithHash) -> Self { +// match value.data { +// transaction_logic::valid::UserCommand::SignedCommand(signedcmd) => { +// match signedcmd.payload.body { +// transaction_logic::signed_command::Body::Payment(ref payment) => { +// Self::RpcPayment(RpcTransactionInjectedPayment { +// amount: payment.amount, +// fee: signedcmd.fee(), +// // fee_token: signedcmd.fee_token(), +// from: signedcmd.fee_payer_pk().clone().into(), +// to: payment.receiver_pk.clone().into(), +// hash: value.hash.to_string(), +// is_delegation: false, +// // memo: signedcmd.payload.common.memo.clone(), +// memo: signedcmd.payload.common.memo.to_string(), +// nonce: signedcmd.nonce(), +// }) +// } +// transaction_logic::signed_command::Body::StakeDelegation(_) => { +// todo!("inject stake delegation") +// } +// } +// } +// transaction_logic::valid::UserCommand::ZkAppCommand(_) => { +// Self::Zkapp(value.data.clone()) +// } +// } +// } +// } #[derive(Serialize, Debug, Clone)] pub struct AccountSlim { @@ -466,8 +513,21 @@ pub struct RpcNodeStatus { pub snark_pool: RpcNodeStatusSnarkPool, pub transaction_pool: RpcNodeStatusTransactionPool, pub current_block_production_attempt: Option, + pub previous_block_production_attempt: Option, pub peers: Vec, pub resources_status: RpcNodeStatusResources, + pub service_queues: Queues, + pub network_info: RpcNodeStatusNetworkInfo, + pub block_producer: Option, + pub coinbase_receiver: Option, +} + +#[derive(Serialize, Debug, Clone)] +pub struct RpcNodeStatusNetworkInfo { + pub bind_ip: String, + pub external_ip: Option, + pub client_port: Option, + pub libp2p_port: Option, } #[derive(Serialize, Debug, Clone)] @@ -607,6 +667,24 @@ pub type RpcReadinessCheckResponse = Result<(), String>; pub type RpcDiscoveryRoutingTableResponse = Option; pub type RpcDiscoveryBoostrapStatsResponse = Option; +#[derive(Serialize, Deserialize, Debug, Clone)] +pub enum GetBlockQuery { + Hash(StateHash), + Height(u32), +} + +pub type RpcGetBlockResponse = Option; + +#[derive(Serialize, Deserialize, Debug, Clone)] +pub struct PooledCommandsQuery { + pub public_key: Option, + pub hashes: Option>, + pub ids: Option>, +} + +pub type PooledUserCommandsQuery = PooledCommandsQuery; +pub type PooledZkappsCommandsQuery = PooledCommandsQuery; + pub mod discovery { use p2p::{ libp2p_identity::DecodingError, ConnectionType, P2pNetworkKadBucket, P2pNetworkKadDist, diff --git a/node/src/rpc/rpc_actions.rs b/node/src/rpc/rpc_actions.rs index 2701c70202..29cf1402df 100644 --- a/node/src/rpc/rpc_actions.rs +++ b/node/src/rpc/rpc_actions.rs @@ -1,7 +1,7 @@ use ledger::transaction_pool::{diff, ValidCommandWithHash}; -use ledger::Account; -use mina_p2p_messages::v2::MinaBaseUserCommandStableV2; +use ledger::{Account, AccountId}; use mina_p2p_messages::v2::TokenIdKeyHash; +use mina_p2p_messages::v2::{LedgerHash, MinaBaseUserCommandStableV2}; use openmina_core::block::AppliedBlock; use openmina_core::snark::SnarkJobId; use openmina_core::ActionEvent; @@ -15,7 +15,9 @@ use crate::p2p::connection::outgoing::{P2pConnectionOutgoingError, P2pConnection use crate::p2p::connection::P2pConnectionResponse; use 
super::{ - ActionStatsQuery, RpcId, RpcScanStateSummaryGetQuery, RpcScanStateSummaryScanStateJob, + ActionStatsQuery, ConsensusTimeQuery, GetBlockQuery, PooledUserCommandsQuery, + PooledZkappsCommandsQuery, RpcId, RpcLedgerAccountDelegatorsGetResponse, + RpcLedgerStatusGetResponse, RpcScanStateSummaryGetQuery, RpcScanStateSummaryScanStateJob, SyncStatsQuery, }; @@ -115,7 +117,12 @@ pub enum RpcAction { job_id: SnarkWorkId, rpc_id: RpcId, }, - + SnarkPoolCompletedJobsGet { + rpc_id: RpcId, + }, + SnarkPoolPendingJobsGet { + rpc_id: RpcId, + }, SnarkerConfigGet { rpc_id: RpcId, }, @@ -206,6 +213,53 @@ pub enum RpcAction { tx: MinaBaseUserCommandStableV2, }, + BlockGet { + rpc_id: RpcId, + query: GetBlockQuery, + }, + ConsensusTimeGet { + rpc_id: RpcId, + query: ConsensusTimeQuery, + }, + LedgerStatusGetInit { + rpc_id: RpcId, + ledger_hash: LedgerHash, + }, + LedgerStatusGetPending { + rpc_id: RpcId, + }, + LedgerStatusGetSuccess { + rpc_id: RpcId, + response: RpcLedgerStatusGetResponse, + }, + #[action_event(level = info)] + LedgerAccountDelegatorsGetInit { + rpc_id: RpcId, + ledger_hash: LedgerHash, + account_id: AccountId, + }, + #[action_event(level = info)] + LedgerAccountDelegatorsGetPending { + rpc_id: RpcId, + }, + #[action_event(level = info)] + LedgerAccountDelegatorsGetSuccess { + rpc_id: RpcId, + response: RpcLedgerAccountDelegatorsGetResponse, + }, + + PooledUserCommands { + rpc_id: RpcId, + query: PooledUserCommandsQuery, + }, + PooledZkappCommands { + rpc_id: RpcId, + query: PooledZkappsCommandsQuery, + }, + GenesisBlock { + rpc_id: RpcId, + }, + Finish { rpc_id: RpcId, }, @@ -213,8 +267,9 @@ pub enum RpcAction { #[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] pub enum AccountQuery { - SinglePublicKey(AccountPublicKey), All, + SinglePublicKey(AccountPublicKey), + MultipleIds(Vec), PubKeyWithTokenId(AccountPublicKey, TokenIdKeyHash), } @@ -289,6 +344,8 @@ impl redux::EnablingCondition for RpcAction { .is_some_and(|v| v.status.is_pending()), RpcAction::SnarkPoolAvailableJobsGet { .. } => true, RpcAction::SnarkPoolJobGet { .. } => true, + RpcAction::SnarkPoolCompletedJobsGet { .. } => true, + RpcAction::SnarkPoolPendingJobsGet { .. } => true, RpcAction::SnarkerConfigGet { .. } => true, RpcAction::SnarkerJobCommit { .. } => true, RpcAction::SnarkerJobSpec { .. } => true, @@ -301,6 +358,9 @@ impl redux::EnablingCondition for RpcAction { RpcAction::ConsensusConstantsGet { .. } => true, RpcAction::BestChain { .. } => state.transition_frontier.best_tip().is_some(), RpcAction::TransactionStatusGet { .. } => true, + RpcAction::PooledUserCommands { .. } => true, + RpcAction::PooledZkappCommands { .. } => true, + RpcAction::GenesisBlock { .. } => true, RpcAction::LedgerAccountsGetInit { .. } => { state.transition_frontier.best_tip().is_some() } @@ -337,6 +397,32 @@ impl redux::EnablingCondition for RpcAction { .get(rpc_id) .is_some_and(|v| v.status.is_pending()), RpcAction::TransitionFrontierUserCommandsGet { .. } => true, + RpcAction::BlockGet { .. } => true, + RpcAction::ConsensusTimeGet { .. } => true, + RpcAction::LedgerStatusGetInit { .. } => state.transition_frontier.best_tip().is_some(), + RpcAction::LedgerStatusGetPending { rpc_id } => state + .rpc + .requests + .get(rpc_id) + .is_some_and(|v| v.status.is_init()), + RpcAction::LedgerStatusGetSuccess { rpc_id, .. } => state + .rpc + .requests + .get(rpc_id) + .is_some_and(|v| v.status.is_pending()), + RpcAction::LedgerAccountDelegatorsGetInit { .. 
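// Like `LedgerStatusGet*`, this request follows the Init -> Pending -> Success
// lifecycle: each enabling condition below requires the request to be in the
// preceding status before the next action may fire.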
} => { + state.transition_frontier.best_tip().is_some() + } + RpcAction::LedgerAccountDelegatorsGetPending { rpc_id } => state + .rpc + .requests + .get(rpc_id) + .is_some_and(|v| v.status.is_init()), + RpcAction::LedgerAccountDelegatorsGetSuccess { rpc_id, .. } => state + .rpc + .requests + .get(rpc_id) + .is_some_and(|v| v.status.is_pending()), RpcAction::Finish { rpc_id } => state .rpc .requests diff --git a/node/src/rpc/rpc_reducer.rs b/node/src/rpc/rpc_reducer.rs index 707d0fe90a..71933810f8 100644 --- a/node/src/rpc/rpc_reducer.rs +++ b/node/src/rpc/rpc_reducer.rs @@ -1,8 +1,13 @@ +use ledger::scan_state::transaction_logic::valid; +use mina_p2p_messages::v2::{ + MinaBaseSignedCommandStableV2, MinaBaseZkappCommandTStableV1WireStableV1, NonZeroCurvePoint, + TransactionSnarkWorkTStableV2, +}; use openmina_core::{ block::AppliedBlock, bug_condition, requests::{RequestId, RpcId, RpcIdType}, - transaction::TransactionWithHash, + transaction::{TransactionPoolMessageSource, TransactionWithHash}, }; use p2p::{ connection::{incoming::P2pConnectionIncomingAction, outgoing::P2pConnectionOutgoingAction}, @@ -14,13 +19,15 @@ use redux::ActionWithMeta; use crate::{ ledger::read::{LedgerReadAction, LedgerReadInitCallback, LedgerReadRequest}, p2p_ready, + rpc::{GetBlockQuery, PooledCommandsQuery}, rpc_effectful::RpcEffectfulAction, TransactionPoolAction, }; use super::{ - PeerConnectionStatus, RpcAction, RpcPeerInfo, RpcRequest, RpcRequestExtraData, RpcRequestState, - RpcRequestStatus, RpcScanStateSummaryGetQuery, RpcSnarkerConfig, RpcState, + ConsensusTimeQuery, PeerConnectionStatus, RpcAction, RpcPeerInfo, RpcRequest, + RpcRequestExtraData, RpcRequestState, RpcRequestStatus, RpcScanStateSummaryGetQuery, + RpcSnarkerConfig, RpcState, }; impl RpcState { @@ -326,6 +333,34 @@ impl RpcState { job_id: job_id.clone(), }); } + RpcAction::SnarkPoolCompletedJobsGet { rpc_id } => { + let (dispatcher, state) = state_context.into_dispatcher_and_state(); + + let jobs = state + .snark_pool + .completed_snarks_iter() + .map(|s| TransactionSnarkWorkTStableV2::from(s.clone())) + .collect::>(); + + dispatcher.push(RpcEffectfulAction::SnarkPoolCompletedJobsGet { + rpc_id: *rpc_id, + jobs, + }); + } + RpcAction::SnarkPoolPendingJobsGet { rpc_id } => { + let (dispatcher, state) = state_context.into_dispatcher_and_state(); + + let jobs = state + .snark_pool + .available_jobs_iter() + .cloned() + .collect::>(); + + dispatcher.push(RpcEffectfulAction::SnarkPoolPendingJobsGet { + rpc_id: *rpc_id, + jobs, + }) + } RpcAction::SnarkerConfigGet { rpc_id } => { let (dispatcher, state) = state_context.into_dispatcher_and_state(); @@ -515,7 +550,7 @@ impl RpcState { dispatcher.push(RpcAction::TransactionInjectPending { rpc_id: *rpc_id }); dispatcher.push(TransactionPoolAction::StartVerify { commands: commands_with_hash, - from_rpc: Some(*rpc_id), + from_source: TransactionPoolMessageSource::rpc(*rpc_id), }); } RpcAction::TransactionInjectPending { rpc_id } => { @@ -531,7 +566,7 @@ impl RpcState { rpc.status = RpcRequestStatus::Success { time: meta.time() }; let dispatcher = state_context.into_dispatcher(); - let response = response.clone().into_iter().map(|cmd| cmd.into()).collect(); + let response = response.clone().into_iter().map(|cmd| cmd.data).collect(); dispatcher.push(RpcEffectfulAction::TransactionInjectSuccess { rpc_id: *rpc_id, response, @@ -547,7 +582,7 @@ impl RpcState { let response = response .clone() .into_iter() - .map(|(cmd, failure)| (cmd.into(), failure)) + .map(|(cmd, failure)| (cmd.data, failure)) .collect(); 
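// Rejected commands are now reported as plain `valid::UserCommand`s paired with
// the pool's rejection reason; the old `RpcTransactionInjectedCommand` wrapper
// (kept commented out above) is no longer constructed.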
dispatcher.push(RpcEffectfulAction::TransactionInjectRejected { @@ -618,6 +653,26 @@ impl RpcState { tx: tx.clone(), }); } + RpcAction::BlockGet { rpc_id, query } => { + let (dispatcher, state) = state_context.into_dispatcher_and_state(); + + let find_fn = |block: &&AppliedBlock| match query { + GetBlockQuery::Hash(hash) => block.hash() == hash, + GetBlockQuery::Height(height) => block.height() == *height, + }; + + let block = state + .transition_frontier + .best_chain + .iter() + .find(find_fn) + .cloned(); + + dispatcher.push(RpcEffectfulAction::BlockGet { + rpc_id: *rpc_id, + block, + }); + } RpcAction::P2pConnectionIncomingAnswerReady { rpc_id, answer, @@ -631,6 +686,201 @@ impl RpcState { dispatcher .push(P2pConnectionIncomingAction::AnswerSendSuccess { peer_id: *peer_id }); } + RpcAction::PooledUserCommands { rpc_id, query } => { + let (dispatcher, state) = state_context.into_dispatcher_and_state(); + + let PooledCommandsQuery { + public_key, + hashes, + ids, + } = query; + + let all_transactions = state.transaction_pool.get_all_transactions(); + + let mut user_commands: Vec<_> = all_transactions + .into_iter() + .filter_map(|tx| match tx.data { + valid::UserCommand::SignedCommand(signed_command) => Some(( + tx.hash, + MinaBaseSignedCommandStableV2::from(*signed_command), + )), + valid::UserCommand::ZkAppCommand(_) => None, + }) + .collect(); + + if let Some(pk) = public_key { + let pk = NonZeroCurvePoint::from(pk.clone()); + user_commands.retain(|(_, tx)| tx.signer == pk) + } + + if let Some(hashes) = hashes { + user_commands.retain(|(hash, _)| hashes.contains(hash)) + } + + if let Some(ids) = ids { + user_commands.retain(|(_, tx)| ids.contains(tx)) + } + + dispatcher.push(RpcEffectfulAction::PooledUserCommands { + rpc_id: *rpc_id, + user_commands: user_commands.into_iter().map(|(_, tx)| tx).collect(), + }); + } + RpcAction::GenesisBlock { rpc_id } => { + let (dispatcher, state) = state_context.into_dispatcher_and_state(); + let genesis_block = state.genesis_block(); + dispatcher.push(RpcEffectfulAction::GenesisBlock { + rpc_id: *rpc_id, + genesis_block, + }); + } + RpcAction::PooledZkappCommands { rpc_id, query } => { + let (dispatcher, state) = state_context.into_dispatcher_and_state(); + + let PooledCommandsQuery { + public_key, + hashes, + ids, + } = query; + + let all_transactions = state.transaction_pool.get_all_transactions(); + + let mut zkapp_commands: Vec<_> = all_transactions + .into_iter() + .filter_map(|tx| match tx.data { + valid::UserCommand::SignedCommand(_) => None, + valid::UserCommand::ZkAppCommand(zkapp) => Some(( + tx.hash, + MinaBaseZkappCommandTStableV1WireStableV1::from(&zkapp.zkapp_command), + )), + }) + .collect(); + + if let Some(pk) = public_key { + let pk = NonZeroCurvePoint::from(pk.clone()); + zkapp_commands.retain(|(_, tx)| tx.fee_payer.body.public_key == pk); + } + + if let Some(hashes) = hashes { + zkapp_commands.retain(|(hash, _)| hashes.contains(hash)); + } + + if let Some(ids) = ids { + zkapp_commands.retain(|(_, tx)| ids.contains(tx)); + } + + dispatcher.push(RpcEffectfulAction::PooledZkappCommands { + rpc_id: *rpc_id, + zkapp_commands: zkapp_commands.into_iter().map(|(_, tx)| tx).collect(), + }); + } + RpcAction::ConsensusTimeGet { rpc_id, query } => { + let (dispatcher, state) = state_context.into_dispatcher_and_state(); + let consensus_time = match query { + ConsensusTimeQuery::Now => state.consensus_time_now(), + ConsensusTimeQuery::BestTip => state.consensus_time_best_tip(), + }; + println!("consensus_time: {:?}", consensus_time); + 
dispatcher.push(RpcEffectfulAction::ConsensusTimeGet { + rpc_id: *rpc_id, + consensus_time, + }); + } + RpcAction::LedgerStatusGetInit { + rpc_id, + ledger_hash, + } => { + let rpc_state = RpcRequestState { + req: RpcRequest::LedgerStatusGet(ledger_hash.clone()), + status: RpcRequestStatus::Init { time: meta.time() }, + data: Default::default(), + }; + state.requests.insert(*rpc_id, rpc_state); + + let (dispatcher, state) = state_context.into_dispatcher_and_state(); + let ledger_hash = if let Some(best_tip) = state.transition_frontier.best_tip() { + best_tip.merkle_root_hash() + } else { + return; + }; + + dispatcher.push(LedgerReadAction::Init { + request: LedgerReadRequest::GetLedgerStatus(*rpc_id, ledger_hash.clone()), + callback: LedgerReadInitCallback::RpcLedgerStatusGetPending { + callback: redux::callback!( + on_ledger_read_init_rpc_actions_get_init(rpc_id: RequestId) -> crate::Action{ + RpcAction::LedgerStatusGetPending { rpc_id } + } + ), + args: *rpc_id, + }, + }) + } + RpcAction::LedgerStatusGetPending { rpc_id } => { + let Some(rpc) = state.requests.get_mut(rpc_id) else { + return; + }; + rpc.status = RpcRequestStatus::Pending { time: meta.time() }; + } + RpcAction::LedgerStatusGetSuccess { rpc_id, response } => { + let Some(rpc) = state.requests.get_mut(rpc_id) else { + return; + }; + rpc.status = RpcRequestStatus::Success { time: meta.time() }; + + let dispatcher = state_context.into_dispatcher(); + dispatcher.push(RpcEffectfulAction::LedgerStatusGetSuccess { + rpc_id: *rpc_id, + response: response.clone(), + }); + } + RpcAction::LedgerAccountDelegatorsGetInit { + rpc_id, + ledger_hash, + account_id, + } => { + let rpc_state = RpcRequestState { + req: RpcRequest::LedgerAccountDelegatorsGet( + ledger_hash.clone(), + account_id.clone(), + ), + status: RpcRequestStatus::Init { time: meta.time() }, + data: Default::default(), + }; + state.requests.insert(*rpc_id, rpc_state); + + let dispatcher = state_context.into_dispatcher(); + + dispatcher.push(LedgerReadAction::Init { + request: LedgerReadRequest::GetAccountDelegators(*rpc_id, ledger_hash.clone(), account_id.clone()), + callback: LedgerReadInitCallback::RpcLedgerAccountDelegatorsGetPending { + callback: redux::callback!( + on_ledger_read_init_rpc_actions_get_init(rpc_id: RequestId) -> crate::Action{ + RpcAction::LedgerAccountDelegatorsGetPending { rpc_id } + } + ), + args: *rpc_id, + }, + }) + } + RpcAction::LedgerAccountDelegatorsGetPending { rpc_id } => { + let Some(rpc) = state.requests.get_mut(rpc_id) else { + return; + }; + rpc.status = RpcRequestStatus::Pending { time: meta.time() }; + } + RpcAction::LedgerAccountDelegatorsGetSuccess { rpc_id, response } => { + let Some(rpc) = state.requests.get_mut(rpc_id) else { + return; + }; + rpc.status = RpcRequestStatus::Success { time: meta.time() }; + + let dispatcher = state_context.into_dispatcher(); + dispatcher.push(RpcEffectfulAction::LedgerAccountDelegatorsGetSuccess { + rpc_id: *rpc_id, + response: response.clone(), + }); + } } } } diff --git a/node/src/rpc_effectful/rpc_effectful_action.rs b/node/src/rpc_effectful/rpc_effectful_action.rs index 1159ed2885..7bc2e07e35 100644 --- a/node/src/rpc_effectful/rpc_effectful_action.rs +++ b/node/src/rpc_effectful/rpc_effectful_action.rs @@ -3,9 +3,12 @@ use crate::{ p2p::connection::P2pConnectionResponse, rpc::{ discovery::RpcDiscoveryRoutingTable, AccountQuery, ActionStatsQuery, RpcBestChainResponse, - RpcPeerInfo, RpcScanStateSummaryScanStateJob, RpcSnarkerConfig, - RpcTransactionInjectFailure, RpcTransactionInjectRejected, 
RpcTransactionInjectSuccess, - SyncStatsQuery, + RpcConsensusTimeGetResponse, RpcGenesisBlockResponse, RpcGetBlockResponse, + RpcLedgerAccountDelegatorsGetResponse, RpcLedgerStatusGetResponse, RpcPeerInfo, + RpcPooledUserCommandsResponse, RpcPooledZkappCommandsResponse, + RpcScanStateSummaryScanStateJob, RpcSnarkPoolCompletedJobsResponse, + RpcSnarkPoolPendingJobsGetResponse, RpcSnarkerConfig, RpcTransactionInjectFailure, + RpcTransactionInjectRejected, RpcTransactionInjectSuccess, SyncStatsQuery, }, }; use ledger::{ @@ -79,6 +82,14 @@ pub enum RpcEffectfulAction { job_id: SnarkWorkId, rpc_id: RpcId, }, + SnarkPoolCompletedJobsGet { + rpc_id: RpcId, + jobs: RpcSnarkPoolCompletedJobsResponse, + }, + SnarkPoolPendingJobsGet { + rpc_id: RpcId, + jobs: RpcSnarkPoolPendingJobsGetResponse, + }, SnarkerConfigGet { rpc_id: RpcId, config: Option, @@ -147,6 +158,34 @@ pub enum RpcEffectfulAction { rpc_id: RpcId, tx: MinaBaseUserCommandStableV2, }, + BlockGet { + rpc_id: RpcId, + block: RpcGetBlockResponse, + }, + PooledUserCommands { + rpc_id: RpcId, + user_commands: RpcPooledUserCommandsResponse, + }, + PooledZkappCommands { + rpc_id: RpcId, + zkapp_commands: RpcPooledZkappCommandsResponse, + }, + GenesisBlock { + rpc_id: RpcId, + genesis_block: RpcGenesisBlockResponse, + }, + ConsensusTimeGet { + rpc_id: RpcId, + consensus_time: RpcConsensusTimeGetResponse, + }, + LedgerStatusGetSuccess { + rpc_id: RpcId, + response: RpcLedgerStatusGetResponse, + }, + LedgerAccountDelegatorsGetSuccess { + rpc_id: RpcId, + response: RpcLedgerAccountDelegatorsGetResponse, + }, } impl redux::EnablingCondition for RpcEffectfulAction { diff --git a/node/src/rpc_effectful/rpc_effectful_effects.rs b/node/src/rpc_effectful/rpc_effectful_effects.rs index 5b7643e8a3..1caeae2636 100644 --- a/node/src/rpc_effectful/rpc_effectful_effects.rs +++ b/node/src/rpc_effectful/rpc_effectful_effects.rs @@ -12,16 +12,16 @@ use crate::{ p2p_ready, rpc::{ AccountQuery, AccountSlim, ActionStatsQuery, ActionStatsResponse, CurrentMessageProgress, - MessagesStats, NodeHeartbeat, RootLedgerSyncProgress, RootStagedLedgerSyncProgress, - RpcAction, RpcBlockProducerStats, RpcMessageProgressResponse, RpcNodeStatus, - RpcNodeStatusLedger, RpcNodeStatusResources, RpcNodeStatusTransactionPool, - RpcNodeStatusTransitionFrontier, RpcNodeStatusTransitionFrontierBlockSummary, - RpcNodeStatusTransitionFrontierSync, RpcRequestExtraData, RpcScanStateSummary, - RpcScanStateSummaryBlock, RpcScanStateSummaryBlockTransaction, - RpcScanStateSummaryBlockTransactionKind, RpcScanStateSummaryScanStateJob, - RpcSnarkPoolJobFull, RpcSnarkPoolJobSnarkWork, RpcSnarkPoolJobSummary, - RpcSnarkerJobCommitResponse, RpcSnarkerJobSpecResponse, RpcTransactionInjectResponse, - TransactionStatus, + MessagesStats, NodeHeartbeat, ProducedBlockInfo, RootLedgerSyncProgress, + RootStagedLedgerSyncProgress, RpcAction, RpcBlockProducerStats, RpcMessageProgressResponse, + RpcNodeStatus, RpcNodeStatusLedger, RpcNodeStatusNetworkInfo, RpcNodeStatusResources, + RpcNodeStatusTransactionPool, RpcNodeStatusTransitionFrontier, + RpcNodeStatusTransitionFrontierBlockSummary, RpcNodeStatusTransitionFrontierSync, + RpcRequestExtraData, RpcScanStateSummary, RpcScanStateSummaryBlock, + RpcScanStateSummaryBlockTransaction, RpcScanStateSummaryBlockTransactionKind, + RpcScanStateSummaryScanStateJob, RpcSnarkPoolJobFull, RpcSnarkPoolJobSnarkWork, + RpcSnarkPoolJobSummary, RpcSnarkerJobCommitResponse, RpcSnarkerJobSpecResponse, + RpcTransactionInjectResponse, TransactionStatus, }, 
snark_pool::SnarkPoolAction, transition_frontier::sync::{ @@ -37,6 +37,7 @@ use malloc_size_of::{MallocSizeOf, MallocSizeOfOps}; use mina_p2p_messages::{rpc_kernel::QueryHeader, v2}; use mina_signer::CompressedPubKey; use openmina_core::{block::ArcBlockWithHash, bug_condition}; +use openmina_node_account::AccountPublicKey; use p2p::channels::streaming_rpc::{ staged_ledger_parts::calc_total_pieces_to_transfer, P2pStreamingRpcReceiveProgress, }; @@ -70,10 +71,12 @@ pub fn rpc_effects(store: &mut Store, action: ActionWithMeta block, + let last_produced_block_info = match make_produced_block_info(last_produced_block) { + Ok(data) => data, Err(error) => { - bug_condition!("HeartbeatGet: Failed to encode block, returning None: {error}"); + bug_condition!( + "HeartbeatGet: Failed to encode block header, returning None: {error}" + ); None } }; @@ -82,7 +85,7 @@ pub fn rpc_effects(store: &mut Store, action: ActionWithMeta(store: &mut Store, action: ActionWithMeta { + respond_or_log!( + store + .service() + .respond_snark_pool_completed_jobs_get(rpc_id, jobs), + meta.time() + ); + } + RpcEffectfulAction::SnarkPoolPendingJobsGet { rpc_id, jobs } => { + respond_or_log!( + store + .service() + .respond_snark_pool_pending_jobs_get(rpc_id, jobs), + meta.time() + ); + } RpcEffectfulAction::SnarkerConfigGet { rpc_id, config } => { let _ = store.service().respond_snarker_config_get(rpc_id, config); } @@ -521,17 +540,17 @@ pub fn rpc_effects(store: &mut Store, action: ActionWithMeta RpcSnarkerJobSpecResponse::Ok( - mina_p2p_messages::v2::SnarkWorkerWorkerRpcsVersionedGetWorkV2TResponse(Some(( - mina_p2p_messages::v2::SnarkWorkerWorkerRpcsVersionedGetWorkV2TResponseA0 { - instances, - fee, - }, - public_key, - ))) - ), - Err(err) => RpcSnarkerJobSpecResponse::Err(err), - }; + Ok(instances) => RpcSnarkerJobSpecResponse::Ok( + mina_p2p_messages::v2::SnarkWorkerWorkerRpcsVersionedGetWorkV2TResponse(Some(( + mina_p2p_messages::v2::SnarkWorkerWorkerRpcsVersionedGetWorkV2TResponseA0 { + instances, + fee, + }, + public_key, + ))) + ), + Err(err) => RpcSnarkerJobSpecResponse::Err(err), + }; // TODO: handle potential errors let _ = store.service().respond_snarker_job_spec(rpc_id, input); @@ -620,9 +639,9 @@ pub fn rpc_effects(store: &mut Store, action: ActionWithMeta { + // Is this todo still relevant? // TODO(adonagy): maybe something more effective? match account_query { - AccountQuery::SinglePublicKey(_pk) => todo!(), // all the accounts for the FE in Slim form AccountQuery::All => { let mut accounts: BTreeMap = accounts @@ -664,7 +683,13 @@ pub fn rpc_effects(store: &mut Store, action: ActionWithMeta { + AccountQuery::SinglePublicKey(..) | AccountQuery::PubKeyWithTokenId(..) => { + respond_or_log!( + store.service().respond_ledger_accounts(rpc_id, accounts), + meta.time() + ) + } + AccountQuery::MultipleIds(..) 
=> { respond_or_log!( store.service().respond_ledger_accounts(rpc_id, accounts), meta.time() @@ -769,6 +794,71 @@ pub fn rpc_effects(store: &mut Store, action: ActionWithMeta { + respond_or_log!( + store.service().respond_block_get(rpc_id, block), + meta.time() + ) + } + + RpcEffectfulAction::PooledUserCommands { + rpc_id, + user_commands, + } => { + respond_or_log!( + store + .service() + .respond_pooled_user_commands(rpc_id, user_commands), + meta.time() + ) + } + + RpcEffectfulAction::PooledZkappCommands { + rpc_id, + zkapp_commands, + } => { + respond_or_log!( + store + .service() + .respond_pooled_zkapp_commands(rpc_id, zkapp_commands), + meta.time() + ) + } + RpcEffectfulAction::GenesisBlock { + rpc_id, + genesis_block, + } => { + respond_or_log!( + store.service().respond_genesis_block(rpc_id, genesis_block), + meta.time() + ) + } + + RpcEffectfulAction::ConsensusTimeGet { + rpc_id, + consensus_time, + } => { + respond_or_log!( + store + .service() + .respond_consensus_time_get(rpc_id, consensus_time), + meta.time() + ) + } + RpcEffectfulAction::LedgerStatusGetSuccess { rpc_id, response } => { + respond_or_log!( + store.service().respond_ledger_status_get(rpc_id, response), + meta.time() + ) + } + RpcEffectfulAction::LedgerAccountDelegatorsGetSuccess { rpc_id, response } => { + respond_or_log!( + store + .service() + .respond_ledger_account_delegators_get(rpc_id, response), + meta.time() + ) + } } } @@ -780,12 +870,45 @@ fn compute_node_status(store: &mut Store) -> RpcNodeStatus { height: b.height(), global_slot: b.global_slot(), }; - let current_block_production_attempt = store + + let block_production_attempts = store .service .stats() - .and_then(|stats| Some(stats.block_producer().collect_attempts().last()?.clone())); + .map_or_else(Vec::new, |stats| stats.block_producer().collect_attempts()); + + let current_block_production_attempt = block_production_attempts.last().cloned(); + + let previous_block_production_attempt = block_production_attempts + .len() + .checked_sub(2) + .and_then(|idx| block_production_attempts.get(idx)) + .cloned(); + + let network_info = RpcNodeStatusNetworkInfo { + bind_ip: "0.0.0.0".to_string(), + external_ip: state + .p2p + .config() + .external_addrs + .first() + .map(|addr| addr.to_string()), + client_port: state.config.client_port, + libp2p_port: state.p2p.config().libp2p_port, + }; + + let block_producer = state + .block_producer + .config() + .map(|config| AccountPublicKey::from(config.pub_key.clone())); + let coinbase_receiver = state + .block_producer + .config() + .map(|config| AccountPublicKey::from(config.coinbase_receiver().clone())); + let status = RpcNodeStatus { chain_id, + block_producer, + coinbase_receiver, transition_frontier: RpcNodeStatusTransitionFrontier { best_tip: state.transition_frontier.best_tip().map(block_summary), sync: RpcNodeStatusTransitionFrontierSync { @@ -827,6 +950,7 @@ fn compute_node_status(store: &mut Store) -> RpcNodeStatus { transaction_candidates: state.transaction_pool.candidates.transactions_count(), }, current_block_production_attempt, + previous_block_production_attempt, resources_status: RpcNodeStatusResources { p2p_malloc_size: { let mut set = BTreeSet::new(); @@ -837,20 +961,32 @@ fn compute_node_status(store: &mut Store) -> RpcNodeStatus { transition_frontier: state.transition_frontier.resources_usage(), snark_pool: state.snark_pool.resources_usage(), }, + service_queues: store.service.queues(), + network_info, }; status } -fn base64_encode_block(block: Option) -> std::io::Result> { +fn 
make_produced_block_info( + block: Option, +) -> std::io::Result> { use base64::{engine::general_purpose::URL_SAFE, Engine as _}; use mina_p2p_messages::binprot::BinProtWrite; let Some(block) = block else { return Ok(None) }; - let mut buf = Vec::with_capacity(10 * 1024 * 1024); - v2::MinaBlockBlockStableV2::binprot_write(&block.block, &mut buf)?; + let height = block.height(); + let global_slot = block.global_slot(); + let hash = block.hash().to_string(); + let mut buf = Vec::with_capacity(5 * 1024 * 1024); + v2::MinaBlockHeaderStableV2::binprot_write(block.header(), &mut buf)?; - let base64_encoded = URL_SAFE.encode(&buf); + let base64_encoded_header = URL_SAFE.encode(&buf); - Ok(Some(base64_encoded)) + Ok(Some(ProducedBlockInfo { + height, + global_slot, + hash, + base64_encoded_header, + })) } diff --git a/node/src/rpc_effectful/rpc_service.rs b/node/src/rpc_effectful/rpc_service.rs index 8400db0575..1575a9ba8b 100644 --- a/node/src/rpc_effectful/rpc_service.rs +++ b/node/src/rpc_effectful/rpc_service.rs @@ -2,14 +2,18 @@ use crate::{ p2p::connection::P2pConnectionResponse, rpc::{ RpcActionStatsGetResponse, RpcBestChainResponse, RpcBlockProducerStatsGetResponse, - RpcDiscoveryBoostrapStatsResponse, RpcDiscoveryRoutingTableResponse, - RpcHealthCheckResponse, RpcHeartbeatGetResponse, RpcId, RpcLedgerAccountsResponse, - RpcLedgerSlimAccountsResponse, RpcMessageProgressResponse, - RpcP2pConnectionOutgoingResponse, RpcPeersGetResponse, RpcReadinessCheckResponse, - RpcScanStateSummaryGetResponse, RpcSnarkPoolGetResponse, RpcSnarkPoolJobGetResponse, - RpcSnarkerConfigGetResponse, RpcSnarkerJobCommitResponse, RpcSnarkerJobSpecResponse, - RpcSnarkerWorkersResponse, RpcStatusGetResponse, RpcSyncStatsGetResponse, - RpcTransactionInjectResponse, RpcTransactionPoolResponse, RpcTransactionStatusGetResponse, + RpcConsensusTimeGetResponse, RpcDiscoveryBoostrapStatsResponse, + RpcDiscoveryRoutingTableResponse, RpcGenesisBlockResponse, RpcGetBlockResponse, + RpcHealthCheckResponse, RpcHeartbeatGetResponse, RpcId, + RpcLedgerAccountDelegatorsGetResponse, RpcLedgerAccountsResponse, + RpcLedgerSlimAccountsResponse, RpcLedgerStatusGetResponse, RpcMessageProgressResponse, + RpcP2pConnectionOutgoingResponse, RpcPeersGetResponse, RpcPooledUserCommandsResponse, + RpcPooledZkappCommandsResponse, RpcReadinessCheckResponse, RpcScanStateSummaryGetResponse, + RpcSnarkPoolCompletedJobsResponse, RpcSnarkPoolGetResponse, RpcSnarkPoolJobGetResponse, + RpcSnarkPoolPendingJobsGetResponse, RpcSnarkerConfigGetResponse, + RpcSnarkerJobCommitResponse, RpcSnarkerJobSpecResponse, RpcSnarkerWorkersResponse, + RpcStatusGetResponse, RpcSyncStatsGetResponse, RpcTransactionInjectResponse, + RpcTransactionPoolResponse, RpcTransactionStatusGetResponse, RpcTransitionFrontierUserCommandsResponse, }, State, @@ -113,6 +117,16 @@ pub trait RpcService { rpc_id: RpcId, response: RpcSnarkPoolJobGetResponse, ) -> Result<(), RespondError>; + fn respond_snark_pool_completed_jobs_get( + &mut self, + rpc_id: RpcId, + response: RpcSnarkPoolCompletedJobsResponse, + ) -> Result<(), RespondError>; + fn respond_snark_pool_pending_jobs_get( + &mut self, + rpc_id: RpcId, + response: RpcSnarkPoolPendingJobsGetResponse, + ) -> Result<(), RespondError>; fn respond_snarker_config_get( &mut self, rpc_id: RpcId, @@ -193,4 +207,39 @@ pub trait RpcService { rpc_id: RpcId, response: RpcTransactionStatusGetResponse, ) -> Result<(), RespondError>; + fn respond_block_get( + &mut self, + rpc_id: RpcId, + response: RpcGetBlockResponse, + ) -> Result<(), RespondError>; 
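// A minimal sketch of the header encoding that `make_produced_block_info`
// (above, in node/src/rpc_effectful) performs, using only the imports that
// hunk already has (`base64`'s `Engine` API, `BinProtWrite` from
// `mina_p2p_messages::binprot`). `encode_header` is a hypothetical helper
// for illustration, not part of this change.
use base64::{engine::general_purpose::URL_SAFE, Engine as _};
use mina_p2p_messages::{binprot::BinProtWrite, v2};

fn encode_header(header: &v2::MinaBlockHeaderStableV2) -> std::io::Result<String> {
    // bin_prot-serialize only the header (the full block with its staged
    // ledger diff is much larger, hence the smaller 5 MiB buffer), then
    // URL-safe base64-encode the bytes so they can travel inside JSON.
    let mut buf = Vec::with_capacity(5 * 1024 * 1024);
    v2::MinaBlockHeaderStableV2::binprot_write(header, &mut buf)?;
    Ok(URL_SAFE.encode(&buf))
}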
+ fn respond_pooled_user_commands( + &mut self, + rpc_id: RpcId, + response: RpcPooledUserCommandsResponse, + ) -> Result<(), RespondError>; + fn respond_pooled_zkapp_commands( + &mut self, + rpc_id: RpcId, + response: RpcPooledZkappCommandsResponse, + ) -> Result<(), RespondError>; + fn respond_genesis_block( + &mut self, + rpc_id: RpcId, + response: RpcGenesisBlockResponse, + ) -> Result<(), RespondError>; + fn respond_consensus_time_get( + &mut self, + rpc_id: RpcId, + response: RpcConsensusTimeGetResponse, + ) -> Result<(), RespondError>; + fn respond_ledger_status_get( + &mut self, + rpc_id: RpcId, + response: RpcLedgerStatusGetResponse, + ) -> Result<(), RespondError>; + fn respond_ledger_account_delegators_get( + &mut self, + rpc_id: RpcId, + response: RpcLedgerAccountDelegatorsGetResponse, + ) -> Result<(), RespondError>; } diff --git a/node/src/service.rs b/node/src/service.rs index 9ad4c45671..b09f561023 100644 --- a/node/src/service.rs +++ b/node/src/service.rs @@ -34,7 +34,21 @@ pub trait Service: + RpcService + ArchiveService { + fn queues(&mut self) -> Queues; fn stats(&mut self) -> Option<&mut Stats>; fn recorder(&mut self) -> &mut Recorder; fn is_replay(&self) -> bool; } + +#[derive(serde::Serialize, serde::Deserialize, Debug, Clone)] +pub struct Queues { + pub events: usize, + pub snark_block_verify: usize, + pub ledger: usize, + pub vrf_evaluator: Option, + pub block_prover: Option, + pub p2p_webrtc: usize, + #[cfg(feature = "p2p-libp2p")] + pub p2p_libp2p: usize, + pub rpc: usize, +} diff --git a/node/src/snark_pool/candidate/snark_pool_candidate_actions.rs b/node/src/snark_pool/candidate/snark_pool_candidate_actions.rs index f35ce599a1..a9aa1d35c6 100644 --- a/node/src/snark_pool/candidate/snark_pool_candidate_actions.rs +++ b/node/src/snark_pool/candidate/snark_pool_candidate_actions.rs @@ -49,6 +49,7 @@ pub enum SnarkPoolCandidateAction { WorkVerifyError { peer_id: PeerId, verify_id: SnarkWorkVerifyId, + batch: Vec, }, WorkVerifySuccess { peer_id: PeerId, @@ -69,7 +70,7 @@ impl redux::EnablingCondition for SnarkPoolCandidateAction { .snark_pool .candidates .get(*peer_id, &info.job_id) - .map_or(true, |v| info > v) + .is_none_or(|v| info > v) } SnarkPoolCandidateAction::WorkFetchAll => state.p2p.ready().is_some(), SnarkPoolCandidateAction::WorkFetchInit { peer_id, job_id } => { @@ -103,7 +104,7 @@ impl redux::EnablingCondition for SnarkPoolCandidateAction { .snark_pool .candidates .get(*peer_id, &job_id) - .map_or(true, |v| match work.partial_cmp(v).unwrap() { + .is_none_or(|v| match work.partial_cmp(v).unwrap() { Ordering::Less => false, Ordering::Greater => true, Ordering::Equal => { @@ -111,7 +112,10 @@ impl redux::EnablingCondition for SnarkPoolCandidateAction { } }) } - SnarkPoolCandidateAction::WorkVerifyNext => state.snark.work_verify.jobs.is_empty(), + SnarkPoolCandidateAction::WorkVerifyNext => { + state.snark.work_verify.jobs.is_empty() + && state.transition_frontier.sync.is_synced() + } SnarkPoolCandidateAction::WorkVerifyPending { peer_id, job_ids, .. 
} => { diff --git a/node/src/snark_pool/candidate/snark_pool_candidate_reducer.rs b/node/src/snark_pool/candidate/snark_pool_candidate_reducer.rs index c11769c5fd..5ee9e3aa74 100644 --- a/node/src/snark_pool/candidate/snark_pool_candidate_reducer.rs +++ b/node/src/snark_pool/candidate/snark_pool_candidate_reducer.rs @@ -1,11 +1,11 @@ use std::collections::BTreeMap; use crate::{p2p_ready, SnarkPoolAction}; -use openmina_core::snark::Snark; +use openmina_core::snark::{Snark, SnarkJobId}; use p2p::{ channels::rpc::{P2pChannelsRpcAction, P2pRpcId, P2pRpcRequest}, disconnection::{P2pDisconnectionAction, P2pDisconnectionReason}, - PeerId, + BroadcastMessageId, P2pNetworkPubsubAction, PeerId, }; use snark::{work_verify::SnarkWorkVerifyAction, work_verify_effectful::SnarkWorkVerifyId}; @@ -124,10 +124,11 @@ impl SnarkPoolCandidatesState { } }), on_error: redux::callback!( - on_snark_pool_candidate_work_verify_error((req_id: SnarkWorkVerifyId, sender: String)) -> crate::Action { + on_snark_pool_candidate_work_verify_error((req_id: SnarkWorkVerifyId, sender: String, batch: Vec)) -> crate::Action { SnarkPoolCandidateAction::WorkVerifyError { peer_id: sender.parse().unwrap(), verify_id: req_id, + batch } }), }); @@ -144,7 +145,11 @@ impl SnarkPoolCandidatesState { } => { state.verify_pending(meta.time(), peer_id, *verify_id, job_ids); } - SnarkPoolCandidateAction::WorkVerifyError { peer_id, verify_id } => { + SnarkPoolCandidateAction::WorkVerifyError { + peer_id, + verify_id, + batch, + } => { state.verify_result(meta.time(), peer_id, *verify_id, Err(())); // TODO(binier): blacklist peer @@ -154,6 +159,22 @@ impl SnarkPoolCandidatesState { peer_id, reason: P2pDisconnectionReason::SnarkPoolVerifyError, }); + + // TODO: This is not correct. We are rejecting all snark messages, but the fact that the batch + // failed to verify means that there is at least one invalid snark in the batch, not that all of them + // are invalid. + // Instead, what should happen here is that we split the batch in two and try to verify the two batches + // again. Repeating until batches don't fail to verify anymore, or each batch is of size 1. + // It may also be worth capping the batch sizes to 10. 
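// A compact sketch of the bisection strategy the TODO above describes.
// `verify_batch` is a hypothetical synchronous stand-in for the real
// (asynchronous, state-machine driven) snark verification; it returns true
// only when every snark in the batch verifies.
fn isolate_invalid<T: Clone>(
    batch: &[T],
    verify_batch: &impl Fn(&[T]) -> bool,
    invalid: &mut Vec<T>,
) {
    if batch.is_empty() || verify_batch(batch) {
        return; // whole batch verified: nothing to reject
    }
    if let [single] = batch {
        invalid.push(single.clone()); // a failing batch of size 1 is provably invalid
        return;
    }
    // Split the failing batch in half and retry each side, recursing until
    // every invalid snark has been isolated in a size-1 batch.
    let (left, right) = batch.split_at(batch.len() / 2);
    isolate_invalid(left, verify_batch, invalid);
    isolate_invalid(right, verify_batch, invalid);
}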
+ for snark_job_id in batch { + dispatcher.push(P2pNetworkPubsubAction::RejectMessage { + message_id: Some(BroadcastMessageId::Snark { + job_id: snark_job_id.clone(), + }), + peer_id: None, + reason: "Snark work verification failed".to_string(), + }); + } } SnarkPoolCandidateAction::WorkVerifySuccess { peer_id, diff --git a/node/src/snark_pool/snark_pool_reducer.rs b/node/src/snark_pool/snark_pool_reducer.rs index ad0cd19057..f66207b971 100644 --- a/node/src/snark_pool/snark_pool_reducer.rs +++ b/node/src/snark_pool/snark_pool_reducer.rs @@ -1,12 +1,11 @@ use std::collections::BTreeMap; +use crate::{snark_pool::JobCommitment, ExternalSnarkWorkerAction, SnarkerStrategy}; use openmina_core::snark::{SnarkJobCommitment, SnarkJobId}; use p2p::channels::{ snark::P2pChannelsSnarkAction, snark_job_commitment::P2pChannelsSnarkJobCommitmentAction, }; -use crate::{snark_pool::JobCommitment, ExternalSnarkWorkerAction, SnarkerStrategy}; - use super::{ JobState, SnarkPoolAction, SnarkPoolActionWithMetaRef, SnarkPoolEffectfulAction, SnarkPoolState, SnarkWork, @@ -60,7 +59,7 @@ impl SnarkPoolState { let take = state .get(&id) .and_then(|job| job.snark.as_ref()) - .map_or(true, |old_snark| snark.work > old_snark.work); + .is_none_or(|old_snark| snark.work > old_snark.work); if take { state.set_snark_work(snark.clone()); } @@ -202,8 +201,6 @@ impl SnarkPoolState { } } - // TODO: libp2p logic already broadcasts everything right now and doesn't - // wait for validation, thad needs to be fixed. See #952 dispatcher.push(P2pChannelsSnarkAction::Libp2pBroadcast { snark: snark.clone(), nonce: 0, diff --git a/node/src/snark_pool/snark_pool_state.rs b/node/src/snark_pool/snark_pool_state.rs index a5c6de0d53..707ff96ddd 100644 --- a/node/src/snark_pool/snark_pool_state.rs +++ b/node/src/snark_pool/snark_pool_state.rs @@ -226,7 +226,7 @@ fn is_job_commitment_timed_out(job: &JobState, time_now: Timestamp) -> bool { let didnt_deliver = job .snark .as_ref() - .map_or(true, |snark| snark.work < commitment.commitment); + .is_none_or(|snark| snark.work < commitment.commitment); is_timed_out && didnt_deliver } diff --git a/node/src/state.rs b/node/src/state.rs index 4c829a4d9d..867540c526 100644 --- a/node/src/state.rs +++ b/node/src/state.rs @@ -3,7 +3,8 @@ use std::time::Duration; use malloc_size_of_derive::MallocSizeOf; use mina_p2p_messages::v2; -use openmina_core::constants::PROTOCOL_VERSION; +use openmina_core::block::prevalidate::{prevalidate_block, BlockPrevalidationError}; +use openmina_core::consensus::ConsensusTime; use openmina_core::transaction::{TransactionInfo, TransactionWithHash}; use p2p::P2pNetworkPubsubMessageCacheId; use rand::prelude::*; @@ -57,7 +58,7 @@ pub use crate::transition_frontier::TransitionFrontierState; pub use crate::watched_accounts::WatchedAccountsState; pub use crate::Config; use crate::{config::GlobalConfig, SnarkPoolAction}; -use crate::{ActionWithMeta, RpcAction, TransactionPoolAction}; +use crate::{ActionWithMeta, RpcAction}; #[derive(Serialize, Deserialize, Debug, Clone)] pub struct State { @@ -80,25 +81,6 @@ pub struct State { applied_actions_count: u64, } -#[derive(Serialize, Deserialize, Debug, Clone)] -pub enum BlockPrevalidationError { - GenesisNotReady, - ReceivedTooEarly { - current_global_slot: u32, - block_global_slot: u32, - }, - ReceivedTooLate { - current_global_slot: u32, - block_global_slot: u32, - delta: u32, - }, - InvalidGenesisProtocolState, - InvalidProtocolVersion, - MismatchedProtocolVersion, - ConsantsMismatch, - InvalidDeltaBlockChainProof, -} - // Substate 
accessors that will be used in reducers use openmina_core::{bug_condition, impl_substate_access, SubstateAccess}; @@ -376,6 +358,24 @@ impl State { ) } + pub fn slot_time(&self, global_slot: u64) -> Option<(Timestamp, Timestamp)> { + let genesis_timestamp = self.genesis_block()?.genesis_timestamp(); + println!("genesis_timestamp: {}", u64::from(genesis_timestamp)); + + let start_time = genesis_timestamp.checked_add( + global_slot + .checked_mul(constraint_constants().block_window_duration_ms)? + .checked_mul(1_000_000)?, + )?; + let end_time = start_time.checked_add( + constraint_constants() + .block_window_duration_ms + .checked_mul(1_000_000)?, + )?; + + Some((start_time, end_time)) + } + pub fn producing_block_after_genesis(&self) -> bool { #[allow(clippy::arithmetic_side_effects)] let two_mins_in_future = self.time() + Duration::from_secs(2 * 60); @@ -408,75 +408,44 @@ impl State { return Err(BlockPrevalidationError::GenesisNotReady); }; - // received_at_valid_time - // https://github.com/minaprotocol/mina/blob/6af211ad58e9356f00ea4a636cea70aa8267c072/src/lib/consensus/proof_of_stake.ml#L2746 - { - let block_global_slot = block.global_slot(); - - let delta = genesis.constants().delta.as_u32(); - if cur_global_slot < block_global_slot { - // Too_early - return Err(BlockPrevalidationError::ReceivedTooEarly { - current_global_slot: cur_global_slot, - block_global_slot, - }); - } else if !allow_block_too_late - && cur_global_slot.saturating_sub(block_global_slot) > delta - { - // Too_late - return Err(BlockPrevalidationError::ReceivedTooLate { - current_global_slot: cur_global_slot, - block_global_slot, - delta, - }); - } - } - - if block.header().genesis_state_hash() != genesis.hash() { - return Err(BlockPrevalidationError::InvalidGenesisProtocolState); - } - - let (protocol_versions_are_valid, protocol_version_matches_daemon) = { - let min_transaction_version = 1.into(); - let v = &block.header().current_protocol_version; - let nv = block - .header() - .proposed_protocol_version_opt - .as_ref() - .unwrap_or(v); - - // Our version values are unsigned, so there is no need to check that the - // other parts are not negative. - let valid = v.transaction >= min_transaction_version - && nv.transaction >= min_transaction_version; - let compatible = v.transaction == PROTOCOL_VERSION.transaction - && v.network == PROTOCOL_VERSION.network; - - (valid, compatible) - }; - - if !protocol_versions_are_valid { - return Err(BlockPrevalidationError::InvalidProtocolVersion); - } else if !protocol_version_matches_daemon { - return Err(BlockPrevalidationError::MismatchedProtocolVersion); - } - - // NOTE: currently these cannot change between blocks, but that - // may not always be true? 
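// A worked example for the `slot_time` helper added above. Two assumptions
// not established by this diff: `Timestamp` counts nanoseconds since the
// unix epoch, and `block_window_duration_ms` is mainnet's 180_000
// (3-minute slots). The `checked_*` chain in `slot_time` returns None on
// overflow instead of panicking.
fn slot_time_example() {
    let genesis_ns: u64 = 1_600_000_000_000_000_000; // hypothetical genesis timestamp
    let slot_ms: u64 = 180_000;
    let global_slot: u64 = 2;
    // The `* 1_000_000` factor in `slot_time` is this ms -> ns conversion.
    let start = genesis_ns + global_slot * slot_ms * 1_000_000;
    let end = start + slot_ms * 1_000_000;
    assert_eq!(end - start, 180_000_000_000); // one slot spans 180s of ns
    assert_eq!(start - genesis_ns, 360_000_000_000); // slot 2 starts 360s after genesis
}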
- if block.constants() != genesis.constants() { - return Err(BlockPrevalidationError::ConsantsMismatch); - } - - // TODO(tizoc): check for InvalidDeltaBlockChainProof - // https://github.com/MinaProtocol/mina/blob/d800da86a764d8d37ffb8964dd8d54d9f522b358/src/lib/mina_block/validation.ml#L369 - // https://github.com/MinaProtocol/mina/blob/d800da86a764d8d37ffb8964dd8d54d9f522b358/src/lib/transition_chain_verifier/transition_chain_verifier.ml - - Ok(()) + prevalidate_block(block, &genesis, cur_global_slot, allow_block_too_late) } pub fn should_log_node_id(&self) -> bool { self.config.testing_run } + + pub fn consensus_time_now(&self) -> Option { + let (start_time, end_time) = self.slot_time(self.cur_global_slot()?.into())?; + let epoch = self.current_epoch()?; + let global_slot = self.cur_global_slot()?; + let slot = self.current_slot()?; + Some(ConsensusTime { + start_time, + end_time, + epoch, + global_slot, + slot, + }) + } + + pub fn consensus_time_best_tip(&self) -> Option { + let best_tip = self.transition_frontier.best_tip()?; + let global_slot = best_tip + .curr_global_slot_since_hard_fork() + .slot_number + .as_u32(); + let (start_time, end_time) = self.slot_time(global_slot.into())?; + let epoch = best_tip.consensus_state().epoch_count.as_u32(); + let slot = best_tip.slot(); + Some(ConsensusTime { + start_time, + end_time, + epoch, + global_slot, + slot, + }) + } } #[serde_with::serde_as] @@ -538,11 +507,12 @@ impl P2p { } } )), - on_p2p_channels_transaction_libp2p_received: Some(redux::callback!( - on_p2p_channels_transaction_libp2p_received(transaction: Box) -> crate::Action { - TransactionPoolAction::StartVerify { - commands: std::iter::once(*transaction).collect(), - from_rpc: None + on_p2p_channels_transactions_libp2p_received: Some(redux::callback!( + on_p2p_channels_transactions_libp2p_received((peer_id: PeerId, transactions: Vec, message_id: P2pNetworkPubsubMessageCacheId)) -> crate::Action { + TransactionPoolCandidateAction::Libp2pTransactionsReceived { + message_id, + transactions, + peer_id } } )), diff --git a/node/src/stats/stats_sync.rs b/node/src/stats/stats_sync.rs index 87132ed02c..99143e571c 100644 --- a/node/src/stats/stats_sync.rs +++ b/node/src/stats/stats_sync.rs @@ -178,9 +178,11 @@ impl SyncStats { best_tip: &ArcBlockWithHash, root_block: &ArcBlockWithHash, ) -> &mut Self { - let kind = match self.snapshots.back().map_or(true, |s| { - matches!(s.kind, SyncKind::Bootstrap) && s.synced.is_none() - }) { + let kind = match self + .snapshots + .back() + .is_none_or(|s| matches!(s.kind, SyncKind::Bootstrap) && s.synced.is_none()) + { true => SyncKind::Bootstrap, false => SyncKind::Catchup, }; diff --git a/node/src/transaction_pool/candidate/transaction_pool_candidate_actions.rs b/node/src/transaction_pool/candidate/transaction_pool_candidate_actions.rs index 42d5e4ec2d..3fefaa90fa 100644 --- a/node/src/transaction_pool/candidate/transaction_pool_candidate_actions.rs +++ b/node/src/transaction_pool/candidate/transaction_pool_candidate_actions.rs @@ -1,5 +1,8 @@ -use openmina_core::transaction::{TransactionHash, TransactionInfo, TransactionWithHash}; +use openmina_core::transaction::{ + TransactionHash, TransactionInfo, TransactionPoolMessageSource, TransactionWithHash, +}; use openmina_core::ActionEvent; +use p2p::P2pNetworkPubsubMessageCacheId; use serde::{Deserialize, Serialize}; use crate::p2p::channels::rpc::P2pRpcId; @@ -37,12 +40,19 @@ pub enum TransactionPoolCandidateAction { peer_id: PeerId, transaction: TransactionWithHash, }, + /// Callback for 
transactions received over pubsub + Libp2pTransactionsReceived { + peer_id: PeerId, + transactions: Vec, + message_id: P2pNetworkPubsubMessageCacheId, + }, #[action_event(level = trace)] VerifyNext, VerifyPending { peer_id: PeerId, transaction_hashes: Vec, verify_id: (), + from_source: TransactionPoolMessageSource, }, VerifyError { peer_id: PeerId, @@ -51,6 +61,7 @@ pub enum TransactionPoolCandidateAction { VerifySuccess { peer_id: PeerId, verify_id: (), + from_source: TransactionPoolMessageSource, }, PeerPrune { peer_id: PeerId, @@ -100,7 +111,17 @@ impl redux::EnablingCondition for TransactionPoolCandidateAction { .candidates .get(*peer_id, transaction.hash()) .is_some(), - TransactionPoolCandidateAction::VerifyNext => true, + TransactionPoolCandidateAction::Libp2pTransactionsReceived { .. } => true, + TransactionPoolCandidateAction::VerifyNext => { + // Don't continue if we are producing a block, or we never synced yet + // or if the ledger service is busy. + !state.block_producer.is_producing() + && state + .transition_frontier + .best_tip() + .is_some_and(|b| !b.is_genesis()) + && !state.ledger.write.is_busy() + } TransactionPoolCandidateAction::VerifyPending { peer_id, transaction_hashes, diff --git a/node/src/transaction_pool/candidate/transaction_pool_candidate_reducer.rs b/node/src/transaction_pool/candidate/transaction_pool_candidate_reducer.rs index 14b589fef6..f5fc38017a 100644 --- a/node/src/transaction_pool/candidate/transaction_pool_candidate_reducer.rs +++ b/node/src/transaction_pool/candidate/transaction_pool_candidate_reducer.rs @@ -89,6 +89,18 @@ impl TransactionPoolCandidatesState { } => { state.transaction_received(meta.time(), *peer_id, transaction.clone()); } + TransactionPoolCandidateAction::Libp2pTransactionsReceived { + peer_id, + transactions, + message_id, + } => { + state.transactions_received( + meta.time(), + *peer_id, + transactions.clone(), + *message_id, + ); + } TransactionPoolCandidateAction::VerifyNext => { let (dispatcher, global_state) = state_context.into_dispatcher_and_state(); @@ -96,31 +108,34 @@ impl TransactionPoolCandidatesState { .transaction_pool .candidates .get_batch_to_verify(); - let Some((peer_id, batch)) = batch else { + let Some((peer_id, batch, from_source)) = batch else { return; }; let transaction_hashes = batch.iter().map(|tx| tx.hash().clone()).collect(); dispatcher.push(TransactionPoolAction::StartVerify { commands: batch.into_iter().collect(), - from_rpc: None, + from_source, }); dispatcher.push(TransactionPoolCandidateAction::VerifyPending { peer_id, transaction_hashes, verify_id: (), + from_source, }); } TransactionPoolCandidateAction::VerifyPending { peer_id, transaction_hashes, verify_id, + from_source, } => { state.verify_pending(meta.time(), peer_id, *verify_id, transaction_hashes); let dispatcher = state_context.into_dispatcher(); dispatcher.push(TransactionPoolCandidateAction::VerifySuccess { peer_id: *peer_id, verify_id: *verify_id, + from_source: *from_source, }); } TransactionPoolCandidateAction::VerifyError { @@ -138,8 +153,12 @@ impl TransactionPoolCandidatesState { // reason: P2pDisconnectionReason::TransactionPoolVerifyError, // }); } - TransactionPoolCandidateAction::VerifySuccess { peer_id, verify_id } => { - state.verify_result(meta.time(), peer_id, *verify_id, Ok(())); + TransactionPoolCandidateAction::VerifySuccess { + peer_id, + verify_id, + from_source, + } => { + state.verify_result(meta.time(), peer_id, *verify_id, from_source, Ok(())); } TransactionPoolCandidateAction::PeerPrune { peer_id } => { 
state.peer_remove(*peer_id); diff --git a/node/src/transaction_pool/candidate/transaction_pool_candidate_state.rs b/node/src/transaction_pool/candidate/transaction_pool_candidate_state.rs index 132adccc93..acf6e33286 100644 --- a/node/src/transaction_pool/candidate/transaction_pool_candidate_state.rs +++ b/node/src/transaction_pool/candidate/transaction_pool_candidate_state.rs @@ -3,6 +3,8 @@ use std::collections::{BTreeMap, BTreeSet}; use mina_p2p_messages::v2; +use openmina_core::transaction::TransactionPoolMessageSource; +use p2p::P2pNetworkPubsubMessageCacheId; use redux::Timestamp; use serde::{Deserialize, Serialize}; @@ -15,10 +17,17 @@ use crate::p2p::PeerId; static EMPTY_PEER_TX_CANDIDATES: BTreeMap = BTreeMap::new(); +type NextBatch = ( + PeerId, + Vec, + TransactionPoolMessageSource, +); + #[derive(Serialize, Deserialize, Debug, Clone, Default)] pub struct TransactionPoolCandidatesState { by_peer: BTreeMap>, by_hash: BTreeMap>, + by_message_id: BTreeMap)>, } #[derive(Serialize, Deserialize, Debug, Clone)] @@ -68,6 +77,10 @@ impl TransactionPoolCandidatesState { self.by_hash.contains_key(hash) } + pub fn message_id_contains(&self, message_id: &P2pNetworkPubsubMessageCacheId) -> bool { + self.by_message_id.contains_key(message_id) + } + pub fn peer_contains(&self, peer_id: PeerId, hash: &TransactionHash) -> bool { self.by_peer .get(&peer_id) @@ -203,10 +216,39 @@ impl TransactionPoolCandidatesState { self.by_peer.entry(peer_id).or_default().insert(hash, state); } - pub fn get_batch_to_verify(&self) -> Option<(PeerId, Vec)> { - for hash in self.by_hash.keys() { + pub fn transactions_received( + &mut self, + time: Timestamp, + peer_id: PeerId, + transactions: Vec, + message_id: P2pNetworkPubsubMessageCacheId, + ) { + let transaction_hashes = transactions + .iter() + .map(TransactionWithHash::hash) + .cloned() + .collect::>(); + + self.by_message_id + .insert(message_id, (peer_id, transaction_hashes)); + + transactions.into_iter().for_each(|transaction| { + self.transaction_received(time, peer_id, transaction); + }) + } + + /// Get next batch of transactions to verify, + /// first checks if there are any transactions to verify from pubsub + /// after that checks for transactions from peers + pub fn get_batch_to_verify(&self) -> Option { + self.next_batch_from_pubsub() + .or_else(|| self.next_batch_from_peers()) + } + + fn next_batch_from_peers(&self) -> Option { + for (hash, peers) in self.by_hash.iter() { if let Some(res) = None.or_else(|| { - for peer_id in self.by_hash.get(hash)? { + for peer_id in peers { let peer_transactions = self.by_peer.get(peer_id)?; if peer_transactions.get(hash)?.transaction().is_some() { let transactions = peer_transactions @@ -219,7 +261,7 @@ impl TransactionPoolCandidatesState { }) .cloned() .collect(); - return Some((*peer_id, transactions)); + return Some((*peer_id, transactions, TransactionPoolMessageSource::None)); } } None @@ -227,9 +269,36 @@ impl TransactionPoolCandidatesState { return Some(res); } } + None } + fn next_batch_from_pubsub(&self) -> Option { + let (message_id, (peer_id, transaction_hashes)) = self.by_message_id.iter().next()?; + let transactions = self + .by_peer + .get(peer_id)? + .iter() + .filter_map(|(hash, state)| { + let TransactionPoolCandidateState::Received { transaction, .. 
} = state else { + return None; + }; + if transaction_hashes.contains(hash) { + Some(transaction) + } else { + None + } + }) + .cloned() + .collect(); + + Some(( + *peer_id, + transactions, + TransactionPoolMessageSource::pubsub(*message_id), + )) + } + pub fn verify_pending( &mut self, time: Timestamp, @@ -259,17 +328,31 @@ impl TransactionPoolCandidatesState { _time: Timestamp, peer_id: &PeerId, verify_id: (), + from_source: &TransactionPoolMessageSource, _result: Result<(), ()>, ) { - if let Some(peer_transactions) = self.by_peer.get_mut(peer_id) { - let txs_to_remove = peer_transactions - .iter() - .filter(|(_, job_state)| job_state.pending_verify_id() == Some(verify_id)) - .map(|(hash, _)| hash.clone()) - .collect::>(); - - for hash in txs_to_remove { - self.transaction_remove(&hash); + match from_source { + TransactionPoolMessageSource::Pubsub { id } => { + let Some((_, transactions)) = self.by_message_id.remove(id) else { + return; + }; + + for hash in transactions { + self.transaction_remove(&hash); + } + } + _ => { + if let Some(peer_transactions) = self.by_peer.get_mut(peer_id) { + let txs_to_remove = peer_transactions + .iter() + .filter(|(_, job_state)| job_state.pending_verify_id() == Some(verify_id)) + .map(|(hash, _)| hash.clone()) + .collect::>(); + + for hash in txs_to_remove { + self.transaction_remove(&hash); + } + } } } } diff --git a/node/src/transaction_pool/transaction_pool_actions.rs b/node/src/transaction_pool/transaction_pool_actions.rs index f271feff89..181d9b2217 100644 --- a/node/src/transaction_pool/transaction_pool_actions.rs +++ b/node/src/transaction_pool/transaction_pool_actions.rs @@ -8,8 +8,14 @@ use ledger::{ }, Account, AccountId, }; -use mina_p2p_messages::{list::List, v2}; -use openmina_core::{requests::RpcId, transaction::TransactionWithHash, ActionEvent}; +use mina_p2p_messages::{ + list::List, + v2::{self}, +}; +use openmina_core::{ + transaction::{TransactionPoolMessageSource, TransactionWithHash}, + ActionEvent, +}; use redux::Callback; use serde::{Deserialize, Serialize}; @@ -24,16 +30,16 @@ pub enum TransactionPoolAction { Candidate(TransactionPoolCandidateAction), StartVerify { commands: List, - from_rpc: Option, + from_source: TransactionPoolMessageSource, }, StartVerifyWithAccounts { accounts: BTreeMap, pending_id: PendingId, - from_rpc: Option, + from_source: TransactionPoolMessageSource, }, VerifySuccess { valids: Vec, - from_rpc: Option, + from_source: TransactionPoolMessageSource, }, #[action_event(level = warn, fields(debug(errors)))] VerifyError { @@ -48,9 +54,7 @@ pub enum TransactionPoolAction { ApplyVerifiedDiff { best_tip_hash: v2::LedgerHash, diff: DiffVerified, - /// Diff was crearted locally, or from remote peer ? 
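// `TransactionPoolMessageSource` (replacing `from_rpc: Option<RpcId>`
// throughout this file) is defined in `openmina_core::transaction`; the
// reconstruction below is illustrative only, inferred from the variants and
// helpers used in this diff, and may differ from the real definition.
use openmina_core::requests::RpcId;
use p2p::P2pNetworkPubsubMessageCacheId;
use serde::{Deserialize, Serialize};

#[derive(Serialize, Deserialize, Debug, Clone, Copy)]
pub enum TransactionPoolMessageSource {
    /// No attributable sender (e.g. internal re-injection).
    None,
    /// Locally injected through the node's RPC interface.
    Rpc { id: RpcId },
    /// Received from the libp2p gossip network.
    Pubsub { id: P2pNetworkPubsubMessageCacheId },
}

impl TransactionPoolMessageSource {
    pub fn pubsub(id: P2pNetworkPubsubMessageCacheId) -> Self {
        Self::Pubsub { id }
    }
    /// RPC injections are the only "local" senders; this subsumes the
    /// removed `is_sender_local: bool` flag.
    pub fn is_sender_local(&self) -> bool {
        matches!(self, Self::Rpc { .. })
    }
    /// Pubsub-sourced diffs skip the pool's own rebroadcast path, since
    /// gossip validation handles their propagation.
    pub fn is_libp2p(&self) -> bool {
        matches!(self, Self::Pubsub { .. })
    }
}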
- is_sender_local: bool, - from_rpc: Option, + from_source: TransactionPoolMessageSource, }, ApplyVerifiedDiffWithAccounts { accounts: BTreeMap, @@ -127,7 +131,7 @@ impl redux::EnablingCondition for TransactionPoolAction { type TransactionPoolEffectfulActionCallback = Callback<( BTreeMap, Option, - Option, + TransactionPoolMessageSource, )>; #[derive(Serialize, Deserialize, Debug, Clone)] @@ -137,7 +141,7 @@ pub enum TransactionPoolEffectfulAction { ledger_hash: v2::LedgerHash, on_result: TransactionPoolEffectfulActionCallback, pending_id: Option, - from_rpc: Option, + from_source: TransactionPoolMessageSource, }, } diff --git a/node/src/transaction_pool/transaction_pool_effects.rs b/node/src/transaction_pool/transaction_pool_effects.rs index f508b140b6..56ec52e4e3 100644 --- a/node/src/transaction_pool/transaction_pool_effects.rs +++ b/node/src/transaction_pool/transaction_pool_effects.rs @@ -17,7 +17,7 @@ impl TransactionPoolEffectfulAction { ledger_hash, on_result, pending_id, - from_rpc, + from_source, } => { openmina_core::log::info!( openmina_core::log::system_time(); @@ -49,7 +49,7 @@ impl TransactionPoolEffectfulAction { .map(|account| (account.id(), account)) .collect::>(); - store.dispatch_callback(on_result, (accounts, pending_id, from_rpc)); + store.dispatch_callback(on_result, (accounts, pending_id, from_source)); } } } diff --git a/node/src/transaction_pool/transaction_pool_reducer.rs b/node/src/transaction_pool/transaction_pool_reducer.rs index c1b4a4e1c4..fa7fd22e20 100644 --- a/node/src/transaction_pool/transaction_pool_reducer.rs +++ b/node/src/transaction_pool/transaction_pool_reducer.rs @@ -9,10 +9,11 @@ use ledger::{ use openmina_core::{ bug_condition, constants::constraint_constants, - requests::RpcId, - transaction::{Transaction, TransactionWithHash}, + transaction::{Transaction, TransactionPoolMessageSource, TransactionWithHash}, +}; +use p2p::{ + channels::transaction::P2pChannelsTransactionAction, BroadcastMessageId, P2pNetworkPubsubAction, }; -use p2p::channels::transaction::P2pChannelsTransactionAction; use redux::callback; use snark::user_command_verify::{SnarkUserCommandVerifyAction, SnarkUserCommandVerifyId}; use std::collections::{BTreeMap, BTreeSet}; @@ -57,7 +58,10 @@ impl TransactionPoolState { meta.with_action(a), ); } - TransactionPoolAction::StartVerify { commands, from_rpc } => { + TransactionPoolAction::StartVerify { + commands, + from_source, + } => { let Ok(commands) = commands .iter() .map(TransactionWithHash::body) @@ -79,17 +83,17 @@ impl TransactionPoolState { dispatcher.push(TransactionPoolEffectfulAction::FetchAccounts { account_ids, ledger_hash: best_tip_hash.clone(), - on_result: callback!(fetch_to_verify((accounts: BTreeMap, id: Option, from_rpc: Option)) -> crate::Action { - TransactionPoolAction::StartVerifyWithAccounts { accounts, pending_id: id.unwrap(), from_rpc } + on_result: callback!(fetch_to_verify((accounts: BTreeMap, id: Option, from_source: TransactionPoolMessageSource)) -> crate::Action { + TransactionPoolAction::StartVerifyWithAccounts { accounts, pending_id: id.unwrap(), from_source } }), pending_id: Some(pending_id), - from_rpc: *from_rpc, + from_source: *from_source, }); } TransactionPoolAction::StartVerifyWithAccounts { accounts, pending_id, - from_rpc, + from_source, } => { let TransactionPoolAction::StartVerify { commands, .. 
} = substate.pending_actions.remove(pending_id).unwrap() @@ -120,14 +124,14 @@ impl TransactionPoolState { dispatcher.push(SnarkUserCommandVerifyAction::Init { req_id, commands: verifiable, - from_rpc: *from_rpc, + from_source: *from_source, on_success: callback!( on_snark_user_command_verify_success( - (req_id: SnarkUserCommandVerifyId, valids: Vec, from_rpc: Option) + (req_id: SnarkUserCommandVerifyId, valids: Vec, from_source: TransactionPoolMessageSource) ) -> crate::Action { TransactionPoolAction::VerifySuccess { valids, - from_rpc, + from_source, } } ), @@ -135,9 +139,7 @@ impl TransactionPoolState { on_snark_user_command_verify_error( (req_id: SnarkUserCommandVerifyId, errors: Vec) ) -> crate::Action { - TransactionPoolAction::VerifyError { - errors - } + TransactionPoolAction::VerifyError { errors } } ) }); @@ -148,11 +150,24 @@ impl TransactionPoolState { dispatcher.push(TransactionPoolAction::VerifyError { errors: errors.clone(), }); - if let Some(rpc_id) = from_rpc { - dispatcher.push(RpcAction::TransactionInjectFailure { - rpc_id: *rpc_id, - errors, - }) + + match from_source { + TransactionPoolMessageSource::Rpc { id } => { + dispatcher.push(RpcAction::TransactionInjectFailure { + rpc_id: *id, + errors, + }); + } + TransactionPoolMessageSource::Pubsub { id } => { + dispatcher.push(P2pNetworkPubsubAction::RejectMessage { + message_id: Some(BroadcastMessageId::MessageId { + message_id: *id, + }), + peer_id: None, + reason: "Transaction diff rejected".to_owned(), + }); + } + TransactionPoolMessageSource::None => {} } }; match e { @@ -169,7 +184,10 @@ impl TransactionPoolState { } } } - TransactionPoolAction::VerifySuccess { valids, from_rpc } => { + TransactionPoolAction::VerifySuccess { + valids, + from_source, + } => { let valids = valids .iter() .cloned() @@ -182,8 +200,7 @@ impl TransactionPoolState { dispatcher.push(TransactionPoolAction::ApplyVerifiedDiff { best_tip_hash, diff, - is_sender_local: from_rpc.is_some(), - from_rpc: *from_rpc, + from_source: *from_source, }); } TransactionPoolAction::VerifyError { .. 
} => { @@ -197,11 +214,11 @@ impl TransactionPoolState { dispatcher.push(TransactionPoolEffectfulAction::FetchAccounts { account_ids, ledger_hash: best_tip_hash.clone(), - on_result: callback!(fetch_for_best_tip((accounts: BTreeMap, id: Option, from_rpc: Option)) -> crate::Action { + on_result: callback!(fetch_for_best_tip((accounts: BTreeMap, id: Option, from_source: TransactionPoolMessageSource)) -> crate::Action { TransactionPoolAction::BestTipChangedWithAccounts { accounts } }), pending_id: None, - from_rpc: None, + from_source: TransactionPoolMessageSource::None, }); } TransactionPoolAction::BestTipChangedWithAccounts { accounts } => { @@ -220,8 +237,7 @@ impl TransactionPoolState { TransactionPoolAction::ApplyVerifiedDiff { best_tip_hash, diff, - is_sender_local: _, - from_rpc, + from_source, } => { let account_ids = substate.pool.get_accounts_to_apply_diff(diff); let pending_id = substate.make_action_pending(action); @@ -230,14 +246,14 @@ impl TransactionPoolState { dispatcher.push(TransactionPoolEffectfulAction::FetchAccounts { account_ids, ledger_hash: best_tip_hash.clone(), - on_result: callback!(fetch_for_apply((accounts: BTreeMap, id: Option, from_rpc: Option)) -> crate::Action { + on_result: callback!(fetch_for_apply((accounts: BTreeMap, id: Option, from_rpc: TransactionPoolMessageSource)) -> crate::Action { TransactionPoolAction::ApplyVerifiedDiffWithAccounts { accounts, pending_id: id.unwrap(), } }), pending_id: Some(pending_id), - from_rpc: *from_rpc, + from_source: *from_source, }); } TransactionPoolAction::ApplyVerifiedDiffWithAccounts { @@ -247,24 +263,22 @@ impl TransactionPoolState { let TransactionPoolAction::ApplyVerifiedDiff { best_tip_hash: _, diff, - is_sender_local, - from_rpc, + from_source, } = substate.pending_actions.remove(pending_id).unwrap() else { panic!() }; + let is_sender_local = from_source.is_sender_local(); // Note(adonagy): Action for rebroadcast, in his action we can use forget_check - let (rpc_action, was_accepted, accepted, rejected) = match substate - .pool - .unsafe_apply( - meta.time(), - global_slot_from_genesis, - global_slot, - &diff, - accounts, - is_sender_local, - ) { + let (was_accepted, accepted, rejected) = match substate.pool.unsafe_apply( + meta.time(), + global_slot_from_genesis, + global_slot, + &diff, + accounts, + is_sender_local, + ) { Ok((ApplyDecision::Accept, accepted, rejected, dropped)) => { for hash in dropped { substate.dpool.remove(&hash); @@ -275,20 +289,11 @@ impl TransactionPoolState { hash: tx.hash.clone(), }); } - let rpc_action = - from_rpc.map(|rpc_id| RpcAction::TransactionInjectSuccess { - rpc_id, - response: accepted.clone(), - }); - (rpc_action, true, accepted, rejected) + + (true, accepted, rejected) } Ok((ApplyDecision::Reject, accepted, rejected, _)) => { - let rpc_action = - from_rpc.map(|rpc_id| RpcAction::TransactionInjectRejected { - rpc_id, - response: rejected.clone(), - }); - (rpc_action, false, accepted, rejected) + (false, accepted, rejected) } Err(e) => { crate::core::warn!(meta.time(); kind = "TransactionPoolUnsafeApplyError", summary = e); @@ -297,12 +302,46 @@ impl TransactionPoolState { }; let dispatcher = state.into_dispatcher(); - if let Some(rpc_action) = rpc_action { - dispatcher.push(rpc_action); + + // TODO: use callbacks + match (was_accepted, from_source) { + (true, TransactionPoolMessageSource::Rpc { id }) => { + // Note: even though the diff was labeled as accepted the specific tx could be rejected + // (if it is not grounds for diff rejection) + if !rejected.is_empty() { + 
dispatcher.push(RpcAction::TransactionInjectRejected { + rpc_id: id, + response: rejected.clone(), + }); + } else if !accepted.is_empty() { + dispatcher.push(RpcAction::TransactionInjectSuccess { + rpc_id: id, + response: accepted.clone(), + }); + } + } + (true, TransactionPoolMessageSource::Pubsub { id }) => { + dispatcher.push(P2pNetworkPubsubAction::BroadcastValidatedMessage { + message_id: BroadcastMessageId::MessageId { message_id: id }, + }); + } + (false, TransactionPoolMessageSource::Rpc { id }) => { + dispatcher.push(RpcAction::TransactionInjectRejected { + rpc_id: id, + response: rejected.clone(), + }); + } + (false, TransactionPoolMessageSource::Pubsub { id }) => { + dispatcher.push(P2pNetworkPubsubAction::RejectMessage { + message_id: Some(BroadcastMessageId::MessageId { message_id: id }), + peer_id: None, + reason: "Rejected transaction diff".to_owned(), + }); + } + (_, TransactionPoolMessageSource::None) => {} } - // TODO: libp2p logic already broadcasts everything right now and doesn't - // wait for validation, thad needs to be fixed. See #952 - if was_accepted { + + if was_accepted && !from_source.is_libp2p() { dispatcher.push(TransactionPoolAction::Rebroadcast { accepted, rejected, @@ -324,14 +363,14 @@ impl TransactionPoolState { dispatcher.push(TransactionPoolEffectfulAction::FetchAccounts { account_ids: account_ids.union(&uncommitted).cloned().collect(), ledger_hash: best_tip_hash.clone(), - on_result: callback!(fetch_for_diff((accounts: BTreeMap, id: Option, from_rpc: Option)) -> crate::Action { + on_result: callback!(fetch_for_diff((accounts: BTreeMap, id: Option, from_source: TransactionPoolMessageSource)) -> crate::Action { TransactionPoolAction::ApplyTransitionFrontierDiffWithAccounts { accounts, pending_id: id.unwrap(), } }), pending_id: Some(pending_id), - from_rpc: None, + from_source: TransactionPoolMessageSource::None, }); } TransactionPoolAction::ApplyTransitionFrontierDiffWithAccounts { diff --git a/node/src/transition_frontier/archive/archive_config.rs b/node/src/transition_frontier/archive/archive_config.rs index 5ff5a382be..e3f5adea43 100644 --- a/node/src/transition_frontier/archive/archive_config.rs +++ b/node/src/transition_frontier/archive/archive_config.rs @@ -1,4 +1,7 @@ use serde::{Deserialize, Serialize}; +// use std::path::PathBuf; + +// TODO(adonagy): Do we need this? Is it just unnecessary boilerplate? 
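// The `(was_accepted, from_source)` match above, condensed into a pure
// decision table for readability. `Outcome` is a hypothetical enum standing
// in for the actions actually dispatched; the case where an accepted RPC
// diff has both `accepted` and `rejected` empty (which dispatches nothing)
// is elided. Note that an accepted diff can still carry individually
// rejected transactions, which is why `rejected` is inspected first.
enum Outcome {
    RpcSuccess,
    RpcRejected,
    PubsubValidated,
    PubsubRejected,
    Nothing,
}

fn route(
    was_accepted: bool,
    source: TransactionPoolMessageSource,
    rejected_is_empty: bool,
) -> Outcome {
    use TransactionPoolMessageSource as Src;
    match (was_accepted, source) {
        (true, Src::Rpc { .. }) if !rejected_is_empty => Outcome::RpcRejected,
        (true, Src::Rpc { .. }) => Outcome::RpcSuccess,
        (true, Src::Pubsub { .. }) => Outcome::PubsubValidated,
        (false, Src::Rpc { .. }) => Outcome::RpcRejected,
        (false, Src::Pubsub { .. }) => Outcome::PubsubRejected,
        (_, Src::None) => Outcome::Nothing,
    }
}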
#[derive(Serialize, Deserialize, Debug, Clone)] pub struct ArchiveConfig { @@ -6,9 +9,7 @@ pub struct ArchiveConfig { } impl ArchiveConfig { - pub fn new(address: &str) -> Self { - Self { - address: address.to_string(), - } + pub fn new(work_dir: String) -> Self { + Self { address: work_dir } } } diff --git a/node/src/transition_frontier/archive/archive_service.rs b/node/src/transition_frontier/archive/archive_service.rs index b61ba9a736..5e6a89b1c0 100644 --- a/node/src/transition_frontier/archive/archive_service.rs +++ b/node/src/transition_frontier/archive/archive_service.rs @@ -1,5 +1,5 @@ -use mina_p2p_messages::v2::ArchiveTransitionFronntierDiff; +use crate::ledger::write::BlockApplyResult; pub trait ArchiveService: redux::Service { - fn send_to_archive(&mut self, data: ArchiveTransitionFronntierDiff); + fn send_to_archive(&mut self, data: BlockApplyResult); } diff --git a/node/src/transition_frontier/candidate/transition_frontier_candidate_actions.rs b/node/src/transition_frontier/candidate/transition_frontier_candidate_actions.rs index 8bb3df049e..435e9e55b7 100644 --- a/node/src/transition_frontier/candidate/transition_frontier_candidate_actions.rs +++ b/node/src/transition_frontier/candidate/transition_frontier_candidate_actions.rs @@ -1,16 +1,12 @@ -use std::sync::Arc; - -use mina_p2p_messages::v2::{MinaBlockBlockStableV2, StateHash}; -use openmina_core::block::{ArcBlockWithHash, BlockWithHash}; +use mina_p2p_messages::v2::StateHash; +use openmina_core::block::prevalidate::BlockPrevalidationError; +use openmina_core::block::ArcBlockWithHash; use openmina_core::consensus::consensus_take; use openmina_core::{action_event, ActionEvent}; use serde::{Deserialize, Serialize}; use snark::block_verify::SnarkBlockVerifyError; use crate::snark::block_verify::SnarkBlockVerifyId; -use crate::state::BlockPrevalidationError; - -use super::TransitionFrontierCandidateStatus; pub type TransitionFrontierCandidateActionWithMeta = redux::ActionWithMeta; @@ -22,10 +18,11 @@ pub type TransitionFrontierCandidateActionWithMetaRef<'a> = #[derive(Serialize, Deserialize, Debug, Clone, ActionEvent)] #[action_event(level = debug, fields(debug(hash), debug(error)))] pub enum TransitionFrontierCandidateAction { - #[action_event(level = info)] + P2pBestTipUpdate { + best_tip: ArcBlockWithHash, + }, BlockReceived { - hash: StateHash, - block: Arc, + block: ArcBlockWithHash, chain_proof: Option<(Vec, ArcBlockWithHash)>, }, BlockPrevalidateSuccess { @@ -52,131 +49,49 @@ pub enum TransitionFrontierCandidateAction { hash: StateHash, error: SnarkBlockVerifyError, }, - DetectForkRange { - hash: StateHash, - }, - ShortRangeForkResolve { - hash: StateHash, - }, - LongRangeForkResolve { - hash: StateHash, - }, - #[action_event(level = info)] - BestTipUpdate { - hash: StateHash, - }, TransitionFrontierSyncTargetUpdate, - P2pBestTipUpdate { - best_tip: BlockWithHash>, - }, Prune, } impl redux::EnablingCondition for TransitionFrontierCandidateAction { fn is_enabled(&self, state: &crate::State, _time: redux::Timestamp) -> bool { match self { - TransitionFrontierCandidateAction::BlockReceived { hash, block, .. } => { - let block = ArcBlockWithHash { - hash: hash.clone(), - block: block.clone() - }; - !block.is_genesis() && !state.transition_frontier.candidates.blocks.contains_key(hash) - }, + TransitionFrontierCandidateAction::P2pBestTipUpdate { .. } => true, + TransitionFrontierCandidateAction::BlockReceived { block, .. 
} => { + !block.is_genesis() && !state.transition_frontier.candidates.contains(block.hash()) + } TransitionFrontierCandidateAction::BlockPrevalidateSuccess { hash } | TransitionFrontierCandidateAction::BlockPrevalidateError { hash, .. } => state - .transition_frontier.candidates - .blocks + .transition_frontier + .candidates .get(hash) .is_some_and(|block| block.status.is_received()), - TransitionFrontierCandidateAction::BlockChainProofUpdate { hash, .. } => { - (state.transition_frontier.candidates.best_tip.as_ref() == Some(hash) - && state.transition_frontier.candidates.best_tip_chain_proof.is_none()) - || state.transition_frontier - .candidates - .blocks - .get(hash) - .is_some_and( |b| b.status.is_pending() && b.chain_proof.is_none()) - }, + TransitionFrontierCandidateAction::BlockChainProofUpdate { hash, .. } => state + .transition_frontier + .candidates + .is_chain_proof_needed(hash), TransitionFrontierCandidateAction::BlockSnarkVerifyPending { req_id, hash } => { state .transition_frontier .candidates - .blocks .get(hash) - .is_some_and( |block| block.status.is_prevalidated()) + .is_some_and(|block| block.status.is_prevalidated()) && state.snark.block_verify.jobs.contains(*req_id) - }, - TransitionFrontierCandidateAction::BlockSnarkVerifySuccess { hash } => { - state - .transition_frontier - .candidates - .blocks - .get(hash) - .is_some_and( |block| block.status.is_snark_verify_pending()) - }, - TransitionFrontierCandidateAction::BlockSnarkVerifyError { hash, .. } => { - state - .transition_frontier - .candidates - .blocks - .get(hash) - .is_some_and( |block| block.status.is_snark_verify_pending()) - }, - TransitionFrontierCandidateAction::DetectForkRange { hash } => { - state - .transition_frontier - .candidates - .blocks - .get(hash) - .is_some_and( |block| { - matches!( - block.status, - TransitionFrontierCandidateStatus::SnarkVerifySuccess { .. } - ) - }) - }, - TransitionFrontierCandidateAction::ShortRangeForkResolve { hash } => { - state - .transition_frontier - .candidates - .blocks - .get(hash) - .is_some_and( |block| match state.transition_frontier.candidates.best_tip() { - Some(tip) => { - matches!( - &block.status, - TransitionFrontierCandidateStatus::ForkRangeDetected { compared_with, short_fork, .. } - if compared_with.as_ref() == Some(tip.hash) && *short_fork - ) - } - None => true, - }) - }, - TransitionFrontierCandidateAction::LongRangeForkResolve { hash } => { - state - .transition_frontier - .candidates - .blocks - .get(hash) - .is_some_and( |block| match state.transition_frontier.candidates.best_tip() { - Some(tip) => { - matches!( - &block.status, - TransitionFrontierCandidateStatus::ForkRangeDetected { compared_with, short_fork, .. } - if compared_with.as_ref() == Some(tip.hash) && !*short_fork - ) - } - None => false, - }) - }, - TransitionFrontierCandidateAction::BestTipUpdate { hash } => { - state - .transition_frontier - .candidates - .is_candidate_decided_to_use_as_tip(hash) - }, + } + TransitionFrontierCandidateAction::BlockSnarkVerifySuccess { hash } => state + .transition_frontier + .candidates + .get(hash) + .is_some_and(|block| block.status.is_snark_verify_pending()), + TransitionFrontierCandidateAction::BlockSnarkVerifyError { hash, .. 
} => state + .transition_frontier + .candidates + .get(hash) + .is_some_and(|block| block.status.is_snark_verify_pending()), TransitionFrontierCandidateAction::TransitionFrontierSyncTargetUpdate => { - let Some(best_tip) = state.transition_frontier.candidates.best_tip_block_with_hash() else { + let Some(best_candidate) = + state.transition_frontier.candidates.best_verified_block() + else { return false; }; // do not need to update transition frontier sync target. @@ -185,18 +100,28 @@ impl redux::EnablingCondition for TransitionFrontierCandidateActio state.transition_frontier.sync.best_tip(), ]) .flatten() - .any(|b| b.hash() == best_tip.hash() - || !consensus_take(b.consensus_state(), best_tip.consensus_state(), b.hash(), best_tip.hash())) { + .any(|b| { + b.hash() == best_candidate.hash() + || !consensus_take( + b.consensus_state(), + best_candidate.consensus_state(), + b.hash(), + best_candidate.hash(), + ) + }) { return false; } // has enough data - state.transition_frontier.candidates.best_tip_chain_proof(&state.transition_frontier).is_some() - }, - TransitionFrontierCandidateAction::P2pBestTipUpdate { .. } => true, + state + .transition_frontier + .candidates + .best_verified_block_chain_proof(&state.transition_frontier) + .is_some() + } TransitionFrontierCandidateAction::Prune => { - state.transition_frontier.candidates.best_tip().is_some() - }, + state.transition_frontier.candidates.best().is_some() + } } } } diff --git a/node/src/transition_frontier/candidate/transition_frontier_candidate_reducer.rs b/node/src/transition_frontier/candidate/transition_frontier_candidate_reducer.rs index 813c54e91a..a00b040f77 100644 --- a/node/src/transition_frontier/candidate/transition_frontier_candidate_reducer.rs +++ b/node/src/transition_frontier/candidate/transition_frontier_candidate_reducer.rs @@ -1,7 +1,6 @@ use openmina_core::{ block::{ArcBlockWithHash, BlockHash}, bug_condition, - consensus::{is_short_range_fork, long_range_fork_take, short_range_fork_take}, }; use snark::block_verify::{SnarkBlockVerifyAction, SnarkBlockVerifyError, SnarkBlockVerifyId}; @@ -17,10 +16,8 @@ use crate::{ }; use super::{ - ConsensusLongRangeForkDecision, ConsensusShortRangeForkDecision, TransitionFrontierCandidateAction, TransitionFrontierCandidateActionWithMetaRef, - TransitionFrontierCandidateState, TransitionFrontierCandidateStatus, - TransitionFrontierCandidatesState, + TransitionFrontierCandidateStatus, TransitionFrontierCandidatesState, }; impl TransitionFrontierCandidatesState { @@ -35,55 +32,55 @@ impl TransitionFrontierCandidatesState { let (action, meta) = action.split(); match action { - TransitionFrontierCandidateAction::BlockReceived { - hash, - block, - chain_proof, - } => { - state.blocks.insert( - hash.clone(), - TransitionFrontierCandidateState { - block: block.clone(), - status: TransitionFrontierCandidateStatus::Received { time: meta.time() }, - chain_proof: chain_proof.clone(), - }, - ); + TransitionFrontierCandidateAction::P2pBestTipUpdate { best_tip } => { + let dispatcher = state_context.into_dispatcher(); + dispatcher.push(TransitionFrontierCandidateAction::BlockReceived { + block: best_tip.clone(), + chain_proof: None, + }); + + dispatcher.push(TransitionFrontierSyncLedgerSnarkedAction::PeersQuery); + dispatcher.push(TransitionFrontierSyncLedgerStagedAction::PartsPeerFetchInit); + dispatcher.push(TransitionFrontierSyncAction::BlocksPeersQuery); + } + TransitionFrontierCandidateAction::BlockReceived { block, chain_proof } => { + state.add(meta.time(), block.clone(), 
chain_proof.clone()); // Dispatch let (dispatcher, state) = state_context.into_dispatcher_and_state(); - let hash = hash.clone(); - let block = ArcBlockWithHash { - hash: hash.clone(), - block: block.clone(), - }; - let allow_block_too_late = allow_block_too_late(state, &block); + let allow_block_too_late = allow_block_too_late(state, block); - match state.prevalidate_block(&block, allow_block_too_late) { + match state.prevalidate_block(block, allow_block_too_late) { Ok(()) => { dispatcher.push( - TransitionFrontierCandidateAction::BlockPrevalidateSuccess { hash }, + TransitionFrontierCandidateAction::BlockPrevalidateSuccess { + hash: block.hash().clone(), + }, ); } Err(error) => { dispatcher.push(TransitionFrontierCandidateAction::BlockPrevalidateError { - hash, + hash: block.hash().clone(), error, }); } } } + TransitionFrontierCandidateAction::BlockPrevalidateError { hash, error } => { + state.invalidate(hash, error.is_forever_invalid()); + } TransitionFrontierCandidateAction::BlockPrevalidateSuccess { hash } => { - let Some(block) = state.blocks.get_mut(hash) else { + state.update_status(hash, |_| TransitionFrontierCandidateStatus::Prevalidated); + let Some(block) = state.get(hash).map(|s| s.block.clone()) else { + bug_condition!("TransitionFrontierCandidateAction::BlockPrevalidateSuccess block not found but action enabled"); return; }; - block.status = TransitionFrontierCandidateStatus::Prevalidated; // Dispatch - let block = (hash.clone(), block.block.clone()).into(); let dispatcher = state_context.into_dispatcher(); dispatcher.push(SnarkBlockVerifyAction::Init { - block, + block: block.into(), on_init: redux::callback!( on_received_block_snark_verify_init((hash: BlockHash, req_id: SnarkBlockVerifyId)) -> crate::Action { TransitionFrontierCandidateAction::BlockSnarkVerifyPending { hash, req_id } @@ -98,185 +95,35 @@ impl TransitionFrontierCandidatesState { }), }); } - TransitionFrontierCandidateAction::BlockPrevalidateError { hash, .. } => { - state.blocks.remove(hash); - } TransitionFrontierCandidateAction::BlockChainProofUpdate { hash, chain_proof } => { - if state.best_tip.as_ref() == Some(hash) { - state.best_tip_chain_proof = Some(chain_proof.clone()); - } else if let Some(block) = state.blocks.get_mut(hash) { - block.chain_proof = Some(chain_proof.clone()); - } - - let (dispatcher, global_state) = state_context.into_dispatcher_and_state(); - if global_state - .transition_frontier - .candidates - .best_tip - .as_ref() - != Some(hash) - { - return; - } + state.set_chain_proof(hash, chain_proof.clone()); + let dispatcher = state_context.into_dispatcher(); dispatcher .push(TransitionFrontierCandidateAction::TransitionFrontierSyncTargetUpdate); } TransitionFrontierCandidateAction::BlockSnarkVerifyPending { req_id, hash } => { - if let Some(block) = state.blocks.get_mut(hash) { - block.status = TransitionFrontierCandidateStatus::SnarkVerifyPending { + state.update_status(hash, |_| { + TransitionFrontierCandidateStatus::SnarkVerifyPending { time: meta.time(), req_id: *req_id, - }; - } - } - TransitionFrontierCandidateAction::BlockSnarkVerifySuccess { hash } => { - if let Some(block) = state.blocks.get_mut(hash) { - block.status = - TransitionFrontierCandidateStatus::SnarkVerifySuccess { time: meta.time() }; - } - - // Dispatch - let hash = hash.clone(); - let dispatcher = state_context.into_dispatcher(); - dispatcher.push(TransitionFrontierCandidateAction::DetectForkRange { hash }); - } - TransitionFrontierCandidateAction::BlockSnarkVerifyError { .. 
} => { - // TODO: handle block verification error. - } - TransitionFrontierCandidateAction::DetectForkRange { hash } => { - let candidate_hash = hash; - let Some(candidate_state) = state.blocks.get(candidate_hash) else { - return; - }; - let candidate = &candidate_state.block.header; - let (tip_hash, short_fork) = if let Some(tip_ref) = state.best_tip() { - let tip = tip_ref.header; - ( - Some(tip_ref.hash.clone()), - is_short_range_fork( - &candidate.protocol_state.body.consensus_state, - &tip.protocol_state.body.consensus_state, - ), - ) - } else { - (None, true) - }; - if let Some(candidate_state) = state.blocks.get_mut(candidate_hash) { - candidate_state.status = TransitionFrontierCandidateStatus::ForkRangeDetected { - time: meta.time(), - compared_with: tip_hash, - short_fork, - }; - openmina_core::log::debug!(openmina_core::log::system_time(); kind = "ConsensusAction::DetectForkRange", status = serde_json::to_string(&candidate_state.status).unwrap()); - } - openmina_core::log::debug!(openmina_core::log::system_time(); kind = "ConsensusAction::DetectForkRange"); - - // Dispatch - let hash = hash.clone(); - let dispatcher = state_context.into_dispatcher(); - dispatcher.push(TransitionFrontierCandidateAction::ShortRangeForkResolve { - hash: hash.clone(), - }); - dispatcher.push(TransitionFrontierCandidateAction::LongRangeForkResolve { hash }); - } - TransitionFrontierCandidateAction::ShortRangeForkResolve { hash } => { - let candidate_hash = hash; - if let Some(candidate) = state.blocks.get(candidate_hash) { - let (best_tip_hash, decision): (_, ConsensusShortRangeForkDecision) = - match state.best_tip() { - Some(tip) => (Some(tip.hash.clone()), { - let tip_cs = &tip.header.protocol_state.body.consensus_state; - let candidate_cs = - &candidate.block.header.protocol_state.body.consensus_state; - let (take, why) = short_range_fork_take( - tip_cs, - candidate_cs, - tip.hash, - candidate_hash, - ); - if take { - ConsensusShortRangeForkDecision::Take(why) - } else { - ConsensusShortRangeForkDecision::Keep(why) - } - }), - None => (None, ConsensusShortRangeForkDecision::TakeNoBestTip), - }; - if let Some(best_tip_hash) = &best_tip_hash { - openmina_core::log::info!(openmina_core::log::system_time(); best_tip_hash = best_tip_hash.to_string(), candidate_hash = candidate_hash.to_string(), decision = format!("{decision:?}")); - } - if let Some(candidate) = state.blocks.get_mut(candidate_hash) { - if !decision.use_as_best_tip() { - candidate.chain_proof = None; - } - - candidate.status = - TransitionFrontierCandidateStatus::ShortRangeForkResolve { - time: meta.time(), - compared_with: best_tip_hash, - decision, - }; } - } - - // Dispatch - let hash = hash.clone(); - let dispatcher = state_context.into_dispatcher(); - dispatcher.push(TransitionFrontierCandidateAction::BestTipUpdate { hash }); + }); } - TransitionFrontierCandidateAction::LongRangeForkResolve { hash } => { - openmina_core::log::debug!(openmina_core::log::system_time(); kind = "ConsensusAction::LongRangeForkResolve"); - let candidate_hash = hash; - let Some(tip_ref) = state.best_tip() else { - return; - }; - let Some(candidate_state) = state.blocks.get(candidate_hash) else { - return; - }; - openmina_core::log::debug!(openmina_core::log::system_time(); kind = "ConsensusAction::LongRangeForkResolve", pre_status = serde_json::to_string(&candidate_state.status).unwrap()); - let tip_hash = tip_ref.hash.clone(); - let tip = tip_ref.header; - let tip_cs = &tip.protocol_state.body.consensus_state; - let candidate = 
&candidate_state.block.header; - let candidate_cs = &candidate.protocol_state.body.consensus_state; - - let (take, why) = - long_range_fork_take(tip_cs, candidate_cs, &tip_hash, candidate_hash); - - let Some(candidate_state) = state.blocks.get_mut(candidate_hash) else { - return; - }; - candidate_state.status = TransitionFrontierCandidateStatus::LongRangeForkResolve { - time: meta.time(), - compared_with: tip_hash, - decision: if take { - ConsensusLongRangeForkDecision::Take(why) - } else { - candidate_state.chain_proof = None; - ConsensusLongRangeForkDecision::Keep(why) - }, - }; - openmina_core::log::debug!(openmina_core::log::system_time(); kind = "ConsensusAction::LongRangeForkResolve", status = serde_json::to_string(&candidate_state.status).unwrap()); - - // Dispatch - let hash = hash.clone(); - let dispatcher = state_context.into_dispatcher(); - dispatcher.push(TransitionFrontierCandidateAction::BestTipUpdate { hash }); + TransitionFrontierCandidateAction::BlockSnarkVerifyError { hash, .. } => { + state.invalidate(hash, true); } - TransitionFrontierCandidateAction::BestTipUpdate { hash } => { - state.best_tip = Some(hash.clone()); - - if let Some(tip) = state.blocks.get_mut(hash) { - state.best_tip_chain_proof = tip.chain_proof.take(); - } + TransitionFrontierCandidateAction::BlockSnarkVerifySuccess { hash } => { + state.update_status(hash, |_| { + TransitionFrontierCandidateStatus::SnarkVerifySuccess { time: meta.time() } + }); // Dispatch let (dispatcher, global_state) = state_context.into_dispatcher_and_state(); let Some(block) = global_state .transition_frontier .candidates - .best_tip_block_with_hash() + .best_verified_block() else { return; }; @@ -295,10 +142,7 @@ impl TransitionFrontierCandidatesState { } TransitionFrontierCandidateAction::TransitionFrontierSyncTargetUpdate => { let (dispatcher, state) = state_context.into_dispatcher_and_state(); - let Some(best_tip) = state - .transition_frontier - .candidates - .best_tip_block_with_hash() + let Some(best_tip) = state.transition_frontier.candidates.best_verified_block() else { bug_condition!( "ConsensusAction::TransitionFrontierSyncTargetUpdate | no chosen best tip" @@ -309,7 +153,7 @@ impl TransitionFrontierCandidatesState { let Some((blocks_inbetween, root_block)) = state .transition_frontier .candidates - .best_tip_chain_proof(&state.transition_frontier) + .best_verified_block_chain_proof(&state.transition_frontier) else { bug_condition!("ConsensusAction::TransitionFrontierSyncTargetUpdate | no best tip chain proof"); return; @@ -322,44 +166,14 @@ impl TransitionFrontierCandidatesState { dispatcher.push(TransitionFrontierSyncAction::BestTipUpdate { previous_root_snarked_ledger_hash, - best_tip, + best_tip: best_tip.clone(), root_block, blocks_inbetween, on_success: None, }); } - TransitionFrontierCandidateAction::P2pBestTipUpdate { best_tip } => { - let dispatcher = state_context.into_dispatcher(); - dispatcher.push(TransitionFrontierCandidateAction::BlockReceived { - hash: best_tip.hash.clone(), - block: best_tip.block.clone(), - chain_proof: None, - }); - - dispatcher.push(TransitionFrontierSyncLedgerSnarkedAction::PeersQuery); - dispatcher.push(TransitionFrontierSyncLedgerStagedAction::PartsPeerFetchInit); - dispatcher.push(TransitionFrontierSyncAction::BlocksPeersQuery); - } TransitionFrontierCandidateAction::Prune => { - let Some(best_tip_hash) = state.best_tip.clone() else { - return; - }; - let blocks = &mut state.blocks; - - // keep at most latest 32 candidate blocks. 
- let blocks_to_keep = (0..32) - .scan(best_tip_hash, |block_hash, _| { - let block_state = blocks.remove(block_hash)?; - let block_hash = match block_state.status.compared_with() { - None => block_hash.clone(), - Some(compared_with) => { - std::mem::replace(block_hash, compared_with.clone()) - } - }; - Some((block_hash, block_state)) - }) - .collect(); - *blocks = blocks_to_keep; + state.prune(); } } } diff --git a/node/src/transition_frontier/candidate/transition_frontier_candidate_state.rs b/node/src/transition_frontier/candidate/transition_frontier_candidate_state.rs index 40a4abafac..136d476185 100644 --- a/node/src/transition_frontier/candidate/transition_frontier_candidate_state.rs +++ b/node/src/transition_frontier/candidate/transition_frontier_candidate_state.rs @@ -1,14 +1,11 @@ -use std::collections::BTreeMap; -use std::sync::Arc; +use std::collections::{BTreeMap, BTreeSet}; -use mina_p2p_messages::v2::{ - MinaBlockBlockStableV2, MinaBlockHeaderStableV2, StagedLedgerDiffDiffStableV2, StateHash, -}; +use mina_p2p_messages::v2::StateHash; use serde::{Deserialize, Serialize}; -use openmina_core::block::{ArcBlockWithHash, BlockWithHash}; +use openmina_core::block::ArcBlockWithHash; use openmina_core::consensus::{ - ConsensusLongRangeForkDecisionReason, ConsensusShortRangeForkDecisionReason, + consensus_take, ConsensusLongRangeForkDecisionReason, ConsensusShortRangeForkDecisionReason, }; use crate::snark::block_verify::SnarkBlockVerifyId; @@ -52,21 +49,6 @@ pub enum TransitionFrontierCandidateStatus { SnarkVerifySuccess { time: redux::Timestamp, }, - ForkRangeDetected { - time: redux::Timestamp, - compared_with: Option, - short_fork: bool, - }, - ShortRangeForkResolve { - time: redux::Timestamp, - compared_with: Option, - decision: ConsensusShortRangeForkDecision, - }, - LongRangeForkResolve { - time: redux::Timestamp, - compared_with: StateHash, - decision: ConsensusLongRangeForkDecision, - }, } impl TransitionFrontierCandidateStatus { @@ -89,41 +71,63 @@ impl TransitionFrontierCandidateStatus { pub fn is_pending(&self) -> bool { matches!(self, Self::SnarkVerifyPending { .. }) } - - pub fn compared_with(&self) -> Option<&StateHash> { - match self { - Self::ShortRangeForkResolve { compared_with, .. 
} => compared_with.as_ref(), - _ => None, - } - } } #[derive(Serialize, Deserialize, Debug, Clone)] pub struct TransitionFrontierCandidateState { - pub block: Arc<MinaBlockBlockStableV2>, + pub block: ArcBlockWithHash, pub status: TransitionFrontierCandidateStatus, pub chain_proof: Option<(Vec<StateHash>, ArcBlockWithHash)>, } +impl Ord for TransitionFrontierCandidateState { + fn cmp(&self, other: &Self) -> std::cmp::Ordering { + if self.eq(other) { + return std::cmp::Ordering::Equal; + } + let is_candidate_better = consensus_take( + self.block.consensus_state(), + other.block.consensus_state(), + self.block.hash(), + other.block.hash(), + ); + match is_candidate_better { + true => std::cmp::Ordering::Less, + false => std::cmp::Ordering::Greater, + } + } +} + +impl PartialOrd for TransitionFrontierCandidateState { + fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> { + Some(self.cmp(other)) + } +} + +impl Eq for TransitionFrontierCandidateState {} + +impl PartialEq for TransitionFrontierCandidateState { + fn eq(&self, other: &Self) -> bool { + self.block.hash() == other.block.hash() + } +} + impl TransitionFrontierCandidateState { pub fn height(&self) -> u32 { - self.block - .header - .protocol_state - .body - .consensus_state - .blockchain_length - .0 - .0 + self.block.height() } } #[derive(Serialize, Deserialize, Debug, Clone, Default)] pub struct TransitionFrontierCandidatesState { - pub blocks: BTreeMap<StateHash, TransitionFrontierCandidateState>, - // TODO(binier): rename to best candidate. Best tip will be in transition_frontier state. - pub best_tip: Option<StateHash>, - pub best_tip_chain_proof: Option<(Vec<StateHash>, ArcBlockWithHash)>, + /// Maintains an ordered list of transition frontier candidates, + /// ordered by consensus rules from worst to best. + ordered: BTreeSet<TransitionFrontierCandidateState>,
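The `Ord` implementation above is the load-bearing piece of this refactor: `consensus_take` ranks candidates from worst to best, so the last element of the `ordered` set is the best candidate, and a reverse scan yields the best candidate satisfying a predicate. A toy model of that invariant (a sketch with stand-in types: a plain `strength` field replaces the real consensus comparison, `verified` plays the role of `SnarkVerifySuccess`):

```rust
use std::collections::BTreeSet;

#[derive(PartialEq, Eq, PartialOrd, Ord)]
struct Candidate {
    strength: u64, // stand-in for the consensus-based ranking
    verified: bool,
}

fn main() {
    let mut ordered = BTreeSet::new();
    ordered.insert(Candidate { strength: 3, verified: false });
    ordered.insert(Candidate { strength: 2, verified: true });
    ordered.insert(Candidate { strength: 1, verified: true });

    // `best()` is simply the last (greatest) element.
    assert_eq!(ordered.last().map(|c| c.strength), Some(3));
    // `best_verified()` is the first hit of a reverse scan.
    let best_verified = ordered.iter().rev().find(|c| c.verified);
    assert_eq!(best_verified.map(|c| c.strength), Some(2));
}
```

Note that `PartialEq`/`Eq` compare only the block hash while `Ord` compares consensus state; keeping the two consistent is why `cmp` short-circuits to `Equal` whenever `eq` holds.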
+ /// Candidate block hashes which failed either prevalidation + /// or block proof verification. We move them here so that they + /// consume less memory while still preventing us from triggering + /// revalidation for an invalid block if we receive it on p2p again. + invalid: BTreeMap<StateHash, u32>, } impl TransitionFrontierCandidatesState { @@ -131,71 +135,130 @@ impl TransitionFrontierCandidatesState { Self::default() } - pub fn best_tip_block_with_hash(&self) -> Option<BlockWithHash<Arc<MinaBlockBlockStableV2>>> { - let hash = self.best_tip.as_ref()?; - let block = self.blocks.get(hash)?; - Some(BlockWithHash { - hash: hash.clone(), - block: block.block.clone(), - }) + pub fn contains(&self, hash: &StateHash) -> bool { + self.invalid.contains_key(hash) || self.get(hash).is_some() + } + + pub(super) fn get(&self, hash: &StateHash) -> Option<&TransitionFrontierCandidateState> { + self.ordered.iter().rev().find(|s| s.block.hash() == hash) + } + + pub(super) fn add( + &mut self, + time: redux::Timestamp, + block: ArcBlockWithHash, + chain_proof: Option<(Vec<StateHash>, ArcBlockWithHash)>, + ) { + self.ordered.insert(TransitionFrontierCandidateState { + block, + status: TransitionFrontierCandidateStatus::Received { time }, + chain_proof, + }); + } + + fn update( + &mut self, + hash: &StateHash, + update: impl FnOnce(TransitionFrontierCandidateState) -> TransitionFrontierCandidateState, + ) -> bool { + let Some(state) = self.get(hash).cloned() else { + return false; + }; + self.ordered.remove(&state); + self.ordered.insert(update(state)); + true } - pub fn best_tip(&self) -> Option<BlockRef<'_>> { - self.best_tip.as_ref().and_then(|hash| { - let block = self.blocks.get(hash)?; - Some(BlockRef { - hash, - header: &block.block.header, - body: &block.block.body.staged_ledger_diff, - status: &block.status, - }) + pub(super) fn update_status( + &mut self, + hash: &StateHash, + update: impl FnOnce(TransitionFrontierCandidateStatus) -> TransitionFrontierCandidateStatus, + ) -> bool { + self.update(hash, move |mut state| { + state.status = update(state.status); + state }) } - pub fn previous_best_tip(&self) -> Option<BlockRef<'_>> { - self.best_tip.as_ref().and_then(|hash| { - let block = self.blocks.get(hash)?; - let prev_hash = block.status.compared_with()?; - let prev = self.blocks.get(prev_hash)?; - Some(BlockRef { - hash: prev_hash, - header: &prev.block.header, - body: &prev.block.body.staged_ledger_diff, - status: &prev.status, - }) + pub(super) fn invalidate(&mut self, hash: &StateHash, is_forever_invalid: bool) { + self.ordered.retain(|s| { + if s.block.hash() == hash { + if is_forever_invalid { + self.invalid.insert(hash.clone(), s.block.global_slot()); + } + false + } else { + true + } + }); + } + + pub(super) fn set_chain_proof( + &mut self, + hash: &StateHash, + chain_proof: (Vec<StateHash>, ArcBlockWithHash), + ) -> bool { + self.update(hash, move |mut s| { + s.chain_proof = Some(chain_proof); + s }) } - pub fn is_candidate_decided_to_use_as_tip(&self, hash: &StateHash) -> bool { - let Some(candidate) = self.blocks.get(hash) else { - return false; + pub(super) fn prune(&mut self) { + let mut has_reached_best_candidate = false; + let Some(best_candidate_hash) = self.best_verified().map(|s| s.block.hash().clone()) else { + return; }; - match &candidate.status { - TransitionFrontierCandidateStatus::Received { .. } => false, - TransitionFrontierCandidateStatus::Prevalidated => false, - TransitionFrontierCandidateStatus::SnarkVerifyPending { .. } => false, - TransitionFrontierCandidateStatus::SnarkVerifySuccess { .. } => false, - TransitionFrontierCandidateStatus::ForkRangeDetected { .. } => false, - TransitionFrontierCandidateStatus::ShortRangeForkResolve { - compared_with, - decision, - .. - } => decision.use_as_best_tip() && &self.best_tip == compared_with, - TransitionFrontierCandidateStatus::LongRangeForkResolve { - compared_with, - decision, - ..
- } => decision.use_as_best_tip() && self.best_tip.as_ref() == Some(compared_with), - } + + // prune all blocks that are worse (consensus-wise) than the best + // verified candidate. + self.ordered.retain(|s| { + if s.block.hash() == &best_candidate_hash { + // prune all invalid block hashes which are for older + // slots than the current best candidate. + let best_candidate_slot = s.block.global_slot(); + self.invalid.retain(|_, slot| *slot >= best_candidate_slot); + + has_reached_best_candidate = true; + } + + has_reached_best_candidate + }); + } + + pub(super) fn best(&self) -> Option<&TransitionFrontierCandidateState> { + self.ordered.last() + } + + pub fn best_verified(&self) -> Option<&TransitionFrontierCandidateState> { + self.ordered + .iter() + .rev() + .find(|s| s.status.is_snark_verify_success()) } - pub fn best_tip_chain_proof( + pub fn is_chain_proof_needed(&self, hash: &StateHash) -> bool { + self.get(hash).is_some_and(|s| s.chain_proof.is_none()) + } + + pub fn best_verified_block(&self) -> Option<&ArcBlockWithHash> { + self.best_verified().map(|s| &s.block) + } + + pub fn best_verified_block_chain_proof( &self, transition_frontier: &TransitionFrontierState, ) -> Option<(Vec<StateHash>, ArcBlockWithHash)> { - let best_tip = self.best_tip_block_with_hash()?; - let pred_hash = best_tip.pred_hash(); - self.best_tip_chain_proof.clone().or_else(|| { + self.block_chain_proof(self.best_verified()?, transition_frontier) + } + + fn block_chain_proof( + &self, + block_state: &TransitionFrontierCandidateState, + transition_frontier: &TransitionFrontierState, + ) -> Option<(Vec<StateHash>, ArcBlockWithHash)> { + let pred_hash = block_state.block.pred_hash(); + block_state.chain_proof.clone().or_else(|| { let old_best_tip = transition_frontier.best_tip()?; let mut iter = transition_frontier.best_chain.iter(); if old_best_tip.hash() == pred_hash { @@ -215,23 +278,3 @@ impl TransitionFrontierCandidatesState { }) } } - -#[derive(Serialize, Debug, Clone, Copy)] -pub struct BlockRef<'a> { - pub hash: &'a StateHash, - pub header: &'a MinaBlockHeaderStableV2, - pub body: &'a StagedLedgerDiffDiffStableV2, - pub status: &'a TransitionFrontierCandidateStatus, -} - -impl BlockRef<'_> { - pub fn height(&self) -> u32 { - self.header - .protocol_state - .body - .consensus_state - .blockchain_length - .0 - .0 - } -} diff --git a/node/src/transition_frontier/genesis/transition_frontier_genesis_config.rs b/node/src/transition_frontier/genesis/transition_frontier_genesis_config.rs index 6fe757c913..82e609113d 100644 --- a/node/src/transition_frontier/genesis/transition_frontier_genesis_config.rs +++ b/node/src/transition_frontier/genesis/transition_frontier_genesis_config.rs @@ -495,7 +495,8 @@ impl GenesisConfig { fn build_ledger_from_accounts( accounts: impl IntoIterator>, ) -> Result<(ledger::Mask, v2::CurrencyAmountStableV1), InvalidBigInt> { - let db = ledger::Database::create(constraint_constants().ledger_depth as u8); + let db = + ledger::Database::create_with_token_owners(constraint_constants().ledger_depth as u8); let mask = ledger::Mask::new_root(db); let (mask, total_currency) = accounts.into_iter().try_fold( (mask, 0),
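One subtlety in the `prune` implementation above: it relies on `BTreeSet::retain` visiting elements in ascending order, so flipping a flag when the best verified candidate is reached drops everything ranked below it and keeps everything from that point on. Reduced to integers (a sketch of just that mechanism):

```rust
use std::collections::BTreeSet;

// `retain` visits elements in ascending order, so everything ordered below
// the pivot is dropped; the pivot and everything above it survive.
fn prune_below(set: &mut BTreeSet<u32>, pivot: u32) {
    let mut reached = false;
    set.retain(|&v| {
        if v == pivot {
            reached = true;
        }
        reached
    });
}

fn main() {
    let mut set: BTreeSet<u32> = (1..=5).collect();
    prune_below(&mut set, 3);
    assert_eq!(set.into_iter().collect::<Vec<_>>(), vec![3, 4, 5]);
}
```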
diff --git a/node/src/transition_frontier/sync/ledger/snarked/transition_frontier_sync_ledger_snarked_reducer.rs b/node/src/transition_frontier/sync/ledger/snarked/transition_frontier_sync_ledger_snarked_reducer.rs index 06efff4cdb..d564ee8dbf 100644 --- a/node/src/transition_frontier/sync/ledger/snarked/transition_frontier_sync_ledger_snarked_reducer.rs +++ b/node/src/transition_frontier/sync/ledger/snarked/transition_frontier_sync_ledger_snarked_reducer.rs @@ -6,6 +6,7 @@ use p2p::{ disconnection::{P2pDisconnectionAction, P2pDisconnectionReason}, PeerId, }; +use rand::prelude::*; use crate::{ ledger::{ @@ -54,7 +55,7 @@ impl TransitionFrontierSyncLedgerSnarkedState { .filter(|(_, p)| p.channels.rpc.can_send_request()) .map(|(id, p)| (*id, p.connected_since)) .collect::<Vec<_>>(); - peer_ids.sort_by(|(_, t1), (_, t2)| t2.cmp(t1)); + peer_ids.shuffle(&mut global_state.pseudo_rng()); if is_num_accounts_pending { for (peer_id, _) in peer_ids { @@ -93,18 +94,21 @@ impl TransitionFrontierSyncLedgerSnarkedState { } } - match addresses.pop() { - Some((address, expected_hash)) => { - dispatcher.push( - TransitionFrontierSyncLedgerSnarkedAction::PeerQueryAddressInit { - peer_id, - expected_hash, - address, - }, - ); + if let Some((address, expected_hash)) = addresses.last().cloned() { + if dispatcher.push_if_enabled( + TransitionFrontierSyncLedgerSnarkedAction::PeerQueryAddressInit { + peer_id, + expected_hash, + address, + }, + global_state, + meta.time(), + ) { + addresses.pop(); + continue; } - None if retry_addresses.is_empty() => break, - None => {} + } else if retry_addresses.is_empty() { + break; } } }
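The rewritten dispatch loop above is a peek-then-commit pattern: an address is only popped off the work list once `push_if_enabled` confirms the action's enabling condition accepted it, so a rejected dispatch leaves the address queued for another peer or a later retry. The shape of it in isolation (a sketch; `try_dispatch` stands in for `dispatcher.push_if_enabled(..)`):

```rust
/// Peek at the last queued item and consume it only when dispatch succeeds.
fn drain_while_enabled<T: Clone>(queue: &mut Vec<T>, mut try_dispatch: impl FnMut(T) -> bool) {
    while let Some(item) = queue.last().cloned() {
        if !try_dispatch(item) {
            break;
        }
        queue.pop();
    }
}

fn main() {
    let mut queue = vec![1, 2, 3, 4];
    // Pretend the enabling condition only accepts items greater than 2;
    // items are taken from the back, like `addresses` above.
    drain_while_enabled(&mut queue, |n| n > 2);
    assert_eq!(queue, vec![1, 2]);
}
```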
diff --git a/node/src/transition_frontier/sync/transition_frontier_sync_actions.rs b/node/src/transition_frontier/sync/transition_frontier_sync_actions.rs index 516df9eacf..44ff17ce0a 100644 --- a/node/src/transition_frontier/sync/transition_frontier_sync_actions.rs +++ b/node/src/transition_frontier/sync/transition_frontier_sync_actions.rs @@ -137,12 +137,12 @@ impl redux::EnablingCondition for TransitionFrontierSyncAction { && state .transition_frontier .best_tip() - .map_or(true, |tip| best_tip.hash != tip.hash) + .is_none_or(|tip| best_tip.hash != tip.hash) && state .transition_frontier .candidates - .best_tip() - .is_some_and(|tip| &best_tip.hash == tip.hash) + .best_verified_block() + .is_some_and(|block| best_tip.hash() == block.hash()) } TransitionFrontierSyncAction::BestTipUpdate { best_tip, @@ -161,7 +161,7 @@ impl redux::EnablingCondition for TransitionFrontierSyncAction { .transition_frontier .sync .best_tip() - .map_or(true, |tip| best_tip.hash != tip.hash) + .is_none_or(|tip| best_tip.hash != tip.hash) // TODO(binier): TMP. we shouldn't need to check consensus here. && state .transition_frontier @@ -194,7 +194,7 @@ impl redux::EnablingCondition for TransitionFrontierSyncAction { // TODO(binier): check if candidate best tip is short or // long range fork and based on that compare slot that // we are producing. - .map_or(true, |won_slot| won_slot < best_tip) + .is_none_or(|won_slot| won_slot < best_tip) } TransitionFrontierSyncAction::LedgerStakingPending => { matches!( diff --git a/node/src/transition_frontier/sync/transition_frontier_sync_effects.rs b/node/src/transition_frontier/sync/transition_frontier_sync_effects.rs index 8ba7ae8071..764d1ac818 100644 --- a/node/src/transition_frontier/sync/transition_frontier_sync_effects.rs +++ b/node/src/transition_frontier/sync/transition_frontier_sync_effects.rs @@ -4,7 +4,7 @@ use p2p::channels::rpc::{P2pChannelsRpcAction, P2pRpcId}; use p2p::{P2pNetworkPubsubAction, PeerId}; use redux::ActionMeta; -use crate::ledger::write::{LedgerWriteAction, LedgerWriteRequest}; +use crate::ledger::write::{LedgerWriteAction, LedgerWriteRequest, LedgersToKeep}; use crate::p2p::channels::rpc::P2pRpcRequest; use crate::service::TransitionFrontierSyncLedgerSnarkedService; use crate::{p2p_ready, Service, Store, TransitionFrontierAction}; @@ -324,10 +324,7 @@ impl TransitionFrontierSyncAction { } } TransitionFrontierSyncAction::BlocksSendToArchive { data, .. } => { - // Should be safe to unwrap because archive mode contains the necessary data, and this action is only called in archive mode - if let Ok(data) = data.try_into() { - store.service().send_to_archive(data); - } + store.service().send_to_archive(data.clone()); } TransitionFrontierSyncAction::BlocksSuccess => {} // Bootstrap/Catchup is practically complete at this point. @@ -354,16 +351,8 @@ impl TransitionFrontierSyncAction { }; let ledgers_to_keep = chain .iter() - .flat_map(|b| { - [ - b.snarked_ledger_hash(), - b.merkle_root_hash(), - b.staking_epoch_ledger_hash(), - b.next_epoch_ledger_hash(), - ] - }) - .cloned() - .collect(); + .map(|block| &block.block) + .collect::<LedgersToKeep>(); let mut root_snarked_ledger_updates = root_snarked_ledger_updates.clone(); if transition_frontier .best_chain diff --git a/node/src/transition_frontier/sync/transition_frontier_sync_reducer.rs b/node/src/transition_frontier/sync/transition_frontier_sync_reducer.rs index 4af35b667b..5d7a21eac4 100644 --- a/node/src/transition_frontier/sync/transition_frontier_sync_reducer.rs +++ b/node/src/transition_frontier/sync/transition_frontier_sync_reducer.rs @@ -734,7 +734,7 @@ fn next_required_ledger_to_sync( let (kind, ledger) = if old_best_tip.staking_epoch_ledger_hash() != new_best_tip.staking_epoch_ledger_hash() - && cur_best_tip.map_or(true, |cur| { + && cur_best_tip.is_none_or(|cur| { cur.staking_epoch_ledger_hash() != new_best_tip.staking_epoch_ledger_hash() }) { let ledger = TransitionFrontierSyncLedgerSnarkedState::pending( @@ -744,7 +744,7 @@ fn next_required_ledger_to_sync( .into(); (SyncLedgerTargetKind::StakingEpoch, ledger) } else if old_best_tip.next_epoch_ledger_hash() != new_best_tip.next_epoch_ledger_hash() - && cur_best_tip.map_or(true, |cur| { + && cur_best_tip.is_none_or(|cur| { cur.staking_epoch_ledger_hash() != new_best_tip.staking_epoch_ledger_hash() }) && next_epoch_target.is_some() diff --git a/node/src/transition_frontier/transition_frontier_actions.rs b/node/src/transition_frontier/transition_frontier_actions.rs index 6a1e64db84..6ed9dcf7d4 100644 --- a/node/src/transition_frontier/transition_frontier_actions.rs +++ b/node/src/transition_frontier/transition_frontier_actions.rs @@ -60,9 +60,10 @@ impl redux::EnablingCondition for TransitionFrontierAction { let Some(genesis) = state.transition_frontier.genesis.proven_block() else { return false; }; - state.transition_frontier.root().map_or(true, |b| { - b.is_genesis() && 
!Arc::ptr_eq(&genesis.block, &b.block) - }) + state + .transition_frontier + .root() + .is_none_or(|b| b.is_genesis() && !Arc::ptr_eq(&genesis.block, &b.block)) } TransitionFrontierAction::Candidate(a) => a.is_enabled(state, time), TransitionFrontierAction::Sync(a) => a.is_enabled(state, time), diff --git a/node/src/transition_frontier/transition_frontier_effects.rs b/node/src/transition_frontier/transition_frontier_effects.rs index cb990619a8..c92e279011 100644 --- a/node/src/transition_frontier/transition_frontier_effects.rs +++ b/node/src/transition_frontier/transition_frontier_effects.rs @@ -245,7 +245,7 @@ pub fn transition_frontier_effects( } else if let Some(index) = chain.len().checked_sub(height_diff.saturating_add(1)) { - chain.get(index).map_or(true, |b2| b1.hash() != b2.hash()) + chain.get(index).is_none_or(|b2| b1.hash() != b2.hash()) } else { true } diff --git a/node/src/watched_accounts/watched_accounts_actions.rs b/node/src/watched_accounts/watched_accounts_actions.rs index dd18630856..ca23a67420 100644 --- a/node/src/watched_accounts/watched_accounts_actions.rs +++ b/node/src/watched_accounts/watched_accounts_actions.rs @@ -64,15 +64,22 @@ fn should_request_ledger_initial_state(state: &crate::State, pub_key: &NonZeroCu state .watched_accounts .get(pub_key) - .filter(|_| state.transition_frontier.candidates.best_tip.is_some()) + .filter(|_| { + state + .transition_frontier + .candidates + .best_verified() + .is_some() + }) .is_some_and(|a| match &a.initial_state { WatchedAccountLedgerInitialState::Idle { .. } => true, WatchedAccountLedgerInitialState::Error { .. } => true, WatchedAccountLedgerInitialState::Pending { block, .. } => { - let Some(best_tip) = state.transition_frontier.candidates.best_tip() else { + let Some(best_tip) = state.transition_frontier.candidates.best_verified_block() + else { return false; }; - &block.hash != best_tip.hash + &block.hash != best_tip.hash() } // TODO(binier) WatchedAccountLedgerInitialState::Success { .. 
} => false, diff --git a/node/testing/Cargo.toml b/node/testing/Cargo.toml index bd82527d62..4cefc707ae 100644 --- a/node/testing/Cargo.toml +++ b/node/testing/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "openmina-node-testing" -version = "0.14.0" +version = "0.16.0" edition = "2021" license = "Apache-2.0" diff --git a/node/testing/src/cluster/mod.rs b/node/testing/src/cluster/mod.rs index e1a8a300f3..3d2ebed61b 100644 --- a/node/testing/src/cluster/mod.rs +++ b/node/testing/src/cluster/mod.rs @@ -5,6 +5,7 @@ mod p2p_task_spawner; mod node_id; pub use node_id::{ClusterNodeId, ClusterOcamlNodeId}; +use openmina_core::channels::Aborter; pub mod runner; @@ -19,7 +20,6 @@ use libp2p::futures::{stream::FuturesUnordered, StreamExt}; use ledger::proofs::provers::BlockProver; use node::account::{AccountPublicKey, AccountSecretKey}; -use node::core::channels::mpsc; use node::core::consensus::ConsensusConstants; use node::core::constants::constraint_constants; use node::core::invariants::InvariantsState; @@ -208,7 +208,8 @@ impl Cluster { let node_config = testing_config.clone(); let node_id = ClusterNodeId::new_unchecked(self.nodes.len()); let work_dir = TempDir::new().unwrap(); - let (shutdown_tx, shutdown_rx) = mpsc::channel(1); + let shutdown_initiator = Aborter::default(); + let shutdown_listener = shutdown_initiator.aborted(); let p2p_sec_key = match testing_config.peer_id { TestPeerId::Derived => P2pSecretKey::deterministic(node_id.index()), TestPeerId::Bytes(bytes) => P2pSecretKey::from_bytes(bytes), @@ -271,6 +272,7 @@ impl Cluster { build: BuildEnv::get().into(), snarker: testing_config.snark_worker, consensus_constants: consensus_consts.clone(), + client_port: Some(http_port), testing_run: true, }, p2p: P2pConfig { @@ -306,7 +308,7 @@ impl Cluster { .ledger_init() .p2p_init_with_custom_task_spawner( p2p_sec_key.clone(), - p2p_task_spawner::P2pTaskSpawner::new(shutdown_tx.clone()), + p2p_task_spawner::P2pTaskSpawner::new(shutdown_listener.clone()), ) .gather_stats() .record(match testing_config.recorder { @@ -331,7 +333,7 @@ impl Cluster { .enable_all() .build() .unwrap(); - let shutdown = shutdown_tx.clone(); + let shutdown = shutdown_listener.clone(); let rpc_sender = real_service.rpc_sender(); thread::Builder::new() .name("openmina_http_server".to_owned()) @@ -339,7 +341,7 @@ impl Cluster { let local_set = tokio::task::LocalSet::new(); let task = async { tokio::select! 
{ - _ = shutdown.closed() => {} + _ = shutdown.wait() => {} _ = http_server::run(http_port, rpc_sender) => {} } }; @@ -349,7 +351,7 @@ impl Cluster { let invariants_state = self.invariants_state.clone(); let mut service = - NodeTestingService::new(real_service, node_id, invariants_state, shutdown_rx); + NodeTestingService::new(real_service, node_id, invariants_state, shutdown_initiator); service.set_proof_kind(self.config.proof_kind()); if self.config.all_rust_to_rust_use_webrtc() { diff --git a/node/testing/src/cluster/p2p_task_spawner.rs b/node/testing/src/cluster/p2p_task_spawner.rs index 2c0de726f3..4b5f31029a 100644 --- a/node/testing/src/cluster/p2p_task_spawner.rs +++ b/node/testing/src/cluster/p2p_task_spawner.rs @@ -1,14 +1,14 @@ -use node::core::channels::mpsc; use node::core::thread; use node::p2p::service_impl::TaskSpawner; +use openmina_core::channels::Aborted; #[derive(Clone)] pub struct P2pTaskSpawner { - shutdown: mpsc::Sender<()>, + shutdown: Aborted, } impl P2pTaskSpawner { - pub fn new(shutdown: mpsc::Sender<()>) -> Self { + pub fn new(shutdown: Aborted) -> Self { Self { shutdown } } } @@ -28,7 +28,7 @@ impl TaskSpawner for P2pTaskSpawner { .spawn(move || { let fut = async { tokio::select! { - _ = shutdown.closed() => {} + _ = shutdown.wait() => {} _ = fut => {} } }; diff --git a/node/testing/src/main.rs b/node/testing/src/main.rs index 57b362a036..6e57203fc5 100644 --- a/node/testing/src/main.rs +++ b/node/testing/src/main.rs @@ -40,6 +40,8 @@ pub struct CommandScenariosGenerate { pub name: Option, #[arg(long, short)] pub use_debugger: bool, + #[arg(long, short)] + pub webrtc: bool, } /// Run scenario located at `res/scenarios`. @@ -83,6 +85,9 @@ impl Command { if cmd.use_debugger { config.use_debugger(); } + if cmd.webrtc { + config.set_all_rust_to_rust_use_webrtc(); + } Ok(scenario.run_only_from_scratch(config)) }; let fut = async move { diff --git a/node/testing/src/node/rust/config.rs b/node/testing/src/node/rust/config.rs index 1ab15a902b..c0ca491d19 100644 --- a/node/testing/src/node/rust/config.rs +++ b/node/testing/src/node/rust/config.rs @@ -99,6 +99,11 @@ impl RustNodeTestingConfig { self } + pub fn initial_time(mut self, time: redux::Timestamp) -> Self { + self.initial_time = time; + self + } + pub fn with_peer_id(mut self, bytes: [u8; 32]) -> Self { self.peer_id = TestPeerId::Bytes(bytes); self diff --git a/node/testing/src/scenarios/mod.rs b/node/testing/src/scenarios/mod.rs index 06ba500a8f..18532e4c45 100644 --- a/node/testing/src/scenarios/mod.rs +++ b/node/testing/src/scenarios/mod.rs @@ -131,7 +131,7 @@ impl Scenarios { Self::SimulationSmall(_) => true, Self::SimulationSmallForeverRealTime(_) => true, Self::MultiNodePubsubPropagateBlock(_) => true, // in progress - Self::P2pSignaling(_) => cfg!(feature = "p2p-webrtc"), + Self::P2pSignaling(_) => !cfg!(feature = "p2p-webrtc"), _ => false, } } diff --git a/node/testing/src/scenarios/p2p/pubsub.rs b/node/testing/src/scenarios/p2p/pubsub.rs index e6fbf6ad8b..6561ecb465 100644 --- a/node/testing/src/scenarios/p2p/pubsub.rs +++ b/node/testing/src/scenarios/p2p/pubsub.rs @@ -18,7 +18,9 @@ pub struct P2pReceiveMessage; impl P2pReceiveMessage { pub async fn run(self, mut runner: ClusterRunner<'_>) { - let config = RustNodeTestingConfig::devnet_default().initial_peers(hosts::devnet()); + let config = RustNodeTestingConfig::devnet_default() + .initial_peers(hosts::devnet()) + .initial_time(redux::Timestamp::global_now()); let retransmitter_openmina_node = runner.add_rust_node(config); diff --git 
a/node/testing/src/scenarios/record_replay/block_production.rs b/node/testing/src/scenarios/record_replay/block_production.rs index 6f11922dee..a4230de90f 100644 --- a/node/testing/src/scenarios/record_replay/block_production.rs +++ b/node/testing/src/scenarios/record_replay/block_production.rs @@ -50,7 +50,8 @@ impl RecordReplayBlockProduction { let replayed_node = replay_state_with_input_actions( recording_dir.as_os_str().to_str().unwrap(), None, - |_, _| Ok(()), + false, + |_, _, _| Ok(()), ) .expect("replay failed"); diff --git a/node/testing/src/scenarios/record_replay/bootstrap.rs b/node/testing/src/scenarios/record_replay/bootstrap.rs index 382ac644ff..f081b98a6d 100644 --- a/node/testing/src/scenarios/record_replay/bootstrap.rs +++ b/node/testing/src/scenarios/record_replay/bootstrap.rs @@ -51,7 +51,8 @@ impl RecordReplayBootstrap { let replayed_node = replay_state_with_input_actions( recording_dir.as_os_str().to_str().unwrap(), None, - |_, _| Ok(()), + false, + |_, _, _| Ok(()), ) .expect("replay failed");
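The `Aborter`/`Aborted` pair that replaces the old `mpsc`-based shutdown signal throughout these testing changes behaves like a broadcast cancellation token: dropping the `Aborter` wakes every cloned `Aborted` listener. A minimal stand-in built on tokio's `watch` channel (an assumption for illustration — the real implementation lives in `openmina_core::channels`):

```rust
use tokio::sync::watch;

/// Owning half: dropping it closes the channel, waking all listeners.
struct Aborter(watch::Sender<()>);

/// Cloneable listener half.
#[derive(Clone)]
struct Aborted(watch::Receiver<()>);

impl Default for Aborter {
    fn default() -> Self {
        Self(watch::channel(()).0)
    }
}

impl Aborter {
    fn aborted(&self) -> Aborted {
        Aborted(self.0.subscribe())
    }
}

impl Aborted {
    /// Resolves once the `Aborter` has been dropped.
    async fn wait(mut self) {
        // `changed` only errors once the sender side is gone.
        while self.0.changed().await.is_ok() {}
    }
}
```

This matches the usage in the diff: listeners are cloned into spawned tasks and awaited inside `tokio::select!`, while the testing service merely holds the `Aborter` so that dropping the service tears everything down.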
diff --git a/node/testing/src/service/mod.rs b/node/testing/src/service/mod.rs index d3b77c84d9..673bc5f8cd 100644 --- a/node/testing/src/service/mod.rs +++ b/node/testing/src/service/mod.rs @@ -12,8 +12,8 @@ use ledger::scan_state::transaction_logic::{verifiable, WithStatus}; use ledger::Mask; use mina_p2p_messages::string::ByteString; use mina_p2p_messages::v2::{ - ArchiveTransitionFronntierDiff, CurrencyFeeStableV1, LedgerHash, LedgerProofProdStableV2, - MinaBaseProofStableV2, MinaStateSnarkedLedgerStateWithSokStableV2, NonZeroCurvePoint, + CurrencyFeeStableV1, LedgerHash, LedgerProofProdStableV2, MinaBaseProofStableV2, + MinaStateSnarkedLedgerStateWithSokStableV2, NonZeroCurvePoint, ProverExtendBlockchainInputStableV2, SnarkWorkerWorkerRpcsVersionedGetWorkV2TResponseA0Single, StateHash, TransactionSnarkStableV2, TransactionSnarkWorkTStableV2Proofs, }; @@ -24,6 +24,7 @@ use node::core::channels::mpsc; use node::core::invariants::InvariantsState; use node::core::snark::{Snark, SnarkJobId}; use node::external_snark_worker_effectful::ExternalSnarkWorkerEvent; +use node::ledger::write::BlockApplyResult; use node::p2p::service_impl::webrtc_with_libp2p::P2pServiceWebrtcWithLibp2p; use node::p2p::P2pCryptoService; use node::recorder::Recorder; @@ -52,6 +53,7 @@ use node::{ }, }; use node::{ActionWithMeta, State}; +use openmina_core::channels::Aborter; use openmina_node_native::NodeService; use redux::Instant; @@ -140,7 +142,7 @@ pub struct NodeTestingService { cluster_invariants_state: Arc>, /// Once dropped, it will cause all threads associated to shutdown. - _shutdown: mpsc::Receiver<()>, + _shutdown: Aborter, } impl NodeTestingService { @@ -148,7 +150,7 @@ real: NodeService, id: ClusterNodeId, cluster_invariants_state: Arc>, - _shutdown: mpsc::Receiver<()>, + _shutdown: Aborter, ) -> Self { Self { real, @@ -278,6 +280,10 @@ impl NodeTestingService { impl redux::Service for NodeTestingService {} impl node::Service for NodeTestingService { + fn queues(&mut self) -> node::service::Queues { + self.real.queues() + } + fn stats(&mut self) -> Option<&mut Stats> { self.real.stats() } @@ -360,7 +366,7 @@ impl P2pServiceWebrtc for NodeTestingService { P2pServiceWebrtc::event_sender(&self.real) } - fn cmd_sender(&self) -> &mpsc::UnboundedSender<Cmd> { + fn cmd_sender(&self) -> &mpsc::TrackedUnboundedSender<Cmd> { P2pServiceWebrtc::cmd_sender(&self.real) } @@ -501,7 +507,7 @@ impl BlockProducerVrfEvaluatorService for NodeTestingService { } impl ArchiveService for NodeTestingService { - fn send_to_archive(&mut self, data: ArchiveTransitionFronntierDiff) { + fn send_to_archive(&mut self, data: BlockApplyResult) { self.real.send_to_archive(data); } } @@ -538,7 +544,12 @@ impl BlockProducerService for NodeTestingService { &keypair, true, ) { - Err(ProofError::ConstraintsOk) => { + Err(e) + if matches!( + e.downcast_ref::<ProofError>(), + Some(ProofError::ConstraintsOk) + ) => + { let _ = self.real.event_sender().send(dummy_proof_event(block_hash)); } Err(err) => panic!("unexpected block proof generation error: {err:?}"), @@ -595,6 +606,7 @@ impl ExternalSnarkWorkerService for NodeTestingService { &mut self, public_key: NonZeroCurvePoint, fee: CurrencyFeeStableV1, + _: TransactionVerifier, ) -> Result<(), node::external_snark_worker::ExternalSnarkWorkerError> { let pub_key = AccountPublicKey::from(public_key); let sok_message = SokMessage::create( diff --git a/node/testing/src/service/rpc_service.rs b/node/testing/src/service/rpc_service.rs index 475a3339c1..43f63ca5e8 100644 --- a/node/testing/src/service/rpc_service.rs +++ b/node/testing/src/service/rpc_service.rs @@ -50,6 +50,14 @@ impl RpcService for super::NodeTestingService { respond_snark_pool_job_get, node::rpc::RpcSnarkPoolJobGetResponse, ); + to_real!( + respond_snark_pool_completed_jobs_get, + node::rpc::RpcSnarkPoolCompletedJobsResponse, + ); + to_real!( + respond_snark_pool_pending_jobs_get, + node::rpc::RpcSnarkPoolPendingJobsGetResponse + ); to_real!( respond_snarker_job_commit, node::rpc::RpcSnarkerJobCommitResponse, @@ -108,4 +116,26 @@ impl RpcService for super::NodeTestingService { respond_transaction_status, node::rpc::RpcTransactionStatusGetResponse, ); + to_real!(respond_block_get, node::rpc::RpcGetBlockResponse,); + to_real!( + respond_pooled_user_commands, + node::rpc::RpcPooledUserCommandsResponse, + ); + to_real!( + respond_pooled_zkapp_commands, + node::rpc::RpcPooledZkappCommandsResponse, + ); + to_real!(respond_genesis_block, node::rpc::RpcGenesisBlockResponse,); + to_real!( + respond_consensus_time_get, + node::rpc::RpcConsensusTimeGetResponse, + ); + to_real!( + respond_ledger_status_get, + node::rpc::RpcLedgerStatusGetResponse, + ); + to_real!( + respond_ledger_account_delegators_get, + node::rpc::RpcLedgerAccountDelegatorsGetResponse, + ); }
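The new `queues()` hook and the `mpsc::TrackedUnboundedSender` type it builds on suggest queue-length instrumentation on the node's internal channels. A guess at the underlying mechanism (an assumption, not the real `openmina_core` implementation): each payload travels with a guard whose `Drop` decrements a shared counter, which would also explain the `(msg, tracker)` tuples and `.0` destructuring that appear in the receiver code further down this diff:

```rust
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::Arc;
use tokio::sync::mpsc;

/// Guard sent along with each message; dropping it marks the message
/// as consumed by decrementing the in-flight counter.
pub struct Tracker(Arc<AtomicUsize>);

impl Drop for Tracker {
    fn drop(&mut self) {
        self.0.fetch_sub(1, Ordering::Relaxed);
    }
}

pub struct TrackedUnboundedSender<T> {
    inner: mpsc::UnboundedSender<(T, Tracker)>,
    in_flight: Arc<AtomicUsize>,
}

impl<T> TrackedUnboundedSender<T> {
    pub fn send(&self, msg: T) -> Result<(), mpsc::error::SendError<(T, Tracker)>> {
        self.in_flight.fetch_add(1, Ordering::Relaxed);
        self.inner.send((msg, Tracker(self.in_flight.clone())))
    }

    /// Messages sent but not yet consumed, e.g. what a `queues()`
    /// diagnostic could report.
    pub fn len(&self) -> usize {
        self.in_flight.load(Ordering::Relaxed)
    }
}
```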
diff --git a/node/web/Cargo.toml b/node/web/Cargo.toml index 11c07fdba0..cc3ee2f3aa 100644 --- a/node/web/Cargo.toml +++ b/node/web/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "openmina-node-web" -version = "0.14.0" +version = "0.16.0" edition = "2021" license = "Apache-2.0" diff --git a/node/web/src/node/builder.rs b/node/web/src/node/builder.rs index 06b6c7ba7d..0e5ed481f6 100644 --- a/node/web/src/node/builder.rs +++ b/node/web/src/node/builder.rs @@ -228,6 +228,7 @@ impl NodeBuilder { snarker: self.snarker, consensus_constants: consensus_consts.clone(), testing_run: false, + client_port: None, }, p2p: P2pConfig { libp2p_port: None, diff --git a/p2p/Cargo.toml b/p2p/Cargo.toml index 81dfc2e8be..da7160516e 100644 --- a/p2p/Cargo.toml +++ b/p2p/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "p2p" -version = "0.14.0" +version = "0.16.0" edition = "2021" license = "Apache-2.0" @@ -24,7 +24,7 @@ cfg-if = "1.0.0" url = "2.3.1" multihash = "0.18.1" sha2 = "0.10.6" -ed25519-dalek = { version = "2.1.1", features = ["serde"] } +ed25519-dalek = { version = "2.1.1", features = ["serde", "pem"] } x25519-dalek = { version = "2.0.1", features = ["static_secrets"] } aes-gcm = "0.10.3" faster-stun = { version = "1.0.1", optional = true } @@ -80,10 +80,11 @@ p2p-testing = { path = "testing" } [target.'cfg(not(target_arch = "wasm32"))'.dependencies] redux = { workspace = true, features = ["serializable_callbacks"] } tokio = { version = "1.26", features = ["rt"] } -webrtc = { git = "https://github.com/openmina/webrtc.git", rev = "e8705db39af1b198b324a5db6ff57fb213ba75e9", optional = true } +webrtc = { git = "https://github.com/openmina/webrtc.git", rev = "aeaa62682b97f6984627bedd6e6811fe17af18eb", optional = true } datachannel = { git = "https://github.com/openmina/datachannel-rs.git", rev = "1bfb064d0ff3e54a93ae0288409902aab8d102d3", optional = true, features = [ "vendored", ] } +rcgen = { version = "0.13", features = ["pem", "x509-parser"], optional = true } reqwest = { version = "0.11", features = ["json"] } mio = { version = "0.8.11", features = ["os-poll", "net"] } libc = { version = "0.2.151" } @@ -119,7 +120,7 @@ getrandom = { version = "0.2", features = ["js"] } [features] serializable_callbacks = [] p2p-webrtc = ["p2p-webrtc-rs"] -p2p-webrtc-rs = ["webrtc"] +p2p-webrtc-rs = ["webrtc", "rcgen"] p2p-webrtc-cpp = ["datachannel"] p2p-libp2p = ["fuzzing", "dep:reqwest", "dep:faster-stun"] fuzzing = ["openmina-fuzzer", "openmina-core/fuzzing"] diff --git a/p2p/libp2p-rpc-behaviour/Cargo.toml b/p2p/libp2p-rpc-behaviour/Cargo.toml index 4647e0eb6e..63fc874478 100644 --- a/p2p/libp2p-rpc-behaviour/Cargo.toml +++ b/p2p/libp2p-rpc-behaviour/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "libp2p-rpc-behaviour" -version = "0.14.0" +version = "0.16.0" edition = "2021" license = "Apache-2.0" diff --git a/p2p/src/channels/best_tip/p2p_channels_best_tip_actions.rs b/p2p/src/channels/best_tip/p2p_channels_best_tip_actions.rs index 03b3759110..3daff6326a 100644 --- a/p2p/src/channels/best_tip/p2p_channels_best_tip_actions.rs +++ b/p2p/src/channels/best_tip/p2p_channels_best_tip_actions.rs @@ -118,7 +118,7 @@ impl redux::EnablingCondition for P2pChannelsBestTipAction { } last_sent .as_ref() - .map_or(true, |sent| sent.hash != best_tip.hash) + .is_none_or(|sent| sent.hash != best_tip.hash) } _ => false, }), diff --git a/p2p/src/channels/snark/p2p_channels_snark_actions.rs b/p2p/src/channels/snark/p2p_channels_snark_actions.rs index dba737da18..8468d91402 100644 --- a/p2p/src/channels/snark/p2p_channels_snark_actions.rs +++ b/p2p/src/channels/snark/p2p_channels_snark_actions.rs @@ -51,6 +51,9 @@ pub enum P2pChannelsSnarkAction { snark: Box<Snark>, nonce: u32, }, + /// Checks if a snark has already been received from the pubsub network; if it has, it broadcasts a validated message.
+ /// If not, it constructs a new message with the snark and broadcasts it to the network, + /// either directly or by rebroadcasting it if it was received from a WebRTC connection. Libp2pBroadcast { snark: Snark, nonce: u32, diff --git a/p2p/src/channels/snark/p2p_channels_snark_reducer.rs b/p2p/src/channels/snark/p2p_channels_snark_reducer.rs index bb6ab1e57b..fb31df3f99 100644 --- a/p2p/src/channels/snark/p2p_channels_snark_reducer.rs +++ b/p2p/src/channels/snark/p2p_channels_snark_reducer.rs @@ -218,7 +218,21 @@ impl P2pChannelsSnarkState { nonce, is_local, } => { - let dispatcher = state_context.into_dispatcher(); + let (dispatcher, state) = state_context.into_dispatcher_and_state(); + let p2p_state: &P2pState = state.substate()?; + let pubsub_state = &p2p_state.network.scheduler.broadcast_state.mcache; + + let message_id = crate::BroadcastMessageId::Snark { + job_id: snark.job_id(), + }; + + if pubsub_state.contains_broadcast_id(&message_id) { + dispatcher + .push(P2pNetworkPubsubAction::BroadcastValidatedMessage { message_id }); + + return Ok(()); + }; + let message = Box::new((snark.statement(), (&snark).into())); let message = v2::NetworkPoolSnarkPoolDiffVersionedStableV2::AddSolvedWork(message); let nonce = nonce.into(); diff --git a/p2p/src/channels/streaming_rpc/p2p_channels_streaming_rpc_actions.rs b/p2p/src/channels/streaming_rpc/p2p_channels_streaming_rpc_actions.rs index cb4373f579..fdaa869daa 100644 --- a/p2p/src/channels/streaming_rpc/p2p_channels_streaming_rpc_actions.rs +++ b/p2p/src/channels/streaming_rpc/p2p_channels_streaming_rpc_actions.rs @@ -211,7 +211,7 @@ impl redux::EnablingCondition for P2pChannelsStreamingRpcAction { && (response.is_none() || progress.is_done()) && response .as_ref() - .map_or(true, |resp| resp.kind() == request.kind()) + .is_none_or(|resp| resp.kind() == request.kind()) } _ => false, }), @@ -242,7 +242,7 @@ impl redux::EnablingCondition for P2pChannelsStreamingRpcAction { rpc_id == *id && response .as_ref() - .map_or(true, |resp| resp.kind() == req.kind()) + .is_none_or(|resp| resp.kind() == req.kind()) }), P2pChannelsStreamingRpcAction::ResponsePartNextSend { peer_id, id } => state .get_ready_peer(peer_id) diff --git a/p2p/src/channels/transaction/p2p_channels_transaction_actions.rs b/p2p/src/channels/transaction/p2p_channels_transaction_actions.rs index fc75742ba8..38650150c7 100644 --- a/p2p/src/channels/transaction/p2p_channels_transaction_actions.rs +++ b/p2p/src/channels/transaction/p2p_channels_transaction_actions.rs @@ -1,5 +1,5 @@ -use openmina_core::transaction::Transaction; use openmina_core::ActionEvent; +use openmina_core::{p2p::P2pNetworkPubsubMessageCacheId, transaction::Transaction}; use serde::{Deserialize, Serialize}; use crate::{channels::P2pChannelsAction, P2pState, PeerId}; @@ -46,8 +46,9 @@ pub enum P2pChannelsTransactionAction { }, Libp2pReceived { peer_id: PeerId, - transaction: Box, + transactions: Vec, nonce: u32, + message_id: P2pNetworkPubsubMessageCacheId, }, Libp2pBroadcast { transaction: Box, diff --git a/p2p/src/channels/transaction/p2p_channels_transaction_reducer.rs b/p2p/src/channels/transaction/p2p_channels_transaction_reducer.rs index 9b9e483cde..beee731bcc 100644 --- a/p2p/src/channels/transaction/p2p_channels_transaction_reducer.rs +++ b/p2p/src/channels/transaction/p2p_channels_transaction_reducer.rs @@ -212,17 +212,26 @@ impl P2pChannelsTransactionState { } Ok(()) } - P2pChannelsTransactionAction::Libp2pReceived { transaction, .. 
} => { + P2pChannelsTransactionAction::Libp2pReceived { + transactions, + message_id, + peer_id, + .. + } => { let (dispatcher, state) = state_context.into_dispatcher_and_state(); let p2p_state: &P2pState = state.substate()?; if let Some(callback) = &p2p_state .callbacks - .on_p2p_channels_transaction_libp2p_received + .on_p2p_channels_transactions_libp2p_received { - if let Ok(transaction) = TransactionWithHash::try_new(*transaction) { - dispatcher.push_callback(callback.clone(), Box::new(transaction)); - } + let transactions = transactions + .into_iter() + .map(TransactionWithHash::try_new) + .filter_map(Result::ok) + .collect(); + + dispatcher.push_callback(callback.clone(), (peer_id, transactions, message_id)); } Ok(()) diff --git a/p2p/src/connection/outgoing/p2p_connection_outgoing_actions.rs b/p2p/src/connection/outgoing/p2p_connection_outgoing_actions.rs index dcc3b1cee9..88098fe8b0 100644 --- a/p2p/src/connection/outgoing/p2p_connection_outgoing_actions.rs +++ b/p2p/src/connection/outgoing/p2p_connection_outgoing_actions.rs @@ -99,7 +99,7 @@ impl redux::EnablingCondition for P2pConnectionOutgoingAction { state .peers .get(opts.peer_id()) - .map_or(true, |peer| !peer.status.is_connected_or_connecting()) + .is_none_or(|peer| !peer.status.is_connected_or_connecting()) } P2pConnectionOutgoingAction::Reconnect { opts, .. } => { !state.already_has_min_peers() diff --git a/p2p/src/identity/secret_key.rs b/p2p/src/identity/secret_key.rs index 7e6ba1ad81..43617726d1 100644 --- a/p2p/src/identity/secret_key.rs +++ b/p2p/src/identity/secret_key.rs @@ -1,10 +1,13 @@ use std::{fmt, path::Path, str::FromStr}; use base64::Engine; -use ed25519_dalek::{ed25519::signature::SignerMut, SigningKey as Ed25519SecretKey}; +use ed25519_dalek::{ + ed25519::signature::SignerMut, pkcs8::EncodePrivateKey as _, SigningKey as Ed25519SecretKey, +}; use openmina_core::{EncryptedSecretKey, EncryptedSecretKeyFile, EncryptionError}; use rand::{CryptoRng, Rng}; use serde::{Deserialize, Serialize}; +use zeroize::Zeroizing; use super::{PublicKey, Signature}; @@ -53,6 +56,12 @@ impl SecretKey { self.0.to_scalar_bytes().into() } + pub fn to_pem(&self) -> Zeroizing { + self.0 + .to_pkcs8_pem(Default::default()) + .expect("must be valid key") + } + pub fn from_encrypted_file( path: impl AsRef, password: &str, diff --git a/p2p/src/network/pubsub/mod.rs b/p2p/src/network/pubsub/mod.rs index 79ab3d009f..a9ebc12b9c 100644 --- a/p2p/src/network/pubsub/mod.rs +++ b/p2p/src/network/pubsub/mod.rs @@ -1,14 +1,13 @@ pub mod pb { include!(concat!(env!("OUT_DIR"), "/gossipsub.rs")); } - +pub use openmina_core::p2p::P2pNetworkPubsubMessageCacheId; mod p2p_network_pubsub_actions; pub use self::p2p_network_pubsub_actions::P2pNetworkPubsubAction; mod p2p_network_pubsub_state; pub use self::p2p_network_pubsub_state::{ - P2pNetworkPubsubClientState, P2pNetworkPubsubClientTopicState, P2pNetworkPubsubMessageCacheId, - P2pNetworkPubsubState, + P2pNetworkPubsubClientState, P2pNetworkPubsubClientTopicState, P2pNetworkPubsubState, }; #[cfg(feature = "p2p-libp2p")] @@ -18,6 +17,7 @@ mod p2p_network_pubsub_reducer; const TOPIC: &str = "coda/consensus-messages/0.0.1"; pub mod pubsub_effectful; +use openmina_core::snark::SnarkJobId; pub use pubsub_effectful::P2pNetworkPubsubEffectfulAction; use binprot::BinProtWrite; @@ -32,6 +32,9 @@ pub enum BroadcastMessageId { BlockHash { hash: mina_p2p_messages::v2::StateHash, }, + Snark { + job_id: SnarkJobId, + }, MessageId { message_id: P2pNetworkPubsubMessageCacheId, }, diff --git 
a/p2p/src/network/pubsub/p2p_network_pubsub_actions.rs b/p2p/src/network/pubsub/p2p_network_pubsub_actions.rs index 63ff30c9d4..71caedc43a 100644 --- a/p2p/src/network/pubsub/p2p_network_pubsub_actions.rs +++ b/p2p/src/network/pubsub/p2p_network_pubsub_actions.rs @@ -1,7 +1,7 @@ -use super::{p2p_network_pubsub_state::P2pNetworkPubsubMessageCacheId, pb, BroadcastMessageId}; +use super::{pb, BroadcastMessageId}; use crate::{token::BroadcastAlgorithm, ConnectionAddr, Data, P2pState, PeerId, StreamId}; use mina_p2p_messages::gossip::GossipNetMessageV2; -use openmina_core::ActionEvent; +use openmina_core::{p2p::P2pNetworkPubsubMessageCacheId, ActionEvent}; use serde::{Deserialize, Serialize}; /// Actions that can occur within the P2P Network PubSub system. diff --git a/p2p/src/network/pubsub/p2p_network_pubsub_reducer.rs b/p2p/src/network/pubsub/p2p_network_pubsub_reducer.rs index 95b616f33a..5a43766672 100644 --- a/p2p/src/network/pubsub/p2p_network_pubsub_reducer.rs +++ b/p2p/src/network/pubsub/p2p_network_pubsub_reducer.rs @@ -5,7 +5,9 @@ use mina_p2p_messages::{ gossip::{self, GossipNetMessageV2}, v2::NetworkPoolSnarkPoolDiffVersionedStableV2, }; -use openmina_core::{block::BlockWithHash, bug_condition, fuzz_maybe, fuzzed_maybe, Substate}; +use openmina_core::{ + block::BlockWithHash, bug_condition, fuzz_maybe, fuzzed_maybe, snark::Snark, Substate, +}; use redux::{Dispatcher, Timestamp}; use crate::{ @@ -421,20 +423,10 @@ impl P2pNetworkPubsubState { Ok(sig) => Some(sig.to_bytes().to_vec()), }; - let message_state = match &message { - GossipNetMessageV2::NewState(block) => { - P2pNetworkPubsubMessageCacheMessage::PreValidatedBlockMessage { - block_hash: block.try_hash()?, - message: msg, - peer_id: source_peer_id, - time, - } - } - _ => P2pNetworkPubsubMessageCacheMessage::PreValidated { - message: msg, - peer_id: source_peer_id, - time, - }, + let message_state = P2pNetworkPubsubMessageCacheMessage::Validated { + message: msg, + peer_id: source_peer_id, + time, }; pubsub_state.mcache.map.insert(message_id, message_state); @@ -555,6 +547,19 @@ impl P2pNetworkPubsubState { time, } } + GossipNetMessageV2::SnarkPoolDiff { + message: NetworkPoolSnarkPoolDiffVersionedStableV2::AddSolvedWork(snark), + .. 
+ } => { + let snark: Snark = snark.1.clone().into(); + let job_id = snark.job_id(); + P2pNetworkPubsubMessageCacheMessage::PreValidatedSnark { + job_id, + message, + peer_id, + time, + } + } _ => P2pNetworkPubsubMessageCacheMessage::PreValidated { message, peer_id, @@ -568,59 +573,55 @@ impl P2pNetworkPubsubState { let dispatcher = state_context.into_dispatcher(); + // TODO: for transaction proof we track source, we should do that for `BestTipUpdate` and for `SnarkPoolDiff` match content { GossipNetMessageV2::NewState(block) => { let best_tip = BlockWithHash::try_new(block.clone())?; dispatcher.push(P2pPeerAction::BestTipUpdate { peer_id, best_tip }); - return Ok(()); } GossipNetMessageV2::TransactionPoolDiff { message, nonce } => { let nonce = nonce.as_u32(); - for transaction in message.0 { - dispatcher.push(P2pChannelsTransactionAction::Libp2pReceived { - peer_id, - transaction: Box::new(transaction), - nonce, - }); - } + dispatcher.push(P2pChannelsTransactionAction::Libp2pReceived { + peer_id, + transactions: message.0.into_iter().collect(), + nonce, + message_id, + }); } GossipNetMessageV2::SnarkPoolDiff { message: NetworkPoolSnarkPoolDiffVersionedStableV2::AddSolvedWork(work), nonce, } => { + let snark: Snark = work.1.into(); dispatcher.push(P2pChannelsSnarkAction::Libp2pReceived { peer_id, - snark: Box::new(work.1.into()), + snark: Box::new(snark), nonce: nonce.as_u32(), }); } _ => {} } - - dispatcher.push(P2pNetworkPubsubAction::BroadcastValidatedMessage { - message_id: super::BroadcastMessageId::MessageId { message_id }, - }); Ok(()) } P2pNetworkPubsubAction::BroadcastValidatedMessage { message_id } => { - let Some((mcache_message_id, message)) = + let Some((message_id, _)) = pubsub_state.mcache.get_message_id_and_message(&message_id) else { bug_condition!("Message with id: {:?} not found", message_id); return Ok(()); }; + + let Some(message) = pubsub_state.mcache.map.get(&message_id) else { + bug_condition!("Message with id: {:?} not found", message_id); + return Ok(()); + }; + let raw_message = message.message().clone(); - let peer_id = message.peer_id(); + let peer_id = *message.peer_id(); - pubsub_state.reduce_incoming_validated_message( - mcache_message_id, - peer_id, - &raw_message, - ); + pubsub_state.reduce_incoming_validated_message(message_id, peer_id, &raw_message); - let Some((_message_id, message)) = - pubsub_state.mcache.get_message_id_and_message(&message_id) - else { + let Some(message) = pubsub_state.mcache.map.get_mut(&message_id) else { bug_condition!("Message with id: {:?} not found", message_id); return Ok(()); }; @@ -628,11 +629,10 @@ impl P2pNetworkPubsubState { *message = P2pNetworkPubsubMessageCacheMessage::Validated { message: raw_message, peer_id, - time: message.time(), + time: *message.time(), }; let (dispatcher, state) = state_context.into_dispatcher_and_state(); - Self::broadcast(dispatcher, state) } P2pNetworkPubsubAction::PruneMessages {} => { @@ -641,7 +641,7 @@ impl P2pNetworkPubsubState { .map .iter() .filter_map(|(message_id, message)| { - if message.time() + MAX_MESSAGE_KEEP_DURATION > time { + if (*message.time() + MAX_MESSAGE_KEEP_DURATION) <= time { Some(message_id.to_owned()) } else { None @@ -659,25 +659,28 @@ impl P2pNetworkPubsubState { peer_id, .. 
} => { - let mut peer_id = peer_id; + let mut involved_peers = peer_id.into_iter().collect::<Vec<_>>(); + let mut add_peer = |peer: &PeerId| { + if !involved_peers.contains(peer) { + involved_peers.push(*peer); + } + }; + if let Some(message_id) = message_id { - let Some((_message_id, message)) = + let Some((message_id, message)) = pubsub_state.mcache.get_message_id_and_message(&message_id) else { bug_condition!("Message not found for id: {:?}", message_id); return Ok(()); }; - if peer_id.is_none() { - peer_id = Some(message.peer_id()); - } - - pubsub_state.mcache.remove_message(_message_id); + add_peer(message.peer_id()); + pubsub_state.mcache.remove_message(message_id); } let dispatcher = state_context.into_dispatcher(); - if let Some(peer_id) = peer_id { + for peer_id in involved_peers { dispatcher.push(P2pDisconnectionAction::Init { peer_id, reason: P2pDisconnectionReason::InvalidMessage, @@ -715,6 +718,10 @@ Ok(()) } + /// Queues a validated message for propagation to other peers in the pubsub network. + /// For peers that are "on mesh" for the message's topic, queues the full message. + /// For other peers, queues an IHAVE control message to notify about message availability. + /// The original sender is excluded from propagation. fn reduce_incoming_validated_message( &mut self, message_id: P2pNetworkPubsubMessageCacheId, diff --git a/p2p/src/network/pubsub/p2p_network_pubsub_state.rs b/p2p/src/network/pubsub/p2p_network_pubsub_state.rs index e09f9dfda6..8f5eecaa5d 100644 --- a/p2p/src/network/pubsub/p2p_network_pubsub_state.rs +++ b/p2p/src/network/pubsub/p2p_network_pubsub_state.rs @@ -3,7 +3,11 @@ use crate::{token::BroadcastAlgorithm, ConnectionAddr, PeerId, StreamId}; use libp2p_identity::ParseError; use mina_p2p_messages::gossip::GossipNetMessageV2; -use openmina_core::{snark::Snark, transaction::Transaction}; +use openmina_core::{ + p2p::P2pNetworkPubsubMessageCacheId, + snark::{Snark, SnarkJobId}, + transaction::Transaction, +}; use redux::Timestamp; use serde::{Deserialize, Serialize}; use std::{ @@ -175,7 +179,7 @@ pub struct P2pNetworkPubsubClientState { impl P2pNetworkPubsubClientState { pub fn publish(&mut self, message: &pb::Message) { - let Ok(id) = P2pNetworkPubsubMessageCacheId::compute_message_id(message) else { + let Ok(id) = compute_message_id(message) else { self.message.publish.push(message.clone()); return; }; @@ -228,7 +232,12 @@ pub enum P2pNetworkPubsubMessageCacheMessage { peer_id: PeerId, time: Timestamp, }, - // This is temporary handling for transactions and snark pool + PreValidatedSnark { + job_id: SnarkJobId, + message: pb::Message, + peer_id: PeerId, + time: Timestamp, + }, PreValidated { message: pb::Message, peer_id: PeerId, @@ -241,62 +250,41 @@ pub enum P2pNetworkPubsubMessageCacheMessage { }, } -#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq, Hash, PartialOrd, Ord, Copy)] -pub struct P2pNetworkPubsubMessageCacheId { - pub source: libp2p_identity::PeerId, - pub seqno: u64, -} - -impl P2pNetworkPubsubMessageCacheId { - // TODO: what if wasm32? - // How to test it? - pub fn compute_message_id( - message: &pb::Message, - ) -> Result<P2pNetworkPubsubMessageCacheId, ParseError> { - let source = source_from_message(message)?; +// TODO: what if wasm32? +// How to test it?
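The TODO carried over above asks how `compute_message_id` (defined just below) can be tested; since the function only parses bytes, a plain unit test covers it on native and wasm32 targets alike. A sketch (assumptions: `pb` is the prost-generated gossipsub module with public `from`/`seqno` fields, and `source_from_message` accepts any valid serialized peer id — the values below are made up):

```rust
#[cfg(test)]
mod message_id_tests {
    use super::*;

    #[test]
    fn message_id_combines_source_and_seqno() {
        let source = libp2p_identity::PeerId::random();
        let message = pb::Message {
            from: Some(source.to_bytes()),
            seqno: Some(42u64.to_be_bytes().to_vec()),
            ..Default::default()
        };
        let id = compute_message_id(&message).expect("valid source peer id");
        assert_eq!(id.source, source);
        assert_eq!(id.seqno, 42);
    }
}
```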
+pub fn compute_message_id( + message: &pb::Message, +) -> Result<P2pNetworkPubsubMessageCacheId, ParseError> { + let source = source_from_message(message)?; - let seqno = message - .seqno - .as_ref() - .and_then(|b| <[u8; 8]>::try_from(b.as_slice()).ok()) - .map(u64::from_be_bytes) - .unwrap_or_default(); + let seqno = message + .seqno + .as_ref() + .and_then(|b| <[u8; 8]>::try_from(b.as_slice()).ok()) + .map(u64::from_be_bytes) + .unwrap_or_default(); - Ok(P2pNetworkPubsubMessageCacheId { source, seqno }) - } + Ok(P2pNetworkPubsubMessageCacheId { source, seqno }) +} - pub fn to_raw_bytes(&self) -> Vec<u8> { - let mut message_id = self.source.to_base58(); - message_id.push_str(&self.seqno.to_string()); - message_id.into_bytes() - } +macro_rules! enum_field { + ($field:ident: $field_type:ty) => { + pub fn $field(&self) -> &$field_type { + match self { + Self::Init { $field, .. } + | Self::PreValidated { $field, .. } + | Self::PreValidatedBlockMessage { $field, .. } + | Self::PreValidatedSnark { $field, .. } + | Self::Validated { $field, .. } => $field, + } + } + }; } impl P2pNetworkPubsubMessageCacheMessage { - pub fn message(&self) -> &pb::Message { - match self { - Self::Init { message, .. } => message, - Self::PreValidated { message, .. } => message, - Self::PreValidatedBlockMessage { message, .. } => message, - Self::Validated { message, .. } => message, - } - } - pub fn time(&self) -> Timestamp { - *match self { - Self::Init { time, .. } => time, - Self::PreValidated { time, .. } => time, - Self::PreValidatedBlockMessage { time, .. } => time, - Self::Validated { time, .. } => time, - } - } - pub fn peer_id(&self) -> PeerId { - *match self { - Self::Init { peer_id, .. } => peer_id, - Self::PreValidated { peer_id, .. } => peer_id, - Self::PreValidatedBlockMessage { peer_id, .. } => peer_id, - Self::Validated { peer_id, .. } => peer_id, - } - } + enum_field!(message: pb::Message); + enum_field!(time: Timestamp); + enum_field!(peer_id: PeerId); }
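The `enum_field!` macro above replaces three nearly identical hand-written accessors: every variant carries the same field, so one method with or-patterns across all variants can return it, and adding a variant only requires extending the macro body once. The same shape in a self-contained toy:

```rust
macro_rules! enum_field {
    ($field:ident: $field_type:ty) => {
        pub fn $field(&self) -> &$field_type {
            match self {
                Self::A { $field, .. } | Self::B { $field, .. } => $field,
            }
        }
    };
}

pub enum Message {
    A { text: String, id: u64 },
    B { text: String, id: u64 },
}

impl Message {
    enum_field!(text: String);
    enum_field!(id: u64);
}

fn main() {
    let m = Message::B { text: "hi".into(), id: 7 };
    assert_eq!(m.text(), "hi");
    assert_eq!(*m.id(), 7);
}
```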
impl P2pNetworkPubsubMessageCache { @@ -309,7 +297,7 @@ impl P2pNetworkPubsubMessageCache { peer_id: PeerId, time: Timestamp, ) -> Result<P2pNetworkPubsubMessageCacheId, ParseError> { - let id = P2pNetworkPubsubMessageCacheId::compute_message_id(&message)?; + let id = compute_message_id(&message)?; self.map.insert( id, P2pNetworkPubsubMessageCacheMessage::Init { @@ -339,13 +327,19 @@ impl P2pNetworkPubsubMessageCache { pub fn contains_broadcast_id(&self, message_id: &BroadcastMessageId) -> bool { match message_id { - super::BroadcastMessageId::BlockHash { hash } => self + BroadcastMessageId::BlockHash { hash } => self .map .values() .any(|message| matches!(message, P2pNetworkPubsubMessageCacheMessage::PreValidatedBlockMessage { block_hash, .. } if block_hash == hash)), - super::BroadcastMessageId::MessageId { message_id } => { + BroadcastMessageId::MessageId { message_id } => { self.map.contains_key(message_id) - } + }, + BroadcastMessageId::Snark { job_id: snark_job_id } => { + self + .map + .values() + .any(|message| matches!(message, P2pNetworkPubsubMessageCacheMessage::PreValidatedSnark { job_id,.. } if job_id == snark_job_id)) + }, } } @@ -357,7 +351,7 @@ &mut P2pNetworkPubsubMessageCacheMessage, )> { match message_id { - super::BroadcastMessageId::BlockHash { hash } => { + BroadcastMessageId::BlockHash { hash } => { self.map .iter_mut() .find_map(|(message_id, message)| match message { @@ -368,18 +362,34 @@ impl P2pNetworkPubsubMessageCache { _ => None, }) } - super::BroadcastMessageId::MessageId { message_id } => self + BroadcastMessageId::MessageId { message_id } => self .map .get_mut(message_id) .map(|content| (*message_id, content)), + BroadcastMessageId::Snark { + job_id: snark_job_id, + } => { + self.map + .iter_mut() + .find_map(|(message_id, message)| match message { + P2pNetworkPubsubMessageCacheMessage::PreValidatedSnark { + job_id, .. + } if job_id == snark_job_id => Some((*message_id, message)), + _ => None, + }) + } } } - pub fn remove_message(&mut self, message_id: P2pNetworkPubsubMessageCacheId) { - let _ = self.map.remove(&message_id); + pub fn remove_message( + &mut self, + message_id: P2pNetworkPubsubMessageCacheId, + ) -> Option<P2pNetworkPubsubMessageCacheMessage> { + let message = self.map.remove(&message_id); if let Some(position) = self.queue.iter().position(|id| id == &message_id) { self.queue.remove(position); } + message } pub fn get_message_from_raw_message_id( diff --git a/p2p/src/network/rpc/p2p_network_rpc_state.rs b/p2p/src/network/rpc/p2p_network_rpc_state.rs index a80c47826c..f07f63f9ed 100644 --- a/p2p/src/network/rpc/p2p_network_rpc_state.rs +++ b/p2p/src/network/rpc/p2p_network_rpc_state.rs @@ -54,7 +54,7 @@ impl P2pNetworkRpcState { } pub fn should_send_heartbeat(&self, now: redux::Timestamp) -> bool { - self.last_heartbeat_sent.map_or(true, |last_sent| { + self.last_heartbeat_sent.is_none_or(|last_sent| { now.checked_sub(last_sent) .is_some_and(|dur| dur >= HEARTBEAT_INTERVAL) }) diff --git a/p2p/src/network/scheduler/p2p_network_scheduler_actions.rs b/p2p/src/network/scheduler/p2p_network_scheduler_actions.rs index 1465090e5f..1719fb34f5 100644 --- a/p2p/src/network/scheduler/p2p_network_scheduler_actions.rs +++ b/p2p/src/network/scheduler/p2p_network_scheduler_actions.rs @@ -137,7 +137,7 @@ impl redux::EnablingCondition for P2pNetworkSchedulerAction { sock_addr: *addr, incoming: false, }) - .map_or(true, |v| v.closed.is_some()), + .is_none_or(|v| v.closed.is_some()), P2pNetworkSchedulerAction::OutgoingDidConnect { addr, .. 
} => state .network .scheduler diff --git a/p2p/src/p2p_state.rs b/p2p/src/p2p_state.rs index c1f57f4aa9..84566ce947 100644 --- a/p2p/src/p2p_state.rs +++ b/p2p/src/p2p_state.rs @@ -518,7 +518,11 @@ pub struct P2pCallbacks { /// Callback for [`P2pChannelsTransactionAction::Received`] pub on_p2p_channels_transaction_received: OptionalCallback<(PeerId, Box)>, /// Callback for [`P2pChannelsTransactionAction::Libp2pReceived`] - pub on_p2p_channels_transaction_libp2p_received: OptionalCallback>, + pub on_p2p_channels_transactions_libp2p_received: OptionalCallback<( + PeerId, + Vec, + P2pNetworkPubsubMessageCacheId, + )>, /// Callback for [`P2pChannelsSnarkJobCommitmentAction::Received`] pub on_p2p_channels_snark_job_commitment_received: OptionalCallback<(PeerId, Box)>, diff --git a/p2p/src/peer/p2p_peer_actions.rs b/p2p/src/peer/p2p_peer_actions.rs index 1f298c66c7..2657043a1e 100644 --- a/p2p/src/peer/p2p_peer_actions.rs +++ b/p2p/src/peer/p2p_peer_actions.rs @@ -42,7 +42,7 @@ impl redux::EnablingCondition for P2pPeerAction { && state .peers .get(peer_id) - .map_or(true, |p| p.dial_opts.is_none()) + .is_none_or(|p| p.dial_opts.is_none()) && state.peers.len() < state.config.limits.max_peers_in_state() } P2pPeerAction::Ready { peer_id, .. } => state diff --git a/p2p/src/service_impl/mio/mod.rs b/p2p/src/service_impl/mio/mod.rs index 8ab9473545..2780dbabb6 100644 --- a/p2p/src/service_impl/mio/mod.rs +++ b/p2p/src/service_impl/mio/mod.rs @@ -6,13 +6,12 @@ use std::{ io::{self, Read, Write}, net::{IpAddr, Ipv4Addr, Shutdown, SocketAddr}, process, - sync::mpsc, }; use libp2p_identity::Keypair; use mio::net::{TcpListener, TcpStream}; -use openmina_core::bug_condition; +use openmina_core::{bug_condition, channels::mpsc}; use thiserror::Error; use crate::{ConnectionAddr, MioCmd, MioEvent}; @@ -54,7 +53,7 @@ pub enum MioService { #[derive(Debug)] pub struct MioRunningService { keypair: Keypair, - cmd_sender: mpsc::Sender, + cmd_sender: mpsc::UnboundedSender, waker: Option, } @@ -89,6 +88,13 @@ impl MioService { } } + pub fn pending_cmds(&self) -> usize { + match self { + Self::Pending(_) => 0, + Self::Ready(v) => v.cmd_sender.len(), + } + } + pub fn send_cmd(&mut self, cmd: MioCmd) { let MioService::Ready(service) = self else { bug_condition!("mio service is not initialized"); @@ -109,7 +115,7 @@ impl MioRunningService { fn mocked(keypair: Keypair) -> Self { MioRunningService { keypair, - cmd_sender: mpsc::channel().0, + cmd_sender: mpsc::unbounded_channel().0, waker: None, } } @@ -135,7 +141,7 @@ impl MioRunningService { } }; - let (tx, rx) = mpsc::channel(); + let (tx, rx) = mpsc::unbounded_channel(); let mut inner = MioServiceInner { poll, @@ -174,7 +180,7 @@ impl MioRunningService { struct MioServiceInner { poll: mio::Poll, event_sender: F, - cmd_receiver: mpsc::Receiver, + cmd_receiver: mpsc::UnboundedReceiver, tokens: TokenRegistry, listeners: BTreeMap, connections: BTreeMap, diff --git a/p2p/src/service_impl/mod.rs b/p2p/src/service_impl/mod.rs index 421343a815..dc3c2af078 100644 --- a/p2p/src/service_impl/mod.rs +++ b/p2p/src/service_impl/mod.rs @@ -47,11 +47,15 @@ pub mod webrtc { fn event_sender(&self) -> &mpsc::UnboundedSender; - fn cmd_sender(&self) -> &mpsc::UnboundedSender; + fn cmd_sender(&self) -> &mpsc::TrackedUnboundedSender; fn peers(&mut self) -> &mut BTreeMap; - fn init(_secret_key: SecretKey, _spawner: S) -> P2pServiceCtx { + fn init( + _secret_key: SecretKey, + _spawner: S, + _rng_seed: [u8; 32], + ) -> P2pServiceCtx { let (cmd_sender, _) = mpsc::unbounded_channel(); 
P2pServiceCtx { cmd_sender, diff --git a/p2p/src/service_impl/webrtc/mod.rs b/p2p/src/service_impl/webrtc/mod.rs index 1ca88e3a37..6cb4df4ebe 100644 --- a/p2p/src/service_impl/webrtc/mod.rs +++ b/p2p/src/service_impl/webrtc/mod.rs @@ -19,7 +19,7 @@ use tokio::task::spawn_local; #[cfg(target_arch = "wasm32")] use wasm_bindgen_futures::spawn_local; -use openmina_core::channels::{broadcast, mpsc, oneshot}; +use openmina_core::channels::{mpsc, oneshot, Aborted, Aborter}; use crate::identity::{EncryptableType, PublicKey}; use crate::webrtc::{ConnectionAuth, ConnectionAuthEncrypted}; @@ -33,22 +33,22 @@ use crate::{ #[cfg(all(not(target_arch = "wasm32"), feature = "p2p-webrtc-rs"))] mod imports { pub use super::webrtc_rs::{ - build_api, webrtc_signal_send, Api, RTCChannel, RTCConnection, RTCConnectionState, - RTCSignalingError, + build_api, certificate_from_pem_key, webrtc_signal_send, Api, RTCCertificate, RTCChannel, + RTCConnection, RTCConnectionState, RTCSignalingError, }; } #[cfg(all(not(target_arch = "wasm32"), feature = "p2p-webrtc-cpp"))] mod imports { pub use super::webrtc_cpp::{ - build_api, webrtc_signal_send, Api, RTCChannel, RTCConnection, RTCConnectionState, - RTCSignalingError, + build_api, certificate_from_pem_key, webrtc_signal_send, Api, RTCCertificate, RTCChannel, + RTCConnection, RTCConnectionState, RTCSignalingError, }; } #[cfg(target_arch = "wasm32")] mod imports { pub use super::web::{ - build_api, webrtc_signal_send, Api, RTCChannel, RTCConnection, RTCConnectionState, - RTCSignalingError, + build_api, certificate_from_pem_key, webrtc_signal_send, Api, RTCCertificate, RTCChannel, + RTCConnection, RTCConnectionState, RTCSignalingError, }; } @@ -61,10 +61,7 @@ use super::TaskSpawner; const CHUNK_SIZE: usize = 16 * 1024; pub enum Cmd { - PeerAdd { - args: PeerAddArgs, - abort: broadcast::Receiver<()>, - }, + PeerAdd { args: PeerAddArgs, aborted: Aborted }, } #[derive(Debug)] @@ -87,7 +84,7 @@ enum PeerCmdAll { } pub struct P2pServiceCtx { - pub cmd_sender: mpsc::UnboundedSender, + pub cmd_sender: mpsc::TrackedUnboundedSender, pub peers: BTreeMap, } @@ -95,7 +92,7 @@ pub struct PeerAddArgs { peer_id: PeerId, kind: PeerConnectionKind, event_sender: Arc Option<()> + Send + Sync + 'static>, - cmd_receiver: mpsc::UnboundedReceiver, + cmd_receiver: mpsc::TrackedUnboundedReceiver, } pub enum PeerConnectionKind { @@ -104,8 +101,8 @@ pub enum PeerConnectionKind { } pub struct PeerState { - pub cmd_sender: mpsc::UnboundedSender, - pub abort: broadcast::Sender<()>, + pub cmd_sender: mpsc::TrackedUnboundedSender, + pub abort: Aborter, } #[derive(thiserror::Error, derive_more::From, Debug)] @@ -143,7 +140,8 @@ pub type OnConnectionStateChangeHdlrFn = Box< pub struct RTCConfig { pub ice_servers: RTCConfigIceServers, - // TODO(binier): certificate + pub certificate: RTCCertificate, + pub seed: [u8; 32], } #[derive(Serialize)] @@ -229,8 +227,10 @@ async fn wait_for_ice_gathering_complete(pc: &mut RTCConnection) { async fn peer_start( api: Api, args: PeerAddArgs, - abort: broadcast::Receiver<()>, - closed: broadcast::Sender<()>, + abort: Aborted, + closed: mpsc::Sender<()>, + certificate: RTCCertificate, + rng_seed: [u8; 32], ) { let PeerAddArgs { peer_id, @@ -242,6 +242,8 @@ async fn peer_start( let config = RTCConfig { ice_servers: Default::default(), + certificate, + seed: rng_seed, }; let fut = async { let mut pc = RTCConnection::create(&api, config).await?; @@ -290,14 +292,14 @@ async fn peer_start( let sdp = pc.local_sdp().await.unwrap(); 
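The WebRTC module replaces the `broadcast::channel(1)` abort mechanism with an `Aborter`/`Aborted` pair. From the usages in this diff: `Aborter::default()` creates the signal source, `aborted()` hands out cheaply cloneable waiters, `wait()` resolves on abort, and dropping the `Aborter` (as `disconnect` now does by removing the `PeerState`) cancels the peer tasks. An illustrative stand-in built on `tokio::sync::watch`, not the real openmina_core type:

```rust
// Minimal Aborter/Aborted sketch: dropping the Aborter resolves every
// pending `wait()` on its cloned Aborted handles.
use tokio::sync::watch;

pub struct Aborter(watch::Sender<()>);

#[derive(Clone)]
pub struct Aborted(watch::Receiver<()>);

impl Default for Aborter {
    fn default() -> Self {
        Aborter(watch::channel(()).0)
    }
}

impl Aborter {
    pub fn aborted(&self) -> Aborted {
        Aborted(self.0.subscribe())
    }
}

impl Aborted {
    /// Resolves once the paired Aborter is dropped.
    pub async fn wait(&mut self) {
        // `changed()` returns Ok while the sender is alive and errors out
        // when it is dropped -- that error is the abort signal.
        while self.0.changed().await.is_ok() {}
    }
}

#[tokio::main(flavor = "current_thread")]
async fn main() {
    let aborter = Aborter::default();
    let mut aborted = aborter.aborted();

    let peer_task = tokio::spawn(async move {
        tokio::select! {
            _ = aborted.wait() => println!("peer task aborted"),
            _ = tokio::time::sleep(std::time::Duration::from_secs(60)) => {}
        }
    });

    drop(aborter); // what removing the peer from the map does implicitly
    peer_task.await.unwrap();
}
```

Unlike `broadcast::Receiver`, such waiters clone directly, which is why the `abort.resubscribe()` calls disappear throughout `peer_loop` below.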
event_sender(P2pConnectionEvent::OfferSdpReady(peer_id, Ok(sdp)).into()) .ok_or(Error::ChannelClosed)?; - match cmd_receiver.recv().await.ok_or(Error::ChannelClosed)? { + match cmd_receiver.recv().await.ok_or(Error::ChannelClosed)?.0 { PeerCmd::PeerHttpOfferSend(url, offer) => { let answer = webrtc_signal_send(&url, offer).await?; event_sender(P2pConnectionEvent::AnswerReceived(peer_id, answer).into()) .ok_or(Error::ChannelClosed)?; if let PeerCmd::AnswerSet(v) = - cmd_receiver.recv().await.ok_or(Error::ChannelClosed)? + cmd_receiver.recv().await.ok_or(Error::ChannelClosed)?.0 { return Ok(v); } @@ -353,7 +355,7 @@ async fn peer_start( if let Some(connected_tx) = connected_tx.take() { let _ = connected_tx.send(Err("disconnected")); } else { - let _ = closed.send(()); + let _ = closed.try_send(()); } } _ => {} @@ -386,13 +388,16 @@ async fn peer_start( #[cfg(not(all(not(target_arch = "wasm32"), feature = "p2p-webrtc-cpp")))] std::future::ready(()) }); - match cmd_receiver.recv().await { + let msg = match cmd_receiver.recv().await { None => return, - Some(PeerCmd::ConnectionAuthorizationSend(None)) => { + Some(msg) => msg, + }; + match msg.0 { + PeerCmd::ConnectionAuthorizationSend(None) => { // eprintln!("PeerCmd::ConnectionAuthorizationSend(None)"); return; } - Some(PeerCmd::ConnectionAuthorizationSend(Some(auth))) => { + PeerCmd::ConnectionAuthorizationSend(Some(auth)) => { let _ = main_channel_open.await; // Add a delay for sending messages after channel @@ -413,7 +418,7 @@ async fn peer_start( return; } } - Some(cmd) => { + cmd => { bug_condition!("unexpected peer cmd! Expected `PeerCmd::ConnectionAuthorizationSend`. received: {cmd:?}"); return; } @@ -426,9 +431,11 @@ async fn peer_start( struct Channel { id: ChannelId, - msg_sender: mpsc::UnboundedSender<(MsgId, Vec)>, + msg_sender: ChannelMsgSender, } +type ChannelMsgSender = mpsc::UnboundedSender<(MsgId, Vec, Option)>; + struct MsgBuffer { buf: Vec, } @@ -463,11 +470,11 @@ impl Channels { } } - fn get_msg_sender(&self, id: ChannelId) -> Option<&mpsc::UnboundedSender<(MsgId, Vec)>> { + fn get_msg_sender(&self, id: ChannelId) -> Option<&ChannelMsgSender> { self.list.iter().find(|c| c.id == id).map(|c| &c.msg_sender) } - fn add(&mut self, id: ChannelId, msg_sender: mpsc::UnboundedSender<(MsgId, Vec)>) { + fn add(&mut self, id: ChannelId, msg_sender: ChannelMsgSender) { self.list.push(Channel { id, msg_sender }); } @@ -487,9 +494,9 @@ impl Channels { async fn peer_loop( peer_id: PeerId, event_sender: Arc Option<()> + Send + Sync + 'static>, - mut cmd_receiver: mpsc::UnboundedReceiver, + mut cmd_receiver: mpsc::TrackedUnboundedReceiver, mut pc: RTCConnection, - abort: broadcast::Receiver<()>, + aborted: Aborted, ) { // TODO(binier): maybe use small_vec (stack allocated) or something like that. let mut channels = Channels::new(); @@ -499,14 +506,14 @@ async fn peer_loop( mpsc::unbounded_channel::(); while matches!(pc.connection_state(), RTCConnectionState::Connected) { - let cmd = tokio::select! { + let (cmd, _tracker) = tokio::select! 
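The repeated `.0` projections above are the visible effect of `TrackedUnboundedReceiver`: `recv()` now appears to yield a `(message, tracker)` pair rather than the bare message. A plausible shape for that tracker (hypothetical; the real type lives in openmina_core) is an RAII token that keeps the message counted as in-flight until it is dropped:

```rust
// Sketch of a tracked unbounded channel: the receiver yields the message
// together with a guard, and the pending counter only drops when the
// guard does. Names are illustrative, not openmina_core's.
use std::sync::{
    atomic::{AtomicUsize, Ordering},
    Arc,
};
use tokio::sync::mpsc;

pub struct Tracker(Arc<AtomicUsize>);

impl Drop for Tracker {
    fn drop(&mut self) {
        // Message fully handled: stop counting it as pending.
        self.0.fetch_sub(1, Ordering::Relaxed);
    }
}

pub struct TrackedUnboundedSender<T> {
    inner: mpsc::UnboundedSender<T>,
    pending: Arc<AtomicUsize>,
}

pub struct TrackedUnboundedReceiver<T> {
    inner: mpsc::UnboundedReceiver<T>,
    pending: Arc<AtomicUsize>,
}

impl<T> TrackedUnboundedSender<T> {
    pub fn tracked_send(&self, msg: T) -> Result<(), mpsc::error::SendError<T>> {
        self.inner.send(msg)?;
        self.pending.fetch_add(1, Ordering::Relaxed);
        Ok(())
    }

    pub fn len(&self) -> usize {
        self.pending.load(Ordering::Relaxed)
    }
}

impl<T> TrackedUnboundedReceiver<T> {
    pub async fn recv(&mut self) -> Option<(T, Tracker)> {
        let msg = self.inner.recv().await?;
        Some((msg, Tracker(self.pending.clone())))
    }
}
```

Under this reading, forwarding `_tracker` into the per-channel `ChannelMsgSender` (see the `peer_loop` hunk below) keeps a command counted in `pending_cmds()` until its bytes are actually written out, not merely until it is dequeued.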
{ cmd = cmd_receiver.recv() => match cmd { None => return, - Some(cmd) => PeerCmdAll::External(cmd), + Some(cmd) => (PeerCmdAll::External(cmd.0), Some(cmd.1)), }, cmd = internal_cmd_receiver.recv() => match cmd { None => return, - Some(cmd) => PeerCmdAll::Internal(cmd), + Some(cmd) => (PeerCmdAll::Internal(cmd), None), }, }; match cmd { @@ -566,10 +573,10 @@ async fn peer_loop( let _ = internal_cmd_sender.send(PeerCmdInternal::ChannelOpened(id, result.await)); }; - let mut aborted = abort.resubscribe(); + let mut aborted = aborted.clone(); spawn_local(async move { tokio::select! { - _ = aborted.recv() => {} + _ = aborted.wait() => {} _ = fut => {} } }); @@ -578,7 +585,7 @@ async fn peer_loop( let id = msg.channel_id(); let err = match channels.get_msg_sender(id) { Some(msg_sender) => match msg_buf.encode(&msg) { - Ok(encoded) => match msg_sender.send((msg_id, encoded)) { + Ok(encoded) => match msg_sender.send((msg_id, encoded, _tracker)) { Ok(_) => None, Err(_) => Some("ChannelMsgMpscSendFailed".to_owned()), }, @@ -671,7 +678,7 @@ async fn peer_loop( // TODO(binier): find deeper cause and fix it. sleep(Duration::from_secs(3)).await; - while let Some((msg_id, encoded)) = sender_rx.recv().await { + while let Some((msg_id, encoded, _tracker)) = sender_rx.recv().await { let encoded = bytes::Bytes::from(encoded); let mut chunks = encoded.chunks(CHUNK_SIZE).map(|b| encoded.slice_ref(b)); @@ -698,10 +705,10 @@ async fn peer_loop( } }; - let mut aborted = abort.resubscribe(); + let mut aborted = aborted.clone(); spawn_local(async move { tokio::select! { - _ = aborted.recv() => {} + _ = aborted.wait() => {} _ = fut => {} } }); @@ -727,35 +734,40 @@ pub trait P2pServiceWebrtc: redux::Service { fn event_sender(&self) -> &mpsc::UnboundedSender; - fn cmd_sender(&self) -> &mpsc::UnboundedSender; + fn cmd_sender(&self) -> &mpsc::TrackedUnboundedSender; fn peers(&mut self) -> &mut BTreeMap; - fn init(secret_key: SecretKey, spawner: S) -> P2pServiceCtx { + fn init( + secret_key: SecretKey, + spawner: S, + rng_seed: [u8; 32], + ) -> P2pServiceCtx { const MAX_PEERS: usize = 500; - let (cmd_sender, mut cmd_receiver) = mpsc::unbounded_channel(); + let (cmd_sender, mut cmd_receiver) = mpsc::tracked_unbounded_channel(); - let _ = secret_key; + let certificate = certificate_from_pem_key(secret_key.to_pem().as_str()); spawner.spawn_main("webrtc", async move { #[allow(clippy::all)] let api = build_api(); let conn_permits = Arc::new(Semaphore::const_new(MAX_PEERS)); while let Some(cmd) = cmd_receiver.recv().await { - match cmd { - Cmd::PeerAdd { args, mut abort } => { + match cmd.0 { + Cmd::PeerAdd { args, aborted } => { #[allow(clippy::all)] let api = api.clone(); let conn_permits = conn_permits.clone(); let peer_id = args.peer_id; let event_sender = args.event_sender.clone(); + let certificate = certificate.clone(); spawn_local(async move { let Ok(_permit) = conn_permits.try_acquire() else { // state machine shouldn't allow this to happen. bug_condition!("P2P WebRTC Semaphore acquisition failed!"); return; }; - let (closed_tx, mut closed) = broadcast::channel(1); + let (closed_tx, mut closed) = mpsc::channel(1); let event_sender_clone = event_sender.clone(); spawn_local(async move { // to avoid sending closed multiple times @@ -763,14 +775,14 @@ pub trait P2pServiceWebrtc: redux::Service { event_sender_clone(P2pConnectionEvent::Closed(peer_id).into()); }); tokio::select! 
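Note how `PeerAdd` handling is gated by a `Semaphore` with `MAX_PEERS` permits, acquired with `try_acquire` rather than `acquire`: the state machine is expected to enforce the peer limit itself, so an exhausted semaphore is reported via `bug_condition!` instead of being awaited as back-pressure. The pattern in isolation (using the owned-permit variant so the permits can outlive the loop body):

```rust
// Non-blocking permit acquisition: the third connection is rejected
// immediately instead of queueing.
use std::sync::Arc;
use tokio::sync::Semaphore;

#[tokio::main(flavor = "current_thread")]
async fn main() {
    let conn_permits = Arc::new(Semaphore::const_new(2));
    let mut held = Vec::new();

    for peer in 0..3 {
        match conn_permits.clone().try_acquire_owned() {
            Ok(permit) => {
                println!("peer {peer}: permit acquired");
                held.push(permit); // slot stays occupied while the peer lives
            }
            // In the diff this branch is a bug_condition!, since the state
            // machine should never admit more than MAX_PEERS peers.
            Err(_) => eprintln!("peer {peer}: no permit available"),
        }
    }
    // Dropping a permit (peer cleanup) frees the slot for the next peer.
    drop(held);
}
```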
{ - _ = peer_start(api, args, abort.resubscribe(), closed_tx.clone()) => {} - _ = abort.recv() => { + _ = peer_start(api, args, aborted.clone(), closed_tx.clone(), certificate, rng_seed) => {} + _ = aborted.wait() => { } } // delay dropping permit to give some time for cleanup. sleep(Duration::from_millis(100)).await; - let _ = closed_tx.send(()); + let _ = closed_tx.send(()).await; }); } } @@ -784,75 +796,80 @@ pub trait P2pServiceWebrtc: redux::Service { } fn outgoing_init(&mut self, peer_id: PeerId) { - let (peer_cmd_sender, peer_cmd_receiver) = mpsc::unbounded_channel(); - let (abort_sender, abort_receiver) = broadcast::channel(1); + let (peer_cmd_sender, peer_cmd_receiver) = mpsc::tracked_unbounded_channel(); + let aborter = Aborter::default(); + let aborted = aborter.aborted(); self.peers().insert( peer_id, PeerState { cmd_sender: peer_cmd_sender, - abort: abort_sender, + abort: aborter, }, ); let event_sender = self.event_sender().clone(); let event_sender = Arc::new(move |p2p_event: P2pEvent| event_sender.send(p2p_event.into()).ok()); - let _ = self.cmd_sender().send(Cmd::PeerAdd { + let _ = self.cmd_sender().tracked_send(Cmd::PeerAdd { args: PeerAddArgs { peer_id, kind: PeerConnectionKind::Outgoing, event_sender, cmd_receiver: peer_cmd_receiver, }, - abort: abort_receiver, + aborted, }); } fn incoming_init(&mut self, peer_id: PeerId, offer: webrtc::Offer) { - let (peer_cmd_sender, peer_cmd_receiver) = mpsc::unbounded_channel(); - let (abort_sender, abort_receiver) = broadcast::channel(1); + let (peer_cmd_sender, peer_cmd_receiver) = mpsc::tracked_unbounded_channel(); + let aborter = Aborter::default(); + let aborted = aborter.aborted(); self.peers().insert( peer_id, PeerState { cmd_sender: peer_cmd_sender, - abort: abort_sender, + abort: aborter, }, ); let event_sender = self.event_sender().clone(); let event_sender = Arc::new(move |p2p_event: P2pEvent| event_sender.send(p2p_event.into()).ok()); - let _ = self.cmd_sender().send(Cmd::PeerAdd { + let _ = self.cmd_sender().tracked_send(Cmd::PeerAdd { args: PeerAddArgs { peer_id, kind: PeerConnectionKind::Incoming(Box::new(offer)), event_sender, cmd_receiver: peer_cmd_receiver, }, - abort: abort_receiver, + aborted, }); } fn set_answer(&mut self, peer_id: PeerId, answer: webrtc::Answer) { if let Some(peer) = self.peers().get(&peer_id) { - let _ = peer.cmd_sender.send(PeerCmd::AnswerSet(answer)); + let _ = peer.cmd_sender.tracked_send(PeerCmd::AnswerSet(answer)); } } fn http_signaling_request(&mut self, url: String, offer: webrtc::Offer) { if let Some(peer) = self.peers().get(&offer.target_peer_id) { - let _ = peer.cmd_sender.send(PeerCmd::PeerHttpOfferSend(url, offer)); + let _ = peer + .cmd_sender + .tracked_send(PeerCmd::PeerHttpOfferSend(url, offer)); } } fn disconnect(&mut self, peer_id: PeerId) -> bool { + // TODO(binier): improve // By removing the peer, `abort` gets dropped which will // cause `peer_loop` to end. 
- if let Some(peer) = self.peers().remove(&peer_id) { - if peer.abort.receiver_count() > 0 { - // peer disconnection not yet finished - return false; - } + if let Some(_peer) = self.peers().remove(&peer_id) { + // if peer.abort.receiver_count() > 0 { + // // peer disconnection not yet finished + // return false; + // } } else { openmina_core::error!(openmina_core::log::system_time(); "`disconnect` shouldn't be used for libp2p peers"); } @@ -861,13 +878,15 @@ pub trait P2pServiceWebrtc: redux::Service { fn channel_open(&mut self, peer_id: PeerId, id: ChannelId) { if let Some(peer) = self.peers().get(&peer_id) { - let _ = peer.cmd_sender.send(PeerCmd::ChannelOpen(id)); + let _ = peer.cmd_sender.tracked_send(PeerCmd::ChannelOpen(id)); } } fn channel_send(&mut self, peer_id: PeerId, msg_id: MsgId, msg: ChannelMsg) { if let Some(peer) = self.peers().get(&peer_id) { - let _ = peer.cmd_sender.send(PeerCmd::ChannelSend(msg_id, msg)); + let _ = peer + .cmd_sender + .tracked_send(PeerCmd::ChannelSend(msg_id, msg)); } } @@ -892,7 +911,7 @@ pub trait P2pServiceWebrtc: redux::Service { if let Some(peer) = self.peers().get(&peer_id) { let _ = peer .cmd_sender - .send(PeerCmd::ConnectionAuthorizationSend(auth)); + .tracked_send(PeerCmd::ConnectionAuthorizationSend(auth)); } } @@ -909,3 +928,13 @@ pub trait P2pServiceWebrtc: redux::Service { auth: ConnectionAuthEncrypted, ) -> Option; } + +impl P2pServiceCtx { + pub fn pending_cmds(&self) -> usize { + self.peers + .iter() + .fold(self.cmd_sender.len(), |acc, (_, peer)| { + acc + peer.cmd_sender.len() + }) + } +} diff --git a/p2p/src/service_impl/webrtc/web.rs b/p2p/src/service_impl/webrtc/web.rs index df83300143..17a9627987 100644 --- a/p2p/src/service_impl/webrtc/web.rs +++ b/p2p/src/service_impl/webrtc/web.rs @@ -32,6 +32,12 @@ pub type RTCConnectionState = RtcPeerConnectionState; pub type Api = (); +pub type RTCCertificate = (); + +pub fn certificate_from_pem_key(_: &str) -> RTCCertificate { + () +} + pub fn build_api() -> Api {} pub struct RTCConnection(Rc, bool); diff --git a/p2p/src/service_impl/webrtc/webrtc_cpp.rs b/p2p/src/service_impl/webrtc/webrtc_cpp.rs index 8ce614a1dd..c83367b581 100644 --- a/p2p/src/service_impl/webrtc/webrtc_cpp.rs +++ b/p2p/src/service_impl/webrtc/webrtc_cpp.rs @@ -22,6 +22,12 @@ pub type RTCConnectionState = ConnectionState; pub type Api = (); +pub type RTCCertificate = (); + +pub fn certificate_from_pem_key(_: &str) -> RTCCertificate { + () +} + type MessageHandler = Box; pub fn build_api() -> Api {} diff --git a/p2p/src/service_impl/webrtc/webrtc_rs.rs b/p2p/src/service_impl/webrtc/webrtc_rs.rs index bca11e9fad..aaf7b7826e 100644 --- a/p2p/src/service_impl/webrtc/webrtc_rs.rs +++ b/p2p/src/service_impl/webrtc/webrtc_rs.rs @@ -28,6 +28,13 @@ pub type RTCConnectionState = RTCPeerConnectionState; pub type Api = Arc; +pub type RTCCertificate = webrtc::peer_connection::certificate::RTCCertificate; + +pub fn certificate_from_pem_key(pem_str: &str) -> RTCCertificate { + let keypair = rcgen::KeyPair::from_pem(pem_str).expect("valid pem"); + RTCCertificate::from_key_pair(keypair).expect("keypair is compatible") +} + pub fn build_api() -> Api { APIBuilder::new().build().into() } @@ -47,7 +54,10 @@ pub enum RTCSignalingError { impl RTCConnection { pub async fn create(api: &Api, config: RTCConfig) -> Result { - api.new_peer_connection(config.into()) + let mut configuration = RTCConfiguration::from(config); + // try default certificate, TODO(vlad): do it right + configuration.certificates.clear(); + 
api.new_peer_connection(configuration) .await .map(|v| Self(v.into(), true)) } @@ -192,6 +202,8 @@ impl From for RTCConfiguration { RTCConfiguration { ice_servers: value.ice_servers.0.into_iter().map(Into::into).collect(), ice_transport_policy: RTCIceTransportPolicy::All, + certificates: vec![value.certificate], + seed: Some(value.seed.to_vec()), ..Default::default() } } diff --git a/p2p/src/service_impl/webrtc_with_libp2p.rs b/p2p/src/service_impl/webrtc_with_libp2p.rs index 3d972a1ad3..cd384cb5fa 100644 --- a/p2p/src/service_impl/webrtc_with_libp2p.rs +++ b/p2p/src/service_impl/webrtc_with_libp2p.rs @@ -29,12 +29,12 @@ pub trait P2pServiceWebrtcWithLibp2p: P2pServiceWebrtc { fn connections(&self) -> BTreeSet; - fn init(sec_key: SecretKey, spawner: S) -> P2pServiceCtx { + fn init(sec_key: SecretKey, spawner: S, rng_seed: [u8; 32]) -> P2pServiceCtx { P2pServiceCtx { sec_key: sec_key.clone(), #[cfg(feature = "p2p-libp2p")] mio: MioService::pending(sec_key.clone().try_into().expect("valid keypair")), - webrtc: ::init(sec_key, spawner), + webrtc: ::init(sec_key, spawner, rng_seed), } } diff --git a/p2p/testing/Cargo.toml b/p2p/testing/Cargo.toml index 6941fcb38d..1e6b4de3ec 100644 --- a/p2p/testing/Cargo.toml +++ b/p2p/testing/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "p2p-testing" -version = "0.14.0" +version = "0.16.0" edition = "2021" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html diff --git a/p2p/testing/src/cluster.rs b/p2p/testing/src/cluster.rs index a199cecd8c..9740e06bbd 100644 --- a/p2p/testing/src/cluster.rs +++ b/p2p/testing/src/cluster.rs @@ -8,6 +8,7 @@ use std::{ use futures::StreamExt; use libp2p::{multiaddr::multiaddr, swarm::DialError, Multiaddr}; +use openmina_core::channels::mpsc; use openmina_core::{ChainId, Substate, DEVNET_CHAIN_ID}; use p2p::{ connection::outgoing::{ @@ -18,7 +19,6 @@ use p2p::{ P2pCallbacks, P2pConfig, P2pMeshsubConfig, P2pState, PeerId, }; use redux::SystemTime; -use tokio::sync::mpsc; use crate::{ event::{event_mapper_effect, RustNodeEvent}, diff --git a/p2p/testing/src/redux.rs b/p2p/testing/src/redux.rs index bff9051f43..065971848a 100644 --- a/p2p/testing/src/redux.rs +++ b/p2p/testing/src/redux.rs @@ -229,10 +229,7 @@ pub(super) fn event_effect(store: &mut crate::redux::Store, event: P2pEvent) -> ), MioEvent::IncomingDataDidReceive(addr, result) => SubStore::dispatch( store, - P2pNetworkSchedulerAction::IncomingDataDidReceive { - addr, - result: result.map(From::from), - }, + P2pNetworkSchedulerAction::IncomingDataDidReceive { addr, result }, ), MioEvent::OutgoingDataDidSend(_, _result) => true, MioEvent::ConnectionDidClose(addr, result) => { diff --git a/p2p/testing/src/rust_node.rs b/p2p/testing/src/rust_node.rs index 8067ee6b0c..7b5bde8d57 100644 --- a/p2p/testing/src/rust_node.rs +++ b/p2p/testing/src/rust_node.rs @@ -5,9 +5,9 @@ use std::{ }; use futures::Stream; +use openmina_core::channels::mpsc; use p2p::{P2pAction, P2pEvent, P2pLimits, P2pState, P2pTimeouts, PeerId}; use redux::{Effects, EnablingCondition, Reducer, SubStore}; -use tokio::sync::mpsc; use crate::{ cluster::{Listener, PeerIdConfig}, @@ -73,14 +73,14 @@ impl RustNodeConfig { pub struct RustNode { store: Store, - event_receiver: mpsc::UnboundedReceiver, + event_receiver: mpsc::RecvStream, } impl RustNode { pub(super) fn new(store: Store, event_receiver: mpsc::UnboundedReceiver) -> Self { RustNode { store, - event_receiver, + event_receiver: event_receiver.stream(), } } @@ -109,7 +109,7 @@ impl RustNode { } fn 
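In `p2p/testing`, the event receiver is wrapped into a `RecvStream` and polled with `poll_next` instead of `poll_recv`. Assuming `mpsc::RecvStream` plays the same role as tokio-stream's `UnboundedReceiverStream`, the equivalent standalone pattern is:

```rust
// Receiver-as-stream: `next()` drives the same `poll_next` used by
// `RustNode::poll_event_receiver` above.
use futures::StreamExt;
use tokio_stream::wrappers::UnboundedReceiverStream;

#[tokio::main(flavor = "current_thread")]
async fn main() {
    let (tx, rx) = tokio::sync::mpsc::unbounded_channel::<u32>();
    let mut events = UnboundedReceiverStream::new(rx);

    tx.send(1).unwrap();
    tx.send(2).unwrap();
    drop(tx); // closing the sender ends the stream

    while let Some(event) = events.next().await {
        println!("event: {event}");
    }
}
```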
poll_event_receiver(&mut self, cx: &mut Context<'_>) -> Poll> { - let event = ready!(Pin::new(&mut self.event_receiver).poll_recv(cx)); + let event = ready!(Pin::new(&mut self.event_receiver).poll_next(cx)); Poll::Ready(event.map(|event| { self.dispatch_event(event.clone()); RustNodeEvent::P2p { event } diff --git a/p2p/testing/src/service.rs b/p2p/testing/src/service.rs index cecf2d50ba..4789e19e05 100644 --- a/p2p/testing/src/service.rs +++ b/p2p/testing/src/service.rs @@ -1,5 +1,6 @@ use std::{collections::VecDeque, time::Instant}; +use openmina_core::channels::mpsc; use p2p::{ identity::SecretKey, service_impl::{ @@ -9,14 +10,13 @@ use p2p::{ }; use rand::{rngs::StdRng, seq::SliceRandom, Rng, SeedableRng}; use redux::{Service, TimeService}; -use tokio::sync::mpsc; use crate::event::{RustNodeEvent, RustNodeEventStore}; pub struct ClusterService { pub rng: StdRng, pub event_sender: mpsc::UnboundedSender, - pub cmd_sender: mpsc::UnboundedSender, + pub cmd_sender: mpsc::TrackedUnboundedSender, mio: MioService, peers: std::collections::BTreeMap, time: Instant, @@ -29,7 +29,7 @@ impl ClusterService { node_idx: usize, secret_key: SecretKey, event_sender: mpsc::UnboundedSender, - cmd_sender: mpsc::UnboundedSender, + cmd_sender: mpsc::TrackedUnboundedSender, time: Instant, ) -> Self { let mio = { @@ -94,7 +94,7 @@ impl P2pServiceWebrtc for ClusterService { &self.event_sender } - fn cmd_sender(&self) -> &mpsc::UnboundedSender { + fn cmd_sender(&self) -> &mpsc::TrackedUnboundedSender { &self.cmd_sender } diff --git a/poseidon/Cargo.toml b/poseidon/Cargo.toml index 1e79ed21d1..20ef69c991 100644 --- a/poseidon/Cargo.toml +++ b/poseidon/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "poseidon" -version = "0.14.0" +version = "0.16.0" edition = "2021" [lints] diff --git a/producer-dashboard/Cargo.toml b/producer-dashboard/Cargo.toml index 69a56d0d27..5177dc8298 100644 --- a/producer-dashboard/Cargo.toml +++ b/producer-dashboard/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "openmina-producer-dashboard" -version = "0.14.0" +version = "0.16.0" edition = "2021" license = "Apache-2.0" diff --git a/producer-dashboard/build.rs b/producer-dashboard/build.rs new file mode 100644 index 0000000000..df10bcc774 --- /dev/null +++ b/producer-dashboard/build.rs @@ -0,0 +1,3 @@ +fn main() { + println!("cargo:rustc-env=SQLX_OFFLINE=true"); +} diff --git a/snark/Cargo.toml b/snark/Cargo.toml index a853c3a3f5..fa45669ac4 100644 --- a/snark/Cargo.toml +++ b/snark/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "snark" -version = "0.14.0" +version = "0.16.0" edition = "2021" license = "Apache-2.0" diff --git a/snark/src/user_command_verify/snark_user_command_verify_actions.rs b/snark/src/user_command_verify/snark_user_command_verify_actions.rs index 54f936b907..294ebda6e0 100644 --- a/snark/src/user_command_verify/snark_user_command_verify_actions.rs +++ b/snark/src/user_command_verify/snark_user_command_verify_actions.rs @@ -2,7 +2,7 @@ use ledger::scan_state::transaction_logic::{valid, verifiable, WithStatus}; use redux::Callback; use serde::{Deserialize, Serialize}; -use openmina_core::{requests::RpcId, ActionEvent}; +use openmina_core::{transaction::TransactionPoolMessageSource, ActionEvent}; use super::{SnarkUserCommandVerifyError, SnarkUserCommandVerifyId}; @@ -14,7 +14,7 @@ pub type SnarkUserCommandVerifyActionWithMetaRef<'a> = pub(super) type OnSuccess = Callback<( SnarkUserCommandVerifyId, Vec, - Option, + TransactionPoolMessageSource, )>; #[derive(Serialize, Deserialize, Debug, Clone, ActionEvent)] @@ -24,7 +24,7 @@ pub 
enum SnarkUserCommandVerifyAction { Init { req_id: SnarkUserCommandVerifyId, commands: Vec>, - from_rpc: Option, + from_source: TransactionPoolMessageSource, on_success: OnSuccess, on_error: Callback<(SnarkUserCommandVerifyId, Vec)>, }, diff --git a/snark/src/user_command_verify/snark_user_command_verify_reducer.rs b/snark/src/user_command_verify/snark_user_command_verify_reducer.rs index 3e4803087e..182fc3c609 100644 --- a/snark/src/user_command_verify/snark_user_command_verify_reducer.rs +++ b/snark/src/user_command_verify/snark_user_command_verify_reducer.rs @@ -23,7 +23,7 @@ pub fn reducer( SnarkUserCommandVerifyAction::Init { commands, req_id, - from_rpc, + from_source, on_success, on_error, } => { @@ -32,7 +32,7 @@ pub fn reducer( substate.jobs.add(SnarkUserCommandVerifyStatus::Init { time: meta.time(), commands: commands.clone(), - from_rpc: *from_rpc, + from_source: *from_source, on_success: on_success.clone(), on_error: on_error.clone(), }); @@ -54,7 +54,7 @@ pub fn reducer( }; let SnarkUserCommandVerifyStatus::Init { commands, - from_rpc, + from_source, on_success, on_error, .. @@ -67,7 +67,7 @@ pub fn reducer( *req = SnarkUserCommandVerifyStatus::Pending { time: meta.time(), commands: std::mem::take(commands), - from_rpc: std::mem::take(from_rpc), + from_source: std::mem::take(from_source), on_success: on_success.clone(), on_error: on_error.clone(), }; @@ -102,7 +102,7 @@ pub fn reducer( return; }; let SnarkUserCommandVerifyStatus::Pending { - from_rpc, + from_source, on_success, .. } = req @@ -111,7 +111,7 @@ pub fn reducer( return; }; - let from_rpc = std::mem::take(from_rpc); + let from_source = std::mem::take(from_source); let commands: Vec = commands.clone(); let on_success = on_success.clone(); @@ -123,7 +123,7 @@ pub fn reducer( // Dispatch let dispatcher = state.into_dispatcher(); - dispatcher.push_callback(on_success, (*req_id, commands, from_rpc)); + dispatcher.push_callback(on_success, (*req_id, commands, from_source)); dispatcher.push(SnarkUserCommandVerifyAction::Finish { req_id: *req_id }); } SnarkUserCommandVerifyAction::Finish { req_id } => { diff --git a/snark/src/user_command_verify/snark_user_command_verify_state.rs b/snark/src/user_command_verify/snark_user_command_verify_state.rs index a97486adb6..eef2697e49 100644 --- a/snark/src/user_command_verify/snark_user_command_verify_state.rs +++ b/snark/src/user_command_verify/snark_user_command_verify_state.rs @@ -4,7 +4,7 @@ use ledger::scan_state::transaction_logic::{valid, verifiable, WithStatus}; use redux::Callback; use serde::{Deserialize, Serialize}; -use openmina_core::requests::{PendingRequests, RpcId}; +use openmina_core::{requests::PendingRequests, transaction::TransactionPoolMessageSource}; use crate::{TransactionVerifier, VerifierSRS}; @@ -47,14 +47,14 @@ pub enum SnarkUserCommandVerifyStatus { Init { time: redux::Timestamp, commands: Vec>, - from_rpc: Option, + from_source: TransactionPoolMessageSource, on_success: super::OnSuccess, on_error: Callback<(SnarkUserCommandVerifyId, Vec)>, }, Pending { time: redux::Timestamp, commands: Vec>, - from_rpc: Option, + from_source: TransactionPoolMessageSource, on_success: super::OnSuccess, on_error: Callback<(SnarkUserCommandVerifyId, Vec)>, }, diff --git a/snark/src/work_verify/snark_work_verify_actions.rs b/snark/src/work_verify/snark_work_verify_actions.rs index 287f835771..286cdaccab 100644 --- a/snark/src/work_verify/snark_work_verify_actions.rs +++ b/snark/src/work_verify/snark_work_verify_actions.rs @@ -1,4 +1,4 @@ -use openmina_core::SubstateAccess; 
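Here `from_rpc: Option<RpcId>` is generalized into `from_source: TransactionPoolMessageSource` across the user-command verification flow. The reducer both copies it (`*from_source`) and calls `std::mem::take` on it, so the type must be `Copy` and `Default`. Its real definition lives in `openmina_core::transaction` and is not shown in this diff; a purely hypothetical shape consistent with those constraints:

```rust
// Hypothetical reconstruction -- the variant names are guesses; only the
// Copy/Default requirements are established by the reducer code above.
type RpcId = u64; // stand-in for openmina_core::requests::RpcId

#[derive(Debug, Default, Clone, Copy)]
pub enum TransactionPoolMessageSource {
    /// No attributable origin.
    #[default]
    None,
    /// Submitted locally over RPC (the old `Some(rpc_id)` case).
    Rpc { id: RpcId },
    /// Arrived from the gossip network.
    Gossip,
}

fn main() {
    let mut source = TransactionPoolMessageSource::Rpc { id: 42 };
    let copied = source; // Copy: `from_source: *from_source`
    let taken = std::mem::take(&mut source); // Default: leaves `None` behind
    println!("{copied:?} {taken:?} {source:?}");
}
```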
+use openmina_core::{snark::SnarkJobId, SubstateAccess}; use serde::{Deserialize, Serialize}; use openmina_core::{snark::Snark, ActionEvent}; @@ -17,7 +17,7 @@ pub enum SnarkWorkVerifyAction { batch: Vec, sender: String, on_success: redux::Callback<(SnarkWorkVerifyId, String, Vec)>, - on_error: redux::Callback<(SnarkWorkVerifyId, String)>, + on_error: redux::Callback<(SnarkWorkVerifyId, String, Vec)>, }, Pending { req_id: SnarkWorkVerifyId, diff --git a/snark/src/work_verify/snark_work_verify_reducer.rs b/snark/src/work_verify/snark_work_verify_reducer.rs index e55d78acd6..ec4d4bab89 100644 --- a/snark/src/work_verify/snark_work_verify_reducer.rs +++ b/snark/src/work_verify/snark_work_verify_reducer.rs @@ -1,4 +1,4 @@ -use openmina_core::{bug_condition, Substate, SubstateAccess}; +use openmina_core::{bug_condition, snark::Snark, Substate, SubstateAccess}; use redux::EnablingCondition; use crate::work_verify_effectful::SnarkWorkVerifyEffectfulAction; @@ -94,16 +94,17 @@ pub fn reducer( }; let callback = on_error.clone(); let sender = std::mem::take(sender); - + let batch = std::mem::take(batch); + let job_ids = batch.iter().map(Snark::job_id).collect(); *req = SnarkWorkVerifyStatus::Error { time: meta.time(), - batch: std::mem::take(batch), + batch, sender: sender.clone(), error: error.clone(), }; // Dispatch let dispatcher = state_context.into_dispatcher(); - dispatcher.push_callback(callback, (*req_id, sender)); + dispatcher.push_callback(callback, (*req_id, sender, job_ids)); dispatcher.push(SnarkWorkVerifyAction::Finish { req_id: *req_id }); } SnarkWorkVerifyAction::Success { req_id } => { diff --git a/snark/src/work_verify/snark_work_verify_state.rs b/snark/src/work_verify/snark_work_verify_state.rs index 048ca41da9..cdd7f7eb1c 100644 --- a/snark/src/work_verify/snark_work_verify_state.rs +++ b/snark/src/work_verify/snark_work_verify_state.rs @@ -2,8 +2,8 @@ use std::sync::Arc; use serde::{Deserialize, Serialize}; -use openmina_core::requests::PendingRequests; use openmina_core::snark::Snark; +use openmina_core::{requests::PendingRequests, snark::SnarkJobId}; use crate::{TransactionVerifier, VerifierSRS}; @@ -50,14 +50,14 @@ pub enum SnarkWorkVerifyStatus { // `PeerId` here. 
sender: String, on_success: redux::Callback<(SnarkWorkVerifyId, String, Vec)>, - on_error: redux::Callback<(SnarkWorkVerifyId, String)>, + on_error: redux::Callback<(SnarkWorkVerifyId, String, Vec)>, }, Pending { time: redux::Timestamp, batch: Vec, sender: String, on_success: redux::Callback<(SnarkWorkVerifyId, String, Vec)>, - on_error: redux::Callback<(SnarkWorkVerifyId, String)>, + on_error: redux::Callback<(SnarkWorkVerifyId, String, Vec)>, }, Error { time: redux::Timestamp, diff --git a/tools/archive-breadcrumb-compare/Cargo.toml b/tools/archive-breadcrumb-compare/Cargo.toml index de3b214149..34437e3fb8 100644 --- a/tools/archive-breadcrumb-compare/Cargo.toml +++ b/tools/archive-breadcrumb-compare/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "openmina-archive-breadcrumb-compare" -version = "0.14.0" +version = "0.16.0" edition = "2021" [dependencies] diff --git a/tools/archive-breadcrumb-compare/src/main.rs b/tools/archive-breadcrumb-compare/src/main.rs index 028e86fca9..ca1879781b 100644 --- a/tools/archive-breadcrumb-compare/src/main.rs +++ b/tools/archive-breadcrumb-compare/src/main.rs @@ -1,4 +1,4 @@ -use mina_p2p_messages::v2::ArchiveTransitionFronntierDiff; +use mina_p2p_messages::v2::ArchiveTransitionFrontierDiff; use std::{collections::HashSet, path::PathBuf}; use anyhow::Result; @@ -289,26 +289,26 @@ async fn compare_binary_diffs( } } -fn load_and_deserialize(path: &PathBuf) -> Result { +fn load_and_deserialize(path: &PathBuf) -> Result { let data = std::fs::read(path)?; let diff = binprot::BinProtRead::binprot_read(&mut data.as_slice())?; Ok(diff) } fn compare_diffs( - ocaml: &ArchiveTransitionFronntierDiff, - openmina: &ArchiveTransitionFronntierDiff, + ocaml: &ArchiveTransitionFrontierDiff, + openmina: &ArchiveTransitionFrontierDiff, ) -> Option { match (ocaml, openmina) { ( - ArchiveTransitionFronntierDiff::BreadcrumbAdded { + ArchiveTransitionFrontierDiff::BreadcrumbAdded { block: (b1, (body_hash1, state_hash1)), accounts_accessed: a1, accounts_created: c1, tokens_used: t1, sender_receipt_chains_from_parent_ledger: s1, }, - ArchiveTransitionFronntierDiff::BreadcrumbAdded { + ArchiveTransitionFrontierDiff::BreadcrumbAdded { block: (b2, (body_hash2, state_hash2)), accounts_accessed: a2, accounts_created: c2, diff --git a/tools/bootstrap-sandbox/Cargo.toml b/tools/bootstrap-sandbox/Cargo.toml index 332daa5e4b..127f94557e 100644 --- a/tools/bootstrap-sandbox/Cargo.toml +++ b/tools/bootstrap-sandbox/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "openmina-bootstrap-sandbox" -version = "0.14.0" +version = "0.16.0" edition = "2021" [dependencies] diff --git a/tools/fuzzing/Cargo.toml b/tools/fuzzing/Cargo.toml index 83098fad5f..e6a1a9a46b 100644 --- a/tools/fuzzing/Cargo.toml +++ b/tools/fuzzing/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "transaction_fuzzer" -version = "0.14.0" +version = "0.16.0" edition = "2021" diff --git a/tools/fuzzing/src/transaction_fuzzer/coverage/reports.rs b/tools/fuzzing/src/transaction_fuzzer/coverage/reports.rs index a326fb9ba1..35f10cc30e 100644 --- a/tools/fuzzing/src/transaction_fuzzer/coverage/reports.rs +++ b/tools/fuzzing/src/transaction_fuzzer/coverage/reports.rs @@ -497,7 +497,7 @@ impl CoverageReport { //println!("{:?}", source_range); for line in start..=end { - if line == lines.len() || lines[line].line.chars().count() == 0 { + if line >= lines.len() || lines[line].line.chars().count() == 0 { continue; } diff --git a/tools/gossipsub-sandbox/Cargo.toml b/tools/gossipsub-sandbox/Cargo.toml index 896f354af8..38dccd29bf 100644 --- 
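The `reports.rs` fix replaces an exact `line == lines.len()` guard with `line >= lines.len()`: a coverage source range can extend more than one line past the end of the file, and the old check only caught the single one-past-the-end index. A minimal reproduction of the failure mode:

```rust
// With two source lines, iterating a range that overshoots shows why `==`
// was insufficient.
fn main() {
    let lines = ["fn main() {", "}"];
    let (start, end) = (0usize, 3usize); // range end beyond the file

    for line in start..=end {
        // Old guard `line == lines.len()` skips index 2 but would index
        // out of bounds (panic) at index 3; `>=` skips both.
        if line >= lines.len() || lines[line].chars().count() == 0 {
            continue;
        }
        println!("covered: {}", lines[line]);
    }
}
```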
a/tools/gossipsub-sandbox/Cargo.toml +++ b/tools/gossipsub-sandbox/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "openmina-gossipsub-sandbox" -version = "0.14.0" +version = "0.16.0" edition = "2021" [dependencies] diff --git a/tools/hash-tool/Cargo.toml b/tools/hash-tool/Cargo.toml index 6eb42d8eca..9ea45fee8c 100644 --- a/tools/hash-tool/Cargo.toml +++ b/tools/hash-tool/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "hash-tool" -version = "0.14.0" +version = "0.16.0" edition = "2021" [dependencies] diff --git a/tools/heartbeats-processor/.sqlx/query-25c9e074156b792e92cbfbaf5954647bfdda59b680b5a393c5324dd6d8a19683.json b/tools/heartbeats-processor/.sqlx/query-1fa0deb48209e10e904bac0830573635fbba9aaf7e0067f72656ea39a412c590.json similarity index 59% rename from tools/heartbeats-processor/.sqlx/query-25c9e074156b792e92cbfbaf5954647bfdda59b680b5a393c5324dd6d8a19683.json rename to tools/heartbeats-processor/.sqlx/query-1fa0deb48209e10e904bac0830573635fbba9aaf7e0067f72656ea39a412c590.json index ea821622ea..6d1bee4992 100644 --- a/tools/heartbeats-processor/.sqlx/query-25c9e074156b792e92cbfbaf5954647bfdda59b680b5a393c5324dd6d8a19683.json +++ b/tools/heartbeats-processor/.sqlx/query-1fa0deb48209e10e904bac0830573635fbba9aaf7e0067f72656ea39a412c590.json @@ -1,6 +1,6 @@ { "db_name": "SQLite", - "query": "\n SELECT\n pk.public_key,\n ss.score,\n ss.blocks_produced,\n datetime(ss.last_updated, 'unixepoch') as last_updated\n FROM submitter_scores ss\n JOIN public_keys pk ON pk.id = ss.public_key_id\n ORDER BY ss.score DESC, ss.blocks_produced DESC\n ", + "query": "\n SELECT\n pk.public_key,\n ss.score,\n ss.blocks_produced,\n datetime(ss.last_updated, 'unixepoch') as last_updated,\n datetime(ss.last_heartbeat, 'unixepoch') as last_heartbeat\n FROM submitter_scores ss\n JOIN public_keys pk ON pk.id = ss.public_key_id\n ORDER BY ss.score DESC, ss.blocks_produced DESC\n ", "describe": { "columns": [ { @@ -22,6 +22,11 @@ "name": "last_updated", "ordinal": 3, "type_info": "Text" + }, + { + "name": "last_heartbeat", + "ordinal": 4, + "type_info": "Text" } ], "parameters": { @@ -31,8 +36,9 @@ false, false, false, + true, true ] }, - "hash": "25c9e074156b792e92cbfbaf5954647bfdda59b680b5a393c5324dd6d8a19683" + "hash": "1fa0deb48209e10e904bac0830573635fbba9aaf7e0067f72656ea39a412c590" } diff --git a/tools/heartbeats-processor/.sqlx/query-5776d825f55385c0b83c30d311e5b68047cc9ce146b3eaba368a69810afd0203.json b/tools/heartbeats-processor/.sqlx/query-5776d825f55385c0b83c30d311e5b68047cc9ce146b3eaba368a69810afd0203.json new file mode 100644 index 0000000000..0dfbb1a74e --- /dev/null +++ b/tools/heartbeats-processor/.sqlx/query-5776d825f55385c0b83c30d311e5b68047cc9ce146b3eaba368a69810afd0203.json @@ -0,0 +1,12 @@ +{ + "db_name": "SQLite", + "query": "\n WITH MaxHeights AS (\n SELECT\n window_id,\n MAX(best_tip_height) as max_height\n FROM heartbeat_presence\n WHERE disabled = FALSE\n GROUP BY window_id\n ),\n PrevMaxHeights AS (\n -- Get the max height from the immediate previous window\n SELECT\n tw.id as window_id,\n prev.max_height as prev_max_height\n FROM time_windows tw\n LEFT JOIN time_windows prev_tw ON prev_tw.id = tw.id - 1\n LEFT JOIN MaxHeights prev ON prev.window_id = prev_tw.id\n )\n UPDATE heartbeat_presence\n SET disabled = TRUE\n WHERE (window_id, best_tip_height) IN (\n SELECT\n hp.window_id,\n hp.best_tip_height\n FROM heartbeat_presence hp\n JOIN PrevMaxHeights pmh ON pmh.window_id = hp.window_id\n WHERE hp.disabled = FALSE\n AND pmh.prev_max_height IS NOT NULL -- Ensure there is a previous window\n AND 
hp.best_tip_height < (pmh.prev_max_height - ?)\n )\n ", + "describe": { + "columns": [], + "parameters": { + "Right": 1 + }, + "nullable": [] + }, + "hash": "5776d825f55385c0b83c30d311e5b68047cc9ce146b3eaba368a69810afd0203" +} diff --git a/tools/heartbeats-processor/.sqlx/query-ad9dff13f32bd4e8193be71bd22a2e52c496fceabbd9a2a916f610fb7ed27a32.json b/tools/heartbeats-processor/.sqlx/query-ad9dff13f32bd4e8193be71bd22a2e52c496fceabbd9a2a916f610fb7ed27a32.json new file mode 100644 index 0000000000..62ae51e31f --- /dev/null +++ b/tools/heartbeats-processor/.sqlx/query-ad9dff13f32bd4e8193be71bd22a2e52c496fceabbd9a2a916f610fb7ed27a32.json @@ -0,0 +1,12 @@ +{ + "db_name": "SQLite", + "query": "\n WITH ValidWindows AS (\n SELECT id, start_time, end_time\n FROM time_windows\n WHERE disabled = FALSE\n AND end_time <= ?2\n AND start_time >= ?1\n ),\n BlockCounts AS (\n -- Count one block per global slot per producer\n SELECT\n public_key_id,\n COUNT(DISTINCT block_global_slot) as blocks\n FROM (\n -- Deduplicate blocks per global slot\n SELECT\n pb.public_key_id,\n pb.block_global_slot\n FROM produced_blocks pb\n JOIN ValidWindows vw ON vw.id = pb.window_id\n -- TODO: enable once block proof validation has been implemented\n -- WHERE pb.validated = TRUE\n GROUP BY pb.public_key_id, pb.block_global_slot\n ) unique_blocks\n GROUP BY public_key_id\n ),\n HeartbeatCounts AS (\n -- Count heartbeats only within valid windows and not disabled\n SELECT\n hp.public_key_id,\n COUNT(DISTINCT hp.window_id) as heartbeats\n FROM heartbeat_presence hp\n JOIN ValidWindows vw ON vw.id = hp.window_id\n WHERE hp.disabled = FALSE\n GROUP BY hp.public_key_id\n ),\n LastHeartbeats AS (\n -- Get last heartbeat time across all windows, including disabled entries\n SELECT\n public_key_id,\n MAX(heartbeat_time) as last_heartbeat\n FROM heartbeat_presence\n GROUP BY public_key_id\n )\n INSERT INTO submitter_scores (\n public_key_id,\n score,\n blocks_produced,\n last_heartbeat\n )\n SELECT\n pk.id,\n COALESCE(hc.heartbeats, 0) as score,\n COALESCE(bc.blocks, 0) as blocks_produced,\n COALESCE(lh.last_heartbeat, 0) as last_heartbeat\n FROM public_keys pk\n LEFT JOIN HeartbeatCounts hc ON hc.public_key_id = pk.id\n LEFT JOIN BlockCounts bc ON bc.public_key_id = pk.id\n LEFT JOIN LastHeartbeats lh ON lh.public_key_id = pk.id\n WHERE hc.heartbeats > 0 OR bc.blocks > 0\n ON CONFLICT(public_key_id) DO UPDATE SET\n score = excluded.score,\n blocks_produced = excluded.blocks_produced,\n last_heartbeat = excluded.last_heartbeat,\n last_updated = strftime('%s', 'now')\n ", + "describe": { + "columns": [], + "parameters": { + "Right": 2 + }, + "nullable": [] + }, + "hash": "ad9dff13f32bd4e8193be71bd22a2e52c496fceabbd9a2a916f610fb7ed27a32" +} diff --git a/tools/heartbeats-processor/.sqlx/query-bc586a064ad3094fe93bf09715e00c3638e403705c437816d56de4af3fcbdb17.json b/tools/heartbeats-processor/.sqlx/query-bc586a064ad3094fe93bf09715e00c3638e403705c437816d56de4af3fcbdb17.json deleted file mode 100644 index f8a843138a..0000000000 --- a/tools/heartbeats-processor/.sqlx/query-bc586a064ad3094fe93bf09715e00c3638e403705c437816d56de4af3fcbdb17.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "db_name": "SQLite", - "query": "\n INSERT INTO submitter_scores (public_key_id, score, blocks_produced)\n SELECT\n pk.id,\n COUNT(DISTINCT hp.window_id) as score,\n COUNT(DISTINCT pb.id) as blocks_produced\n FROM public_keys pk\n LEFT JOIN heartbeat_presence hp ON pk.id = hp.public_key_id\n LEFT JOIN time_windows tw ON hp.window_id = tw.id\n LEFT JOIN produced_blocks pb ON 
pk.id = pb.public_key_id\n WHERE tw.disabled = FALSE\n GROUP BY pk.id\n ON CONFLICT(public_key_id) DO UPDATE SET\n score = excluded.score,\n blocks_produced = excluded.blocks_produced,\n last_updated = strftime('%s', 'now')\n ", - "describe": { - "columns": [], - "parameters": { - "Right": 0 - }, - "nullable": [] - }, - "hash": "bc586a064ad3094fe93bf09715e00c3638e403705c437816d56de4af3fcbdb17" -} diff --git a/tools/heartbeats-processor/.sqlx/query-12261ecc56a9408bc7b95eb66dd939823a72b071adc7428ef353375982274d7f.json b/tools/heartbeats-processor/.sqlx/query-d141ebb15726fe1b50565e833520253a1d56ca5faf94a3c3b3d8661ae0cd1fd6.json similarity index 65% rename from tools/heartbeats-processor/.sqlx/query-12261ecc56a9408bc7b95eb66dd939823a72b071adc7428ef353375982274d7f.json rename to tools/heartbeats-processor/.sqlx/query-d141ebb15726fe1b50565e833520253a1d56ca5faf94a3c3b3d8661ae0cd1fd6.json index fc5b537638..de543a1788 100644 --- a/tools/heartbeats-processor/.sqlx/query-12261ecc56a9408bc7b95eb66dd939823a72b071adc7428ef353375982274d7f.json +++ b/tools/heartbeats-processor/.sqlx/query-d141ebb15726fe1b50565e833520253a1d56ca5faf94a3c3b3d8661ae0cd1fd6.json @@ -1,6 +1,6 @@ { "db_name": "SQLite", - "query": "\n SELECT\n pk.public_key,\n ss.score,\n ss.blocks_produced,\n ss.last_updated\n FROM submitter_scores ss\n JOIN public_keys pk ON pk.id = ss.public_key_id\n ORDER BY ss.score DESC\n ", + "query": "\n SELECT\n pk.public_key,\n ss.score,\n ss.blocks_produced,\n ss.last_updated,\n ss.last_heartbeat\n FROM submitter_scores ss\n JOIN public_keys pk ON pk.id = ss.public_key_id\n ORDER BY ss.score DESC\n ", "describe": { "columns": [ { @@ -22,6 +22,11 @@ "name": "last_updated", "ordinal": 3, "type_info": "Integer" + }, + { + "name": "last_heartbeat", + "ordinal": 4, + "type_info": "Integer" } ], "parameters": { @@ -31,8 +36,9 @@ false, false, false, + false, false ] }, - "hash": "12261ecc56a9408bc7b95eb66dd939823a72b071adc7428ef353375982274d7f" + "hash": "d141ebb15726fe1b50565e833520253a1d56ca5faf94a3c3b3d8661ae0cd1fd6" } diff --git a/tools/heartbeats-processor/Cargo.toml b/tools/heartbeats-processor/Cargo.toml index fa55fddd83..617eb18447 100644 --- a/tools/heartbeats-processor/Cargo.toml +++ b/tools/heartbeats-processor/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "heartbeats-processor" -version = "0.14.0" +version = "0.16.0" edition = "2021" [dependencies] @@ -15,6 +15,8 @@ dotenv = "0.15" clap = { version = "4.4", features = ["derive"] } gcloud-sdk = { version = "0.26.0", default-features = false, features = ["google-firestore-v1"] } base64 = "0.22" +mina-tree = { path = "../../ledger" } +snark = { path = "../../snark" } mina-p2p-messages = { workspace = true } openmina-core = { path = "../../core" } \ No newline at end of file diff --git a/tools/heartbeats-processor/schema.sql b/tools/heartbeats-processor/schema.sql index f1a99dd4e6..ebb3cee0b5 100644 --- a/tools/heartbeats-processor/schema.sql +++ b/tools/heartbeats-processor/schema.sql @@ -45,6 +45,7 @@ CREATE TABLE IF NOT EXISTS submitter_scores ( score INTEGER NOT NULL DEFAULT 0, last_updated INTEGER NOT NULL DEFAULT (strftime('%s', 'now')), blocks_produced INTEGER NOT NULL DEFAULT 0, + last_heartbeat INTEGER NOT NULL DEFAULT 0, FOREIGN KEY (public_key_id) REFERENCES public_keys(id) ); @@ -83,6 +84,22 @@ ON heartbeat_presence(public_key_id); CREATE INDEX IF NOT EXISTS idx_heartbeat_presence_global_slot ON heartbeat_presence(best_tip_global_slot); +-- Index for presence queries by best tip height +CREATE INDEX IF NOT EXISTS 
idx_heartbeat_presence_height +ON heartbeat_presence(best_tip_height); + +-- Combined index for height queries with disabled flag +CREATE INDEX IF NOT EXISTS idx_heartbeat_presence_window_disabled_height +ON heartbeat_presence(window_id, disabled, best_tip_height); + +-- Combined index for disabled flag, window and global slot lookups +CREATE INDEX IF NOT EXISTS idx_heartbeat_presence_window_disabled_global_slot +ON heartbeat_presence(window_id, disabled, best_tip_global_slot); + +-- Index for heartbeat time queries +CREATE INDEX IF NOT EXISTS idx_heartbeat_presence_time +ON heartbeat_presence(heartbeat_time); + -- Index for submitter counts lookup CREATE INDEX IF NOT EXISTS idx_submitter_counts_last_seen ON submitter_counts(last_seen); diff --git a/tools/heartbeats-processor/src/local_db.rs b/tools/heartbeats-processor/src/local_db.rs index 6a9f62ffbd..f5603e5077 100644 --- a/tools/heartbeats-processor/src/local_db.rs +++ b/tools/heartbeats-processor/src/local_db.rs @@ -11,6 +11,7 @@ use crate::config::Config; use crate::remote_db::BlockInfo; use crate::remote_db::HeartbeatChunkState; use crate::time::*; +use mina_tree::proofs::verification::verify_block; #[derive(Debug)] pub struct HeartbeatPresence { @@ -241,13 +242,74 @@ async fn batch_insert_produced_blocks(pool: &SqlitePool, blocks: &[ProducedBlock Ok(()) } +/// Marks heartbeat presence entries as outdated (disabled) based on block height comparisons. +/// +/// This function performs the following steps: +/// 1. Finds the maximum block height for each window (considering only non-disabled entries). +/// 2. Identifies the previous window's maximum block height for each window. +/// 3. Marks a presence entry as disabled if its block height is less than: +/// - The maximum block height of the previous window minus a tolerance of $HEIGHT_TOLERANCE blocks (if it exists). +/// +/// This approach allows for a reasonable tolerance in synchronization: +/// - Entries matching or exceeding the previous window's max height - $HEIGHT_TOLERANCE are considered up-to-date. +/// - This allows for slight delays in block propagation between windows. +/// +/// Note: The first window in the sequence will not have any entries marked as disabled, +/// as there is no previous window to compare against. +/// +/// Returns the number of presence entries marked as disabled. +async fn mark_outdated_presence(pool: &SqlitePool) -> Result { + const HEIGHT_TOLERANCE: i64 = 5; + + let affected = sqlx::query!( + r#" + WITH MaxHeights AS ( + SELECT + window_id, + MAX(best_tip_height) as max_height + FROM heartbeat_presence + WHERE disabled = FALSE + GROUP BY window_id + ), + PrevMaxHeights AS ( + -- Get the max height from the immediate previous window + SELECT + tw.id as window_id, + prev.max_height as prev_max_height + FROM time_windows tw + LEFT JOIN time_windows prev_tw ON prev_tw.id = tw.id - 1 + LEFT JOIN MaxHeights prev ON prev.window_id = prev_tw.id + ) + UPDATE heartbeat_presence + SET disabled = TRUE + WHERE (window_id, best_tip_height) IN ( + SELECT + hp.window_id, + hp.best_tip_height + FROM heartbeat_presence hp + JOIN PrevMaxHeights pmh ON pmh.window_id = hp.window_id + WHERE hp.disabled = FALSE + AND pmh.prev_max_height IS NOT NULL -- Ensure there is a previous window + AND hp.best_tip_height < (pmh.prev_max_height - ?) 
+ ) + "#, + HEIGHT_TOLERANCE + ) + .execute(pool) + .await?; + + Ok(affected.rows_affected() as usize) +} + pub async fn process_heartbeats( db: &FirestoreDb, pool: &SqlitePool, config: &Config, -) -> Result<()> { +) -> Result { let last_processed_time = get_last_processed_time(pool, Some(config)).await?; let now = Utc::now(); + // Don't fetch heartbeats beyond window range end + let end_time = config.window_range_end.min(now); let mut total_heartbeats = 0; let mut latest_time = last_processed_time; @@ -265,8 +327,13 @@ pub async fn process_heartbeats( last_timestamp: None, }; + // Its ok to call these functions multiple times because the result is cached + let verifier_index = snark::BlockVerifier::make(); + let verifier_srs = snark::get_srs(); + loop { - let heartbeats = crate::remote_db::fetch_heartbeat_chunk(db, &mut chunk_state, now).await?; + let heartbeats = + crate::remote_db::fetch_heartbeat_chunk(db, &mut chunk_state, end_time).await?; if heartbeats.is_empty() { break; } @@ -323,67 +390,92 @@ pub async fn process_heartbeats( processed_heartbeats.insert(idx); let best_tip = entry.best_tip_block(); + let public_key_id = *public_key_map.get(&entry.submitter).unwrap(); + let has_presence = + (entry.is_synced() || entry.is_catchup()) && best_tip.is_some(); + + // Record presence only if node is synced and has a best tip + if has_presence { + presence_batch.push(HeartbeatPresence { + window_id: window.id.unwrap(), + public_key_id, + best_tip: best_tip.unwrap(), // Cannot fail due to the above check + heartbeat_time: to_unix_timestamp(entry.create_time), + }); + presence_count += 1; + } else { + skipped_count += 1; + } + + // Process produced blocks regardless of sync status + match entry + .last_produced_block_info() + .map(|bi| (bi.clone(), bi.block_header_decoded())) + { + None => (), // No block to process + Some((block_info, Ok(block_header))) => { + let key = (public_key_id, block_info.hash.clone()); + + if let Some(first_seen) = seen_blocks.get(&key) { + blocks_duplicate += 1; + println!( + "Duplicate block detected: {} (height: {}, producer: {}, peer_id: {}) [first seen at {}, now at {}]", + key.1, + block_info.height, + entry.submitter, + entry.peer_id().unwrap_or_else(|| "unknown".to_string()), + first_seen, + entry.create_time + ); + continue; + } + + // Verify that the block slot matches the expected one for the current time + // Allow a difference of 1 in either direction + let expected_slot = global_slot_at_time(entry.create_time); + if (block_info.global_slot as i64 - expected_slot as i64).abs() > 1 { + println!( + "WARNING: Invalid block slot: {} (height: {}, producer: {}, expected slot: {}, actual slot: {})", + block_info.hash, block_info.height, entry.submitter, expected_slot, block_info.global_slot + ); + continue; + } - if entry.is_synced() && best_tip.is_some() { - if let Some(&public_key_id) = public_key_map.get(&entry.submitter) { - presence_batch.push(HeartbeatPresence { - window_id: window.id.unwrap(), - public_key_id, - best_tip: best_tip.unwrap(), // Cannot fail due to the above check - heartbeat_time: to_unix_timestamp(entry.create_time), - }); - presence_count += 1; - - // Add produced block if it exists - match entry.last_produced_block_decoded() { - Ok(Some(block)) => { - let block_data = entry.last_produced_block_raw().unwrap(); // Cannot fail, we have the block - let key = (public_key_id, block.hash().to_string()); - - if let Some(first_seen) = seen_blocks.get(&key) { - blocks_duplicate += 1; - println!( - "Duplicate block detected: {} (height: {}, 
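Restated outside SQL, the disabling rule applied by `mark_outdated_presence` is a per-window comparison against the previous window's best height with a fixed tolerance. The following stand-alone predicate mirrors the `WHERE` clause above (illustrative data only):

```rust
// An entry in window N is disabled when its height falls more than
// HEIGHT_TOLERANCE blocks below window N-1's maximum.
const HEIGHT_TOLERANCE: i64 = 5;

fn is_outdated(entry_height: i64, prev_window_max: Option<i64>) -> bool {
    match prev_window_max {
        // First window: nothing to compare against, never disabled.
        None => false,
        Some(prev_max) => entry_height < prev_max - HEIGHT_TOLERANCE,
    }
}

fn main() {
    assert!(!is_outdated(100, None)); // first window
    assert!(!is_outdated(96, Some(100))); // within tolerance (threshold 95)
    assert!(is_outdated(94, Some(100))); // more than 5 behind: disabled
}
```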
producer: {}, peer_id: {}) [first seen at {}, now at {}]", - key.1, - block.height(), - entry.submitter, - entry.peer_id().unwrap_or_else(|| "unknown".to_string()), - first_seen, - entry.create_time - ); - continue; - } - - seen_blocks.insert(key.clone(), entry.create_time); - produced_blocks_batch.push(ProducedBlock { - window_id: window.id.unwrap(), - public_key_id, - block_hash: block.hash().to_string(), - block_height: block.height(), - block_global_slot: block.global_slot(), - block_data, - }); - } - Ok(None) => (), // No block to process - Err(e) => { - println!( - "WARNING: Failed to decode block from {}: {}", - entry.submitter, e - ) - } + // Verify block proof + if !verify_block(&block_header, &verifier_index, &verifier_srs) { + println!( + "WARNING: Invalid block proof: {} (height: {}, producer: {})", + block_info.hash, block_info.height, entry.submitter + ); + continue; + } + + if has_presence { + seen_blocks.insert(key.clone(), entry.create_time); + produced_blocks_batch.push(ProducedBlock { + window_id: window.id.unwrap(), + public_key_id, + block_hash: block_info.hash, + block_height: block_info.height, + block_global_slot: block_info.global_slot, + block_data: block_info.base64_encoded_header, + }); + } else { + println!( + "WARNING: Block produced by unsynced node: {} (height: {}, producer: {})", + block_info.hash, block_info.height, entry.submitter + ); + println!("Submitter: {:?}", entry.submitter); + println!("Sync status: {}", entry.sync_phase().unwrap_or_default()); + println!("Best tip: {:?}", entry.best_tip_block().map(|b| b.hash)); } } - } else { - if let Ok(Some(block)) = entry.last_produced_block_decoded() { + Some((_block_info, Err(e))) => { println!( - "Skipping unsynced block: {} (height: {}, producer: {}, peer_id: {})", - block.hash(), - block.height(), - entry.submitter, - entry.peer_id().unwrap_or_else(|| "unknown".to_string()) - ); + "WARNING: Failed to decode block from {}: {}", + entry.submitter, e + ) } - skipped_count += 1; } } } @@ -428,9 +520,18 @@ pub async fn process_heartbeats( if latest_time > last_processed_time { update_last_processed_time(pool, latest_time).await?; + + // Mark outdated presence entries as disabled + let disabled_count = mark_outdated_presence(pool).await?; + if disabled_count > 0 { + println!( + "Marked {} outdated presence entries as disabled", + disabled_count + ); + } } - Ok(()) + Ok(total_heartbeats) } pub async fn create_tables_from_file(pool: &SqlitePool) -> Result<()> { @@ -483,27 +584,80 @@ pub async fn toggle_windows( Ok(()) } -// TODO: multiple blocks for the same slot should be counted as one // TODO: take into account the validated flag to count blocks -pub async fn update_scores(pool: &SqlitePool) -> Result<()> { +pub async fn update_scores(pool: &SqlitePool, config: &Config) -> Result<()> { + let window_start = to_unix_timestamp(config.window_range_start); + let current_time = chrono::Utc::now().timestamp(); + sqlx::query!( r#" - INSERT INTO submitter_scores (public_key_id, score, blocks_produced) + WITH ValidWindows AS ( + SELECT id, start_time, end_time + FROM time_windows + WHERE disabled = FALSE + AND end_time <= ?2 + AND start_time >= ?1 + ), + BlockCounts AS ( + -- Count one block per global slot per producer + SELECT + public_key_id, + COUNT(DISTINCT block_global_slot) as blocks + FROM ( + -- Deduplicate blocks per global slot + SELECT + pb.public_key_id, + pb.block_global_slot + FROM produced_blocks pb + JOIN ValidWindows vw ON vw.id = pb.window_id + -- TODO: enable once block proof validation has been 
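The slot sanity check above compares the block's claimed global slot against the slot derived from the heartbeat's creation time, allowing a difference of at most one slot to absorb skew around slot boundaries. With the 180-second slots used by `global_slot_at_time` (defined later in this file), the arithmetic looks like this:

```rust
// Slot plausibility: claimed slot must be within one slot of the slot
// implied by the heartbeat timestamp.
fn expected_slot(ms_since_genesis: u64) -> u32 {
    (ms_since_genesis / 180_000) as u32
}

fn slot_is_plausible(claimed_slot: u32, ms_since_genesis: u64) -> bool {
    let expected = expected_slot(ms_since_genesis);
    (claimed_slot as i64 - expected as i64).abs() <= 1
}

fn main() {
    // 10 minutes after genesis -> slot 3 (600_000 / 180_000).
    assert_eq!(expected_slot(600_000), 3);
    assert!(slot_is_plausible(3, 600_000));
    assert!(slot_is_plausible(4, 600_000)); // one slot ahead: allowed
    assert!(!slot_is_plausible(6, 600_000)); // too far off: rejected
}
```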
implemented + -- WHERE pb.validated = TRUE + GROUP BY pb.public_key_id, pb.block_global_slot + ) unique_blocks + GROUP BY public_key_id + ), + HeartbeatCounts AS ( + -- Count heartbeats only within valid windows and not disabled + SELECT + hp.public_key_id, + COUNT(DISTINCT hp.window_id) as heartbeats + FROM heartbeat_presence hp + JOIN ValidWindows vw ON vw.id = hp.window_id + WHERE hp.disabled = FALSE + GROUP BY hp.public_key_id + ), + LastHeartbeats AS ( + -- Get last heartbeat time across all windows, including disabled entries + SELECT + public_key_id, + MAX(heartbeat_time) as last_heartbeat + FROM heartbeat_presence + GROUP BY public_key_id + ) + INSERT INTO submitter_scores ( + public_key_id, + score, + blocks_produced, + last_heartbeat + ) SELECT pk.id, - COUNT(DISTINCT hp.window_id) as score, - COUNT(DISTINCT pb.id) as blocks_produced + COALESCE(hc.heartbeats, 0) as score, + COALESCE(bc.blocks, 0) as blocks_produced, + COALESCE(lh.last_heartbeat, 0) as last_heartbeat FROM public_keys pk - LEFT JOIN heartbeat_presence hp ON pk.id = hp.public_key_id - LEFT JOIN time_windows tw ON hp.window_id = tw.id - LEFT JOIN produced_blocks pb ON pk.id = pb.public_key_id - WHERE tw.disabled = FALSE - GROUP BY pk.id + LEFT JOIN HeartbeatCounts hc ON hc.public_key_id = pk.id + LEFT JOIN BlockCounts bc ON bc.public_key_id = pk.id + LEFT JOIN LastHeartbeats lh ON lh.public_key_id = pk.id + WHERE hc.heartbeats > 0 OR bc.blocks > 0 ON CONFLICT(public_key_id) DO UPDATE SET score = excluded.score, blocks_produced = excluded.blocks_produced, + last_heartbeat = excluded.last_heartbeat, last_updated = strftime('%s', 'now') - "# + "#, + window_start, + current_time ) .execute(pool) .await?; @@ -537,9 +691,9 @@ pub async fn get_max_scores(pool: &SqlitePool) -> Result { Ok(MaxScores { total, current }) } -pub async fn view_scores(pool: &SqlitePool) -> Result<()> { +pub async fn view_scores(pool: &SqlitePool, config: &Config) -> Result<()> { // Make sure scores are up to date - update_scores(pool).await?; + update_scores(pool, config).await?; let scores = sqlx::query!( r#" @@ -547,7 +701,8 @@ pub async fn view_scores(pool: &SqlitePool) -> Result<()> { pk.public_key, ss.score, ss.blocks_produced, - datetime(ss.last_updated, 'unixepoch') as last_updated + datetime(ss.last_updated, 'unixepoch') as last_updated, + datetime(ss.last_heartbeat, 'unixepoch') as last_heartbeat FROM submitter_scores ss JOIN public_keys pk ON pk.id = ss.public_key_id ORDER BY ss.score DESC, ss.blocks_produced DESC @@ -558,22 +713,31 @@ pub async fn view_scores(pool: &SqlitePool) -> Result<()> { let max_scores = get_max_scores(pool).await?; + println!("\nSubmitter Scores Summary:"); + println!("Current maximum score possible: {}", max_scores.current); + println!("Total maximum score possible: {}", max_scores.total); println!("\nSubmitter Scores:"); - println!("----------------------------------------"); + println!("--------------------------------------------------------"); println!( - "Public Key | Score | Blocks | Current Max | Total Max | Last Updated" + "Public Key | Score | Score % | Blocks | Last Updated | Last Heartbeat" ); - println!("----------------------------------------"); + println!("--------------------------------------------------------"); for row in scores { + let percentage = if max_scores.current > 0 { + (row.score as f64 / max_scores.current as f64) * 100.0 + } else { + 0.0 + }; + println!( - "{:<40} | {:>5} | {:>6} | {:>11} | {:>9} | {}", + "{:<40} | {:>5} | {:>6.2}% | {:>6} | {} | {}", row.public_key, row.score, + 
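The rewritten scoring query resolves the removed TODO ("multiple blocks for the same slot should be counted as one"): the `BlockCounts` CTE counts `DISTINCT block_global_slot` per producer, so re-submitted copies of a block for the same slot score once. In plain Rust terms:

```rust
// What the BlockCounts CTE computes: deduplicate (producer, global_slot)
// pairs, then count slots per producer. Stand-in data.
use std::collections::{BTreeMap, BTreeSet};

fn main() {
    // (public_key_id, block_global_slot) rows from produced_blocks.
    let produced = [(1u32, 100u32), (1, 100), (1, 101), (2, 100)];

    let mut per_producer: BTreeMap<u32, BTreeSet<u32>> = BTreeMap::new();
    for (producer, slot) in produced {
        per_producer.entry(producer).or_default().insert(slot);
    }

    // COUNT(DISTINCT block_global_slot) ... GROUP BY public_key_id
    let blocks: BTreeMap<u32, usize> =
        per_producer.iter().map(|(k, v)| (*k, v.len())).collect();
    assert_eq!(blocks[&1], 2); // slots 100 and 101; the duplicate counts once
    assert_eq!(blocks[&2], 1);
}
```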
@@ -537,9 +691,9 @@ pub async fn get_max_scores(pool: &SqlitePool) -> Result<MaxScores> {
     Ok(MaxScores { total, current })
 }
 
-pub async fn view_scores(pool: &SqlitePool) -> Result<()> {
+pub async fn view_scores(pool: &SqlitePool, config: &Config) -> Result<()> {
     // Make sure scores are up to date
-    update_scores(pool).await?;
+    update_scores(pool, config).await?;
 
     let scores = sqlx::query!(
         r#"
@@ -547,7 +701,8 @@ pub async fn view_scores(pool: &SqlitePool) -> Result<()> {
             pk.public_key,
             ss.score,
             ss.blocks_produced,
-            datetime(ss.last_updated, 'unixepoch') as last_updated
+            datetime(ss.last_updated, 'unixepoch') as last_updated,
+            datetime(ss.last_heartbeat, 'unixepoch') as last_heartbeat
         FROM submitter_scores ss
         JOIN public_keys pk ON pk.id = ss.public_key_id
         ORDER BY ss.score DESC, ss.blocks_produced DESC
@@ -558,22 +713,31 @@ pub async fn view_scores(pool: &SqlitePool) -> Result<()> {
 
     let max_scores = get_max_scores(pool).await?;
 
+    println!("\nSubmitter Scores Summary:");
+    println!("Current maximum score possible: {}", max_scores.current);
+    println!("Total maximum score possible: {}", max_scores.total);
     println!("\nSubmitter Scores:");
-    println!("----------------------------------------");
+    println!("--------------------------------------------------------");
     println!(
-        "Public Key | Score | Blocks | Current Max | Total Max | Last Updated"
+        "Public Key | Score | Score % | Blocks | Last Updated | Last Heartbeat"
     );
-    println!("----------------------------------------");
+    println!("--------------------------------------------------------");
 
     for row in scores {
+        let percentage = if max_scores.current > 0 {
+            (row.score as f64 / max_scores.current as f64) * 100.0
+        } else {
+            0.0
+        };
+
         println!(
-            "{:<40} | {:>5} | {:>6} | {:>11} | {:>9} | {}",
+            "{:<40} | {:>5} | {:>6.2}% | {:>6} | {} | {}",
             row.public_key,
             row.score,
+            percentage,
             row.blocks_produced,
-            max_scores.current,
-            max_scores.total,
-            row.last_updated.unwrap_or_default()
+            row.last_updated.unwrap_or_default(),
+            row.last_heartbeat.unwrap_or_default()
         );
     }
 
@@ -711,3 +875,17 @@ pub async fn mark_disabled_windows(pool: &SqlitePool, config: &Config) -> Result<()> {
     }
     Ok(())
 }
+
+fn global_slot_at_time(time: DateTime<Utc>) -> u32 {
+    use chrono::FixedOffset;
+    let slot_duration = 180_000;
+    let genesis_state_timestamp =
+        DateTime::<FixedOffset>::parse_from_rfc3339("2024-04-09T21:00:00Z")
+            .unwrap()
+            .to_utc();
+    let slot_start_ms = genesis_state_timestamp.timestamp_millis() as u64;
+    let time_ms = time.timestamp_millis() as u64;
+
+    let slot_diff = (time_ms - slot_start_ms) / slot_duration;
+    slot_diff as u32
+}
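// Editor's note: worked example for `global_slot_at_time` above. With 180_000 ms
// (3 minute) slots counted from the genesis timestamp 2024-04-09T21:00:00Z, a
// time three hours after genesis falls in slot 10_800_000 / 180_000 = 60. Note
// the u64 subtraction: a pre-genesis timestamp would underflow, so callers are
// expected to pass post-genesis times only.
#[test]
fn global_slot_example() {
    use chrono::{DateTime, FixedOffset};
    let t = DateTime::<FixedOffset>::parse_from_rfc3339("2024-04-10T00:00:00Z")
        .unwrap()
        .to_utc();
    assert_eq!(global_slot_at_time(t), 60);
}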
diff --git a/tools/heartbeats-processor/src/main.rs b/tools/heartbeats-processor/src/main.rs
index 80e1c4ae7d..c52f6dc039 100644
--- a/tools/heartbeats-processor/src/main.rs
+++ b/tools/heartbeats-processor/src/main.rs
@@ -61,9 +61,13 @@ enum Commands {
     },
 }
 
-async fn post_scores_to_firestore(pool: &SqlitePool, db: &FirestoreDb) -> Result<()> {
+async fn post_scores_to_firestore(
+    pool: &SqlitePool,
+    db: &FirestoreDb,
+    config: &Config,
+) -> Result<()> {
     // Make sure scores are up to date
-    local_db::update_scores(pool).await?;
+    local_db::update_scores(pool, config).await?;
 
     let scores = sqlx::query!(
         r#"
@@ -71,7 +75,8 @@ async fn post_scores_to_firestore(pool: &SqlitePool, db: &FirestoreDb) -> Result<()> {
             pk.public_key,
             ss.score,
             ss.blocks_produced,
-            ss.last_updated
+            ss.last_updated,
+            ss.last_heartbeat
         FROM submitter_scores ss
         JOIN public_keys pk ON pk.id = ss.public_key_id
         ORDER BY ss.score DESC
@@ -87,6 +92,7 @@ async fn post_scores_to_firestore(pool: &SqlitePool, db: &FirestoreDb) -> Result<()> {
             score: row.score,
             blocks_produced: row.blocks_produced,
             last_updated: row.last_updated,
+            last_heartbeat: row.last_heartbeat,
         })
         .collect();
 
@@ -106,10 +112,12 @@ async fn run_process_loop(
     loop {
         println!("Processing heartbeats...");
-        local_db::process_heartbeats(db, pool, config).await?;
+        let count = local_db::process_heartbeats(db, pool, config).await?;
 
-        println!("Posting scores...");
-        post_scores_to_firestore(pool, db).await?;
+        if count > 0 {
+            println!("Posting scores...");
+            post_scores_to_firestore(pool, db, config).await?;
+        }
 
         println!("Sleeping for {} seconds...", interval_seconds);
         tokio::time::sleep(interval).await;
@@ -149,12 +157,12 @@ async fn main() -> Result<()> {
             local_db::toggle_windows(&pool, start, end, disabled).await?;
         }
         Commands::ViewScores => {
-            local_db::view_scores(&pool).await?;
+            local_db::view_scores(&pool, &config).await?;
         }
         Commands::PostScores => {
             println!("Initializing firestore connection...");
             let db = remote_db::get_db(&config).await?;
-            post_scores_to_firestore(&pool, &db).await?;
+            post_scores_to_firestore(&pool, &db, &config).await?;
         }
         Commands::SetLastProcessed { time } => {
             local_db::set_last_processed_time(&pool, &time).await?;
diff --git a/tools/heartbeats-processor/src/remote_db.rs b/tools/heartbeats-processor/src/remote_db.rs
index bec2f5b47c..b994ddcea1 100644
--- a/tools/heartbeats-processor/src/remote_db.rs
+++ b/tools/heartbeats-processor/src/remote_db.rs
@@ -1,11 +1,8 @@
-use std::sync::Arc;
-
 use anyhow::Result;
 use base64::{engine::general_purpose, Engine as _};
 use chrono::{DateTime, Duration, Utc};
 use firestore::*;
 use mina_p2p_messages::v2;
-use openmina_core::block::{ArcBlockWithHash, BlockWithHash};
 use serde::{Deserialize, Serialize};
 use serde_json::Value;
 
@@ -32,6 +29,14 @@ pub struct HeartbeatEntry {
     pub decoded_payload: Option<Value>,
 }
 
+#[derive(Clone, Debug, Deserialize)]
+pub struct ProducedBlockInfo {
+    pub height: u32,
+    pub global_slot: u32,
+    pub hash: String,
+    pub base64_encoded_header: String,
+}
+
 #[derive(Debug)]
 pub struct BlockInfo {
     pub hash: String,
@@ -39,19 +44,19 @@ pub struct BlockInfo {
     pub global_slot: u64,
 }
 
-fn base64_decode_block(
-    encoded: &str,
-) -> Result<v2::MinaBlockBlockStableV2, String> {
-    use base64::{engine::general_purpose::URL_SAFE, Engine as _};
-    use mina_p2p_messages::binprot::BinProtRead;
+impl ProducedBlockInfo {
+    pub fn block_header_decoded(&self) -> Result<v2::MinaBlockHeaderStableV2, String> {
+        use base64::{engine::general_purpose::URL_SAFE, Engine as _};
+        use mina_p2p_messages::binprot::BinProtRead;
 
-    let decoded = URL_SAFE
-        .decode(encoded)
-        .map_err(|_| "Could not decode base64".to_string())?;
-    let block = v2::MinaBlockBlockStableV2::binprot_read(&mut &decoded[..])
-        .map_err(|e| format!("Could not decode block: {:?}", e))?;
+        let decoded = URL_SAFE
+            .decode(&self.base64_encoded_header)
+            .map_err(|_| "Could not decode base64".to_string())?;
+        let block_header = v2::MinaBlockHeaderStableV2::binprot_read(&mut &decoded[..])
+            .map_err(|e| format!("Could not decode block header: {:?}", e))?;
 
-    Ok(block)
+        Ok(block_header)
+    }
 }
 
 impl HeartbeatEntry {
@@ -70,22 +75,19 @@ impl HeartbeatEntry {
             .map(|s| s.to_string())
     }
 
-    pub fn last_produced_block_raw(&self) -> Option<String> {
-        self.decoded_payload
+    pub fn last_produced_block_info(&self) -> Option<ProducedBlockInfo> {
+        let result = self
+            .decoded_payload
             .as_ref()
-            .and_then(|status| status.get("last_produced_block"))
-            .and_then(|block| block.as_str())
-            .map(|s| s.to_string())
-    }
+            .and_then(|status| status.get("last_produced_block_info"))
+            .filter(|v| !v.is_null())
+            .map(|block_info| serde_json::from_value(block_info.clone()))?;
 
-    pub fn last_produced_block_decoded(&self) -> Result<Option<ArcBlockWithHash>, String> {
-        match self.last_produced_block_raw() {
-            None => Ok(None),
-            Some(encoded) => {
-                let block = base64_decode_block(&encoded)?;
-                let block = BlockWithHash::try_new(Arc::new(block))
-                    .map_err(|e| format!("Invalid block: {}", e))?;
-                Ok(Some(block))
+        match result {
+            Ok(info) => Some(info),
+            Err(e) => {
+                eprintln!("Invalid block header: {:?}", e);
+                None
             }
         }
     }
@@ -111,6 +113,7 @@ impl HeartbeatEntry {
         })
     }
 
+    #[allow(dead_code)]
     pub fn sync_status(&self) -> Option<String> {
         self.transition_frontier()
             .and_then(|tf| tf.get("sync"))
             .map(|status| status.as_str().unwrap().to_string())
     }
 
+    pub fn sync_phase(&self) -> Option<String> {
+        self.transition_frontier()
+            .and_then(|tf| tf.get("sync"))
+            .and_then(|sync| sync.get("phase"))
+            .map(|phase| phase.as_str().unwrap().to_string())
+    }
+
     pub fn is_synced(&self) -> bool {
-        self.sync_status()
+        self.sync_phase()
             .as_ref()
             .map(|status| status == "Synced")
             .unwrap_or(false)
     }
+
+    pub fn is_catchup(&self) -> bool {
+        self.sync_phase()
+            .as_ref()
+            .map(|status| status == "Catchup")
+            .unwrap_or(false)
+    }
 }
 
 #[derive(Debug, Serialize, Deserialize)]
@@ -135,6 +152,8 @@ pub struct ScoreDocument {
     pub blocks_produced: i64,
     #[serde(rename = "lastUpdated")]
     pub last_updated: i64,
+    #[serde(rename = "lastHeartbeat")]
+    pub last_heartbeat: i64,
 }
 
 pub async fn get_db(config: &Config) -> Result<FirestoreDb> {
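// Editor's note: a sketch of the heartbeat payload shape the accessors above
// assume, inferred from the fields they read ("sync" -> "phase" and
// "last_produced_block_info"); the exact document layout, including the
// "transition_frontier" key, is an assumption here.
#[test]
fn sync_phase_extraction_sketch() {
    let payload = serde_json::json!({
        "transition_frontier": { "sync": { "phase": "Synced" } },
        "last_produced_block_info": null
    });
    let phase = payload
        .get("transition_frontier")
        .and_then(|tf| tf.get("sync"))
        .and_then(|sync| sync.get("phase"))
        .and_then(|p| p.as_str());
    assert_eq!(phase, Some("Synced"));
}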
{}", state.chunk_start); + println!( + "Fetching heartbeat chunk... {} to {}", + state.chunk_start, chunk_end + ); let query = db .fluent() @@ -195,7 +218,10 @@ pub async fn fetch_heartbeat_chunk( q.for_all(conditions) }) - .order_by([("createTime", FirestoreQueryDirection::Ascending)]) + .order_by([ + ("createTime", FirestoreQueryDirection::Ascending), + ("__name__", FirestoreQueryDirection::Ascending), + ]) .limit(FIRESTORE_BATCH_SIZE); let mut batch: Vec = query.obj().query().await?; diff --git a/tools/ledger-tool/Cargo.toml b/tools/ledger-tool/Cargo.toml index 9f913efc62..0fde3fb55b 100644 --- a/tools/ledger-tool/Cargo.toml +++ b/tools/ledger-tool/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "ledger-tool" -version = "0.14.0" +version = "0.16.0" edition = "2021" [dependencies] diff --git a/tools/salsa-simple/Cargo.toml b/tools/salsa-simple/Cargo.toml index 85a42065c2..fb85420026 100644 --- a/tools/salsa-simple/Cargo.toml +++ b/tools/salsa-simple/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "salsa-simple" -version = "0.14.0" +version = "0.16.0" edition = "2021" [dev-dependencies] diff --git a/tools/transport/Cargo.toml b/tools/transport/Cargo.toml index 77c6bfae90..507c71e7da 100644 --- a/tools/transport/Cargo.toml +++ b/tools/transport/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "mina-transport" -version = "0.14.0" +version = "0.16.0" edition = "2021" [dependencies] diff --git a/tools/webrtc-sniffer/Cargo.toml b/tools/webrtc-sniffer/Cargo.toml new file mode 100644 index 0000000000..c8db3bb70a --- /dev/null +++ b/tools/webrtc-sniffer/Cargo.toml @@ -0,0 +1,26 @@ +[package] +name = "webrtc-sniffer" +version = "0.16.0" +edition = "2021" + +[dependencies] +clap = { version = "4.5", features = ["derive", "env"] } +env_logger = { version = "0.11.6" } +log = { version = "0.4.25" } +pcap = { version = "2.2" } +ctrlc = { version = "3.4" } +sudo = { version = "0.6.0" } +hex = { version = "0.4.3" } +etherparse = { version = "0.17.0" } +thiserror = { version = "2.0" } +nom = { version = "8.0.0" } + +sha2 = { version = "0.10.8" } +rand = { version = "0.8" } +hkdf = { version = "0.12" } +hmac = { version = "0.12" } +p256 = { version = "0.13", features = ["default", "ecdh", "ecdsa"] } +p384 = { version = "0.13" } +aes = { version = "0.8" } +cbc = { version = "0.1", features = ["block-padding", "alloc"] } +aes-gcm = { version = "0.10" } diff --git a/tools/webrtc-sniffer/src/bin/sniffer.rs b/tools/webrtc-sniffer/src/bin/sniffer.rs new file mode 100644 index 0000000000..86887c6f11 --- /dev/null +++ b/tools/webrtc-sniffer/src/bin/sniffer.rs @@ -0,0 +1,109 @@ +use std::path::PathBuf; + +use clap::Parser; +use pcap::{Capture, ConnectionStatus, Device, IfFlags}; + +// cargo run --release --bin sniffer -- --interface auto --path target/test.pcap + +#[derive(Parser)] +struct Cli { + #[arg( + long, + help = "name of the interface, use `auto` to determine automatically" + )] + interface: Option, + + #[arg( + long, + help = "if `interface` is set, the packets will be written to the `pcap` file, \ + otherwise the file will be a source of packets" + )] + path: PathBuf, + + #[arg(long, help = "bpf filter, example: \"udp and not port 443\"")] + filter: Option, + + /// rng seed + #[arg(long, short)] + rng_seed: String, +} + +fn init_logger_std() -> Box { + use env_logger::{Builder, Env}; + + let env = Env::new().filter_or("RUST_LOG", "debug"); + let logger = Builder::default().parse_env(env).build(); + Box::new(logger) as Box +} + +fn main() { + log::set_boxed_logger(init_logger_std()).unwrap_or_default(); + 
diff --git a/tools/ledger-tool/Cargo.toml b/tools/ledger-tool/Cargo.toml
index 9f913efc62..0fde3fb55b 100644
--- a/tools/ledger-tool/Cargo.toml
+++ b/tools/ledger-tool/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "ledger-tool"
-version = "0.14.0"
+version = "0.16.0"
 edition = "2021"
 
 [dependencies]
diff --git a/tools/salsa-simple/Cargo.toml b/tools/salsa-simple/Cargo.toml
index 85a42065c2..fb85420026 100644
--- a/tools/salsa-simple/Cargo.toml
+++ b/tools/salsa-simple/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "salsa-simple"
-version = "0.14.0"
+version = "0.16.0"
 edition = "2021"
 
 [dev-dependencies]
diff --git a/tools/transport/Cargo.toml b/tools/transport/Cargo.toml
index 77c6bfae90..507c71e7da 100644
--- a/tools/transport/Cargo.toml
+++ b/tools/transport/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "mina-transport"
-version = "0.14.0"
+version = "0.16.0"
 edition = "2021"
 
 [dependencies]
diff --git a/tools/webrtc-sniffer/Cargo.toml b/tools/webrtc-sniffer/Cargo.toml
new file mode 100644
index 0000000000..c8db3bb70a
--- /dev/null
+++ b/tools/webrtc-sniffer/Cargo.toml
@@ -0,0 +1,26 @@
+[package]
+name = "webrtc-sniffer"
+version = "0.16.0"
+edition = "2021"
+
+[dependencies]
+clap = { version = "4.5", features = ["derive", "env"] }
+env_logger = { version = "0.11.6" }
+log = { version = "0.4.25" }
+pcap = { version = "2.2" }
+ctrlc = { version = "3.4" }
+sudo = { version = "0.6.0" }
+hex = { version = "0.4.3" }
+etherparse = { version = "0.17.0" }
+thiserror = { version = "2.0" }
+nom = { version = "8.0.0" }
+
+sha2 = { version = "0.10.8" }
+rand = { version = "0.8" }
+hkdf = { version = "0.12" }
+hmac = { version = "0.12" }
+p256 = { version = "0.13", features = ["default", "ecdh", "ecdsa"] }
+p384 = { version = "0.13" }
+aes = { version = "0.8" }
+cbc = { version = "0.1", features = ["block-padding", "alloc"] }
+aes-gcm = { version = "0.10" }
diff --git a/tools/webrtc-sniffer/src/bin/sniffer.rs b/tools/webrtc-sniffer/src/bin/sniffer.rs
new file mode 100644
index 0000000000..86887c6f11
--- /dev/null
+++ b/tools/webrtc-sniffer/src/bin/sniffer.rs
@@ -0,0 +1,109 @@
+use std::path::PathBuf;
+
+use clap::Parser;
+use pcap::{Capture, ConnectionStatus, Device, IfFlags};
+
+// cargo run --release --bin sniffer -- --interface auto --path target/test.pcap
+
+#[derive(Parser)]
+struct Cli {
+    #[arg(
+        long,
+        help = "name of the interface, use `auto` to determine automatically"
+    )]
+    interface: Option<String>,
+
+    #[arg(
+        long,
+        help = "if `interface` is set, the packets will be written to the `pcap` file, \
+                otherwise the file will be a source of packets"
+    )]
+    path: PathBuf,
+
+    #[arg(long, help = "bpf filter, example: \"udp and not port 443\"")]
+    filter: Option<String>,
+
+    /// rng seed
+    #[arg(long, short)]
+    rng_seed: String,
+}
+
+fn init_logger_std() -> Box<dyn log::Log> {
+    use env_logger::{Builder, Env};
+
+    let env = Env::new().filter_or("RUST_LOG", "debug");
+    let logger = Builder::default().parse_env(env).build();
+    Box::new(logger) as Box<dyn log::Log>
+}
+
+fn main() {
+    log::set_boxed_logger(init_logger_std()).unwrap_or_default();
+    log::set_max_level(log::LevelFilter::max());
+
+    let Cli {
+        interface,
+        path,
+        filter,
+        rng_seed,
+    } = Cli::parse();
+
+    let rng_seed = <[u8; 32]>::try_from(hex::decode(rng_seed).unwrap().as_slice()).unwrap();
+
+    if let Some(name) = interface {
+        sudo::escalate_if_needed().unwrap();
+
+        log::info!("try to choose device");
+        let mut selected = None;
+        match Device::list() {
+            Ok(list) => {
+                for device in list {
+                    if name != "auto" {
+                        if device.name.eq(&name) {
+                            selected = Some(device);
+                            break;
+                        }
+                    } else {
+                        log::debug!("candidate: {device:?}");
+                        if !device.addresses.is_empty()
+                            && device.flags.contains(IfFlags::UP | IfFlags::RUNNING)
+                            && matches!(device.flags.connection_status, ConnectionStatus::Connected)
+                        {
+                            selected = Some(device);
+                        }
+                    }
+                }
+            }
+            Err(err) => log::error!("{err}"),
+        }
+
+        if let Some(device) = selected {
+            log::info!("will use: {device:?}");
+            let res = Ok(()).and_then(|()| {
+                let mut capture = Capture::from_device(device)?.immediate_mode(true).open()?;
+                capture
+                    .filter(&filter.unwrap_or_default(), true)
+                    .expect("Failed to apply filter");
+                let savefile = capture.savefile(&path)?;
+
+                webrtc_sniffer::run(capture, Some(savefile), rng_seed)
+            });
+            if let Err(err) = res {
+                log::error!("{err}");
+            }
+        } else {
+            log::error!("cannot find a device: {name}");
+        }
+    } else {
+        log::info!("use file");
+        let res = Ok(()).and_then(|()| {
+            let mut capture = Capture::from_file(&path)?;
+            capture
+                .filter(&filter.unwrap_or_default(), true)
+                .expect("Failed to apply filter");
+            webrtc_sniffer::run(capture, None, rng_seed)
+        });
+        if let Err(err) = res {
+            log::error!("{err}");
+        }
+    }
+}
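// Editor's note: example invocations for the binary above; paths and seed are
// illustrative. --rng-seed expects 64 hex characters (32 bytes):
//
//   live capture: cargo run --release --bin sniffer -- \
//       --interface auto --path target/test.pcap --rng-seed $SEED
//   replay file:  cargo run --release --bin sniffer -- \
//       --path target/test.pcap --rng-seed $SEED
//
// One way to generate a seed value (rand 0.8, already a dependency):
fn example_seed() -> String {
    hex::encode(rand::random::<[u8; 32]>())
}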
diff --git a/tools/webrtc-sniffer/src/dtls/handshake.rs b/tools/webrtc-sniffer/src/dtls/handshake.rs
new file mode 100644
index 0000000000..13e5226bd8
--- /dev/null
+++ b/tools/webrtc-sniffer/src/dtls/handshake.rs
@@ -0,0 +1,346 @@
+use std::fmt;
+
+use nom::{
+    bytes::complete::take,
+    combinator::map,
+    error::{Error, ErrorKind},
+    multi::many0,
+    number::complete::{be_u16, be_u24, be_u8},
+    Err, IResult, Parser,
+};
+
+pub struct HandshakeMessage {
+    pub length: u32,
+    pub message_seq: u16,
+    pub fragment_offset: u32,
+    pub fragment_length: u32,
+    pub inner: HandshakeInner,
+}
+
+pub enum HandshakeInner {
+    ClientHello(ClientHello),
+    ServerHello(ServerHello),
+    HelloVerifyRequest(HelloVerifyRequest),
+    Certificates(Certificates),
+    ServerKeyExchange(ServerKeyExchange),
+    CertificateRequest(u8),
+    ServerHelloDone,
+    CertificateVerify(u8),
+    ClientKeyExchange(ClientKeyExchange),
+    Finished,
+}
+
+impl fmt::Display for HandshakeInner {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        match self {
+            Self::ClientHello(msg) => write!(f, "ClientHello({msg})"),
+            Self::ServerHello(msg) => write!(f, "ServerHello({msg})"),
+            Self::HelloVerifyRequest(msg) => write!(f, "HelloVerifyRequest({msg})"),
+            Self::Certificates(msg) => write!(f, "Certificates({msg})"),
+            Self::ServerKeyExchange(msg) => write!(f, "ServerKeyExchange({msg})"),
+            Self::CertificateRequest(msg) => write!(f, "CertificateRequest({msg})"),
+            Self::ServerHelloDone => write!(f, "ServerHelloDone"),
+            Self::CertificateVerify(msg) => write!(f, "CertificateVerify({msg})"),
+            Self::ClientKeyExchange(msg) => write!(f, "ClientKeyExchange({msg})"),
+            Self::Finished => write!(f, "Finished"),
+        }
+    }
+}
+
+impl HandshakeMessage {
+    pub fn parse(input: &[u8]) -> IResult<&[u8], Self> {
+        let (input, discriminant) = be_u8(input)?;
+        let (input, length) = be_u24(input)?;
+        let (input, message_seq) = be_u16(input)?;
+        let (input, fragment_offset) = be_u24(input)?;
+        let (input, fragment_length) = be_u24(input)?;
+        let (input, inner) = match discriminant {
+            1 => map(ClientHello::parse, HandshakeInner::ClientHello).parse(input),
+            2 => map(ServerHello::parse, HandshakeInner::ServerHello).parse(input),
+            3 => map(
+                HelloVerifyRequest::parse,
+                HandshakeInner::HelloVerifyRequest,
+            )
+            .parse(input),
+            11 => map(Certificates::parse, HandshakeInner::Certificates).parse(input),
+            12 => map(ServerKeyExchange::parse, HandshakeInner::ServerKeyExchange).parse(input),
+            13 => Ok((input, HandshakeInner::CertificateRequest(0))),
+            14 => Ok((input, HandshakeInner::ServerHelloDone)),
+            15 => Ok((input, HandshakeInner::CertificateVerify(0))),
+            16 => map(ClientKeyExchange::parse, HandshakeInner::ClientKeyExchange).parse(input),
+            20 => Ok((input, HandshakeInner::Finished)),
+            _ => Err(Err::Error(Error::new(input, ErrorKind::Alt))),
+        }?;
+        Ok((
+            input,
+            HandshakeMessage {
+                length,
+                message_seq,
+                fragment_offset,
+                fragment_length,
+                inner,
+            },
+        ))
+    }
+}
+
+pub struct ClientHello {
+    pub random: [u8; 32],
+    pub session_id: Vec<u8>,
+    pub cookie: Vec<u8>,
+    pub cipher_suites: Vec<u16>,
+    pub compression_methods: Vec<u8>,
+    pub extensions: Vec<Extension>,
+}
+
+impl fmt::Display for ClientHello {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(
+            f,
+            "random={}, session_id=\"{}\", cookie=\"{}\", cipher_suites={:?}, compression_methods={:?}",
+            hex::encode(self.random),
+            hex::encode(&self.session_id),
+            hex::encode(&self.cookie),
+            self.cipher_suites,
+            self.compression_methods,
+        )
+    }
+}
+
+impl ClientHello {
+    fn parse(input: &[u8]) -> IResult<&[u8], Self> {
+        let (input, legacy_record_version) = be_u16(input)?;
+        if legacy_record_version != 0xFEFD {
+            return Err(Err::Error(Error::new(input, ErrorKind::Alt)));
+        }
+        let (input, random) = take(32usize)(input)?;
+        let random = <[u8; 32]>::try_from(random).expect("cannot fail");
+        let (input, l) = be_u8(input)?;
+        let (input, bytes) = take(l as usize)(input)?;
+        let session_id = bytes.to_vec();
+        let (input, l) = be_u8(input)?;
+        let (input, bytes) = take(l as usize)(input)?;
+        let cookie = bytes.to_vec();
+        let (input, l) = be_u16(input)?;
+        let (input, bytes) = take(l as usize)(input)?;
+        let (_, cipher_suites) = many0(be_u16).parse(bytes)?;
+        let (input, compression_methods_len) = be_u8(input)?;
+        let (input, bytes) = take(compression_methods_len as usize)(input)?;
+        let compression_methods = bytes.to_vec();
+        let (input, l) = be_u16(input)?;
+        let (input, bytes) = take(l as usize)(input)?;
+        let (_, extensions) = many0(Extension::parse).parse(bytes)?;
+
+        Ok((
+            input,
+            ClientHello {
+                random,
+                session_id,
+                cookie,
+                cipher_suites,
+                compression_methods,
+                extensions,
+            },
+        ))
+    }
+}
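// Editor's sketch: a hand-built minimal ClientHello fragment exercising the
// parser above (would live in a #[cfg(test)] module of this file; the cipher
// suite 0xC02B is TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, chosen only for
// illustration).
#[test]
fn parse_minimal_client_hello() {
    let mut msg = vec![
        1, // msg_type: ClientHello
        0x00, 0x00, 0x2C, // length = 44
        0x00, 0x00, // message_seq
        0x00, 0x00, 0x00, // fragment_offset
        0x00, 0x00, 0x2C, // fragment_length = 44 (unfragmented)
        0xFE, 0xFD, // client_version: DTLS 1.2
    ];
    msg.extend_from_slice(&[0u8; 32]); // random
    msg.extend_from_slice(&[
        0x00, // session_id length
        0x00, // cookie length (first flight: empty)
        0x00, 0x02, 0xC0, 0x2B, // one cipher suite
        0x01, 0x00, // one compression method: null
        0x00, 0x00, // no extensions
    ]);

    let (rest, parsed) = HandshakeMessage::parse(&msg).unwrap();
    assert!(rest.is_empty());
    assert_eq!(parsed.length, 44);
    match parsed.inner {
        HandshakeInner::ClientHello(ch) => {
            assert_eq!(ch.random, [0u8; 32]);
            assert!(ch.cookie.is_empty());
            assert_eq!(ch.cipher_suites, vec![0xC02B]);
        }
        _ => panic!("expected ClientHello"),
    }
}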
32]>::try_from(random).expect("cannot fail"); + let (input, l) = be_u8(input)?; + let (input, bytes) = take(l as usize)(input)?; + let session_id = bytes.to_vec(); + let (input, cipher_suite) = be_u16(input)?; + let (input, compression_method) = be_u8(input)?; + let (input, l) = be_u16(input)?; + let (input, bytes) = take(l as usize)(input)?; + let (_, extensions) = many0(Extension::parse).parse(bytes)?; + + Ok(( + input, + ServerHello { + random, + session_id, + cipher_suite, + compression_method, + extensions, + }, + )) + } +} + +pub struct HelloVerifyRequest { + pub cookie: Vec, +} + +impl fmt::Display for HelloVerifyRequest { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "cookie={}", hex::encode(&self.cookie),) + } +} + +impl HelloVerifyRequest { + fn parse(input: &[u8]) -> IResult<&[u8], Self> { + let (input, legacy_record_version) = be_u16(input)?; + if legacy_record_version != 0xFEFD { + return Err(Err::Error(Error::new(input, ErrorKind::Alt))); + } + + let (input, l) = be_u8(input)?; + let (input, bytes) = take(l as usize)(input)?; + let cookie = bytes.to_vec(); + + Ok((input, HelloVerifyRequest { cookie })) + } +} + +pub struct Extension {} + +impl Extension { + fn parse(input: &[u8]) -> IResult<&[u8], Self> { + let _ = be_u8(input)?; + Ok((&[], Extension {})) + } +} + +pub struct Certificates(pub Vec); + +impl fmt::Display for Certificates { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + for certificate in &self.0 { + write!(f, "certificate({certificate})")?; + } + Ok(()) + } +} + +impl Certificates { + fn parse(input: &[u8]) -> IResult<&[u8], Self> { + let (input, length) = be_u24(input)?; + let (input, bytes) = take(length as usize)(input)?; + let (_, certificates) = many0(Certificate::parse).parse(bytes)?; + Ok((input, Certificates(certificates))) + } +} + +pub struct Certificate { + pub data: Vec, +} + +impl fmt::Display for Certificate { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}", hex::encode(&self.data)) + } +} + +impl Certificate { + fn parse(input: &[u8]) -> IResult<&[u8], Self> { + let (input, length) = be_u24(input)?; + let (input, bytes) = take(length as usize)(input)?; + let data = bytes.to_vec(); + + Ok((input, Certificate { data })) + } +} + +pub struct ServerKeyExchange { + // pub curve_type: u8, + pub curve_name: u16, + pub public_key: Vec, + pub signature_hash_algorithm: u8, + pub signature_algorithm: u8, + pub signature: Vec, +} + +impl fmt::Display for ServerKeyExchange { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!( + f, + "curve_name={}, pk={}, sig_alg=({},{}), sig={}", + self.curve_name, + hex::encode(&self.public_key), + self.signature_hash_algorithm, + self.signature_algorithm, + hex::encode(&self.signature) + ) + } +} + +impl ServerKeyExchange { + fn parse(input: &[u8]) -> IResult<&[u8], Self> { + let (input, curve_type) = be_u8(input)?; + if curve_type != 3 { + return Err(Err::Failure(Error::new(input, ErrorKind::Alt))); + } + let (input, curve_name) = be_u16(input)?; + + let (input, l) = be_u8(input)?; + let (input, bytes) = take(l as usize)(input)?; + let public_key = bytes.to_vec(); + + let (input, signature_hash_algorithm) = be_u8(input)?; + let (input, signature_algorithm) = be_u8(input)?; + + let (input, l) = be_u16(input)?; + let (input, bytes) = take(l as usize)(input)?; + let signature = bytes.to_vec(); + + Ok(( + input, + ServerKeyExchange { + curve_name, + public_key, + signature_hash_algorithm, + signature_algorithm, + signature, + }, + )) + 
} +} + +pub struct ClientKeyExchange { + pub public_key: Vec, +} + +impl fmt::Display for ClientKeyExchange { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "pk={}", hex::encode(&self.public_key),) + } +} + +impl ClientKeyExchange { + fn parse(input: &[u8]) -> IResult<&[u8], Self> { + let (input, l) = be_u8(input)?; + let (input, bytes) = take(l as usize)(input)?; + let public_key = bytes.to_vec(); + + Ok((input, ClientKeyExchange { public_key })) + } +} diff --git a/tools/webrtc-sniffer/src/dtls/header.rs b/tools/webrtc-sniffer/src/dtls/header.rs new file mode 100644 index 0000000000..0007040955 --- /dev/null +++ b/tools/webrtc-sniffer/src/dtls/header.rs @@ -0,0 +1,110 @@ +use std::fmt; + +use nom::{ + bytes::complete::take, + error::{Error, ErrorKind}, + number::complete::{be_u16, be_u64, be_u8}, + Err, IResult, +}; + +#[repr(u8)] +#[derive(Clone, Copy, Debug, PartialEq, Eq)] +pub enum ContentType { + ChangeCipherSpec = 20, + Alert = 21, + Handshake = 22, + ApplicationData = 23, +} + +impl ContentType { + pub fn from_u8(value: u8) -> Option { + match value { + 20 => Some(ContentType::ChangeCipherSpec), + 21 => Some(ContentType::Alert), + 22 => Some(ContentType::Handshake), + 23 => Some(ContentType::ApplicationData), + _ => None, + } + } +} + +#[derive(Clone, Copy, Debug, PartialEq, Eq)] +pub struct Chunk<'a> { + pub ty: ContentType, + pub epoch: u16, + pub sequence_number: u64, + pub length: u16, + pub body: &'a [u8], +} + +impl fmt::Display for Chunk<'_> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let Chunk { + ty, + epoch, + sequence_number: seq, + length, + .. + } = self; + write!( + f, + "{ty:?}, epoch={epoch}, seq={seq:012x}, len={length}, data={}", + hex::encode(self.body) + ) + } +} + +impl<'a> Chunk<'a> { + pub fn parse(input: &'a [u8]) -> IResult<&'a [u8], Self> { + let (input, ty_byte) = be_u8(input)?; + let ty = ContentType::from_u8(ty_byte) + .ok_or_else(|| Err::Error(Error::new(input, ErrorKind::Alt)))?; + + let (input, legacy_record_version) = be_u16(input)?; + if legacy_record_version != 0xFEFD { + return Err(Err::Error(Error::new(input, ErrorKind::Alt))); + } + + let (input, t) = be_u64(input)?; + let epoch = (t >> 48) as u16; + let sequence_number = t & ((1 << 48) - 1); + let (input, length) = be_u16(input)?; + let (input, body) = take(length as usize)(input)?; + + let header = Chunk { + ty, + epoch, + sequence_number, + length, + body, + }; + + Ok((input, header)) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_parse_header() { + let bytes = &[ + 22, // ContentType::Handshake + 0xFE, 0xFD, // legacy_record_version (0xFEFD for DTLS 1.0) + 0x00, 0x01, // epoch + 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, // sequence_number + 0x00, 0x03, // length + 0x00, 0x00, 0x00, + ]; + + let result = Chunk::parse(bytes); + let (_, chunk) = result.unwrap(); + + assert_eq!(chunk.ty, ContentType::Handshake); + assert_eq!(chunk.epoch, 1); + assert_eq!(chunk.sequence_number, 1); + assert_eq!(chunk.length, 3); + assert_eq!(chunk.body, [0; 3]); + } +} diff --git a/tools/webrtc-sniffer/src/dtls/mod.rs b/tools/webrtc-sniffer/src/dtls/mod.rs new file mode 100644 index 0000000000..ae18d3dea7 --- /dev/null +++ b/tools/webrtc-sniffer/src/dtls/mod.rs @@ -0,0 +1,6 @@ +mod header; + +mod handshake; + +mod state; +pub use self::state::State; diff --git a/tools/webrtc-sniffer/src/dtls/state.rs b/tools/webrtc-sniffer/src/dtls/state.rs new file mode 100644 index 0000000000..1d0cd66cdd --- /dev/null +++ 
diff --git a/tools/webrtc-sniffer/src/dtls/state.rs b/tools/webrtc-sniffer/src/dtls/state.rs
new file mode 100644
index 0000000000..1d0cd66cdd
--- /dev/null
+++ b/tools/webrtc-sniffer/src/dtls/state.rs
@@ -0,0 +1,190 @@
+use std::borrow::Cow;
+
+use nom::{multi::many1, IResult, Parser};
+use rand::{rngs::StdRng, SeedableRng};
+
+use crate::dtls::handshake::{HandshakeInner, ServerHello};
+
+use super::{
+    handshake::{Extension, HandshakeMessage},
+    header::{Chunk, ContentType},
+};
+
+pub struct State {
+    rng_seed: [u8; 32],
+    inner: Option<Inner>,
+}
+
+enum Inner {
+    Initial,
+    ClientHello {
+        client_random: [u8; 32],
+    },
+    BothHello(HelloMsgs),
+    ServerKey {
+        hello: HelloMsgs,
+        // https://www.iana.org/assignments/tls-parameters/tls-parameters.xhtml#tls-parameters-8
+        curve_name: u16,
+        server_pk: Vec<u8>,
+    },
+    BothKey {
+        hello: HelloMsgs,
+        keys: BothKey,
+    },
+}
+
+#[allow(dead_code)]
+struct HelloMsgs {
+    client_random: [u8; 32],
+    server_random: [u8; 32],
+    session_id: Vec<u8>,
+    cipher_suite: u16,
+    extensions: Vec<Extension>,
+}
+
+#[allow(dead_code)]
+struct BothKey {
+    curve_name: u16,
+    server_pk: Vec<u8>,
+    client_pk: Vec<u8>,
+}
+
+impl State {
+    pub fn new(rng_seed: [u8; 32]) -> Self {
+        State {
+            rng_seed,
+            inner: Some(Inner::Initial),
+        }
+    }
+
+    pub fn handle<'d>(&mut self, data: &'d [u8], _incoming: bool) -> IResult<&'d [u8], ()> {
+        let (data, chunks) = many1(Chunk::parse).parse(data)?;
+        for chunk in chunks {
+            log::info!("{chunk}");
+            #[allow(clippy::single_match)]
+            match chunk.ty {
+                ContentType::Handshake => self.handle_handshake(chunk.body),
+                _ => {}
+            }
+        }
+
+        Ok((data, ()))
+    }
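    // Editor's note: the handshake handling below is a linear state machine,
    //   Initial -> ClientHello -> BothHello -> ServerKey -> BothKey,
    // advanced by one flight each of ClientHello (with a non-empty cookie),
    // ServerHello, ServerKeyExchange and ClientKeyExchange. Anything unexpected
    // (retransmits, out-of-order messages) hits the catch-all arm and leaves
    // the state unchanged.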
+    fn handle_handshake(&mut self, msg_bytes: &[u8]) {
+        let Some(state) = self.inner.take() else {
+            log::warn!("ignore datagram, invalid state");
+            return;
+        };
+
+        let mut msg_bytes = Cow::Borrowed(msg_bytes);
+        if let Inner::BothKey { hello, keys } = &state {
+            let bytes = msg_bytes.to_mut();
+            // decrypt
+            let _ = (hello, keys, bytes);
+        }
+        let msg = match HandshakeMessage::parse(&msg_bytes) {
+            Ok((_, msg)) => msg,
+            Err(err) => {
+                log::error!("{err}");
+                return;
+            }
+        };
+
+        let HandshakeMessage {
+            length,
+            message_seq,
+            fragment_offset,
+            fragment_length,
+            inner: msg,
+        } = msg;
+        let _ = message_seq;
+        log::info!("HANDSHAKE: {msg}");
+
+        if fragment_offset != 0 || length != fragment_length {
+            log::error!("collecting fragments is not implemented");
+            self.inner = None;
+            return;
+        }
+
+        let state = match (state, msg) {
+            (Inner::Initial, HandshakeInner::ClientHello(msg)) => {
+                let client_random = msg.random;
+                if msg.cookie.is_empty() {
+                    self.inner = Some(Inner::Initial);
+                    return;
+                }
+
+                use sha2::{
+                    digest::{FixedOutput, Update},
+                    Sha256,
+                };
+                let seed = Sha256::default()
+                    .chain(self.rng_seed)
+                    .chain(&msg.cookie)
+                    .finalize_fixed()
+                    .into();
+                dbg!(format!("{seed:x?}"));
+                let _rng = StdRng::from_seed(seed);
+
+                let _ = (
+                    msg.session_id,
+                    msg.cookie,
+                    msg.cipher_suites,
+                    msg.compression_methods,
+                    msg.extensions,
+                );
+                Inner::ClientHello { client_random }
+            }
+            (Inner::ClientHello { client_random }, HandshakeInner::ServerHello(msg)) => {
+                let ServerHello {
+                    random,
+                    session_id,
+                    cipher_suite,
+                    compression_method,
+                    extensions,
+                } = msg;
+                if compression_method != 0 {
+                    log::error!("compression method {compression_method} is not implemented");
+                    return;
+                }
+                Inner::BothHello(HelloMsgs {
+                    client_random,
+                    server_random: random,
+                    session_id,
+                    cipher_suite,
+                    extensions,
+                })
+            }
+            (Inner::BothHello(hello), HandshakeInner::ServerKeyExchange(msg)) => {
+                // check signature
+                let _ = msg.signature;
+                Inner::ServerKey {
+                    hello,
+                    curve_name: msg.curve_name,
+                    server_pk: msg.public_key,
+                }
+            }
+            (
+                Inner::ServerKey {
+                    hello,
+                    curve_name,
+                    server_pk,
+                },
+                HandshakeInner::ClientKeyExchange(msg),
+            ) => {
+                let keys = BothKey {
+                    curve_name,
+                    server_pk,
+                    client_pk: msg.public_key,
+                };
+                Inner::BothKey { hello, keys }
+            }
+            (state, _) => {
+                log::warn!("ignore handshake msg");
+                state
+            }
+        };
+        self.inner = Some(state);
+    }
+}
diff --git a/tools/webrtc-sniffer/src/lib.rs b/tools/webrtc-sniffer/src/lib.rs
new file mode 100644
index 0000000000..53fa277742
--- /dev/null
+++ b/tools/webrtc-sniffer/src/lib.rs
@@ -0,0 +1,78 @@
+mod net;
+
+mod dtls;
+
+use std::{borrow::Cow, collections::BTreeMap, fmt, net::SocketAddr};
+
+use pcap::{Activated, Capture, Savefile};
+
+type State = dtls::State;
+
+#[derive(Clone, Copy)]
+pub struct MsgHeader {
+    src: SocketAddr,
+    dst: SocketAddr,
+    len: u16,
+}
+
+impl fmt::Display for MsgHeader {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        let MsgHeader { src, dst, len } = self;
+        write!(f, "{src} -> {dst} {len}")
+    }
+}
+
+pub fn run<A: Activated>(
+    capture: Capture<A>,
+    file: Option<Savefile>,
+    rng_seed: [u8; 32],
+) -> Result<(), net::DissectError> {
+    let mut connections = BTreeMap::<(SocketAddr, SocketAddr), State>::new();
+
+    let mut buffer = None::<Vec<u8>>;
+    for item in net::UdpIter::new(capture, file) {
+        let (src, dst, data) = item?;
+
+        // if src.to_string() != "10.63.45.16:57909" && dst.to_string() != "10.63.45.16:57909" {
+        //     continue;
+        // }
+
+        let _hdr = MsgHeader {
+            src,
+            dst,
+            len: data.len() as _,
+        };
+
+        // skip STUN/TURN
+        if data[4..8].eq(b"\x21\x12\xa4\x42") {
+            continue;
+        }
+
+        log::info!("{_hdr}");
+
+        let data = if let Some(mut buffer) = buffer.take() {
+            buffer.extend_from_slice(&data);
+            Cow::Owned(buffer)
+        } else {
+            Cow::Borrowed(data.as_ref())
+        };
+
+        let res = if let Some(cn) = connections.get_mut(&(src, dst)) {
+            cn.handle(&data, true)
+        } else {
+            connections
+                .entry((dst, src))
+                .or_insert_with(|| State::new(rng_seed))
+                .handle(&data, false)
+        };
+
+        if let Err(err) = res {
+            match err {
+                nom::Err::Incomplete(_) => buffer = Some(data.into_owned()),
+                err => log::error!("{err}"),
+            }
+        }
+    }
+
+    Ok(())
+}
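// Editor's note: the `data[4..8]` comparison in `run` above matches the STUN
// magic cookie 0x2112A442 (RFC 5389), which sits at byte offset 4 of every STUN
// message; that is what filters ICE/STUN/TURN traffic out before DTLS parsing.
// As written it panics on datagrams shorter than 8 bytes, so a length-checked
// helper is safer (sketch):
fn is_stun(data: &[u8]) -> bool {
    data.len() >= 8 && data[4..8] == [0x21, 0x12, 0xA4, 0x42]
}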
diff --git a/tools/webrtc-sniffer/src/net.rs b/tools/webrtc-sniffer/src/net.rs
new file mode 100644
index 0000000000..03eb2a12c3
--- /dev/null
+++ b/tools/webrtc-sniffer/src/net.rs
@@ -0,0 +1,87 @@
+use std::net::{IpAddr, SocketAddr};
+
+use etherparse::{err::packet, NetSlice, SlicedPacket, TransportSlice};
+use pcap::{Activated, Capture, Packet, PacketCodec, PacketIter, Savefile};
+use thiserror::Error;
+
+pub struct UdpIter<A: Activated> {
+    inner: PacketIter<A, UdpCodec>,
+}
+
+#[derive(Debug, Error)]
+pub enum DissectError {
+    #[error("{0}")]
+    Cap(#[from] pcap::Error),
+    #[error("{0}")]
+    ParsePacket(#[from] packet::SliceError),
+}
+
+impl<A: Activated> UdpIter<A> {
+    pub fn new(capture: Capture<A>, file: Option<Savefile>) -> Self {
+        UdpIter {
+            inner: capture.iter(UdpCodec { file }),
+        }
+    }
+}
+
+impl<A: Activated> Iterator for UdpIter<A> {
+    type Item = Result<(SocketAddr, SocketAddr, Box<[u8]>), DissectError>;
+
+    fn next(&mut self) -> Option<Self::Item> {
+        loop {
+            match self
+                .inner
+                .next()?
+                .map_err(DissectError::Cap)
+                .and_then(|x| x)
+                .transpose()
+            {
+                Some(v) => break Some(v),
+                None => continue,
+            }
+        }
+    }
+}
+
+struct UdpCodec {
+    file: Option<Savefile>,
+}
+
+impl PacketCodec for UdpCodec {
+    type Item = Result<Option<(SocketAddr, SocketAddr, Box<[u8]>)>, DissectError>;
+
+    fn decode(&mut self, packet: Packet<'_>) -> Self::Item {
+        let eth = SlicedPacket::from_ethernet(packet.data)?;
+        if let (Some(net), Some(transport)) = (eth.net, eth.transport) {
+            let (src_ip, dst_ip) = match net {
+                NetSlice::Ipv4(ip) => (
+                    IpAddr::V4(ip.header().source().into()),
+                    IpAddr::V4(ip.header().destination().into()),
+                ),
+                NetSlice::Ipv6(ip) => (
+                    IpAddr::V6(ip.header().source().into()),
+                    IpAddr::V6(ip.header().destination().into()),
+                ),
+                NetSlice::Arp(_) => return Ok(None),
+            };
+            let (src_port, dst_port, slice) = match transport {
+                TransportSlice::Udp(udp) => {
+                    (udp.source_port(), udp.destination_port(), udp.payload())
+                }
+                _ => return Ok(None),
+            };
+            if let Some(file) = &mut self.file {
+                file.write(&packet);
+                file.flush()?;
+            }
+
+            Ok(Some((
+                SocketAddr::new(src_ip, src_port),
+                SocketAddr::new(dst_ip, dst_port),
+                slice.to_vec().into_boxed_slice(),
+            )))
+        } else {
+            Ok(None)
+        }
+    }
+}
diff --git a/vrf/Cargo.toml b/vrf/Cargo.toml
index 35f5aeb36a..612e850405 100644
--- a/vrf/Cargo.toml
+++ b/vrf/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "vrf"
-version = "0.14.0"
+version = "0.16.0"
 edition = "2021"
 license = "Apache-2.0"
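// Editor's note: a minimal end-to-end driver for the webrtc-sniffer library
// added in this patch, assuming a previously recorded capture file (offline
// mode, so no Savefile is passed). The all-zero seed is enough for parsing the
// handshake; key recovery would need the real seed the traced node ran with.
// DissectError implements std::error::Error via thiserror, so `?` works here.
fn replay_example() -> Result<(), Box<dyn std::error::Error>> {
    let capture = pcap::Capture::from_file("target/test.pcap")?;
    webrtc_sniffer::run(capture, None, [0u8; 32])?;
    Ok(())
}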