diff --git a/.github/actions/rust-setup/action.yml b/.github/actions/rust-setup/action.yml index 70c5b1f942..c8c0e80e7e 100644 --- a/.github/actions/rust-setup/action.yml +++ b/.github/actions/rust-setup/action.yml @@ -13,6 +13,10 @@ inputs: description: "Additional cache key for dependencies" required: false default: "default" + targets: + description: "Additional targets to install (e.g., thumbv7m-none-eabi)" + required: false + default: "" runs: using: "composite" @@ -22,6 +26,7 @@ runs: with: toolchain: ${{ inputs.toolchain }} components: ${{ inputs.components }} + targets: ${{ inputs.targets }} - name: Install protoc uses: arduino/setup-protoc@v3 diff --git a/.github/workflows/rust-test.yml b/.github/workflows/rust-test.yml index 44f713d437..45ff139b08 100644 --- a/.github/workflows/rust-test.yml +++ b/.github/workflows/rust-test.yml @@ -5,21 +5,21 @@ permissions: on: pull_request: paths: - - 'client/**' - - 'proto/**' - - 'Cargo.toml' - - 'Cargo.lock' - - '.github/workflows/rust-*.yml' + - "client/**" + - "proto/**" + - "Cargo.toml" + - "Cargo.lock" + - ".github/workflows/rust-*.yml" push: branches: - main - release/* paths: - - 'client/**' - - 'proto/**' - - 'Cargo.toml' - - 'Cargo.lock' - - '.github/workflows/rust-*.yml' + - "client/**" + - "proto/**" + - "Cargo.toml" + - "Cargo.lock" + - ".github/workflows/rust-*.yml" jobs: test: @@ -56,6 +56,80 @@ jobs: cd client/crates/client cargo check --examples --verbose + feature-check: + name: Feature Combination Check + runs-on: ubuntu-latest + steps: + - name: Checkout code + uses: actions/checkout@v5 + + - name: Setup Rust + uses: ./.github/actions/rust-setup + with: + toolchain: stable + components: rustfmt, clippy + cache-key: features + + - name: Check ev-types with no default features + run: | + cd client/crates/types + # Check minimal build (just protobuf, no compression or grpc) + cargo check --no-default-features --verbose + + - name: Check ev-types with only std + run: | + cd client/crates/types + cargo check --no-default-features --features std --verbose + + - name: Check ev-types with compression only + run: | + cd client/crates/types + cargo check --no-default-features --features compression --verbose + + - name: Check ev-types with grpc only + run: | + cd client/crates/types + cargo check --no-default-features --features grpc --verbose + + - name: Check ev-types with default features + run: | + cd client/crates/types + cargo check --verbose + + no-std-check: + name: No-std Compatibility Check + runs-on: ubuntu-latest + steps: + - name: Checkout code + uses: actions/checkout@v5 + + - name: Setup Rust + uses: ./.github/actions/rust-setup + with: + toolchain: stable + components: rustfmt, clippy + cache-key: no-std + targets: thumbv7m-none-eabi + + - name: Check no_std compatibility for ev-types + run: | + cd client/crates/types + # Test that the crate builds for an embedded target without std + cargo check --no-default-features --target thumbv7m-none-eabi --verbose + + - name: Check no_std with alloc + run: | + cd client/crates/types + # Some embedded systems have alloc but not std + # This verifies we can use the crate with just alloc support + cargo check --no-default-features --target thumbv7m-none-eabi --verbose + + - name: Build for wasm32 target (another no_std target) + run: | + rustup target add wasm32-unknown-unknown + cd client/crates/types + cargo check --no-default-features --target wasm32-unknown-unknown --verbose + coverage: name: Code Coverage runs-on: ubuntu-latest diff --git a/Cargo.lock b/Cargo.lock index 
7125029d4c..689abd4fa9 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -65,6 +65,12 @@ dependencies = [ "syn", ] +[[package]] +name = "atomic-waker" +version = "1.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1505bd5d3d116872e7271a6d4e16d81d0c8570876c8de68093a09ac269d8aac0" + [[package]] name = "autocfg" version = "1.5.0" @@ -73,18 +79,16 @@ checksum = "c08606f8c3cbf4ce6ec8e28fb0014a2c086708fe954eaa885384a6165172e7e8" [[package]] name = "axum" -version = "0.6.20" +version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b829e4e32b91e643de6eafe82b1d90675f5874230191a4ffbc1b336dec4d6bf" +checksum = "021e862c184ae977658b36c4500f7feac3221ca5da43e3f25bd04ab6c79a29b5" dependencies = [ - "async-trait", "axum-core", - "bitflags 1.3.2", "bytes", "futures-util", "http", "http-body", - "hyper", + "http-body-util", "itoa", "matchit", "memchr", @@ -94,24 +98,26 @@ dependencies = [ "rustversion", "serde", "sync_wrapper", - "tower", + "tower 0.5.2", "tower-layer", "tower-service", ] [[package]] name = "axum-core" -version = "0.3.4" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "759fa577a247914fd3f7f76d62972792636412fbfd634cd452f6a385a74d2d2c" +checksum = "68464cd0412f486726fb3373129ef5d2993f90c34bc2bc1c1e9943b2f4fc7ca6" dependencies = [ - "async-trait", "bytes", - "futures-util", + "futures-core", "http", "http-body", + "http-body-util", "mime", + "pin-project-lite", "rustversion", + "sync_wrapper", "tower-layer", "tower-service", ] @@ -133,15 +139,9 @@ dependencies = [ [[package]] name = "base64" -version = "0.21.7" +version = "0.22.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d297deb1925b89f2ccc13d7635fa0714f12c87adce1c75356b39ca9b7178567" - -[[package]] -name = "bitflags" -version = "1.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" +checksum = "72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6" [[package]] name = "bitflags" @@ -163,9 +163,9 @@ checksum = "d71b6127be86fdcfddb610f7182ac57211d4b18a3e9c82eb2d17662f2227ad6a" [[package]] name = "cc" -version = "1.2.27" +version = "1.2.33" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d487aa071b5f64da6f19a3e848e3578944b726ee5a4854b82172f02aa876bfdc" +checksum = "3ee0f8803222ba5a7e2777dd72ca451868909b1ac410621b676adf07280e9b5f" dependencies = [ "shlex", ] @@ -176,6 +176,22 @@ version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9555578bc9e57714c812a1f84e4fc5b4d21fcb063490c624de019f7464c91268" +[[package]] +name = "core-foundation" +version = "0.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b2a6cd9ae233e7f62ba4e9353e81a88df7fc8a5987b8d445b4d90c879bd156f6" +dependencies = [ + "core-foundation-sys", + "libc", +] + +[[package]] +name = "core-foundation-sys" +version = "0.8.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b" + [[package]] name = "either" version = "1.15.0" @@ -209,7 +225,7 @@ dependencies = [ "tokio", "tokio-test", "tonic", - "tower", + "tower 0.4.13", "tracing", "tracing-subscriber", ] @@ -218,11 +234,15 @@ dependencies = [ name = "ev-types" version = "0.0.1" dependencies = [ + "bytes", "prost", "prost-build", "prost-types", + "ruzstd", + "snafu", "tonic", - 
"tonic-build", + "tonic-prost", + "tonic-prost-build", "walkdir", ] @@ -364,15 +384,15 @@ checksum = "07e28edb80900c19c28f1072f2e8aeca7fa06b23cd4169cefe1af5aa3260783f" [[package]] name = "h2" -version = "0.3.26" +version = "0.4.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81fe527a889e1532da5c525686d96d4c2e74cdd345badf8dfef9f6b39dd5f5e8" +checksum = "f3c0b69cfcb4e1b9f1bf2f53f95f766e4661169728ec61cd3fe5a0166f2d1386" dependencies = [ + "atomic-waker", "bytes", "fnv", "futures-core", "futures-sink", - "futures-util", "http", "indexmap 2.10.0", "slab", @@ -411,9 +431,9 @@ checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea" [[package]] name = "http" -version = "0.2.12" +version = "1.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "601cbb57e577e2f5ef5be8e7b83f0f63994f25aa94d673e54a92d5c516d101f1" +checksum = "f4a85d31aea989eead29a3aaf9e1115a180df8282431156e533de47660892565" dependencies = [ "bytes", "fnv", @@ -422,12 +442,24 @@ dependencies = [ [[package]] name = "http-body" -version = "0.4.6" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ceab25649e9960c0311ea418d17bee82c0dcec1bd053b5f9a66e265a693bed2" +checksum = "1efedce1fb8e6913f23e0c92de8e62cd5b772a67e7b3946df930a62566c93184" dependencies = [ "bytes", "http", +] + +[[package]] +name = "http-body-util" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b021d93e26becf5dc7e1b75b1bed1fd93124b374ceb73f43d4d4eafec896a64a" +dependencies = [ + "bytes", + "futures-core", + "http", + "http-body", "pin-project-lite", ] @@ -445,13 +477,12 @@ checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9" [[package]] name = "hyper" -version = "0.14.32" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41dfc780fdec9373c01bae43289ea34c972e40ee3c9f6b3c8801a35f35586ce7" +checksum = "cc2b571658e38e0c01b1fdca3bbbe93c00d3d71693ff2770043f8c29bc7d6f80" dependencies = [ "bytes", "futures-channel", - "futures-core", "futures-util", "h2", "http", @@ -460,23 +491,43 @@ dependencies = [ "httpdate", "itoa", "pin-project-lite", - "socket2", + "smallvec", "tokio", - "tower-service", - "tracing", "want", ] [[package]] name = "hyper-timeout" -version = "0.4.1" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bbb958482e8c7be4bc3cf272a766a2b0bf1a6755e7a6ae777f017a31d11b13b1" +checksum = "2b90d566bffbce6a75bd8b09a05aa8c2cb1fabb6cb348f8840c9e4c90a0d83b0" dependencies = [ "hyper", + "hyper-util", "pin-project-lite", "tokio", - "tokio-io-timeout", + "tower-service", +] + +[[package]] +name = "hyper-util" +version = "0.1.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8d9b05277c7e8da2c93a568989bb6207bef0112e8d17df7a6eda4a3cf143bc5e" +dependencies = [ + "bytes", + "futures-channel", + "futures-core", + "futures-util", + "http", + "http-body", + "hyper", + "libc", + "pin-project-lite", + "socket2 0.6.0", + "tokio", + "tower-service", + "tracing", ] [[package]] @@ -550,9 +601,9 @@ checksum = "13dc2df351e3202783a1fe0d44375f7295ffb4049267b0f3018346dc122a1d94" [[package]] name = "matchit" -version = "0.7.3" +version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e7465ac9959cc2b1404e8e2367b43684a6d13790fe23056cc8c6c5a6b7bcb94" +checksum = "47e1ffaa40ddd1f3ed91f717a33c8c0ee23fff369e3aa8772b9605cc1d22f4c3" 
[[package]] name = "memchr" @@ -626,6 +677,12 @@ version = "1.21.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "42f5e15c9953c5e4ccceeb2e7382a716482c34515315f7b03532b8b4e8393d2d" +[[package]] +name = "openssl-probe" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d05e27ee213611ffe7d6348b942e8f942b37114c00cc03cec254295a4a17852e" + [[package]] name = "overload" version = "0.1.1" @@ -733,9 +790,9 @@ dependencies = [ [[package]] name = "prost" -version = "0.12.6" +version = "0.14.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "deb1435c188b76130da55f17a466d252ff7b1418b2ad3e037d127b94e3411f29" +checksum = "7231bd9b3d3d33c86b58adbac74b5ec0ad9f496b19d22801d773636feaa95f3d" dependencies = [ "bytes", "prost-derive", @@ -743,11 +800,10 @@ dependencies = [ [[package]] name = "prost-build" -version = "0.12.6" +version = "0.14.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22505a5c94da8e3b7c2996394d1c933236c4d743e81a410bcca4e6989fc066a4" +checksum = "ac6c3320f9abac597dcbc668774ef006702672474aad53c6d596b62e487b40b1" dependencies = [ - "bytes", "heck", "itertools", "log", @@ -757,6 +813,8 @@ dependencies = [ "prettyplease", "prost", "prost-types", + "pulldown-cmark", + "pulldown-cmark-to-cmark", "regex", "syn", "tempfile", @@ -764,9 +822,9 @@ dependencies = [ [[package]] name = "prost-derive" -version = "0.12.6" +version = "0.14.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81bddcdb20abf9501610992b6759a4c888aef7d1a7247ef75e2404275ac24af1" +checksum = "9120690fafc389a67ba3803df527d0ec9cbbc9cc45e4cc20b332996dfb672425" dependencies = [ "anyhow", "itertools", @@ -777,13 +835,33 @@ dependencies = [ [[package]] name = "prost-types" -version = "0.12.6" +version = "0.14.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9091c90b0a32608e984ff2fa4091273cbdd755d54935c51d520887f4a1dbd5b0" +checksum = "b9b4db3d6da204ed77bb26ba83b6122a73aeb2e87e25fbf7ad2e84c4ccbf8f72" dependencies = [ "prost", ] +[[package]] +name = "pulldown-cmark" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e8bbe1a966bd2f362681a44f6edce3c2310ac21e4d5067a6e7ec396297a6ea0" +dependencies = [ + "bitflags", + "memchr", + "unicase", +] + +[[package]] +name = "pulldown-cmark-to-cmark" +version = "21.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e5b6a0769a491a08b31ea5c62494a8f144ee0987d86d670a8af4df1e1b7cde75" +dependencies = [ + "pulldown-cmark", +] + [[package]] name = "quote" version = "1.0.40" @@ -835,7 +913,7 @@ version = "0.5.13" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0d04b7d0ee6b4a0207a0a7adb104d23ecb0b47d6beae7152d0fa34b692b29fd6" dependencies = [ - "bitflags 2.9.1", + "bitflags", ] [[package]] @@ -893,7 +971,7 @@ version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c71e83d6afe7ff64890ec6b71d6a69bb8a610ab78ce364b3352876bb4c801266" dependencies = [ - "bitflags 2.9.1", + "bitflags", "errno", "libc", "linux-raw-sys", @@ -902,32 +980,47 @@ dependencies = [ [[package]] name = "rustls" -version = "0.21.12" +version = "0.23.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f56a14d1f48b391359b22f731fd4bd7e43c97f3c50eee276f3aa09c94784d3e" +checksum = "c0ebcbd2f03de0fc1122ad9bb24b127a5a6cd51d72604a3f3c50ac459762b6cc" dependencies = [ "log", - 
"ring", + "once_cell", + "rustls-pki-types", "rustls-webpki", - "sct", + "subtle", + "zeroize", ] [[package]] -name = "rustls-pemfile" -version = "1.0.4" +name = "rustls-native-certs" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1c74cae0a4cf6ccbbf5f359f08efdf8ee7e1dc532573bf0db71968cb56b1448c" +checksum = "7fcff2dd52b58a8d98a70243663a0d234c4e2b79235637849d15913394a247d3" dependencies = [ - "base64", + "openssl-probe", + "rustls-pki-types", + "schannel", + "security-framework", +] + +[[package]] +name = "rustls-pki-types" +version = "1.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "229a4a4c221013e7e1f1a043678c5cc39fe5171437c88fb47151a21e6f5b5c79" +dependencies = [ + "zeroize", ] [[package]] name = "rustls-webpki" -version = "0.101.7" +version = "0.103.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b6275d1ee7a1cd780b64aca7726599a1dbc893b1e64144529e55c3c2f745765" +checksum = "0a17884ae0c1b773f1ccd2bd4a8c72f16da897310a98b0e84bf349ad5ead92fc" dependencies = [ "ring", + "rustls-pki-types", "untrusted", ] @@ -937,6 +1030,12 @@ version = "1.0.21" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8a0d197bd2c9dc6e53b84da9556a69ba4cdfab8619eb41a8bd1cc2027a0f6b1d" +[[package]] +name = "ruzstd" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3640bec8aad418d7d03c72ea2de10d5c646a598f9883c7babc160d91e3c1b26c" + [[package]] name = "same-file" version = "1.0.6" @@ -946,6 +1045,15 @@ dependencies = [ "winapi-util", ] +[[package]] +name = "schannel" +version = "0.1.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1f29ebaa345f945cec9fbbc532eb307f0fdad8161f281b6369539c8d84876b3d" +dependencies = [ + "windows-sys 0.59.0", +] + [[package]] name = "scopeguard" version = "1.2.0" @@ -953,13 +1061,26 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" [[package]] -name = "sct" -version = "0.7.1" +name = "security-framework" +version = "3.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da046153aa2352493d6cb7da4b6e5c0c057d8a1d0a9aa8560baffdd945acd414" +checksum = "271720403f46ca04f7ba6f55d438f8bd878d6b8ca0a1046e8228c4145bcbb316" dependencies = [ - "ring", - "untrusted", + "bitflags", + "core-foundation", + "core-foundation-sys", + "libc", + "security-framework-sys", +] + +[[package]] +name = "security-framework-sys" +version = "2.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "49db231d56a190491cb4aeda9527f1ad45345af50b0851622a7adb8c03b01c32" +dependencies = [ + "core-foundation-sys", + "libc", ] [[package]] @@ -1018,6 +1139,27 @@ version = "1.15.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "67b1b7a3b5fe4f1376887184045fcf45c69e92af734b7aaddc05fb777b6fbd03" +[[package]] +name = "snafu" +version = "0.8.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "320b01e011bf8d5d7a4a4a4be966d9160968935849c83b918827f6a435e7f627" +dependencies = [ + "snafu-derive", +] + +[[package]] +name = "snafu-derive" +version = "0.8.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1961e2ef424c1424204d3a5d6975f934f56b6d50ff5732382d84ebf460e147f7" +dependencies = [ + "heck", + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "socket2" version = 
"0.5.10" @@ -1028,6 +1170,22 @@ dependencies = [ "windows-sys 0.52.0", ] +[[package]] +name = "socket2" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "233504af464074f9d066d7b5416c5f9b894a5862a6506e306f7b816cdd6f1807" +dependencies = [ + "libc", + "windows-sys 0.59.0", +] + +[[package]] +name = "subtle" +version = "2.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292" + [[package]] name = "syn" version = "2.0.104" @@ -1041,9 +1199,9 @@ dependencies = [ [[package]] name = "sync_wrapper" -version = "0.1.2" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2047c6ded9c721764247e62cd3b03c09ffc529b2ba5b10ec482ae507a4a70160" +checksum = "0bf256ce5efdfa370213c1dabab5935a12e49f2c58d15e9eac2870d3b4f27263" [[package]] name = "tempfile" @@ -1100,21 +1258,11 @@ dependencies = [ "parking_lot", "pin-project-lite", "signal-hook-registry", - "socket2", + "socket2 0.5.10", "tokio-macros", "windows-sys 0.52.0", ] -[[package]] -name = "tokio-io-timeout" -version = "1.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30b74022ada614a1b4834de765f9bb43877f910cc8ce4be40e89042c9223a8bf" -dependencies = [ - "pin-project-lite", - "tokio", -] - [[package]] name = "tokio-macros" version = "2.5.0" @@ -1128,9 +1276,9 @@ dependencies = [ [[package]] name = "tokio-rustls" -version = "0.24.1" +version = "0.26.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c28327cf380ac148141087fbfb9de9d7bd4e84ab5d2c28fbc911d753de8a7081" +checksum = "8e727b36a1a0e8b74c376ac2211e40c2c8af09fb4013c60d910495810f008e9b" dependencies = [ "rustls", "tokio", @@ -1175,11 +1323,10 @@ dependencies = [ [[package]] name = "tonic" -version = "0.10.2" +version = "0.14.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d560933a0de61cf715926b9cac824d4c883c2c43142f787595e48280c40a1d0e" +checksum = "67ac5a8627ada0968acec063a4746bf79588aa03ccb66db2f75d7dce26722a40" dependencies = [ - "async-stream", "async-trait", "axum", "base64", @@ -1187,17 +1334,19 @@ dependencies = [ "h2", "http", "http-body", + "http-body-util", "hyper", "hyper-timeout", + "hyper-util", "percent-encoding", "pin-project", - "prost", - "rustls", - "rustls-pemfile", + "rustls-native-certs", + "socket2 0.6.0", + "sync_wrapper", "tokio", "tokio-rustls", "tokio-stream", - "tower", + "tower 0.5.2", "tower-layer", "tower-service", "tracing", @@ -1205,15 +1354,41 @@ dependencies = [ [[package]] name = "tonic-build" -version = "0.10.2" +version = "0.14.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d021fc044c18582b9a2408cd0dd05b1596e3ecdb5c4df822bb0183545683889" +checksum = "49e323d8bba3be30833707e36d046deabf10a35ae8ad3cae576943ea8933e25d" +dependencies = [ + "prettyplease", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "tonic-prost" +version = "0.14.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b9c511b9a96d40cb12b7d5d00464446acf3b9105fd3ce25437cfe41c92b1c87d" +dependencies = [ + "bytes", + "prost", + "tonic", +] + +[[package]] +name = "tonic-prost-build" +version = "0.14.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ef298fcd01b15e135440c4b8c974460ceca4e6a5af7f1c933b08e4d2875efa1" dependencies = [ "prettyplease", "proc-macro2", "prost-build", + "prost-types", "quote", "syn", + "tempfile", 
+ "tonic-build", ] [[package]] @@ -1237,6 +1412,25 @@ dependencies = [ "tracing", ] +[[package]] +name = "tower" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d039ad9159c98b70ecfd540b2573b97f7f52c3e8d9f8ad57a24b916a536975f9" +dependencies = [ + "futures-core", + "futures-util", + "indexmap 2.10.0", + "pin-project-lite", + "slab", + "sync_wrapper", + "tokio", + "tokio-util", + "tower-layer", + "tower-service", + "tracing", +] + [[package]] name = "tower-layer" version = "0.3.3" @@ -1313,6 +1507,12 @@ version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e421abadd41a4225275504ea4d6566923418b7f05506fbc9c0fe86ba7396114b" +[[package]] +name = "unicase" +version = "2.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75b844d17643ee918803943289730bec8aac480150456169e647ed0b576ba539" + [[package]] name = "unicode-ident" version = "1.0.18" @@ -1557,7 +1757,7 @@ version = "0.39.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6f42320e61fe2cfd34354ecb597f86f413484a798ba44a8ca1165c58d42da6c1" dependencies = [ - "bitflags 2.9.1", + "bitflags", ] [[package]] @@ -1579,3 +1779,9 @@ dependencies = [ "quote", "syn", ] + +[[package]] +name = "zeroize" +version = "1.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ced3678a2879b30306d323f4542626697a464a97c0a07c9aebf7ebca65cd4dde" diff --git a/Cargo.toml b/Cargo.toml index feb8c905f9..6bda1089f9 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -6,12 +6,14 @@ repository = "https://github.com/evstack/ev-node" [workspace] members = ["client/crates/types", "client/crates/client"] +default-members = ["client/crates/types", "client/crates/client"] resolver = "2" [workspace.dependencies] -prost = "0.12" -prost-build = "0.12" -prost-types = "0.12" -tonic = { version = "0.10", features = ["transport"] } -tonic-build = "0.10" +prost = { version = "0.14", default-features = false, features = ["derive"] } +prost-build = "0.14" +prost-types = { version = "0.14", default-features = false } +tonic = { version = "0.14", features = ["transport", "tls-native-roots"] } +tonic-prost = { version = "0.14", default-features = false } +tonic-prost-build = "0.14" walkdir = "2.5.0" diff --git a/client/crates/client/Cargo.toml b/client/crates/client/Cargo.toml index d325eb73fb..8ad9c12ecf 100644 --- a/client/crates/client/Cargo.toml +++ b/client/crates/client/Cargo.toml @@ -13,7 +13,7 @@ categories = ["api-bindings", "network-programming"] [dependencies] ev-types = { version = "0.0.1", path = "../types" } -tonic = { workspace = true, features = ["transport", "tls"] } +tonic = { workspace = true, features = ["transport"] } tokio = { version = "1.45", features = ["full"] } tower = { version = "0.4", features = ["full"] } thiserror = "1.0" diff --git a/client/crates/client/src/lib.rs b/client/crates/client/src/lib.rs index a75101c7c9..bb1bb429d0 100644 --- a/client/crates/client/src/lib.rs +++ b/client/crates/client/src/lib.rs @@ -11,18 +11,18 @@ //! async fn main() -> Result<(), Box> { //! // Connect to a Evolve node //! let client = Client::connect("http://localhost:50051").await?; -//! +//! //! // Check health //! let health = HealthClient::new(&client); //! let is_healthy = health.is_healthy().await?; //! println!("Node healthy: {}", is_healthy); -//! +//! //! // Get namespace configuration //! let config = ConfigClient::new(&client); //! let namespace = config.get_namespace().await?; //! 
println!("Header namespace: {}", namespace.header_namespace); //! println!("Data namespace: {}", namespace.data_namespace); -//! +//! //! Ok(()) //! } //! ``` @@ -42,7 +42,7 @@ //! .connect_timeout(Duration::from_secs(10)) //! .build() //! .await?; -//! +//! //! Ok(()) //! } //! ``` @@ -61,17 +61,17 @@ //! .tls() // Enable TLS with default configuration //! .build() //! .await?; -//! +//! //! // Or with custom TLS configuration //! let tls_config = ClientTlsConfig::new() //! .domain_name("secure-node.ev.xyz"); -//! +//! //! let client = Client::builder() //! .endpoint("https://secure-node.ev.xyz") //! .tls_config(tls_config) //! .build() //! .await?; -//! +//! //! Ok(()) //! } //! ``` diff --git a/client/crates/types/Cargo.toml b/client/crates/types/Cargo.toml index 467f3e07e6..a3b9f628b9 100644 --- a/client/crates/types/Cargo.toml +++ b/client/crates/types/Cargo.toml @@ -12,12 +12,13 @@ keywords = ["ev", "blockchain", "protobuf", "grpc"] categories = ["api-bindings", "encoding"] [features] -default = ["grpc"] -grpc = ["tonic", "transport"] -transport = ["tonic/transport"] +default = ["std", "grpc", "compression"] +std = ["prost/std", "prost-types/std", "bytes?/std", "snafu?/std", "ruzstd?/std"] +compression = ["bytes", "snafu", "ruzstd"] +grpc = ["tonic", "tonic-prost", "std"] # Enable gRPC support (both client and server code are always generated) [build-dependencies] -tonic-build = { workspace = true } +tonic-prost-build = { workspace = true } prost-build = { workspace = true } walkdir = { workspace = true } @@ -25,3 +26,8 @@ walkdir = { workspace = true } prost = { workspace = true } prost-types = { workspace = true } tonic = { workspace = true, optional = true, features = ["transport"] } +tonic-prost = { workspace = true, optional = true } + +bytes = { version = "1.5", optional = true, default-features = false } +snafu = { version = "0.8", optional = true, default-features = false } +ruzstd = { version = "0.8.1", optional = true, default-features = false } diff --git a/client/crates/types/build.rs b/client/crates/types/build.rs index 13bad04728..791fd97131 100644 --- a/client/crates/types/build.rs +++ b/client/crates/types/build.rs @@ -9,15 +9,14 @@ fn main() -> Result<(), Box> { let proto_dir = manifest_dir.join("src/proto"); fs::create_dir_all(&proto_dir)?; - // Check if generated files already exist - let messages_file = proto_dir.join("evnode.v1.messages.rs"); - let services_file = proto_dir.join("evnode.v1.services.rs"); + // Check if generated file already exists + let generated_file = proto_dir.join("evnode.v1.rs"); // Check for environment variable to force regeneration let force_regen = env::var("EV_TYPES_FORCE_PROTO_GEN").is_ok(); - // If files exist and we're not forcing regeneration, skip generation - if !force_regen && messages_file.exists() && services_file.exists() { + // If file exists and we're not forcing regeneration, skip generation + if !force_regen && generated_file.exists() { println!("cargo:warning=Using pre-generated proto files. 
Set EV_TYPES_FORCE_PROTO_GEN=1 to regenerate."); return Ok(()); } @@ -26,8 +25,8 @@ fn main() -> Result<(), Box> { let proto_root = match manifest_dir.join("../../../proto").canonicalize() { Ok(path) => path, Err(e) => { - // If proto files don't exist but generated files do, that's ok - if messages_file.exists() && services_file.exists() { + // If proto files don't exist but generated file does, that's ok + if generated_file.exists() { println!("cargo:warning=Proto source files not found at ../../../proto, using pre-generated files"); return Ok(()); } @@ -47,35 +46,18 @@ fn main() -> Result<(), Box> { }) .collect(); - // Always generate both versions and keep them checked in - // This way users don't need to regenerate based on features - - // 1. Generate pure message types (no tonic dependencies) - let mut prost_config = prost_build::Config::new(); - prost_config.out_dir(&proto_dir); - // Important: we need to rename the output to avoid conflicts - prost_config.compile_protos(&proto_files, &[proto_root.as_path()])?; - - // Rename the generated file to messages.rs - let generated_file = proto_dir.join("evnode.v1.rs"); - let messages_file = proto_dir.join("evnode.v1.messages.rs"); - if generated_file.exists() { - fs::rename(&generated_file, &messages_file)?; - } - - // 2. Generate full code with gRPC services (always generate, conditionally include) - tonic_build::configure() + // Generate a single file with proper feature gates for server and client code + tonic_prost_build::configure() .build_server(true) .build_client(true) + // Add cfg attributes to gate both server and client code behind the "grpc" feature + .server_mod_attribute(".", "#[cfg(feature = \"grpc\")]") + .client_mod_attribute(".", "#[cfg(feature = \"grpc\")]") + // Use BTreeMap instead of HashMap for no_std compatibility + .btree_map(".") + // Generate to our output directory .out_dir(&proto_dir) - .compile(&proto_files, &[proto_root.as_path()])?; - - // Rename to services.rs - let generated_file_2 = proto_dir.join("evnode.v1.rs"); - let services_file = proto_dir.join("evnode.v1.services.rs"); - if generated_file_2.exists() { - fs::rename(&generated_file_2, &services_file)?; - } + .compile_protos(&proto_files, std::slice::from_ref(&proto_root.clone()))?; println!("cargo:rerun-if-changed={}", proto_root.display()); Ok(()) diff --git a/client/crates/types/src/compression.rs b/client/crates/types/src/compression.rs new file mode 100644 index 0000000000..1cf12ff8d7 --- /dev/null +++ b/client/crates/types/src/compression.rs @@ -0,0 +1,280 @@ +//! Blob decompression module +//! +//! This module provides decompression functionality for blobs, +//! matching the Go implementation in the ev-node DA layer. +//! Uses ruzstd for pure Rust zstd decompression without C dependencies. 
+ +use bytes::Bytes; +use ruzstd::decoding::StreamingDecoder; +use snafu::Snafu; + +#[cfg(not(feature = "std"))] +use alloc::{ + format, + string::{String, ToString}, + vec::Vec, +}; + +#[cfg(not(feature = "std"))] +use ruzstd::io::Read; + +#[cfg(feature = "std")] +use std::io::Read; + +/// Size of the compression header in bytes (1 byte flag + 8 bytes original size) +const COMPRESSION_HEADER_SIZE: usize = 9; + +/// Compression flag for uncompressed data +const FLAG_UNCOMPRESSED: u8 = 0x00; + +/// Compression flag for zstd compressed data +const FLAG_ZSTD: u8 = 0x01; + +/// Compression-related errors +#[derive(Debug, Snafu)] +pub enum CompressionError { + #[snafu(display("invalid compression header"))] + InvalidHeader, + + #[snafu(display("invalid compression flag: {flag}"))] + InvalidCompressionFlag { flag: u8 }, + + #[snafu(display("decompression failed: {message}"))] + DecompressionFailed { message: String }, +} + +/// Result type for compression operations +pub type Result = core::result::Result; + +/// Information about a compressed blob +#[derive(Debug, Clone)] +pub struct CompressionInfo { + /// Whether the blob is compressed + pub is_compressed: bool, + /// Compression algorithm used ("none", "zstd") + pub algorithm: String, + /// Original size before compression + pub original_size: u64, + /// Compressed size + pub compressed_size: usize, + /// Compression ratio (compressed_size / original_size) + pub compression_ratio: f64, +} + +/// Parse compression header from blob +fn parse_compression_header(blob: &[u8]) -> Result<(u8, u64, &[u8])> { + if blob.len() < COMPRESSION_HEADER_SIZE { + return Err(CompressionError::InvalidHeader); + } + + let flag = blob[0]; + let original_size = u64::from_le_bytes( + blob[1..9] + .try_into() + .map_err(|_| CompressionError::InvalidHeader)?, + ); + let payload = &blob[COMPRESSION_HEADER_SIZE..]; + + // Validate the compression flag + if flag != FLAG_UNCOMPRESSED && flag != FLAG_ZSTD { + return Err(CompressionError::InvalidCompressionFlag { flag }); + } + + Ok((flag, original_size, payload)) +} + +/// Decompress a blob +pub fn decompress_blob(compressed_blob: &[u8]) -> Result { + // Check if blob is too small to have a header + if compressed_blob.len() < COMPRESSION_HEADER_SIZE { + // Assume legacy uncompressed blob + return Ok(Bytes::copy_from_slice(compressed_blob)); + } + + // Check the compression flag + let flag = compressed_blob[0]; + + // Handle invalid flags with legacy blob heuristics + if flag != FLAG_UNCOMPRESSED && flag != FLAG_ZSTD { + // This could be either a legacy blob or a corrupted header + // Use heuristics to determine which + + let original_size = u64::from_le_bytes(compressed_blob[1..9].try_into().unwrap_or([0; 8])); + + // If flag is in printable ASCII range (32-126) and size is unreasonable, + // it's likely a legacy text blob + if (32..=126).contains(&flag) + && (original_size == 0 || original_size > (compressed_blob.len() as u64 * 100)) + { + // Likely a legacy blob + return Ok(Bytes::copy_from_slice(compressed_blob)); + } + + // Otherwise, it's likely a corrupted compressed blob + return Err(CompressionError::InvalidCompressionFlag { flag }); + } + + // Parse the header + let (flag, original_size, payload) = parse_compression_header(compressed_blob)?; + + match flag { + FLAG_UNCOMPRESSED => { + // Data is uncompressed, just return the payload + Ok(Bytes::copy_from_slice(payload)) + } + FLAG_ZSTD => { + // Decompress with ruzstd + let mut decoder = StreamingDecoder::new(payload).map_err(|e| { + 
CompressionError::DecompressionFailed { + message: format!("{}", e), + } + })?; + + let mut decompressed = Vec::new(); + decoder.read_to_end(&mut decompressed).map_err(|e| { + CompressionError::DecompressionFailed { + message: format!("{}", e), + } + })?; + + // Verify the decompressed size matches + if decompressed.len() as u64 != original_size { + return Err(CompressionError::DecompressionFailed { + message: format!( + "size mismatch: expected {}, got {}", + original_size, + decompressed.len() + ), + }); + } + + Ok(Bytes::from(decompressed)) + } + _ => { + // Should not happen as we validated the flag earlier + Err(CompressionError::InvalidCompressionFlag { flag }) + } + } +} + +/// Get compression information about a blob +pub fn get_compression_info(blob: &[u8]) -> CompressionInfo { + if blob.len() < COMPRESSION_HEADER_SIZE { + return CompressionInfo { + is_compressed: false, + algorithm: "none".to_string(), + original_size: blob.len() as u64, + compressed_size: blob.len(), + compression_ratio: 1.0, + }; + } + + let flag = blob[0]; + if flag != FLAG_UNCOMPRESSED && flag != FLAG_ZSTD { + // Legacy or invalid blob + return CompressionInfo { + is_compressed: false, + algorithm: "none".to_string(), + original_size: blob.len() as u64, + compressed_size: blob.len(), + compression_ratio: 1.0, + }; + } + + if let Ok((flag, original_size, _)) = parse_compression_header(blob) { + let algorithm = match flag { + FLAG_UNCOMPRESSED => "none", + FLAG_ZSTD => "zstd", + _ => "unknown", + }; + + CompressionInfo { + is_compressed: flag == FLAG_ZSTD, + algorithm: algorithm.to_string(), + original_size, + compressed_size: blob.len(), + compression_ratio: if original_size > 0 { + blob.len() as f64 / original_size as f64 + } else { + 1.0 + }, + } + } else { + CompressionInfo { + is_compressed: false, + algorithm: "none".to_string(), + original_size: blob.len() as u64, + compressed_size: blob.len(), + compression_ratio: 1.0, + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_legacy_blob() { + // Test with legacy blob (no compression header) + let legacy_blob = b"legacy data without header"; + + // Should return as-is + let decompressed = decompress_blob(legacy_blob).unwrap(); + assert_eq!(legacy_blob, decompressed.as_ref()); + } + + #[test] + fn test_invalid_compression_flag() { + // Create blob with invalid flag + let mut invalid_blob = vec![0u8; COMPRESSION_HEADER_SIZE + 10]; + invalid_blob[0] = 0xFF; // Invalid flag + + // Should return error + let result = decompress_blob(&invalid_blob); + assert!(result.is_err()); + + match result.unwrap_err() { + CompressionError::InvalidCompressionFlag { flag } => { + assert_eq!(flag, 0xFF); + } + _ => panic!("Expected InvalidCompressionFlag error"), + } + } + + #[test] + fn test_uncompressed_with_header() { + // Create a blob with uncompressed header + let original_data = b"test data"; + let mut blob = Vec::with_capacity(COMPRESSION_HEADER_SIZE + original_data.len()); + + // Add header + blob.push(FLAG_UNCOMPRESSED); + blob.extend_from_slice(&(original_data.len() as u64).to_le_bytes()); + blob.extend_from_slice(original_data); + + // Decompress + let decompressed = decompress_blob(&blob).unwrap(); + assert_eq!(original_data, decompressed.as_ref()); + + // Check info + let info = get_compression_info(&blob); + assert!(!info.is_compressed); + assert_eq!(info.algorithm, "none"); + assert_eq!(info.original_size, original_data.len() as u64); + } + + #[test] + fn test_compression_info() { + // Test with uncompressed data + let mut blob = 
Vec::new(); + blob.push(FLAG_UNCOMPRESSED); + blob.extend_from_slice(&100u64.to_le_bytes()); + blob.extend_from_slice(&[0u8; 100]); + + let info = get_compression_info(&blob); + assert!(!info.is_compressed); + assert_eq!(info.algorithm, "none"); + assert_eq!(info.original_size, 100); + } +} diff --git a/client/crates/types/src/lib.rs b/client/crates/types/src/lib.rs index c274a05417..7fdf8e2234 100644 --- a/client/crates/types/src/lib.rs +++ b/client/crates/types/src/lib.rs @@ -1,9 +1,18 @@ -pub mod v1 { - // Always include the pure message types (no tonic dependencies) - #[cfg(not(feature = "grpc"))] - include!("proto/evnode.v1.messages.rs"); +#![cfg_attr(not(feature = "std"), no_std)] + +// When no_std, we need alloc for prost's Vec and String types +#[cfg(not(feature = "std"))] +extern crate alloc; + +#[cfg(feature = "compression")] +pub mod compression; - // Include the full version with gRPC services when the feature is enabled - #[cfg(feature = "grpc")] - include!("proto/evnode.v1.services.rs"); +pub mod v1 { + // Include the generated protobuf code + // The generated code has feature gates for client and server code + include!("proto/evnode.v1.rs"); } + +// Re-export compression types for convenience when compression is enabled +#[cfg(feature = "compression")] +pub use compression::{decompress_blob, get_compression_info, CompressionError, CompressionInfo}; diff --git a/client/crates/types/src/proto/evnode.v1.messages.rs b/client/crates/types/src/proto/evnode.v1.messages.rs deleted file mode 100644 index 62c5d4ba90..0000000000 --- a/client/crates/types/src/proto/evnode.v1.messages.rs +++ /dev/null @@ -1,429 +0,0 @@ -// This file is @generated by prost-build. -/// The SignRequest holds the bytes we want to sign. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct SignRequest { - #[prost(bytes = "vec", tag = "1")] - pub message: ::prost::alloc::vec::Vec, -} -/// The SignResponse returns the signature bytes. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct SignResponse { - #[prost(bytes = "vec", tag = "1")] - pub signature: ::prost::alloc::vec::Vec, -} -/// The GetPublicRequest is an empty request. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct GetPublicRequest {} -/// The GetPublicResponse returns the public key. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct GetPublicResponse { - #[prost(bytes = "vec", tag = "1")] - pub public_key: ::prost::alloc::vec::Vec, -} -/// Version captures the consensus rules for processing a block in the blockchain, -/// including all blockchain data structures and the rules of the application's -/// state transition machine. -/// This is equivalent to the tmversion.Consensus type in Tendermint. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct Version { - #[prost(uint64, tag = "1")] - pub block: u64, - #[prost(uint64, tag = "2")] - pub app: u64, -} -/// Header is the header of a block in the blockchain. 
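
For orientation, the new compression module and its re-exports can be exercised roughly as follows. This is a minimal sketch, not part of the patch: it assumes a downstream crate that depends on ev-types with the compression feature enabled (it is in the defaults), and it frames a payload with the same 9-byte header (1-byte flag, 8-byte little-endian original size) that decompress_blob parses.

// Minimal sketch: assumes a downstream crate depending on `ev-types`
// with the `compression` feature enabled (part of the default features).
use ev_types::{decompress_blob, get_compression_info};

fn main() -> Result<(), ev_types::CompressionError> {
    // Frame a payload with the 9-byte header the module expects:
    // byte 0 = flag (0x00 = uncompressed), bytes 1..9 = original size (LE u64).
    let payload = b"hello world";
    let mut blob = Vec::with_capacity(9 + payload.len());
    blob.push(0x00);
    blob.extend_from_slice(&(payload.len() as u64).to_le_bytes());
    blob.extend_from_slice(payload);

    // Inspect and decode the blob using the re-exported helpers.
    let info = get_compression_info(&blob);
    assert_eq!(info.algorithm, "none");
    assert_eq!(info.original_size, payload.len() as u64);

    let decoded = decompress_blob(&blob)?;
    assert_eq!(payload, decoded.as_ref());
    Ok(())
}

Blobs produced before the header was introduced (no 9-byte prefix, or a printable-ASCII first byte with an implausible size field) are passed through unchanged by decompress_blob, so the call is also safe on legacy data.
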
-#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct Header { - /// Block and App version - #[prost(message, optional, tag = "1")] - pub version: ::core::option::Option, - /// Block height - #[prost(uint64, tag = "2")] - pub height: u64, - /// Block creation time - #[prost(uint64, tag = "3")] - pub time: u64, - /// Previous block info - #[prost(bytes = "vec", tag = "4")] - pub last_header_hash: ::prost::alloc::vec::Vec, - /// Commit from aggregator(s) from the last block - #[prost(bytes = "vec", tag = "5")] - pub last_commit_hash: ::prost::alloc::vec::Vec, - /// Block.Data root aka Transactions - #[prost(bytes = "vec", tag = "6")] - pub data_hash: ::prost::alloc::vec::Vec, - /// Consensus params for current block - #[prost(bytes = "vec", tag = "7")] - pub consensus_hash: ::prost::alloc::vec::Vec, - /// State after applying txs from the current block - #[prost(bytes = "vec", tag = "8")] - pub app_hash: ::prost::alloc::vec::Vec, - /// Root hash of all results from the txs from the previous block. - /// This is ABCI specific but smart-contract chains require some way of committing - /// to transaction receipts/results. - #[prost(bytes = "vec", tag = "9")] - pub last_results_hash: ::prost::alloc::vec::Vec, - /// Original proposer of the block - /// Note that the address can be derived from the pubkey which can be derived - /// from the signature when using secp256k. - /// We keep this in case users choose another signature format where the - /// pubkey can't be recovered by the signature (e.g. ed25519). - #[prost(bytes = "vec", tag = "10")] - pub proposer_address: ::prost::alloc::vec::Vec, - /// validatorhash for compatibility with tendermint light client. - #[prost(bytes = "vec", tag = "11")] - pub validator_hash: ::prost::alloc::vec::Vec, - /// Chain ID the block belongs to - #[prost(string, tag = "12")] - pub chain_id: ::prost::alloc::string::String, -} -/// SignedHeader is a header with a signature and a signer. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct SignedHeader { - #[prost(message, optional, tag = "1")] - pub header: ::core::option::Option
, - #[prost(bytes = "vec", tag = "2")] - pub signature: ::prost::alloc::vec::Vec, - #[prost(message, optional, tag = "3")] - pub signer: ::core::option::Option, -} -/// Signer is a signer of a block in the blockchain. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct Signer { - /// Address of the signer - #[prost(bytes = "vec", tag = "1")] - pub address: ::prost::alloc::vec::Vec, - /// Public key of the signer - #[prost(bytes = "vec", tag = "2")] - pub pub_key: ::prost::alloc::vec::Vec, -} -/// Metadata is the metadata of a block in the blockchain. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct Metadata { - /// chain id - #[prost(string, tag = "1")] - pub chain_id: ::prost::alloc::string::String, - /// Block height - #[prost(uint64, tag = "2")] - pub height: u64, - /// Block creation time - #[prost(uint64, tag = "3")] - pub time: u64, - /// Previous block info - #[prost(bytes = "vec", tag = "4")] - pub last_data_hash: ::prost::alloc::vec::Vec, -} -/// Data is the data of a block in the blockchain. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct Data { - #[prost(message, optional, tag = "1")] - pub metadata: ::core::option::Option, - #[prost(bytes = "vec", repeated, tag = "2")] - pub txs: ::prost::alloc::vec::Vec<::prost::alloc::vec::Vec>, -} -/// SignedData is a data with a signature and a signer. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct SignedData { - #[prost(message, optional, tag = "1")] - pub data: ::core::option::Option, - #[prost(bytes = "vec", tag = "2")] - pub signature: ::prost::alloc::vec::Vec, - #[prost(message, optional, tag = "3")] - pub signer: ::core::option::Option, -} -/// Vote is a vote for a block in the blockchain. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct Vote { - /// Chain ID - #[prost(string, tag = "1")] - pub chain_id: ::prost::alloc::string::String, - /// Block height - #[prost(uint64, tag = "2")] - pub height: u64, - /// Timestamp - #[prost(message, optional, tag = "3")] - pub timestamp: ::core::option::Option<::prost_types::Timestamp>, - /// Block ID hash - #[prost(bytes = "vec", tag = "4")] - pub block_id_hash: ::prost::alloc::vec::Vec, - /// Validator address - #[prost(bytes = "vec", tag = "5")] - pub validator_address: ::prost::alloc::vec::Vec, -} -/// State is the state of the blockchain. 
-#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct State { - #[prost(message, optional, tag = "1")] - pub version: ::core::option::Option, - #[prost(string, tag = "2")] - pub chain_id: ::prost::alloc::string::String, - #[prost(uint64, tag = "3")] - pub initial_height: u64, - #[prost(uint64, tag = "4")] - pub last_block_height: u64, - #[prost(message, optional, tag = "5")] - pub last_block_time: ::core::option::Option<::prost_types::Timestamp>, - #[prost(uint64, tag = "6")] - pub da_height: u64, - #[prost(bytes = "vec", tag = "7")] - pub last_results_hash: ::prost::alloc::vec::Vec, - #[prost(bytes = "vec", tag = "8")] - pub app_hash: ::prost::alloc::vec::Vec, -} -/// GetPeerInfoResponse defines the response for retrieving peer information -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct GetPeerInfoResponse { - /// List of connected peers - #[prost(message, repeated, tag = "1")] - pub peers: ::prost::alloc::vec::Vec, -} -/// GetNetInfoResponse defines the response for retrieving network information -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct GetNetInfoResponse { - /// Network information - #[prost(message, optional, tag = "1")] - pub net_info: ::core::option::Option, -} -/// PeerInfo contains information about a connected peer -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct PeerInfo { - /// Peer ID - #[prost(string, tag = "1")] - pub id: ::prost::alloc::string::String, - /// Peer address - #[prost(string, tag = "2")] - pub address: ::prost::alloc::string::String, -} -/// NetInfo contains information about the network -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct NetInfo { - /// Network ID - #[prost(string, tag = "1")] - pub id: ::prost::alloc::string::String, - /// Listen address - #[prost(string, repeated, tag = "2")] - pub listen_addresses: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, - /// List of connected peers - #[prost(string, repeated, tag = "3")] - pub connected_peers: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, -} -/// Batch is a collection of transactions. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct Batch { - #[prost(bytes = "vec", repeated, tag = "1")] - pub txs: ::prost::alloc::vec::Vec<::prost::alloc::vec::Vec>, -} -/// GetHealthResponse defines the response for retrieving health status -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct GetHealthResponse { - /// Health status - #[prost(enumeration = "HealthStatus", tag = "1")] - pub status: i32, -} -/// HealthStatus defines the health status of the node -#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] -#[repr(i32)] -pub enum HealthStatus { - /// Unknown health status - Unknown = 0, - /// Healthy status (Healthy) - Pass = 1, - /// Degraded but still serving - Warn = 2, - /// Hard fail - Fail = 3, -} -impl HealthStatus { - /// String value of the enum field names used in the ProtoBuf definition. - /// - /// The values are not transformed in any way and thus are considered stable - /// (if the ProtoBuf definition does not change) and safe for programmatic use. 
- pub fn as_str_name(&self) -> &'static str { - match self { - HealthStatus::Unknown => "UNKNOWN", - HealthStatus::Pass => "PASS", - HealthStatus::Warn => "WARN", - HealthStatus::Fail => "FAIL", - } - } - /// Creates an enum from field names used in the ProtoBuf definition. - pub fn from_str_name(value: &str) -> ::core::option::Option { - match value { - "UNKNOWN" => Some(Self::Unknown), - "PASS" => Some(Self::Pass), - "WARN" => Some(Self::Warn), - "FAIL" => Some(Self::Fail), - _ => None, - } - } -} -/// InitChainRequest contains the genesis parameters for chain initialization -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct InitChainRequest { - /// Timestamp marking chain start time in UTC - #[prost(message, optional, tag = "1")] - pub genesis_time: ::core::option::Option<::prost_types::Timestamp>, - /// First block height (must be > 0) - #[prost(uint64, tag = "2")] - pub initial_height: u64, - /// Unique identifier string for the blockchain - #[prost(string, tag = "3")] - pub chain_id: ::prost::alloc::string::String, -} -/// InitChainResponse contains the initial state and configuration -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct InitChainResponse { - /// Hash representing initial state - #[prost(bytes = "vec", tag = "1")] - pub state_root: ::prost::alloc::vec::Vec, - /// Maximum allowed bytes for transactions in a block - #[prost(uint64, tag = "2")] - pub max_bytes: u64, -} -/// GetTxsRequest is the request for fetching transactions -/// -/// Empty for now, may include filtering criteria in the future -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct GetTxsRequest {} -/// GetTxsResponse contains the available transactions -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct GetTxsResponse { - /// Slice of valid transactions from mempool - #[prost(bytes = "vec", repeated, tag = "1")] - pub txs: ::prost::alloc::vec::Vec<::prost::alloc::vec::Vec>, -} -/// ExecuteTxsRequest contains transactions and block context for execution -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct ExecuteTxsRequest { - /// Ordered list of transactions to execute - #[prost(bytes = "vec", repeated, tag = "1")] - pub txs: ::prost::alloc::vec::Vec<::prost::alloc::vec::Vec>, - /// Height of block being created (must be > 0) - #[prost(uint64, tag = "2")] - pub block_height: u64, - /// Block creation time in UTC - #[prost(message, optional, tag = "3")] - pub timestamp: ::core::option::Option<::prost_types::Timestamp>, - /// Previous block's state root hash - #[prost(bytes = "vec", tag = "4")] - pub prev_state_root: ::prost::alloc::vec::Vec, -} -/// ExecuteTxsResponse contains the result of transaction execution -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct ExecuteTxsResponse { - /// New state root after executing transactions - #[prost(bytes = "vec", tag = "1")] - pub updated_state_root: ::prost::alloc::vec::Vec, - /// Maximum allowed transaction size (may change with protocol updates) - #[prost(uint64, tag = "2")] - pub max_bytes: u64, -} -/// SetFinalRequest marks a block as finalized -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct SetFinalRequest { - /// Height of block to finalize - #[prost(uint64, tag = "1")] - 
pub block_height: u64, -} -/// SetFinalResponse indicates whether finalization was successful -/// -/// Empty response, errors are returned via gRPC status -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct SetFinalResponse {} -/// Block contains all the components of a complete block -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct Block { - #[prost(message, optional, tag = "1")] - pub header: ::core::option::Option, - #[prost(message, optional, tag = "2")] - pub data: ::core::option::Option, -} -/// GetBlockRequest defines the request for retrieving a block -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct GetBlockRequest { - /// The height or hash of the block to retrieve - #[prost(oneof = "get_block_request::Identifier", tags = "1, 2")] - pub identifier: ::core::option::Option, -} -/// Nested message and enum types in `GetBlockRequest`. -pub mod get_block_request { - /// The height or hash of the block to retrieve - #[allow(clippy::derive_partial_eq_without_eq)] - #[derive(Clone, PartialEq, ::prost::Oneof)] - pub enum Identifier { - #[prost(uint64, tag = "1")] - Height(u64), - #[prost(bytes, tag = "2")] - Hash(::prost::alloc::vec::Vec), - } -} -/// GetBlockResponse defines the response for retrieving a block -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct GetBlockResponse { - #[prost(message, optional, tag = "1")] - pub block: ::core::option::Option, - #[prost(uint64, tag = "2")] - pub header_da_height: u64, - #[prost(uint64, tag = "3")] - pub data_da_height: u64, -} -/// GetStateResponse defines the response for retrieving the current state -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct GetStateResponse { - #[prost(message, optional, tag = "1")] - pub state: ::core::option::Option, -} -/// GetMetadataRequest defines the request for retrieving metadata by key -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct GetMetadataRequest { - #[prost(string, tag = "1")] - pub key: ::prost::alloc::string::String, -} -/// GetMetadataResponse defines the response for retrieving metadata -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct GetMetadataResponse { - #[prost(bytes = "vec", tag = "1")] - pub value: ::prost::alloc::vec::Vec, -} -/// GetNamespaceResponse returns the namespace for this network -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct GetNamespaceResponse { - #[prost(string, tag = "1")] - pub header_namespace: ::prost::alloc::string::String, - #[prost(string, tag = "2")] - pub data_namespace: ::prost::alloc::string::String, -} diff --git a/client/crates/types/src/proto/evnode.v1.services.rs b/client/crates/types/src/proto/evnode.v1.rs similarity index 84% rename from client/crates/types/src/proto/evnode.v1.services.rs rename to client/crates/types/src/proto/evnode.v1.rs index 089666949a..a961d9a50e 100644 --- a/client/crates/types/src/proto/evnode.v1.services.rs +++ b/client/crates/types/src/proto/evnode.v1.rs @@ -1,32 +1,35 @@ // This file is @generated by prost-build. /// The SignRequest holds the bytes we want to sign. 
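
The regenerated service code that follows is gated behind the grpc feature and uses the split tonic-prost codec. As a rough illustration only (the endpoint URL, the tokio dependency of the calling crate, and the transport-enabled connect constructor that tonic-build normally generates are assumptions; SignerServiceClient, GetPublicRequest, and get_public come from the generated code below), a caller might drive the regenerated client like this:

// Hypothetical caller crate: assumes `ev-types` with the `grpc` feature,
// plus `tokio` as a direct dependency; the endpoint address is made up.
use ev_types::v1::signer_service_client::SignerServiceClient;
use ev_types::v1::GetPublicRequest;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // `connect` is the transport constructor emitted by tonic-build when
    // tonic's transport support is enabled (as it is in this workspace).
    let mut client = SignerServiceClient::connect("http://localhost:50051").await?;
    let response = client.get_public(GetPublicRequest {}).await?;
    println!("public key: {} bytes", response.into_inner().public_key.len());
    Ok(())
}
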
-#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] pub struct SignRequest { #[prost(bytes = "vec", tag = "1")] pub message: ::prost::alloc::vec::Vec, } /// The SignResponse returns the signature bytes. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] pub struct SignResponse { #[prost(bytes = "vec", tag = "1")] pub signature: ::prost::alloc::vec::Vec, } /// The GetPublicRequest is an empty request. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)] pub struct GetPublicRequest {} /// The GetPublicResponse returns the public key. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] pub struct GetPublicResponse { #[prost(bytes = "vec", tag = "1")] pub public_key: ::prost::alloc::vec::Vec, } /// Generated client implementations. +#[cfg(feature = "grpc")] pub mod signer_service_client { - #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] + #![allow( + unused_variables, + dead_code, + missing_docs, + clippy::wildcard_imports, + clippy::let_unit_value, + )] use tonic::codegen::*; use tonic::codegen::http::Uri; /// The SignerService defines the RPCs to sign and to retrieve the public key. @@ -47,10 +50,10 @@ pub mod signer_service_client { } impl SignerServiceClient where - T: tonic::client::GrpcService, + T: tonic::client::GrpcService, T::Error: Into, - T::ResponseBody: Body + Send + 'static, - ::Error: Into + Send, + T::ResponseBody: Body + std::marker::Send + 'static, + ::Error: Into + std::marker::Send, { pub fn new(inner: T) -> Self { let inner = tonic::client::Grpc::new(inner); @@ -68,14 +71,14 @@ pub mod signer_service_client { F: tonic::service::Interceptor, T::ResponseBody: Default, T: tonic::codegen::Service< - http::Request, + http::Request, Response = http::Response< - >::ResponseBody, + >::ResponseBody, >, >, , - >>::Error: Into + Send + Sync, + http::Request, + >>::Error: Into + std::marker::Send + std::marker::Sync, { SignerServiceClient::new(InterceptedService::new(inner, interceptor)) } @@ -119,12 +122,11 @@ pub mod signer_service_client { .ready() .await .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, + tonic::Status::unknown( format!("Service was not ready: {}", e.into()), ) })?; - let codec = tonic::codec::ProstCodec::default(); + let codec = tonic_prost::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/evnode.v1.SignerService/Sign", ); @@ -145,12 +147,11 @@ pub mod signer_service_client { .ready() .await .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, + tonic::Status::unknown( format!("Service was not ready: {}", e.into()), ) })?; - let codec = tonic::codec::ProstCodec::default(); + let codec = tonic_prost::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/evnode.v1.SignerService/GetPublic", ); @@ -162,12 +163,19 @@ pub mod signer_service_client { } } /// Generated server implementations. 
+#[cfg(feature = "grpc")] pub mod signer_service_server { - #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] + #![allow( + unused_variables, + dead_code, + missing_docs, + clippy::wildcard_imports, + clippy::let_unit_value, + )] use tonic::codegen::*; /// Generated trait containing gRPC methods that should be implemented for use with SignerServiceServer. #[async_trait] - pub trait SignerService: Send + Sync + 'static { + pub trait SignerService: std::marker::Send + std::marker::Sync + 'static { /// Sign signs the given message. async fn sign( &self, @@ -184,20 +192,18 @@ pub mod signer_service_server { } /// The SignerService defines the RPCs to sign and to retrieve the public key. #[derive(Debug)] - pub struct SignerServiceServer { - inner: _Inner, + pub struct SignerServiceServer { + inner: Arc, accept_compression_encodings: EnabledCompressionEncodings, send_compression_encodings: EnabledCompressionEncodings, max_decoding_message_size: Option, max_encoding_message_size: Option, } - struct _Inner(Arc); - impl SignerServiceServer { + impl SignerServiceServer { pub fn new(inner: T) -> Self { Self::from_arc(Arc::new(inner)) } pub fn from_arc(inner: Arc) -> Self { - let inner = _Inner(inner); Self { inner, accept_compression_encodings: Default::default(), @@ -247,10 +253,10 @@ pub mod signer_service_server { impl tonic::codegen::Service> for SignerServiceServer where T: SignerService, - B: Body + Send + 'static, - B::Error: Into + Send + 'static, + B: Body + std::marker::Send + 'static, + B::Error: Into + std::marker::Send + 'static, { - type Response = http::Response; + type Response = http::Response; type Error = std::convert::Infallible; type Future = BoxFuture; fn poll_ready( @@ -260,7 +266,6 @@ pub mod signer_service_server { Poll::Ready(Ok(())) } fn call(&mut self, req: http::Request) -> Self::Future { - let inner = self.inner.clone(); match req.uri().path() { "/evnode.v1.SignerService/Sign" => { #[allow(non_camel_case_types)] @@ -290,9 +295,8 @@ pub mod signer_service_server { let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { - let inner = inner.0; let method = SignSvc(inner); - let codec = tonic::codec::ProstCodec::default(); + let codec = tonic_prost::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) .apply_compression_config( accept_compression_encodings, @@ -336,9 +340,8 @@ pub mod signer_service_server { let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { - let inner = inner.0; let method = GetPublicSvc(inner); - let codec = tonic::codec::ProstCodec::default(); + let codec = tonic_prost::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) .apply_compression_config( accept_compression_encodings, @@ -355,20 +358,27 @@ pub mod signer_service_server { } _ => { Box::pin(async move { - Ok( - http::Response::builder() - .status(200) - .header("grpc-status", "12") - .header("content-type", "application/grpc") - .body(empty_body()) - .unwrap(), - ) + let mut response = http::Response::new( + tonic::body::Body::default(), + ); + let headers = response.headers_mut(); + headers + .insert( + tonic::Status::GRPC_STATUS, + (tonic::Code::Unimplemented as i32).into(), + ); + headers + .insert( + http::header::CONTENT_TYPE, + tonic::metadata::GRPC_CONTENT_TYPE, + ); + Ok(response) }) } } } } - impl Clone for SignerServiceServer { + impl Clone for SignerServiceServer { fn clone(&self) -> Self { let 
inner = self.inner.clone(); Self { @@ -380,26 +390,17 @@ pub mod signer_service_server { } } } - impl Clone for _Inner { - fn clone(&self) -> Self { - Self(Arc::clone(&self.0)) - } - } - impl std::fmt::Debug for _Inner { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "{:?}", self.0) - } - } - impl tonic::server::NamedService for SignerServiceServer { - const NAME: &'static str = "evnode.v1.SignerService"; + /// Generated gRPC service name + pub const SERVICE_NAME: &str = "evnode.v1.SignerService"; + impl tonic::server::NamedService for SignerServiceServer { + const NAME: &'static str = SERVICE_NAME; } } /// Version captures the consensus rules for processing a block in the blockchain, /// including all blockchain data structures and the rules of the application's /// state transition machine. /// This is equivalent to the tmversion.Consensus type in Tendermint. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)] pub struct Version { #[prost(uint64, tag = "1")] pub block: u64, @@ -407,8 +408,7 @@ pub struct Version { pub app: u64, } /// Header is the header of a block in the blockchain. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] pub struct Header { /// Block and App version #[prost(message, optional, tag = "1")] @@ -454,8 +454,7 @@ pub struct Header { pub chain_id: ::prost::alloc::string::String, } /// SignedHeader is a header with a signature and a signer. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] pub struct SignedHeader { #[prost(message, optional, tag = "1")] pub header: ::core::option::Option
, @@ -465,8 +464,7 @@ pub struct SignedHeader { pub signer: ::core::option::Option, } /// Signer is a signer of a block in the blockchain. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] pub struct Signer { /// Address of the signer #[prost(bytes = "vec", tag = "1")] @@ -476,8 +474,7 @@ pub struct Signer { pub pub_key: ::prost::alloc::vec::Vec, } /// Metadata is the metadata of a block in the blockchain. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] pub struct Metadata { /// chain id #[prost(string, tag = "1")] @@ -493,8 +490,7 @@ pub struct Metadata { pub last_data_hash: ::prost::alloc::vec::Vec, } /// Data is the data of a block in the blockchain. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] pub struct Data { #[prost(message, optional, tag = "1")] pub metadata: ::core::option::Option, @@ -502,8 +498,7 @@ pub struct Data { pub txs: ::prost::alloc::vec::Vec<::prost::alloc::vec::Vec>, } /// SignedData is a data with a signature and a signer. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] pub struct SignedData { #[prost(message, optional, tag = "1")] pub data: ::core::option::Option, @@ -513,8 +508,7 @@ pub struct SignedData { pub signer: ::core::option::Option, } /// Vote is a vote for a block in the blockchain. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] pub struct Vote { /// Chain ID #[prost(string, tag = "1")] @@ -533,8 +527,7 @@ pub struct Vote { pub validator_address: ::prost::alloc::vec::Vec, } /// State is the state of the blockchain. 
-#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] pub struct State { #[prost(message, optional, tag = "1")] pub version: ::core::option::Option, @@ -554,7 +547,6 @@ pub struct State { pub app_hash: ::prost::alloc::vec::Vec, } /// GetPeerInfoResponse defines the response for retrieving peer information -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct GetPeerInfoResponse { /// List of connected peers @@ -562,16 +554,14 @@ pub struct GetPeerInfoResponse { pub peers: ::prost::alloc::vec::Vec, } /// GetNetInfoResponse defines the response for retrieving network information -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] pub struct GetNetInfoResponse { /// Network information #[prost(message, optional, tag = "1")] pub net_info: ::core::option::Option, } /// PeerInfo contains information about a connected peer -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] pub struct PeerInfo { /// Peer ID #[prost(string, tag = "1")] @@ -581,8 +571,7 @@ pub struct PeerInfo { pub address: ::prost::alloc::string::String, } /// NetInfo contains information about the network -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] pub struct NetInfo { /// Network ID #[prost(string, tag = "1")] @@ -595,8 +584,15 @@ pub struct NetInfo { pub connected_peers: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, } /// Generated client implementations. 
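The regenerated message types above also pick up `Eq` and `Hash` derives (plus `Copy` for small fixed-size messages such as `Version`), so they can serve directly as keys in hash-based collections. A small sketch, again assuming an `ev_types::v1` re-export path:

    use std::collections::HashSet;
    use ev_types::v1::Version;

    fn main() {
        // Version now derives Copy, Eq and Hash, so it can be stored and
        // looked up by value like any other hashable key.
        let mut seen = HashSet::new();
        seen.insert(Version { block: 1, app: 2 });
        assert!(seen.contains(&Version { block: 1, app: 2 }));
    }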
+#[cfg(feature = "grpc")] pub mod p2p_service_client { - #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] + #![allow( + unused_variables, + dead_code, + missing_docs, + clippy::wildcard_imports, + clippy::let_unit_value, + )] use tonic::codegen::*; use tonic::codegen::http::Uri; /// P2PService defines the RPC service for the P2P package @@ -617,10 +613,10 @@ pub mod p2p_service_client { } impl P2pServiceClient where - T: tonic::client::GrpcService, + T: tonic::client::GrpcService, T::Error: Into, - T::ResponseBody: Body + Send + 'static, - ::Error: Into + Send, + T::ResponseBody: Body + std::marker::Send + 'static, + ::Error: Into + std::marker::Send, { pub fn new(inner: T) -> Self { let inner = tonic::client::Grpc::new(inner); @@ -638,14 +634,14 @@ pub mod p2p_service_client { F: tonic::service::Interceptor, T::ResponseBody: Default, T: tonic::codegen::Service< - http::Request, + http::Request, Response = http::Response< - >::ResponseBody, + >::ResponseBody, >, >, , - >>::Error: Into + Send + Sync, + http::Request, + >>::Error: Into + std::marker::Send + std::marker::Sync, { P2pServiceClient::new(InterceptedService::new(inner, interceptor)) } @@ -692,12 +688,11 @@ pub mod p2p_service_client { .ready() .await .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, + tonic::Status::unknown( format!("Service was not ready: {}", e.into()), ) })?; - let codec = tonic::codec::ProstCodec::default(); + let codec = tonic_prost::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/evnode.v1.P2PService/GetPeerInfo", ); @@ -718,12 +713,11 @@ pub mod p2p_service_client { .ready() .await .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, + tonic::Status::unknown( format!("Service was not ready: {}", e.into()), ) })?; - let codec = tonic::codec::ProstCodec::default(); + let codec = tonic_prost::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/evnode.v1.P2PService/GetNetInfo", ); @@ -735,12 +729,19 @@ pub mod p2p_service_client { } } /// Generated server implementations. +#[cfg(feature = "grpc")] pub mod p2p_service_server { - #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] + #![allow( + unused_variables, + dead_code, + missing_docs, + clippy::wildcard_imports, + clippy::let_unit_value, + )] use tonic::codegen::*; /// Generated trait containing gRPC methods that should be implemented for use with P2pServiceServer. 
#[async_trait] - pub trait P2pService: Send + Sync + 'static { + pub trait P2pService: std::marker::Send + std::marker::Sync + 'static { /// GetPeerInfo returns information about the connected peers async fn get_peer_info( &self, @@ -760,20 +761,18 @@ pub mod p2p_service_server { } /// P2PService defines the RPC service for the P2P package #[derive(Debug)] - pub struct P2pServiceServer { - inner: _Inner, + pub struct P2pServiceServer { + inner: Arc, accept_compression_encodings: EnabledCompressionEncodings, send_compression_encodings: EnabledCompressionEncodings, max_decoding_message_size: Option, max_encoding_message_size: Option, } - struct _Inner(Arc); - impl P2pServiceServer { + impl P2pServiceServer { pub fn new(inner: T) -> Self { Self::from_arc(Arc::new(inner)) } pub fn from_arc(inner: Arc) -> Self { - let inner = _Inner(inner); Self { inner, accept_compression_encodings: Default::default(), @@ -823,10 +822,10 @@ pub mod p2p_service_server { impl tonic::codegen::Service> for P2pServiceServer where T: P2pService, - B: Body + Send + 'static, - B::Error: Into + Send + 'static, + B: Body + std::marker::Send + 'static, + B::Error: Into + std::marker::Send + 'static, { - type Response = http::Response; + type Response = http::Response; type Error = std::convert::Infallible; type Future = BoxFuture; fn poll_ready( @@ -836,7 +835,6 @@ pub mod p2p_service_server { Poll::Ready(Ok(())) } fn call(&mut self, req: http::Request) -> Self::Future { - let inner = self.inner.clone(); match req.uri().path() { "/evnode.v1.P2PService/GetPeerInfo" => { #[allow(non_camel_case_types)] @@ -862,9 +860,8 @@ pub mod p2p_service_server { let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { - let inner = inner.0; let method = GetPeerInfoSvc(inner); - let codec = tonic::codec::ProstCodec::default(); + let codec = tonic_prost::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) .apply_compression_config( accept_compression_encodings, @@ -903,9 +900,8 @@ pub mod p2p_service_server { let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { - let inner = inner.0; let method = GetNetInfoSvc(inner); - let codec = tonic::codec::ProstCodec::default(); + let codec = tonic_prost::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) .apply_compression_config( accept_compression_encodings, @@ -922,20 +918,27 @@ pub mod p2p_service_server { } _ => { Box::pin(async move { - Ok( - http::Response::builder() - .status(200) - .header("grpc-status", "12") - .header("content-type", "application/grpc") - .body(empty_body()) - .unwrap(), - ) + let mut response = http::Response::new( + tonic::body::Body::default(), + ); + let headers = response.headers_mut(); + headers + .insert( + tonic::Status::GRPC_STATUS, + (tonic::Code::Unimplemented as i32).into(), + ); + headers + .insert( + http::header::CONTENT_TYPE, + tonic::metadata::GRPC_CONTENT_TYPE, + ); + Ok(response) }) } } } } - impl Clone for P2pServiceServer { + impl Clone for P2pServiceServer { fn clone(&self) -> Self { let inner = self.inner.clone(); Self { @@ -947,30 +950,20 @@ pub mod p2p_service_server { } } } - impl Clone for _Inner { - fn clone(&self) -> Self { - Self(Arc::clone(&self.0)) - } - } - impl std::fmt::Debug for _Inner { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "{:?}", self.0) - } - } - impl tonic::server::NamedService for P2pServiceServer { - const NAME: &'static 
str = "evnode.v1.P2PService"; + /// Generated gRPC service name + pub const SERVICE_NAME: &str = "evnode.v1.P2PService"; + impl tonic::server::NamedService for P2pServiceServer { + const NAME: &'static str = SERVICE_NAME; } } /// Batch is a collection of transactions. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] pub struct Batch { #[prost(bytes = "vec", repeated, tag = "1")] pub txs: ::prost::alloc::vec::Vec<::prost::alloc::vec::Vec>, } /// GetHealthResponse defines the response for retrieving health status -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)] pub struct GetHealthResponse { /// Health status #[prost(enumeration = "HealthStatus", tag = "1")] @@ -996,10 +989,10 @@ impl HealthStatus { /// (if the ProtoBuf definition does not change) and safe for programmatic use. pub fn as_str_name(&self) -> &'static str { match self { - HealthStatus::Unknown => "UNKNOWN", - HealthStatus::Pass => "PASS", - HealthStatus::Warn => "WARN", - HealthStatus::Fail => "FAIL", + Self::Unknown => "UNKNOWN", + Self::Pass => "PASS", + Self::Warn => "WARN", + Self::Fail => "FAIL", } } /// Creates an enum from field names used in the ProtoBuf definition. @@ -1014,8 +1007,15 @@ impl HealthStatus { } } /// Generated client implementations. +#[cfg(feature = "grpc")] pub mod health_service_client { - #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] + #![allow( + unused_variables, + dead_code, + missing_docs, + clippy::wildcard_imports, + clippy::let_unit_value, + )] use tonic::codegen::*; use tonic::codegen::http::Uri; /// HealthService defines the RPC service for the health package @@ -1036,10 +1036,10 @@ pub mod health_service_client { } impl HealthServiceClient where - T: tonic::client::GrpcService, + T: tonic::client::GrpcService, T::Error: Into, - T::ResponseBody: Body + Send + 'static, - ::Error: Into + Send, + T::ResponseBody: Body + std::marker::Send + 'static, + ::Error: Into + std::marker::Send, { pub fn new(inner: T) -> Self { let inner = tonic::client::Grpc::new(inner); @@ -1057,14 +1057,14 @@ pub mod health_service_client { F: tonic::service::Interceptor, T::ResponseBody: Default, T: tonic::codegen::Service< - http::Request, + http::Request, Response = http::Response< - >::ResponseBody, + >::ResponseBody, >, >, , - >>::Error: Into + Send + Sync, + http::Request, + >>::Error: Into + std::marker::Send + std::marker::Sync, { HealthServiceClient::new(InterceptedService::new(inner, interceptor)) } @@ -1111,12 +1111,11 @@ pub mod health_service_client { .ready() .await .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, + tonic::Status::unknown( format!("Service was not ready: {}", e.into()), ) })?; - let codec = tonic::codec::ProstCodec::default(); + let codec = tonic_prost::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/evnode.v1.HealthService/Livez", ); @@ -1128,12 +1127,19 @@ pub mod health_service_client { } } /// Generated server implementations. +#[cfg(feature = "grpc")] pub mod health_service_server { - #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] + #![allow( + unused_variables, + dead_code, + missing_docs, + clippy::wildcard_imports, + clippy::let_unit_value, + )] use tonic::codegen::*; /// Generated trait containing gRPC methods that should be implemented for use with HealthServiceServer. 
#[async_trait] - pub trait HealthService: Send + Sync + 'static { + pub trait HealthService: std::marker::Send + std::marker::Sync + 'static { /// Livez returns the health status of the node async fn livez( &self, @@ -1145,20 +1151,18 @@ pub mod health_service_server { } /// HealthService defines the RPC service for the health package #[derive(Debug)] - pub struct HealthServiceServer { - inner: _Inner, + pub struct HealthServiceServer { + inner: Arc, accept_compression_encodings: EnabledCompressionEncodings, send_compression_encodings: EnabledCompressionEncodings, max_decoding_message_size: Option, max_encoding_message_size: Option, } - struct _Inner(Arc); - impl HealthServiceServer { + impl HealthServiceServer { pub fn new(inner: T) -> Self { Self::from_arc(Arc::new(inner)) } pub fn from_arc(inner: Arc) -> Self { - let inner = _Inner(inner); Self { inner, accept_compression_encodings: Default::default(), @@ -1208,10 +1212,10 @@ pub mod health_service_server { impl tonic::codegen::Service> for HealthServiceServer where T: HealthService, - B: Body + Send + 'static, - B::Error: Into + Send + 'static, + B: Body + std::marker::Send + 'static, + B::Error: Into + std::marker::Send + 'static, { - type Response = http::Response; + type Response = http::Response; type Error = std::convert::Infallible; type Future = BoxFuture; fn poll_ready( @@ -1221,7 +1225,6 @@ pub mod health_service_server { Poll::Ready(Ok(())) } fn call(&mut self, req: http::Request) -> Self::Future { - let inner = self.inner.clone(); match req.uri().path() { "/evnode.v1.HealthService/Livez" => { #[allow(non_camel_case_types)] @@ -1247,9 +1250,8 @@ pub mod health_service_server { let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { - let inner = inner.0; let method = LivezSvc(inner); - let codec = tonic::codec::ProstCodec::default(); + let codec = tonic_prost::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) .apply_compression_config( accept_compression_encodings, @@ -1266,20 +1268,27 @@ pub mod health_service_server { } _ => { Box::pin(async move { - Ok( - http::Response::builder() - .status(200) - .header("grpc-status", "12") - .header("content-type", "application/grpc") - .body(empty_body()) - .unwrap(), - ) + let mut response = http::Response::new( + tonic::body::Body::default(), + ); + let headers = response.headers_mut(); + headers + .insert( + tonic::Status::GRPC_STATUS, + (tonic::Code::Unimplemented as i32).into(), + ); + headers + .insert( + http::header::CONTENT_TYPE, + tonic::metadata::GRPC_CONTENT_TYPE, + ); + Ok(response) }) } } } } - impl Clone for HealthServiceServer { + impl Clone for HealthServiceServer { fn clone(&self) -> Self { let inner = self.inner.clone(); Self { @@ -1291,23 +1300,14 @@ pub mod health_service_server { } } } - impl Clone for _Inner { - fn clone(&self) -> Self { - Self(Arc::clone(&self.0)) - } - } - impl std::fmt::Debug for _Inner { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "{:?}", self.0) - } - } - impl tonic::server::NamedService for HealthServiceServer { - const NAME: &'static str = "evnode.v1.HealthService"; + /// Generated gRPC service name + pub const SERVICE_NAME: &str = "evnode.v1.HealthService"; + impl tonic::server::NamedService for HealthServiceServer { + const NAME: &'static str = SERVICE_NAME; } } /// InitChainRequest contains the genesis parameters for chain initialization -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, 
::prost::Message)] +#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] pub struct InitChainRequest { /// Timestamp marking chain start time in UTC #[prost(message, optional, tag = "1")] @@ -1320,8 +1320,7 @@ pub struct InitChainRequest { pub chain_id: ::prost::alloc::string::String, } /// InitChainResponse contains the initial state and configuration -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] pub struct InitChainResponse { /// Hash representing initial state #[prost(bytes = "vec", tag = "1")] @@ -1333,20 +1332,17 @@ pub struct InitChainResponse { /// GetTxsRequest is the request for fetching transactions /// /// Empty for now, may include filtering criteria in the future -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)] pub struct GetTxsRequest {} /// GetTxsResponse contains the available transactions -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] pub struct GetTxsResponse { /// Slice of valid transactions from mempool #[prost(bytes = "vec", repeated, tag = "1")] pub txs: ::prost::alloc::vec::Vec<::prost::alloc::vec::Vec>, } /// ExecuteTxsRequest contains transactions and block context for execution -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] pub struct ExecuteTxsRequest { /// Ordered list of transactions to execute #[prost(bytes = "vec", repeated, tag = "1")] @@ -1362,8 +1358,7 @@ pub struct ExecuteTxsRequest { pub prev_state_root: ::prost::alloc::vec::Vec, } /// ExecuteTxsResponse contains the result of transaction execution -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] pub struct ExecuteTxsResponse { /// New state root after executing transactions #[prost(bytes = "vec", tag = "1")] @@ -1373,8 +1368,7 @@ pub struct ExecuteTxsResponse { pub max_bytes: u64, } /// SetFinalRequest marks a block as finalized -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)] pub struct SetFinalRequest { /// Height of block to finalize #[prost(uint64, tag = "1")] @@ -1383,12 +1377,18 @@ pub struct SetFinalRequest { /// SetFinalResponse indicates whether finalization was successful /// /// Empty response, errors are returned via gRPC status -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)] pub struct SetFinalResponse {} /// Generated client implementations. 
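A sketch of driving the generated executor client that follows, once the `grpc` feature is enabled. The `connect`/`set_final` signatures are assumed to match standard tonic codegen (they are elided in this diff), and the endpoint and `ev_types::v1` path are placeholders:

    use ev_types::v1::executor_service_client::ExecutorServiceClient;
    use ev_types::v1::SetFinalRequest;

    async fn finalize(height: u64) -> Result<(), Box<dyn std::error::Error>> {
        // `connect` comes from tonic's transport support and may itself be
        // feature-gated; the address is a placeholder.
        let mut client = ExecutorServiceClient::connect("http://127.0.0.1:50051").await?;
        // SetFinal marks the block at `height` as finalized; SetFinalResponse is
        // empty, so failures surface only as a gRPC status.
        client.set_final(SetFinalRequest { block_height: height }).await?;
        Ok(())
    }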
+#[cfg(feature = "grpc")] pub mod executor_service_client { - #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] + #![allow( + unused_variables, + dead_code, + missing_docs, + clippy::wildcard_imports, + clippy::let_unit_value, + )] use tonic::codegen::*; use tonic::codegen::http::Uri; /// ExecutorService defines the execution layer interface for EVNode @@ -1409,10 +1409,10 @@ pub mod executor_service_client { } impl ExecutorServiceClient where - T: tonic::client::GrpcService, + T: tonic::client::GrpcService, T::Error: Into, - T::ResponseBody: Body + Send + 'static, - ::Error: Into + Send, + T::ResponseBody: Body + std::marker::Send + 'static, + ::Error: Into + std::marker::Send, { pub fn new(inner: T) -> Self { let inner = tonic::client::Grpc::new(inner); @@ -1430,14 +1430,14 @@ pub mod executor_service_client { F: tonic::service::Interceptor, T::ResponseBody: Default, T: tonic::codegen::Service< - http::Request, + http::Request, Response = http::Response< - >::ResponseBody, + >::ResponseBody, >, >, , - >>::Error: Into + Send + Sync, + http::Request, + >>::Error: Into + std::marker::Send + std::marker::Sync, { ExecutorServiceClient::new(InterceptedService::new(inner, interceptor)) } @@ -1484,12 +1484,11 @@ pub mod executor_service_client { .ready() .await .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, + tonic::Status::unknown( format!("Service was not ready: {}", e.into()), ) })?; - let codec = tonic::codec::ProstCodec::default(); + let codec = tonic_prost::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/evnode.v1.ExecutorService/InitChain", ); @@ -1507,12 +1506,11 @@ pub mod executor_service_client { .ready() .await .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, + tonic::Status::unknown( format!("Service was not ready: {}", e.into()), ) })?; - let codec = tonic::codec::ProstCodec::default(); + let codec = tonic_prost::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/evnode.v1.ExecutorService/GetTxs", ); @@ -1533,12 +1531,11 @@ pub mod executor_service_client { .ready() .await .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, + tonic::Status::unknown( format!("Service was not ready: {}", e.into()), ) })?; - let codec = tonic::codec::ProstCodec::default(); + let codec = tonic_prost::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/evnode.v1.ExecutorService/ExecuteTxs", ); @@ -1559,12 +1556,11 @@ pub mod executor_service_client { .ready() .await .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, + tonic::Status::unknown( format!("Service was not ready: {}", e.into()), ) })?; - let codec = tonic::codec::ProstCodec::default(); + let codec = tonic_prost::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/evnode.v1.ExecutorService/SetFinal", ); @@ -1576,12 +1572,19 @@ pub mod executor_service_client { } } /// Generated server implementations. +#[cfg(feature = "grpc")] pub mod executor_service_server { - #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] + #![allow( + unused_variables, + dead_code, + missing_docs, + clippy::wildcard_imports, + clippy::let_unit_value, + )] use tonic::codegen::*; /// Generated trait containing gRPC methods that should be implemented for use with ExecutorServiceServer. 
#[async_trait] - pub trait ExecutorService: Send + Sync + 'static { + pub trait ExecutorService: std::marker::Send + std::marker::Sync + 'static { /// InitChain initializes a new blockchain instance with genesis parameters async fn init_chain( &self, @@ -1614,20 +1617,18 @@ pub mod executor_service_server { } /// ExecutorService defines the execution layer interface for EVNode #[derive(Debug)] - pub struct ExecutorServiceServer { - inner: _Inner, + pub struct ExecutorServiceServer { + inner: Arc, accept_compression_encodings: EnabledCompressionEncodings, send_compression_encodings: EnabledCompressionEncodings, max_decoding_message_size: Option, max_encoding_message_size: Option, } - struct _Inner(Arc); - impl ExecutorServiceServer { + impl ExecutorServiceServer { pub fn new(inner: T) -> Self { Self::from_arc(Arc::new(inner)) } pub fn from_arc(inner: Arc) -> Self { - let inner = _Inner(inner); Self { inner, accept_compression_encodings: Default::default(), @@ -1677,10 +1678,10 @@ pub mod executor_service_server { impl tonic::codegen::Service> for ExecutorServiceServer where T: ExecutorService, - B: Body + Send + 'static, - B::Error: Into + Send + 'static, + B: Body + std::marker::Send + 'static, + B::Error: Into + std::marker::Send + 'static, { - type Response = http::Response; + type Response = http::Response; type Error = std::convert::Infallible; type Future = BoxFuture; fn poll_ready( @@ -1690,7 +1691,6 @@ pub mod executor_service_server { Poll::Ready(Ok(())) } fn call(&mut self, req: http::Request) -> Self::Future { - let inner = self.inner.clone(); match req.uri().path() { "/evnode.v1.ExecutorService/InitChain" => { #[allow(non_camel_case_types)] @@ -1721,9 +1721,8 @@ pub mod executor_service_server { let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { - let inner = inner.0; let method = InitChainSvc(inner); - let codec = tonic::codec::ProstCodec::default(); + let codec = tonic_prost::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) .apply_compression_config( accept_compression_encodings, @@ -1767,9 +1766,8 @@ pub mod executor_service_server { let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { - let inner = inner.0; let method = GetTxsSvc(inner); - let codec = tonic::codec::ProstCodec::default(); + let codec = tonic_prost::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) .apply_compression_config( accept_compression_encodings, @@ -1813,9 +1811,8 @@ pub mod executor_service_server { let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { - let inner = inner.0; let method = ExecuteTxsSvc(inner); - let codec = tonic::codec::ProstCodec::default(); + let codec = tonic_prost::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) .apply_compression_config( accept_compression_encodings, @@ -1859,9 +1856,8 @@ pub mod executor_service_server { let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { - let inner = inner.0; let method = SetFinalSvc(inner); - let codec = tonic::codec::ProstCodec::default(); + let codec = tonic_prost::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) .apply_compression_config( accept_compression_encodings, @@ -1878,20 +1874,27 @@ pub mod executor_service_server { } _ => { Box::pin(async move { - Ok( - http::Response::builder() - 
.status(200) - .header("grpc-status", "12") - .header("content-type", "application/grpc") - .body(empty_body()) - .unwrap(), - ) + let mut response = http::Response::new( + tonic::body::Body::default(), + ); + let headers = response.headers_mut(); + headers + .insert( + tonic::Status::GRPC_STATUS, + (tonic::Code::Unimplemented as i32).into(), + ); + headers + .insert( + http::header::CONTENT_TYPE, + tonic::metadata::GRPC_CONTENT_TYPE, + ); + Ok(response) }) } } } } - impl Clone for ExecutorServiceServer { + impl Clone for ExecutorServiceServer { fn clone(&self) -> Self { let inner = self.inner.clone(); Self { @@ -1903,23 +1906,14 @@ pub mod executor_service_server { } } } - impl Clone for _Inner { - fn clone(&self) -> Self { - Self(Arc::clone(&self.0)) - } - } - impl std::fmt::Debug for _Inner { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "{:?}", self.0) - } - } - impl tonic::server::NamedService for ExecutorServiceServer { - const NAME: &'static str = "evnode.v1.ExecutorService"; + /// Generated gRPC service name + pub const SERVICE_NAME: &str = "evnode.v1.ExecutorService"; + impl tonic::server::NamedService for ExecutorServiceServer { + const NAME: &'static str = SERVICE_NAME; } } /// Block contains all the components of a complete block -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] pub struct Block { #[prost(message, optional, tag = "1")] pub header: ::core::option::Option, @@ -1927,8 +1921,7 @@ pub struct Block { pub data: ::core::option::Option, } /// GetBlockRequest defines the request for retrieving a block -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] pub struct GetBlockRequest { /// The height or hash of the block to retrieve #[prost(oneof = "get_block_request::Identifier", tags = "1, 2")] @@ -1937,8 +1930,7 @@ pub struct GetBlockRequest { /// Nested message and enum types in `GetBlockRequest`. 
pub mod get_block_request { /// The height or hash of the block to retrieve - #[allow(clippy::derive_partial_eq_without_eq)] - #[derive(Clone, PartialEq, ::prost::Oneof)] + #[derive(Clone, PartialEq, Eq, Hash, ::prost::Oneof)] pub enum Identifier { #[prost(uint64, tag = "1")] Height(u64), @@ -1947,8 +1939,7 @@ pub mod get_block_request { } } /// GetBlockResponse defines the response for retrieving a block -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] pub struct GetBlockResponse { #[prost(message, optional, tag = "1")] pub block: ::core::option::Option, @@ -1958,29 +1949,33 @@ pub struct GetBlockResponse { pub data_da_height: u64, } /// GetStateResponse defines the response for retrieving the current state -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] pub struct GetStateResponse { #[prost(message, optional, tag = "1")] pub state: ::core::option::Option, } /// GetMetadataRequest defines the request for retrieving metadata by key -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] pub struct GetMetadataRequest { #[prost(string, tag = "1")] pub key: ::prost::alloc::string::String, } /// GetMetadataResponse defines the response for retrieving metadata -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] pub struct GetMetadataResponse { #[prost(bytes = "vec", tag = "1")] pub value: ::prost::alloc::vec::Vec, } /// Generated client implementations. +#[cfg(feature = "grpc")] pub mod store_service_client { - #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] + #![allow( + unused_variables, + dead_code, + missing_docs, + clippy::wildcard_imports, + clippy::let_unit_value, + )] use tonic::codegen::*; use tonic::codegen::http::Uri; /// StoreService defines the RPC service for the store package @@ -2001,10 +1996,10 @@ pub mod store_service_client { } impl StoreServiceClient where - T: tonic::client::GrpcService, + T: tonic::client::GrpcService, T::Error: Into, - T::ResponseBody: Body + Send + 'static, - ::Error: Into + Send, + T::ResponseBody: Body + std::marker::Send + 'static, + ::Error: Into + std::marker::Send, { pub fn new(inner: T) -> Self { let inner = tonic::client::Grpc::new(inner); @@ -2022,14 +2017,14 @@ pub mod store_service_client { F: tonic::service::Interceptor, T::ResponseBody: Default, T: tonic::codegen::Service< - http::Request, + http::Request, Response = http::Response< - >::ResponseBody, + >::ResponseBody, >, >, , - >>::Error: Into + Send + Sync, + http::Request, + >>::Error: Into + std::marker::Send + std::marker::Sync, { StoreServiceClient::new(InterceptedService::new(inner, interceptor)) } @@ -2076,12 +2071,11 @@ pub mod store_service_client { .ready() .await .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, + tonic::Status::unknown( format!("Service was not ready: {}", e.into()), ) })?; - let codec = tonic::codec::ProstCodec::default(); + let codec = tonic_prost::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/evnode.v1.StoreService/GetBlock", ); @@ -2102,12 +2096,11 @@ pub mod store_service_client { .ready() .await .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, + tonic::Status::unknown( 
format!("Service was not ready: {}", e.into()), ) })?; - let codec = tonic::codec::ProstCodec::default(); + let codec = tonic_prost::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/evnode.v1.StoreService/GetState", ); @@ -2128,12 +2121,11 @@ pub mod store_service_client { .ready() .await .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, + tonic::Status::unknown( format!("Service was not ready: {}", e.into()), ) })?; - let codec = tonic::codec::ProstCodec::default(); + let codec = tonic_prost::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/evnode.v1.StoreService/GetMetadata", ); @@ -2145,12 +2137,19 @@ pub mod store_service_client { } } /// Generated server implementations. +#[cfg(feature = "grpc")] pub mod store_service_server { - #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] + #![allow( + unused_variables, + dead_code, + missing_docs, + clippy::wildcard_imports, + clippy::let_unit_value, + )] use tonic::codegen::*; /// Generated trait containing gRPC methods that should be implemented for use with StoreServiceServer. #[async_trait] - pub trait StoreService: Send + Sync + 'static { + pub trait StoreService: std::marker::Send + std::marker::Sync + 'static { /// GetBlock returns a block by height or hash async fn get_block( &self, @@ -2178,20 +2177,18 @@ pub mod store_service_server { } /// StoreService defines the RPC service for the store package #[derive(Debug)] - pub struct StoreServiceServer { - inner: _Inner, + pub struct StoreServiceServer { + inner: Arc, accept_compression_encodings: EnabledCompressionEncodings, send_compression_encodings: EnabledCompressionEncodings, max_decoding_message_size: Option, max_encoding_message_size: Option, } - struct _Inner(Arc); - impl StoreServiceServer { + impl StoreServiceServer { pub fn new(inner: T) -> Self { Self::from_arc(Arc::new(inner)) } pub fn from_arc(inner: Arc) -> Self { - let inner = _Inner(inner); Self { inner, accept_compression_encodings: Default::default(), @@ -2241,10 +2238,10 @@ pub mod store_service_server { impl tonic::codegen::Service> for StoreServiceServer where T: StoreService, - B: Body + Send + 'static, - B::Error: Into + Send + 'static, + B: Body + std::marker::Send + 'static, + B::Error: Into + std::marker::Send + 'static, { - type Response = http::Response; + type Response = http::Response; type Error = std::convert::Infallible; type Future = BoxFuture; fn poll_ready( @@ -2254,7 +2251,6 @@ pub mod store_service_server { Poll::Ready(Ok(())) } fn call(&mut self, req: http::Request) -> Self::Future { - let inner = self.inner.clone(); match req.uri().path() { "/evnode.v1.StoreService/GetBlock" => { #[allow(non_camel_case_types)] @@ -2285,9 +2281,8 @@ pub mod store_service_server { let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { - let inner = inner.0; let method = GetBlockSvc(inner); - let codec = tonic::codec::ProstCodec::default(); + let codec = tonic_prost::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) .apply_compression_config( accept_compression_encodings, @@ -2326,9 +2321,8 @@ pub mod store_service_server { let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { - let inner = inner.0; let method = GetStateSvc(inner); - let codec = tonic::codec::ProstCodec::default(); + let codec = tonic_prost::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) 
.apply_compression_config( accept_compression_encodings, @@ -2372,9 +2366,8 @@ pub mod store_service_server { let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { - let inner = inner.0; let method = GetMetadataSvc(inner); - let codec = tonic::codec::ProstCodec::default(); + let codec = tonic_prost::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) .apply_compression_config( accept_compression_encodings, @@ -2391,20 +2384,27 @@ pub mod store_service_server { } _ => { Box::pin(async move { - Ok( - http::Response::builder() - .status(200) - .header("grpc-status", "12") - .header("content-type", "application/grpc") - .body(empty_body()) - .unwrap(), - ) + let mut response = http::Response::new( + tonic::body::Body::default(), + ); + let headers = response.headers_mut(); + headers + .insert( + tonic::Status::GRPC_STATUS, + (tonic::Code::Unimplemented as i32).into(), + ); + headers + .insert( + http::header::CONTENT_TYPE, + tonic::metadata::GRPC_CONTENT_TYPE, + ); + Ok(response) }) } } } } - impl Clone for StoreServiceServer { + impl Clone for StoreServiceServer { fn clone(&self) -> Self { let inner = self.inner.clone(); Self { @@ -2416,23 +2416,14 @@ pub mod store_service_server { } } } - impl Clone for _Inner { - fn clone(&self) -> Self { - Self(Arc::clone(&self.0)) - } - } - impl std::fmt::Debug for _Inner { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "{:?}", self.0) - } - } - impl tonic::server::NamedService for StoreServiceServer { - const NAME: &'static str = "evnode.v1.StoreService"; + /// Generated gRPC service name + pub const SERVICE_NAME: &str = "evnode.v1.StoreService"; + impl tonic::server::NamedService for StoreServiceServer { + const NAME: &'static str = SERVICE_NAME; } } /// GetNamespaceResponse returns the namespace for this network -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] pub struct GetNamespaceResponse { #[prost(string, tag = "1")] pub header_namespace: ::prost::alloc::string::String, @@ -2440,8 +2431,15 @@ pub struct GetNamespaceResponse { pub data_namespace: ::prost::alloc::string::String, } /// Generated client implementations. 
+#[cfg(feature = "grpc")] pub mod config_service_client { - #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] + #![allow( + unused_variables, + dead_code, + missing_docs, + clippy::wildcard_imports, + clippy::let_unit_value, + )] use tonic::codegen::*; use tonic::codegen::http::Uri; /// StoreService defines the RPC service for the store package @@ -2462,10 +2460,10 @@ pub mod config_service_client { } impl ConfigServiceClient where - T: tonic::client::GrpcService, + T: tonic::client::GrpcService, T::Error: Into, - T::ResponseBody: Body + Send + 'static, - ::Error: Into + Send, + T::ResponseBody: Body + std::marker::Send + 'static, + ::Error: Into + std::marker::Send, { pub fn new(inner: T) -> Self { let inner = tonic::client::Grpc::new(inner); @@ -2483,14 +2481,14 @@ pub mod config_service_client { F: tonic::service::Interceptor, T::ResponseBody: Default, T: tonic::codegen::Service< - http::Request, + http::Request, Response = http::Response< - >::ResponseBody, + >::ResponseBody, >, >, , - >>::Error: Into + Send + Sync, + http::Request, + >>::Error: Into + std::marker::Send + std::marker::Sync, { ConfigServiceClient::new(InterceptedService::new(inner, interceptor)) } @@ -2537,12 +2535,11 @@ pub mod config_service_client { .ready() .await .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, + tonic::Status::unknown( format!("Service was not ready: {}", e.into()), ) })?; - let codec = tonic::codec::ProstCodec::default(); + let codec = tonic_prost::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/evnode.v1.ConfigService/GetNamespace", ); @@ -2554,12 +2551,19 @@ pub mod config_service_client { } } /// Generated server implementations. +#[cfg(feature = "grpc")] pub mod config_service_server { - #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] + #![allow( + unused_variables, + dead_code, + missing_docs, + clippy::wildcard_imports, + clippy::let_unit_value, + )] use tonic::codegen::*; /// Generated trait containing gRPC methods that should be implemented for use with ConfigServiceServer. 
#[async_trait] - pub trait ConfigService: Send + Sync + 'static { + pub trait ConfigService: std::marker::Send + std::marker::Sync + 'static { /// GetNamespace returns the namespace for this network async fn get_namespace( &self, @@ -2571,20 +2575,18 @@ pub mod config_service_server { } /// StoreService defines the RPC service for the store package #[derive(Debug)] - pub struct ConfigServiceServer { - inner: _Inner, + pub struct ConfigServiceServer { + inner: Arc, accept_compression_encodings: EnabledCompressionEncodings, send_compression_encodings: EnabledCompressionEncodings, max_decoding_message_size: Option, max_encoding_message_size: Option, } - struct _Inner(Arc); - impl ConfigServiceServer { + impl ConfigServiceServer { pub fn new(inner: T) -> Self { Self::from_arc(Arc::new(inner)) } pub fn from_arc(inner: Arc) -> Self { - let inner = _Inner(inner); Self { inner, accept_compression_encodings: Default::default(), @@ -2634,10 +2636,10 @@ pub mod config_service_server { impl tonic::codegen::Service> for ConfigServiceServer where T: ConfigService, - B: Body + Send + 'static, - B::Error: Into + Send + 'static, + B: Body + std::marker::Send + 'static, + B::Error: Into + std::marker::Send + 'static, { - type Response = http::Response; + type Response = http::Response; type Error = std::convert::Infallible; type Future = BoxFuture; fn poll_ready( @@ -2647,7 +2649,6 @@ pub mod config_service_server { Poll::Ready(Ok(())) } fn call(&mut self, req: http::Request) -> Self::Future { - let inner = self.inner.clone(); match req.uri().path() { "/evnode.v1.ConfigService/GetNamespace" => { #[allow(non_camel_case_types)] @@ -2673,9 +2674,8 @@ pub mod config_service_server { let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { - let inner = inner.0; let method = GetNamespaceSvc(inner); - let codec = tonic::codec::ProstCodec::default(); + let codec = tonic_prost::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) .apply_compression_config( accept_compression_encodings, @@ -2692,20 +2692,27 @@ pub mod config_service_server { } _ => { Box::pin(async move { - Ok( - http::Response::builder() - .status(200) - .header("grpc-status", "12") - .header("content-type", "application/grpc") - .body(empty_body()) - .unwrap(), - ) + let mut response = http::Response::new( + tonic::body::Body::default(), + ); + let headers = response.headers_mut(); + headers + .insert( + tonic::Status::GRPC_STATUS, + (tonic::Code::Unimplemented as i32).into(), + ); + headers + .insert( + http::header::CONTENT_TYPE, + tonic::metadata::GRPC_CONTENT_TYPE, + ); + Ok(response) }) } } } } - impl Clone for ConfigServiceServer { + impl Clone for ConfigServiceServer { fn clone(&self) -> Self { let inner = self.inner.clone(); Self { @@ -2717,17 +2724,9 @@ pub mod config_service_server { } } } - impl Clone for _Inner { - fn clone(&self) -> Self { - Self(Arc::clone(&self.0)) - } - } - impl std::fmt::Debug for _Inner { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "{:?}", self.0) - } - } - impl tonic::server::NamedService for ConfigServiceServer { - const NAME: &'static str = "evnode.v1.ConfigService"; + /// Generated gRPC service name + pub const SERVICE_NAME: &str = "evnode.v1.ConfigService"; + impl tonic::server::NamedService for ConfigServiceServer { + const NAME: &'static str = SERVICE_NAME; } } diff --git a/client/crates/types/tests/compression_test.rs b/client/crates/types/tests/compression_test.rs new file mode 100644 
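The decompression tests added below exercise a simple blob framing: a 1-byte flag (0x00 uncompressed, 0x01 zstd) followed by the original payload size as a little-endian u64, then the payload itself, with header-less blobs treated as legacy data and returned unchanged. A minimal sketch of building that 9-byte frame for uncompressed data, mirroring what the tests construct inline (the helper name is hypothetical and not part of the crate):

    /// Hypothetical helper: [ flag: 1 byte ][ original_size: u64 LE ][ payload... ]
    fn frame_uncompressed(payload: &[u8]) -> Vec<u8> {
        let mut blob = Vec::with_capacity(9 + payload.len());
        blob.push(0x00); // FLAG_UNCOMPRESSED
        blob.extend_from_slice(&(payload.len() as u64).to_le_bytes());
        blob.extend_from_slice(payload);
        blob
    }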
index 0000000000..a32f75ed41 --- /dev/null +++ b/client/crates/types/tests/compression_test.rs @@ -0,0 +1,106 @@ +//! Comprehensive tests for blob decompression +//! These tests match the behavior of the Go implementation + +use ev_types::{decompress_blob, get_compression_info, CompressionError}; + +#[test] +fn test_legacy_blobs() { + // Test with legacy blob (no compression header) + let legacy_blob = b"legacy data without header"; + + // Should return as-is + let decompressed = decompress_blob(legacy_blob).unwrap(); + assert_eq!(legacy_blob, decompressed.as_ref()); +} + +#[test] +fn test_invalid_compression_flag() { + // Create blob with invalid flag + let mut invalid_blob = vec![0u8; 9 + 10]; // COMPRESSION_HEADER_SIZE + 10 + invalid_blob[0] = 0xFF; // Invalid flag + + // Should return error + let result = decompress_blob(&invalid_blob); + assert!(result.is_err()); + + if let Err(CompressionError::InvalidCompressionFlag { flag }) = result { + assert_eq!(flag, 0xFF); + } else { + panic!("Expected InvalidCompressionFlag error"); + } +} + +#[test] +fn test_uncompressed_with_header() { + // Create a blob with uncompressed header + let original_data = b"test data"; + let mut blob = Vec::with_capacity(9 + original_data.len()); + + // Add header (flag + 8 bytes for size) + blob.push(0x00); // FLAG_UNCOMPRESSED + blob.extend_from_slice(&(original_data.len() as u64).to_le_bytes()); + blob.extend_from_slice(original_data); + + // Decompress + let decompressed = decompress_blob(&blob).unwrap(); + assert_eq!(original_data, decompressed.as_ref()); +} + +#[test] +fn test_compression_info() { + // Test with uncompressed data + let original_data = b"test data"; + let mut blob = Vec::new(); + blob.push(0x00); // FLAG_UNCOMPRESSED + blob.extend_from_slice(&(original_data.len() as u64).to_le_bytes()); + blob.extend_from_slice(original_data); + + let info = get_compression_info(&blob); + assert!(!info.is_compressed); + assert_eq!(info.algorithm, "none"); + assert_eq!(info.original_size, original_data.len() as u64); +} + +#[test] +fn test_empty_blob() { + let empty = vec![]; + + // Should handle empty blob gracefully + let decompressed = decompress_blob(&empty).unwrap(); + assert_eq!(empty, decompressed.as_ref()); +} + +#[test] +fn test_legacy_blob_heuristics() { + // Test various legacy blobs that should be detected + let legacy_blobs = vec![ + b"plain text data".to_vec(), + b"JSON: {\"key\": \"value\"}".to_vec(), + b"log entry: 2024-01-01 00:00:00 INFO message".to_vec(), + ]; + + for blob in legacy_blobs { + // Ensure blob is large enough to potentially have a header + let mut padded_blob = blob.clone(); + while padded_blob.len() < 20 { + padded_blob.push(b' '); + } + + let decompressed = decompress_blob(&padded_blob).unwrap(); + assert_eq!(padded_blob, decompressed.as_ref()); + } +} + +#[test] +fn test_corrupted_blob_detection() { + // Create a blob that looks like it has a header but is corrupted + let mut corrupted = vec![0u8; 20]; + corrupted[0] = 0xAB; // Invalid flag that's not ASCII + // Set a reasonable size that suggests this was meant to be compressed + let size_bytes = 1000u64.to_le_bytes(); + corrupted[1..9].copy_from_slice(&size_bytes); + + // Should detect as corrupted + let result = decompress_blob(&corrupted); + assert!(result.is_err()); +} diff --git a/da/cmd/local-da/local.go b/da/cmd/local-da/local.go index aa10124317..f0f4c14123 100644 --- a/da/cmd/local-da/local.go +++ b/da/cmd/local-da/local.go @@ -7,6 +7,7 @@ import ( "crypto/rand" "crypto/sha256" "encoding/binary" + "encoding/hex" 
"errors" "fmt" "sync" @@ -189,7 +190,7 @@ func (d *LocalDA) SubmitWithOptions(ctx context.Context, blobs []coreda.Blob, ga d.logger.Error().Err(err).Msg("SubmitWithOptions: invalid namespace") return nil, err } - d.logger.Info().Int("numBlobs", len(blobs)).Float64("gasPrice", gasPrice).Str("namespace", string(ns)).Msg("SubmitWithOptions called") + d.logger.Info().Int("numBlobs", len(blobs)).Float64("gasPrice", gasPrice).Str("namespace", hex.EncodeToString(ns)).Msg("SubmitWithOptions called") // Validate blob sizes before processing for i, blob := range blobs { @@ -219,7 +220,7 @@ func (d *LocalDA) Submit(ctx context.Context, blobs []coreda.Blob, gasPrice floa d.logger.Error().Err(err).Msg("Submit: invalid namespace") return nil, err } - d.logger.Info().Int("numBlobs", len(blobs)).Float64("gasPrice", gasPrice).Str("namespace", string(ns)).Msg("Submit called") + d.logger.Info().Int("numBlobs", len(blobs)).Float64("gasPrice", gasPrice).Str("namespace", hex.EncodeToString(ns)).Msg("Submit called") // Validate blob sizes before processing for i, blob := range blobs { diff --git a/da/compression/benchmark_test.go b/da/compression/benchmark_test.go new file mode 100644 index 0000000000..a34f1d6716 --- /dev/null +++ b/da/compression/benchmark_test.go @@ -0,0 +1,440 @@ +package compression + +import ( + "crypto/rand" + "encoding/json" + "fmt" + "testing" + + "github.com/evstack/ev-node/core/da" + "github.com/stretchr/testify/require" +) + +// TestLargeBlobCompressionEfficiency tests compression efficiency for blob sizes from 20KB to 2MB +func TestLargeBlobCompressionEfficiency(t *testing.T) { + if testing.Short() { + t.Skip("Skipping large blob compression test in short mode") + } + + config := DefaultConfig() + compressor, err := NewCompressibleDA(nil, config) + require.NoError(t, err) + defer compressor.Close() + + // Test sizes: 20KB, 50KB, 100KB, 200KB, 500KB, 1MB, 2MB + testSizes := []int{ + 20 * 1024, // 20KB + 50 * 1024, // 50KB + 100 * 1024, // 100KB + 200 * 1024, // 200KB + 500 * 1024, // 500KB + 1024 * 1024, // 1MB + 2048 * 1024, // 2MB + } + + dataTypes := []struct { + name string + generator func(size int) da.Blob + }{ + { + name: "Repetitive", + generator: generateRepetitiveData, + }, + { + name: "JSON", + generator: generateJSONData, + }, + { + name: "Text", + generator: generateTextData, + }, + { + name: "Binary", + generator: generateBinaryData, + }, + { + name: "Random", + generator: generateRandomData, + }, + } + + fmt.Printf("\n=== Blob Compression Efficiency Test ===\n") + fmt.Printf("%-15s %-10s %-12s %-12s %-10s %-15s\n", + "Data Type", "Size", "Compressed", "Saved", "Ratio", "Compression") + fmt.Printf("%-15s %-10s %-12s %-12s %-10s %-15s\n", + "---------", "----", "----------", "-----", "-----", "-----------") + + for _, dt := range dataTypes { + for _, size := range testSizes { + data := dt.generator(size) + compressed, err := compressor.compressBlob(data) + require.NoError(t, err) + + info := GetCompressionInfo(compressed) + + var saved string + var compressionStatus string + + if info.IsCompressed { + savedPercent := (1.0 - info.CompressionRatio) * 100 + saved = fmt.Sprintf("%.1f%%", savedPercent) + compressionStatus = "Yes" + } else { + saved = "0%" + compressionStatus = "No (inefficient)" + } + + fmt.Printf("%-15s %-10s %-12s %-12s %-10.3f %-15s\n", + dt.name, + formatSize(size), + formatSize(int(info.CompressedSize)), + saved, + info.CompressionRatio, + compressionStatus, + ) + + // Verify decompression works correctly + decompressed, err := 
compressor.decompressBlob(compressed) + require.NoError(t, err) + require.Equal(t, data, decompressed, "Decompressed data should match original") + } + fmt.Println() // Add spacing between data types + } +} + +// BenchmarkLargeBlobCompression benchmarks compression performance for large blobs +func BenchmarkLargeBlobCompression(b *testing.B) { + config := DefaultConfig() + compressor, err := NewCompressibleDA(nil, config) + if err != nil { + b.Fatal(err) + } + defer compressor.Close() + + benchmarkSizes := []int{ + 20 * 1024, // 20KB + 100 * 1024, // 100KB + 500 * 1024, // 500KB + 1024 * 1024, // 1MB + 2048 * 1024, // 2MB + } + + for _, size := range benchmarkSizes { + // Benchmark with different data types + b.Run(fmt.Sprintf("JSON_%s", formatSize(size)), func(b *testing.B) { + data := generateJSONData(size) + b.SetBytes(int64(size)) + b.ResetTimer() + + for i := 0; i < b.N; i++ { + _, err := compressor.compressBlob(data) + if err != nil { + b.Fatal(err) + } + } + }) + + b.Run(fmt.Sprintf("Text_%s", formatSize(size)), func(b *testing.B) { + data := generateTextData(size) + b.SetBytes(int64(size)) + b.ResetTimer() + + for i := 0; i < b.N; i++ { + _, err := compressor.compressBlob(data) + if err != nil { + b.Fatal(err) + } + } + }) + + b.Run(fmt.Sprintf("Binary_%s", formatSize(size)), func(b *testing.B) { + data := generateBinaryData(size) + b.SetBytes(int64(size)) + b.ResetTimer() + + for i := 0; i < b.N; i++ { + _, err := compressor.compressBlob(data) + if err != nil { + b.Fatal(err) + } + } + }) + } +} + +// BenchmarkLargeBlobDecompression benchmarks decompression performance +func BenchmarkLargeBlobDecompression(b *testing.B) { + config := DefaultConfig() + compressor, err := NewCompressibleDA(nil, config) + if err != nil { + b.Fatal(err) + } + defer compressor.Close() + + benchmarkSizes := []int{ + 20 * 1024, // 20KB + 100 * 1024, // 100KB + 500 * 1024, // 500KB + 1024 * 1024, // 1MB + 2048 * 1024, // 2MB + } + + for _, size := range benchmarkSizes { + // Pre-compress data for decompression benchmark + jsonData := generateJSONData(size) + compressedJSON, _ := compressor.compressBlob(jsonData) + + textData := generateTextData(size) + compressedText, _ := compressor.compressBlob(textData) + + binaryData := generateBinaryData(size) + compressedBinary, _ := compressor.compressBlob(binaryData) + + b.Run(fmt.Sprintf("JSON_%s", formatSize(size)), func(b *testing.B) { + b.SetBytes(int64(size)) + b.ResetTimer() + + for i := 0; i < b.N; i++ { + _, err := compressor.decompressBlob(compressedJSON) + if err != nil { + b.Fatal(err) + } + } + }) + + b.Run(fmt.Sprintf("Text_%s", formatSize(size)), func(b *testing.B) { + b.SetBytes(int64(size)) + b.ResetTimer() + + for i := 0; i < b.N; i++ { + _, err := compressor.decompressBlob(compressedText) + if err != nil { + b.Fatal(err) + } + } + }) + + b.Run(fmt.Sprintf("Binary_%s", formatSize(size)), func(b *testing.B) { + b.SetBytes(int64(size)) + b.ResetTimer() + + for i := 0; i < b.N; i++ { + _, err := compressor.decompressBlob(compressedBinary) + if err != nil { + b.Fatal(err) + } + } + }) + } +} + +// TestCompressionThresholds tests the MinCompressionRatio threshold behavior +func TestCompressionThresholds(t *testing.T) { + testCases := []struct { + name string + minCompressionRatio float64 + dataSize int + dataType func(int) da.Blob + expectCompressed bool + }{ + { + name: "High_Threshold_Repetitive_Data", + minCompressionRatio: 0.5, // Require 50% savings + dataSize: 100 * 1024, + dataType: generateRepetitiveData, + expectCompressed: true, // Repetitive 
data should achieve >50% savings + }, + { + name: "High_Threshold_Random_Data", + minCompressionRatio: 0.5, + dataSize: 100 * 1024, + dataType: generateRandomData, + expectCompressed: false, // Random data won't achieve 50% savings + }, + { + name: "Default_Threshold_JSON", + minCompressionRatio: 0.1, // Default 10% savings + dataSize: 500 * 1024, + dataType: generateJSONData, + expectCompressed: true, // JSON should achieve >10% savings + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + config := Config{ + Enabled: true, + ZstdLevel: 3, + MinCompressionRatio: tc.minCompressionRatio, + } + + compressor, err := NewCompressibleDA(nil, config) + require.NoError(t, err) + defer compressor.Close() + + data := tc.dataType(tc.dataSize) + compressed, err := compressor.compressBlob(data) + require.NoError(t, err) + + info := GetCompressionInfo(compressed) + + if tc.expectCompressed { + require.True(t, info.IsCompressed, + "Expected data to be compressed with threshold %.2f, but it wasn't. Ratio: %.3f", + tc.minCompressionRatio, info.CompressionRatio) + } else { + require.False(t, info.IsCompressed, + "Expected data to NOT be compressed with threshold %.2f, but it was. Ratio: %.3f", + tc.minCompressionRatio, info.CompressionRatio) + } + }) + } +} + +// Data generation functions +func generateRepetitiveData(size int) da.Blob { + pattern := []byte("ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789") + data := make([]byte, 0, size) + for len(data) < size { + remaining := size - len(data) + if remaining >= len(pattern) { + data = append(data, pattern...) + } else { + data = append(data, pattern[:remaining]...) + } + } + return data +} + +func generateJSONData(size int) da.Blob { + // Generate realistic JSON data with nested structures + type Record struct { + ID int `json:"id"` + Name string `json:"name"` + Email string `json:"email"` + Active bool `json:"active"` + Score float64 `json:"score"` + Tags []string `json:"tags"` + Metadata map[string]interface{} `json:"metadata"` + Description string `json:"description"` + } + + records := make([]Record, 0) + currentSize := 0 + id := 0 + + for currentSize < size { + record := Record{ + ID: id, + Name: fmt.Sprintf("User_%d", id), + Email: fmt.Sprintf("user%d@example.com", id), + Active: id%2 == 0, + Score: float64(id) * 1.5, + Tags: []string{"tag1", "tag2", fmt.Sprintf("tag_%d", id)}, + Metadata: map[string]interface{}{ + "created_at": "2024-01-01", + "updated_at": "2024-01-02", + "version": id, + }, + Description: fmt.Sprintf("This is a description for record %d with some repetitive content to simulate real data", id), + } + + records = append(records, record) + + // Estimate size + tempData, _ := json.Marshal(records) + currentSize = len(tempData) + id++ + + if currentSize >= size { + break + } + } + + data, _ := json.Marshal(records) + if len(data) > size { + data = data[:size] + } + return data +} + +func generateTextData(size int) da.Blob { + // Generate natural language text + sentences := []string{ + "The quick brown fox jumps over the lazy dog.", + "Lorem ipsum dolor sit amet, consectetur adipiscing elit.", + "In a hole in the ground there lived a hobbit.", + "It was the best of times, it was the worst of times.", + "To be or not to be, that is the question.", + "All happy families are alike; each unhappy family is unhappy in its own way.", + "It is a truth universally acknowledged that a single man in possession of a good fortune must be in want of a wife.", + "The sun did not shine, it was too wet to play.", + } + + data := 
make([]byte, 0, size) + sentenceIndex := 0 + + for len(data) < size { + sentence := sentences[sentenceIndex%len(sentences)] + if len(data)+len(sentence)+1 <= size { + if len(data) > 0 { + data = append(data, ' ') + } + data = append(data, sentence...) + } else { + remaining := size - len(data) + if remaining > 0 { + data = append(data, sentence[:remaining]...) + } + break + } + sentenceIndex++ + } + + return data +} + +func generateBinaryData(size int) da.Blob { + // Generate semi-structured binary data (like compiled code or encrypted data) + data := make([]byte, size) + + // Add some structure to make it somewhat compressible + for i := 0; i < size; i += 256 { + // Header-like structure + if i+4 <= size { + data[i] = 0xDE + data[i+1] = 0xAD + data[i+2] = 0xBE + data[i+3] = 0xEF + } + + // Some repetitive patterns + for j := 4; j < 128 && i+j < size; j++ { + data[i+j] = byte(j % 256) + } + + // Some random data + if i+128 < size { + randSection := make([]byte, min(128, size-i-128)) + rand.Read(randSection) + copy(data[i+128:], randSection) + } + } + + return data +} + +func generateRandomData(size int) da.Blob { + data := make([]byte, size) + rand.Read(data) + return data +} + +func formatSize(bytes int) string { + if bytes < 1024 { + return fmt.Sprintf("%dB", bytes) + } else if bytes < 1024*1024 { + return fmt.Sprintf("%dKB", bytes/1024) + } + return fmt.Sprintf("%.1fMB", float64(bytes)/1024/1024) +} diff --git a/da/compression/compression.go b/da/compression/compression.go new file mode 100644 index 0000000000..6417bb5f09 --- /dev/null +++ b/da/compression/compression.go @@ -0,0 +1,558 @@ +package compression + +import ( + "encoding/binary" + "errors" + "fmt" + "sync" + + "github.com/evstack/ev-node/core/da" + "github.com/klauspost/compress/zstd" +) + +// Compression constants +const ( + // CompressionHeaderSize is the size of the compression metadata header + CompressionHeaderSize = 9 // 1 byte flags + 8 bytes original size + + // Compression levels + DefaultZstdLevel = 3 + + // Flags + FlagUncompressed = 0x00 + FlagZstd = 0x01 + + // Default minimum compression ratio threshold (10% savings) + DefaultMinCompressionRatio = 0.1 +) + +var ( + ErrInvalidHeader = errors.New("invalid compression header") + ErrInvalidCompressionFlag = errors.New("invalid compression flag") + ErrDecompressionFailed = errors.New("decompression failed") +) + +// Config holds compression configuration +type Config struct { + // Enabled controls whether compression is active + Enabled bool + + // ZstdLevel is the compression level for zstd (1-22, default 3) + ZstdLevel int + + // MinCompressionRatio is the minimum compression ratio required to store compressed data + // If compression doesn't achieve this ratio, original data is stored uncompressed + MinCompressionRatio float64 +} + +// DefaultConfig returns a configuration optimized for zstd level 3 +func DefaultConfig() Config { + return Config{ + Enabled: true, + ZstdLevel: DefaultZstdLevel, + MinCompressionRatio: DefaultMinCompressionRatio, + } +} + +// Global sync.Pools for encoder/decoder reuse +var ( + encoderPools map[int]*sync.Pool + decoderPool *sync.Pool + poolsOnce sync.Once +) + +// initPools initializes the encoder and decoder pools +func initPools() { + poolsOnce.Do(func() { + // Create encoder pools for different compression levels + encoderPools = make(map[int]*sync.Pool) + + // Pre-create pools for common compression levels (1-9) + for level := 1; level <= 9; level++ { + lvl := level // Capture loop variable + encoderPools[lvl] = &sync.Pool{ + 
New: func() interface{} { + encoder, err := zstd.NewWriter(nil, zstd.WithEncoderLevel(zstd.EncoderLevelFromZstd(lvl))) + if err != nil { + // This should not happen with valid levels + panic(fmt.Sprintf("failed to create zstd encoder with level %d: %v", lvl, err)) + } + return encoder + }, + } + } + + // Create decoder pool + decoderPool = &sync.Pool{ + New: func() interface{} { + decoder, err := zstd.NewReader(nil) + if err != nil { + // This should not happen + panic(fmt.Sprintf("failed to create zstd decoder: %v", err)) + } + return decoder + }, + } + }) +} + +// getEncoder retrieves an encoder from the pool for the specified compression level +func getEncoder(level int) *zstd.Encoder { + initPools() + + pool, exists := encoderPools[level] + if !exists { + // Create a new pool for this level if it doesn't exist + pool = &sync.Pool{ + New: func() interface{} { + encoder, err := zstd.NewWriter(nil, zstd.WithEncoderLevel(zstd.EncoderLevelFromZstd(level))) + if err != nil { + panic(fmt.Sprintf("failed to create zstd encoder with level %d: %v", level, err)) + } + return encoder + }, + } + encoderPools[level] = pool + } + + return pool.Get().(*zstd.Encoder) +} + +// putEncoder returns an encoder to the pool +func putEncoder(encoder *zstd.Encoder, level int) { + if encoder == nil { + return + } + + // Reset the encoder for reuse + encoder.Reset(nil) + + if pool, exists := encoderPools[level]; exists { + pool.Put(encoder) + } +} + +// getDecoder retrieves a decoder from the pool +func getDecoder() *zstd.Decoder { + initPools() + return decoderPool.Get().(*zstd.Decoder) +} + +// putDecoder returns a decoder to the pool +func putDecoder(decoder *zstd.Decoder) { + if decoder == nil { + return + } + + // Reset the decoder for reuse + decoder.Reset(nil) + decoderPool.Put(decoder) +} + +// CompressibleDA wraps a DA implementation to add transparent compression support +type CompressibleDA struct { + config Config + encoder *zstd.Encoder + decoder *zstd.Decoder +} + +// NewCompressibleDA creates a new CompressibleDA wrapper +func NewCompressibleDA(baseDA da.DA, config Config) (*CompressibleDA, error) { + // Allow nil baseDA for testing purposes (when only using compression functions) + // The baseDA will only be used when calling Submit, Get, GetIDs methods + + var encoder *zstd.Encoder + var decoder *zstd.Decoder + var err error + + if config.Enabled { + // Create zstd encoder with specified level + encoder, err = zstd.NewWriter(nil, zstd.WithEncoderLevel(zstd.EncoderLevelFromZstd(config.ZstdLevel))) + if err != nil { + return nil, fmt.Errorf("failed to create zstd encoder: %w", err) + } + + // Create zstd decoder + decoder, err = zstd.NewReader(nil) + if err != nil { + encoder.Close() + return nil, fmt.Errorf("failed to create zstd decoder: %w", err) + } + } + + return &CompressibleDA{ + config: config, + encoder: encoder, + decoder: decoder, + }, nil +} + +// Close cleans up compression resources +func (c *CompressibleDA) Close() error { + if c.encoder != nil { + c.encoder.Close() + } + if c.decoder != nil { + c.decoder.Close() + } + return nil +} + +// compressBlob compresses a single blob using zstd +func (c *CompressibleDA) compressBlob(blob da.Blob) (da.Blob, error) { + if !c.config.Enabled || len(blob) == 0 { + return c.addCompressionHeader(blob, FlagUncompressed, uint64(len(blob))), nil + } + + // Compress the blob + compressed := c.encoder.EncodeAll(blob, make([]byte, 0, len(blob))) + + // Check if compression is beneficial + compressionRatio := float64(len(compressed)) / float64(len(blob)) 
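+ // Keep the compressed form only when compressed/original <= 1 - MinCompressionRatio; with the default 0.1, a 1000-byte blob must compress to 900 bytes or fewer, otherwise the original bytes are stored under FlagUncompressed.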
+ if compressionRatio > (1.0 - c.config.MinCompressionRatio) { + // Compression not beneficial, store uncompressed + return c.addCompressionHeader(blob, FlagUncompressed, uint64(len(blob))), nil + } + + return c.addCompressionHeader(compressed, FlagZstd, uint64(len(blob))), nil +} + +// decompressBlob decompresses a single blob +func (c *CompressibleDA) decompressBlob(compressedBlob da.Blob) (da.Blob, error) { + if len(compressedBlob) < CompressionHeaderSize { + // Assume legacy uncompressed blob + return compressedBlob, nil + } + + // Check if this could be a compressed blob with a valid header + flag := compressedBlob[0] + if flag != FlagUncompressed && flag != FlagZstd { + // This could be either: + // 1. A legacy blob without any header (most likely) + // 2. A corrupted blob with an invalid header + // + // For better heuristics, check if the bytes look like a valid header structure: + // - If flag is way outside expected range (e.g., printable ASCII for text), likely legacy + // - If the size field has a reasonable value for compressed data, likely corrupted header + originalSize := binary.LittleEndian.Uint64(compressedBlob[1:9]) + + // Heuristic: If flag is in printable ASCII range (32-126) and size is unreasonable, + // it's likely a legacy text blob. Otherwise, if flag is outside normal range (like 0xFF), + // it's likely a corrupted header. + if (flag >= 32 && flag <= 126) && (originalSize == 0 || originalSize > uint64(len(compressedBlob)*100)) { + // Likely a legacy blob (starts with printable text) + return compressedBlob, nil + } + + // Otherwise, it's likely a corrupted compressed blob or intentionally invalid + return nil, fmt.Errorf("%w: flag %d", ErrInvalidCompressionFlag, flag) + } + + // Valid flag, proceed with normal parsing + flag, originalSize, payload, err := c.parseCompressionHeader(compressedBlob) + if err != nil { + + return compressedBlob, nil + } + + switch flag { + case FlagUncompressed: + return payload, nil + case FlagZstd: + if !c.config.Enabled { + return nil, errors.New("received compressed blob but compression is disabled") + } + + decompressed, err := c.decoder.DecodeAll(payload, make([]byte, 0, originalSize)) + if err != nil { + return nil, fmt.Errorf("%w: %v", ErrDecompressionFailed, err) + } + + if uint64(len(decompressed)) != originalSize { + return nil, fmt.Errorf("decompressed size mismatch: expected %d, got %d", originalSize, len(decompressed)) + } + + return decompressed, nil + default: + return nil, fmt.Errorf("%w: flag %d", ErrInvalidCompressionFlag, flag) + } +} + +// addCompressionHeader adds compression metadata to the blob +func (c *CompressibleDA) addCompressionHeader(payload da.Blob, flag uint8, originalSize uint64) da.Blob { + // Single allocation for header + payload + result := make([]byte, CompressionHeaderSize+len(payload)) + + // Write header directly into result + result[0] = flag + binary.LittleEndian.PutUint64(result[1:9], originalSize) + + // Copy payload + copy(result[CompressionHeaderSize:], payload) + + return result +} + +// parseCompressionHeader extracts compression metadata from a blob +func (c *CompressibleDA) parseCompressionHeader(blob da.Blob) (uint8, uint64, da.Blob, error) { + if len(blob) < CompressionHeaderSize { + return 0, 0, nil, ErrInvalidHeader + } + + flag := blob[0] + originalSize := binary.LittleEndian.Uint64(blob[1:9]) + payload := blob[CompressionHeaderSize:] + + // Validate the compression flag + if flag != FlagUncompressed && flag != FlagZstd { + return 0, 0, nil, fmt.Errorf("%w: flag %d", 
ErrInvalidCompressionFlag, flag) + } + + return flag, originalSize, payload, nil +} + +// Helper functions for external use + +// Package-level compressor for efficient helper function usage +var ( + helperCompressor *HelperCompressor + helperOnce sync.Once +) + +// HelperCompressor provides efficient compression/decompression for helper functions +type HelperCompressor struct { + encoder *zstd.Encoder + decoder *zstd.Decoder + config Config + mu sync.Mutex // Protects encoder/decoder usage +} + +// getHelperCompressor returns a singleton helper compressor instance +func getHelperCompressor() *HelperCompressor { + helperOnce.Do(func() { + config := DefaultConfig() + encoder, _ := zstd.NewWriter(nil, zstd.WithEncoderLevel(zstd.EncoderLevelFromZstd(config.ZstdLevel))) + decoder, _ := zstd.NewReader(nil) + helperCompressor = &HelperCompressor{ + encoder: encoder, + decoder: decoder, + config: config, + } + }) + return helperCompressor +} + +// CompressBlob compresses a blob using the default zstd level 3 configuration +func CompressBlob(blob da.Blob) (da.Blob, error) { + helper := getHelperCompressor() + helper.mu.Lock() + defer helper.mu.Unlock() + + if !helper.config.Enabled || len(blob) == 0 { + // Return with uncompressed header + return addCompressionHeaderStandalone(blob, FlagUncompressed, uint64(len(blob))), nil + } + + // Compress the blob using the shared encoder + compressed := helper.encoder.EncodeAll(blob, make([]byte, 0, len(blob))) + + // Check if compression is beneficial + compressionRatio := float64(len(compressed)) / float64(len(blob)) + if compressionRatio > (1.0 - helper.config.MinCompressionRatio) { + // Compression not beneficial, store uncompressed + return addCompressionHeaderStandalone(blob, FlagUncompressed, uint64(len(blob))), nil + } + + return addCompressionHeaderStandalone(compressed, FlagZstd, uint64(len(blob))), nil +} + +// DecompressBlob decompresses a blob +func DecompressBlob(compressedBlob da.Blob) (da.Blob, error) { + if len(compressedBlob) < CompressionHeaderSize { + // Assume legacy uncompressed blob + return compressedBlob, nil + } + + flag, originalSize, payload, err := parseCompressionHeaderStandalone(compressedBlob) + if err != nil { + // Assume legacy uncompressed blob + return compressedBlob, nil + } + + switch flag { + case FlagUncompressed: + return payload, nil + case FlagZstd: + helper := getHelperCompressor() + helper.mu.Lock() + defer helper.mu.Unlock() + + decompressed, err := helper.decoder.DecodeAll(payload, make([]byte, 0, originalSize)) + if err != nil { + return nil, fmt.Errorf("%w: %v", ErrDecompressionFailed, err) + } + + if uint64(len(decompressed)) != originalSize { + return nil, fmt.Errorf("decompressed size mismatch: expected %d, got %d", originalSize, len(decompressed)) + } + + return decompressed, nil + default: + return nil, fmt.Errorf("unsupported compression flag: %d", flag) + } +} + +// Standalone helper functions for use without CompressibleDA instance + +// addCompressionHeaderStandalone adds compression metadata header to data +func addCompressionHeaderStandalone(data []byte, flag uint8, originalSize uint64) []byte { + // Single allocation for header + data + result := make([]byte, CompressionHeaderSize+len(data)) + + // Write header directly into result + result[0] = flag + binary.LittleEndian.PutUint64(result[1:9], originalSize) + + // Copy data + copy(result[CompressionHeaderSize:], data) + + return result +} + +// parseCompressionHeaderStandalone parses compression metadata from blob +func 
parseCompressionHeaderStandalone(blob []byte) (flag uint8, originalSize uint64, payload []byte, err error) { + if len(blob) < CompressionHeaderSize { + return 0, 0, nil, errors.New("blob too small for compression header") + } + + flag = blob[0] + originalSize = binary.LittleEndian.Uint64(blob[1:9]) + payload = blob[CompressionHeaderSize:] + + return flag, originalSize, payload, nil +} + +// CompressionInfo provides information about a blob's compression +type CompressionInfo struct { + IsCompressed bool + Algorithm string + OriginalSize uint64 + CompressedSize uint64 + CompressionRatio float64 +} + +// CompressBatch compresses multiple blobs efficiently without repeated pool access +func CompressBatch(blobs []da.Blob) ([]da.Blob, error) { + if len(blobs) == 0 { + return blobs, nil + } + + helper := getHelperCompressor() + helper.mu.Lock() + defer helper.mu.Unlock() + + compressed := make([]da.Blob, len(blobs)) + for i, blob := range blobs { + if !helper.config.Enabled || len(blob) == 0 { + compressed[i] = addCompressionHeaderStandalone(blob, FlagUncompressed, uint64(len(blob))) + continue + } + + // Compress the blob using the shared encoder + compressedData := helper.encoder.EncodeAll(blob, make([]byte, 0, len(blob))) + + // Check if compression is beneficial + compressionRatio := float64(len(compressedData)) / float64(len(blob)) + if compressionRatio > (1.0 - helper.config.MinCompressionRatio) { + // Compression not beneficial, store uncompressed + compressed[i] = addCompressionHeaderStandalone(blob, FlagUncompressed, uint64(len(blob))) + } else { + compressed[i] = addCompressionHeaderStandalone(compressedData, FlagZstd, uint64(len(blob))) + } + } + + return compressed, nil +} + +// DecompressBatch decompresses multiple blobs efficiently without repeated pool access +func DecompressBatch(compressedBlobs []da.Blob) ([]da.Blob, error) { + if len(compressedBlobs) == 0 { + return compressedBlobs, nil + } + + helper := getHelperCompressor() + helper.mu.Lock() + defer helper.mu.Unlock() + + decompressed := make([]da.Blob, len(compressedBlobs)) + for i, compressedBlob := range compressedBlobs { + if len(compressedBlob) < CompressionHeaderSize { + // Assume legacy uncompressed blob + decompressed[i] = compressedBlob + continue + } + + flag, originalSize, payload, err := parseCompressionHeaderStandalone(compressedBlob) + if err != nil { + // Assume legacy uncompressed blob + decompressed[i] = compressedBlob + continue + } + + switch flag { + case FlagUncompressed: + decompressed[i] = payload + case FlagZstd: + decompressedData, err := helper.decoder.DecodeAll(payload, make([]byte, 0, originalSize)) + if err != nil { + return nil, fmt.Errorf("failed to decompress blob at index %d: %w", i, err) + } + + if uint64(len(decompressedData)) != originalSize { + return nil, fmt.Errorf("decompressed size mismatch at index %d: expected %d, got %d", + i, originalSize, len(decompressedData)) + } + + decompressed[i] = decompressedData + default: + return nil, fmt.Errorf("unsupported compression flag at index %d: %d", i, flag) + } + } + + return decompressed, nil +} + +// GetCompressionInfo analyzes a blob to determine its compression status +func GetCompressionInfo(blob da.Blob) CompressionInfo { + info := CompressionInfo{ + IsCompressed: false, + Algorithm: "none", + OriginalSize: uint64(len(blob)), + CompressedSize: uint64(len(blob)), + } + + if len(blob) < CompressionHeaderSize { + return info + } + + flag := blob[0] + originalSize := binary.LittleEndian.Uint64(blob[1:9]) + payloadSize := uint64(len(blob) - 
CompressionHeaderSize) + + switch flag { + case FlagZstd: + info.IsCompressed = true + info.Algorithm = "zstd" + info.OriginalSize = originalSize + info.CompressedSize = payloadSize + if originalSize > 0 { + info.CompressionRatio = float64(payloadSize) / float64(originalSize) + } + case FlagUncompressed: + info.Algorithm = "none" + info.OriginalSize = originalSize + info.CompressedSize = payloadSize + } + + return info +} diff --git a/da/compression/compression_test.go b/da/compression/compression_test.go new file mode 100644 index 0000000000..846e35f643 --- /dev/null +++ b/da/compression/compression_test.go @@ -0,0 +1,239 @@ +package compression + +import ( + "bytes" + "context" + "crypto/rand" + "testing" + "time" + + "github.com/evstack/ev-node/core/da" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +// mockDA implements a simple in-memory DA for testing +type mockDA struct { + blobs map[string]da.Blob + ids []da.ID +} + +func newMockDA() *mockDA { + return &mockDA{ + blobs: make(map[string]da.Blob), + ids: make([]da.ID, 0), + } +} + +func (m *mockDA) Get(ctx context.Context, ids []da.ID, namespace []byte) ([]da.Blob, error) { + blobs := make([]da.Blob, len(ids)) + for i, id := range ids { + blob, exists := m.blobs[string(id)] + if !exists { + return nil, da.ErrBlobNotFound + } + blobs[i] = blob + } + return blobs, nil +} + +func (m *mockDA) Submit(ctx context.Context, blobs []da.Blob, gasPrice float64, namespace []byte) ([]da.ID, error) { + ids := make([]da.ID, len(blobs)) + for i, blob := range blobs { + id := da.ID([]byte{byte(len(m.ids))}) + m.blobs[string(id)] = blob + m.ids = append(m.ids, id) + ids[i] = id + } + return ids, nil +} + +func (m *mockDA) SubmitWithOptions(ctx context.Context, blobs []da.Blob, gasPrice float64, namespace []byte, options []byte) ([]da.ID, error) { + return m.Submit(ctx, blobs, gasPrice, namespace) +} + +func (m *mockDA) GetIDs(ctx context.Context, height uint64, namespace []byte) (*da.GetIDsResult, error) { + return &da.GetIDsResult{IDs: m.ids, Timestamp: time.Now()}, nil +} + +func (m *mockDA) GetProofs(ctx context.Context, ids []da.ID, namespace []byte) ([]da.Proof, error) { + proofs := make([]da.Proof, len(ids)) + for i := range ids { + proofs[i] = da.Proof("mock_proof") + } + return proofs, nil +} + +func (m *mockDA) Commit(ctx context.Context, blobs []da.Blob, namespace []byte) ([]da.Commitment, error) { + commitments := make([]da.Commitment, len(blobs)) + for i, blob := range blobs { + commitments[i] = da.Commitment(blob[:min(len(blob), 32)]) + } + return commitments, nil +} + +func (m *mockDA) Validate(ctx context.Context, ids []da.ID, proofs []da.Proof, namespace []byte) ([]bool, error) { + results := make([]bool, len(ids)) + for i := range ids { + results[i] = true + } + return results, nil +} + +func (m *mockDA) GasPrice(ctx context.Context) (float64, error) { + return 1.0, nil +} + +func (m *mockDA) GasMultiplier(ctx context.Context) (float64, error) { + return 1.0, nil +} + +func TestCompression_ZstdLevel3(t *testing.T) { + config := Config{ + Enabled: true, + ZstdLevel: 3, + MinCompressionRatio: 0.1, + } + + compressor, err := NewCompressibleDA(nil, config) + require.NoError(t, err) + defer compressor.Close() + + // Test with compressible data + originalData := bytes.Repeat([]byte("hello world "), 100) + + compressed, err := compressor.compressBlob(originalData) + require.NoError(t, err) + + // Check that compression header is present + require.GreaterOrEqual(t, len(compressed), CompressionHeaderSize) + 
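+ // The 9-byte header is one flag byte followed by the original length as a little-endian uint64.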
+ // Verify compression flag + flag := compressed[0] + assert.Equal(t, uint8(FlagZstd), flag) + + // Decompress and verify + decompressed, err := compressor.decompressBlob(compressed) + require.NoError(t, err) + assert.Equal(t, originalData, decompressed) +} + +func TestCompression_UncompressedFallback(t *testing.T) { + config := Config{ + Enabled: true, + ZstdLevel: 3, + MinCompressionRatio: 0.1, + } + + compressor, err := NewCompressibleDA(nil, config) + require.NoError(t, err) + defer compressor.Close() + + // Generate random data that won't compress well + randomData := make([]byte, 100) + _, err = rand.Read(randomData) + require.NoError(t, err) + + compressed, err := compressor.compressBlob(randomData) + require.NoError(t, err) + + // Should use uncompressed flag + flag := compressed[0] + assert.Equal(t, uint8(FlagUncompressed), flag) + + // Decompress and verify + decompressed, err := compressor.decompressBlob(compressed) + require.NoError(t, err) + assert.Equal(t, randomData, decompressed) +} + +func TestCompression_DisabledMode(t *testing.T) { + config := Config{ + Enabled: false, + ZstdLevel: 3, + MinCompressionRatio: 0.1, + } + + compressor, err := NewCompressibleDA(nil, config) + require.NoError(t, err) + defer compressor.Close() + + originalData := bytes.Repeat([]byte("test data "), 50) + + compressed, err := compressor.compressBlob(originalData) + require.NoError(t, err) + + // Should use uncompressed flag when disabled + flag := compressed[0] + assert.Equal(t, uint8(FlagUncompressed), flag) + + decompressed, err := compressor.decompressBlob(compressed) + require.NoError(t, err) + assert.Equal(t, originalData, decompressed) +} + +func TestCompression_LegacyBlobs(t *testing.T) { + config := DefaultConfig() + compressor, err := NewCompressibleDA(nil, config) + require.NoError(t, err) + defer compressor.Close() + + // Test with legacy blob (no compression header) + legacyBlob := []byte("legacy data without header") + + // Should return as-is + decompressed, err := compressor.decompressBlob(legacyBlob) + require.NoError(t, err) + assert.Equal(t, legacyBlob, decompressed) +} + +func TestCompression_ErrorCases(t *testing.T) { + + t.Run("invalid compression flag", func(t *testing.T) { + config := DefaultConfig() + compressor, err := NewCompressibleDA(nil, config) + require.NoError(t, err) + defer compressor.Close() + + // Create blob with invalid flag + invalidBlob := make([]byte, CompressionHeaderSize+10) + invalidBlob[0] = 0xFF // Invalid flag + + _, err = compressor.decompressBlob(invalidBlob) + assert.ErrorIs(t, err, ErrInvalidCompressionFlag) + }) +} + +func TestCompressionInfo(t *testing.T) { + config := DefaultConfig() + compressor, err := NewCompressibleDA(nil, config) + require.NoError(t, err) + defer compressor.Close() + + // Test with compressible data + originalData := bytes.Repeat([]byte("compress me "), 100) + + compressed, err := compressor.compressBlob(originalData) + require.NoError(t, err) + + info := GetCompressionInfo(compressed) + assert.True(t, info.IsCompressed) + assert.Equal(t, "zstd", info.Algorithm) + assert.Equal(t, uint64(len(originalData)), info.OriginalSize) + assert.Less(t, info.CompressionRatio, 1.0) + assert.Greater(t, info.CompressionRatio, 0.0) +} + +func TestHelperFunctions(t *testing.T) { + originalData := bytes.Repeat([]byte("test "), 100) + + // Test standalone compress function + compressed, err := CompressBlob(originalData) + require.NoError(t, err) + + // Test standalone decompress function + decompressed, err := DecompressBlob(compressed) + 
require.NoError(t, err) + + assert.Equal(t, originalData, decompressed) +} diff --git a/da/compression/efficiency_test.go b/da/compression/efficiency_test.go new file mode 100644 index 0000000000..6a6ce2b4a3 --- /dev/null +++ b/da/compression/efficiency_test.go @@ -0,0 +1,237 @@ +package compression + +import ( + "bytes" + "testing" + "time" + + "github.com/evstack/ev-node/core/da" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +// TestBatchCompression tests the batch compression functions +func TestBatchCompression(t *testing.T) { + // Create test data + testBlobs := []da.Blob{ + bytes.Repeat([]byte("compressible "), 100), // Should compress + bytes.Repeat([]byte("a"), 1000), // Highly compressible + []byte("small"), // Small blob + make([]byte, 100), // Random data (may not compress) + } + + // Fill random data + for i := range testBlobs[3] { + testBlobs[3][i] = byte(i * 7 % 256) + } + + t.Run("CompressBatch", func(t *testing.T) { + compressed, err := CompressBatch(testBlobs) + require.NoError(t, err) + require.Len(t, compressed, len(testBlobs)) + + // Verify each blob has a header + for i, blob := range compressed { + require.GreaterOrEqual(t, len(blob), CompressionHeaderSize, + "Blob %d should have compression header", i) + } + }) + + t.Run("RoundTrip", func(t *testing.T) { + // Compress + compressed, err := CompressBatch(testBlobs) + require.NoError(t, err) + + // Decompress + decompressed, err := DecompressBatch(compressed) + require.NoError(t, err) + require.Len(t, decompressed, len(testBlobs)) + + // Verify data integrity + for i, original := range testBlobs { + assert.Equal(t, original, decompressed[i], + "Blob %d should match after round trip", i) + } + }) + + t.Run("EmptyBatch", func(t *testing.T) { + compressed, err := CompressBatch([]da.Blob{}) + require.NoError(t, err) + require.Empty(t, compressed) + + decompressed, err := DecompressBatch([]da.Blob{}) + require.NoError(t, err) + require.Empty(t, decompressed) + }) + + t.Run("MixedCompressionResults", func(t *testing.T) { + compressed, err := CompressBatch(testBlobs) + require.NoError(t, err) + + // Check compression info for each blob + for i, blob := range compressed { + info := GetCompressionInfo(blob) + t.Logf("Blob %d: Compressed=%v, Ratio=%.3f", + i, info.IsCompressed, info.CompressionRatio) + } + }) +} + +// BenchmarkHelperEfficiency compares the performance of single vs batch operations +func BenchmarkHelperEfficiency(b *testing.B) { + // Create test data + numBlobs := 10 + testBlobs := make([]da.Blob, numBlobs) + for i := range testBlobs { + testBlobs[i] = bytes.Repeat([]byte("test data "), 100) + } + + b.Run("Single_Compress", func(b *testing.B) { + b.ResetTimer() + for i := 0; i < b.N; i++ { + for _, blob := range testBlobs { + _, err := CompressBlob(blob) + if err != nil { + b.Fatal(err) + } + } + } + }) + + b.Run("Batch_Compress", func(b *testing.B) { + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := CompressBatch(testBlobs) + if err != nil { + b.Fatal(err) + } + } + }) + + // Pre-compress for decompression benchmarks + compressedBlobs, _ := CompressBatch(testBlobs) + + b.Run("Single_Decompress", func(b *testing.B) { + b.ResetTimer() + for i := 0; i < b.N; i++ { + for _, blob := range compressedBlobs { + _, err := DecompressBlob(blob) + if err != nil { + b.Fatal(err) + } + } + } + }) + + b.Run("Batch_Decompress", func(b *testing.B) { + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := DecompressBatch(compressedBlobs) + if err != nil { + b.Fatal(err) + } + } + }) 
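+ // Note: the batch helpers take the shared encoder/decoder lock once per batch, while the per-blob helpers lock for every blob they process.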
+} + +// TestHelperCompressorSingleton verifies the helper compressor is properly initialized +func TestHelperCompressorSingleton(t *testing.T) { + // Get helper instance + helper1 := getHelperCompressor() + require.NotNil(t, helper1) + require.NotNil(t, helper1.encoder) + require.NotNil(t, helper1.decoder) + + // Get again - should be same instance + helper2 := getHelperCompressor() + assert.Same(t, helper1, helper2, "Should return same singleton instance") +} + +// TestConcurrentHelperUsage tests thread safety of the helper compressor +func TestConcurrentHelperUsage(t *testing.T) { + testData := bytes.Repeat([]byte("concurrent test "), 50) + + // Run concurrent compressions + done := make(chan bool, 10) + for i := 0; i < 10; i++ { + go func() { + compressed, err := CompressBlob(testData) + require.NoError(t, err) + + decompressed, err := DecompressBlob(compressed) + require.NoError(t, err) + require.Equal(t, testData, decompressed) + + done <- true + }() + } + + // Wait for all goroutines + for i := 0; i < 10; i++ { + <-done + } +} + +// BenchmarkPoolOverhead measures the overhead of pool operations +func BenchmarkPoolOverhead(b *testing.B) { + b.Run("GetPut_Encoder", func(b *testing.B) { + initPools() + b.ResetTimer() + for i := 0; i < b.N; i++ { + encoder := getEncoder(DefaultZstdLevel) + putEncoder(encoder, DefaultZstdLevel) + } + }) + + b.Run("GetPut_Decoder", func(b *testing.B) { + initPools() + b.ResetTimer() + for i := 0; i < b.N; i++ { + decoder := getDecoder() + putDecoder(decoder) + } + }) + + b.Run("Helper_Lock_Unlock", func(b *testing.B) { + helper := getHelperCompressor() + b.ResetTimer() + for i := 0; i < b.N; i++ { + helper.mu.Lock() + helper.mu.Unlock() + } + }) +} + +// TestMemoryAllocationOptimization verifies the header allocation optimization +func TestMemoryAllocationOptimization(t *testing.T) { + testData := []byte("test data for header") + originalSize := uint64(len(testData)) + flag := uint8(FlagZstd) + + // Test instance method + config := DefaultConfig() + compressor, err := NewCompressibleDA(nil, config) + require.NoError(t, err) + defer compressor.Close() + + start := time.Now() + for i := 0; i < 10000; i++ { + _ = compressor.addCompressionHeader(testData, flag, originalSize) + } + instanceTime := time.Since(start) + + // Test standalone function + start = time.Now() + for i := 0; i < 10000; i++ { + _ = addCompressionHeaderStandalone(testData, flag, originalSize) + } + standaloneTime := time.Since(start) + + t.Logf("Instance method: %v", instanceTime) + t.Logf("Standalone function: %v", standaloneTime) + + // Both should be similarly fast with optimized allocation + ratio := float64(instanceTime) / float64(standaloneTime) + assert.InDelta(t, 1.0, ratio, 0.5, + "Both methods should have similar performance after optimization") +} \ No newline at end of file diff --git a/da/go.mod b/da/go.mod index de84e2e906..2d08d84aa4 100644 --- a/da/go.mod +++ b/da/go.mod @@ -6,6 +6,7 @@ replace github.com/evstack/ev-node/core => ../core require ( github.com/evstack/ev-node/core v0.0.0-20250312114929-104787ba1a4c + github.com/klauspost/compress v1.18.0 github.com/filecoin-project/go-jsonrpc v0.8.0 github.com/rs/zerolog v1.34.0 github.com/stretchr/testify v1.10.0 diff --git a/da/go.sum b/da/go.sum index 227252b6ed..87438d6d59 100644 --- a/da/go.sum +++ b/da/go.sum @@ -47,6 +47,8 @@ github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/ad github.com/ipfs/go-log/v2 v2.0.8 h1:3b3YNopMHlj4AvyhWAx0pDxqSQWYi4/WuWO7yRV6/Qg= github.com/ipfs/go-log/v2 
v2.0.8/go.mod h1:eZs4Xt4ZUJQFM3DlanGhy7TkwwawCZcSByscwkWG+dw= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= +github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= diff --git a/da/jsonrpc/client.go b/da/jsonrpc/client.go index cba3574971..9327de2bc4 100644 --- a/da/jsonrpc/client.go +++ b/da/jsonrpc/client.go @@ -11,6 +11,7 @@ import ( "github.com/rs/zerolog" "github.com/evstack/ev-node/core/da" + "github.com/evstack/ev-node/da/compression" internal "github.com/evstack/ev-node/da/jsonrpc/internal" ) @@ -21,11 +22,13 @@ type Module interface { // API defines the jsonrpc service module API type API struct { - Logger zerolog.Logger - MaxBlobSize uint64 - gasPrice float64 - gasMultiplier float64 - Internal struct { + Logger zerolog.Logger + MaxBlobSize uint64 + gasPrice float64 + gasMultiplier float64 + compressionEnabled bool + compressionConfig compression.Config + Internal struct { Get func(ctx context.Context, ids []da.ID, ns []byte) ([]da.Blob, error) `perm:"read"` GetIDs func(ctx context.Context, height uint64, ns []byte) (*da.GetIDsResult, error) `perm:"read"` GetProofs func(ctx context.Context, ids []da.ID, ns []byte) ([]da.Proof, error) `perm:"read"` @@ -53,6 +56,31 @@ func (api *API) Get(ctx context.Context, ids []da.ID, ns []byte) ([]da.Blob, err return nil, fmt.Errorf("failed to get blobs: %w", err) } api.Logger.Debug().Str("method", "Get").Int("num_blobs_returned", len(res)).Msg("RPC call successful") + + // Decompress blobs if compression is enabled + if api.compressionEnabled && len(res) > 0 { + decompressed, err := compression.DecompressBatch(res) + if err != nil { + api.Logger.Error().Err(err).Msg("Failed to decompress blobs") + return nil, fmt.Errorf("failed to decompress blobs: %w", err) + } + + // Log decompression stats + for i, blob := range res { + info := compression.GetCompressionInfo(blob) + if info.IsCompressed { + api.Logger.Debug(). + Int("blob_index", i). + Uint64("compressed_size", info.CompressedSize). + Uint64("original_size", info.OriginalSize). + Float64("ratio", info.CompressionRatio). + Msg("Blob decompression stats") + } + } + + return decompressed, nil + } + return res, nil } @@ -106,8 +134,20 @@ func (api *API) GetProofs(ctx context.Context, ids []da.ID, ns []byte) ([]da.Pro // Commit creates a Commitment for each given Blob. 
func (api *API) Commit(ctx context.Context, blobs []da.Blob, ns []byte) ([]da.Commitment, error) { preparedNs := da.PrepareNamespace(ns) - api.Logger.Debug().Str("method", "Commit").Int("num_blobs", len(blobs)).Str("namespace", hex.EncodeToString(preparedNs)).Msg("Making RPC call") - res, err := api.Internal.Commit(ctx, blobs, preparedNs) + + // Compress blobs if compression is enabled + blobsToCommit := blobs + if api.compressionEnabled && len(blobs) > 0 { + compressed, err := compression.CompressBatch(blobs) + if err != nil { + api.Logger.Error().Err(err).Msg("Failed to compress blobs for commit") + return nil, fmt.Errorf("failed to compress blobs: %w", err) + } + blobsToCommit = compressed + } + + api.Logger.Debug().Str("method", "Commit").Int("num_blobs", len(blobsToCommit)).Str("namespace", hex.EncodeToString(preparedNs)).Msg("Making RPC call") + res, err := api.Internal.Commit(ctx, blobsToCommit, preparedNs) if err != nil { api.Logger.Error().Err(err).Str("method", "Commit").Msg("RPC call failed") } else { @@ -132,8 +172,46 @@ func (api *API) Validate(ctx context.Context, ids []da.ID, proofs []da.Proof, ns // Submit submits the Blobs to Data Availability layer. func (api *API) Submit(ctx context.Context, blobs []da.Blob, gasPrice float64, ns []byte) ([]da.ID, error) { preparedNs := da.PrepareNamespace(ns) - api.Logger.Debug().Str("method", "Submit").Int("num_blobs", len(blobs)).Float64("gas_price", gasPrice).Str("namespace", hex.EncodeToString(preparedNs)).Msg("Making RPC call") - res, err := api.Internal.Submit(ctx, blobs, gasPrice, preparedNs) + + // Compress blobs if compression is enabled + blobsToSubmit := blobs + if api.compressionEnabled && len(blobs) > 0 { + compressed, err := compression.CompressBatch(blobs) + if err != nil { + api.Logger.Error().Err(err).Msg("Failed to compress blobs") + return nil, fmt.Errorf("failed to compress blobs: %w", err) + } + + // Log compression stats + var totalOriginal, totalCompressed uint64 + for i, blob := range compressed { + info := compression.GetCompressionInfo(blob) + if info.IsCompressed { + totalOriginal += info.OriginalSize + totalCompressed += info.CompressedSize + api.Logger.Debug(). + Int("blob_index", i). + Uint64("original_size", info.OriginalSize). + Uint64("compressed_size", info.CompressedSize). + Float64("ratio", info.CompressionRatio). + Msg("Blob compression stats") + } + } + + if totalOriginal > 0 { + savings := float64(totalOriginal-totalCompressed) / float64(totalOriginal) * 100 + api.Logger.Debug(). + Uint64("total_original", totalOriginal). + Uint64("total_compressed", totalCompressed). + Float64("savings_percent", savings). 
+ Msg("Compression summary") + } + + blobsToSubmit = compressed + } + + api.Logger.Debug().Str("method", "Submit").Int("num_blobs", len(blobsToSubmit)).Float64("gas_price", gasPrice).Str("namespace", hex.EncodeToString(preparedNs)).Msg("Making RPC call") + res, err := api.Internal.Submit(ctx, blobsToSubmit, gasPrice, preparedNs) if err != nil { if strings.Contains(err.Error(), context.Canceled.Error()) { api.Logger.Debug().Str("method", "Submit").Msg("RPC call canceled due to context cancellation") @@ -156,9 +234,46 @@ func (api *API) SubmitWithOptions(ctx context.Context, inputBlobs []da.Blob, gas return []da.ID{}, nil } + // Compress blobs first if compression is enabled + blobsToSubmit := inputBlobs + if api.compressionEnabled && len(inputBlobs) > 0 { + compressed, err := compression.CompressBatch(inputBlobs) + if err != nil { + api.Logger.Error().Err(err).Msg("Failed to compress blobs") + return nil, fmt.Errorf("failed to compress blobs: %w", err) + } + + // Log compression stats + var totalOriginal, totalCompressed uint64 + for i, blob := range compressed { + info := compression.GetCompressionInfo(blob) + if info.IsCompressed { + totalOriginal += info.OriginalSize + totalCompressed += info.CompressedSize + api.Logger.Debug(). + Int("blob_index", i). + Uint64("original_size", info.OriginalSize). + Uint64("compressed_size", info.CompressedSize). + Float64("ratio", info.CompressionRatio). + Msg("Blob compression stats") + } + } + + if totalOriginal > 0 { + savings := float64(totalOriginal-totalCompressed) / float64(totalOriginal) * 100 + api.Logger.Debug(). + Uint64("total_original", totalOriginal). + Uint64("total_compressed", totalCompressed). + Float64("savings_percent", savings). + Msg("Compression summary") + } + + blobsToSubmit = compressed + } + // Validate each blob individually and calculate total size var totalSize uint64 - for i, blob := range inputBlobs { + for i, blob := range blobsToSubmit { blobLen := uint64(len(blob)) if blobLen > maxBlobSize { api.Logger.Warn().Int("index", i).Uint64("blobSize", blobLen).Uint64("maxBlobSize", maxBlobSize).Msg("Individual blob exceeds MaxBlobSize") @@ -173,8 +288,8 @@ func (api *API) SubmitWithOptions(ctx context.Context, inputBlobs []da.Blob, gas } preparedNs := da.PrepareNamespace(ns) - api.Logger.Debug().Str("method", "SubmitWithOptions").Int("num_blobs", len(inputBlobs)).Uint64("total_size", totalSize).Float64("gas_price", gasPrice).Str("namespace", hex.EncodeToString(preparedNs)).Msg("Making RPC call") - res, err := api.Internal.SubmitWithOptions(ctx, inputBlobs, gasPrice, preparedNs, options) + api.Logger.Debug().Str("method", "SubmitWithOptions").Int("num_blobs", len(blobsToSubmit)).Uint64("total_size", totalSize).Float64("gas_price", gasPrice).Str("namespace", hex.EncodeToString(preparedNs)).Msg("Making RPC call") + res, err := api.Internal.SubmitWithOptions(ctx, blobsToSubmit, gasPrice, preparedNs, options) if err != nil { if strings.Contains(err.Error(), context.Canceled.Error()) { api.Logger.Debug().Str("method", "SubmitWithOptions").Msg("RPC call canceled due to context cancellation") @@ -228,14 +343,37 @@ func (c *Client) Close() { c.closer.closeAll() } +// ClientOptions contains configuration options for the client +type ClientOptions struct { + // Compression settings + CompressionEnabled bool + CompressionLevel int // 1-22, default 3 + MinCompressionRatio float64 // Minimum compression ratio to store compressed, default 0.1 +} + +// DefaultClientOptions returns default client options with compression enabled +func 
DefaultClientOptions() ClientOptions { + return ClientOptions{ + CompressionEnabled: true, + CompressionLevel: compression.DefaultZstdLevel, + MinCompressionRatio: compression.DefaultMinCompressionRatio, + } +} + // NewClient creates a new Client with one connection per namespace with the // given token as the authorization token. func NewClient(ctx context.Context, logger zerolog.Logger, addr, token string, gasPrice, gasMultiplier float64) (*Client, error) { authHeader := http.Header{"Authorization": []string{fmt.Sprintf("Bearer %s", token)}} - return newClient(ctx, logger, addr, authHeader, gasPrice, gasMultiplier) + return newClient(ctx, logger, addr, authHeader, gasPrice, gasMultiplier, DefaultClientOptions()) } -func newClient(ctx context.Context, logger zerolog.Logger, addr string, authHeader http.Header, gasPrice, gasMultiplier float64) (*Client, error) { +// NewClientWithOptions creates a new Client with custom options +func NewClientWithOptions(ctx context.Context, logger zerolog.Logger, addr, token string, gasPrice, gasMultiplier float64, opts ClientOptions) (*Client, error) { + authHeader := http.Header{"Authorization": []string{fmt.Sprintf("Bearer %s", token)}} + return newClient(ctx, logger, addr, authHeader, gasPrice, gasMultiplier, opts) +} + +func newClient(ctx context.Context, logger zerolog.Logger, addr string, authHeader http.Header, gasPrice, gasMultiplier float64, opts ClientOptions) (*Client, error) { var multiCloser multiClientCloser var client Client client.DA.Logger = logger @@ -243,6 +381,22 @@ func newClient(ctx context.Context, logger zerolog.Logger, addr string, authHead client.DA.gasPrice = gasPrice client.DA.gasMultiplier = gasMultiplier + // Set compression configuration + client.DA.compressionEnabled = opts.CompressionEnabled + client.DA.compressionConfig = compression.Config{ + Enabled: opts.CompressionEnabled, + ZstdLevel: opts.CompressionLevel, + MinCompressionRatio: opts.MinCompressionRatio, + } + + if opts.CompressionEnabled { + logger.Info(). + Bool("compression", opts.CompressionEnabled). + Int("level", opts.CompressionLevel). + Float64("min_ratio", opts.MinCompressionRatio). 
+ Msg("Compression enabled for JSONRPC client") + } + errs := getKnownErrorsMapping() for name, module := range moduleMap(&client) { closer, err := jsonrpc.NewMergeClient(ctx, addr, name, []interface{}{module}, authHeader, jsonrpc.WithErrors(errs)) diff --git a/da/jsonrpc/client_compression_test.go b/da/jsonrpc/client_compression_test.go new file mode 100644 index 0000000000..296be1bb23 --- /dev/null +++ b/da/jsonrpc/client_compression_test.go @@ -0,0 +1,311 @@ +package jsonrpc + +import ( + "bytes" + "context" + "testing" + + "github.com/rs/zerolog" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/evstack/ev-node/core/da" + "github.com/evstack/ev-node/da/compression" +) + +// mockRPCServer simulates the RPC server for testing +type mockRPCServer struct { + blobs map[string]da.Blob + compressionDetected bool +} + +func newMockRPCServer() *mockRPCServer { + return &mockRPCServer{ + blobs: make(map[string]da.Blob), + } +} + +func (m *mockRPCServer) submit(blobs []da.Blob) []da.ID { + ids := make([]da.ID, len(blobs)) + for i, blob := range blobs { + // Check if blob is compressed + if len(blob) >= compression.CompressionHeaderSize { + info := compression.GetCompressionInfo(blob) + if info.IsCompressed || blob[0] == compression.FlagUncompressed { + m.compressionDetected = true + } + } + + id := da.ID([]byte{byte(len(m.blobs))}) + m.blobs[string(id)] = blob + ids[i] = id + } + return ids +} + +func (m *mockRPCServer) get(ids []da.ID) []da.Blob { + blobs := make([]da.Blob, len(ids)) + for i, id := range ids { + blobs[i] = m.blobs[string(id)] + } + return blobs +} + +// TestClientCompressionSubmit tests that the client compresses data before submission +func TestClientCompressionSubmit(t *testing.T) { + logger := zerolog.Nop() + mockServer := newMockRPCServer() + + // Create API with compression enabled + api := &API{ + Logger: logger, + MaxBlobSize: 1024 * 1024, + compressionEnabled: true, + compressionConfig: compression.Config{ + Enabled: true, + ZstdLevel: 3, + MinCompressionRatio: 0.1, + }, + } + + // Mock the internal submit function + api.Internal.Submit = func(ctx context.Context, blobs []da.Blob, gasPrice float64, ns []byte) ([]da.ID, error) { + return mockServer.submit(blobs), nil + } + + // Test data that should compress well + testBlobs := []da.Blob{ + bytes.Repeat([]byte("compress me "), 100), + bytes.Repeat([]byte("a"), 1000), + } + + ctx := context.Background() + ids, err := api.Submit(ctx, testBlobs, 1.0, []byte("test")) + require.NoError(t, err) + require.Len(t, ids, len(testBlobs)) + + // Verify compression was applied + assert.True(t, mockServer.compressionDetected, "Compression should be detected in submitted blobs") + + // Check that compressed blobs are smaller + for _, id := range ids { + compressedBlob := mockServer.blobs[string(id)] + info := compression.GetCompressionInfo(compressedBlob) + + // At least one blob should be compressed + if info.IsCompressed { + assert.Less(t, info.CompressedSize, info.OriginalSize, + "Compressed size should be less than original") + } + } +} + +// TestClientCompressionGet tests that the client decompresses data after retrieval +func TestClientCompressionGet(t *testing.T) { + logger := zerolog.Nop() + mockServer := newMockRPCServer() + + // Original test data + originalBlobs := []da.Blob{ + bytes.Repeat([]byte("test data "), 50), + []byte("small data"), + } + + // Compress blobs manually for the mock server + compressedBlobs := make([]da.Blob, len(originalBlobs)) + for i, blob := range 
originalBlobs { + compressed, err := compression.CompressBlob(blob) + require.NoError(t, err) + compressedBlobs[i] = compressed + } + + // Store compressed blobs in mock server + ids := mockServer.submit(compressedBlobs) + + // Create API with compression enabled + api := &API{ + Logger: logger, + compressionEnabled: true, + compressionConfig: compression.Config{ + Enabled: true, + }, + } + + // Mock the internal get function + api.Internal.Get = func(ctx context.Context, ids []da.ID, ns []byte) ([]da.Blob, error) { + return mockServer.get(ids), nil + } + + // Retrieve and decompress + ctx := context.Background() + retrievedBlobs, err := api.Get(ctx, ids, []byte("test")) + require.NoError(t, err) + require.Len(t, retrievedBlobs, len(originalBlobs)) + + // Verify data integrity + for i, retrieved := range retrievedBlobs { + assert.Equal(t, originalBlobs[i], retrieved, + "Retrieved blob should match original after decompression") + } +} + +// TestClientCompressionDisabled tests that compression can be disabled +func TestClientCompressionDisabled(t *testing.T) { + logger := zerolog.Nop() + mockServer := newMockRPCServer() + + // Create API with compression disabled + api := &API{ + Logger: logger, + MaxBlobSize: 1024 * 1024, + compressionEnabled: false, + } + + // Mock the internal submit function + api.Internal.Submit = func(ctx context.Context, blobs []da.Blob, gasPrice float64, ns []byte) ([]da.ID, error) { + return mockServer.submit(blobs), nil + } + + // Test data + testBlobs := []da.Blob{ + bytes.Repeat([]byte("don't compress me "), 50), + } + + ctx := context.Background() + ids, err := api.Submit(ctx, testBlobs, 1.0, []byte("test")) + require.NoError(t, err) + require.Len(t, ids, len(testBlobs)) + + // Verify no compression was applied + assert.False(t, mockServer.compressionDetected, + "Compression should not be detected when disabled") + + // Check that blobs are unmodified + for i, id := range ids { + storedBlob := mockServer.blobs[string(id)] + assert.Equal(t, testBlobs[i], storedBlob, + "Blob should be unmodified when compression is disabled") + } +} + +// TestClientOptionsCreation tests creating clients with different compression options +func TestClientOptionsCreation(t *testing.T) { + t.Run("DefaultOptions", func(t *testing.T) { + opts := DefaultClientOptions() + assert.True(t, opts.CompressionEnabled) + assert.Equal(t, compression.DefaultZstdLevel, opts.CompressionLevel) + assert.Equal(t, compression.DefaultMinCompressionRatio, opts.MinCompressionRatio) + }) + + t.Run("CustomOptions", func(t *testing.T) { + opts := ClientOptions{ + CompressionEnabled: true, + CompressionLevel: 9, + MinCompressionRatio: 0.2, + } + + assert.True(t, opts.CompressionEnabled) + assert.Equal(t, 9, opts.CompressionLevel) + assert.Equal(t, 0.2, opts.MinCompressionRatio) + }) +} + +// TestSubmitWithOptionsCompression tests compression with SubmitWithOptions +func TestSubmitWithOptionsCompression(t *testing.T) { + logger := zerolog.Nop() + mockServer := newMockRPCServer() + + // Create API with compression enabled + api := &API{ + Logger: logger, + MaxBlobSize: 1024 * 1024, + compressionEnabled: true, + compressionConfig: compression.Config{ + Enabled: true, + ZstdLevel: 3, + MinCompressionRatio: 0.1, + }, + } + + // Mock the internal submit with options function + api.Internal.SubmitWithOptions = func(ctx context.Context, blobs []da.Blob, gasPrice float64, ns []byte, options []byte) ([]da.ID, error) { + // Verify blobs are compressed + for _, blob := range blobs { + if len(blob) >= 
compression.CompressionHeaderSize { + info := compression.GetCompressionInfo(blob) + if info.IsCompressed || blob[0] == compression.FlagUncompressed { + mockServer.compressionDetected = true + break + } + } + } + return mockServer.submit(blobs), nil + } + + // Test data that should compress well + testBlobs := []da.Blob{ + bytes.Repeat([]byte("compress with options "), 100), + } + + ctx := context.Background() + ids, err := api.SubmitWithOptions(ctx, testBlobs, 1.0, []byte("test"), []byte("options")) + require.NoError(t, err) + require.Len(t, ids, len(testBlobs)) + + // Verify compression was applied + assert.True(t, mockServer.compressionDetected, + "Compression should be detected in submitted blobs with options") +} + +// TestCommitWithCompression tests that Commit handles compression +func TestCommitWithCompression(t *testing.T) { + logger := zerolog.Nop() + + // Create API with compression enabled + api := &API{ + Logger: logger, + compressionEnabled: true, + compressionConfig: compression.Config{ + Enabled: true, + ZstdLevel: 3, + MinCompressionRatio: 0.1, + }, + } + + compressionDetected := false + + // Mock the internal commit function + api.Internal.Commit = func(ctx context.Context, blobs []da.Blob, ns []byte) ([]da.Commitment, error) { + // Check if blobs are compressed + for _, blob := range blobs { + if len(blob) >= compression.CompressionHeaderSize { + info := compression.GetCompressionInfo(blob) + if info.IsCompressed || blob[0] == compression.FlagUncompressed { + compressionDetected = true + break + } + } + } + + // Return mock commitments + commitments := make([]da.Commitment, len(blobs)) + for i := range blobs { + commitments[i] = da.Commitment([]byte{byte(i)}) + } + return commitments, nil + } + + // Test data + testBlobs := []da.Blob{ + bytes.Repeat([]byte("commit this "), 100), + } + + ctx := context.Background() + commitments, err := api.Commit(ctx, testBlobs, []byte("test")) + require.NoError(t, err) + require.Len(t, commitments, len(testBlobs)) + + // Verify compression was applied + assert.True(t, compressionDetected, + "Compression should be detected in committed blobs") +} \ No newline at end of file diff --git a/pkg/cmd/run_node.go b/pkg/cmd/run_node.go index 49719bd5e6..b20b93a20d 100644 --- a/pkg/cmd/run_node.go +++ b/pkg/cmd/run_node.go @@ -6,7 +6,6 @@ import ( "fmt" "os" "os/signal" - "path/filepath" "syscall" "time" @@ -98,9 +97,6 @@ func StartNode( // Resolve signer path relative to root directory if it's not an absolute path signerPath := nodeConfig.Signer.SignerPath - if !filepath.IsAbs(signerPath) { - signerPath = filepath.Join(nodeConfig.RootDir, signerPath) - } signer, err = file.LoadFileSystemSigner(signerPath, []byte(passphrase)) if err != nil { return err diff --git a/scripts/test.go b/scripts/test.go index 43f921ce26..2563482845 100644 --- a/scripts/test.go +++ b/scripts/test.go @@ -1,6 +1,7 @@ package main import ( + "flag" "fmt" "log" "os" @@ -10,6 +11,10 @@ import ( ) func main() { + // Parse command line flags + shortMode := flag.Bool("short", true, "Run tests in short mode (skip long-running tests)") + flag.Parse() + rootDir := "." // Start from the current directory var testFailures bool err := filepath.WalkDir(rootDir, func(path string, d os.DirEntry, err error) error { @@ -28,8 +33,16 @@ func main() { // or adjust logic if root tests are also desired. // For this example, we'll run tests in all directories with go.mod. 
-			fmt.Printf("--> Running tests in: %s\n", modDir)
-			cmd := exec.Command("go", "test", "./...", "-cover")
+			// Build test command with optional -short flag
+			testArgs := []string{"test", "./...", "-cover"}
+			if *shortMode {
+				testArgs = append(testArgs, "-short")
+				fmt.Printf("--> Running tests in short mode in: %s\n", modDir)
+			} else {
+				fmt.Printf("--> Running full tests in: %s\n", modDir)
+			}
+
+			cmd := exec.Command("go", testArgs...)
 			cmd.Dir = modDir // Set the working directory for the command
 			cmd.Stdout = os.Stdout
 			cmd.Stderr = os.Stderr
diff --git a/scripts/test.mk b/scripts/test.mk
index 221fb94709..3237ea9b3d 100644
--- a/scripts/test.mk
+++ b/scripts/test.mk
@@ -4,12 +4,18 @@ clean-testcache:
 	@go clean --testcache
 .PHONY: clean-testcache
 
-## test: Running unit tests for all go.mods
+## test: Running unit tests for all go.mods (fast mode with -short flag)
 test:
-	@echo "--> Running unit tests"
-	@go run -tags='run integration' scripts/test.go
+	@echo "--> Running unit tests (fast mode)"
+	@go run -tags='run integration' scripts/test.go -short
 .PHONY: test
 
+## test-full: Running full unit tests for all go.mods (includes long-running tests)
+test-full:
+	@echo "--> Running full unit tests (including long-running tests)"
+	@go run -tags='run integration' scripts/test.go -short=false
+.PHONY: test-full
+
 ## test-all: Running all tests including Docker E2E
 test-all: test test-docker-e2e
 	@echo "--> All tests completed"
@@ -33,12 +39,18 @@ test-integration-cover:
 	@cd node && go test -mod=readonly -failfast -timeout=15m -tags='integration' -coverprofile=coverage.txt -covermode=atomic ./...
 .PHONY: test-integration-cover
 
-## test-cover: generate code coverage report.
+## test-cover: generate code coverage report (fast mode with -short flag).
 test-cover:
-	@echo "--> Running unit tests"
-	@go run -tags=cover scripts/test_cover.go
+	@echo "--> Running unit tests with coverage (fast mode)"
+	@go run -tags=cover scripts/test_cover.go -short
 .PHONY: test-cover
 
+## test-cover-full: generate code coverage report with all tests.
+test-cover-full:
+	@echo "--> Running full unit tests with coverage"
+	@go run -tags=cover scripts/test_cover.go -short=false
+.PHONY: test-cover-full
+
 ## test-evm: Running EVM tests
 test-evm:
 	@echo "--> Running EVM tests"
diff --git a/scripts/test_cover.go b/scripts/test_cover.go
index 22c5c81555..34c55968eb 100644
--- a/scripts/test_cover.go
+++ b/scripts/test_cover.go
@@ -5,6 +5,7 @@ package main
 
 import (
 	"bufio"
+	"flag"
 	"fmt"
 	"log"
 	"os"
@@ -14,6 +15,10 @@ import (
 )
 
 func main() {
+	// Parse command line flags
+	shortMode := flag.Bool("short", true, "Run tests in short mode (skip long-running tests)")
+	flag.Parse()
+
 	rootDir := "."
 
 	var coverFiles []string
@@ -38,8 +43,16 @@ func main() {
 			fullCoverProfilePath := filepath.Join(modDir, "cover.out")
 			relativeCoverProfileArg := "cover.out"
 
-			fmt.Printf("--> Running tests with coverage in: %s (profile: %s)\n", modDir, relativeCoverProfileArg)
-			cmd := exec.Command("go", "test", "./...", "-race", "-coverprofile="+relativeCoverProfileArg, "-covermode=atomic")
+			// Build test command with optional -short flag
+			testArgs := []string{"test", "./...", "-race", "-coverprofile=" + relativeCoverProfileArg, "-covermode=atomic"}
+			if *shortMode {
+				testArgs = append(testArgs, "-short")
+				fmt.Printf("--> Running tests with coverage in short mode in: %s (profile: %s)\n", modDir, relativeCoverProfileArg)
+			} else {
+				fmt.Printf("--> Running full tests with coverage in: %s (profile: %s)\n", modDir, relativeCoverProfileArg)
+			}
+
+			cmd := exec.Command("go", testArgs...)
 			cmd.Dir = modDir
 			cmd.Stdout = os.Stdout
 			cmd.Stderr = os.Stderr
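
Note: the new -short default in scripts/test.go and scripts/test_cover.go only speeds things up in packages whose long-running tests actually consult testing.Short(). A minimal sketch of such a guard, assuming a hypothetical slow test (the test name and the sleep are illustrative, not taken from this repository):

package example_test

import (
	"testing"
	"time"
)

// TestSlowScenario stands in for any long-running test. Under `make test`
// (which now passes -short) it is skipped; `make test-full` runs it because
// the scripts are invoked with -short=false.
func TestSlowScenario(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping long-running test in -short mode")
	}
	time.Sleep(2 * time.Second) // placeholder for expensive work
}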
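
Note: the compression-envelope probe (header-length check, then GetCompressionInfo, then the FlagUncompressed marker byte) is duplicated in the SubmitWithOptions and Commit mocks above. A possible shared test helper, sketched only from the identifiers used in this diff; hasCompressionEnvelope is a hypothetical name, and the shape of GetCompressionInfo's result is assumed from its usage here:

// hasCompressionEnvelope reports whether any blob carries the client's
// compression framing: either a compressed payload or the explicit
// FlagUncompressed marker byte.
func hasCompressionEnvelope(blobs []da.Blob) bool {
	for _, blob := range blobs {
		if len(blob) < compression.CompressionHeaderSize {
			continue
		}
		info := compression.GetCompressionInfo(blob)
		if info.IsCompressed || blob[0] == compression.FlagUncompressed {
			return true
		}
	}
	return false
}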