diff --git a/.github/workflows/tests-e2e.yaml b/.github/workflows/tests-e2e.yaml index 41fa5d6cb5..e994119fd2 100644 --- a/.github/workflows/tests-e2e.yaml +++ b/.github/workflows/tests-e2e.yaml @@ -47,6 +47,9 @@ jobs: with: shared-key: build + - name: Remove target folder to free disk space + run: rm -rf target + - name: Run e2e tests run: | set -e diff --git a/Cargo.lock b/Cargo.lock index 57e186d6f5..26975c4601 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -22,7 +22,7 @@ dependencies = [ "const-hex", "fs-err", "futures-util", - "reqwest 0.12.24", + "reqwest 0.12.28", "sha2 0.10.9", "tempfile", "tokio", @@ -79,10 +79,18 @@ name = "actors-umbrella" version = "0.1.0" dependencies = [ "fendermint_actor_activity_tracker", + "fendermint_actor_adm", + "fendermint_actor_blob_reader", + "fendermint_actor_blobs", + "fendermint_actor_bucket", "fendermint_actor_chainmetadata", "fendermint_actor_eam", "fendermint_actor_f3_light_client", "fendermint_actor_gas_market_eip1559", + "fendermint_actor_init", + "fendermint_actor_ipc_storage_config", + "fendermint_actor_machine", + "fendermint_actor_timehub", ] [[package]] @@ -121,8 +129,9 @@ version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d122413f284cf2d62fb1b7db97e02edb8cda96d769b16e443a4f6195e35662b0" dependencies = [ - "crypto-common 0.1.6", - "generic-array 0.14.9", + "bytes", + "crypto-common 0.1.7", + "generic-array 0.14.7", ] [[package]] @@ -156,7 +165,7 @@ version = "0.7.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "891477e0c6a8957309ee5c45a6368af3ae14bb510732d2684ffa19af310920f9" dependencies = [ - "getrandom 0.2.16", + "getrandom 0.2.17", "once_cell", "version_check", ] @@ -189,6 +198,129 @@ version = "0.2.21" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "683d7910e743518b0e34f1186f92494becacb047c7b6bf616c96772180fef923" +[[package]] +name = "alloy-json-abi" +version = "0.8.26" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4584e3641181ff073e9d5bec5b3b8f78f9749d9fb108a1cfbc4399a4a139c72a" +dependencies = [ + "alloy-primitives", + "alloy-sol-type-parser", + "serde", + "serde_json", +] + +[[package]] +name = "alloy-primitives" +version = "0.8.26" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "777d58b30eb9a4db0e5f59bc30e8c2caef877fee7dc8734cf242a51a60f22e05" +dependencies = [ + "alloy-rlp", + "bytes", + "cfg-if", + "const-hex", + "derive_more 2.1.1", + "foldhash", + "hashbrown 0.15.5", + "indexmap 2.13.0", + "itoa", + "k256 0.13.4", + "keccak-asm", + "paste", + "proptest", + "rand 0.8.5", + "ruint", + "rustc-hash 2.1.1", + "serde", + "sha3", + "tiny-keccak", +] + +[[package]] +name = "alloy-rlp" +version = "0.3.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5f70d83b765fdc080dbcd4f4db70d8d23fe4761f2f02ebfa9146b833900634b4" +dependencies = [ + "arrayvec 0.7.6", + "bytes", +] + +[[package]] +name = "alloy-sol-macro" +version = "0.8.26" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e68b32b6fa0d09bb74b4cefe35ccc8269d711c26629bc7cd98a47eeb12fe353f" +dependencies = [ + "alloy-sol-macro-expander", + "alloy-sol-macro-input", + "proc-macro-error2", + "proc-macro2", + "quote", + "syn 2.0.114", +] + +[[package]] +name = "alloy-sol-macro-expander" +version = "0.8.26" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2afe6879ac373e58fd53581636f2cce843998ae0b058ebe1e4f649195e2bd23c" +dependencies = 
[ + "alloy-json-abi", + "alloy-sol-macro-input", + "const-hex", + "heck 0.5.0", + "indexmap 2.13.0", + "proc-macro-error2", + "proc-macro2", + "quote", + "syn 2.0.114", + "syn-solidity", + "tiny-keccak", +] + +[[package]] +name = "alloy-sol-macro-input" +version = "0.8.26" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c3ba01aee235a8c699d07e5be97ba215607564e71be72f433665329bec307d28" +dependencies = [ + "alloy-json-abi", + "const-hex", + "dunce", + "heck 0.5.0", + "macro-string", + "proc-macro2", + "quote", + "serde_json", + "syn 2.0.114", + "syn-solidity", +] + +[[package]] +name = "alloy-sol-type-parser" +version = "0.8.26" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4c13fc168b97411e04465f03e632f31ef94cad1c7c8951bf799237fd7870d535" +dependencies = [ + "serde", + "winnow 0.7.14", +] + +[[package]] +name = "alloy-sol-types" +version = "0.8.26" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6e960c4b52508ef2ae1e37cae5058e905e9ae099b107900067a503f8c454036f" +dependencies = [ + "alloy-json-abi", + "alloy-primitives", + "alloy-sol-macro", + "const-hex", + "serde", +] + [[package]] name = "ambassador" version = "0.4.2" @@ -251,22 +383,22 @@ dependencies = [ [[package]] name = "anstyle-query" -version = "1.1.4" +version = "1.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e231f6134f61b71076a3eab506c379d4f36122f2af15a9ff04415ea4c3339e2" +checksum = "40c48f72fd53cd289104fc64099abca73db4166ad86ea0b4341abe65af83dadc" dependencies = [ - "windows-sys 0.60.2", + "windows-sys 0.61.2", ] [[package]] name = "anstyle-wincon" -version = "3.0.10" +version = "3.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3e0633414522a32ffaac8ac6cc8f748e090c5717661fddeea04219e2344f5f2a" +checksum = "291e6a250ff86cd4a820112fb8898808a366d8f9f58ce16d1f538353ad55747d" dependencies = [ "anstyle", "once_cell_polyfill", - "windows-sys 0.60.2", + "windows-sys 0.61.2", ] [[package]] @@ -326,6 +458,195 @@ dependencies = [ "password-hash 0.5.0", ] +[[package]] +name = "ark-ff" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6b3235cc41ee7a12aaaf2c575a2ad7b46713a8a50bda2fc3b003a04845c05dd6" +dependencies = [ + "ark-ff-asm 0.3.0", + "ark-ff-macros 0.3.0", + "ark-serialize 0.3.0", + "ark-std 0.3.0", + "derivative", + "num-bigint", + "num-traits", + "paste", + "rustc_version 0.3.3", + "zeroize", +] + +[[package]] +name = "ark-ff" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec847af850f44ad29048935519032c33da8aa03340876d351dfab5660d2966ba" +dependencies = [ + "ark-ff-asm 0.4.2", + "ark-ff-macros 0.4.2", + "ark-serialize 0.4.2", + "ark-std 0.4.0", + "derivative", + "digest 0.10.7", + "itertools 0.10.5", + "num-bigint", + "num-traits", + "paste", + "rustc_version 0.4.1", + "zeroize", +] + +[[package]] +name = "ark-ff" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a177aba0ed1e0fbb62aa9f6d0502e9b46dad8c2eab04c14258a1212d2557ea70" +dependencies = [ + "ark-ff-asm 0.5.0", + "ark-ff-macros 0.5.0", + "ark-serialize 0.5.0", + "ark-std 0.5.0", + "arrayvec 0.7.6", + "digest 0.10.7", + "educe", + "itertools 0.13.0", + "num-bigint", + "num-traits", + "paste", + "zeroize", +] + +[[package]] +name = "ark-ff-asm" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"db02d390bf6643fb404d3d22d31aee1c4bc4459600aef9113833d17e786c6e44" +dependencies = [ + "quote", + "syn 1.0.109", +] + +[[package]] +name = "ark-ff-asm" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3ed4aa4fe255d0bc6d79373f7e31d2ea147bcf486cba1be5ba7ea85abdb92348" +dependencies = [ + "quote", + "syn 1.0.109", +] + +[[package]] +name = "ark-ff-asm" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "62945a2f7e6de02a31fe400aa489f0e0f5b2502e69f95f853adb82a96c7a6b60" +dependencies = [ + "quote", + "syn 2.0.114", +] + +[[package]] +name = "ark-ff-macros" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "db2fd794a08ccb318058009eefdf15bcaaaaf6f8161eb3345f907222bac38b20" +dependencies = [ + "num-bigint", + "num-traits", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "ark-ff-macros" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7abe79b0e4288889c4574159ab790824d0033b9fdcb2a112a3182fac2e514565" +dependencies = [ + "num-bigint", + "num-traits", + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "ark-ff-macros" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09be120733ee33f7693ceaa202ca41accd5653b779563608f1234f78ae07c4b3" +dependencies = [ + "num-bigint", + "num-traits", + "proc-macro2", + "quote", + "syn 2.0.114", +] + +[[package]] +name = "ark-serialize" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d6c2b318ee6e10f8c2853e73a83adc0ccb88995aa978d8a3408d492ab2ee671" +dependencies = [ + "ark-std 0.3.0", + "digest 0.9.0", +] + +[[package]] +name = "ark-serialize" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "adb7b85a02b83d2f22f89bd5cac66c9c89474240cb6207cb1efc16d098e822a5" +dependencies = [ + "ark-std 0.4.0", + "digest 0.10.7", + "num-bigint", +] + +[[package]] +name = "ark-serialize" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f4d068aaf107ebcd7dfb52bc748f8030e0fc930ac8e360146ca54c1203088f7" +dependencies = [ + "ark-std 0.5.0", + "arrayvec 0.7.6", + "digest 0.10.7", + "num-bigint", +] + +[[package]] +name = "ark-std" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1df2c09229cbc5a028b1d70e00fdb2acee28b1055dfb5ca73eea49c5a25c4e7c" +dependencies = [ + "num-traits", + "rand 0.8.5", +] + +[[package]] +name = "ark-std" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "94893f1e0c6eeab764ade8dc4c0db24caf4fe7cbbaafc0eba0a9030f447b5185" +dependencies = [ + "num-traits", + "rand 0.8.5", +] + +[[package]] +name = "ark-std" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "246a225cc6131e9ee4f24619af0f19d67761fff15d7ccc22e42b80846e69449a" +dependencies = [ + "num-traits", + "rand 0.8.5", +] + [[package]] name = "arrayref" version = "0.3.9" @@ -383,7 +704,7 @@ checksum = "965c2d33e53cb6b267e148a4cb0760bc01f4904c1cd4bb4002a085bb016d1490" dependencies = [ "proc-macro2", "quote", - "syn 2.0.108", + "syn 2.0.114", "synstructure 0.13.2", ] @@ -395,7 +716,7 @@ checksum = "7b18050c2cd6fe86c3a76584ef5e0baf286d038cda203eb6223df2cc413565f7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.108", + "syn 2.0.114", ] [[package]] @@ -437,6 +758,19 @@ 
dependencies = [ "pin-project-lite", ] +[[package]] +name = "async-compat" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a1ba85bc55464dcbf728b56d97e119d673f4cf9062be330a9a26f3acf504a590" +dependencies = [ + "futures-core", + "futures-io", + "once_cell", + "pin-project-lite", + "tokio", +] + [[package]] name = "async-executor" version = "1.13.3" @@ -457,7 +791,7 @@ version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8034a681df4aed8b8edbd7fbe472401ecf009251c8b40556b304567052e294c5" dependencies = [ - "async-lock 3.4.1", + "async-lock 3.4.2", "blocking", "futures-lite 2.6.1", ] @@ -471,7 +805,7 @@ dependencies = [ "async-channel 2.5.0", "async-executor", "async-io 2.6.0", - "async-lock 3.4.1", + "async-lock 3.4.2", "blocking", "futures-lite 2.6.1", "once_cell", @@ -510,7 +844,7 @@ dependencies = [ "futures-lite 2.6.1", "parking", "polling 3.11.0", - "rustix 1.1.2", + "rustix 1.1.3", "slab", "windows-sys 0.61.2", ] @@ -526,9 +860,9 @@ dependencies = [ [[package]] name = "async-lock" -version = "3.4.1" +version = "3.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5fd03604047cee9b6ce9de9f70c6cd540a0520c813cbd49bae61f33ab80ed1dc" +checksum = "290f7f2596bd5b78a9fec8088ccd89180d7f9f55b94b0576823bbbdc72ee8311" dependencies = [ "event-listener 5.4.1", "event-listener-strategy", @@ -554,14 +888,14 @@ checksum = "fc50921ec0055cdd8a16de48773bfeec5c972598674347252c0399676be7da75" dependencies = [ "async-channel 2.5.0", "async-io 2.6.0", - "async-lock 3.4.1", + "async-lock 3.4.2", "async-signal", "async-task", "blocking", "cfg-if", "event-listener 5.4.1", "futures-lite 2.6.1", - "rustix 1.1.2", + "rustix 1.1.3", ] [[package]] @@ -572,7 +906,7 @@ checksum = "3b43422f69d8ff38f95f1b2bb76517c91589a924d1559a0e935d7c8ce0274c11" dependencies = [ "proc-macro2", "quote", - "syn 2.0.108", + "syn 2.0.114", ] [[package]] @@ -582,12 +916,12 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "43c070bbf59cd3570b6b2dd54cd772527c7c3620fce8be898406dd3ed6adc64c" dependencies = [ "async-io 2.6.0", - "async-lock 3.4.1", + "async-lock 3.4.2", "atomic-waker", "cfg-if", "futures-core", "futures-io", - "rustix 1.1.2", + "rustix 1.1.3", "signal-hook-registry", "slab", "windows-sys 0.61.2", @@ -603,7 +937,7 @@ dependencies = [ "async-channel 1.9.0", "async-global-executor", "async-io 2.6.0", - "async-lock 3.4.1", + "async-lock 3.4.2", "async-process", "crossbeam-utils", "futures-channel", @@ -631,7 +965,7 @@ dependencies = [ "async-trait", "futures-io", "futures-util", - "hickory-resolver", + "hickory-resolver 0.24.4", "pin-utils", "socket2 0.5.10", ] @@ -660,7 +994,7 @@ checksum = "9035ad2d096bed7955a320ee7e2230574d28fd3c3a0f186cbea1ff3c7eed5dbb" dependencies = [ "proc-macro2", "quote", - "syn 2.0.108", + "syn 2.0.114", ] [[package]] @@ -687,7 +1021,7 @@ checksum = "b6d7b9decdf35d8908a7e3ef02f64c5e9b1695e230154c0e8de3969142d9b94c" dependencies = [ "futures", "pharos", - "rustc_version", + "rustc_version 0.4.1", ] [[package]] @@ -716,6 +1050,15 @@ dependencies = [ "pin-project-lite", ] +[[package]] +name = "atomic-polyfill" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8cf2bce30dfe09ef0bfaef228b9d414faaf7e563035494d7fe092dba54b300f4" +dependencies = [ + "critical-section", +] + [[package]] name = "atomic-waker" version = "1.1.2" @@ -733,6 +1076,18 @@ dependencies = [ "url", ] +[[package]] +name = "attohttpc" +version = 
"0.30.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "16e2cdb6d5ed835199484bb92bb8b3edd526effe995c61732580439c1a67e2e9" +dependencies = [ + "base64 0.22.1", + "http 1.4.0", + "log", + "url", +] + [[package]] name = "atty" version = "0.2.14" @@ -752,7 +1107,7 @@ checksum = "ffdcb70bdbc4d478427380519163274ac86e52916e10f0a8889adf0f96d3fee7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.108", + "syn 2.0.114", ] [[package]] @@ -813,6 +1168,17 @@ dependencies = [ "tower-service", ] +[[package]] +name = "backon" +version = "1.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cffb0e931875b666fc4fcb20fee52e9bbd1ef836fd9e9e04ec21555f9f85f7ef" +dependencies = [ + "fastrand 2.3.0", + "gloo-timers 0.3.0", + "tokio", +] + [[package]] name = "backtrace" version = "0.3.71" @@ -828,6 +1194,23 @@ dependencies = [ "rustc-demangle", ] +[[package]] +name = "bao-tree" +version = "0.15.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ff16d65e48353db458be63ee395c03028f24564fd48668389bd65fd945f5ac36" +dependencies = [ + "blake3", + "bytes", + "futures-lite 2.6.1", + "genawaiter", + "iroh-io", + "positioned-io", + "range-collections", + "self_cell", + "smallvec", +] + [[package]] name = "base-x" version = "0.2.11" @@ -882,9 +1265,9 @@ checksum = "72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6" [[package]] name = "base64ct" -version = "1.8.0" +version = "1.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "55248b47b0caf0546f7988906588779981c43bb1bc9d0c44087278f80cdb44ba" +checksum = "2af50177e190e07a26ab74f8b1efbfe2ef87da2116221318cb1c2e82baf7de06" [[package]] name = "bech32" @@ -909,7 +1292,7 @@ version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b2c9a1b2f748c59938bc72165ebdf34efffeecee9cfbe0bb7d6b01aea21cd523" dependencies = [ - "blake2s_simd 1.0.3", + "blake2s_simd 1.0.4", "byteorder", "ff 0.13.1", "serde", @@ -924,8 +1307,8 @@ checksum = "5c41bd83b8437856d267eb311de13dcd9bff9077cc5ba35c7ec886070dea8a45" dependencies = [ "bellpepper-core", "bincode", - "blake2s_simd 1.0.3", - "blstrs", + "blake2s_simd 1.0.4", + "blstrs 0.7.1", "byteorder", "crossbeam-channel", "digest 0.10.7", @@ -935,7 +1318,7 @@ dependencies = [ "group 0.13.0", "log", "memmap2", - "pairing", + "pairing 0.23.0", "rand 0.8.5", "rand_core 0.6.4", "rayon", @@ -945,6 +1328,12 @@ dependencies = [ "thiserror 1.0.69", ] +[[package]] +name = "binary-merge" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "597bb81c80a54b6a4381b23faba8d7774b144c94cbd1d6fe3f1329bd776554ab" + [[package]] name = "bincode" version = "1.3.3" @@ -972,7 +1361,7 @@ dependencies = [ "regex", "rustc-hash 1.1.0", "shlex", - "syn 2.0.108", + "syn 2.0.114", ] [[package]] @@ -990,7 +1379,7 @@ dependencies = [ "regex", "rustc-hash 2.1.1", "shlex", - "syn 2.0.108", + "syn 2.0.114", ] [[package]] @@ -1002,6 +1391,15 @@ dependencies = [ "bit-vec 0.6.3", ] +[[package]] +name = "bit-set" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "08807e080ed7f9d5433fa9b275196cfc35414f66a0c79d864dc51a0d825231a3" +dependencies = [ + "bit-vec 0.8.0", +] + [[package]] name = "bit-vec" version = "0.4.4" @@ -1014,6 +1412,12 @@ version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "349f9b6a179ed607305526ca489b34ad0a41aed5f7980fa90eb03160b69598fb" +[[package]] +name = "bit-vec" 
+version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5e764a1d40d510daf35e07be9eb06e75770908c27d411ee6c92109c9840eaaf7" + [[package]] name = "bitflags" version = "1.3.2" @@ -1064,18 +1468,18 @@ name = "blake2" version = "0.11.0-rc.2" source = "git+https://github.com/huitseeker/hashes.git?branch=blake2x-pr#4d3debf264a45da9e33d52645eb6ee9963336f66" dependencies = [ - "digest 0.11.0-rc.3", + "digest 0.11.0-rc.2", ] [[package]] name = "blake2b_simd" -version = "1.0.3" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06e903a20b159e944f91ec8499fe1e55651480c541ea0a584f5d967c49ad9d99" +checksum = "b79834656f71332577234b50bfc009996f7449e0c056884e6a02492ded0ca2f3" dependencies = [ "arrayref", "arrayvec 0.7.6", - "constant_time_eq 0.3.1", + "constant_time_eq 0.4.2", ] [[package]] @@ -1091,26 +1495,27 @@ dependencies = [ [[package]] name = "blake2s_simd" -version = "1.0.3" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e90f7deecfac93095eb874a40febd69427776e24e1bd7f87f33ac62d6f0174df" +checksum = "ee29928bad1e3f94c9d1528da29e07a1d3d04817ae8332de1e8b846c8439f4b3" dependencies = [ "arrayref", "arrayvec 0.7.6", - "constant_time_eq 0.3.1", + "constant_time_eq 0.4.2", ] [[package]] name = "blake3" -version = "1.8.2" +version = "1.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3888aaa89e4b2a40fca9848e400f6a658a5a3978de7be858e209cafa8be9a4a0" +checksum = "2468ef7d57b3fb7e16b576e8377cdbde2320c60e1491e961d11da40fc4f02a2d" dependencies = [ "arrayref", "arrayvec 0.7.6", "cc", "cfg-if", - "constant_time_eq 0.3.1", + "constant_time_eq 0.4.2", + "cpufeatures", ] [[package]] @@ -1119,7 +1524,7 @@ version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4152116fd6e9dadb291ae18fc1ec3575ed6d84c29642d97890f4b4a3417297e4" dependencies = [ - "generic-array 0.14.9", + "generic-array 0.14.7", ] [[package]] @@ -1128,14 +1533,14 @@ version = "0.10.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3078c7629b62d3f0439517fa394996acacc5cbc91c5a20d8c658e77abd503a71" dependencies = [ - "generic-array 0.14.9", + "generic-array 0.14.7", ] [[package]] name = "block-buffer" -version = "0.11.0-rc.5" +version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e9ef36a6fcdb072aa548f3da057640ec10859eb4e91ddf526ee648d50c76a949" +checksum = "96eb4cdd6cf1b31d671e9efe75c5d1ec614776856cefbe109ca373554a6d514f" dependencies = [ "hybrid-array", ] @@ -1146,7 +1551,7 @@ version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a8894febbff9f758034a5b8e12d87918f56dfc64a8e1fe757d65e29041538d93" dependencies = [ - "generic-array 0.14.9", + "generic-array 0.14.7", ] [[package]] @@ -1171,6 +1576,22 @@ dependencies = [ "bit-vec 0.4.4", ] +[[package]] +name = "bls-signatures" +version = "0.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1659e487883b92123806f16ff3568dd57563991231d187d29b23eea5d910e800" +dependencies = [ + "blst", + "blstrs 0.6.2", + "ff 0.12.1", + "group 0.12.1", + "pairing 0.22.0", + "rand_core 0.6.4", + "subtle", + "thiserror 1.0.69", +] + [[package]] name = "bls-signatures" version = "0.15.0" @@ -1178,10 +1599,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ecc7fce0356b52c2483bb6188cc8bdc11add526bce75d1a44e5e5d889a6ab008" dependencies = [ "blst", - 
"blstrs", + "blstrs 0.7.1", "ff 0.13.1", "group 0.13.0", - "pairing", + "pairing 0.23.0", "rand_core 0.6.4", "subtle", "thiserror 1.0.69", @@ -1199,6 +1620,22 @@ dependencies = [ "zeroize", ] +[[package]] +name = "blstrs" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1ff3694b352ece02eb664a09ffb948ee69b35afa2e6ac444a6b8cb9d515deebd" +dependencies = [ + "blst", + "byte-slice-cast", + "ff 0.12.1", + "group 0.12.1", + "pairing 0.22.0", + "rand_core 0.6.4", + "serde", + "subtle", +] + [[package]] name = "blstrs" version = "0.7.1" @@ -1210,7 +1647,7 @@ dependencies = [ "ec-gpu", "ff 0.13.1", "group 0.13.0", - "pairing", + "pairing 0.23.0", "rand_core 0.6.4", "serde", "subtle", @@ -1240,7 +1677,7 @@ dependencies = [ "serde_urlencoded", "thiserror 1.0.69", "tokio", - "tokio-util 0.7.17", + "tokio-util 0.7.18", "url", "winapi", ] @@ -1253,9 +1690,15 @@ checksum = "b58071e8fd9ec1e930efd28e3a90c1251015872a2ce49f81f36421b86466932e" dependencies = [ "serde", "serde_repr", - "serde_with 3.15.1", + "serde_with 3.16.1", ] +[[package]] +name = "bounded-integer" +version = "0.5.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "102dbef1187b1893e6dfe05a774e79fd52265f49f214f6879c8ff49f52c8188b" + [[package]] name = "bs58" version = "0.5.1" @@ -1292,9 +1735,9 @@ dependencies = [ [[package]] name = "bumpalo" -version = "3.19.0" +version = "3.19.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "46c5e41b57b8bba42a04676d81cb89e9ee8e859a1a66f80a5a72e1cb76b34d43" +checksum = "5dd9dc738b7a8311c7ade152424974d8115f2cdad61e8dab8dac9f2362298510" dependencies = [ "allocator-api2", ] @@ -1319,9 +1762,9 @@ checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" [[package]] name = "bytes" -version = "1.10.1" +version = "1.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d71b6127be86fdcfddb610f7182ac57211d4b18a3e9c82eb2d17662f2227ad6a" +checksum = "b35204fbdc0b3f4446b89fc1ac2cf84a8a68971995d0bf2e925ec7cd960f9cb3" dependencies = [ "serde", ] @@ -1348,9 +1791,9 @@ dependencies = [ [[package]] name = "camino" -version = "1.2.1" +version = "1.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "276a59bf2b2c967788139340c9f0c5b12d7fd6630315c15c217e559de85d2609" +checksum = "e629a66d692cb9ff1a1c664e41771b3dcaf961985a9774c0eb0bd1b51cf60a48" dependencies = [ "serde_core", ] @@ -1372,7 +1815,7 @@ checksum = "2d886547e41f740c616ae73108f6eb70afe6d940c7bc697cb30f13daec073037" dependencies = [ "camino", "cargo-platform", - "semver", + "semver 1.0.27", "serde", "serde_json", "thiserror 1.0.69", @@ -1386,7 +1829,7 @@ checksum = "dd5eb614ed4c27c5d706420e4320fbe3216ab31fa1c33cd8246ac36dae4479ba" dependencies = [ "camino", "cargo-platform", - "semver", + "semver 1.0.27", "serde", "serde_json", "thiserror 2.0.17", @@ -1421,9 +1864,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.2.44" +version = "1.2.52" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37521ac7aabe3d13122dc382493e20c9416f299d2ccd5b3a5340a2570cdeb0f3" +checksum = "cd4932aefd12402b36c60956a4fe0035421f544799057659ff86f923657aada3" dependencies = [ "find-msvc-tools", "jobserver", @@ -1539,7 +1982,7 @@ version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "773f3b9af64447d2ce9850330c473515014aa235e6a783b02db81ff39e4a3dad" dependencies = [ - "crypto-common 0.1.6", + "crypto-common 0.1.7", "inout", "zeroize", ] 
@@ -1572,9 +2015,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.51" +version = "4.5.54" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c26d721170e0295f191a69bd9a1f93efcdb0aff38684b61ab5750468972e5f5" +checksum = "c6e6ff9dcd79cff5cd969a17a545d79e84ab086e444102a591e288a8aa3ce394" dependencies = [ "clap_builder", "clap_derive", @@ -1582,9 +2025,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.51" +version = "4.5.54" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75835f0c7bf681bfd05abe44e965760fea999a5286c6eb2d59883634fd02011a" +checksum = "fa42cf4d2b7a41bc8f663a7cab4031ebafa1bf3875705bfaf8466dc60ab52c00" dependencies = [ "anstream", "anstyle", @@ -1594,11 +2037,11 @@ dependencies = [ [[package]] name = "clap_complete" -version = "4.5.60" +version = "4.5.65" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e602857739c5a4291dfa33b5a298aeac9006185229a700e5810a3ef7272d971" +checksum = "430b4dc2b5e3861848de79627b2bedc9f3342c7da5173a14eaa5d0f8dc18ae5d" dependencies = [ - "clap 4.5.51", + "clap 4.5.54", ] [[package]] @@ -1610,14 +2053,14 @@ dependencies = [ "heck 0.5.0", "proc-macro2", "quote", - "syn 2.0.108", + "syn 2.0.114", ] [[package]] name = "clap_lex" -version = "0.7.6" +version = "0.7.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1d728cc89cf3aee9ff92b05e62b19ee65a02b5702cff7d5a377e32c6ae29d8d" +checksum = "c3e64b0cc0439b12df2fa678eae89a1c56a529fd067a9115f7827f1fffd22b32" [[package]] name = "cobs" @@ -1670,7 +2113,7 @@ dependencies = [ "bech32", "bs58", "digest 0.10.7", - "generic-array 0.14.9", + "generic-array 0.14.7", "hex", "ripemd", "serde", @@ -1842,9 +2285,9 @@ checksum = "245097e9a4535ee1e3e3931fcfcd55a796a44c643e8596ff6566d68f09b87bbc" [[package]] name = "constant_time_eq" -version = "0.3.1" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7c74b8349d32d297c9134b8c88677813a227df8f779daa29bfc29c183fe3dca6" +checksum = "3d52eff69cd5e647efe296129160853a42795992097e8af39800e1060caeea9b" [[package]] name = "contracts-artifacts" @@ -1856,12 +2299,22 @@ dependencies = [ ] [[package]] -name = "convert_case" -version = "0.7.1" +name = "convert_case" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "633458d4ef8c78b72454de2d54fd6ab2e60f9e02be22f3c6104cdc8a4e0fceb9" +dependencies = [ + "unicode-segmentation", +] + +[[package]] +name = "cordyceps" +version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb402b8d4c85569410425650ce3eddc7d698ed96d39a73f941b08fb63082f1e7" +checksum = "688d7fbb8092b8de775ef2536f36c8c31f2bc4006ece2e8d8ad2d17d00ce0a2a" dependencies = [ - "unicode-segmentation", + "loom", + "tracing", ] [[package]] @@ -2034,6 +2487,21 @@ dependencies = [ "target-lexicon", ] +[[package]] +name = "crc" +version = "3.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5eb8a2a1cd12ab0d987a5d5e825195d372001a4094a0376319d5a0ad71c1ba0d" +dependencies = [ + "crc-catalog", +] + +[[package]] +name = "crc-catalog" +version = "2.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "19d374276b40fb8bbdee95aef7c7fa6b5316ec764510eb64b8dd0e2ed0d7e7f5" + [[package]] name = "crc32fast" version = "1.5.0" @@ -2043,6 +2511,12 @@ dependencies = [ "cfg-if", ] +[[package]] +name = "critical-section" +version = "1.2.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "790eea4361631c5e7d22598ecd5723ff611904e3344ce8720784c93e3d83d40b" + [[package]] name = "crossbeam" version = "0.8.4" @@ -2111,7 +2585,7 @@ version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ef2b4b23cddf68b89b8f8069890e8c270d54e2d5fe1b143820234805e4cb17ef" dependencies = [ - "generic-array 0.14.9", + "generic-array 0.14.7", "rand_core 0.6.4", "subtle", "zeroize", @@ -2123,7 +2597,7 @@ version = "0.5.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0dc92fb57ca44df6db8059111ab3af99a63d5d0f8375d9972e319a379c6bab76" dependencies = [ - "generic-array 0.14.9", + "generic-array 0.14.7", "rand_core 0.6.4", "subtle", "zeroize", @@ -2131,11 +2605,11 @@ dependencies = [ [[package]] name = "crypto-common" -version = "0.1.6" +version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3" +checksum = "78c8292055d1c1df0cce5d180393dc8cce0abec0a7102adb6c7b1eef6016d60a" dependencies = [ - "generic-array 0.14.9", + "generic-array 0.14.7", "rand_core 0.6.4", "typenum", ] @@ -2143,8 +2617,7 @@ dependencies = [ [[package]] name = "crypto-common" version = "0.2.0-rc.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a8235645834fbc6832939736ce2f2d08192652269e11010a6240f61b908a1c6" +source = "git+https://github.com/RustCrypto/traits?tag=digest-v0.11.0-rc.2#64bcc11e82873ab50a17a38f1322e8b27aa805ab" dependencies = [ "hybrid-array", ] @@ -2155,8 +2628,40 @@ version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b584a330336237c1eecd3e94266efb216c56ed91225d634cb2991c5f3fd1aeab" dependencies = [ - "generic-array 0.14.9", + "generic-array 0.14.7", + "subtle", +] + +[[package]] +name = "crypto_box" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "16182b4f39a82ec8a6851155cc4c0cda3065bb1db33651726a29e1951de0f009" +dependencies = [ + "aead", + "chacha20", + "crypto_secretbox", + "curve25519-dalek", + "salsa20", + "serdect", + "subtle", + "zeroize", +] + +[[package]] +name = "crypto_secretbox" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b9d6cf87adf719ddf43a805e92c6870a531aedda35ff640442cbaf8674e141e1" +dependencies = [ + "aead", + "chacha20", + "cipher", + "generic-array 0.14.7", + "poly1305", + "salsa20", "subtle", + "zeroize", ] [[package]] @@ -2207,7 +2712,9 @@ dependencies = [ "curve25519-dalek-derive", "digest 0.10.7", "fiat-crypto", - "rustc_version", + "rand_core 0.6.4", + "rustc_version 0.4.1", + "serde", "subtle", "zeroize", ] @@ -2220,7 +2727,7 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.108", + "syn 2.0.114", ] [[package]] @@ -2257,7 +2764,7 @@ dependencies = [ "proc-macro2", "quote", "strsim 0.11.1", - "syn 2.0.108", + "syn 2.0.114", ] [[package]] @@ -2268,7 +2775,7 @@ checksum = "fc34b93ccb385b40dc71c6fceac4b2ad23662c7eeb248cf10d529b7e055b6ead" dependencies = [ "darling_core", "quote", - "syn 2.0.108", + "syn 2.0.114", ] [[package]] @@ -2286,15 +2793,15 @@ dependencies = [ [[package]] name = "data-encoding" -version = "2.9.0" +version = "2.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a2330da5de22e8a3cb63252ce2abb30116bf5265e89c0e01bc17015ce30a476" +checksum = 
"d7a1e2f27636f116493b8b860f5546edb47c8d8f8ea73e1d2a20be88e28d1fea" [[package]] name = "data-encoding-macro" -version = "0.1.18" +version = "0.1.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "47ce6c96ea0102f01122a185683611bd5ac8d99e62bc59dd12e6bda344ee673d" +checksum = "8142a83c17aa9461d637e649271eae18bf2edd00e91f2e105df36c3c16355bdb" dependencies = [ "data-encoding", "data-encoding-macro-internal", @@ -2302,12 +2809,12 @@ dependencies = [ [[package]] name = "data-encoding-macro-internal" -version = "0.1.16" +version = "0.1.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d162beedaa69905488a8da94f5ac3edb4dd4788b732fadb7bd120b2625c1976" +checksum = "7ab67060fc6b8ef687992d439ca0fa36e7ed17e9a0b16b25b601e8757df720de" dependencies = [ "data-encoding", - "syn 2.0.108", + "syn 2.0.114", ] [[package]] @@ -2327,6 +2834,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e7c1832837b905bbfb5101e07cc24c8deddf52f93225eee6ead5f4d63d53ddcb" dependencies = [ "const-oid", + "der_derive", + "pem-rfc7468", "zeroize", ] @@ -2344,6 +2853,17 @@ dependencies = [ "rusticata-macros", ] +[[package]] +name = "der_derive" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8034092389675178f570469e6c3b0465d3d30b4505c294a6550db47f3c17ad18" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.114", +] + [[package]] name = "deranged" version = "0.5.5" @@ -2354,6 +2874,17 @@ dependencies = [ "serde_core", ] +[[package]] +name = "derivative" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fcc3dd5e9e9c0b295d6e1e4d811fb6f157d5ffd784b8d202fc62eac8035a770b" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", +] + [[package]] name = "derive_arbitrary" version = "1.4.2" @@ -2362,7 +2893,7 @@ checksum = "1e567bd82dcff979e4b03460c307b3cdc9e96fde3d73bed1496d2bc75d9dd62a" dependencies = [ "proc-macro2", "quote", - "syn 2.0.108", + "syn 2.0.114", ] [[package]] @@ -2376,11 +2907,11 @@ dependencies = [ [[package]] name = "derive_more" -version = "2.0.1" +version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "093242cf7570c207c83073cf82f79706fe7b8317e98620a47d5be7c3d8497678" +checksum = "d751e9e49156b02b44f9c1815bcb94b984cdcc4396ecc32521c739452808b134" dependencies = [ - "derive_more-impl 2.0.1", + "derive_more-impl 2.1.1", ] [[package]] @@ -2391,22 +2922,30 @@ checksum = "cb7330aeadfbe296029522e6c40f315320aba36fc43a5b3632f3795348f3bd22" dependencies = [ "proc-macro2", "quote", - "syn 2.0.108", + "syn 2.0.114", + "unicode-xid", ] [[package]] name = "derive_more-impl" -version = "2.0.1" +version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bda628edc44c4bb645fbe0f758797143e4e07926f7ebf4e9bdfbd3d2ce621df3" +checksum = "799a97264921d8623a957f6c3b9011f3b5492f557bbb7a5a19b7fa6d06ba8dcb" dependencies = [ "convert_case", "proc-macro2", "quote", - "syn 2.0.108", + "rustc_version 0.4.1", + "syn 2.0.114", "unicode-xid", ] +[[package]] +name = "diatomic-waker" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ab03c107fafeb3ee9f5925686dbb7a73bc76e3932abb0d2b365cb64b169cf04c" + [[package]] name = "diff" version = "0.1.13" @@ -2419,7 +2958,7 @@ version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d3dd60d1080a57a05ab032377049e0591415d2b31afd7028356dbf3cc6dcb066" dependencies = 
[ - "generic-array 0.14.9", + "generic-array 0.14.7", ] [[package]] @@ -2430,17 +2969,16 @@ checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292" dependencies = [ "block-buffer 0.10.4", "const-oid", - "crypto-common 0.1.6", + "crypto-common 0.1.7", "subtle", ] [[package]] name = "digest" -version = "0.11.0-rc.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dac89f8a64533a9b0eaa73a68e424db0fb1fd6271c74cc0125336a05f090568d" +version = "0.11.0-rc.2" +source = "git+https://github.com/RustCrypto/traits?tag=digest-v0.11.0-rc.2#64bcc11e82873ab50a17a38f1322e8b27aa805ab" dependencies = [ - "block-buffer 0.11.0-rc.5", + "block-buffer 0.11.0", "crypto-common 0.2.0-rc.4", "subtle", ] @@ -2506,7 +3044,18 @@ checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.108", + "syn 2.0.114", +] + +[[package]] +name = "dlopen2" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09b4f5f101177ff01b8ec4ecc81eead416a8aa42819a2869311b3420fa114ffa" +dependencies = [ + "libc", + "once_cell", + "winapi", ] [[package]] @@ -2515,11 +3064,20 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0688c2a7f92e427f44895cd63841bff7b29f8d7a1648b9e7e07a4a365b2e1257" +[[package]] +name = "document-features" +version = "0.2.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d4b8a88685455ed29a21542a33abd9cb6510b6b129abadabdcef0f4c55bc8f61" +dependencies = [ + "litrs", +] + [[package]] name = "dtoa" -version = "1.0.10" +version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d6add3b8cff394282be81f3fc1a0605db594ed69890078ca6e2cab1c408bcf04" +checksum = "4c3cf4824e2d5f025c7b531afcb2325364084a16806f6d47fbc1f5fbd9960590" [[package]] name = "dunce" @@ -2594,6 +3152,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "115531babc129696a58c64a4fef0a8bf9e9698629fb97e9e40767d235cfbcd53" dependencies = [ "pkcs8 0.10.2", + "serde", "signature 2.2.0", ] @@ -2618,12 +3177,25 @@ checksum = "70e796c081cee67dc755e1a36a0a172b897fab85fc3f6bc48307991f64e4eca9" dependencies = [ "curve25519-dalek", "ed25519", + "rand_core 0.6.4", "serde", "sha2 0.10.9", "subtle", "zeroize", ] +[[package]] +name = "educe" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d7bc049e1bd8cdeb31b68bbd586a9464ecf9f3944af3958a7a9d0f8b9799417" +dependencies = [ + "enum-ordinalize", + "proc-macro2", + "quote", + "syn 2.0.114", +] + [[package]] name = "either" version = "1.15.0" @@ -2641,7 +3213,7 @@ dependencies = [ "der 0.6.1", "digest 0.10.7", "ff 0.12.1", - "generic-array 0.14.9", + "generic-array 0.14.7", "group 0.12.1", "pkcs8 0.9.0", "rand_core 0.6.4", @@ -2660,7 +3232,7 @@ dependencies = [ "crypto-bigint 0.5.5", "digest 0.10.7", "ff 0.13.1", - "generic-array 0.14.9", + "generic-array 0.14.7", "group 0.13.0", "pkcs8 0.10.2", "rand_core 0.6.4", @@ -2726,7 +3298,47 @@ dependencies = [ "heck 0.5.0", "proc-macro2", "quote", - "syn 2.0.108", + "syn 2.0.114", +] + +[[package]] +name = "enum-ordinalize" +version = "4.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4a1091a7bb1f8f2c4b28f1fe2cef4980ca2d410a3d727d67ecc3178c9b0800f0" +dependencies = [ + "enum-ordinalize-derive", +] + +[[package]] +name = "enum-ordinalize-derive" +version = "4.3.2" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ca9601fb2d62598ee17836250842873a413586e5d7ed88b356e38ddbb0ec631" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.114", +] + +[[package]] +name = "enumflags2" +version = "0.7.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1027f7680c853e056ebcec683615fb6fbbc07dbaa13b4d5d9442b146ded4ecef" +dependencies = [ + "enumflags2_derive", +] + +[[package]] +name = "enumflags2_derive" +version = "0.7.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "67c78a4d8fdf9953a5c9d458f9efe940fd97a0cab0941c075a813ac594733827" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.114", ] [[package]] @@ -2919,7 +3531,7 @@ dependencies = [ "reqwest 0.11.27", "serde", "serde_json", - "syn 2.0.108", + "syn 2.0.114", "toml 0.8.23", "walkdir", ] @@ -2937,7 +3549,7 @@ dependencies = [ "proc-macro2", "quote", "serde_json", - "syn 2.0.108", + "syn 2.0.114", ] [[package]] @@ -2953,7 +3565,7 @@ dependencies = [ "const-hex", "elliptic-curve 0.13.8", "ethabi", - "generic-array 0.14.9", + "generic-array 0.14.7", "k256 0.13.4", "num_enum", "once_cell", @@ -2963,7 +3575,7 @@ dependencies = [ "serde", "serde_json", "strum 0.26.3", - "syn 2.0.108", + "syn 2.0.114", "tempfile", "thiserror 1.0.69", "tiny-keccak", @@ -2979,7 +3591,7 @@ dependencies = [ "chrono", "ethers-core", "reqwest 0.11.27", - "semver", + "semver 1.0.27", "serde", "serde_json", "thiserror 1.0.69", @@ -3089,7 +3701,7 @@ dependencies = [ "path-slash", "rayon", "regex", - "semver", + "semver 1.0.27", "serde", "serde_json", "solang-parser", @@ -3131,9 +3743,9 @@ dependencies = [ [[package]] name = "execute" -version = "0.2.13" +version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3a82608ee96ce76aeab659e9b8d3c2b787bffd223199af88c674923d861ada10" +checksum = "0be3cc61fe54b4cae4463cdbda0401978ffe19d4dcc7a5201a312cddf64726dd" dependencies = [ "execute-command-macro", "execute-command-tokens", @@ -3142,29 +3754,29 @@ dependencies = [ [[package]] name = "execute-command-macro" -version = "0.1.9" +version = "0.1.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90dec53d547564e911dc4ff3ecb726a64cf41a6fa01a2370ebc0d95175dd08bd" +checksum = "b3e748391d89b43c52decaed8645b4a83a09d14f5ee868071c6813389e9e7036" dependencies = [ "execute-command-macro-impl", ] [[package]] name = "execute-command-macro-impl" -version = "0.1.10" +version = "0.1.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce8cd46a041ad005ab9c71263f9a0ff5b529eac0fe4cc9b4a20f4f0765d8cf4b" +checksum = "57dd896da3fbb77138059b015c013459d96063c66bcdd3b9094ff2e9d3f19a47" dependencies = [ "execute-command-tokens", "quote", - "syn 2.0.108", + "syn 2.0.114", ] [[package]] name = "execute-command-tokens" -version = "0.1.7" +version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "69dc321eb6be977f44674620ca3aa21703cb20ffbe560e1ae97da08401ffbcad" +checksum = "729eda2ea2f6c5ef85150c85a9b2ce0a8e01f040e59cdb32521eaa6c840c9d51" [[package]] name = "extensions" @@ -3213,38 +3825,184 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "37909eebbb50d72f9059c3b6d82c0463f2ff062c9e95845c43a6c9c0355411be" [[package]] -name = "fdlimit" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e182f7dbc2ef73d9ef67351c5fbbea084729c48362d3ce9dd44c28e32e277fe5" +name = "fastrlp" +version 
= "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "139834ddba373bbdd213dffe02c8d110508dcf1726c2be27e8d1f7d7e1856418" +dependencies = [ + "arrayvec 0.7.6", + "auto_impl", + "bytes", +] + +[[package]] +name = "fastrlp" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ce8dba4714ef14b8274c371879b175aa55b16b30f269663f19d576f380018dc4" +dependencies = [ + "arrayvec 0.7.6", + "auto_impl", + "bytes", +] + +[[package]] +name = "fdlimit" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e182f7dbc2ef73d9ef67351c5fbbea084729c48362d3ce9dd44c28e32e277fe5" +dependencies = [ + "libc", + "thiserror 1.0.69", +] + +[[package]] +name = "fendermint_abci" +version = "0.1.0" +dependencies = [ + "async-stm", + "async-trait", + "futures", + "im", + "structopt", + "tendermint 0.31.1", + "tokio", + "tower 0.4.13", + "tower-abci", + "tracing", + "tracing-subscriber 0.3.22", +] + +[[package]] +name = "fendermint_actor_activity_tracker" +version = "0.1.0" +dependencies = [ + "anyhow", + "cid 0.11.1", + "fil_actors_evm_shared", + "fil_actors_runtime", + "frc42_dispatch 8.0.0", + "fvm_ipld_blockstore 0.3.1", + "fvm_ipld_encoding 0.5.3", + "fvm_shared", + "hex-literal 0.4.1", + "log", + "multihash 0.18.1", + "num-derive 0.4.2", + "num-traits", + "serde", + "serde_tuple 0.5.0", +] + +[[package]] +name = "fendermint_actor_adm" +version = "0.1.0" +dependencies = [ + "anyhow", + "cid 0.11.1", + "fendermint_actor_machine", + "fil_actors_runtime", + "frc42_dispatch 8.0.0", + "fvm_ipld_blockstore 0.3.1", + "fvm_ipld_encoding 0.5.3", + "fvm_shared", + "hex-literal 0.4.1", + "integer-encoding 3.0.4", + "ipc_storage_actor_sdk", + "ipc_storage_sol_facade", + "log", + "multihash 0.18.1", + "num-derive 0.4.2", + "num-traits", + "serde", +] + +[[package]] +name = "fendermint_actor_blob_reader" +version = "0.1.0" +dependencies = [ + "anyhow", + "fendermint_actor_blobs_shared", + "fendermint_actor_blobs_testing", + "fil_actors_evm_shared", + "fil_actors_runtime", + "frc42_dispatch 8.0.0", + "fvm_ipld_blockstore 0.3.1", + "fvm_ipld_encoding 0.5.3", + "fvm_shared", + "hex-literal 0.4.1", + "ipc_storage_actor_sdk", + "ipc_storage_ipld", + "ipc_storage_sol_facade", + "log", + "num-derive 0.4.2", + "num-traits", + "serde", +] + +[[package]] +name = "fendermint_actor_blobs" +version = "0.1.0" +dependencies = [ + "anyhow", + "bls-signatures 0.13.1", + "cid 0.11.1", + "fendermint_actor_blobs_shared", + "fendermint_actor_blobs_testing", + "fendermint_actor_ipc_storage_config_shared", + "fil_actors_evm_shared", + "fil_actors_runtime", + "fvm_ipld_blockstore 0.3.1", + "fvm_ipld_encoding 0.5.3", + "fvm_shared", + "hex-literal 0.4.1", + "ipc_storage_actor_sdk", + "ipc_storage_ipld", + "ipc_storage_sol_facade", + "log", + "num-traits", + "rand 0.8.5", + "serde", +] + +[[package]] +name = "fendermint_actor_blobs_shared" +version = "0.1.0" dependencies = [ - "libc", - "thiserror 1.0.69", + "anyhow", + "blake3", + "data-encoding", + "fil_actors_runtime", + "frc42_dispatch 8.0.0", + "fvm_ipld_encoding 0.5.3", + "fvm_shared", + "ipc_storage_ipld", + "num-derive 0.4.2", + "num-traits", + "serde", ] [[package]] -name = "fendermint_abci" +name = "fendermint_actor_blobs_testing" version = "0.1.0" dependencies = [ - "async-stm", - "async-trait", - "futures", - "im", - "structopt", - "tendermint 0.31.1", - "tokio", - "tower 0.4.13", - "tower-abci", - "tracing", - "tracing-subscriber 0.3.20", + "fendermint_actor_blobs_shared", + 
"fvm_shared", + "iroh-blobs", + "rand 0.8.5", + "tracing-subscriber 0.3.22", ] [[package]] -name = "fendermint_actor_activity_tracker" +name = "fendermint_actor_bucket" version = "0.1.0" dependencies = [ "anyhow", + "blake3", "cid 0.11.1", + "fendermint_actor_blobs_shared", + "fendermint_actor_blobs_testing", + "fendermint_actor_machine", "fil_actors_evm_shared", "fil_actors_runtime", "frc42_dispatch 8.0.0", @@ -3252,12 +4010,14 @@ dependencies = [ "fvm_ipld_encoding 0.5.3", "fvm_shared", "hex-literal 0.4.1", - "log", - "multihash 0.18.1", + "ipc_storage_actor_sdk", + "ipc_storage_ipld", + "ipc_storage_sol_facade", "num-derive 0.4.2", "num-traits", + "quickcheck", + "quickcheck_macros", "serde", - "serde_tuple 0.5.0", ] [[package]] @@ -3342,6 +4102,95 @@ dependencies = [ "serde", ] +[[package]] +name = "fendermint_actor_init" +version = "0.1.0" +dependencies = [ + "anyhow", + "cid 0.11.1", + "fil_actors_runtime", + "fvm_ipld_blockstore 0.3.1", + "fvm_ipld_encoding 0.5.3", + "fvm_shared", + "log", + "num-derive 0.4.2", + "num-traits", + "serde", +] + +[[package]] +name = "fendermint_actor_ipc_storage_config" +version = "0.1.0" +dependencies = [ + "anyhow", + "fendermint_actor_blobs_shared", + "fendermint_actor_ipc_storage_config_shared", + "fil_actors_evm_shared", + "fil_actors_runtime", + "fvm_ipld_encoding 0.5.3", + "fvm_shared", + "hex-literal 0.4.1", + "ipc_storage_actor_sdk", + "ipc_storage_sol_facade", + "num-traits", + "serde", +] + +[[package]] +name = "fendermint_actor_ipc_storage_config_shared" +version = "0.1.0" +dependencies = [ + "fendermint_actor_blobs_shared", + "fil_actors_runtime", + "frc42_dispatch 8.0.0", + "fvm_ipld_encoding 0.5.3", + "fvm_shared", + "num-derive 0.4.2", + "num-traits", + "serde", +] + +[[package]] +name = "fendermint_actor_machine" +version = "0.1.0" +dependencies = [ + "anyhow", + "fil_actor_adm", + "fil_actors_runtime", + "frc42_dispatch 8.0.0", + "fvm_ipld_blockstore 0.3.1", + "fvm_ipld_encoding 0.5.3", + "fvm_shared", + "ipc_storage_actor_sdk", + "ipc_storage_sol_facade", + "serde", +] + +[[package]] +name = "fendermint_actor_timehub" +version = "0.1.0" +dependencies = [ + "anyhow", + "cid 0.11.1", + "fendermint_actor_blobs_shared", + "fendermint_actor_machine", + "fil_actors_evm_shared", + "fil_actors_runtime", + "frc42_dispatch 8.0.0", + "fvm_ipld_amt", + "fvm_ipld_blockstore 0.3.1", + "fvm_ipld_encoding 0.5.3", + "fvm_shared", + "hex-literal 0.4.1", + "ipc_storage_actor_sdk", + "ipc_storage_sol_facade", + "multihash-codetable", + "num-derive 0.4.2", + "num-traits", + "serde", + "tracing", +] + [[package]] name = "fendermint_app" version = "0.1.0" @@ -3417,13 +4266,13 @@ dependencies = [ "tendermint-proto 0.31.1", "tendermint-rpc", "tokio", - "tokio-util 0.7.17", + "tokio-util 0.7.18", "toml 0.8.23", "tower 0.4.13", "tower-abci", "tracing", "tracing-appender", - "tracing-subscriber 0.3.20", + "tracing-subscriber 0.3.22", "url", ] @@ -3434,7 +4283,7 @@ dependencies = [ "anyhow", "bytes", "cid 0.11.1", - "clap 4.5.51", + "clap 4.5.54", "ethers", "fendermint_materializer", "fendermint_vm_actor_interface", @@ -3449,7 +4298,7 @@ dependencies = [ "num-traits", "tendermint-rpc", "tracing", - "tracing-subscriber 0.3.20", + "tracing-subscriber 0.3.22", "url", ] @@ -3537,7 +4386,7 @@ dependencies = [ "async-trait", "axum", "cid 0.11.1", - "clap 4.5.51", + "clap 4.5.54", "erased-serde", "ethers", "ethers-contract", @@ -3572,7 +4421,7 @@ dependencies = [ "tokio", "tower-http 0.4.4", "tracing", - "tracing-subscriber 0.3.20", + "tracing-subscriber 0.3.22", ] 
[[package]] @@ -3594,7 +4443,7 @@ dependencies = [ "serde_json", "tokio", "tracing", - "tracing-subscriber 0.3.20", + "tracing-subscriber 0.3.22", ] [[package]] @@ -3658,7 +4507,7 @@ dependencies = [ "tendermint-rpc", "text-tables", "tokio", - "tokio-util 0.7.17", + "tokio-util 0.7.18", "toml 0.8.23", "tracing", "url", @@ -3690,8 +4539,10 @@ dependencies = [ "base64 0.21.7", "bytes", "cid 0.11.1", - "clap 4.5.51", + "clap 4.5.54", "ethers", + "fendermint_actor_blobs_shared", + "fendermint_actor_bucket", "fendermint_crypto", "fendermint_vm_actor_interface", "fendermint_vm_genesis", @@ -3709,7 +4560,7 @@ dependencies = [ "tendermint-rpc", "tokio", "tracing", - "tracing-subscriber 0.3.20", + "tracing-subscriber 0.3.22", ] [[package]] @@ -3878,10 +4729,17 @@ dependencies = [ "cid 0.11.1", "ethers", "fendermint_actor_activity_tracker", + "fendermint_actor_adm", + "fendermint_actor_blob_reader", + "fendermint_actor_blobs", + "fendermint_actor_blobs_shared", "fendermint_actor_chainmetadata", "fendermint_actor_eam", "fendermint_actor_f3_light_client", "fendermint_actor_gas_market_eip1559", + "fendermint_actor_init", + "fendermint_actor_ipc_storage_config", + "fendermint_actor_ipc_storage_config_shared", "fendermint_crypto", "fendermint_eth_deployer", "fendermint_eth_hardhat", @@ -3897,6 +4755,7 @@ dependencies = [ "fendermint_vm_message", "fendermint_vm_resolver", "fendermint_vm_topdown", + "fil_actor_adm", "fil_actor_eam", "fil_actor_evm", "futures-core", @@ -3910,6 +4769,8 @@ dependencies = [ "ipc-api", "ipc-observability", "ipc_actors_abis", + "iroh", + "iroh-blobs", "libipld", "merkle-tree-rs", "multihash 0.18.1", @@ -3931,7 +4792,7 @@ dependencies = [ "thiserror 1.0.69", "tokio", "tokio-stream", - "tokio-util 0.7.17", + "tokio-util 0.7.18", "tracing", ] @@ -4017,7 +4878,7 @@ dependencies = [ "thiserror 1.0.69", "tokio", "tokio-stream", - "tokio-util 0.7.17", + "tokio-util 0.7.18", "tracing", "unsigned-varint 0.7.2", ] @@ -4032,7 +4893,7 @@ dependencies = [ "async-trait", "bytes", "cid 0.11.1", - "clap 4.5.51", + "clap 4.5.54", "ethers", "fendermint_crypto", "fendermint_testing", @@ -4058,7 +4919,7 @@ dependencies = [ "thiserror 1.0.69", "tokio", "tracing", - "tracing-subscriber 0.3.20", + "tracing-subscriber 0.3.22", ] [[package]] @@ -4070,7 +4931,7 @@ dependencies = [ "base64 0.21.7", "chrono", "cid 0.11.1", - "clap 4.5.51", + "clap 4.5.54", "fendermint_actor_f3_light_client", "fendermint_vm_genesis", "filecoin-f3-certs", @@ -4099,7 +4960,7 @@ dependencies = [ "thiserror 1.0.69", "tokio", "tracing", - "tracing-subscriber 0.3.20", + "tracing-subscriber 0.3.22", "url", ] @@ -4109,6 +4970,7 @@ version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d013fc25338cc558c5c2cfbad646908fb23591e2404481826742b651c9af7160" dependencies = [ + "bitvec", "rand_core 0.6.4", "subtle", ] @@ -4130,6 +4992,13 @@ version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "28dea519a9695b9977216879a3ebfddf92f1c08c05d984f8996aecd6ecdc811d" +[[package]] +name = "fil_actor_adm" +version = "0.1.0" +dependencies = [ + "serde", +] + [[package]] name = "fil_actor_bundler" version = "6.1.0" @@ -4139,7 +5008,7 @@ dependencies = [ "anyhow", "async-std", "cid 0.10.1", - "clap 4.5.51", + "clap 4.5.54", "futures", "fvm_ipld_blockstore 0.2.1", "fvm_ipld_car 0.7.1", @@ -4251,14 +5120,14 @@ dependencies = [ [[package]] name = "filecoin-f3-blssig" version = "0.1.0" -source = 
"git+https://github.com/moshababo/rust-f3?branch=cargo-git-compat#f838fcd973e6e7f32298363ceb03a8010a1dc1fe" +source = "git+https://github.com/moshababo/rust-f3?branch=cargo-git-compat#40af605984045a9f2b9ba5dcc9c04c984deb8d1f" dependencies = [ "blake2 0.11.0-rc.2", - "bls-signatures", - "blstrs", + "bls-signatures 0.15.0", + "blstrs 0.7.1", "filecoin-f3-gpbft", "group 0.13.0", - "hashlink", + "hashlink 0.10.0", "parking_lot", "rayon", "thiserror 2.0.17", @@ -4267,7 +5136,7 @@ dependencies = [ [[package]] name = "filecoin-f3-certs" version = "0.1.0" -source = "git+https://github.com/moshababo/rust-f3?branch=cargo-git-compat#f838fcd973e6e7f32298363ceb03a8010a1dc1fe" +source = "git+https://github.com/moshababo/rust-f3?branch=cargo-git-compat#40af605984045a9f2b9ba5dcc9c04c984deb8d1f" dependencies = [ "ahash 0.8.12", "filecoin-f3-gpbft", @@ -4278,7 +5147,7 @@ dependencies = [ [[package]] name = "filecoin-f3-gpbft" version = "0.1.0" -source = "git+https://github.com/moshababo/rust-f3?branch=cargo-git-compat#f838fcd973e6e7f32298363ceb03a8010a1dc1fe" +source = "git+https://github.com/moshababo/rust-f3?branch=cargo-git-compat#40af605984045a9f2b9ba5dcc9c04c984deb8d1f" dependencies = [ "ahash 0.8.12", "anyhow", @@ -4301,7 +5170,7 @@ dependencies = [ [[package]] name = "filecoin-f3-lightclient" version = "0.1.0" -source = "git+https://github.com/moshababo/rust-f3?branch=cargo-git-compat#f838fcd973e6e7f32298363ceb03a8010a1dc1fe" +source = "git+https://github.com/moshababo/rust-f3?branch=cargo-git-compat#40af605984045a9f2b9ba5dcc9c04c984deb8d1f" dependencies = [ "anyhow", "base64 0.22.1", @@ -4317,7 +5186,7 @@ dependencies = [ [[package]] name = "filecoin-f3-merkle" version = "0.1.0" -source = "git+https://github.com/moshababo/rust-f3?branch=cargo-git-compat#f838fcd973e6e7f32298363ceb03a8010a1dc1fe" +source = "git+https://github.com/moshababo/rust-f3?branch=cargo-git-compat#40af605984045a9f2b9ba5dcc9c04c984deb8d1f" dependencies = [ "anyhow", "sha3", @@ -4326,7 +5195,7 @@ dependencies = [ [[package]] name = "filecoin-f3-rpc" version = "0.1.0" -source = "git+https://github.com/moshababo/rust-f3?branch=cargo-git-compat#f838fcd973e6e7f32298363ceb03a8010a1dc1fe" +source = "git+https://github.com/moshababo/rust-f3?branch=cargo-git-compat#40af605984045a9f2b9ba5dcc9c04c984deb8d1f" dependencies = [ "anyhow", "filecoin-f3-gpbft", @@ -4343,9 +5212,9 @@ checksum = "9081144cced0c2b7dc6e7337c2c8c7f4c6ff7ef0bb9c0b75b7f1aaeb1428ebd7" dependencies = [ "anyhow", "bellperson", - "blstrs", + "blstrs 0.7.1", "ff 0.13.1", - "generic-array 0.14.9", + "generic-array 0.14.7", "hex", "lazy_static", "merkletree", @@ -4357,19 +5226,19 @@ dependencies = [ [[package]] name = "filecoin-proofs" -version = "19.0.0" +version = "19.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "47ba6651d5fb07c62163c8ddb4e3274e1a4101b91c86b358769a2c561b284fcb" +checksum = "b42def522daf9cffe4c1f0190882aed109477ce429b33a5b21059f5face90635" dependencies = [ "anyhow", "bellperson", "bincode", "blake2b_simd", - "blstrs", + "blstrs 0.7.1", "ff 0.13.1", "filecoin-hashers", "fr32", - "generic-array 0.14.9", + "generic-array 0.14.7", "hex", "iowrap", "lazy_static", @@ -4397,7 +5266,7 @@ checksum = "d50610f79df0975b54461fd65820183b99326fda4f24223d507c1b75cb303b14" dependencies = [ "anyhow", "bincode", - "blstrs", + "blstrs 0.7.1", "filecoin-proofs", "fr32", "lazy_static", @@ -4419,9 +5288,9 @@ dependencies = [ [[package]] name = "find-msvc-tools" -version = "0.1.4" +version = "0.1.7" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "52051878f80a721bb68ebfbc930e07b65ba72f2da88968ea5c06fd6ca3d3a127" +checksum = "f449e6c6c08c865631d4890cfacf252b3d396c9bcc83adb6623cdb02a8336c41" [[package]] name = "fixed-hash" @@ -4443,9 +5312,9 @@ checksum = "0ce7134b9999ecaf8bcd65542e436736ef32ddca1b3e06094cb6ec5755203b80" [[package]] name = "flate2" -version = "1.1.5" +version = "1.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bfe33edd8e85a12a67454e37f8c75e730830d83e313556ab9ebf9ee7fbeb3bfb" +checksum = "b375d6465b98090a5f25b1c7703f3859783755aa9a80433b36e0379a3ec2f369" dependencies = [ "crc32fast", "miniz_oxide 0.8.9", @@ -4470,6 +5339,18 @@ dependencies = [ "spin 0.9.8", ] +[[package]] +name = "flume" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "da0e4dd2a88388a1f4ccc7c9ce104604dab68d9f408dc34cd45823d5a9069095" +dependencies = [ + "futures-core", + "futures-sink", + "nanorand", + "spin 0.9.8", +] + [[package]] name = "fnv" version = "1.0.7" @@ -4523,7 +5404,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cf1de08b59372f0316e8c7e304aaec13f180ccb33d55ebe02c10034a0826a2bd" dependencies = [ "anyhow", - "blstrs", + "blstrs 0.7.1", "byte-slice-cast", "byteorder", "ff 0.13.1", @@ -4598,14 +5479,14 @@ dependencies = [ "frc42_hasher 8.0.0", "proc-macro2", "quote", - "syn 2.0.108", + "syn 2.0.114", ] [[package]] name = "fs-err" -version = "3.1.3" +version = "3.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ad492b2cf1d89d568a43508ab24f98501fe03f2f31c01e1d0fe7366a71745d2" +checksum = "baf68cef89750956493a66a10f512b9e58d9db21f2a573c079c0bdf1207a54a7" dependencies = [ "autocfg", ] @@ -4651,6 +5532,19 @@ dependencies = [ "futures-util", ] +[[package]] +name = "futures-buffered" +version = "0.2.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a8e0e1f38ec07ba4abbde21eed377082f17ccb988be9d988a5adbf4bafc118fd" +dependencies = [ + "cordyceps", + "diatomic-waker", + "futures-core", + "pin-project-lite", + "spin 0.10.0", +] + [[package]] name = "futures-channel" version = "0.3.31" @@ -4731,7 +5625,7 @@ checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ "proc-macro2", "quote", - "syn 2.0.108", + "syn 2.0.114", ] [[package]] @@ -4741,7 +5635,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a8f2f12607f92c69b12ed746fabf9ca4f5c482cba46679c1a75b874ed7c26adb" dependencies = [ "futures-io", - "rustls 0.23.34", + "rustls 0.23.36", "rustls-pki-types", ] @@ -4805,7 +5699,7 @@ dependencies = [ "anyhow", "arbitrary", "cid 0.11.1", - "derive_more 2.0.1", + "derive_more 2.1.1", "filecoin-proofs-api", "fvm-wasm-instrument", "fvm_ipld_amt", @@ -5014,7 +5908,7 @@ dependencies = [ "arbitrary", "bitflags 2.10.0", "blake2b_simd", - "bls-signatures", + "bls-signatures 0.15.0", "cid 0.11.1", "data-encoding", "data-encoding-macro", @@ -5051,11 +5945,57 @@ dependencies = [ "thiserror 1.0.69", ] +[[package]] +name = "genawaiter" +version = "0.99.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c86bd0361bcbde39b13475e6e36cb24c329964aa2611be285289d1e4b751c1a0" +dependencies = [ + "futures-core", + "genawaiter-macro", + "genawaiter-proc-macro", + "proc-macro-hack", +] + +[[package]] +name = "genawaiter-macro" +version = "0.99.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"0b32dfe1fdfc0bbde1f22a5da25355514b5e450c33a6af6770884c8750aedfbc" + +[[package]] +name = "genawaiter-proc-macro" +version = "0.99.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "784f84eebc366e15251c4a8c3acee82a6a6f427949776ecb88377362a9621738" +dependencies = [ + "proc-macro-error 0.4.12", + "proc-macro-hack", + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "generator" +version = "0.8.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "52f04ae4152da20c76fe800fa48659201d5cf627c5149ca0b707b69d7eef6cf9" +dependencies = [ + "cc", + "cfg-if", + "libc", + "log", + "rustversion", + "windows-link 0.2.1", + "windows-result 0.4.1", +] + [[package]] name = "generic-array" -version = "0.14.9" +version = "0.14.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4bb6743198531e02858aeaea5398fcc883e71851fcbcb5a2f773e2fb6cb1edf2" +checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a" dependencies = [ "typenum", "version_check", @@ -5074,9 +6014,9 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.2.16" +version = "0.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "335ff9f135e4384c8150d6f27c6daed433577f86b4750418338c01a1a2528592" +checksum = "ff2abc00be7fca6ebc474524697ae276ad847ad0a6b3faa4bcb027e9a4614ad0" dependencies = [ "cfg-if", "js-sys", @@ -5122,7 +6062,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "07e28edb80900c19c28f1072f2e8aeca7fa06b23cd4169cefe1af5aa3260783f" dependencies = [ "fallible-iterator", - "indexmap 2.12.0", + "indexmap 2.13.0", "stable_deref_trait", ] @@ -5176,7 +6116,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5dfbfb3a6cfbd390d5c9564ab283a0349b9b9fcd46a706c1eb10e0db70bfbac7" dependencies = [ "ff 0.12.1", + "rand 0.8.5", "rand_core 0.6.4", + "rand_xorshift 0.3.0", "subtle", ] @@ -5205,29 +6147,29 @@ dependencies = [ "futures-sink", "futures-util", "http 0.2.12", - "indexmap 2.12.0", + "indexmap 2.13.0", "slab", "tokio", - "tokio-util 0.7.17", + "tokio-util 0.7.18", "tracing", ] [[package]] name = "h2" -version = "0.4.12" +version = "0.4.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f3c0b69cfcb4e1b9f1bf2f53f95f766e4661169728ec61cd3fe5a0166f2d1386" +checksum = "2f44da3a8150a6703ed5d34e164b875fd14c2cdab9af1252a9a1020bde2bdc54" dependencies = [ "atomic-waker", "bytes", "fnv", "futures-core", "futures-sink", - "http 1.3.1", - "indexmap 2.12.0", + "http 1.4.0", + "indexmap 2.13.0", "slab", "tokio", - "tokio-util 0.7.17", + "tokio-util 0.7.18", "tracing", ] @@ -5237,6 +6179,15 @@ version = "1.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1b43ede17f21864e81be2fa654110bf1e793774238d86ef8555c37e6519c0403" +[[package]] +name = "hash32" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b0c35f58762feb77d74ebe43bdbc3210f09be9fe6742234d573bacc26ed92b67" +dependencies = [ + "byteorder", +] + [[package]] name = "hashbrown" version = "0.12.3" @@ -5251,6 +6202,9 @@ name = "hashbrown" version = "0.14.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e5274423e17b7c9fc20b6e7e208532f9b19825d82dfd615708b70edd83df41f1" +dependencies = [ + "ahash 0.8.12", +] [[package]] name = "hashbrown" @@ -5266,9 +6220,9 @@ dependencies = [ [[package]] name = "hashbrown" -version = "0.16.0" +version = "0.16.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "5419bdc4f6a9207fbeba6d11b604d481addf78ecd10c11ad51e76c2f6482748d" +checksum = "841d1cc9bed7f9236f321df977030373f4a4163ae1a7dbfe1a51a2c1a51d9100" [[package]] name = "hashers" @@ -5279,6 +6233,15 @@ dependencies = [ "fxhash", ] +[[package]] +name = "hashlink" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ba4ff7128dee98c7dc9794b6a411377e1404dba1c97deb8d1a55297bd25d8af" +dependencies = [ + "hashbrown 0.14.5", +] + [[package]] name = "hashlink" version = "0.10.0" @@ -5322,6 +6285,20 @@ dependencies = [ "http 0.2.12", ] +[[package]] +name = "heapless" +version = "0.7.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cdc6457c0eb62c71aac4bc17216026d8410337c4126773b9c5daba343f17964f" +dependencies = [ + "atomic-polyfill", + "hash32", + "rustc_version 0.4.1", + "serde", + "spin 0.9.8", + "stable_deref_trait", +] + [[package]] name = "heck" version = "0.3.3" @@ -5410,6 +6387,31 @@ dependencies = [ "url", ] +[[package]] +name = "hickory-proto" +version = "0.25.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8a6fe56c0038198998a6f217ca4e7ef3a5e51f46163bd6dd60b5c71ca6c6502" +dependencies = [ + "async-trait", + "cfg-if", + "data-encoding", + "enum-as-inner", + "futures-channel", + "futures-io", + "futures-util", + "idna", + "ipnet", + "once_cell", + "rand 0.9.2", + "ring 0.17.14", + "thiserror 2.0.17", + "tinyvec", + "tokio", + "tracing", + "url", +] + [[package]] name = "hickory-resolver" version = "0.24.4" @@ -5418,7 +6420,7 @@ checksum = "cbb117a1ca520e111743ab2f6688eddee69db4e0ea242545a604dce8a66fd22e" dependencies = [ "cfg-if", "futures-util", - "hickory-proto", + "hickory-proto 0.24.4", "ipconfig", "lru-cache", "once_cell", @@ -5431,6 +6433,27 @@ dependencies = [ "tracing", ] +[[package]] +name = "hickory-resolver" +version = "0.25.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc62a9a99b0bfb44d2ab95a7208ac952d31060efc16241c87eaf36406fecf87a" +dependencies = [ + "cfg-if", + "futures-util", + "hickory-proto 0.25.2", + "ipconfig", + "moka", + "once_cell", + "parking_lot", + "rand 0.9.2", + "resolv-conf", + "smallvec", + "thiserror 2.0.17", + "tokio", + "tracing", +] + [[package]] name = "hkdf" version = "0.12.4" @@ -5466,10 +6489,26 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "17ea0a1394df5b6574da6e0c1ade9e78868c9fb0a4e5ef4428e32da4676b85b1" dependencies = [ "digest 0.9.0", - "generic-array 0.14.9", + "generic-array 0.14.7", "hmac 0.8.1", ] +[[package]] +name = "hmac-sha1" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6b05da5b9e5d4720bfb691eebb2b9d42da3570745da71eac8a1f5bb7e59aab88" +dependencies = [ + "hmac 0.12.1", + "sha1", +] + +[[package]] +name = "hmac-sha256" +version = "1.1.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ad6880c8d4a9ebf39c6e8b77007ce223f646a4d21ce29d99f70cb16420545425" + [[package]] name = "home" version = "0.5.12" @@ -5479,6 +6518,12 @@ dependencies = [ "windows-sys 0.61.2", ] +[[package]] +name = "hostname-validator" +version = "1.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f558a64ac9af88b5ba400d99b579451af0d39c6d360980045b91aac966d705e2" + [[package]] name = "http" version = "0.2.12" @@ -5492,12 +6537,11 @@ dependencies = [ [[package]] name = "http" -version = "1.3.1" +version = "1.4.0" source 
= "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4a85d31aea989eead29a3aaf9e1115a180df8282431156e533de47660892565" +checksum = "e3ba2a386d7f85a81f119ad7498ebe444d2e22c2af0b86b069416ace48b3311a" dependencies = [ "bytes", - "fnv", "itoa", ] @@ -5519,7 +6563,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1efedce1fb8e6913f23e0c92de8e62cd5b772a67e7b3946df930a62566c93184" dependencies = [ "bytes", - "http 1.3.1", + "http 1.4.0", ] [[package]] @@ -5530,7 +6574,7 @@ checksum = "b021d93e26becf5dc7e1b75b1bed1fd93124b374ceb73f43d4d4eafec896a64a" dependencies = [ "bytes", "futures-core", - "http 1.3.1", + "http 1.4.0", "http-body 1.0.1", "pin-project-lite", ] @@ -5604,18 +6648,19 @@ dependencies = [ [[package]] name = "hyper" -version = "1.7.0" +version = "1.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eb3aa54a13a0dfe7fbe3a59e0c76093041720fdc77b110cc0fc260fafb4dc51e" +checksum = "2ab2d4f250c3d7b1c9fcdff1cece94ea4e2dfbec68614f7b87cb205f24ca9d11" dependencies = [ "atomic-waker", "bytes", "futures-channel", "futures-core", - "h2 0.4.12", - "http 1.3.1", + "h2 0.4.13", + "http 1.4.0", "http-body 1.0.1", "httparse", + "httpdate", "itoa", "pin-project-lite", "pin-utils", @@ -5681,16 +6726,16 @@ version = "0.27.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e3c93eb611681b207e1fe55d5a71ecf91572ec8a6705cdb6857f7d8d5242cf58" dependencies = [ - "http 1.3.1", - "hyper 1.7.0", + "http 1.4.0", + "hyper 1.8.1", "hyper-util", "log", - "rustls 0.23.34", + "rustls 0.23.36", "rustls-pki-types", "tokio", "tokio-rustls 0.26.4", "tower-service", - "webpki-roots 1.0.4", + "webpki-roots 1.0.5", ] [[package]] @@ -5714,7 +6759,7 @@ checksum = "70206fc6890eaca9fde8a0bf71caa2ddfc9fe045ac9e5c70df101a7dbde866e0" dependencies = [ "bytes", "http-body-util", - "hyper 1.7.0", + "hyper 1.8.1", "hyper-util", "native-tls", "tokio", @@ -5724,18 +6769,18 @@ dependencies = [ [[package]] name = "hyper-util" -version = "0.1.17" +version = "0.1.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c6995591a8f1380fcb4ba966a252a4b29188d51d2b89e3a252f5305be65aea8" +checksum = "727805d60e7938b76b826a6ef209eb70eaa1812794f9424d4a4e2d740662df5f" dependencies = [ "base64 0.22.1", "bytes", "futures-channel", "futures-core", "futures-util", - "http 1.3.1", + "http 1.4.0", "http-body 1.0.1", - "hyper 1.7.0", + "hyper 1.8.1", "ipnet", "libc", "percent-encoding", @@ -5833,9 +6878,9 @@ checksum = "7aedcccd01fc5fe81e6b489c15b247b8b0690feb23304303a9e560f37efc560a" [[package]] name = "icu_properties" -version = "2.1.1" +version = "2.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e93fcd3157766c0c8da2f8cff6ce651a31f0810eaa1c51ec363ef790bbb5fb99" +checksum = "020bfc02fe870ec3a66d93e677ccca0562506e5872c650f893269e08615d74ec" dependencies = [ "icu_collections", "icu_locale_core", @@ -5847,9 +6892,9 @@ dependencies = [ [[package]] name = "icu_properties_data" -version = "2.1.1" +version = "2.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02845b3647bb045f1100ecd6480ff52f34c35f82d9880e029d329c21d1054899" +checksum = "616c294cf8d725c6afcd8f55abc17c56464ef6211f9ed59cccffe534129c77af" [[package]] name = "icu_provider" @@ -5917,14 +6962,14 @@ dependencies = [ "ipnet", "log", "netlink-packet-core", - "netlink-packet-route", + "netlink-packet-route 0.17.1", "netlink-proto", "netlink-sys", "rtnetlink", "smol", "system-configuration 0.6.1", "tokio", - 
"windows", + "windows 0.53.0", ] [[package]] @@ -5934,7 +6979,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "064d90fec10d541084e7b39ead8875a5a80d9114a2b18791565253bae25f49e4" dependencies = [ "async-trait", - "attohttpc", + "attohttpc 0.24.1", "bytes", "futures", "http 0.2.12", @@ -5946,6 +6991,27 @@ dependencies = [ "xmltree", ] +[[package]] +name = "igd-next" +version = "0.16.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "516893339c97f6011282d5825ac94fc1c7aad5cad26bdc2d0cee068c0bf97f97" +dependencies = [ + "async-trait", + "attohttpc 0.30.1", + "bytes", + "futures", + "http 1.4.0", + "http-body-util", + "hyper 1.8.1", + "hyper-util", + "log", + "rand 0.9.2", + "tokio", + "url", + "xmltree", +] + [[package]] name = "ignore" version = "0.4.25" @@ -6011,7 +7077,7 @@ checksum = "a0eb5a3343abf848c0984fe4604b2b105da9539376e24fc0a3b0007411ae4fd9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.108", + "syn 2.0.114", ] [[package]] @@ -6052,12 +7118,12 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.12.0" +version = "2.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6717a8d2a5a929a1a2eb43a12812498ed141a0bcfb7e8f7844fbdbe4303bba9f" +checksum = "7714e70437a7dc3ac8eb7e6f8df75fd8eb422675fc7678aff7364301092b1017" dependencies = [ "equivalent", - "hashbrown 0.16.0", + "hashbrown 0.16.1", "serde", "serde_core", ] @@ -6078,7 +7144,16 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "879f10e63c20629ecabbb64a8010319738c66a5cd0c29b02d63d272b03751d01" dependencies = [ "block-padding", - "generic-array 0.14.9", + "generic-array 0.14.7", +] + +[[package]] +name = "inplace-vec-builder" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cf64c2edc8226891a71f127587a2861b132d2b942310843814d5001d99a1d307" +dependencies = [ + "smallvec", ] [[package]] @@ -6088,6 +7163,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e0242819d153cba4b4b05a5a8f2a7e9bbf97b6055b2a002b395c96b5ff3c0222" dependencies = [ "cfg-if", + "js-sys", + "wasm-bindgen", + "web-sys", ] [[package]] @@ -6164,7 +7242,7 @@ dependencies = [ "bytes", "chrono", "cid 0.11.1", - "clap 4.5.51", + "clap 4.5.54", "clap_complete", "contracts-artifacts", "env_logger 0.10.2", @@ -6219,17 +7297,65 @@ dependencies = [ "thiserror 1.0.69", "tokio", "tokio-tungstenite 0.18.0", - "tokio-util 0.7.17", + "tokio-util 0.7.18", "toml 0.7.8", "tracing", - "tracing-subscriber 0.3.20", + "tracing-subscriber 0.3.22", "url", "urlencoding", - "uuid 1.18.1", + "uuid 1.19.0", "warp", "zeroize", ] +[[package]] +name = "ipc-decentralized-storage" +version = "0.1.0" +dependencies = [ + "anyhow", + "async-trait", + "blake2b_simd", + "bls-signatures 0.13.1", + "bytes", + "clap 4.5.54", + "ethers", + "fendermint_actor_blobs_shared", + "fendermint_actor_bucket", + "fendermint_crypto", + "fendermint_rpc", + "fendermint_vm_actor_interface", + "fendermint_vm_message", + "futures", + "futures-util", + "fvm_ipld_encoding 0.5.3", + "fvm_shared", + "hex", + "ipc-api", + "iroh", + "iroh-base", + "iroh-blobs", + "iroh_manager", + "lazy_static", + "mime_guess", + "prometheus", + "prometheus_exporter", + "rand 0.8.5", + "recall_entangler", + "recall_entangler_storage", + "reqwest 0.11.27", + "serde", + "serde_json", + "tempfile", + "tendermint-rpc", + "thiserror 1.0.69", + "tokio", + "tracing", + "tracing-subscriber 0.3.22", + "urlencoding", + "uuid 1.19.0", + "warp", +] 
+ [[package]] name = "ipc-observability" version = "0.1.0" @@ -6243,7 +7369,7 @@ dependencies = [ "strum 0.26.3", "tracing", "tracing-appender", - "tracing-subscriber 0.3.20", + "tracing-subscriber 0.3.22", ] [[package]] @@ -6339,7 +7465,7 @@ dependencies = [ "argon2", "base64 0.21.7", "blake2b_simd", - "bls-signatures", + "bls-signatures 0.15.0", "ethers", "fs-err", "fvm_shared", @@ -6375,7 +7501,7 @@ dependencies = [ "fvm_shared", "lazy_static", "prettyplease", - "syn 2.0.108", + "syn 2.0.114", "thiserror 1.0.69", "tracing", ] @@ -6420,42 +7546,389 @@ dependencies = [ ] [[package]] -name = "ipconfig" -version = "0.3.2" +name = "ipc_storage_actor_sdk" +version = "0.1.0" +dependencies = [ + "anyhow", + "cid 0.11.1", + "fil_actor_adm", + "fil_actors_runtime", + "fvm_ipld_encoding 0.5.3", + "fvm_sdk", + "fvm_shared", + "ipc_storage_sol_facade", + "num-traits", + "serde", +] + +[[package]] +name = "ipc_storage_ipld" +version = "0.1.0" +dependencies = [ + "anyhow", + "cid 0.11.1", + "fil_actors_runtime", + "fvm_ipld_amt", + "fvm_ipld_blockstore 0.3.1", + "fvm_ipld_encoding 0.5.3", + "fvm_ipld_hamt", + "fvm_sdk", + "fvm_shared", + "integer-encoding 3.0.4", + "serde", +] + +[[package]] +name = "ipc_storage_sol_facade" +version = "0.1.2" +dependencies = [ + "alloy-primitives", + "alloy-sol-macro-expander", + "alloy-sol-macro-input", + "alloy-sol-types", + "anyhow", + "dunce", + "eyre", + "fvm_ipld_encoding 0.5.3", + "fvm_shared", + "prettyplease", + "proc-macro2", + "quote", + "regex", + "serde", + "serde_json", + "syn 2.0.114", + "thiserror 2.0.17", + "walkdir", +] + +[[package]] +name = "ipconfig" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b58db92f96b720de98181bbbe63c831e87005ab460c1bf306eb2622b4707997f" +dependencies = [ + "socket2 0.5.10", + "widestring", + "windows-sys 0.48.0", + "winreg", +] + +[[package]] +name = "ipld-core" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "104718b1cc124d92a6d01ca9c9258a7df311405debb3408c445a36452f9bf8db" +dependencies = [ + "cid 0.11.1", + "serde", + "serde_bytes", +] + +[[package]] +name = "ipnet" +version = "2.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "469fb0b9cefa57e3ef31275ee7cacb78f2fdca44e4765491884a2b119d4eb130" + +[[package]] +name = "iri-string" +version = "0.7.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c91338f0783edbd6195decb37bae672fd3b165faffb89bf7b9e6942f8b1a731a" +dependencies = [ + "memchr", + "serde", +] + +[[package]] +name = "iroh" +version = "0.35.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ca758f4ce39ae3f07de922be6c73de6a48a07f39554e78b5745585652ce38f5" +dependencies = [ + "aead", + "anyhow", + "atomic-waker", + "backon", + "bytes", + "cfg_aliases", + "concurrent-queue", + "crypto_box", + "data-encoding", + "der 0.7.10", + "derive_more 1.0.0", + "ed25519-dalek", + "futures-buffered", + "futures-util", + "getrandom 0.3.4", + "hickory-resolver 0.25.2", + "http 1.4.0", + "igd-next 0.16.2", + "instant", + "iroh-base", + "iroh-metrics", + "iroh-quinn", + "iroh-quinn-proto", + "iroh-quinn-udp", + "iroh-relay", + "n0-future", + "netdev", + "netwatch", + "pin-project", + "pkarr", + "portmapper", + "rand 0.8.5", + "rcgen 0.13.2", + "reqwest 0.12.28", + "ring 0.17.14", + "rustls 0.23.36", + "rustls-webpki 0.102.8", + "serde", + "smallvec", + "spki 0.7.3", + "strum 0.26.3", + "stun-rs", + "surge-ping", + "thiserror 2.0.17", + 
"time", + "tokio", + "tokio-stream", + "tokio-util 0.7.18", + "tracing", + "url", + "wasm-bindgen-futures", + "webpki-roots 0.26.11", + "x509-parser", + "z32", +] + +[[package]] +name = "iroh-base" +version = "0.35.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f91ac4aaab68153d726c4e6b39c30f9f9253743f0e25664e52f4caeb46f48d11" +dependencies = [ + "curve25519-dalek", + "data-encoding", + "derive_more 1.0.0", + "ed25519-dalek", + "postcard", + "rand_core 0.6.4", + "serde", + "thiserror 2.0.17", + "url", +] + +[[package]] +name = "iroh-blobs" +version = "0.35.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "817b785193b73c34ef1f2dcb5ddf8729ecef9b72a8fc0e706ee6d7a9bf8766a6" +dependencies = [ + "anyhow", + "async-channel 2.5.0", + "bao-tree", + "blake3", + "bytes", + "chrono", + "data-encoding", + "derive_more 1.0.0", + "futures-buffered", + "futures-lite 2.6.1", + "futures-util", + "genawaiter", + "hashlink 0.9.1", + "hex", + "iroh", + "iroh-base", + "iroh-io", + "iroh-metrics", + "nested_enum_utils 0.1.0", + "num_cpus", + "oneshot", + "parking_lot", + "portable-atomic", + "postcard", + "quic-rpc", + "quic-rpc-derive", + "rand 0.8.5", + "range-collections", + "redb", + "reflink-copy", + "self_cell", + "serde", + "serde-error", + "smallvec", + "ssh-key", + "strum 0.26.3", + "tempfile", + "thiserror 2.0.17", + "tokio", + "tokio-util 0.7.18", + "tracing", + "tracing-futures", + "tracing-test", + "walkdir", +] + +[[package]] +name = "iroh-io" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e0a5feb781017b983ff1b155cd1faf8174da2acafd807aa482876da2d7e6577a" +dependencies = [ + "bytes", + "futures-lite 2.6.1", + "pin-project", + "smallvec", + "tokio", +] + +[[package]] +name = "iroh-metrics" +version = "0.34.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f70466f14caff7420a14373676947e25e2917af6a5b1bec45825beb2bf1eb6a7" +dependencies = [ + "iroh-metrics-derive", + "itoa", + "serde", + "snafu", + "tracing", +] + +[[package]] +name = "iroh-metrics-derive" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8d12f5c45c4ed2436302a4e03cad9a0ad34b2962ad0c5791e1019c0ee30eeb09" +dependencies = [ + "heck 0.5.0", + "proc-macro2", + "quote", + "syn 2.0.114", +] + +[[package]] +name = "iroh-quinn" +version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b58db92f96b720de98181bbbe63c831e87005ab460c1bf306eb2622b4707997f" +checksum = "76c6245c9ed906506ab9185e8d7f64857129aee4f935e899f398a3bd3b70338d" dependencies = [ + "bytes", + "cfg_aliases", + "iroh-quinn-proto", + "iroh-quinn-udp", + "pin-project-lite", + "rustc-hash 2.1.1", + "rustls 0.23.36", "socket2 0.5.10", - "widestring", - "windows-sys 0.48.0", - "winreg", + "thiserror 2.0.17", + "tokio", + "tracing", + "web-time", ] [[package]] -name = "ipld-core" -version = "0.4.2" +name = "iroh-quinn-proto" +version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "104718b1cc124d92a6d01ca9c9258a7df311405debb3408c445a36452f9bf8db" +checksum = "929d5d8fa77d5c304d3ee7cae9aede31f13908bd049f9de8c7c0094ad6f7c535" dependencies = [ - "cid 0.11.1", - "serde", - "serde_bytes", + "bytes", + "getrandom 0.2.17", + "rand 0.8.5", + "ring 0.17.14", + "rustc-hash 2.1.1", + "rustls 0.23.36", + "rustls-pki-types", + "rustls-platform-verifier", + "slab", + "thiserror 2.0.17", + "tinyvec", + "tracing", + "web-time", ] 
[[package]] -name = "ipnet" -version = "2.11.0" +name = "iroh-quinn-udp" +version = "0.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "469fb0b9cefa57e3ef31275ee7cacb78f2fdca44e4765491884a2b119d4eb130" +checksum = "c53afaa1049f7c83ea1331f5ebb9e6ebc5fdd69c468b7a22dd598b02c9bcc973" +dependencies = [ + "cfg_aliases", + "libc", + "once_cell", + "socket2 0.5.10", + "tracing", + "windows-sys 0.59.0", +] [[package]] -name = "iri-string" -version = "0.7.8" +name = "iroh-relay" +version = "0.35.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dbc5ebe9c3a1a7a5127f920a418f7585e9e758e911d0466ed004f393b0e380b2" +checksum = "c63f122cdfaa4b4e0e7d6d3921d2b878f42a0c6d3ee5a29456dc3f5ab5ec931f" dependencies = [ - "memchr", + "anyhow", + "bytes", + "cfg_aliases", + "data-encoding", + "derive_more 1.0.0", + "getrandom 0.3.4", + "hickory-resolver 0.25.2", + "http 1.4.0", + "http-body-util", + "hyper 1.8.1", + "hyper-util", + "iroh-base", + "iroh-metrics", + "iroh-quinn", + "iroh-quinn-proto", + "lru 0.12.5", + "n0-future", + "num_enum", + "pin-project", + "pkarr", + "postcard", + "rand 0.8.5", + "reqwest 0.12.28", + "rustls 0.23.36", + "rustls-webpki 0.102.8", "serde", + "sha1", + "strum 0.26.3", + "stun-rs", + "thiserror 2.0.17", + "tokio", + "tokio-rustls 0.26.4", + "tokio-util 0.7.18", + "tokio-websockets", + "tracing", + "url", + "webpki-roots 0.26.11", + "ws_stream_wasm", + "z32", +] + +[[package]] +name = "iroh_manager" +version = "0.1.0" +dependencies = [ + "anyhow", + "iroh", + "iroh-blobs", + "iroh-quinn", + "iroh-relay", + "n0-future", + "num-traits", + "quic-rpc", + "tempfile", + "tokio", + "tracing", + "tracing-subscriber 0.3.22", + "url", ] [[package]] @@ -6540,9 +8013,9 @@ dependencies = [ [[package]] name = "itoa" -version = "1.0.15" +version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4a5f13b858c8d314ee3e8f639011f7ccefe71f97f96e50151fb991f267928e2c" +checksum = "92ecc6618181def0457392ccd0ee51198e065e016d1d527a7ac1b6dc7c1f09d2" [[package]] name = "jni" @@ -6578,9 +8051,9 @@ dependencies = [ [[package]] name = "js-sys" -version = "0.3.82" +version = "0.3.83" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b011eec8cc36da2aab2d5cff675ec18454fad408585853910a202391cf9f8e65" +checksum = "464a3709c7f55f1f721e5389aa6ea4e3bc6aba669353300af094b29ffbdde1d8" dependencies = [ "once_cell", "wasm-bindgen", @@ -6632,17 +8105,17 @@ checksum = "cf36eb27f8e13fa93dcb50ccb44c417e25b818cfa1a481b5470cd07b19c60b98" dependencies = [ "base64 0.22.1", "futures-util", - "http 1.3.1", + "http 1.4.0", "jsonrpsee-core", "pin-project", - "rustls 0.23.34", + "rustls 0.23.36", "rustls-pki-types", "rustls-platform-verifier", "soketto", "thiserror 2.0.17", "tokio", "tokio-rustls 0.26.4", - "tokio-util 0.7.17", + "tokio-util 0.7.18", "tracing", "url", ] @@ -6657,7 +8130,7 @@ dependencies = [ "bytes", "futures-timer", "futures-util", - "http 1.3.1", + "http 1.4.0", "http-body 1.0.1", "http-body-util", "jsonrpsee-types", @@ -6668,7 +8141,7 @@ dependencies = [ "thiserror 2.0.17", "tokio", "tokio-stream", - "tower 0.5.2", + "tower 0.5.3", "tracing", ] @@ -6680,18 +8153,18 @@ checksum = "790bedefcec85321e007ff3af84b4e417540d5c87b3c9779b9e247d1bcc3dab8" dependencies = [ "base64 0.22.1", "http-body 1.0.1", - "hyper 1.7.0", + "hyper 1.8.1", "hyper-rustls 0.27.7", "hyper-util", "jsonrpsee-core", "jsonrpsee-types", - "rustls 0.23.34", + "rustls 0.23.36", "rustls-platform-verifier", "serde", "serde_json", 
"thiserror 2.0.17", "tokio", - "tower 0.5.2", + "tower 0.5.3", "url", ] @@ -6701,7 +8174,7 @@ version = "0.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bc88ff4688e43cc3fa9883a8a95c6fa27aa2e76c96e610b737b6554d650d7fd5" dependencies = [ - "http 1.3.1", + "http 1.4.0", "serde", "serde_json", "thiserror 2.0.17", @@ -6713,11 +8186,11 @@ version = "0.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9b6fceceeb05301cc4c065ab3bd2fa990d41ff4eb44e4ca1b30fa99c057c3e79" dependencies = [ - "http 1.3.1", + "http 1.4.0", "jsonrpsee-client-transport", "jsonrpsee-core", "jsonrpsee-types", - "tower 0.5.2", + "tower 0.5.3", "url", ] @@ -6780,6 +8253,16 @@ dependencies = [ "cpufeatures", ] +[[package]] +name = "keccak-asm" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "505d1856a39b200489082f90d897c3f07c455563880bc5952e38eabf731c83b6" +dependencies = [ + "digest 0.10.7", + "sha3-asm", +] + [[package]] name = "keccak-hash" version = "0.11.0" @@ -6806,7 +8289,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "55cb077ad656299f160924eb2912aa147d7339ea7d69e1b5517326fdcec3c1ca" dependencies = [ "ascii-canvas", - "bit-set", + "bit-set 0.5.3", "ena", "itertools 0.11.0", "lalrpop-util", @@ -6858,9 +8341,9 @@ checksum = "09edd9e8b54e49e587e4f6295a7d29c3ea94d469cb40ab8ca70b288248a81db2" [[package]] name = "libc" -version = "0.2.177" +version = "0.2.180" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2874a2af47a2325c2001a6e6fad9b16a53b802102b528163885171cf92b15976" +checksum = "bcc35a38544a891a5f7c865aca548a982ccb3b8650a5b06d0fd33a10283c56fc" [[package]] name = "libipld" @@ -6937,7 +8420,7 @@ dependencies = [ "either", "futures", "futures-timer", - "getrandom 0.2.16", + "getrandom 0.2.17", "instant", "libp2p-allow-block-list", "libp2p-connection-limits", @@ -6994,7 +8477,7 @@ dependencies = [ "prost-build", "thiserror 1.0.69", "tracing", - "tracing-subscriber 0.3.20", + "tracing-subscriber 0.3.22", "unsigned-varint 0.7.2", ] @@ -7048,7 +8531,7 @@ dependencies = [ "async-std-resolver", "async-trait", "futures", - "hickory-resolver", + "hickory-resolver 0.24.4", "libp2p-core", "libp2p-identity", "parking_lot", @@ -7070,7 +8553,7 @@ dependencies = [ "fnv", "futures", "futures-ticker", - "getrandom 0.2.16", + "getrandom 0.2.17", "hex_fmt", "instant", "libp2p-core", @@ -7102,7 +8585,7 @@ dependencies = [ "libp2p-core", "libp2p-identity", "libp2p-swarm", - "lru", + "lru 0.12.5", "quick-protobuf", "quick-protobuf-codec 0.3.1", "smallvec", @@ -7113,9 +8596,9 @@ dependencies = [ [[package]] name = "libp2p-identity" -version = "0.2.12" +version = "0.2.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3104e13b51e4711ff5738caa1fb54467c8604c2e94d607e27745bcf709068774" +checksum = "f0c7892c221730ba55f7196e98b0b8ba5e04b4155651736036628e9f73ed6fc3" dependencies = [ "asn1_der", "bs58", @@ -7173,7 +8656,7 @@ dependencies = [ "async-std", "data-encoding", "futures", - "hickory-proto", + "hickory-proto 0.24.4", "if-watch", "libp2p-core", "libp2p-identity", @@ -7302,7 +8785,7 @@ dependencies = [ "quinn", "rand 0.8.5", "ring 0.17.14", - "rustls 0.23.34", + "rustls 0.23.36", "socket2 0.5.10", "thiserror 1.0.69", "tokio", @@ -7344,7 +8827,7 @@ dependencies = [ "libp2p-core", "libp2p-identity", "libp2p-swarm-derive", - "lru", + "lru 0.12.5", "multistream-select", "once_cell", "rand 0.8.5", @@ -7363,7 +8846,7 @@ dependencies = [ "heck 
0.5.0", "proc-macro2", "quote", - "syn 2.0.108", + "syn 2.0.114", ] [[package]] @@ -7394,9 +8877,9 @@ dependencies = [ "futures-rustls", "libp2p-core", "libp2p-identity", - "rcgen", + "rcgen 0.11.3", "ring 0.17.14", - "rustls 0.23.34", + "rustls 0.23.36", "rustls-webpki 0.101.7", "thiserror 1.0.69", "x509-parser", @@ -7411,7 +8894,7 @@ checksum = "cccf04b0e3ff3de52d07d5fd6c3b061d0e7f908ffc683c32d9638caedce86fc8" dependencies = [ "futures", "futures-timer", - "igd-next", + "igd-next 0.14.3", "libp2p-core", "libp2p-swarm", "tokio", @@ -7436,13 +8919,13 @@ dependencies = [ [[package]] name = "libredox" -version = "0.1.10" +version = "0.1.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "416f7e718bdb06000964960ffa43b4335ad4012ae8b99060261aa4a8088d5ccb" +checksum = "3d0b95e02c851351f877147b7deea7b1afb1df71b63aa5f8270716e0c5720616" dependencies = [ "bitflags 2.10.0", "libc", - "redox_syscall", + "redox_syscall 0.7.0", ] [[package]] @@ -7511,9 +8994,9 @@ dependencies = [ [[package]] name = "libz-sys" -version = "1.1.22" +version = "1.1.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b70e7a7df205e92a1a4cd9aaae7898dac0aa555503cc0a649494d0d60e7651d" +checksum = "15d118bbf3771060e7311cc7bb0545b01d08a8b4a7de949198dec1fa0ca1c0f7" dependencies = [ "cc", "pkg-config", @@ -7556,6 +9039,12 @@ version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f0d2be3f5a0d4d5c983d1f8ecc2a87676a0875a14feb9eebf0675f7c3e2f3c35" +[[package]] +name = "litrs" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "11d3d7f243d5c5a8b9bb5d6dd2b1602c0cb0b9db1621bafc7ed66e35ff9fe092" + [[package]] name = "lock_api" version = "0.4.14" @@ -7567,13 +9056,26 @@ dependencies = [ [[package]] name = "log" -version = "0.4.28" +version = "0.4.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34080505efa8e45a4b816c349525ebe327ceaa8559756f0356cba97ef3bf7432" +checksum = "5e5032e24019045c762d3c0f28f5b6b8bbf38563a65908389bf7978758920897" dependencies = [ "value-bag", ] +[[package]] +name = "loom" +version = "0.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "419e0dc8046cb947daa77eb95ae174acfbddb7673b4151f56d1eed8e93fbfaca" +dependencies = [ + "cfg-if", + "generator", + "scoped-tls", + "tracing", + "tracing-subscriber 0.3.22", +] + [[package]] name = "lru" version = "0.12.5" @@ -7583,6 +9085,12 @@ dependencies = [ "hashbrown 0.15.5", ] +[[package]] +name = "lru" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "227748d55f2f0ab4735d87fd623798cb6b664512fe979705f829c9f81c934465" + [[package]] name = "lru-cache" version = "0.1.2" @@ -7623,6 +9131,17 @@ dependencies = [ "libc", ] +[[package]] +name = "macro-string" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1b27834086c65ec3f9387b096d66e99f221cf081c2b738042aa252bcd41204e3" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.114", +] + [[package]] name = "match-lookup" version = "0.1.1" @@ -7669,6 +9188,12 @@ dependencies = [ "digest 0.10.7", ] +[[package]] +name = "md5" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "490cc448043f947bae3cbee9c203358d62dbee0db12107a74be5c30ccfd09771" + [[package]] name = "memchr" version = "2.7.6" @@ -7681,7 +9206,7 @@ version = "0.6.5" source = "registry+https://github.com/rust-lang/crates.io-index" 
checksum = "ad38eb12aea514a0466ea40a80fd8cc83637065948eb4a426e4aa46261175227" dependencies = [ - "rustix 1.1.2", + "rustix 1.1.3", ] [[package]] @@ -7775,15 +9300,32 @@ dependencies = [ [[package]] name = "mio" -version = "1.1.0" +version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "69d83b0086dc8ecf3ce9ae2874b2d1290252e2a30720bea58a5c6639b0092873" +checksum = "a69bcab0ad47271a0234d9422b131806bf3968021e5dc9328caf2d4cd58557fc" dependencies = [ "libc", "wasi", "windows-sys 0.61.2", ] +[[package]] +name = "moka" +version = "0.12.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a3dec6bd31b08944e08b58fd99373893a6c17054d6f3ea5006cc894f4f4eee2a" +dependencies = [ + "crossbeam-channel", + "crossbeam-epoch", + "crossbeam-utils", + "equivalent", + "parking_lot", + "portable-atomic", + "smallvec", + "tagptr", + "uuid 1.19.0", +] + [[package]] name = "multer" version = "2.1.0" @@ -7840,7 +9382,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cfd8a792c1694c6da4f68db0a9d707c72bd260994da179e6030a5dcee00bb815" dependencies = [ "blake2b_simd", - "blake2s_simd 1.0.3", + "blake2s_simd 1.0.4", "blake3", "core2", "digest 0.10.7", @@ -7873,7 +9415,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "67996849749d25f1da9f238e8ace2ece8f9d6bdf3f9750aaf2ae7de3a5cad8ea" dependencies = [ "blake2b_simd", - "blake2s_simd 1.0.3", + "blake2s_simd 1.0.4", "blake3", "core2", "digest 0.10.7", @@ -7892,7 +9434,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1d6d4752e6230d8ef7adf7bd5d8c4b1f6561c1014c5ba9a37445ccefe18aa1db" dependencies = [ "proc-macro-crate 1.1.3", - "proc-macro-error", + "proc-macro-error 1.0.4", "proc-macro2", "quote", "syn 1.0.109", @@ -7919,7 +9461,7 @@ dependencies = [ "proc-macro-crate 3.4.0", "proc-macro2", "quote", - "syn 2.0.108", + "syn 2.0.114", "synstructure 0.13.2", ] @@ -7943,6 +9485,27 @@ dependencies = [ "unsigned-varint 0.7.2", ] +[[package]] +name = "n0-future" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7bb0e5d99e681ab3c938842b96fcb41bf8a7bb4bfdb11ccbd653a7e83e06c794" +dependencies = [ + "cfg_aliases", + "derive_more 1.0.0", + "futures-buffered", + "futures-lite 2.6.1", + "futures-util", + "js-sys", + "pin-project", + "send_wrapper 0.6.0", + "tokio", + "tokio-util 0.7.18", + "wasm-bindgen", + "wasm-bindgen-futures", + "web-time", +] + [[package]] name = "nalgebra" version = "0.33.2" @@ -7960,6 +9523,15 @@ dependencies = [ "typenum", ] +[[package]] +name = "nanorand" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6a51313c5820b0b02bd422f4b44776fbf47961755c74ce64afc73bfad10226c3" +dependencies = [ + "getrandom 0.2.17", +] + [[package]] name = "native-tls" version = "0.2.14" @@ -7969,7 +9541,7 @@ dependencies = [ "libc", "log", "openssl", - "openssl-probe", + "openssl-probe 0.1.6", "openssl-sys", "schannel", "security-framework 2.11.1", @@ -7986,10 +9558,10 @@ dependencies = [ "bellpepper", "bellpepper-core", "blake2s_simd 0.5.11", - "blstrs", + "blstrs 0.7.1", "byteorder", "ff 0.13.1", - "generic-array 0.14.9", + "generic-array 0.14.7", "itertools 0.8.2", "log", "pasta_curves", @@ -7997,6 +9569,47 @@ dependencies = [ "trait-set", ] +[[package]] +name = "nested_enum_utils" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"8f256ef99e7ac37428ef98c89bef9d84b590172de4bbfbe81b68a4cd3abadb32" +dependencies = [ + "proc-macro-crate 3.4.0", + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "nested_enum_utils" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b1d5475271bdd36a4a2769eac1ef88df0f99428ea43e52dfd8b0ee5cb674695f" +dependencies = [ + "proc-macro-crate 3.4.0", + "proc-macro2", + "quote", + "syn 2.0.114", +] + +[[package]] +name = "netdev" +version = "0.31.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f901362e84cd407be6f8cd9d3a46bccf09136b095792785401ea7d283c79b91d" +dependencies = [ + "dlopen2", + "ipnet", + "libc", + "netlink-packet-core", + "netlink-packet-route 0.17.1", + "netlink-sys", + "once_cell", + "system-configuration 0.6.1", + "windows-sys 0.52.0", +] + [[package]] name = "netlink-packet-core" version = "0.7.0" @@ -8022,6 +9635,21 @@ dependencies = [ "netlink-packet-utils", ] +[[package]] +name = "netlink-packet-route" +version = "0.23.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0800eae8638a299eaa67476e1c6b6692922273e0f7939fd188fc861c837b9cd2" +dependencies = [ + "anyhow", + "bitflags 2.10.0", + "byteorder", + "libc", + "log", + "netlink-packet-core", + "netlink-packet-utils", +] + [[package]] name = "netlink-packet-utils" version = "0.5.2" @@ -8062,6 +9690,37 @@ dependencies = [ "tokio", ] +[[package]] +name = "netwatch" +version = "0.5.0" +dependencies = [ + "atomic-waker", + "bytes", + "cfg_aliases", + "derive_more 1.0.0", + "iroh-quinn-udp", + "js-sys", + "libc", + "n0-future", + "nested_enum_utils 0.2.3", + "netdev", + "netlink-packet-core", + "netlink-packet-route 0.23.0", + "netlink-proto", + "netlink-sys", + "serde", + "snafu", + "socket2 0.5.10", + "time", + "tokio", + "tokio-util 0.7.18", + "tracing", + "web-sys", + "windows 0.59.0", + "windows-result 0.3.4", + "wmi", +] + [[package]] name = "new_debug_unreachable" version = "1.0.6" @@ -8079,6 +9738,12 @@ dependencies = [ "libc", ] +[[package]] +name = "no-std-net" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "43794a0ace135be66a25d3ae77d41b91615fb68ae937f904090203e81f755b65" + [[package]] name = "nohash-hasher" version = "0.2.0" @@ -8095,6 +9760,21 @@ dependencies = [ "minimal-lexical", ] +[[package]] +name = "ntimestamp" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c50f94c405726d3e0095e89e72f75ce7f6587b94a8bd8dc8054b73f65c0fd68c" +dependencies = [ + "base32", + "document-features", + "getrandom 0.2.17", + "httpdate", + "js-sys", + "once_cell", + "serde", +] + [[package]] name = "nu-ansi-term" version = "0.50.3" @@ -8131,6 +9811,22 @@ dependencies = [ "serde", ] +[[package]] +name = "num-bigint-dig" +version = "0.8.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e661dda6640fad38e827a6d4a310ff4763082116fe217f279885c97f511bb0b7" +dependencies = [ + "lazy_static", + "libm", + "num-integer", + "num-iter", + "num-traits", + "rand 0.8.5", + "smallvec", + "zeroize", +] + [[package]] name = "num-complex" version = "0.4.6" @@ -8166,7 +9862,7 @@ checksum = "ed3955f1a9c7c0c15e092f9c887db08b1fc683305fdf6eb6684f22555355e202" dependencies = [ "proc-macro2", "quote", - "syn 2.0.108", + "syn 2.0.114", ] [[package]] @@ -8240,7 +9936,7 @@ dependencies = [ "proc-macro-crate 3.4.0", "proc-macro2", "quote", - "syn 2.0.108", + "syn 2.0.114", ] [[package]] @@ -8260,7 +9956,7 @@ 
checksum = "62948e14d923ea95ea2c7c86c71013138b66525b86bdc08d2dcc262bdb497b87" dependencies = [ "crc32fast", "hashbrown 0.15.5", - "indexmap 2.12.0", + "indexmap 2.13.0", "memchr", ] @@ -8278,6 +9974,10 @@ name = "once_cell" version = "1.21.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "42f5e15c9953c5e4ccceeb2e7382a716482c34515315f7b03532b8b4e8393d2d" +dependencies = [ + "critical-section", + "portable-atomic", +] [[package]] name = "once_cell_polyfill" @@ -8285,6 +9985,12 @@ version = "1.70.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "384b8ab6d37215f3c5301a95a4accb5d64aa607f1fcb26a11b5303878451b4fe" +[[package]] +name = "oneshot" +version = "0.1.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b4ce411919553d3f9fa53a0880544cda985a112117a0444d5ff1e870a893d6ea" + [[package]] name = "opaque-debug" version = "0.3.1" @@ -8318,9 +10024,9 @@ dependencies = [ [[package]] name = "openssl" -version = "0.10.74" +version = "0.10.75" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "24ad14dd45412269e1a30f52ad8f0664f0f4f4a89ee8fe28c3b3527021ebb654" +checksum = "08838db121398ad17ab8531ce9de97b244589089e290a384c900cb9ff7434328" dependencies = [ "bitflags 2.10.0", "cfg-if", @@ -8339,7 +10045,7 @@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.108", + "syn 2.0.114", ] [[package]] @@ -8348,6 +10054,12 @@ version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d05e27ee213611ffe7d6348b942e8f942b37114c00cc03cec254295a4a17852e" +[[package]] +name = "openssl-probe" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9f50d9b3dabb09ecd771ad0aa242ca6894994c130308ca3d7684634df8037391" + [[package]] name = "openssl-src" version = "300.5.4+3.5.4" @@ -8359,9 +10071,9 @@ dependencies = [ [[package]] name = "openssl-sys" -version = "0.9.110" +version = "0.9.111" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0a9f0075ba3c21b09f8e8b2026584b1d18d49388648f2fbbf3c97ea8deced8e2" +checksum = "82cab2d520aa75e3c58898289429321eb788c3106963d0dc886ec7a5f4adc321" dependencies = [ "cc", "libc", @@ -8398,6 +10110,53 @@ version = "4.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9c6901729fa79e91a0913333229e9ca5dc725089d1c363b2f4b4760709dc4a52" +[[package]] +name = "p256" +version = "0.13.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c9863ad85fa8f4460f9c48cb909d38a0d689dba1f6f6988a5e3e0d31071bcd4b" +dependencies = [ + "ecdsa 0.16.9", + "elliptic-curve 0.13.8", + "primeorder", + "sha2 0.10.9", +] + +[[package]] +name = "p384" +version = "0.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fe42f1670a52a47d448f14b6a5c61dd78fce51856e68edaa38f7ae3a46b8d6b6" +dependencies = [ + "ecdsa 0.16.9", + "elliptic-curve 0.13.8", + "primeorder", + "sha2 0.10.9", +] + +[[package]] +name = "p521" +version = "0.13.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0fc9e2161f1f215afdfce23677034ae137bbd45016a880c2eb3ba8eb95f085b2" +dependencies = [ + "base16ct 0.2.0", + "ecdsa 0.16.9", + "elliptic-curve 0.13.8", + "primeorder", + "rand_core 0.6.4", + "sha2 0.10.9", +] + +[[package]] +name = "pairing" +version = "0.22.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"135590d8bdba2b31346f9cd1fb2a912329f5135e832a4f422942eb6ead8b6b3b" +dependencies = [ + "group 0.12.1", +] + [[package]] name = "pairing" version = "0.23.0" @@ -8432,7 +10191,7 @@ dependencies = [ "proc-macro-crate 3.4.0", "proc-macro2", "quote", - "syn 2.0.108", + "syn 2.0.114", ] [[package]] @@ -8459,7 +10218,7 @@ checksum = "2621685985a2ebf1c516881c026032ac7deafcda1a2c9b7850dc81e3dfcb64c1" dependencies = [ "cfg-if", "libc", - "redox_syscall", + "redox_syscall 0.5.18", "smallvec", "windows-link 0.2.1", ] @@ -8595,6 +10354,15 @@ dependencies = [ "serde_core", ] +[[package]] +name = "pem-rfc7468" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "88b39c9bfcfc231068454382784bb460aae594343fb030d46e9f50a645418412" +dependencies = [ + "base64ct", +] + [[package]] name = "percent-encoding" version = "2.3.2" @@ -8603,9 +10371,9 @@ checksum = "9b4f627cb1b25917193a259e49bdad08f671f8d9708acfd5fe0a8c1455d87220" [[package]] name = "pest" -version = "2.8.3" +version = "2.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "989e7521a040efde50c3ab6bbadafbe15ab6dc042686926be59ac35d74607df4" +checksum = "2c9eb05c21a464ea704b53158d358a31e6425db2f63a1a7312268b05fe2b75f7" dependencies = [ "memchr", "ucd-trie", @@ -8613,9 +10381,9 @@ dependencies = [ [[package]] name = "pest_derive" -version = "2.8.3" +version = "2.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "187da9a3030dbafabbbfb20cb323b976dc7b7ce91fcd84f2f74d6e31d378e2de" +checksum = "68f9dbced329c441fa79d80472764b1a2c7e57123553b8519b36663a2fb234ed" dependencies = [ "pest", "pest_generator", @@ -8623,22 +10391,22 @@ dependencies = [ [[package]] name = "pest_generator" -version = "2.8.3" +version = "2.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49b401d98f5757ebe97a26085998d6c0eecec4995cad6ab7fc30ffdf4b052843" +checksum = "3bb96d5051a78f44f43c8f712d8e810adb0ebf923fc9ed2655a7f66f63ba8ee5" dependencies = [ "pest", "pest_meta", "proc-macro2", "quote", - "syn 2.0.108", + "syn 2.0.114", ] [[package]] name = "pest_meta" -version = "2.8.3" +version = "2.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72f27a2cfee9f9039c4d86faa5af122a0ac3851441a34865b8a043b46be0065a" +checksum = "602113b5b5e8621770cfd490cfd90b9f84ab29bd2b0e49ad83eb6d186cef2365" dependencies = [ "pest", "sha2 0.10.9", @@ -8651,7 +10419,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b4c5cc86750666a3ed20bdaf5ca2a0344f9c67674cae0515bec2da16fbaa47db" dependencies = [ "fixedbitset", - "indexmap 2.12.0", + "indexmap 2.13.0", ] [[package]] @@ -8661,7 +10429,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e9567389417feee6ce15dd6527a8a1ecac205ef62c2932bcf3d9f6fc5b78b414" dependencies = [ "futures", - "rustc_version", + "rustc_version 0.4.1", ] [[package]] @@ -8694,7 +10462,7 @@ dependencies = [ "phf_shared", "proc-macro2", "quote", - "syn 2.0.108", + "syn 2.0.114", ] [[package]] @@ -8723,7 +10491,7 @@ checksum = "6e918e4ff8c4549eb882f14b3a4bc8c8bc93de829416eacf579f1207a8fbf861" dependencies = [ "proc-macro2", "quote", - "syn 2.0.108", + "syn 2.0.114", ] [[package]] @@ -8749,6 +10517,48 @@ dependencies = [ "futures-io", ] +[[package]] +name = "pkarr" +version = "3.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5eb1f2f4311bae1da11f930c804c724c9914cf55ae51a9ee0440fc98826984f7" +dependencies = [ + "async-compat", + "base32", 
+ "bytes", + "cfg_aliases", + "document-features", + "dyn-clone", + "ed25519-dalek", + "futures-buffered", + "futures-lite 2.6.1", + "getrandom 0.2.17", + "log", + "lru 0.13.0", + "ntimestamp", + "reqwest 0.12.28", + "self_cell", + "serde", + "sha1_smol", + "simple-dns", + "thiserror 2.0.17", + "tokio", + "tracing", + "url", + "wasm-bindgen-futures", +] + +[[package]] +name = "pkcs1" +version = "0.7.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c8ffb9f10fa047879315e6625af03c164b16962a5368d724ed16323b68ace47f" +dependencies = [ + "der 0.7.10", + "pkcs8 0.10.2", + "spki 0.7.3", +] + [[package]] name = "pkcs8" version = "0.9.0" @@ -8775,6 +10585,48 @@ version = "0.3.32" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7edddbd0b52d732b21ad9a5fab5c704c14cd949e5e9a1ec5929a24fded1b904c" +[[package]] +name = "pnet_base" +version = "0.34.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fe4cf6fb3ab38b68d01ab2aea03ed3d1132b4868fa4e06285f29f16da01c5f4c" +dependencies = [ + "no-std-net", +] + +[[package]] +name = "pnet_macros" +version = "0.34.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "688b17499eee04a0408aca0aa5cba5fc86401d7216de8a63fdf7a4c227871804" +dependencies = [ + "proc-macro2", + "quote", + "regex", + "syn 2.0.114", +] + +[[package]] +name = "pnet_macros_support" +version = "0.34.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eea925b72f4bd37f8eab0f221bbe4c78b63498350c983ffa9dd4bcde7e030f56" +dependencies = [ + "pnet_base", +] + +[[package]] +name = "pnet_packet" +version = "0.34.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a9a005825396b7fe7a38a8e288dbc342d5034dac80c15212436424fef8ea90ba" +dependencies = [ + "glob", + "pnet_base", + "pnet_macros", + "pnet_macros_support", +] + [[package]] name = "polling" version = "2.8.0" @@ -8801,7 +10653,7 @@ dependencies = [ "concurrent-queue", "hermit-abi 0.5.2", "pin-project-lite", - "rustix 1.1.2", + "rustix 1.1.3", "windows-sys 0.61.2", ] @@ -8828,6 +10680,43 @@ dependencies = [ "universal-hash", ] +[[package]] +name = "portable-atomic" +version = "1.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f89776e4d69bb58bc6993e99ffa1d11f228b839984854c7daeb5d37f87cbe950" + +[[package]] +name = "portmapper" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7d6db66007eac4a0ec8331d0d20c734bd64f6445d64bbaf0d0a27fea7a054e36" +dependencies = [ + "base64 0.22.1", + "bytes", + "derive_more 1.0.0", + "futures-lite 2.6.1", + "futures-util", + "hyper-util", + "igd-next 0.16.2", + "iroh-metrics", + "libc", + "nested_enum_utils 0.2.3", + "netwatch", + "num_enum", + "rand 0.8.5", + "serde", + "smallvec", + "snafu", + "socket2 0.5.10", + "time", + "tokio", + "tokio-util 0.7.18", + "tower-layer", + "tracing", + "url", +] + [[package]] name = "positioned-io" version = "0.3.5" @@ -8848,9 +10737,22 @@ dependencies = [ "cobs", "embedded-io 0.4.0", "embedded-io 0.6.1", + "heapless", + "postcard-derive", "serde", ] +[[package]] +name = "postcard-derive" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e0232bd009a197ceec9cc881ba46f727fcd8060a2d8d6a9dde7a69030a6fe2bb" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.114", +] + [[package]] name = "potential_utf" version = "0.1.4" @@ -8875,6 +10777,40 @@ dependencies = [ "zerocopy", ] 
+[[package]] +name = "precis-core" +version = "0.1.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c2e7b31f132e0c6f8682cfb7bf4a5340dbe925b7986618d0826a56dfe0c8e56" +dependencies = [ + "precis-tools", + "ucd-parse", + "unicode-normalization", +] + +[[package]] +name = "precis-profiles" +version = "0.1.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "31e2768890a47af73a032af9f0cedbddce3c9d06cf8de201d5b8f2436ded7674" +dependencies = [ + "lazy_static", + "precis-core", + "precis-tools", + "unicode-normalization", +] + +[[package]] +name = "precis-tools" +version = "0.1.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6cc1eb2d5887ac7bfd2c0b745764db89edb84b856e4214e204ef48ef96d10c4a" +dependencies = [ + "lazy_static", + "regex", + "ucd-parse", +] + [[package]] name = "precomputed-hash" version = "0.1.1" @@ -8908,7 +10844,16 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "479ca8adacdd7ce8f1fb39ce9ecccbfe93a3f1344b3d0d97f20bc0196208f62b" dependencies = [ "proc-macro2", - "syn 2.0.108", + "syn 2.0.114", +] + +[[package]] +name = "primeorder" +version = "0.13.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "353e1ca18966c16d9deb1c69278edbc5f194139612772bd9537af60ac231e1e6" +dependencies = [ + "elliptic-curve 0.13.8", ] [[package]] @@ -8951,7 +10896,20 @@ version = "3.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "219cb19e96be00ab2e37d6e299658a0cfa83e52429179969b0f0121b4ac46983" dependencies = [ - "toml_edit 0.23.7", + "toml_edit 0.23.10+spec-1.0.0", +] + +[[package]] +name = "proc-macro-error" +version = "0.4.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "18f33027081eba0a6d8aba6d1b1c3a3be58cbb12106341c2d5759fcd9b5277e7" +dependencies = [ + "proc-macro-error-attr 0.4.12", + "proc-macro2", + "quote", + "syn 1.0.109", + "version_check", ] [[package]] @@ -8960,10 +10918,23 @@ version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c" dependencies = [ - "proc-macro-error-attr", + "proc-macro-error-attr 1.0.4", + "proc-macro2", + "quote", + "syn 1.0.109", + "version_check", +] + +[[package]] +name = "proc-macro-error-attr" +version = "0.4.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a5b4b77fdb63c1eca72173d68d24501c54ab1269409f6b672c85deb18af69de" +dependencies = [ "proc-macro2", "quote", "syn 1.0.109", + "syn-mid", "version_check", ] @@ -8978,11 +10949,39 @@ dependencies = [ "version_check", ] +[[package]] +name = "proc-macro-error-attr2" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "96de42df36bb9bba5542fe9f1a054b8cc87e172759a1868aa05c1f3acc89dfc5" +dependencies = [ + "proc-macro2", + "quote", +] + +[[package]] +name = "proc-macro-error2" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "11ec05c52be0a07b08061f7dd003e7d7092e0472bc731b4af7bb1ef876109802" +dependencies = [ + "proc-macro-error-attr2", + "proc-macro2", + "quote", + "syn 2.0.114", +] + +[[package]] +name = "proc-macro-hack" +version = "0.5.20+deprecated" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc375e1527247fe1a97d8b7156678dfe7c1af2fc075c9a4db3690ecd2a148068" + [[package]] name = "proc-macro2" -version = "1.0.103" +version = "1.0.105" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ee95bc4ef87b8d5ba32e8b7714ccc834865276eab0aed5c9958d00ec45f49e8" +checksum = "535d180e0ecab6268a3e718bb9fd44db66bbbc256257165fc699dadf70d16fe7" dependencies = [ "unicode-ident", ] @@ -9047,7 +11046,7 @@ checksum = "440f724eba9f6996b75d63681b0a92b06947f1457076d503a4d2e2c8f56442b8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.108", + "syn 2.0.114", ] [[package]] @@ -9093,7 +11092,7 @@ dependencies = [ "tiny-keccak", "tokio", "tracing", - "tracing-subscriber 0.3.20", + "tracing-subscriber 0.3.22", "url", ] @@ -9103,12 +11102,16 @@ version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bee689443a2bd0a16ab0348b52ee43e3b2d1b1f931c8aa5c9f8de4c86fbe8c40" dependencies = [ + "bit-set 0.8.0", + "bit-vec 0.8.0", "bitflags 2.10.0", "num-traits", "rand 0.9.2", "rand_chacha 0.9.0", "rand_xorshift 0.4.0", "regex-syntax", + "rusty-fork", + "tempfile", "unarray", ] @@ -9224,6 +11227,52 @@ dependencies = [ "wasmtime-math", ] +[[package]] +name = "quic-rpc" +version = "0.20.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "18bad98bd048264ceb1361ff9d77a031535d8c1e3fe8f12c6966ec825bf68eb7" +dependencies = [ + "anyhow", + "bytes", + "document-features", + "flume 0.11.1", + "futures-lite 2.6.1", + "futures-sink", + "futures-util", + "iroh-quinn", + "pin-project", + "postcard", + "rcgen 0.13.2", + "rustls 0.23.36", + "serde", + "slab", + "smallvec", + "time", + "tokio", + "tokio-serde", + "tokio-util 0.7.18", + "tracing", +] + +[[package]] +name = "quic-rpc-derive" +version = "0.20.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "abf13f1bced5f2f2642d9d89a29d75f2d81ab34c4acfcb434c209d6094b9b2b7" +dependencies = [ + "proc-macro2", + "quic-rpc", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "quick-error" +version = "1.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" + [[package]] name = "quick-protobuf" version = "0.8.1" @@ -9288,7 +11337,7 @@ checksum = "f71ee38b42f8459a88d3362be6f9b841ad2d5421844f61eb1c59c11bff3ac14a" dependencies = [ "proc-macro2", "quote", - "syn 2.0.108", + "syn 2.0.114", ] [[package]] @@ -9306,7 +11355,7 @@ dependencies = [ "quinn-proto", "quinn-udp", "rustc-hash 2.1.1", - "rustls 0.23.34", + "rustls 0.23.36", "socket2 0.6.1", "thiserror 2.0.17", "tokio", @@ -9326,7 +11375,7 @@ dependencies = [ "rand 0.9.2", "ring 0.17.14", "rustc-hash 2.1.1", - "rustls 0.23.34", + "rustls 0.23.36", "rustls-pki-types", "slab", "thiserror 2.0.17", @@ -9351,13 +11400,23 @@ dependencies = [ [[package]] name = "quote" -version = "1.0.41" +version = "1.0.43" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce25767e7b499d1b604768e7cde645d14cc8584231ea6b295e9c9eb22c02e1d1" +checksum = "dc74d9a594b72ae6656596548f56f667211f8a97b3d4c3d467150794690dc40a" dependencies = [ "proc-macro2", ] +[[package]] +name = "quoted-string-parser" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0dc75379cdb451d001f1cb667a9f74e8b355e9df84cc5193513cbe62b96fc5e9" +dependencies = [ + "pest", + "pest_derive", +] + [[package]] name = "r-efi" version = "5.3.0" @@ -9379,6 +11438,7 @@ dependencies = [ "libc", "rand_chacha 0.3.1", "rand_core 0.6.4", + "serde", ] [[package]] @@ -9388,7 +11448,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"6db2770f06117d490610c7488547d543617b21bfa07796d7a12f6f1bd53850d1" dependencies = [ "rand_chacha 0.9.0", - "rand_core 0.9.3", + "rand_core 0.9.5", ] [[package]] @@ -9408,7 +11468,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d3022b5f1df60f26e1ffddd6c66e8aa15de382ae63b3a0c1bfc0e4d3e3f325cb" dependencies = [ "ppv-lite86", - "rand_core 0.9.3", + "rand_core 0.9.5", ] [[package]] @@ -9417,14 +11477,14 @@ version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" dependencies = [ - "getrandom 0.2.16", + "getrandom 0.2.17", ] [[package]] name = "rand_core" -version = "0.9.3" +version = "0.9.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "99d9a13982dcf210057a8a78572b2217b667c3beacbf3a0d8b454f6f82837d38" +checksum = "76afc826de14238e6e8c374ddcc1fa19e374fd8dd986b0d2af0d02377261d83c" dependencies = [ "getrandom 0.3.4", ] @@ -9454,16 +11514,28 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "513962919efc330f829edb2535844d1b912b0fbe2ca165d613e4e8788bb05a5a" dependencies = [ - "rand_core 0.9.3", + "rand_core 0.9.5", +] + +[[package]] +name = "rand_xoshiro" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6f97cdb2a36ed4183de61b2f824cc45c9f1037f28afe0a322e9fff4c108b5aaa" +dependencies = [ + "rand_core 0.6.4", ] [[package]] -name = "rand_xoshiro" -version = "0.6.0" +name = "range-collections" +version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6f97cdb2a36ed4183de61b2f824cc45c9f1037f28afe0a322e9fff4c108b5aaa" +checksum = "861706ea9c4aded7584c5cd1d241cec2ea7f5f50999f236c22b65409a1f1a0d0" dependencies = [ - "rand_core 0.6.4", + "binary-merge", + "inplace-vec-builder", + "ref-cast", + "smallvec", ] [[package]] @@ -9504,6 +11576,68 @@ dependencies = [ "yasna", ] +[[package]] +name = "rcgen" +version = "0.13.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75e669e5202259b5314d1ea5397316ad400819437857b90861765f24c4cf80a2" +dependencies = [ + "pem 3.0.6", + "ring 0.17.14", + "rustls-pki-types", + "time", + "yasna", +] + +[[package]] +name = "recall_entangler" +version = "0.1.0" +source = "git+https://github.com/recallnet/entanglement.git?rev=aee1c675ff05e5cde4771a2e2eb3ac4dab8476bc#aee1c675ff05e5cde4771a2e2eb3ac4dab8476bc" +dependencies = [ + "anyhow", + "async-trait", + "bytes", + "cid 0.10.1", + "futures", + "iroh", + "iroh-blobs", + "recall_entangler_storage", + "serde", + "serde_json", + "thiserror 2.0.17", + "tokio", + "tokio-stream", +] + +[[package]] +name = "recall_entangler_storage" +version = "0.1.0" +source = "git+https://github.com/recallnet/entanglement.git?rev=aee1c675ff05e5cde4771a2e2eb3ac4dab8476bc#aee1c675ff05e5cde4771a2e2eb3ac4dab8476bc" +dependencies = [ + "anyhow", + "async-trait", + "bytes", + "cid 0.10.1", + "futures", + "futures-lite 2.6.1", + "iroh", + "iroh-blobs", + "serde", + "serde_json", + "thiserror 2.0.17", + "tokio", + "uuid 1.19.0", +] + +[[package]] +name = "redb" +version = "2.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ea0a72cd7140de9fc3e318823b883abf819c20d478ec89ce880466dc2ef263c6" +dependencies = [ + "libc", +] + [[package]] name = "redox_syscall" version = "0.5.18" @@ -9513,13 +11647,22 @@ dependencies = [ "bitflags 2.10.0", ] +[[package]] +name = "redox_syscall" +version = "0.7.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "49f3fe0889e69e2ae9e41f4d6c4c0181701d00e4697b356fb1f74173a5e0ee27" +dependencies = [ + "bitflags 2.10.0", +] + [[package]] name = "redox_users" version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ba009ff324d1fc1b900bd1fdb31564febe58a8ccc8a6fdbb93b543d33b13ca43" dependencies = [ - "getrandom 0.2.16", + "getrandom 0.2.17", "libredox", "thiserror 1.0.69", ] @@ -9541,7 +11684,19 @@ checksum = "b7186006dcb21920990093f30e3dea63b7d6e977bf1256be20c3563a5db070da" dependencies = [ "proc-macro2", "quote", - "syn 2.0.108", + "syn 2.0.114", +] + +[[package]] +name = "reflink-copy" +version = "0.1.28" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "23bbed272e39c47a095a5242218a67412a220006842558b03fe2935e8f3d7b92" +dependencies = [ + "cfg-if", + "libc", + "rustix 1.1.3", + "windows 0.62.2", ] [[package]] @@ -9581,6 +11736,12 @@ dependencies = [ "regex-syntax", ] +[[package]] +name = "regex-lite" +version = "0.1.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8d942b98df5e658f56f20d592c7f868833fe38115e65c33003d8cd224b0155da" + [[package]] name = "regex-syntax" version = "0.8.8" @@ -9650,20 +11811,20 @@ dependencies = [ [[package]] name = "reqwest" -version = "0.12.24" +version = "0.12.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d0946410b9f7b082a427e4ef5c8ff541a88b357bc6c637c40db3a68ac70a36f" +checksum = "eddd3ca559203180a307f12d114c268abf583f59b03cb906fd0b3ff8646c1147" dependencies = [ "base64 0.22.1", "bytes", "encoding_rs", "futures-core", "futures-util", - "h2 0.4.12", - "http 1.3.1", + "h2 0.4.13", + "http 1.4.0", "http-body 1.0.1", "http-body-util", - "hyper 1.7.0", + "hyper 1.8.1", "hyper-rustls 0.27.7", "hyper-tls 0.6.0", "hyper-util", @@ -9674,7 +11835,7 @@ dependencies = [ "percent-encoding", "pin-project-lite", "quinn", - "rustls 0.23.34", + "rustls 0.23.36", "rustls-pki-types", "serde", "serde_json", @@ -9683,23 +11844,23 @@ dependencies = [ "tokio", "tokio-native-tls", "tokio-rustls 0.26.4", - "tokio-util 0.7.17", - "tower 0.5.2", - "tower-http 0.6.6", + "tokio-util 0.7.18", + "tower 0.5.3", + "tower-http 0.6.8", "tower-service", "url", "wasm-bindgen", "wasm-bindgen-futures", "wasm-streams", "web-sys", - "webpki-roots 1.0.4", + "webpki-roots 1.0.5", ] [[package]] name = "resolv-conf" -version = "0.7.5" +version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b3789b30bd25ba102de4beabd95d21ac45b69b1be7d14522bab988c526d6799" +checksum = "1e061d1b48cb8d38042de4ae0a7a6401009d6143dc80d2e2d6f31f0bdd6470c7" [[package]] name = "rfc6979" @@ -9745,7 +11906,7 @@ checksum = "a4689e6c2294d81e88dc6261c768b63bc4fcdb852be6d1352498b114f61383b7" dependencies = [ "cc", "cfg-if", - "getrandom 0.2.16", + "getrandom 0.2.17", "libc", "untrusted 0.9.0", "windows-sys 0.52.0", @@ -9813,6 +11974,27 @@ dependencies = [ "serde", ] +[[package]] +name = "rsa" +version = "0.9.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b8573f03f5883dcaebdfcf4725caa1ecb9c15b2ef50c43a07b816e06799bb12d" +dependencies = [ + "const-oid", + "digest 0.10.7", + "num-bigint-dig", + "num-integer", + "num-traits", + "pkcs1", + "pkcs8 0.10.2", + "rand_core 0.6.4", + "sha2 0.10.9", + "signature 2.2.0", + "spki 0.7.3", + "subtle", + "zeroize", +] + [[package]] name = "rtnetlink" version = "0.13.1" @@ -9823,7 +12005,7 @@ dependencies = [ "futures", "log", 
"netlink-packet-core", - "netlink-packet-route", + "netlink-packet-route 0.17.1", "netlink-packet-utils", "netlink-proto", "netlink-sys", @@ -9832,6 +12014,40 @@ dependencies = [ "tokio", ] +[[package]] +name = "ruint" +version = "1.17.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c141e807189ad38a07276942c6623032d3753c8859c146104ac2e4d68865945a" +dependencies = [ + "alloy-rlp", + "ark-ff 0.3.0", + "ark-ff 0.4.2", + "ark-ff 0.5.0", + "bytes", + "fastrlp 0.3.1", + "fastrlp 0.4.0", + "num-bigint", + "num-integer", + "num-traits", + "parity-scale-codec", + "primitive-types 0.12.2", + "proptest", + "rand 0.8.5", + "rand 0.9.2", + "rlp 0.5.2", + "ruint-macro", + "serde_core", + "valuable", + "zeroize", +] + +[[package]] +name = "ruint-macro" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "48fd7bd8a6377e15ad9d42a8ec25371b94ddc67abe7c8b9127bec79bebaaae18" + [[package]] name = "rust-embed" version = "6.8.1" @@ -9852,7 +12068,7 @@ dependencies = [ "proc-macro2", "quote", "rust-embed-utils", - "syn 2.0.108", + "syn 2.0.114", "walkdir", ] @@ -9900,13 +12116,22 @@ version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3e75f6a532d0fd9f7f13144f392b6ad56a32696bfcd9c78f797f16bbb6f072d6" +[[package]] +name = "rustc_version" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0dfe2087c51c460008730de8b57e6a320782fbfb312e1f4d520e6c6fae155ee" +dependencies = [ + "semver 0.11.0", +] + [[package]] name = "rustc_version" version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cfcb3a22ef46e85b45de6ee7e79d063319ebb6594faafcf1c225ea92ab6e9b92" dependencies = [ - "semver", + "semver 1.0.27", ] [[package]] @@ -9947,9 +12172,9 @@ dependencies = [ [[package]] name = "rustix" -version = "1.1.2" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd15f8a2c5551a84d56efdc1cd049089e409ac19a3072d5037a17fd70719ff3e" +checksum = "146c9e247ccc180c1f61615433868c99f3de3ae256a30a43b49f67c2d9171f34" dependencies = [ "bitflags 2.10.0", "errno", @@ -9997,9 +12222,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.23.34" +version = "0.23.36" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a9586e9ee2b4f8fab52a0048ca7334d7024eef48e2cb9407e3497bb7cab7fa7" +checksum = "c665f33d38cea657d9614f766881e4d510e0eda4239891eea56b4cadcf01801b" dependencies = [ "log", "once_cell", @@ -10016,7 +12241,7 @@ version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5a07b7c1885bd8ed3831c289b7870b13ef46fe0e856d288c30d9cc17d75a2092" dependencies = [ - "openssl-probe", + "openssl-probe 0.1.6", "rustls 0.19.1", "schannel", "security-framework 2.11.1", @@ -10028,7 +12253,7 @@ version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a9aace74cb666635c918e9c12bc0d348266037aa8eb599b5cba565709a8dff00" dependencies = [ - "openssl-probe", + "openssl-probe 0.1.6", "rustls-pemfile", "schannel", "security-framework 2.11.1", @@ -10036,11 +12261,11 @@ dependencies = [ [[package]] name = "rustls-native-certs" -version = "0.8.2" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9980d917ebb0c0536119ba501e90834767bffc3d60641457fd84a1f3fd337923" +checksum = "612460d5f7bea540c490b2b6395d8e34a953e52b491accd6c86c8164c5932a63" dependencies = [ - "openssl-probe", + 
"openssl-probe 0.2.0", "rustls-pki-types", "schannel", "security-framework 3.5.1", @@ -10057,9 +12282,9 @@ dependencies = [ [[package]] name = "rustls-pki-types" -version = "1.13.0" +version = "1.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94182ad936a0c91c324cd46c6511b9510ed16af436d7b5bab34beab0afd55f7a" +checksum = "21e6f2ab2928ca4291b86736a8bd920a277a399bba1589409d72154ff87c1282" dependencies = [ "web-time", "zeroize", @@ -10076,8 +12301,8 @@ dependencies = [ "jni", "log", "once_cell", - "rustls 0.23.34", - "rustls-native-certs 0.8.2", + "rustls 0.23.36", + "rustls-native-certs 0.8.3", "rustls-platform-verifier-android", "rustls-webpki 0.103.8", "security-framework 3.5.1", @@ -10102,6 +12327,17 @@ dependencies = [ "untrusted 0.9.0", ] +[[package]] +name = "rustls-webpki" +version = "0.102.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "64ca1bc8749bd4cf37b5ce386cc146580777b4e8572c7b97baf22c83f444bee9" +dependencies = [ + "ring 0.17.14", + "rustls-pki-types", + "untrusted 0.9.0", +] + [[package]] name = "rustls-webpki" version = "0.103.8" @@ -10119,6 +12355,18 @@ version = "1.0.22" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b39cdef0fa800fc44525c84ccb54a029961a8215f9619753635a9c0d2538d46d" +[[package]] +name = "rusty-fork" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cc6bf79ff24e648f6da1f8d1f011e9cac26491b619e6b9280f2b47f1774e6ee2" +dependencies = [ + "fnv", + "quick-error", + "tempfile", + "wait-timeout", +] + [[package]] name = "rw-stream-sink" version = "0.4.0" @@ -10132,9 +12380,9 @@ dependencies = [ [[package]] name = "ryu" -version = "1.0.20" +version = "1.0.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28d3b2b1366ec20994f1fd18c3c594f05c5dd4bc44d8bb0c1c632c8d6829481f" +checksum = "a50f4cf475b65d88e057964e0e9bb1f0aa9bbb2036dc65c64596b42932536984" [[package]] name = "safe_arch" @@ -10184,7 +12432,7 @@ dependencies = [ "proc-macro-crate 3.4.0", "proc-macro2", "quote", - "syn 2.0.108", + "syn 2.0.114", ] [[package]] @@ -10219,9 +12467,9 @@ dependencies = [ [[package]] name = "schemars" -version = "1.0.5" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1317c3bf3e7df961da95b0a56a172a02abead31276215a0497241a7624b487ce" +checksum = "54e910108742c57a770f492731f99be216a52fadd361b06c8fb59d74ccc267d2" dependencies = [ "dyn-clone", "ref-cast", @@ -10287,7 +12535,7 @@ checksum = "3be24c1842290c45df0a7bf069e0c268a747ad05a192f2fd7dcfdbc1cba40928" dependencies = [ "base16ct 0.1.1", "der 0.6.1", - "generic-array 0.14.9", + "generic-array 0.14.7", "pkcs8 0.9.0", "subtle", "zeroize", @@ -10301,7 +12549,7 @@ checksum = "d3e97a565f76233a6003f9f5c54be1d9c5bdfa3eccfb189469f11ec4901c47dc" dependencies = [ "base16ct 0.2.0", "der 0.7.10", - "generic-array 0.14.9", + "generic-array 0.14.7", "pkcs8 0.10.2", "subtle", "zeroize", @@ -10343,6 +12591,21 @@ dependencies = [ "libc", ] +[[package]] +name = "self_cell" +version = "1.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b12e76d157a900eb52e81bc6e9f3069344290341720e9178cde2407113ac8d89" + +[[package]] +name = "semver" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f301af10236f6df4160f7c3f04eec6dbc70ace82d23326abad5edee88801c6b6" +dependencies = [ + "semver-parser", +] + [[package]] name = "semver" version = "1.0.27" @@ -10353,6 
+12616,15 @@ dependencies = [ "serde_core", ] +[[package]] +name = "semver-parser" +version = "0.10.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9900206b54a3527fdc7b8a938bffd94a568bac4f4aa8113b209df75a09c0dec2" +dependencies = [ + "pest", +] + [[package]] name = "send_wrapper" version = "0.4.0" @@ -10384,6 +12656,15 @@ dependencies = [ "serde", ] +[[package]] +name = "serde-error" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "342110fb7a5d801060c885da03bf91bfa7c7ca936deafcc64bb6706375605d47" +dependencies = [ + "serde", +] + [[package]] name = "serde_bytes" version = "0.11.19" @@ -10421,7 +12702,7 @@ checksum = "d540f220d3187173da220f885ab66608367b6574e925011a9353e4badda91d79" dependencies = [ "proc-macro2", "quote", - "syn 2.0.108", + "syn 2.0.114", ] [[package]] @@ -10450,15 +12731,15 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.145" +version = "1.0.149" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "402a6f66d8c709116cf22f558eab210f5a50187f702eb4d7e5ef38d9a7f1c79c" +checksum = "83fc039473c5595ace860d8c4fafa220ff474b3fc6bfdb4293327f1a37e94d86" dependencies = [ "itoa", "memchr", - "ryu", "serde", "serde_core", + "zmij", ] [[package]] @@ -10480,7 +12761,7 @@ checksum = "175ee3e80ae9982737ca543e96133087cbd9a485eecc3bc4de9c1a37b47ea59c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.108", + "syn 2.0.114", ] [[package]] @@ -10494,9 +12775,9 @@ dependencies = [ [[package]] name = "serde_spanned" -version = "1.0.3" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e24345aa0fe688594e73770a5f6d1b216508b4f93484c0026d521acd30134392" +checksum = "f8bbf91e5a4d6315eee45e704372590b30e260ee83af6639d64557f51b067776" dependencies = [ "serde_core", ] @@ -10540,7 +12821,7 @@ checksum = "ec3a1e7d2eadec84deabd46ae061bf480a91a6bce74d25dad375bd656f2e19d8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.108", + "syn 2.0.114", ] [[package]] @@ -10573,17 +12854,17 @@ dependencies = [ [[package]] name = "serde_with" -version = "3.15.1" +version = "3.16.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aa66c845eee442168b2c8134fec70ac50dc20e760769c8ba0ad1319ca1959b04" +checksum = "4fa237f2807440d238e0364a218270b98f767a00d3dada77b1c53ae88940e2e7" dependencies = [ "base64 0.22.1", "chrono", "hex", "indexmap 1.9.3", - "indexmap 2.12.0", + "indexmap 2.13.0", "schemars 0.9.0", - "schemars 1.0.5", + "schemars 1.2.0", "serde_core", "serde_json", "time", @@ -10598,7 +12879,7 @@ dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.108", + "syn 2.0.114", ] [[package]] @@ -10607,20 +12888,31 @@ version = "0.9.34+deprecated" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a8b1a1a2ebf674015cc02edccce75287f1a0130d394307b36743c2f5d504b47" dependencies = [ - "indexmap 2.12.0", + "indexmap 2.13.0", "itoa", "ryu", "serde", "unsafe-libyaml", ] +[[package]] +name = "serdect" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a84f14a19e9a014bb9f4512488d9829a68e04ecabffb0f9904cd1ace94598177" +dependencies = [ + "base16ct 0.2.0", + "serde", +] + [[package]] name = "serial_test" -version = "3.2.0" +version = "3.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b258109f244e1d6891bf1053a55d63a5cd4f8f4c30cf9a1280989f80e7a1fa9" +checksum = 
"0d0b343e184fc3b7bb44dff0705fffcf4b3756ba6aff420dddd8b24ca145e555" dependencies = [ - "futures", + "futures-executor", + "futures-util", "log", "once_cell", "parking_lot", @@ -10630,13 +12922,13 @@ dependencies = [ [[package]] name = "serial_test_derive" -version = "3.2.0" +version = "3.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d69265a08751de7844521fd15003ae0a888e035773ba05695c5c759a6f89eef" +checksum = "6f50427f258fb77356e4cd4aa0e87e2bd2c66dbcee41dc405282cae2bfc26c83" dependencies = [ "proc-macro2", "quote", - "syn 2.0.108", + "syn 2.0.114", ] [[package]] @@ -10650,6 +12942,12 @@ dependencies = [ "digest 0.10.7", ] +[[package]] +name = "sha1_smol" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbfa15b3dddfee50a0fff136974b3e1bde555604ba463834a7eb7deb6417705d" + [[package]] name = "sha2" version = "0.9.9" @@ -10686,9 +12984,9 @@ dependencies = [ [[package]] name = "sha2raw" -version = "14.0.0" +version = "14.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90007d4997c15161e16bda035b950af95dd6ddd597c13ec676bc4aef519b466f" +checksum = "63724e248a02820435baefe81a582d39957b9f8f212c11a3c88f50c11bfdd112" dependencies = [ "byteorder", "cpufeatures", @@ -10708,6 +13006,16 @@ dependencies = [ "keccak", ] +[[package]] +name = "sha3-asm" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c28efc5e327c837aa837c59eae585fc250715ef939ac32881bcc11677cd02d46" +dependencies = [ + "cc", + "cfg-if", +] + [[package]] name = "sharded-slab" version = "0.1.7" @@ -10725,10 +13033,11 @@ checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" [[package]] name = "signal-hook-registry" -version = "1.4.6" +version = "1.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b2a4719bff48cee6b39d12c020eeb490953ad2443b7055bd0b21fca26bd8c28b" +checksum = "c4db69cba1110affc0e9f7bcd48bbf87b3f4fc7c61fc9155afd4c469eb3d6c1b" dependencies = [ + "errno", "libc", ] @@ -10767,9 +13076,24 @@ dependencies = [ [[package]] name = "simd-adler32" -version = "0.3.7" +version = "0.3.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e320a6c5ad31d271ad523dcf3ad13e2767ad8b1cb8f047f75a8aeaf8da139da2" + +[[package]] +name = "simdutf8" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e3a9fe34e3e7a50316060351f37187a3f546bce95496156754b601a5fa71b76e" + +[[package]] +name = "simple-dns" +version = "0.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d66dc143e6b11c1eddc06d5c423cfc97062865baf299914ab64caa38182078fe" +checksum = "dee851d0e5e7af3721faea1843e8015e820a234f81fda3dea9247e15bac9a86a" +dependencies = [ + "bitflags 2.10.0", +] [[package]] name = "simple_asn1" @@ -10824,13 +13148,34 @@ dependencies = [ "async-executor", "async-fs", "async-io 2.6.0", - "async-lock 3.4.1", + "async-lock 3.4.2", "async-net", "async-process", "blocking", "futures-lite 2.6.1", ] +[[package]] +name = "snafu" +version = "0.8.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6e84b3f4eacbf3a1ce05eac6763b4d629d60cbc94d632e4092c54ade71f1e1a2" +dependencies = [ + "snafu-derive", +] + +[[package]] +name = "snafu-derive" +version = "0.8.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c1c97747dbf44bb1ca44a561ece23508e99cb592e862f22222dcf42f51d1e451" +dependencies = [ + "heck 
0.5.0", + "proc-macro2", + "quote", + "syn 2.0.114", +] + [[package]] name = "snap" version = "1.1.1" @@ -10849,7 +13194,7 @@ dependencies = [ "curve25519-dalek", "rand_core 0.6.4", "ring 0.17.14", - "rustc_version", + "rustc_version 0.4.1", "sha2 0.10.9", "subtle", ] @@ -10928,6 +13273,12 @@ dependencies = [ "lock_api", ] +[[package]] +name = "spin" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d5fe4ccb98d9c292d56fec89a5e07da7fc4cf0dc11e156b41793132775d3e591" + [[package]] name = "spki" version = "0.6.0" @@ -10949,10 +13300,52 @@ dependencies = [ ] [[package]] -name = "sptr" -version = "0.3.2" +name = "sptr" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3b9b39299b249ad65f3b7e96443bad61c02ca5cd3589f46cb6d610a0fd6c0d6a" + +[[package]] +name = "ssh-cipher" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "caac132742f0d33c3af65bfcde7f6aa8f62f0e991d80db99149eb9d44708784f" +dependencies = [ + "cipher", + "ssh-encoding", +] + +[[package]] +name = "ssh-encoding" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eb9242b9ef4108a78e8cd1a2c98e193ef372437f8c22be363075233321dd4a15" +dependencies = [ + "base64ct", + "pem-rfc7468", + "sha2 0.10.9", +] + +[[package]] +name = "ssh-key" +version = "0.6.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b9b39299b249ad65f3b7e96443bad61c02ca5cd3589f46cb6d610a0fd6c0d6a" +checksum = "3b86f5297f0f04d08cabaa0f6bff7cb6aec4d9c3b49d87990d63da9d9156a8c3" +dependencies = [ + "ed25519-dalek", + "p256", + "p384", + "p521", + "rand_core 0.6.4", + "rsa", + "sec1 0.7.3", + "sha2 0.10.9", + "signature 2.2.0", + "ssh-cipher", + "ssh-encoding", + "subtle", + "zeroize", +] [[package]] name = "stable_deref_trait" @@ -10980,15 +13373,15 @@ dependencies = [ [[package]] name = "storage-proofs-core" -version = "19.0.0" +version = "19.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2a6f583102c3faa65e7aad3d1cf3ffe5e9c9699e7265141a020c2d45c937d66" +checksum = "5c337424d4965c376506bc2f8f36c26ecf71089c97b49e445755f3b0930914b1" dependencies = [ "aes", "anyhow", "bellperson", "blake2b_simd", - "blstrs", + "blstrs 0.7.1", "byteorder", "cbc", "config 0.14.1", @@ -10996,7 +13389,7 @@ dependencies = [ "filecoin-hashers", "fr32", "fs2", - "generic-array 0.14.9", + "generic-array 0.14.7", "itertools 0.13.0", "lazy_static", "log", @@ -11006,7 +13399,7 @@ dependencies = [ "rand 0.8.5", "rand_chacha 0.3.1", "rayon", - "semver", + "semver 1.0.27", "serde", "serde_json", "sha2 0.10.9", @@ -11015,15 +13408,15 @@ dependencies = [ [[package]] name = "storage-proofs-porep" -version = "19.0.0" +version = "19.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b4a21ea69adc933398389c36be5b35865dc3fac8b064bb95c7104dc7bc8426e0" +checksum = "4206bb30605aa30099d823196bf700db3e082722205d71b2813ba0f175d61966" dependencies = [ "anyhow", "bellperson", "bincode", "blake2b_simd", - "blstrs", + "blstrs 0.7.1", "byte-slice-cast", "byteorder", "chacha20", @@ -11032,7 +13425,7 @@ dependencies = [ "ff 0.13.1", "filecoin-hashers", "fr32", - "generic-array 0.14.9", + "generic-array 0.14.7", "glob", "hex", "lazy_static", @@ -11057,17 +13450,17 @@ dependencies = [ [[package]] name = "storage-proofs-post" -version = "19.0.0" +version = "19.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"b040787160b2381f1f86ac08f8789283da753e97df25e6be4ea3cc8615d5497c" +checksum = "2ea448de59cf58559ca25980f0405a187436b6250b4d279d496a8c0a079d8b50" dependencies = [ "anyhow", "bellperson", - "blstrs", + "blstrs 0.7.1", "byteorder", "ff 0.13.1", "filecoin-hashers", - "generic-array 0.14.9", + "generic-array 0.14.7", "log", "rayon", "serde", @@ -11077,17 +13470,17 @@ dependencies = [ [[package]] name = "storage-proofs-update" -version = "19.0.0" +version = "19.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1118e3f9dff7c93a68d06a17ae89bf051321278be810e4c3c24a1a88bbc0c3e7" +checksum = "28d7c2ac1de6dd9925edf1306637c194f184cc2cab72422542cb6ea74bc4c9fd" dependencies = [ "anyhow", "bellperson", - "blstrs", + "blstrs 0.7.1", "ff 0.13.1", "filecoin-hashers", "fr32", - "generic-array 0.14.9", + "generic-array 0.14.7", "lazy_static", "log", "memmap2", @@ -11154,7 +13547,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dcb5ae327f9cc13b68763b5749770cb9e048a99bd9dfdfa58d0cf05d5f64afe0" dependencies = [ "heck 0.3.3", - "proc-macro-error", + "proc-macro-error 1.0.4", "proc-macro2", "quote", "syn 1.0.109", @@ -11188,7 +13581,7 @@ dependencies = [ "proc-macro2", "quote", "rustversion", - "syn 2.0.108", + "syn 2.0.114", ] [[package]] @@ -11200,7 +13593,31 @@ dependencies = [ "heck 0.5.0", "proc-macro2", "quote", - "syn 2.0.108", + "syn 2.0.114", +] + +[[package]] +name = "stun-rs" +version = "0.1.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fb921f10397d5669e1af6455e9e2d367bf1f9cebcd6b1dd1dc50e19f6a9ac2ac" +dependencies = [ + "base64 0.22.1", + "bounded-integer", + "byteorder", + "crc", + "enumflags2", + "fallible-iterator", + "hmac-sha1", + "hmac-sha256", + "hostname-validator", + "lazy_static", + "md5", + "paste", + "precis-core", + "precis-profiles", + "quoted-string-parser", + "rand 0.9.2", ] [[package]] @@ -11237,6 +13654,22 @@ version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "734676eb262c623cec13c3155096e08d1f8f29adce39ba17948b18dad1e54142" +[[package]] +name = "surge-ping" +version = "0.8.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "30498e9c9feba213c3df6ed675bdf75519ccbee493517e7225305898c86cac05" +dependencies = [ + "hex", + "parking_lot", + "pnet_packet", + "rand 0.9.2", + "socket2 0.6.1", + "thiserror 1.0.69", + "tokio", + "tracing", +] + [[package]] name = "svm-rs" version = "0.3.5" @@ -11248,7 +13681,7 @@ dependencies = [ "hex", "once_cell", "reqwest 0.11.27", - "semver", + "semver 1.0.27", "serde", "serde_json", "sha2 0.10.9", @@ -11270,15 +13703,38 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.108" +version = "2.0.114" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da58917d35242480a05c2897064da0a80589a2a0476c9a3f2fdc83b53502e917" +checksum = "d4d107df263a3013ef9b1879b0df87d706ff80f65a86ea879bd9c31f9b307c2a" dependencies = [ "proc-macro2", "quote", "unicode-ident", ] +[[package]] +name = "syn-mid" +version = "0.5.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fea305d57546cc8cd04feb14b62ec84bf17f50e3f7b12560d7bfa9265f39d9ed" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "syn-solidity" +version = "0.8.26" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ab4e6eed052a117409a1a744c8bda9c3ea6934597cf7419f791cb7d590871c4c" +dependencies = [ + "paste", + "proc-macro2", + 
"quote", + "syn 2.0.114", +] + [[package]] name = "sync_wrapper" version = "0.1.2" @@ -11314,7 +13770,7 @@ checksum = "728a70f3dbaf5bab7f0c4b1ac8d7ae5ea60a4b5549c8a5914361c99147a709d2" dependencies = [ "proc-macro2", "quote", - "syn 2.0.108", + "syn 2.0.114", ] [[package]] @@ -11359,6 +13815,12 @@ dependencies = [ "libc", ] +[[package]] +name = "tagptr" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b2093cf4c8eb1e67749a6762251bc9cd836b6fc171623bd0a9d324d37af2417" + [[package]] name = "tap" version = "1.0.1" @@ -11378,9 +13840,9 @@ dependencies = [ [[package]] name = "target-lexicon" -version = "0.13.3" +version = "0.13.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df7f62577c25e07834649fc3b39fafdc597c0a3527dc1c60129201ccfcbaa50c" +checksum = "b1dd07eb858a2067e2f3c7155d54e929265c264e6f37efe3ee7a8d1b5a1dd0ba" [[package]] name = "target-triple" @@ -11390,14 +13852,14 @@ checksum = "591ef38edfb78ca4771ee32cf494cb8771944bee237a9b91fc9c1424ac4b777b" [[package]] name = "tempfile" -version = "3.23.0" +version = "3.24.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d31c77bdf42a745371d260a26ca7163f1e0924b64afa0b688e61b5a9fa02f16" +checksum = "655da9c7eb6305c55742045d5a8d2037996d61d8de95806335c7c86ce0f82e9c" dependencies = [ "fastrand 2.3.0", "getrandom 0.3.4", "once_cell", - "rustix 1.1.2", + "rustix 1.1.3", "windows-sys 0.61.2", ] @@ -11536,14 +13998,14 @@ dependencies = [ "bytes", "flex-error", "futures", - "getrandom 0.2.16", + "getrandom 0.2.17", "http 0.2.12", "hyper 0.14.32", "hyper-proxy", "hyper-rustls 0.22.1", "peg", "pin-project", - "semver", + "semver 1.0.27", "serde", "serde_bytes", "serde_json", @@ -11621,7 +14083,7 @@ checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" dependencies = [ "proc-macro2", "quote", - "syn 2.0.108", + "syn 2.0.114", ] [[package]] @@ -11632,7 +14094,7 @@ checksum = "3ff15c8ecd7de3849db632e14d18d2571fa09dfc5ed93479bc4485c7a517c913" dependencies = [ "proc-macro2", "quote", - "syn 2.0.108", + "syn 2.0.114", ] [[package]] @@ -11655,30 +14117,31 @@ dependencies = [ [[package]] name = "time" -version = "0.3.44" +version = "0.3.45" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "91e7d9e3bb61134e77bde20dd4825b97c010155709965fedf0f49bb138e52a9d" +checksum = "f9e442fc33d7fdb45aa9bfeb312c095964abdf596f7567261062b2a7107aaabd" dependencies = [ "deranged", "itoa", + "js-sys", "num-conv", "powerfmt", - "serde", + "serde_core", "time-core", "time-macros", ] [[package]] name = "time-core" -version = "0.1.6" +version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "40868e7c1d2f0b8d73e4a8c7f0ff63af4f6d19be117e90bd73eb1d62cf831c6b" +checksum = "8b36ee98fd31ec7426d599183e8fe26932a8dc1fb76ddb6214d05493377d34ca" [[package]] name = "time-macros" -version = "0.2.24" +version = "0.2.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30cfb0125f12d9c277f35663a0a33f8c30190f4e4574868a330595412d34ebf3" +checksum = "71e552d1249bf61ac2a52db88179fd0673def1e1ad8243a00d9ec9ed71fee3dd" dependencies = [ "num-conv", "time-core", @@ -11733,9 +14196,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.48.0" +version = "1.49.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff360e02eab121e0bc37a2d3b4d4dc622e6eda3a8e5253d5435ecf5bd4c68408" +checksum = 
"72a2903cd7736441aac9df9d7688bd0ce48edccaadf181c3b90be801e81d3d86" dependencies = [ "bytes", "libc", @@ -11756,7 +14219,7 @@ checksum = "af407857209536a95c8e56f8231ef2c2e2aff839b22e07a1ffcbc617e9db9fa5" dependencies = [ "proc-macro2", "quote", - "syn 2.0.108", + "syn 2.0.114", ] [[package]] @@ -11807,19 +14270,32 @@ version = "0.26.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1729aa945f29d91ba541258c8df89027d5792d85a8841fb65e8bf0f4ede4ef61" dependencies = [ - "rustls 0.23.34", + "rustls 0.23.36", "tokio", ] +[[package]] +name = "tokio-serde" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "caf600e7036b17782571dd44fa0a5cea3c82f60db5137f774a325a76a0d6852b" +dependencies = [ + "bytes", + "futures-core", + "futures-sink", + "pin-project", +] + [[package]] name = "tokio-stream" -version = "0.1.17" +version = "0.1.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eca58d7bba4a75707817a2c44174253f9236b2d5fbd055602e9d5c07c139a047" +checksum = "32da49809aab5c3bc678af03902d4ccddea2a87d028d86392a4b1560c6906c70" dependencies = [ "futures-core", "pin-project-lite", "tokio", + "tokio-util 0.7.18", ] [[package]] @@ -11879,16 +14355,40 @@ dependencies = [ [[package]] name = "tokio-util" -version = "0.7.17" +version = "0.7.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2efa149fe76073d6e8fd97ef4f4eca7b67f599660115591483572e406e165594" +checksum = "9ae9cec805b01e8fc3fd2fe289f89149a9b66dd16786abd8b19cfa7b48cb0098" dependencies = [ "bytes", "futures-core", "futures-io", "futures-sink", + "futures-util", "pin-project-lite", + "slab", + "tokio", +] + +[[package]] +name = "tokio-websockets" +version = "0.11.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9fcaf159b4e7a376b05b5bfd77bfd38f3324f5fce751b4213bfc7eaa47affb4e" +dependencies = [ + "base64 0.22.1", + "bytes", + "futures-core", + "futures-sink", + "getrandom 0.3.4", + "http 1.4.0", + "httparse", + "rand 0.9.2", + "ring 0.17.14", + "rustls-pki-types", + "simdutf8", "tokio", + "tokio-rustls 0.26.4", + "tokio-util 0.7.18", ] [[package]] @@ -11926,17 +14426,17 @@ dependencies = [ [[package]] name = "toml" -version = "0.9.8" +version = "0.9.11+spec-1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0dc8b1fb61449e27716ec0e1bdf0f6b8f3e8f6b05391e8497b8b6d7804ea6d8" +checksum = "f3afc9a848309fe1aaffaed6e1546a7a14de1f935dc9d89d32afd9a44bab7c46" dependencies = [ - "indexmap 2.12.0", + "indexmap 2.13.0", "serde_core", - "serde_spanned 1.0.3", - "toml_datetime 0.7.3", + "serde_spanned 1.0.4", + "toml_datetime 0.7.5+spec-1.1.0", "toml_parser", "toml_writer", - "winnow 0.7.13", + "winnow 0.7.14", ] [[package]] @@ -11950,9 +14450,9 @@ dependencies = [ [[package]] name = "toml_datetime" -version = "0.7.3" +version = "0.7.5+spec-1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2cdb639ebbc97961c51720f858597f7f24c4fc295327923af55b74c3c724533" +checksum = "92e1cfed4a3038bc5a127e35a2d360f145e1f4b971b551a2ba5fd7aedf7e1347" dependencies = [ "serde_core", ] @@ -11963,7 +14463,7 @@ version = "0.19.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1b5bb770da30e5cbfde35a2d7b9b8a2c4b8ef89548a7a6aeab5c9a576e3e7421" dependencies = [ - "indexmap 2.12.0", + "indexmap 2.13.0", "serde", "serde_spanned 0.6.9", "toml_datetime 0.6.11", @@ -11976,33 +14476,33 @@ version = "0.22.27" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "41fe8c660ae4257887cf66394862d21dbca4a6ddd26f04a3560410406a2f819a" dependencies = [ - "indexmap 2.12.0", + "indexmap 2.13.0", "serde", "serde_spanned 0.6.9", "toml_datetime 0.6.11", "toml_write", - "winnow 0.7.13", + "winnow 0.7.14", ] [[package]] name = "toml_edit" -version = "0.23.7" +version = "0.23.10+spec-1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6485ef6d0d9b5d0ec17244ff7eb05310113c3f316f2d14200d4de56b3cb98f8d" +checksum = "84c8b9f757e028cee9fa244aea147aab2a9ec09d5325a9b01e0a49730c2b5269" dependencies = [ - "indexmap 2.12.0", - "toml_datetime 0.7.3", + "indexmap 2.13.0", + "toml_datetime 0.7.5+spec-1.1.0", "toml_parser", - "winnow 0.7.13", + "winnow 0.7.14", ] [[package]] name = "toml_parser" -version = "1.0.4" +version = "1.0.6+spec-1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0cbe268d35bdb4bb5a56a2de88d0ad0eb70af5384a99d648cd4b3d04039800e" +checksum = "a3198b4b0a8e11f09dd03e133c0280504d0801269e9afa46362ffde1cbeebf44" dependencies = [ - "winnow 0.7.13", + "winnow 0.7.14", ] [[package]] @@ -12013,9 +14513,9 @@ checksum = "5d99f8c9a7727884afe522e9bd5edbfc91a3312b36a77b5fb8926e4c31a41801" [[package]] name = "toml_writer" -version = "1.0.4" +version = "1.0.6+spec-1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df8b2b54733674ad286d16267dcfc7a71ed5c776e4ac7aa3c3e2561f7c637bf2" +checksum = "ab16f14aed21ee8bfd8ec22513f7287cd4a91aa92e44edfe2c17ddd004e92607" [[package]] name = "tower" @@ -12032,7 +14532,7 @@ dependencies = [ "rand 0.8.5", "slab", "tokio", - "tokio-util 0.7.17", + "tokio-util 0.7.18", "tower-layer", "tower-service", "tracing", @@ -12040,9 +14540,9 @@ dependencies = [ [[package]] name = "tower" -version = "0.5.2" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d039ad9159c98b70ecfd540b2573b97f7f52c3e8d9f8ad57a24b916a536975f9" +checksum = "ebe5ef63511595f1344e2d5cfa636d973292adc0eec1f0ad45fae9f0851ab1d4" dependencies = [ "futures-core", "futures-util", @@ -12092,18 +14592,18 @@ dependencies = [ [[package]] name = "tower-http" -version = "0.6.6" +version = "0.6.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "adc82fd73de2a9722ac5da747f12383d2bfdb93591ee6c58486e0097890f05f2" +checksum = "d4e6559d53cc268e5031cd8429d05415bc4cb4aefc4aa5d6cc35fbf5b924a1f8" dependencies = [ "bitflags 2.10.0", "bytes", "futures-util", - "http 1.3.1", + "http 1.4.0", "http-body 1.0.1", "iri-string", "pin-project-lite", - "tower 0.5.2", + "tower 0.5.3", "tower-layer", "tower-service", ] @@ -12122,9 +14622,9 @@ checksum = "8df9b6e13f2d32c91b9bd719c00d1958837bc7dec474d94952798cc8e69eeec3" [[package]] name = "tracing" -version = "0.1.41" +version = "0.1.44" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "784e0ac535deb450455cbfa28a6f0df145ea1bb7ae51b821cf5e7927fdcfbdd0" +checksum = "63e71662fa4b2a2c3a26f570f037eb95bb1f85397f3cd8076caed2f026a6d100" dependencies = [ "log", "pin-project-lite", @@ -12134,32 +14634,32 @@ dependencies = [ [[package]] name = "tracing-appender" -version = "0.2.3" +version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3566e8ce28cc0a3fe42519fc80e6b4c943cc4c8cef275620eb8dac2d3d4e06cf" +checksum = "786d480bce6247ab75f005b14ae1624ad978d3029d9113f0a22fa1ac773faeaf" dependencies = [ "crossbeam-channel", - "thiserror 1.0.69", + "thiserror 2.0.17", "time", - 
"tracing-subscriber 0.3.20", + "tracing-subscriber 0.3.22", ] [[package]] name = "tracing-attributes" -version = "0.1.30" +version = "0.1.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81383ab64e72a7a8b8e13130c49e3dab29def6d0c7d76a03087b3cf71c5c6903" +checksum = "7490cfa5ec963746568740651ac6781f701c9c5ea257c58e057f3ba8cf69e8da" dependencies = [ "proc-macro2", "quote", - "syn 2.0.108", + "syn 2.0.114", ] [[package]] name = "tracing-core" -version = "0.1.34" +version = "0.1.36" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9d12581f227e93f094d3af2ae690a574abb8a2b9b7a96e7cfe9647b2b617678" +checksum = "db97caf9d906fbde555dd62fa95ddba9eecfd14cb388e4f491a66d74cd5fb79a" dependencies = [ "once_cell", "valuable", @@ -12182,7 +14682,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b1581020d7a273442f5b45074a6a57d5757ad0a47dac0e9f0bd57b81936f3db" dependencies = [ "tracing", - "tracing-subscriber 0.3.20", + "tracing-subscriber 0.3.22", ] [[package]] @@ -12229,9 +14729,9 @@ dependencies = [ [[package]] name = "tracing-subscriber" -version = "0.3.20" +version = "0.3.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2054a14f5307d601f88daf0553e1cbf472acc4f2c51afab632431cdcd72124d5" +checksum = "2f30143827ddab0d256fd843b7a66d164e9f271cfa0dde49142c5ca0ca291f1e" dependencies = [ "matchers", "nu-ansi-term", @@ -12248,6 +14748,27 @@ dependencies = [ "tracing-serde", ] +[[package]] +name = "tracing-test" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "557b891436fe0d5e0e363427fc7f217abf9ccd510d5136549847bdcbcd011d68" +dependencies = [ + "tracing-core", + "tracing-subscriber 0.3.22", + "tracing-test-macro", +] + +[[package]] +name = "tracing-test-macro" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "04659ddb06c87d233c566112c1c9c5b9e98256d9af50ec3bc9c8327f873a7568" +dependencies = [ + "quote", + "syn 2.0.114", +] + [[package]] name = "trait-set" version = "0.3.0" @@ -12267,9 +14788,9 @@ checksum = "e421abadd41a4225275504ea4d6566923418b7f05506fbc9c0fe86ba7396114b" [[package]] name = "trybuild" -version = "1.0.113" +version = "1.0.114" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "559b6a626c0815c942ac98d434746138b4f89ddd6a1b8cbb168c6845fb3376c5" +checksum = "3e17e807bff86d2a06b52bca4276746584a78375055b6e45843925ce2802b335" dependencies = [ "glob", "serde", @@ -12277,7 +14798,7 @@ dependencies = [ "serde_json", "target-triple", "termcolor", - "toml 0.9.8", + "toml 0.9.11+spec-1.1.0", ] [[package]] @@ -12331,7 +14852,7 @@ dependencies = [ "byteorder", "bytes", "data-encoding", - "http 1.3.1", + "http 1.4.0", "httparse", "log", "rand 0.8.5", @@ -12347,6 +14868,15 @@ version = "1.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "562d481066bde0658276a35467c4af00bdc6ee726305698a55b86e61d7ad82bb" +[[package]] +name = "ucd-parse" +version = "0.1.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c06ff81122fcbf4df4c1660b15f7e3336058e7aec14437c9f85c6b31a0f279b9" +dependencies = [ + "regex-lite", +] + [[package]] name = "ucd-trie" version = "0.1.7" @@ -12385,9 +14915,9 @@ checksum = "eaea85b334db583fe3274d12b4cd1880032beab409c0d774be044d4480ab9a94" [[package]] name = "unicase" -version = "2.8.1" +version = "2.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"75b844d17643ee918803943289730bec8aac480150456169e647ed0b576ba539" +checksum = "dbc4bc3a9f746d862c45cb89d705aa10f187bb96c76001afab07a0d35ce60142" [[package]] name = "unicode-ident" @@ -12395,6 +14925,15 @@ version = "1.0.22" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9312f7c4f6ff9069b165498234ce8be658059c6728633667c526e27dc2cf1df5" +[[package]] +name = "unicode-normalization" +version = "0.1.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5fd4f6878c9cb28d874b009da9e8d183b5abc80117c40bbd187a1fde336be6e8" +dependencies = [ + "tinyvec", +] + [[package]] name = "unicode-segmentation" version = "1.12.0" @@ -12419,7 +14958,7 @@ version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fc1de2c688dc15305988b563c3854064043356019f97a4b46276fe734c4f07ea" dependencies = [ - "crypto-common 0.1.6", + "crypto-common 0.1.7", "subtle", ] @@ -12461,14 +15000,15 @@ checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" [[package]] name = "url" -version = "2.5.7" +version = "2.5.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08bc136a29a3d1758e07a9cca267be308aeebf5cfd5a10f3f67ab2097683ef5b" +checksum = "ff67a8a4397373c3ef660812acab3268222035010ab8680ec4215f38ba3d0eed" dependencies = [ "form_urlencoded", "idna", "percent-encoding", "serde", + "serde_derive", ] [[package]] @@ -12501,15 +15041,15 @@ version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bc5cf98d8186244414c848017f0e2676b3fcb46807f6668a97dfe67359a3c4b7" dependencies = [ - "getrandom 0.2.16", + "getrandom 0.2.17", "serde", ] [[package]] name = "uuid" -version = "1.18.1" +version = "1.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2f87b8aa10b915a06587d0dec516c282ff295b475d94abf425d62b57710070a2" +checksum = "e2e054861b4bd027cd373e18e8d8d8e6548085000e41290d95ce0c373a654b4a" dependencies = [ "getrandom 0.3.4", "js-sys", @@ -12524,9 +15064,9 @@ checksum = "ba73ea9cf16a25df0c8caa16c51acb937d5712a8429db78a3ee29d5dcacd3a65" [[package]] name = "value-bag" -version = "1.11.1" +version = "1.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "943ce29a8a743eb10d6082545d861b24f9d1b160b7d741e0f2cdf726bec909c5" +checksum = "7ba6f5989077681266825251a52748b8c1d8a4ad098cc37e440103d0ea717fc0" [[package]] name = "vcpkg" @@ -12571,6 +15111,15 @@ version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a02e4885ed3bc0f2de90ea6dd45ebcbb66dacffe03547fadbb0eeae2770887d" +[[package]] +name = "wait-timeout" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09ac3b126d3914f9849036f826e054cbabdc8519970b8998ddaf3b5bd3c65f11" +dependencies = [ + "libc", +] + [[package]] name = "waker-fn" version = "1.2.0" @@ -12620,7 +15169,7 @@ dependencies = [ "serde_urlencoded", "tokio", "tokio-tungstenite 0.21.0", - "tokio-util 0.7.17", + "tokio-util 0.7.18", "tower-service", "tracing", ] @@ -12642,9 +15191,9 @@ dependencies = [ [[package]] name = "wasm-bindgen" -version = "0.2.105" +version = "0.2.106" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da95793dfc411fbbd93f5be7715b0578ec61fe87cb1a42b12eb625caa5c5ea60" +checksum = "0d759f433fa64a2d763d1340820e46e111a7a5ab75f993d1852d70b03dbb80fd" dependencies = [ "cfg-if", "once_cell", @@ -12655,9 +15204,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-futures" 
-version = "0.4.55" +version = "0.4.56" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "551f88106c6d5e7ccc7cd9a16f312dd3b5d36ea8b4954304657d5dfba115d4a0" +checksum = "836d9622d604feee9e5de25ac10e3ea5f2d65b41eac0d9ce72eb5deae707ce7c" dependencies = [ "cfg-if", "js-sys", @@ -12668,9 +15217,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.105" +version = "0.2.106" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "04264334509e04a7bf8690f2384ef5265f05143a4bff3889ab7a3269adab59c2" +checksum = "48cb0d2638f8baedbc542ed444afc0644a29166f1595371af4fecf8ce1e7eeb3" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -12678,22 +15227,22 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.105" +version = "0.2.106" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "420bc339d9f322e562942d52e115d57e950d12d88983a14c79b86859ee6c7ebc" +checksum = "cefb59d5cd5f92d9dcf80e4683949f15ca4b511f4ac0a6e14d4e1ac60c6ecd40" dependencies = [ "bumpalo", "proc-macro2", "quote", - "syn 2.0.108", + "syn 2.0.114", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.105" +version = "0.2.106" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "76f218a38c84bcb33c25ec7059b07847d465ce0e0a76b995e134a45adcb6af76" +checksum = "cbc538057e648b67f72a982e708d485b2efa771e1ac05fec311f9f63e5800db4" dependencies = [ "unicode-ident", ] @@ -12747,8 +15296,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9dbe55c8f9d0dbd25d9447a5a889ff90c0cc3feaa7395310d3d826b2c703eaab" dependencies = [ "bitflags 2.10.0", - "indexmap 2.12.0", - "semver", + "indexmap 2.13.0", + "semver 1.0.27", ] [[package]] @@ -12759,8 +15308,8 @@ checksum = "bc28600dcb2ba68d7e5f1c3ba4195c2bddc918c0243fd702d0b6dbd05689b681" dependencies = [ "bitflags 2.10.0", "hashbrown 0.15.5", - "indexmap 2.12.0", - "semver", + "indexmap 2.13.0", + "semver 1.0.27", "serde", ] @@ -12798,7 +15347,7 @@ dependencies = [ "cc", "cfg-if", "hashbrown 0.15.5", - "indexmap 2.12.0", + "indexmap 2.13.0", "libc", "log", "mach2", @@ -12873,7 +15422,7 @@ dependencies = [ "cranelift-bitset", "cranelift-entity", "gimli 0.31.1", - "indexmap 2.12.0", + "indexmap 2.13.0", "log", "object 0.36.7", "postcard", @@ -12936,14 +15485,14 @@ checksum = "5732a5c86efce7bca121a61d8c07875f6b85c1607aa86753b40f7f8bd9d3a780" dependencies = [ "proc-macro2", "quote", - "syn 2.0.108", + "syn 2.0.114", ] [[package]] name = "web-sys" -version = "0.3.82" +version = "0.3.83" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3a1f95c0d03a47f4ae1f7a64643a6bb97465d9b740f0fa8f90ea33915c99a9a1" +checksum = "9b32828d774c412041098d182a8b38b16ea816958e07cf40eec2bc080ae137ac" dependencies = [ "js-sys", "wasm-bindgen", @@ -12985,14 +15534,14 @@ version = "0.26.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "75c7f0ef91146ebfb530314f5f1d24528d7f0767efbfd31dce919275413e393e" dependencies = [ - "webpki-root-certs 1.0.4", + "webpki-root-certs 1.0.5", ] [[package]] name = "webpki-root-certs" -version = "1.0.4" +version = "1.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee3e3b5f5e80bc89f30ce8d0343bf4e5f12341c51f3e26cbeecbc7c85443e85b" +checksum = "36a29fc0408b113f68cf32637857ab740edfafdf460c326cd2afaa2d84cc05dc" dependencies = [ "rustls-pki-types", ] @@ -13014,9 +15563,18 @@ checksum = 
"5f20c57d8d7db6d3b86154206ae5d8fba62dd39573114de97c2cb0578251f8e1" [[package]] name = "webpki-roots" -version = "1.0.4" +version = "0.26.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "521bc38abb08001b01866da9f51eb7c5d647a19260e00054a8c7fd5f9e57f7a9" +dependencies = [ + "webpki-roots 1.0.5", +] + +[[package]] +name = "webpki-roots" +version = "1.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b2878ef029c47c6e8cf779119f20fcf52bde7ad42a731b2a304bc221df17571e" +checksum = "12bed680863276c63889429bfd6cab3b99943659923822de1c8a39c49e4d722c" dependencies = [ "rustls-pki-types", ] @@ -13041,7 +15599,7 @@ checksum = "24d643ce3fd3e5b54854602a080f34fb10ab75e0b813ee32d00ca2b44fa74762" dependencies = [ "either", "env_home", - "rustix 1.1.2", + "rustix 1.1.3", "winsafe", ] @@ -13102,6 +15660,37 @@ dependencies = [ "windows-targets 0.52.6", ] +[[package]] +name = "windows" +version = "0.59.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f919aee0a93304be7f62e8e5027811bbba96bcb1de84d6618be56e43f8a32a1" +dependencies = [ + "windows-core 0.59.0", + "windows-targets 0.53.5", +] + +[[package]] +name = "windows" +version = "0.62.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "527fadee13e0c05939a6a05d5bd6eec6cd2e3dbd648b9f8e447c6518133d8580" +dependencies = [ + "windows-collections", + "windows-core 0.62.2", + "windows-future", + "windows-numerics", +] + +[[package]] +name = "windows-collections" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "23b2d95af1a8a14a3c7367e1ed4fc9c20e0a26e79551b1454d72583c97cc6610" +dependencies = [ + "windows-core 0.62.2", +] + [[package]] name = "windows-core" version = "0.53.0" @@ -13112,19 +15701,54 @@ dependencies = [ "windows-targets 0.52.6", ] +[[package]] +name = "windows-core" +version = "0.59.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "810ce18ed2112484b0d4e15d022e5f598113e220c53e373fb31e67e21670c1ce" +dependencies = [ + "windows-implement 0.59.0", + "windows-interface", + "windows-result 0.3.4", + "windows-strings 0.3.1", + "windows-targets 0.53.5", +] + [[package]] name = "windows-core" version = "0.62.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b8e83a14d34d0623b51dce9581199302a221863196a1dde71a7663a4c2be9deb" dependencies = [ - "windows-implement", + "windows-implement 0.60.2", "windows-interface", "windows-link 0.2.1", "windows-result 0.4.1", "windows-strings 0.5.1", ] +[[package]] +name = "windows-future" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e1d6f90251fe18a279739e78025bd6ddc52a7e22f921070ccdc67dde84c605cb" +dependencies = [ + "windows-core 0.62.2", + "windows-link 0.2.1", + "windows-threading", +] + +[[package]] +name = "windows-implement" +version = "0.59.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "83577b051e2f49a058c308f17f273b570a6a758386fc291b5f6a934dd84e48c1" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.114", +] + [[package]] name = "windows-implement" version = "0.60.2" @@ -13133,7 +15757,7 @@ checksum = "053e2e040ab57b9dc951b72c264860db7eb3b0200ba345b4e4c3b14f67855ddf" dependencies = [ "proc-macro2", "quote", - "syn 2.0.108", + "syn 2.0.114", ] [[package]] @@ -13144,7 +15768,7 @@ checksum = "3f316c4a2570ba26bbec722032c4099d8c8bc095efccdc15688708623367e358" dependencies = [ "proc-macro2", "quote", - 
"syn 2.0.108", + "syn 2.0.114", ] [[package]] @@ -13159,15 +15783,25 @@ version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f0805222e57f7521d6a62e36fa9163bc891acd422f971defe97d64e70d0a4fe5" +[[package]] +name = "windows-numerics" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6e2e40844ac143cdb44aead537bbf727de9b044e107a0f1220392177d15b0f26" +dependencies = [ + "windows-core 0.62.2", + "windows-link 0.2.1", +] + [[package]] name = "windows-registry" -version = "0.5.3" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b8a9ed28765efc97bbc954883f4e6796c33a06546ebafacbabee9696967499e" +checksum = "02752bf7fbdcce7f2a27a742f798510f3e5ad88dbe84871e5168e2120c3d5720" dependencies = [ - "windows-link 0.1.3", - "windows-result 0.3.4", - "windows-strings 0.4.2", + "windows-link 0.2.1", + "windows-result 0.4.1", + "windows-strings 0.5.1", ] [[package]] @@ -13199,9 +15833,9 @@ dependencies = [ [[package]] name = "windows-strings" -version = "0.4.2" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56e6c93f3a0c3b36176cb1327a4958a0353d5d166c2a35cb268ace15e91d3b57" +checksum = "87fa48cc5d406560701792be122a10132491cff9d0aeb23583cc2dcafc847319" dependencies = [ "windows-link 0.1.3", ] @@ -13332,6 +15966,15 @@ dependencies = [ "windows_x86_64_msvc 0.53.1", ] +[[package]] +name = "windows-threading" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3949bd5b99cafdf1c7ca86b43ca564028dfe27d66958f2470940f73d86d75b37" +dependencies = [ + "windows-link 0.2.1", +] + [[package]] name = "windows_aarch64_gnullvm" version = "0.42.2" @@ -13523,9 +16166,9 @@ dependencies = [ [[package]] name = "winnow" -version = "0.7.13" +version = "0.7.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21a0236b59786fed61e2a80582dd500fe61f18b5dca67a4a067d0bc9039339cf" +checksum = "5a5364e9d77fcdeeaa6062ced926ee3381faa2ee02d3eb83a5c27a8825540829" dependencies = [ "memchr", ] @@ -13552,6 +16195,21 @@ version = "0.46.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f17a85883d4e6d00e8a97c586de764dabcc06133f7f1d55dce5cdc070ad7fe59" +[[package]] +name = "wmi" +version = "0.14.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7787dacdd8e71cbc104658aade4009300777f9b5fda6a75f19145fedb8a18e71" +dependencies = [ + "chrono", + "futures", + "log", + "serde", + "thiserror 2.0.17", + "windows 0.59.0", + "windows-core 0.59.0", +] + [[package]] name = "writeable" version = "0.6.2" @@ -13569,7 +16227,7 @@ dependencies = [ "js-sys", "log", "pharos", - "rustc_version", + "rustc_version 0.4.1", "send_wrapper 0.6.0", "thiserror 2.0.17", "wasm-bindgen", @@ -13622,7 +16280,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "32e45ad4206f6d2479085147f02bc2ef834ac85886624a23575ae137c8aa8156" dependencies = [ "libc", - "rustix 1.1.2", + "rustix 1.1.3", ] [[package]] @@ -13719,7 +16377,7 @@ version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8ca6c5a4d66c1a9ea261811cf4773c27343de7e5033e1b75ea3f297dc7db3c1a" dependencies = [ - "flume", + "flume 0.10.14", "scopeguard", ] @@ -13742,28 +16400,34 @@ checksum = "b659052874eb698efe5b9e8cf382204678a0086ebf46982b79d6ca3182927e5d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.108", + "syn 2.0.114", "synstructure 0.13.2", ] 
+[[package]] +name = "z32" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2164e798d9e3d84ee2c91139ace54638059a3b23e361f5c11781c2c6459bde0f" + [[package]] name = "zerocopy" -version = "0.8.27" +version = "0.8.33" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0894878a5fa3edfd6da3f88c4805f4c8558e2b996227a3d864f47fe11e38282c" +checksum = "668f5168d10b9ee831de31933dc111a459c97ec93225beb307aed970d1372dfd" dependencies = [ "zerocopy-derive", ] [[package]] name = "zerocopy-derive" -version = "0.8.27" +version = "0.8.33" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "88d2b8d9c68ad2b9e4340d7832716a4d21a22a1154777ad56ea55c51a9cf3831" +checksum = "2c7962b26b0a8685668b671ee4b54d007a67d4eaf05fda79ac0ecf41e32270f1" dependencies = [ "proc-macro2", "quote", - "syn 2.0.108", + "syn 2.0.114", ] [[package]] @@ -13783,7 +16447,7 @@ checksum = "d71e5d6e06ab090c67b5e44993ec16b72dcbaabc526db883a360057678b48502" dependencies = [ "proc-macro2", "quote", - "syn 2.0.108", + "syn 2.0.114", "synstructure 0.13.2", ] @@ -13798,13 +16462,13 @@ dependencies = [ [[package]] name = "zeroize_derive" -version = "1.4.2" +version = "1.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" +checksum = "85a5b4158499876c763cb03bc4e49185d3cccbabb15b33c627f7884f43db852e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.108", + "syn 2.0.114", ] [[package]] @@ -13837,7 +16501,7 @@ checksum = "eadce39539ca5cb3985590102671f2567e659fca9666581ad3411d59207951f3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.108", + "syn 2.0.114", ] [[package]] @@ -13860,6 +16524,12 @@ dependencies = [ "zstd", ] +[[package]] +name = "zmij" +version = "1.0.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bd8f3f50b848df28f887acb68e41201b5aea6bc8a8dacc00fb40635ff9a72fea" + [[package]] name = "zstd" version = "0.11.2+zstd.1.5.2" diff --git a/Cargo.toml b/Cargo.toml index c5156b3619..ff29c5279d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -43,9 +43,36 @@ members = [ "fendermint/actors/chainmetadata", "fendermint/actors/activity-tracker", "fendermint/actors/eam", + "fendermint/actors/init", "fendermint/actors/f3-light-client", "fendermint/actors/gas_market/eip1559", + # ipc decentralized storage + + # rpc serves + "ipc-storage/ipc-decentralized-storage", + + # actors + "fendermint/actors/adm_types", # fil_actor_adm - ADM types + "fendermint/actors/adm", # ADM actor + "fendermint/actors/machine", # Machine base trait + "fendermint/actors/blobs", + "fendermint/actors/blobs/shared", + "fendermint/actors/blobs/testing", + "fendermint/actors/blob_reader", + "fendermint/actors/bucket", # S3-like object storage + "fendermint/actors/timehub", # Timestamping service + "fendermint/actors/ipc_storage_config", + "fendermint/actors/ipc_storage_config/shared", + + # storage components (netwatch patched for socket2 0.5 compatibility!) 
+ "ipc-storage/iroh_manager", + "ipc-storage/ipld", + "ipc-storage/actor_sdk", + + # sol contracts facade + "ipc-storage/sol-facade/crates/facade", + "build-rs-utils", "contracts-artifacts", ] @@ -71,6 +98,7 @@ axum = { version = "0.6", features = ["ws"] } base64 = "0.21" bollard = "0.15" blake2b_simd = "1.0" +blake3 = "1.5" bloom = "0.3" bytes = "1.4" clap = { version = "4.1", features = ["derive", "env", "string"] } @@ -78,6 +106,7 @@ color-eyre = "0.5.11" byteorder = "1.5.0" config = "0.13" const-hex = "1.14.0" +data-encoding = "2.3.3" dirs = "5.0" dircpy = "0.3.19" either = "1.10" @@ -97,6 +126,15 @@ hex-literal = "0.4.1" http = "0.2.12" im = "15.1.0" integer-encoding = { version = "3.0.3", default-features = false } +# Iroh dependencies +ambassador = "0.3.5" +iroh = "0.35" +iroh-base = "0.35" +iroh-blobs = { version = "0.35", features = ["rpc"] } +iroh-relay = "0.35" +iroh-quinn = { version = "0.13" } +n0-future = "0.1.2" +quic-rpc = { version = "0.20", features = ["quinn-transport"] } jsonrpc-v2 = { version = "0.11", default-features = false, features = [ "bytes-v10", ] } @@ -148,8 +186,19 @@ quickcheck_macros = "1" rand = "0.8" rand_chacha = "0.3" regex = "1" +replace_with = "0.1.7" statrs = "0.18.0" reqwest = { version = "0.11.13", features = ["json"] } +# entanglement library +entangler = { package = "recall_entangler", git = "https://github.com/recallnet/entanglement.git", rev = "aee1c675ff05e5cde4771a2e2eb3ac4dab8476bc" } +entangler_storage = { package = "recall_entangler_storage", git = "https://github.com/recallnet/entanglement.git", rev = "aee1c675ff05e5cde4771a2e2eb3ac4dab8476bc" } +# Objects HTTP API dependencies +warp = "0.3" +uuid = { version = "1.0", features = ["v4"] } +mime_guess = "2.0" +urlencoding = "2.1" +# Solidity facades (vendored locally, upgraded to FVM 4.7) +ipc_storage_sol_facade = { path = "ipc-storage/sol-facade/crates/facade" } sha2 = "0.10" serde = { version = "1.0.217", features = ["derive"] } serde_bytes = "0.11" @@ -226,6 +275,7 @@ fvm_ipld_amt = "0.7.4" # NOTE: Using master branch instead of v17.0.0 tag due to serde dependency fixes # Master is currently at commit 2f040c12 which fixes the serde::__private::PhantomData import issue fil_actors_evm_shared = { git = "https://github.com/filecoin-project/builtin-actors", branch = "master" } +fil_actor_adm = { path = "fendermint/actors/adm_types" } fil_actor_eam = { git = "https://github.com/filecoin-project/builtin-actors", branch = "master" } fil_actor_evm = { git = "https://github.com/filecoin-project/builtin-actors", branch = "master" } fil_actors_runtime = { git = "https://github.com/filecoin-project/builtin-actors", branch = "master" } @@ -234,6 +284,7 @@ cid = { version = "0.11", default-features = false, features = [ "serde-codec", "std", ] } +multihash-codetable = "0.1" frc42_dispatch = { path = "./ext/frc42_dispatch" } @@ -252,6 +303,11 @@ tendermint-proto = { version = "0.31" } [patch.crates-io] # Using latest FVM to match builtin-actors v17.0.0 requirements fvm = { git = "https://github.com/consensus-shipyard/ref-fvm.git", branch = "master" } + +# Fix netwatch socket2 0.5 compatibility (macOS BSD sockets) +# Patched version with socket2 0.5+ API fixes +netwatch = { path = "patches/netwatch" } + fvm_shared = { git = "https://github.com/consensus-shipyard/ref-fvm.git", branch = "master" } fvm_sdk = { git = "https://github.com/consensus-shipyard/ref-fvm.git", branch = "master" } fvm_ipld_blockstore = { git = "https://github.com/consensus-shipyard/ref-fvm.git", branch = "master" } @@ -261,6 +317,9 
@@ fvm_ipld_hamt = { git = "https://github.com/consensus-shipyard/ref-fvm.git", bra fvm_ipld_amt = { git = "https://github.com/consensus-shipyard/ref-fvm.git", branch = "master" } yamux = { git = "https://github.com/paritytech/yamux", tag = "yamux-v0.13.4" } +# Pin digest to 0.11.0-rc.2 for compatibility with blake2 from huitseeker/hashes (used by rust-f3) +digest = { git = "https://github.com/RustCrypto/traits", tag = "digest-v0.11.0-rc.2" } + [profile.wasm] inherits = "release" panic = "abort" diff --git a/fendermint/actors/Cargo.toml b/fendermint/actors/Cargo.toml index 153d52e9c3..b89df36358 100644 --- a/fendermint/actors/Cargo.toml +++ b/fendermint/actors/Cargo.toml @@ -17,3 +17,12 @@ fendermint_actor_chainmetadata = { path = "chainmetadata", features = ["fil-acto fendermint_actor_f3_light_client = { path = "f3-light-client", features = ["fil-actor"] } fendermint_actor_gas_market_eip1559 = { path = "gas_market/eip1559", features = ["fil-actor"] } fendermint_actor_eam = { path = "eam", features = ["fil-actor"] } +fendermint_actor_init = { path = "init", features = ["fil-actor"] } +# IPC Storage actors +fendermint_actor_adm = { path = "adm", features = ["fil-actor"] } +fendermint_actor_blobs = { path = "blobs", features = ["fil-actor"] } +fendermint_actor_blob_reader = { path = "blob_reader", features = ["fil-actor"] } +fendermint_actor_bucket = { path = "bucket", features = ["fil-actor"] } +fendermint_actor_machine = { path = "machine", features = ["fil-actor"] } +fendermint_actor_ipc_storage_config = { path = "ipc_storage_config", features = ["fil-actor"] } +fendermint_actor_timehub = { path = "timehub", features = ["fil-actor"] } diff --git a/fendermint/actors/adm/Cargo.toml b/fendermint/actors/adm/Cargo.toml new file mode 100644 index 0000000000..851eea7c96 --- /dev/null +++ b/fendermint/actors/adm/Cargo.toml @@ -0,0 +1,37 @@ +[package] +name = "fendermint_actor_adm" +description = "ADM (Autonomous Data Management) actor for machine lifecycle management" +license.workspace = true +edition.workspace = true +authors.workspace = true +version = "0.1.0" + +[lib] +crate-type = ["cdylib", "lib"] + +[dependencies] +anyhow = { workspace = true } +cid = { workspace = true, default-features = false } +fil_actors_runtime = { workspace = true } +frc42_dispatch = { workspace = true } +fvm_ipld_blockstore = { workspace = true } +fvm_ipld_encoding = { workspace = true } +fvm_shared = { workspace = true } +hex-literal = { workspace = true } +integer-encoding = { workspace = true } +log = { workspace = true } +multihash = { workspace = true } +num-derive = { workspace = true } +num-traits = { workspace = true } +ipc_storage_sol_facade = { workspace = true, features = ["machine"] } +serde = { workspace = true, features = ["derive"] } + +fendermint_actor_machine = { path = "../machine" } +ipc_storage_actor_sdk = { path = "../../../ipc-storage/actor_sdk" } + +[dev-dependencies] +fil_actors_runtime = { workspace = true, features = ["test_utils"] } + +[features] +fil-actor = ["fil_actors_runtime/fil-actor"] + diff --git a/fendermint/actors/adm/src/ext.rs b/fendermint/actors/adm/src/ext.rs new file mode 100644 index 0000000000..03418ab8bf --- /dev/null +++ b/fendermint/actors/adm/src/ext.rs @@ -0,0 +1,56 @@ +// Copyright 2025 Recall Contributors +// Copyright 2022-2024 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT + +use fvm_ipld_encoding::tuple::*; + +pub mod init { + use super::*; + use cid::Cid; + use fvm_ipld_encoding::RawBytes; + use fvm_shared::address::Address; + + pub const 
EXEC_METHOD: u64 = 2; + + /// Init actor Exec Params. + #[derive(Serialize_tuple, Deserialize_tuple)] + pub struct ExecParams { + pub code_cid: Cid, + pub constructor_params: RawBytes, + } + + /// Init actor Exec Return value. + #[derive(Debug, Serialize_tuple, Deserialize_tuple)] + pub struct ExecReturn { + /// ID based address for created actor. + pub id_address: Address, + /// Reorg safe address for actor. + pub robust_address: Address, + } +} + +pub mod account { + pub const PUBKEY_ADDRESS_METHOD: u64 = 2; +} + +pub mod machine { + use super::*; + use fvm_shared::address::Address; + use std::collections::HashMap; + + pub const INIT_METHOD: u64 = 2; + + #[derive(Debug, Serialize_tuple, Deserialize_tuple)] + pub struct ConstructorParams { + /// The machine owner ID address. + pub owner: Address, + /// User-defined metadata. + pub metadata: HashMap<String, String>, + } + + #[derive(Debug, Serialize_tuple, Deserialize_tuple)] + pub struct InitParams { + /// The machine ID address. + pub address: Address, + } +} diff --git a/fendermint/actors/adm/src/lib.rs b/fendermint/actors/adm/src/lib.rs new file mode 100644 index 0000000000..6c8981d03c --- /dev/null +++ b/fendermint/actors/adm/src/lib.rs @@ -0,0 +1,303 @@ +// Copyright 2025 Recall Contributors +// Copyright 2022-2024 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT + +use std::collections::HashMap; +use std::iter; + +use cid::Cid; +use ext::init::{ExecParams, ExecReturn}; +use fil_actors_runtime::{ + actor_dispatch_unrestricted, actor_error, deserialize_block, extract_send_result, + runtime::{builtins::Type, ActorCode, Runtime}, + ActorDowncast, ActorError, INIT_ACTOR_ADDR, SYSTEM_ACTOR_ADDR, +}; +use fvm_ipld_encoding::{ipld_block::IpldBlock, tuple::*, RawBytes}; +use fvm_shared::{address::Address, error::ExitCode, ActorID, METHOD_CONSTRUCTOR}; +use ipc_storage_sol_facade::machine::Calls; +use num_derive::FromPrimitive; + +// ADM actor ID as defined in fendermint/vm/actor_interface/src/adm.rs +pub const ADM_ACTOR_ID: ActorID = 17; + +use crate::sol_facade as sol; +use crate::sol_facade::{AbiCall, AbiCallRuntime, InputData}; +use crate::state::PermissionMode; +pub use crate::state::{Kind, Metadata, PermissionModeParams, State}; + +pub mod ext; +mod sol_facade; +mod state; + +#[cfg(feature = "fil-actor")] +fil_actors_runtime::wasm_trampoline!(AdmActor); + +#[derive(FromPrimitive)] +#[repr(u64)] +pub enum Method { + Constructor = METHOD_CONSTRUCTOR, + // Exported calls (computed via `frc42_dispatch::method_hash!` & hardcoded to avoid dependency issues) + CreateExternal = 1214262202, + UpdateDeployers = 1768606754, + ListMetadata = 2283215593, + GetMachineCode = 2892692559, //= frc42_dispatch::method_hash!("GetMachineCode"); + InvokeContract = 3844450837, //= frc42_dispatch::method_hash!("InvokeEVM") +} + +#[derive(Debug, Serialize_tuple, Deserialize_tuple)] +pub struct ConstructorParams { + pub machine_codes: HashMap<Kind, Cid>, + pub permission_mode: PermissionModeParams, +} + +#[derive(Debug, Serialize_tuple, Deserialize_tuple)] +pub struct CreateExternalParams { + pub owner: Address, + pub kind: Kind, + pub metadata: HashMap<String, String>, +} + +#[derive(Serialize_tuple, Deserialize_tuple, Debug, PartialEq, Eq)] +pub struct CreateExternalReturn { + pub actor_id: ActorID, + pub robust_address: Option<Address>
, +} + +#[derive(Debug, Serialize_tuple, Deserialize_tuple)] +pub struct ListMetadataParams { + pub owner: Address, +} + +fn create_machine( + rt: &impl Runtime, + owner: Address, + code_cid: Cid, + metadata: HashMap<String, String>, +) -> Result<CreateExternalReturn, ActorError> { + let constructor_params = + RawBytes::serialize(ext::machine::ConstructorParams { owner, metadata })?; + let ret: ExecReturn = deserialize_block(extract_send_result(rt.send_simple( + &INIT_ACTOR_ADDR, + ext::init::EXEC_METHOD, + IpldBlock::serialize_cbor(&ExecParams { + code_cid, + constructor_params, + })?, + rt.message().value_received(), + ))?)?; + + // Initialize the machine with its address + let actor_id = ret.id_address.id().unwrap(); + let address = Address::new_id(actor_id); + extract_send_result(rt.send_simple( + &ret.id_address, + ext::machine::INIT_METHOD, + IpldBlock::serialize_cbor(&ext::machine::InitParams { address })?, + rt.message().value_received(), + ))?; + + Ok(CreateExternalReturn { + actor_id, + robust_address: Some(ret.robust_address), + }) +} + +fn ensure_deployer_allowed(rt: &impl Runtime) -> Result<(), ActorError> { + // The caller is guaranteed to be an ID address. + let caller_id = rt.message().caller().id().unwrap(); + + // Check if the caller is a contract. If it is, and we're in permissioned mode, + // then the contract was either there in genesis or has been deployed by a whitelisted + // account; in both cases it's been known up front whether it creates other contracts, + // and if that was undesirable it would not have been deployed as it is. + let code_cid = rt.get_actor_code_cid(&caller_id).expect("caller has code"); + if rt.resolve_builtin_actor_type(&code_cid) == Some(Type::EVM) { + return Ok(()); + } + + // Check if the caller is whitelisted. + let state: State = rt.state()?; + if !state.can_deploy(rt, caller_id)? { + return Err(ActorError::forbidden(String::from( + "sender not allowed to deploy contracts", + ))); + } + + Ok(()) +} + +pub struct AdmActor; + +impl AdmActor { + pub fn constructor(rt: &impl Runtime, args: ConstructorParams) -> Result<(), ActorError> { + let actor_id = rt.resolve_address(&rt.message().receiver()).unwrap(); + if actor_id != ADM_ACTOR_ID { + return Err(ActorError::forbidden(format!( + "The ADM must be deployed at {ADM_ACTOR_ID}, was deployed at {actor_id}" + ))); + } + rt.validate_immediate_caller_is(iter::once(&SYSTEM_ACTOR_ADDR))?; + + let st = State::new(rt.store(), args.machine_codes, args.permission_mode)?; + rt.create(&st) + } + + fn update_deployers(rt: &impl Runtime, deployers: Vec<Address>
) -> Result<(), ActorError> { + rt.validate_immediate_caller_accept_any()?; + + // Reject update if we're unrestricted. + let state: State = rt.state()?; + if !matches!(state.permission_mode, PermissionMode::AllowList(_)) { + return Err(ActorError::forbidden(String::from( + "deployers can only be updated in allowlist mode", + ))); + }; + + // Check that the caller is in the allowlist. + let caller_id = rt.message().caller().id().unwrap(); + if !state.can_deploy(rt, caller_id)? { + return Err(ActorError::forbidden(String::from( + "sender not allowed to update deployers", + ))); + } + + // Perform the update. + rt.transaction(|st: &mut State, rt| { + st.set_deployers(rt.store(), deployers).map_err(|e| { + e.downcast_default(ExitCode::USR_ILLEGAL_ARGUMENT, "failed to set deployers") + }) + })?; + + Ok(()) + } + + /// Create a new machine from off-chain. + pub fn create_external( + rt: &impl Runtime, + params: CreateExternalParams, + ) -> Result { + ensure_deployer_allowed(rt)?; + rt.validate_immediate_caller_accept_any()?; + + let owner_id = rt + .resolve_address(¶ms.owner) + .ok_or(ActorError::illegal_argument(format!( + "failed to resolve actor for address {}", + params.owner + )))?; + let owner = Address::new_id(owner_id); + let machine_code = Self::retrieve_machine_code(rt, params.kind)?; + let ret = create_machine(rt, owner, machine_code, params.metadata.clone())?; + let address = Address::new_id(ret.actor_id); + + // Save machine metadata. + rt.transaction(|st: &mut State, rt| { + st.set_metadata(rt.store(), owner, address, params.kind, params.metadata) + .map_err(|e| { + e.downcast_default( + ExitCode::USR_ILLEGAL_ARGUMENT, + "failed to set machine metadata", + ) + }) + })?; + + Ok(ret) + } + + /// Returns a list of machine metadata by owner. + /// + /// Metadata includes machine kind and address. + pub fn list_metadata( + rt: &impl Runtime, + params: ListMetadataParams, + ) -> Result, ActorError> { + rt.validate_immediate_caller_accept_any()?; + + let owner_id = rt + .resolve_address(¶ms.owner) + .ok_or(ActorError::illegal_argument(format!( + "failed to resolve actor for address {}", + params.owner + )))?; + let owner_address = Address::new_id(owner_id); + + let st: State = rt.state()?; + let metadata = st.get_metadata(rt.store(), owner_address).map_err(|e| { + e.downcast_default(ExitCode::USR_ILLEGAL_ARGUMENT, "failed to get metadata") + })?; + Ok(metadata) + } + + fn invoke_contract( + rt: &impl Runtime, + params: sol::InvokeContractParams, + ) -> Result { + let input_data: InputData = params.try_into()?; + if sol::can_handle(&input_data) { + let output_data = match sol::parse_input(&input_data)? 
{ + Calls::createBucket_0(call) => { + // function createBucket() external; + let params = call.params(rt); + let create_external_return = Self::create_external(rt, params)?; + call.returns(create_external_return) + } + Calls::createBucket_1(call) => { + // function createBucket(address owner, KeyValue[] memory metadata) external; + let params = call.params(); + let create_external_return = Self::create_external(rt, params)?; + call.returns(create_external_return) + } + Calls::createBucket_2(call) => { + // function createBucket(address owner) external; + let params = call.params(); + let create_external_return = Self::create_external(rt, params)?; + call.returns(create_external_return) + } + Calls::listBuckets_0(call) => { + let params = call.params(rt); + let list = Self::list_metadata(rt, params)?; + call.returns(list) + } + Calls::listBuckets_1(call) => { + let params = call.params(); + let list = Self::list_metadata(rt, params)?; + call.returns(list) + } + }; + Ok(sol::InvokeContractReturn { output_data }) + } else { + Err(actor_error!(illegal_argument, "invalid call".to_string())) + } + } + + pub fn get_machine_code(rt: &impl Runtime, kind: Kind) -> Result { + rt.validate_immediate_caller_accept_any()?; + Self::retrieve_machine_code(rt, kind) + } + + fn retrieve_machine_code(rt: &impl Runtime, kind: Kind) -> Result { + rt.state::()? + .get_machine_code(rt.store(), &kind)? + .ok_or(ActorError::not_found(format!( + "machine code for kind '{}' not found", + kind + ))) + } +} + +impl ActorCode for AdmActor { + type Methods = Method; + + fn name() -> &'static str { + "ADMAddressManager" + } + + actor_dispatch_unrestricted! { + Constructor => constructor, + CreateExternal => create_external, + UpdateDeployers => update_deployers, + ListMetadata => list_metadata, + GetMachineCode => get_machine_code, + InvokeContract => invoke_contract, + } +} diff --git a/fendermint/actors/adm/src/sol_facade.rs b/fendermint/actors/adm/src/sol_facade.rs new file mode 100644 index 0000000000..0944268247 --- /dev/null +++ b/fendermint/actors/adm/src/sol_facade.rs @@ -0,0 +1,257 @@ +// Copyright 2022-2024 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT +use fil_actors_runtime::runtime::Runtime; +use fil_actors_runtime::{actor_error, ActorError}; +use fvm_ipld_encoding::tuple::{Deserialize_tuple, Serialize_tuple}; +use fvm_ipld_encoding::{strict_bytes, tuple::*}; +use fvm_shared::address::Address; +use ipc_storage_sol_facade::machine as sol; +use ipc_storage_sol_facade::machine::{listBuckets_0Call, listBuckets_1Call, Calls}; +use ipc_storage_sol_facade::types::{Address as SolAddress, SolCall, SolInterface, H160}; +use std::collections::HashMap; + +use crate::{CreateExternalParams, CreateExternalReturn, Kind, ListMetadataParams, Metadata}; + +pub fn can_handle(input_data: &InputData) -> bool { + Calls::valid_selector(input_data.selector()) +} + +pub fn parse_input(input: &InputData) -> Result { + Calls::abi_decode_raw(input.selector(), input.calldata(), true) + .map_err(|e| actor_error!(illegal_argument, format!("invalid call: {}", e))) +} + +impl AbiCallRuntime for sol::createBucket_0Call { + type Params = CreateExternalParams; + type Returns = CreateExternalReturn; + type Output = Vec; + + fn params(&self, rt: &impl Runtime) -> Self::Params { + CreateExternalParams { + owner: rt.message().caller(), + kind: Kind::Bucket, + metadata: HashMap::default(), + } + } + + fn returns(&self, returns: Self::Returns) -> Self::Output { + let address = returns + .robust_address + .map(|address| 
H160::try_from(address).unwrap_or_default()) + .unwrap_or_default(); + let address: SolAddress = address.into(); + Self::abi_encode_returns(&(address,)) + } +} + +impl AbiCall for sol::createBucket_1Call { + type Params = CreateExternalParams; + type Returns = CreateExternalReturn; + type Output = Vec; + + fn params(&self) -> Self::Params { + let owner: Address = H160::from(self.owner).into(); + let mut metadata = HashMap::with_capacity(self.metadata.len()); + for kv in self.metadata.clone() { + metadata.insert(kv.key, kv.value); + } + CreateExternalParams { + owner, + kind: Kind::Bucket, + metadata, + } + } + + fn returns(&self, returns: Self::Returns) -> Self::Output { + let address = returns + .robust_address + .map(|address| H160::try_from(address).unwrap_or_default()) + .unwrap_or_default(); + let address: SolAddress = address.into(); + Self::abi_encode_returns(&(address,)) + } +} + +impl AbiCall for sol::createBucket_2Call { + type Params = CreateExternalParams; + type Returns = CreateExternalReturn; + type Output = Vec; + + fn params(&self) -> Self::Params { + let owner: Address = H160::from(self.owner).into(); + CreateExternalParams { + owner, + kind: Kind::Bucket, + metadata: HashMap::default(), + } + } + + fn returns(&self, returns: Self::Returns) -> Self::Output { + let address = returns + .robust_address + .map(|address| H160::try_from(address).unwrap_or_default()) + .unwrap_or_default(); + let address: SolAddress = address.into(); + Self::abi_encode_returns(&(address,)) + } +} + +impl AbiCallRuntime for listBuckets_0Call { + type Params = ListMetadataParams; + type Returns = Vec; + type Output = Vec; + + fn params(&self, rt: &impl Runtime) -> Self::Params { + ListMetadataParams { + owner: rt.message().caller(), + } + } + + fn returns(&self, returns: Self::Returns) -> Self::Output { + let machines: Vec = returns + .iter() + .map(|m| sol::Machine { + kind: sol_kind(m.kind), + addr: H160::try_from(m.address).unwrap_or_default().into(), + metadata: m + .metadata + .iter() + .map(|(k, v)| sol::KeyValue { + key: k.clone(), + value: v.clone(), + }) + .collect(), + }) + .collect(); + Self::abi_encode_returns(&(machines,)) + } +} + +impl AbiCall for listBuckets_1Call { + type Params = ListMetadataParams; + type Returns = Vec; + type Output = Vec; + + fn params(&self) -> Self::Params { + ListMetadataParams { + owner: H160::from(self.owner).into(), + } + } + + fn returns(&self, returns: Self::Returns) -> Self::Output { + let machines: Vec = returns + .iter() + .map(|m| sol::Machine { + kind: sol_kind(m.kind), + addr: H160::try_from(m.address).unwrap_or_default().into(), + metadata: m + .metadata + .iter() + .map(|(k, v)| sol::KeyValue { + key: k.clone(), + value: v.clone(), + }) + .collect(), + }) + .collect(); + Self::abi_encode_returns(&(machines,)) + } +} + +fn sol_kind(kind: Kind) -> u8 { + match kind { + Kind::Bucket => 0, + Kind::Timehub => 1, + } +} + +// --- Copied from ipc_storage_actor_sdk --- // + +#[derive(Default, Serialize_tuple, Deserialize_tuple)] +#[serde(transparent)] +pub struct InvokeContractParams { + #[serde(with = "strict_bytes")] + pub input_data: Vec, +} + +#[derive(Serialize_tuple, Deserialize_tuple)] +#[serde(transparent)] +pub struct InvokeContractReturn { + #[serde(with = "strict_bytes")] + pub output_data: Vec, +} + +/// EVM call with selector (first 4 bytes) and calldata (remaining bytes) +pub struct InputData(Vec); + +impl InputData { + pub fn selector(&self) -> [u8; 4] { + let mut selector = [0u8; 4]; + selector.copy_from_slice(&self.0[0..4]); + selector 
+ } + + pub fn calldata(&self) -> &[u8] { + &self.0[4..] + } +} + +impl TryFrom for InputData { + type Error = ActorError; + + fn try_from(value: InvokeContractParams) -> Result { + if value.input_data.len() < 4 { + return Err(ActorError::illegal_argument("input too short".to_string())); + } + Ok(InputData(value.input_data)) + } +} + +pub trait AbiCall { + type Params; + type Returns; + type Output; + fn params(&self) -> Self::Params; + fn returns(&self, returns: Self::Returns) -> Self::Output; +} + +pub trait AbiCallRuntime { + type Params; + type Returns; + type Output; + fn params(&self, rt: &impl fil_actors_runtime::runtime::Runtime) -> Self::Params; + fn returns(&self, returns: Self::Returns) -> Self::Output; +} + +#[derive(Debug, Clone)] +pub struct AbiEncodeError { + message: String, +} + +impl From for AbiEncodeError { + fn from(error: anyhow::Error) -> Self { + Self { + message: format!("failed to abi encode {}", error), + } + } +} + +impl From for AbiEncodeError { + fn from(message: String) -> Self { + Self { message } + } +} + +impl From for AbiEncodeError { + fn from(error: ActorError) -> Self { + Self { + message: format!("{}", error), + } + } +} + +impl From for ActorError { + fn from(error: AbiEncodeError) -> Self { + actor_error!(serialization, error.message) + } +} diff --git a/fendermint/actors/adm/src/state.rs b/fendermint/actors/adm/src/state.rs new file mode 100644 index 0000000000..1e6d0278d0 --- /dev/null +++ b/fendermint/actors/adm/src/state.rs @@ -0,0 +1,265 @@ +// Copyright 2025 Recall Contributors +// Copyright 2022-2024 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT + +use anyhow::anyhow; +use cid::Cid; +use fil_actors_runtime::{runtime::Runtime, ActorError, Map2, MapKey, DEFAULT_HAMT_CONFIG}; +use fvm_ipld_blockstore::Blockstore; +use fvm_ipld_encoding::tuple::*; +use fvm_shared::{address::Address, ActorID}; +use integer_encoding::VarInt; +use serde::{Deserialize, Serialize}; +use std::collections::HashMap; +use std::fmt::Display; +use std::str::FromStr; + +type MachineCodeMap = Map2; +type DeployerMap = Map2; +type OwnerMap = Map2>; + +/// The args used to create the permission mode in storage. +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub enum PermissionModeParams { + /// No restriction, everyone can deploy. + Unrestricted, + /// Only whitelisted addresses can deploy. + AllowList(Vec
), +} + +/// The permission mode for controlling who can deploy contracts. +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub enum PermissionMode { + /// No restriction, everyone can deploy. + Unrestricted, + /// Only whitelisted addresses can deploy. + AllowList(Cid), // HAMT[Address]() +} + +/// The kinds of machines available. Their code Cids are given at genesis. +#[derive(Debug, Clone, Copy, Eq, PartialEq, Hash, Serialize, Deserialize)] +pub enum Kind { + /// An object storage bucket with S3-like key semantics. + Bucket, + /// An MMR timehub. + Timehub, +} + +impl MapKey for Kind { + fn from_bytes(b: &[u8]) -> Result { + if let Some((result, size)) = u64::decode_var(b) { + if size != b.len() { + return Err(format!("trailing bytes after varint in {:?}", b)); + } + match result { + 0 => Ok(Kind::Bucket), + 1 => Ok(Kind::Timehub), + _ => Err(format!("failed to decode kind from {}", result)), + } + } else { + Err(format!("failed to decode varint in {:?}", b)) + } + } + + fn to_bytes(&self) -> Result, String> { + let int = match self { + Self::Bucket => 0, + Self::Timehub => 1, + }; + Ok(int.encode_var_vec()) + } +} + +impl FromStr for Kind { + type Err = anyhow::Error; + + fn from_str(s: &str) -> Result { + Ok(match s { + "bucket" => Self::Bucket, + "timehub" => Self::Timehub, + _ => return Err(anyhow!("invalid machine kind")), + }) + } +} + +impl Display for Kind { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + let str = match self { + Self::Bucket => "bucket", + Self::Timehub => "timehub", + }; + write!(f, "{}", str) + } +} + +/// Machine metadata. +#[derive(Debug, Clone, PartialEq, Serialize_tuple, Deserialize_tuple)] +pub struct Metadata { + /// Machine kind. + pub kind: Kind, + /// Machine ID address. + pub address: Address, + /// User-defined data. + pub metadata: HashMap, +} + +/// ADM actor state representation. +#[derive(Debug, Clone, Serialize_tuple, Deserialize_tuple)] +pub struct State { + /// The root of a HAMT[u64]Cid containing available machine codes. + /// This is fixed at genesis. + pub machine_codes: Cid, + /// The permission mode controlling who can create machines. + /// This is fixed at genesis, but in allowlist mode, the set of deployers can be changed + /// by any member. + /// Modeled after the IPC EAM actor. + pub permission_mode: PermissionMode, + /// The root of a HAMT[Address]Vec containing address and kind metadata + /// keyed by owner robust address. + pub owners: Cid, +} + +impl State { + pub fn new( + store: &BS, + machine_codes: HashMap, + permission_mode: PermissionModeParams, + ) -> Result { + let mut machine_code_map = MachineCodeMap::empty(store, DEFAULT_HAMT_CONFIG, "machines"); + for (kind, code) in machine_codes { + machine_code_map.set(&kind, code)?; + } + let machine_codes = machine_code_map.flush()?; + + let permission_mode = match permission_mode { + PermissionModeParams::Unrestricted => PermissionMode::Unrestricted, + PermissionModeParams::AllowList(deployers) => { + let mut deployers_map = DeployerMap::empty(store, DEFAULT_HAMT_CONFIG, "deployers"); + for d in deployers { + deployers_map.set(&d, ())?; + } + PermissionMode::AllowList(deployers_map.flush()?) 
+ } + }; + + let owners = OwnerMap::empty(store, DEFAULT_HAMT_CONFIG, "owners").flush()?; + + Ok(State { + machine_codes, + permission_mode, + owners, + }) + } + + pub fn get_machine_code( + &self, + store: &BS, + kind: &Kind, + ) -> Result, ActorError> { + let machine_code_map = + MachineCodeMap::load(store, &self.machine_codes, DEFAULT_HAMT_CONFIG, "machines")?; + let code = machine_code_map.get(kind).map(|c| c.cloned())?; + Ok(code) + } + + pub fn set_deployers( + &mut self, + store: &BS, + deployers: Vec
, + ) -> anyhow::Result<()> { + match self.permission_mode { + PermissionMode::Unrestricted => { + return Err(anyhow::anyhow!( + "cannot set deployers in unrestricted permission mode" + )); + } + PermissionMode::AllowList(_) => { + let mut deployers_map = DeployerMap::empty(store, DEFAULT_HAMT_CONFIG, "deployers"); + for d in deployers { + deployers_map.set(&d, ())?; + } + self.permission_mode = PermissionMode::AllowList(deployers_map.flush()?); + } + } + Ok(()) + } + + pub fn can_deploy(&self, rt: &impl Runtime, deployer: ActorID) -> Result { + Ok(match &self.permission_mode { + PermissionMode::Unrestricted => true, + PermissionMode::AllowList(cid) => { + let deployer_map = + DeployerMap::load(rt.store(), cid, DEFAULT_HAMT_CONFIG, "deployers")?; + let mut allowed = false; + deployer_map.for_each(|k, _| { + // Normalize allowed addresses to ID addresses, so we can compare any kind of allowlisted address. + // This includes f1, f2, f3, etc. + // We cannot normalize the allowlist at construction time because the addresses may not be bound to IDs yet (counterfactual usage). + // Unfortunately, API of Hamt::for_each won't let us stop iterating on match, so this is more wasteful than we'd like. We can optimize later. + // Hamt has implemented Iterator recently, but it's not exposed through Map2 (see ENG-800). + allowed = allowed || rt.resolve_address(&k) == Some(deployer); + Ok(()) + })?; + allowed + } + }) + } + + pub fn set_metadata( + &mut self, + store: &BS, + owner: Address, + address: Address, + kind: Kind, + metadata: HashMap, + ) -> anyhow::Result<()> { + let mut owner_map = OwnerMap::load(store, &self.owners, DEFAULT_HAMT_CONFIG, "owners")?; + let mut machine_metadata = owner_map + .get(&owner)? + .map(|machines| machines.to_owned()) + .unwrap_or_default(); + machine_metadata.push(Metadata { + kind, + address, + metadata, + }); + owner_map.set(&owner, machine_metadata)?; + self.owners = owner_map.flush()?; + Ok(()) + } + + pub fn get_metadata( + &self, + store: &BS, + owner: Address, + ) -> anyhow::Result> { + let owner_map = OwnerMap::load(store, &self.owners, DEFAULT_HAMT_CONFIG, "owners")?; + let metadata = owner_map + .get(&owner)? 
+ .map(|m| m.to_owned()) + .unwrap_or_default(); + Ok(metadata) + } +} + +#[cfg(test)] +mod tests { + use cid::Cid; + + use crate::state::PermissionMode; + + #[test] + fn test_serialization() { + let p = PermissionMode::Unrestricted; + let v = fvm_ipld_encoding::to_vec(&p).unwrap(); + + let dp: PermissionMode = fvm_ipld_encoding::from_slice(&v).unwrap(); + assert_eq!(dp, p); + + let p = PermissionMode::AllowList(Cid::default()); + let v = fvm_ipld_encoding::to_vec(&p).unwrap(); + + let dp: PermissionMode = fvm_ipld_encoding::from_slice(&v).unwrap(); + assert_eq!(dp, p) + } +} diff --git a/fendermint/actors/adm_types/Cargo.toml b/fendermint/actors/adm_types/Cargo.toml new file mode 100644 index 0000000000..5200ca1097 --- /dev/null +++ b/fendermint/actors/adm_types/Cargo.toml @@ -0,0 +1,11 @@ +[package] +name = "fil_actor_adm" +description = "ADM actor types and interface" +license.workspace = true +edition.workspace = true +authors.workspace = true +version = "0.1.0" + +[dependencies] +serde = { workspace = true, features = ["derive"] } + diff --git a/fendermint/actors/adm_types/src/lib.rs b/fendermint/actors/adm_types/src/lib.rs new file mode 100644 index 0000000000..094802fdd1 --- /dev/null +++ b/fendermint/actors/adm_types/src/lib.rs @@ -0,0 +1,28 @@ +// Copyright 2025 Recall Contributors +// Copyright 2022-2024 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT + +//! # fil_actor_adm - ADM Actor Types +//! +//! This crate provides the types and interface for the ADM (Autonomous Data Management) actor. +//! It's designed to be a lightweight dependency for actors that need to interact with ADM. + +use serde::{Deserialize, Serialize}; + +/// Types of machines that can be managed by ADM +#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize)] +pub enum Kind { + /// S3-like object storage with key-value semantics + Bucket, + /// MMR accumulator for timestamping + Timehub, +} + +impl std::fmt::Display for Kind { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + Kind::Bucket => write!(f, "bucket"), + Kind::Timehub => write!(f, "timehub"), + } + } +} diff --git a/fendermint/actors/blob_reader/Cargo.toml b/fendermint/actors/blob_reader/Cargo.toml new file mode 100644 index 0000000000..7ea5c1b1b5 --- /dev/null +++ b/fendermint/actors/blob_reader/Cargo.toml @@ -0,0 +1,38 @@ +[package] +name = "fendermint_actor_blob_reader" +description = "Singleton actor for reading blob bytes" +license.workspace = true +edition.workspace = true +authors.workspace = true +version = "0.1.0" + +[lib] +## lib is necessary for integration tests +## cdylib is necessary for Wasm build +crate-type = ["cdylib", "lib"] + +[dependencies] +anyhow = { workspace = true } +fil_actors_runtime = { workspace = true } +fvm_ipld_blockstore = { workspace = true } +fvm_ipld_encoding = { workspace = true } +fvm_shared = { workspace = true } +frc42_dispatch = { workspace = true } +log = { workspace = true, features = ["std"] } +num-traits = { workspace = true } +num-derive = { workspace = true } +ipc_storage_sol_facade = { workspace = true, features = ["blob-reader"] } +serde = { workspace = true, features = ["derive"] } + +fendermint_actor_blobs_shared = { path = "../blobs/shared" } +ipc_storage_actor_sdk = { path = "../../../ipc-storage/actor_sdk" } +ipc_storage_ipld = { path = "../../../ipc-storage/ipld" } + +[dev-dependencies] +fendermint_actor_blobs_testing = { path = "../blobs/testing" } +fil_actors_evm_shared = { workspace = true } +fil_actors_runtime = { 
workspace = true, features = ["test_utils"] } +hex-literal = { workspace = true } + +[features] +fil-actor = ["fil_actors_runtime/fil-actor"] diff --git a/fendermint/actors/blob_reader/src/actor.rs b/fendermint/actors/blob_reader/src/actor.rs new file mode 100644 index 0000000000..fd5d7a4abb --- /dev/null +++ b/fendermint/actors/blob_reader/src/actor.rs @@ -0,0 +1,384 @@ +// Copyright 2025 Recall Contributors +// Copyright 2021-2023 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT + +use fendermint_actor_blobs_shared::bytes::B256; +use fil_actors_runtime::{ + actor_dispatch, actor_error, + runtime::{ActorCode, Runtime}, + ActorError, FIRST_EXPORTED_METHOD_NUMBER, SYSTEM_ACTOR_ADDR, +}; +use fvm_ipld_encoding::ipld_block::IpldBlock; +use fvm_shared::MethodNum; +use ipc_storage_actor_sdk::evm::emit_evm_event; + +use crate::shared::{ + CloseReadRequestParams, GetOpenReadRequestsParams, GetPendingReadRequestsParams, + GetReadRequestStatusParams, Method, OpenReadRequestParams, ReadRequestStatus, ReadRequestTuple, + SetReadRequestPendingParams, State, BLOB_READER_ACTOR_NAME, +}; +use crate::sol_facade::{ReadRequestClosed, ReadRequestOpened, ReadRequestPending}; + +#[cfg(feature = "fil-actor")] +fil_actors_runtime::wasm_trampoline!(ReadReqActor); + +pub struct ReadReqActor; + +impl ReadReqActor { + fn constructor(rt: &impl Runtime) -> Result<(), ActorError> { + rt.validate_immediate_caller_is(std::iter::once(&SYSTEM_ACTOR_ADDR))?; + let state = State::new(rt.store())?; + rt.create(&state) + } + + fn open_read_request( + rt: &impl Runtime, + params: OpenReadRequestParams, + ) -> Result { + rt.validate_immediate_caller_accept_any()?; + + let id = rt.transaction(|st: &mut State, _rt| { + st.open_read_request( + rt.store(), + params.hash, + params.offset, + params.len, + params.callback_addr, + params.callback_method, + ) + })?; + + emit_evm_event( + rt, + ReadRequestOpened { + id: &id, + blob_hash: ¶ms.hash, + read_offset: params.offset.into(), + read_length: params.len.into(), + callback: params.callback_addr, + method_num: params.callback_method, + }, + )?; + + Ok(id) + } + + fn get_read_request_status( + rt: &impl Runtime, + params: GetReadRequestStatusParams, + ) -> Result, ActorError> { + rt.validate_immediate_caller_accept_any()?; + let status = rt + .state::()? 
+ .get_read_request_status(rt.store(), params.0)?; + Ok(status) + } + + fn get_open_read_requests( + rt: &impl Runtime, + params: GetOpenReadRequestsParams, + ) -> Result, ActorError> { + rt.validate_immediate_caller_accept_any()?; + rt.state::()?.get_read_requests_by_status( + rt.store(), + ReadRequestStatus::Open, + params.0, + ) + } + + fn get_pending_read_requests( + rt: &impl Runtime, + params: GetPendingReadRequestsParams, + ) -> Result, ActorError> { + rt.validate_immediate_caller_accept_any()?; + rt.state::()?.get_read_requests_by_status( + rt.store(), + ReadRequestStatus::Pending, + params.0, + ) + } + + fn set_read_request_pending( + rt: &impl Runtime, + params: SetReadRequestPendingParams, + ) -> Result<(), ActorError> { + rt.validate_immediate_caller_is(std::iter::once(&SYSTEM_ACTOR_ADDR))?; + rt.transaction(|st: &mut State, _| st.set_read_request_pending(rt.store(), params.0))?; + emit_evm_event(rt, ReadRequestPending::new(¶ms.0)) + } + + fn close_read_request( + rt: &impl Runtime, + params: CloseReadRequestParams, + ) -> Result<(), ActorError> { + rt.validate_immediate_caller_is(std::iter::once(&SYSTEM_ACTOR_ADDR))?; + rt.transaction(|st: &mut State, _| st.close_read_request(rt.store(), params.0))?; + emit_evm_event(rt, ReadRequestClosed::new(¶ms.0)) + } + + /// Fallback method for unimplemented method numbers. + pub fn fallback( + rt: &impl Runtime, + method: MethodNum, + _: Option, + ) -> Result, ActorError> { + rt.validate_immediate_caller_accept_any()?; + if method >= FIRST_EXPORTED_METHOD_NUMBER { + Ok(None) + } else { + Err(actor_error!(unhandled_message; "invalid method: {}", method)) + } + } +} + +impl ActorCode for ReadReqActor { + type Methods = Method; + + fn name() -> &'static str { + BLOB_READER_ACTOR_NAME + } + + actor_dispatch! 
{ + Constructor => constructor, + + // User methods + OpenReadRequest => open_read_request, + + // System methods + GetReadRequestStatus => get_read_request_status, + GetOpenReadRequests => get_open_read_requests, + GetPendingReadRequests => get_pending_read_requests, + SetReadRequestPending => set_read_request_pending, + CloseReadRequest => close_read_request, + + _ => fallback, + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::sol_facade::ReadRequestClosed; + use fendermint_actor_blobs_testing::new_hash; + use fil_actors_evm_shared::address::EthAddress; + use fil_actors_runtime::test_utils::{ + expect_empty, MockRuntime, ETHACCOUNT_ACTOR_CODE_ID, SYSTEM_ACTOR_CODE_ID, + }; + use fvm_ipld_encoding::ipld_block::IpldBlock; + use fvm_shared::address::Address; + use ipc_storage_actor_sdk::evm::to_actor_event; + + pub fn construct_and_verify() -> MockRuntime { + let rt = MockRuntime { + receiver: Address::new_id(10), + ..Default::default() + }; + rt.set_caller(*SYSTEM_ACTOR_CODE_ID, SYSTEM_ACTOR_ADDR); + rt.expect_validate_caller_addr(vec![SYSTEM_ACTOR_ADDR]); + let result = rt + .call::(Method::Constructor as u64, None) + .unwrap(); + expect_empty(result); + rt.verify(); + rt.reset(); + rt + } + + fn expect_emitted_open_event(rt: &MockRuntime, params: &OpenReadRequestParams, id: &B256) { + let event = to_actor_event(ReadRequestOpened { + id, + blob_hash: ¶ms.hash, + read_offset: params.offset.into(), + read_length: params.len.into(), + callback: params.callback_addr, + method_num: params.callback_method, + }) + .unwrap(); + rt.expect_emitted_event(event); + } + + fn expect_emitted_pending_event(rt: &MockRuntime, params: &SetReadRequestPendingParams) { + let event = to_actor_event(ReadRequestPending::new(¶ms.0)).unwrap(); + rt.expect_emitted_event(event); + } + + fn expect_emitted_closed_event(rt: &MockRuntime, params: &CloseReadRequestParams) { + let event = to_actor_event(ReadRequestClosed::new(¶ms.0)).unwrap(); + rt.expect_emitted_event(event); + } + + #[test] + fn test_read_request_operations() { + let rt = construct_and_verify(); + + // Set up test addresses + let id_addr = Address::new_id(110); + let eth_addr = EthAddress(hex_literal::hex!( + "CAFEB0BA00000000000000000000000000000000" + )); + let f4_eth_addr = Address::new_delegated(10, ð_addr.0).unwrap(); + + rt.set_delegated_address(id_addr.id().unwrap(), f4_eth_addr); + rt.set_caller(*ETHACCOUNT_ACTOR_CODE_ID, id_addr); + rt.set_origin(id_addr); + + // Create a test blob hash and callback details + let blob_hash = new_hash(1024).0; + let offset = 32u32; + let len = 1024u32; + let callback_method = 42u64; + + // Test opening a read request + rt.expect_validate_caller_any(); + let open_params = OpenReadRequestParams { + hash: blob_hash, + offset, + len, + callback_addr: f4_eth_addr, + callback_method, + }; + let expected_id = B256::from(1); + expect_emitted_open_event(&rt, &open_params, &expected_id); + let request_id = rt + .call::( + Method::OpenReadRequest as u64, + IpldBlock::serialize_cbor(&open_params).unwrap(), + ) + .unwrap() + .unwrap() + .deserialize::() + .unwrap(); + rt.verify(); + + // Test checking request status + rt.expect_validate_caller_any(); + let status_params = GetReadRequestStatusParams(request_id); + let result = rt + .call::( + Method::GetReadRequestStatus as u64, + IpldBlock::serialize_cbor(&status_params).unwrap(), + ) + .unwrap() + .unwrap() + .deserialize::>() + .unwrap(); + assert!(matches!(result, Some(ReadRequestStatus::Open))); + rt.verify(); + + // Test getting open requests + 
rt.expect_validate_caller_any(); + let get_params = GetOpenReadRequestsParams(1); // Get just one request + let result = rt + .call::( + Method::GetOpenReadRequests as u64, + IpldBlock::serialize_cbor(&get_params).unwrap(), + ) + .unwrap() + .unwrap() + .deserialize::>() + .unwrap(); + + assert_eq!(result.len(), 1); + let (req_id, req_blob_hash, req_offset, req_len, req_callback_addr, req_callback_method) = + &result[0]; + assert_eq!(req_id, &request_id); + assert_eq!(req_blob_hash, &blob_hash); + assert_eq!(req_offset, &offset); + assert_eq!(req_len, &len); + assert_eq!(req_callback_addr, &f4_eth_addr); + assert_eq!(req_callback_method, &callback_method); + rt.verify(); + + // Test setting request to pending + rt.set_caller(*SYSTEM_ACTOR_CODE_ID, SYSTEM_ACTOR_ADDR); + rt.expect_validate_caller_addr(vec![SYSTEM_ACTOR_ADDR]); + let pending_params = SetReadRequestPendingParams(request_id); + expect_emitted_pending_event(&rt, &pending_params); + let result = rt.call::( + Method::SetReadRequestPending as u64, + IpldBlock::serialize_cbor(&pending_params).unwrap(), + ); + assert!(result.is_ok()); + rt.verify(); + + // Verify request is now pending + rt.set_caller(*ETHACCOUNT_ACTOR_CODE_ID, id_addr); // Reset caller + rt.expect_validate_caller_any(); + let status_params = GetReadRequestStatusParams(request_id); + let result = rt + .call::( + Method::GetReadRequestStatus as u64, + IpldBlock::serialize_cbor(&status_params).unwrap(), + ) + .unwrap() + .unwrap() + .deserialize::>() + .unwrap(); + assert!(matches!(result, Some(ReadRequestStatus::Pending))); + rt.verify(); + + // Test closing a request (requires system actor caller) + rt.set_caller(*SYSTEM_ACTOR_CODE_ID, SYSTEM_ACTOR_ADDR); + rt.expect_validate_caller_addr(vec![SYSTEM_ACTOR_ADDR]); + let close_params = CloseReadRequestParams(request_id); + expect_emitted_closed_event(&rt, &close_params); + let result = rt.call::( + Method::CloseReadRequest as u64, + IpldBlock::serialize_cbor(&close_params).unwrap(), + ); + assert!(result.is_ok()); + rt.verify(); + + // Verify request no longer exists + rt.set_caller(*ETHACCOUNT_ACTOR_CODE_ID, id_addr); // Reset caller + rt.expect_validate_caller_any(); + let status_params = GetReadRequestStatusParams(request_id); + let result = rt + .call::( + Method::GetReadRequestStatus as u64, + IpldBlock::serialize_cbor(&status_params).unwrap(), + ) + .unwrap() + .unwrap() + .deserialize::>() + .unwrap(); + assert!(result.is_none()); + rt.verify(); + } + + #[test] + fn test_read_request_error_cases() { + let rt = construct_and_verify(); + + // Set up test addresses + let id_addr = Address::new_id(110); + let eth_addr = EthAddress(hex_literal::hex!( + "CAFEB0BA00000000000000000000000000000000" + )); + let f4_eth_addr = Address::new_delegated(10, ð_addr.0).unwrap(); + + rt.set_delegated_address(id_addr.id().unwrap(), f4_eth_addr); + + // Test closing non-existent request + rt.set_caller(*SYSTEM_ACTOR_CODE_ID, SYSTEM_ACTOR_ADDR); + rt.expect_validate_caller_addr(vec![SYSTEM_ACTOR_ADDR]); + let non_existent_request_id = B256([0u8; 32]); + let close_params = CloseReadRequestParams(non_existent_request_id); + let result = rt.call::( + Method::CloseReadRequest as u64, + IpldBlock::serialize_cbor(&close_params).unwrap(), + ); + assert!(result.is_err()); + rt.verify(); + + // Test closing request with the non-system caller + rt.set_caller(*ETHACCOUNT_ACTOR_CODE_ID, id_addr); + rt.expect_validate_caller_addr(vec![SYSTEM_ACTOR_ADDR]); + let result = rt.call::( + Method::CloseReadRequest as u64, + 
IpldBlock::serialize_cbor(&close_params).unwrap(), + ); + assert!(result.is_err()); + rt.verify(); + } +} diff --git a/fendermint/actors/blob_reader/src/lib.rs b/fendermint/actors/blob_reader/src/lib.rs new file mode 100644 index 0000000000..a784389323 --- /dev/null +++ b/fendermint/actors/blob_reader/src/lib.rs @@ -0,0 +1,10 @@ +// Copyright 2025 Recall Contributors +// Copyright 2021-2023 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT + +mod actor; +mod shared; +mod sol_facade; +mod state; + +pub use shared::*; diff --git a/fendermint/actors/blob_reader/src/shared.rs b/fendermint/actors/blob_reader/src/shared.rs new file mode 100644 index 0000000000..655806a6fd --- /dev/null +++ b/fendermint/actors/blob_reader/src/shared.rs @@ -0,0 +1,112 @@ +// Copyright 2025 Recall Contributors +// Copyright 2021-2023 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT + +use std::fmt; + +use fendermint_actor_blobs_shared::bytes::B256; +use fvm_ipld_encoding::tuple::*; +use fvm_shared::{address::Address, ActorID, MethodNum, METHOD_CONSTRUCTOR}; +use num_derive::FromPrimitive; +use serde::{Deserialize, Serialize}; + +pub use crate::state::State; + +pub const BLOB_READER_ACTOR_NAME: &str = "blob_reader"; +pub const BLOB_READER_ACTOR_ID: ActorID = 67; +pub const BLOB_READER_ACTOR_ADDR: Address = Address::new_id(BLOB_READER_ACTOR_ID); + +/// The status of a read request. +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)] +pub enum ReadRequestStatus { + /// Read request is open and waiting to be processed + #[default] + Open, + /// Read request is being processed + Pending, +} + +impl fmt::Display for ReadRequestStatus { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + ReadRequestStatus::Open => write!(f, "open"), + ReadRequestStatus::Pending => write!(f, "pending"), + } + } +} + +/// A request to read blob data. +#[derive(Clone, Debug, PartialEq, Serialize_tuple, Deserialize_tuple)] +pub struct ReadRequest { + /// The hash of the blob to read data from. + pub blob_hash: B256, + /// The offset to start reading from. + pub offset: u32, + /// The length of data to read. + pub len: u32, + /// The address to call back when the read is complete. + pub callback_addr: Address, + /// The method to call back when the read is complete. + pub callback_method: MethodNum, + /// Status of the read request + pub status: ReadRequestStatus, +} + +#[derive(FromPrimitive)] +#[repr(u64)] +pub enum Method { + Constructor = METHOD_CONSTRUCTOR, + + // User methods + OpenReadRequest = frc42_dispatch::method_hash!("OpenReadRequest"), + + // System methods + GetReadRequestStatus = frc42_dispatch::method_hash!("GetReadRequestStatus"), + GetOpenReadRequests = frc42_dispatch::method_hash!("GetOpenReadRequests"), + GetPendingReadRequests = frc42_dispatch::method_hash!("GetPendingReadRequests"), + SetReadRequestPending = frc42_dispatch::method_hash!("SetReadRequestPending"), + CloseReadRequest = frc42_dispatch::method_hash!("CloseReadRequest"), +} + +/// Params for adding a read request. +#[derive(Clone, Debug, Serialize_tuple, Deserialize_tuple)] +pub struct OpenReadRequestParams { + /// The hash of the blob to read. + pub hash: B256, + /// The offset to start reading from. + pub offset: u32, + /// The length of the read request. + pub len: u32, + /// The address to call back when the read is complete. + pub callback_addr: Address, + /// The method to call back when the read is complete. + pub callback_method: MethodNum, +} + +/// Params for getting read request status. 
+#[derive(Clone, Debug, Serialize, Deserialize)] +#[serde(transparent)] +pub struct GetReadRequestStatusParams(pub B256); + +/// Params for getting open read requests. +#[derive(Clone, Debug, Serialize, Deserialize)] +#[serde(transparent)] +pub struct GetOpenReadRequestsParams(pub u32); + +/// Params for getting pending read requests. +#[derive(Clone, Debug, Serialize, Deserialize)] +#[serde(transparent)] +pub struct GetPendingReadRequestsParams(pub u32); + +/// Params for setting a read request to pending. +#[derive(Clone, Debug, Serialize, Deserialize)] +#[serde(transparent)] +pub struct SetReadRequestPendingParams(pub B256); + +/// Params for closing a read request. The ID of the read request. +#[derive(Clone, Debug, Serialize, Deserialize)] +#[serde(transparent)] +pub struct CloseReadRequestParams(pub B256); + +/// Return type for request queues. +pub type ReadRequestTuple = (B256, B256, u32, u32, Address, u64); diff --git a/fendermint/actors/blob_reader/src/sol_facade.rs b/fendermint/actors/blob_reader/src/sol_facade.rs new file mode 100644 index 0000000000..1e95453233 --- /dev/null +++ b/fendermint/actors/blob_reader/src/sol_facade.rs @@ -0,0 +1,66 @@ +// Copyright 2022-2024 Recall Contributors +// Copyright 2022-2024 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT + +use fendermint_actor_blobs_shared::bytes::B256; +use fvm_shared::{address::Address, MethodNum}; +use ipc_storage_actor_sdk::evm::TryIntoEVMEvent; +use ipc_storage_sol_facade::{blob_reader as sol, primitives::U256, types::H160}; + +pub struct ReadRequestOpened<'a> { + pub id: &'a B256, + pub blob_hash: &'a B256, + pub read_offset: u64, + pub read_length: u64, + pub callback: Address, + pub method_num: MethodNum, +} +impl TryIntoEVMEvent for ReadRequestOpened<'_> { + type Target = sol::Events; + + fn try_into_evm_event(self) -> Result { + let callback_address: H160 = self.callback.try_into()?; + Ok(sol::Events::ReadRequestOpened(sol::ReadRequestOpened { + id: self.id.0.into(), + blobHash: self.blob_hash.0.into(), + readOffset: U256::from(self.read_offset), + readLength: U256::from(self.read_length), + callbackAddress: callback_address.into(), + callbackMethod: U256::from(self.method_num), + })) + } +} + +pub struct ReadRequestPending<'a> { + pub id: &'a B256, +} +impl<'a> ReadRequestPending<'a> { + pub fn new(id: &'a B256) -> Self { + Self { id } + } +} +impl TryIntoEVMEvent for ReadRequestPending<'_> { + type Target = sol::Events; + fn try_into_evm_event(self) -> Result { + Ok(sol::Events::ReadRequestPending(sol::ReadRequestPending { + id: self.id.0.into(), + })) + } +} + +pub struct ReadRequestClosed<'a> { + pub id: &'a B256, +} +impl<'a> ReadRequestClosed<'a> { + pub fn new(id: &'a B256) -> Self { + Self { id } + } +} +impl TryIntoEVMEvent for ReadRequestClosed<'_> { + type Target = sol::Events; + fn try_into_evm_event(self) -> Result { + Ok(sol::Events::ReadRequestClosed(sol::ReadRequestClosed { + id: self.id.0.into(), + })) + } +} diff --git a/fendermint/actors/blob_reader/src/state.rs b/fendermint/actors/blob_reader/src/state.rs new file mode 100644 index 0000000000..17f2e35fe2 --- /dev/null +++ b/fendermint/actors/blob_reader/src/state.rs @@ -0,0 +1,176 @@ +// Copyright 2025 Recall Contributors +// Copyright 2021-2023 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT + +use fendermint_actor_blobs_shared::bytes::B256; +use fil_actors_runtime::ActorError; +use fvm_ipld_blockstore::Blockstore; +use fvm_ipld_encoding::tuple::*; +use fvm_shared::address::Address; +use ipc_storage_ipld::hamt::{self, 
map::TrackedFlushResult}; +use log::info; + +use crate::shared::{ReadRequest, ReadRequestStatus, ReadRequestTuple}; + +const MAX_READ_REQUEST_LEN: u32 = 1024 * 1024; // 1MB + +/// The state represents all read requests. +#[derive(Debug, Serialize_tuple, Deserialize_tuple)] +pub struct State { + /// ReadRequests Hamt. + pub read_requests: ReadRequests, + /// Counter to sequence the requests + pub request_id_counter: u64, +} + +impl State { + pub fn new(store: &BS) -> Result { + let read_requests = ReadRequests::new(store)?; + Ok(State { + read_requests, + request_id_counter: 0, + }) + } + + pub fn open_read_request( + &mut self, + store: &BS, + blob_hash: B256, + offset: u32, + len: u32, + callback_addr: Address, + callback_method: u64, + ) -> Result { + // Validate length is not greater than the maximum allowed + if len > MAX_READ_REQUEST_LEN { + return Err(ActorError::illegal_argument(format!( + "read request length {} exceeds maximum allowed {}", + len, MAX_READ_REQUEST_LEN + ))); + } + + let request_id = self.next_request_id(); + let read_request = ReadRequest { + blob_hash, + offset, + len, + callback_addr, + callback_method, + status: ReadRequestStatus::Open, + }; + info!("opening a read request onchain: {:?}", request_id); + // will create a new request even if the request parameters are the same + let mut read_requests = self.read_requests.hamt(store)?; + self.read_requests + .save_tracked(read_requests.set_and_flush_tracked(&request_id, read_request)?); + Ok(request_id) + } + + pub fn get_read_request_status( + &self, + store: BS, + id: B256, + ) -> Result, ActorError> { + let read_requests = self.read_requests.hamt(store)?; + Ok(read_requests.get(&id)?.map(|r| r.status.clone())) + } + + pub fn get_read_requests_by_status( + &self, + store: BS, + status: ReadRequestStatus, + size: u32, + ) -> Result, ActorError> { + let read_requests = self.read_requests.hamt(store)?; + + let mut requests = Vec::new(); + read_requests.for_each(|id, request| { + if request.status == status && (requests.len() as u32) < size { + requests.push(( + id, + request.blob_hash, + request.offset, + request.len, + request.callback_addr, + request.callback_method, + )) + } + + Ok(()) + })?; + Ok(requests) + } + + /// Set a read request status to pending. + pub fn set_read_request_pending( + &mut self, + store: BS, + id: B256, + ) -> Result<(), ActorError> { + let mut read_requests = self.read_requests.hamt(store)?; + let mut request = read_requests + .get(&id)? 
+ .ok_or_else(|| ActorError::not_found(format!("read request {} not found", id)))?; + + if !matches!(request.status, ReadRequestStatus::Open) { + return Err(ActorError::illegal_state(format!( + "read request {} is not in open state", + id + ))); + } + + request.status = ReadRequestStatus::Pending; + self.read_requests + .save_tracked(read_requests.set_and_flush_tracked(&id, request)?); + + Ok(()) + } + + pub fn close_read_request( + &mut self, + store: &BS, + request_id: B256, + ) -> Result<(), ActorError> { + if self.get_read_request_status(store, request_id)?.is_none() { + return Err(ActorError::not_found( + "cannot close read request, it does not exist".to_string(), + )); + } + + // remove the closed request + let mut read_requests = self.read_requests.hamt(store)?; + self.read_requests + .save_tracked(read_requests.delete_and_flush_tracked(&request_id)?.0); + Ok(()) + } + + fn next_request_id(&mut self) -> B256 { + self.request_id_counter += 1; + B256::from(self.request_id_counter) + } +} + +#[derive(Debug, Serialize_tuple, Deserialize_tuple)] +pub struct ReadRequests { + pub root: hamt::Root, + size: u64, +} + +impl ReadRequests { + pub fn new(store: &BS) -> Result { + let root = hamt::Root::::new(store, "read_requests")?; + Ok(Self { root, size: 0 }) + } + + pub fn hamt( + &self, + store: BS, + ) -> Result, ActorError> { + self.root.hamt(store, self.size) + } + + pub fn save_tracked(&mut self, tracked_flush_result: TrackedFlushResult) { + self.root = tracked_flush_result.root; + self.size = tracked_flush_result.size; + } +} diff --git a/fendermint/actors/blobs/Cargo.toml b/fendermint/actors/blobs/Cargo.toml new file mode 100644 index 0000000000..4329cd9aec --- /dev/null +++ b/fendermint/actors/blobs/Cargo.toml @@ -0,0 +1,44 @@ +[package] +name = "fendermint_actor_blobs" +description = "Singleton actor for blob management" +license.workspace = true +edition.workspace = true +authors.workspace = true +version = "0.1.0" + +[lib] +## lib is necessary for integration tests +## cdylib is necessary for Wasm build +crate-type = ["cdylib", "lib"] + +[dependencies] +cid = { workspace = true } +anyhow = { workspace = true } +fil_actors_runtime = { workspace = true } +fvm_ipld_blockstore = { workspace = true } +fvm_ipld_encoding = { workspace = true } +fvm_shared = { workspace = true } +log = { workspace = true, features = ["std"] } +num-traits = { workspace = true } +ipc_storage_sol_facade = { workspace = true, features = ["blobs", "credit", "gas"] } +serde = { workspace = true, features = ["derive"] } + +fendermint_actor_blobs_shared = { path = "./shared" } +fendermint_actor_ipc_storage_config_shared = { path = "../ipc_storage_config/shared" } +ipc_storage_actor_sdk = { path = "../../../ipc-storage/actor_sdk" } +ipc_storage_ipld = { path = "../../../ipc-storage/ipld" } + +# BLS signature verification +bls-signatures = { version = "0.13.1", default-features = false, features = ["blst"] } + +[dev-dependencies] +fil_actors_evm_shared = { workspace = true } +fil_actors_runtime = { workspace = true, features = ["test_utils"] } +hex-literal = { workspace = true } +rand = { workspace = true } +cid = { workspace = true } + +fendermint_actor_blobs_testing = { path = "./testing" } + +[features] +fil-actor = ["fil_actors_runtime/fil-actor"] diff --git a/fendermint/actors/blobs/shared/Cargo.toml b/fendermint/actors/blobs/shared/Cargo.toml new file mode 100644 index 0000000000..5519777ae5 --- /dev/null +++ b/fendermint/actors/blobs/shared/Cargo.toml @@ -0,0 +1,31 @@ +[package] +name = 
"fendermint_actor_blobs_shared" +description = "Shared resources for blobs" +license.workspace = true +edition.workspace = true +authors.workspace = true +version = "0.1.0" + +[lib] +## lib is necessary for integration tests +## cdylib is necessary for Wasm build +crate-type = ["cdylib", "lib"] + +[dependencies] +anyhow = { workspace = true } +data-encoding = { workspace = true } +fil_actors_runtime = { workspace = true } +frc42_dispatch = { workspace = true } +fvm_ipld_encoding = { workspace = true } +fvm_shared = { workspace = true } +num-derive = { workspace = true } +num-traits = { workspace = true } +serde = { workspace = true, features = ["derive"] } + +ipc_storage_ipld = { path = "../../../../ipc-storage/ipld" } + +[dev-dependencies] +blake3 = { workspace = true } + +[features] +fil-actor = ["fil_actors_runtime/fil-actor"] diff --git a/fendermint/actors/blobs/shared/src/accounts.rs b/fendermint/actors/blobs/shared/src/accounts.rs new file mode 100644 index 0000000000..2348f2a9c9 --- /dev/null +++ b/fendermint/actors/blobs/shared/src/accounts.rs @@ -0,0 +1,11 @@ +// Copyright 2025 Recall Contributors +// Copyright 2021-2023 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT + +mod account; +mod params; +mod status; + +pub use account::*; +pub use params::*; +pub use status::*; diff --git a/fendermint/actors/blobs/shared/src/accounts/account.rs b/fendermint/actors/blobs/shared/src/accounts/account.rs new file mode 100644 index 0000000000..b93b6b213e --- /dev/null +++ b/fendermint/actors/blobs/shared/src/accounts/account.rs @@ -0,0 +1,33 @@ +// Copyright 2025 Recall Contributors +// Copyright 2021-2023 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT + +use std::collections::HashMap; + +use fvm_ipld_encoding::tuple::*; +use fvm_shared::{address::Address, clock::ChainEpoch, econ::TokenAmount}; + +use crate::credit::{Credit, CreditApproval}; + +/// The external (shared) view of an account. +#[derive(Debug, Serialize_tuple, Deserialize_tuple)] +pub struct Account { + /// Total size of all blobs managed by the account. + pub capacity_used: u64, + /// Current free credit in byte-blocks that can be used for new commitments. + pub credit_free: Credit, + /// Current committed credit in byte-blocks that will be used for debits. + pub credit_committed: Credit, + /// Optional default sponsor account address. + pub credit_sponsor: Option
, + /// The chain epoch of the last debit. + pub last_debit_epoch: ChainEpoch, + /// Credit approvals to other accounts from this account, keyed by receiver. + pub approvals_to: HashMap, + /// Credit approvals to this account from other accounts, keyed by sender. + pub approvals_from: HashMap, + /// The maximum allowed TTL for actor's blobs. + pub max_ttl: ChainEpoch, + /// The total token value an account has used to buy credits. + pub gas_allowance: TokenAmount, +} diff --git a/fendermint/actors/blobs/shared/src/accounts/params.rs b/fendermint/actors/blobs/shared/src/accounts/params.rs new file mode 100644 index 0000000000..68dc097ea5 --- /dev/null +++ b/fendermint/actors/blobs/shared/src/accounts/params.rs @@ -0,0 +1,23 @@ +// Copyright 2025 Recall Contributors +// Copyright 2021-2023 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT + +use fvm_ipld_encoding::tuple::*; +use fvm_shared::address::Address; +use serde::{Deserialize, Serialize}; + +use super::AccountStatus; + +/// Params for setting account status. +#[derive(Clone, Debug, Serialize_tuple, Deserialize_tuple)] +pub struct SetAccountStatusParams { + /// Address to set the account status for. + pub subscriber: Address, + /// Status to set. + pub status: AccountStatus, +} + +/// Params for getting an account. +#[derive(Clone, Debug, Serialize, Deserialize)] +#[serde(transparent)] +pub struct GetAccountParams(pub Address); diff --git a/fendermint/actors/blobs/shared/src/accounts/status.rs b/fendermint/actors/blobs/shared/src/accounts/status.rs new file mode 100644 index 0000000000..64b274b1bf --- /dev/null +++ b/fendermint/actors/blobs/shared/src/accounts/status.rs @@ -0,0 +1,40 @@ +// Copyright 2025 Recall Contributors +// Copyright 2021-2023 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT + +use fvm_shared::clock::ChainEpoch; +use serde::{Deserialize, Serialize}; + +/// The status of an account. +/// This controls the max TTL that the user is allowed to set on their blobs. +#[derive(Clone, Copy, Debug, Default, Eq, PartialEq, Serialize, Deserialize)] +pub enum AccountStatus { + // Default TTL. + #[default] + Default, + /// Reduced TTL. + Reduced, + /// Extended TTL. + Extended, +} + +impl AccountStatus { + /// Returns the max allowed TTL. + pub fn get_max_ttl(&self, default_max_ttl: ChainEpoch) -> ChainEpoch { + match self { + AccountStatus::Default => default_max_ttl, + AccountStatus::Reduced => 0, + AccountStatus::Extended => ChainEpoch::MAX, + } + } +} + +impl std::fmt::Display for AccountStatus { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + AccountStatus::Default => write!(f, "default"), + AccountStatus::Reduced => write!(f, "reduced"), + AccountStatus::Extended => write!(f, "extended"), + } + } +} diff --git a/fendermint/actors/blobs/shared/src/blobs.rs b/fendermint/actors/blobs/shared/src/blobs.rs new file mode 100644 index 0000000000..d7bf810c87 --- /dev/null +++ b/fendermint/actors/blobs/shared/src/blobs.rs @@ -0,0 +1,25 @@ +// Copyright 2025 Recall Contributors +// Copyright 2021-2023 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT + +use std::collections::HashSet; + +use fvm_shared::address::Address; + +mod blob; +mod params; +mod status; +mod subscription; + +pub use blob::*; +pub use params::*; +pub use status::*; +pub use subscription::*; + +use crate::bytes::B256; + +/// Tuple representing a unique blob source. +pub type BlobSource = (Address, SubscriptionId, B256); + +/// The return type used when fetching "added" or "pending" blobs. 
+pub type BlobRequest = (B256, u64, HashSet); diff --git a/fendermint/actors/blobs/shared/src/blobs/blob.rs b/fendermint/actors/blobs/shared/src/blobs/blob.rs new file mode 100644 index 0000000000..b8f8f00144 --- /dev/null +++ b/fendermint/actors/blobs/shared/src/blobs/blob.rs @@ -0,0 +1,24 @@ +// Copyright 2025 Recall Contributors +// Copyright 2021-2023 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT + +use std::collections::HashMap; + +use fvm_ipld_encoding::tuple::*; +use fvm_shared::clock::ChainEpoch; + +use super::{BlobStatus, SubscriptionId}; +use crate::bytes::B256; + +/// The external (shared) view of a blob. +#[derive(Debug, Serialize_tuple, Deserialize_tuple)] +pub struct Blob { + /// The size of the content. + pub size: u64, + /// Blob metadata that contains information for blob recovery. + pub metadata_hash: B256, + /// Active subscribers (accounts) that are paying for the blob to expiry. + pub subscribers: HashMap, + /// Blob status. + pub status: BlobStatus, +} diff --git a/fendermint/actors/blobs/shared/src/blobs/params.rs b/fendermint/actors/blobs/shared/src/blobs/params.rs new file mode 100644 index 0000000000..0b6123802f --- /dev/null +++ b/fendermint/actors/blobs/shared/src/blobs/params.rs @@ -0,0 +1,133 @@ +// Copyright 2025 Recall Contributors +// Copyright 2021-2023 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT + +use fvm_ipld_encoding::tuple::*; +use fvm_shared::{address::Address, clock::ChainEpoch}; +use serde::{Deserialize, Serialize}; + +use super::{BlobStatus, SubscriptionId}; +use crate::bytes::B256; + +/// Params for adding a blob. +#[derive(Clone, Debug, Serialize_tuple, Deserialize_tuple)] +pub struct AddBlobParams { + /// Address of the entity adding the blob. + pub from: Address, + /// Optional sponsor address. + /// Origin or caller must still have a delegation from a sponsor. + pub sponsor: Option
, + /// Source Iroh node ID used for ingestion. + pub source: B256, + /// Blob blake3 hash. + pub hash: B256, + /// Blake3 hash of the metadata to use for blob recovery. + pub metadata_hash: B256, + /// Identifier used to differentiate blob additions for the same subscriber. + pub id: SubscriptionId, + /// Blob size. + pub size: u64, + /// Blob time-to-live epochs. + /// If not specified, the current default TTL from the config actor is used. + pub ttl: Option, +} + +/// Params for getting a blob. +#[derive(Clone, Debug, Serialize, Deserialize)] +#[serde(transparent)] +pub struct GetBlobParams(pub B256); + +/// Params for getting blob status. +#[derive(Clone, Debug, Serialize_tuple, Deserialize_tuple)] +pub struct GetBlobStatusParams { + /// The origin address that requested the blob. + /// This could be a wallet or machine. + pub subscriber: Address, + /// Blob blake3 hash. + pub hash: B256, + /// Identifier used to differentiate blob additions for the same subscriber. + pub id: SubscriptionId, +} + +/// Params for getting added blobs. +#[derive(Clone, Debug, Serialize, Deserialize)] +#[serde(transparent)] +pub struct GetAddedBlobsParams(pub u32); + +/// Params for getting pending blobs. +#[derive(Clone, Debug, Serialize, Deserialize)] +#[serde(transparent)] +pub struct GetPendingBlobsParams(pub u32); + +/// Params for setting a blob to pending. +#[derive(Clone, Debug, Serialize_tuple, Deserialize_tuple)] +pub struct SetBlobPendingParams { + /// Source Iroh node ID used for ingestion. + pub source: B256, + /// The address that requested the blob. + pub subscriber: Address, + /// Blob blake3 hash. + pub hash: B256, + /// Blob size. + pub size: u64, + /// Identifier used to differentiate blob additions for the same subscriber. + pub id: SubscriptionId, +} + +/// Params for finalizing a blob. +#[derive(Clone, Debug, Serialize_tuple, Deserialize_tuple)] +pub struct FinalizeBlobParams { + /// Source Iroh node ID used for ingestion. + pub source: B256, + /// The address that requested the blob. + /// This could be a wallet or machine. + pub subscriber: Address, + /// Blob blake3 hash. + pub hash: B256, + /// Blob size. + pub size: u64, + /// Identifier used to differentiate blob additions for the same subscriber. + pub id: SubscriptionId, + /// The status to set as final. + pub status: BlobStatus, + /// Aggregated BLS signature from node operators (48 bytes). + pub aggregated_signature: Vec, + /// Bitmap indicating which operators signed (bit position corresponds to operator index). + pub signer_bitmap: u128, +} + +/// Params for deleting a blob. +#[derive(Clone, Debug, Serialize_tuple, Deserialize_tuple)] +pub struct DeleteBlobParams { + /// Account address that initiated the deletion. + pub from: Address, + /// Optional sponsor address. + /// Origin or caller must still have a delegation from a sponsor. + /// Must be used if the caller is the delegate who added the blob. + pub sponsor: Option
, + /// Blob blake3 hash. + pub hash: B256, + /// Identifier used to differentiate blob additions for the same subscriber. + pub id: SubscriptionId, +} + +/// Params for overwriting a blob, i.e., deleting one and adding another. +#[derive(Clone, Debug, Serialize_tuple, Deserialize_tuple)] +pub struct OverwriteBlobParams { + /// Blake3 hash of the blob to be deleted. + pub old_hash: B256, + /// Params for a new blob to add. + pub add: AddBlobParams, +} + +/// Params for trimming blob expiries. +#[derive(Clone, Debug, Serialize_tuple, Deserialize_tuple)] +pub struct TrimBlobExpiriesParams { + /// Address to trim blob expiries for. + pub subscriber: Address, + /// Starting hash to trim expiries from. + pub starting_hash: Option, + /// Limit of blobs to trim expiries for. + /// This specifies the maximum number of blobs that will be examined for trimming. + pub limit: Option, +} diff --git a/fendermint/actors/blobs/shared/src/blobs/status.rs b/fendermint/actors/blobs/shared/src/blobs/status.rs new file mode 100644 index 0000000000..25435f3f80 --- /dev/null +++ b/fendermint/actors/blobs/shared/src/blobs/status.rs @@ -0,0 +1,30 @@ +// Copyright 2025 Recall Contributors +// Copyright 2021-2023 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT + +use serde::{Deserialize, Serialize}; + +/// The status of a blob. +#[derive(Clone, Debug, Default, Eq, PartialEq, Serialize, Deserialize)] +pub enum BlobStatus { + /// Blob is added but not resolving. + #[default] + Added, + /// Blob is pending resolve. + Pending, + /// Blob was successfully resolved. + Resolved, + /// Blob resolution failed. + Failed, +} + +impl std::fmt::Display for BlobStatus { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + BlobStatus::Added => write!(f, "added"), + BlobStatus::Pending => write!(f, "pending"), + BlobStatus::Resolved => write!(f, "resolved"), + BlobStatus::Failed => write!(f, "failed"), + } + } +} diff --git a/fendermint/actors/blobs/shared/src/blobs/subscription.rs b/fendermint/actors/blobs/shared/src/blobs/subscription.rs new file mode 100644 index 0000000000..b0c9b42a27 --- /dev/null +++ b/fendermint/actors/blobs/shared/src/blobs/subscription.rs @@ -0,0 +1,107 @@ +// Copyright 2025 Recall Contributors +// Copyright 2021-2023 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT + +use fil_actors_runtime::ActorError; +use fvm_ipld_encoding::tuple::*; +use fvm_shared::{address::Address, clock::ChainEpoch}; +use ipc_storage_ipld::hamt::MapKey; +use serde::{Deserialize, Serialize}; + +use crate::bytes::B256; + +/// An object used to determine what [`Account`](s) are accountable for a blob, and for how long. +/// Subscriptions allow us to distribute the cost of a blob across multiple accounts that +/// have added the same blob. +#[derive(Clone, Debug, Default, Eq, PartialEq, Serialize_tuple, Deserialize_tuple)] +pub struct Subscription { + /// Added block. + pub added: ChainEpoch, + /// Overlap with initial group expiry. + pub overlap: ChainEpoch, + /// Expiry block. + pub expiry: ChainEpoch, + /// Source Iroh node ID used for ingestion. + /// This might be unique to each instance of the same blob. + /// It's included here for record keeping. + pub source: B256, + /// The delegate origin that may have created the subscription via a credit approval. + pub delegate: Option
, + /// Whether the subscription failed due to an issue resolving the target blob. + pub failed: bool, +} + +/// User-defined identifier used to differentiate blob subscriptions for the same subscriber. +#[derive(Clone, Debug, Default, PartialEq, Eq, Hash, Serialize, Deserialize)] +pub struct SubscriptionId { + inner: String, +} + +impl SubscriptionId { + /// Max ID length. + pub const MAX_LEN: usize = 64; + + /// Returns a new [`SubscriptionId`]. + pub fn new(value: &str) -> Result { + if value.len() > Self::MAX_LEN { + return Err(ActorError::illegal_argument(format!( + "subscription ID length is {} but must not exceed the maximum of {} characters", + value.len(), + Self::MAX_LEN + ))); + } + Ok(Self { + inner: value.to_string(), + }) + } +} + +impl From for String { + fn from(id: SubscriptionId) -> String { + id.inner + } +} + +impl TryFrom for SubscriptionId { + type Error = ActorError; + + fn try_from(value: String) -> Result { + Self::new(&value) + } +} + +impl std::fmt::Display for SubscriptionId { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + if self.inner.is_empty() { + write!(f, "default") + } else { + write!(f, "{}", self.inner) + } + } +} + +impl MapKey for SubscriptionId { + fn from_bytes(b: &[u8]) -> Result { + let inner = String::from_utf8(b.to_vec()).map_err(|e| e.to_string())?; + Self::new(&inner).map_err(|e| e.to_string()) + } + + fn to_bytes(&self) -> Result, String> { + Ok(self.inner.as_bytes().to_vec()) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_subscription_id_length() { + let id_str = |len: usize| "a".repeat(len); + let id = SubscriptionId::new(&id_str(SubscriptionId::MAX_LEN)).unwrap(); + assert_eq!(id.inner, id_str(SubscriptionId::MAX_LEN)); + + let id = SubscriptionId::new(&id_str(SubscriptionId::MAX_LEN + 1)); + assert!(id.is_err()); + } +} diff --git a/fendermint/actors/blobs/shared/src/bytes.rs b/fendermint/actors/blobs/shared/src/bytes.rs new file mode 100644 index 0000000000..06b913c5be --- /dev/null +++ b/fendermint/actors/blobs/shared/src/bytes.rs @@ -0,0 +1,118 @@ +// Copyright 2025 Recall Contributors +// Copyright 2021-2023 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT + +use anyhow::anyhow; +use data_encoding::{DecodeError, DecodeKind}; +use ipc_storage_ipld::hamt::MapKey; +use serde::{Deserialize, Serialize}; + +/// Container for 256 bits or 32 bytes. +#[derive( + Clone, Copy, Debug, Default, PartialEq, Eq, Ord, PartialOrd, Hash, Serialize, Deserialize, +)] +#[serde(transparent)] +pub struct B256(pub [u8; 32]); + +impl AsRef<[u8]> for B256 { + fn as_ref(&self) -> &[u8] { + &self.0[..] 
+ } +} + +impl From<[u8; 32]> for B256 { + fn from(value: [u8; 32]) -> Self { + Self(value) + } +} + +impl From for [u8; 32] { + fn from(value: B256) -> Self { + value.0 + } +} + +impl From<&[u8; 32]> for B256 { + fn from(value: &[u8; 32]) -> Self { + Self(*value) + } +} + +impl TryFrom<&[u8]> for B256 { + type Error = anyhow::Error; + + fn try_from(slice: &[u8]) -> Result { + if slice.len() == 32 { + let mut array = [0u8; 32]; + array.copy_from_slice(slice); + Ok(Self(array)) + } else { + Err(anyhow!("hash slice must be exactly 32 bytes")) + } + } +} + +impl From for B256 { + fn from(value: u64) -> Self { + let mut padded = [0u8; 32]; + padded[24..].copy_from_slice(&value.to_be_bytes()); + Self(padded) + } +} + +impl std::str::FromStr for B256 { + type Err = anyhow::Error; + + fn from_str(s: &str) -> Result { + parse_array::<32>(s) + .map(Self::from) + .map_err(|e| anyhow::anyhow!(e)) + } +} + +/// Parse from a base32 string into a byte array +fn parse_array(input: &str) -> Result<[u8; N], DecodeError> { + data_encoding::BASE32_NOPAD + .decode(input.to_ascii_uppercase().as_bytes())? + .try_into() + .map_err(|_| DecodeError { + position: N, + kind: DecodeKind::Length, + }) +} + +impl std::fmt::Display for B256 { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + let mut t = data_encoding::BASE32_NOPAD.encode(self.as_ref()); + t.make_ascii_lowercase(); + f.write_str(&t) + } +} + +impl MapKey for B256 { + fn from_bytes(b: &[u8]) -> Result { + b.try_into().map_err(|e: anyhow::Error| e.to_string()) + } + + fn to_bytes(&self) -> Result, String> { + Ok(self.0.to_vec()) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use std::str::FromStr; + + #[test] + fn test_display_parse_roundtrip() { + for i in 0..100 { + let b: B256 = blake3::hash(&[i]).as_bytes().into(); + let text = b.to_string(); + let b1 = text.parse::().unwrap(); + let b2 = B256::from_str(&text).unwrap(); + assert_eq!(b, b1); + assert_eq!(b, b2); + } + } +} diff --git a/fendermint/actors/blobs/shared/src/credit.rs b/fendermint/actors/blobs/shared/src/credit.rs new file mode 100644 index 0000000000..2a3b46b23a --- /dev/null +++ b/fendermint/actors/blobs/shared/src/credit.rs @@ -0,0 +1,19 @@ +// Copyright 2025 Recall Contributors +// Copyright 2021-2023 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT + +use fvm_shared::econ::TokenAmount; + +mod allowance; +mod approval; +mod params; +mod token_rate; + +pub use allowance::*; +pub use approval::*; +pub use params::*; +pub use token_rate::*; + +/// Credit is counted the same way as tokens. +/// The smallest indivisible unit is 1 atto, and 1 credit = 1e18 atto credits. +pub type Credit = TokenAmount; diff --git a/fendermint/actors/blobs/shared/src/credit/allowance.rs b/fendermint/actors/blobs/shared/src/credit/allowance.rs new file mode 100644 index 0000000000..b462e4d5d4 --- /dev/null +++ b/fendermint/actors/blobs/shared/src/credit/allowance.rs @@ -0,0 +1,44 @@ +// Copyright 2025 Recall Contributors +// Copyright 2021-2023 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT + +use fvm_ipld_encoding::tuple::*; +use fvm_shared::{address::Address, econ::TokenAmount}; + +use crate::credit::Credit; + +/// Credit allowance for an account. +#[derive(Debug, Default, Clone, PartialEq, Serialize_tuple, Deserialize_tuple)] +pub struct CreditAllowance { + /// The amount from the account. + pub amount: Credit, + /// The account's default sponsor. + pub sponsor: Option
, + /// The amount from the account's default sponsor. + pub sponsored_amount: Credit, +} + +impl CreditAllowance { + /// Returns the total allowance from self and default sponsor. + pub fn total(&self) -> Credit { + &self.amount + &self.sponsored_amount + } +} + +/// Gas allowance for an account. +#[derive(Debug, Default, Clone, PartialEq, Serialize_tuple, Deserialize_tuple)] +pub struct GasAllowance { + /// The amount from the account. + pub amount: TokenAmount, + /// The account's default sponsor. + pub sponsor: Option
, + /// The amount from the account's default sponsor. + pub sponsored_amount: TokenAmount, +} + +impl GasAllowance { + /// Returns the total allowance from self and default sponsor. + pub fn total(&self) -> TokenAmount { + &self.amount + &self.sponsored_amount + } +} diff --git a/fendermint/actors/blobs/shared/src/credit/approval.rs b/fendermint/actors/blobs/shared/src/credit/approval.rs new file mode 100644 index 0000000000..397eb34b7d --- /dev/null +++ b/fendermint/actors/blobs/shared/src/credit/approval.rs @@ -0,0 +1,78 @@ +// Copyright 2025 Recall Contributors +// Copyright 2021-2023 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT + +use fil_actors_runtime::ActorError; +use fvm_ipld_encoding::tuple::*; +use fvm_shared::{clock::ChainEpoch, econ::TokenAmount}; + +use crate::credit::Credit; + +/// A credit approval from one account to another. +#[derive(Debug, Default, Clone, PartialEq, Serialize_tuple, Deserialize_tuple)] +pub struct CreditApproval { + /// Optional credit approval limit. + pub credit_limit: Option, + /// Used to limit gas fee delegation. + pub gas_allowance_limit: Option, + /// Optional credit approval expiry epoch. + pub expiry: Option, + /// Counter for how much credit has been used via this approval. + pub credit_used: Credit, + /// Used to track gas fees paid for by the delegation + pub gas_allowance_used: TokenAmount, +} + +impl CreditApproval { + /// Returns a new credit approval. + pub fn new( + credit_limit: Option, + gas_allowance_limit: Option, + expiry: Option, + ) -> Self { + Self { + credit_limit, + gas_allowance_limit, + expiry, + ..Default::default() + } + } + + /// Validates whether the approval has enough allowance for the credit amount. + pub fn validate_credit_usage(&self, amount: &TokenAmount) -> Result<(), ActorError> { + if let Some(credit_limit) = self.credit_limit.as_ref() { + let unused = &(credit_limit - &self.credit_used); + if unused < amount { + return Err(ActorError::forbidden(format!( + "usage would exceed approval credit limit (available: {}; required: {})", + unused, amount + ))); + } + } + Ok(()) + } + + /// Validates whether the approval has enough allowance for the gas amount. + pub fn validate_gas_usage(&self, amount: &TokenAmount) -> Result<(), ActorError> { + if let Some(gas_limit) = self.gas_allowance_limit.as_ref() { + let unused = &(gas_limit - &self.gas_allowance_used); + if unused < amount { + return Err(ActorError::forbidden(format!( + "usage would exceed approval gas allowance (available: {}; required: {})", + unused, amount + ))); + } + } + Ok(()) + } + + /// Validates whether the approval has a valid expiration. + pub fn validate_expiration(&self, current_epoch: ChainEpoch) -> Result<(), ActorError> { + if let Some(expiry) = self.expiry { + if expiry <= current_epoch { + return Err(ActorError::forbidden("approval expired".into())); + } + } + Ok(()) + } +} diff --git a/fendermint/actors/blobs/shared/src/credit/params.rs b/fendermint/actors/blobs/shared/src/credit/params.rs new file mode 100644 index 0000000000..01f76a06a7 --- /dev/null +++ b/fendermint/actors/blobs/shared/src/credit/params.rs @@ -0,0 +1,79 @@ +// Copyright 2025 Recall Contributors +// Copyright 2021-2023 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT + +use std::collections::HashSet; + +use fvm_ipld_encoding::tuple::*; +use fvm_shared::{address::Address, clock::ChainEpoch, econ::TokenAmount}; +use serde::{Deserialize, Serialize}; + +use super::Credit; + +/// Params for buying credits. 
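// Usage sketch for the CreditApproval validators defined above: before a delegated
// spend is charged, an approval would typically be checked for expiry first and then
// for remaining credit. `approval`, `amount`, and `epoch` are assumed local values,
// not names from this crate.
fn check_delegated_spend(
    approval: &CreditApproval,
    amount: &TokenAmount,
    epoch: ChainEpoch,
) -> Result<(), ActorError> {
    // An expired approval is rejected outright.
    approval.validate_expiration(epoch)?;
    // Otherwise the spend must fit inside the unused portion of the credit limit.
    approval.validate_credit_usage(amount)?;
    Ok(())
}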
+#[derive(Clone, Debug, Serialize, Deserialize)] +#[serde(transparent)] +pub struct BuyCreditParams(pub Address); + +/// Set credit sponsor. +/// If not present, the sponsor is unset. +#[derive(Clone, Debug, Serialize, Deserialize)] +#[serde(transparent)] +pub struct SetSponsorParams(pub Option
); + +/// Params for updating credit. +#[derive(Clone, Debug, Serialize_tuple, Deserialize_tuple)] +pub struct UpdateGasAllowanceParams { + /// Account address that initiated the update. + pub from: Address, + /// Optional account address that is sponsoring the update. + pub sponsor: Option
, + /// Token amount to add, which can be negative. + pub add_amount: TokenAmount, +} + +/// Params for approving credit. +#[derive(Clone, Debug, Serialize_tuple, Deserialize_tuple)] +pub struct ApproveCreditParams { + /// Account address that is receiving the approval. + pub to: Address, + /// Optional restriction on caller addresses, e.g., a bucket. + /// The receiver will only be able to use the approval via an allowlisted caller. + /// If not present, any caller is allowed. + pub caller_allowlist: Option>, + /// Optional credit approval limit. + /// If specified, the approval becomes invalid once the used credits reach the + /// specified limit. + pub credit_limit: Option, + /// Optional gas fee limit. + /// If specified, the approval becomes invalid once the used gas fees reach the + /// specified limit. + pub gas_fee_limit: Option, + /// Optional credit approval time-to-live epochs. + /// If specified, the approval becomes invalid after this duration. + pub ttl: Option, +} + +/// Params for revoking credit. +#[derive(Clone, Debug, Serialize_tuple, Deserialize_tuple)] +pub struct RevokeCreditParams { + /// Account address whose approval is being revoked. + pub to: Address, + /// Optional caller address to remove from the caller allowlist. + /// If not present, the entire approval is revoked. + pub for_caller: Option
, +} + +/// Params for looking up a credit approval. +#[derive(Clone, Debug, Serialize_tuple, Deserialize_tuple)] +pub struct GetCreditApprovalParams { + /// Account address that made the approval. + pub from: Address, + /// Account address that received the approval. + pub to: Address, +} + +/// Params for looking up credit allowance. +#[derive(Clone, Debug, Serialize, Deserialize)] +#[serde(transparent)] +pub struct GetGasAllowanceParams(pub Address); diff --git a/fendermint/actors/blobs/shared/src/credit/token_rate.rs b/fendermint/actors/blobs/shared/src/credit/token_rate.rs new file mode 100644 index 0000000000..6b816c3682 --- /dev/null +++ b/fendermint/actors/blobs/shared/src/credit/token_rate.rs @@ -0,0 +1,157 @@ +// Copyright 2025 Recall Contributors +// Copyright 2021-2023 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT + +use std::ops::{Div, Mul}; + +use fvm_shared::{ + bigint::{BigInt, BigUint}, + econ::TokenAmount, +}; +use serde::{Deserialize, Serialize}; + +use super::Credit; + +/// TokenCreditRate determines how much atto credits can be bought by a certain amount of RECALL. +#[derive(Clone, Default, Debug, Serialize, Deserialize, Eq, PartialEq)] +pub struct TokenCreditRate { + rate: BigUint, +} + +impl TokenCreditRate { + pub const RATIO: u128 = 10u128.pow(18); + + pub fn from(rate: impl Into) -> Self { + Self { rate: rate.into() } + } + + pub fn rate(&self) -> &BigUint { + &self.rate + } +} + +impl std::fmt::Display for TokenCreditRate { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{}", self.rate) + } +} + +impl Mul<&TokenCreditRate> for TokenAmount { + type Output = Credit; + + fn mul(self, rate: &TokenCreditRate) -> Self::Output { + let rate = BigInt::from(rate.rate.clone()); + (self * rate).div_floor(TokenCreditRate::RATIO) + } +} + +impl Div<&TokenCreditRate> for &Credit { + type Output = TokenAmount; + + fn div(self, rate: &TokenCreditRate) -> Self::Output { + #[allow(clippy::suspicious_arithmetic_impl)] + (self * TokenCreditRate::RATIO).div_floor(rate.rate.clone()) + } +} + +impl PartialOrd for TokenCreditRate { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} + +impl Ord for TokenCreditRate { + fn cmp(&self, other: &Self) -> std::cmp::Ordering { + self.rate.cmp(&other.rate) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_token_credit_rate() { + struct TestCase { + tokens: TokenAmount, + rate: TokenCreditRate, + expected: &'static str, + description: &'static str, + } + + let test_cases = vec![ + TestCase { + tokens: TokenAmount::from_whole(1), + rate: TokenCreditRate::from(1usize), + expected: "0.000000000000000001", + description: "lower bound: 1 RECALL buys 1 atto credit", + }, + TestCase { + tokens: TokenAmount::from_nano(500000000), // 0.5 RECALL + rate: TokenCreditRate::from(1usize), + expected: "0.0", + description: "crossing lower bound. 
0.5 RECALL cannot buy 1 atto credit", + }, + TestCase { + tokens: TokenAmount::from_whole(1), + rate: TokenCreditRate::from(2usize), + expected: "0.000000000000000002", + description: "1 RECALL buys 2 atto credits", + }, + TestCase { + tokens: TokenAmount::from_whole(1), + rate: TokenCreditRate::from(10u64.pow(18)), + expected: "1.0", + description: "1 RECALL buys 1 whole credit", + }, + TestCase { + tokens: TokenAmount::from_whole(50), + rate: TokenCreditRate::from(10u64.pow(18)), + expected: "50.0", + description: "50 RECALL buys 50 whole credits", + }, + TestCase { + tokens: TokenAmount::from_nano(233432100u64), + rate: TokenCreditRate::from(10u64.pow(18)), + expected: "0.2334321", + description: "0.2334321 RECALL buys 0.2334321 credits", + }, + TestCase { + tokens: TokenAmount::from_nano(233432100u64), + rate: TokenCreditRate::from(10u128.pow(36)), + expected: "233432100000000000.0", + description: "0.2334321 RECALL buys 233432100000000000 credits", + }, + TestCase { + tokens: TokenAmount::from_atto(1), // 1 attoRECALL + rate: TokenCreditRate::from(10u128.pow(36)), + expected: "1.0", + description: "1 atto RECALL buys 1 credit", + }, + TestCase { + tokens: TokenAmount::from_whole(1), + rate: TokenCreditRate::from(10u128.pow(18).div(4)), + expected: "0.25", + description: "1 RECALL buys 0.25 credit", + }, + TestCase { + tokens: TokenAmount::from_whole(1), + rate: TokenCreditRate::from(10u128.pow(18).div(3)), + expected: "0.333333333333333333", + description: "1 RECALL buys 0.333333333333333333 credit", + }, + ]; + + for t in test_cases { + let credits = t.tokens.clone() * &t.rate; + assert_eq!( + t.expected, + credits.to_string(), + "tc: {}, {}, {}", + t.description, + t.tokens, + t.rate + ); + } + } +} diff --git a/fendermint/actors/blobs/shared/src/lib.rs b/fendermint/actors/blobs/shared/src/lib.rs new file mode 100644 index 0000000000..b5d78a0992 --- /dev/null +++ b/fendermint/actors/blobs/shared/src/lib.rs @@ -0,0 +1,54 @@ +// Copyright 2025 Recall Contributors +// Copyright 2021-2023 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT + +use fvm_ipld_encoding::tuple::*; +use fvm_ipld_encoding::tuple::{Deserialize_tuple, Serialize_tuple}; +use fvm_shared::econ::TokenAmount; +use fvm_shared::{address::Address, ActorID}; + +use crate::credit::{Credit, TokenCreditRate}; + +pub mod accounts; +pub mod blobs; +pub mod bytes; +pub mod credit; +pub mod method; +pub mod operators; +pub mod sdk; + +/// The unique identifier for the blob actor in the system. +pub const BLOBS_ACTOR_ID: ActorID = 66; +/// The address of the blob actor, derived from its actor ID. +pub const BLOBS_ACTOR_ADDR: Address = Address::new_id(BLOBS_ACTOR_ID); + +/// The stats of the blob actor. +#[derive(Clone, Debug, Serialize_tuple, Deserialize_tuple)] +pub struct GetStatsReturn { + /// The current token balance earned by the subnet. + pub balance: TokenAmount, + /// The total free storage capacity of the subnet. + pub capacity_free: u64, + /// The total used storage capacity of the subnet. + pub capacity_used: u64, + /// The total number of credits sold in the subnet. + pub credit_sold: Credit, + /// The total number of credits committed to active storage in the subnet. + pub credit_committed: Credit, + /// The total number of credits debited in the subnet. + pub credit_debited: Credit, + /// The token to credit rate. + pub token_credit_rate: TokenCreditRate, + /// Total number of debit accounts. + pub num_accounts: u64, + /// Total number of actively stored blobs. 
+ pub num_blobs: u64, + /// Total number of blobs that are not yet added to the validator's resolve pool. + pub num_added: u64, + // Total bytes of all blobs that are not yet added to the validator's resolve pool. + pub bytes_added: u64, + /// Total number of currently resolving blobs. + pub num_resolving: u64, + /// Total bytes of all currently resolving blobs. + pub bytes_resolving: u64, +} diff --git a/fendermint/actors/blobs/shared/src/method.rs b/fendermint/actors/blobs/shared/src/method.rs new file mode 100644 index 0000000000..3718f09132 --- /dev/null +++ b/fendermint/actors/blobs/shared/src/method.rs @@ -0,0 +1,49 @@ +// Copyright 2025 Recall Contributors +// Copyright 2021-2023 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT + +use fvm_shared::METHOD_CONSTRUCTOR; +use num_derive::FromPrimitive; + +#[derive(FromPrimitive)] +#[repr(u64)] +pub enum Method { + Constructor = METHOD_CONSTRUCTOR, + + // EVM Interop + InvokeContract = frc42_dispatch::method_hash!("InvokeEVM"), + + // User methods + BuyCredit = frc42_dispatch::method_hash!("BuyCredit"), + ApproveCredit = frc42_dispatch::method_hash!("ApproveCredit"), + RevokeCredit = frc42_dispatch::method_hash!("RevokeCredit"), + SetAccountSponsor = frc42_dispatch::method_hash!("SetAccountSponsor"), + GetAccount = frc42_dispatch::method_hash!("GetAccount"), + GetCreditApproval = frc42_dispatch::method_hash!("GetCreditApproval"), + AddBlob = frc42_dispatch::method_hash!("AddBlob"), + GetBlob = frc42_dispatch::method_hash!("GetBlob"), + DeleteBlob = frc42_dispatch::method_hash!("DeleteBlob"), + OverwriteBlob = frc42_dispatch::method_hash!("OverwriteBlob"), + + // System methods + GetGasAllowance = frc42_dispatch::method_hash!("GetGasAllowance"), + UpdateGasAllowance = frc42_dispatch::method_hash!("UpdateGasAllowance"), + GetBlobStatus = frc42_dispatch::method_hash!("GetBlobStatus"), + GetAddedBlobs = frc42_dispatch::method_hash!("GetAddedBlobs"), + GetPendingBlobs = frc42_dispatch::method_hash!("GetPendingBlobs"), + SetBlobPending = frc42_dispatch::method_hash!("SetBlobPending"), + FinalizeBlob = frc42_dispatch::method_hash!("FinalizeBlob"), + DebitAccounts = frc42_dispatch::method_hash!("DebitAccounts"), + + // Admin methods + SetAccountStatus = frc42_dispatch::method_hash!("SetAccountStatus"), + TrimBlobExpiries = frc42_dispatch::method_hash!("TrimBlobExpiries"), + + // Metrics methods + GetStats = frc42_dispatch::method_hash!("GetStats"), + + // Node operator methods + RegisterNodeOperator = frc42_dispatch::method_hash!("RegisterNodeOperator"), + GetOperatorInfo = frc42_dispatch::method_hash!("GetOperatorInfo"), + GetActiveOperators = frc42_dispatch::method_hash!("GetActiveOperators"), +} diff --git a/fendermint/actors/blobs/shared/src/operators.rs b/fendermint/actors/blobs/shared/src/operators.rs new file mode 100644 index 0000000000..e612958276 --- /dev/null +++ b/fendermint/actors/blobs/shared/src/operators.rs @@ -0,0 +1,41 @@ +// Copyright 2025 Recall Contributors +// Copyright 2021-2023 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT + +use fvm_ipld_encoding::tuple::*; +use fvm_shared::address::Address; + +/// Parameters for registering a node operator +#[derive(Clone, Debug, Serialize_tuple, Deserialize_tuple)] +pub struct RegisterNodeOperatorParams { + /// BLS public key (must be 48 bytes) + pub bls_pubkey: Vec, + /// RPC URL where the operator's node can be queried for signatures + pub rpc_url: String, +} + +/// Parameters for getting operator information +#[derive(Clone, Debug, Serialize_tuple, 
Deserialize_tuple)] +pub struct GetOperatorInfoParams { + /// Address of the operator + pub address: Address, +} + +/// Return type for getting operator information +#[derive(Clone, Debug, Serialize_tuple, Deserialize_tuple)] +pub struct OperatorInfo { + /// BLS public key + pub bls_pubkey: Vec, + /// RPC URL + pub rpc_url: String, + /// Whether the operator is active + pub active: bool, +} + +/// Return type for getting all active operators +#[derive(Clone, Debug, Serialize_tuple, Deserialize_tuple)] +pub struct GetActiveOperatorsReturn { + /// Ordered list of active operator addresses + /// Index in this list corresponds to bit position in signature bitmap + pub operators: Vec
, +} diff --git a/fendermint/actors/blobs/shared/src/sdk.rs b/fendermint/actors/blobs/shared/src/sdk.rs new file mode 100644 index 0000000000..77bd816270 --- /dev/null +++ b/fendermint/actors/blobs/shared/src/sdk.rs @@ -0,0 +1,97 @@ +// Copyright 2025 Recall Contributors +// Copyright 2021-2023 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT + +use fil_actors_runtime::{deserialize_block, extract_send_result, runtime::Runtime, ActorError}; +use fvm_ipld_encoding::ipld_block::IpldBlock; +use fvm_shared::{address::Address, sys::SendFlags, MethodNum}; + +use crate::{ + blobs::{ + AddBlobParams, Blob, DeleteBlobParams, GetBlobParams, OverwriteBlobParams, Subscription, + }, + credit::{CreditApproval, GetCreditApprovalParams}, + method::Method, + BLOBS_ACTOR_ADDR, +}; + +/// Returns a credit approval from one account to another if it exists. +pub fn get_credit_approval( + rt: &impl Runtime, + from: Address, + to: Address, +) -> Result, ActorError> { + let params = GetCreditApprovalParams { from, to }; + + deserialize_block(extract_send_result(rt.send( + &BLOBS_ACTOR_ADDR, + Method::GetCreditApproval as MethodNum, + IpldBlock::serialize_cbor(¶ms)?, + rt.message().value_received(), + None, + SendFlags::READ_ONLY, + ))?) +} + +/// Returns `true` if `from` and `to` are the same address, +/// or if `from` has a credit delegation to `to` that has not yet expired. +pub fn has_credit_approval( + rt: &impl Runtime, + from: Address, + to: Address, +) -> Result { + if from != to { + let approval = get_credit_approval(rt, from, to)?; + let curr_epoch = rt.curr_epoch(); + Ok(approval.is_some_and(|a| a.expiry.is_none_or(|e| e >= curr_epoch))) + } else { + Ok(true) + } +} + +/// Adds a blob. +pub fn add_blob(rt: &impl Runtime, params: AddBlobParams) -> Result { + let params = IpldBlock::serialize_cbor(¶ms)?; + deserialize_block(extract_send_result(rt.send_simple( + &BLOBS_ACTOR_ADDR, + Method::AddBlob as MethodNum, + params, + rt.message().value_received(), + ))?) +} + +/// Returns information about a blob. +pub fn get_blob(rt: &impl Runtime, params: GetBlobParams) -> Result, ActorError> { + deserialize_block(extract_send_result(rt.send( + &BLOBS_ACTOR_ADDR, + Method::GetBlob as MethodNum, + IpldBlock::serialize_cbor(¶ms)?, + rt.message().value_received(), + None, + SendFlags::READ_ONLY, + ))?) +} + +/// Deletes a blob. +pub fn delete_blob(rt: &impl Runtime, params: DeleteBlobParams) -> Result<(), ActorError> { + extract_send_result(rt.send_simple( + &BLOBS_ACTOR_ADDR, + Method::DeleteBlob as MethodNum, + IpldBlock::serialize_cbor(¶ms)?, + rt.message().value_received(), + ))?; + Ok(()) +} + +/// Overwrite a blob, i.e., delete one and add another in a single call. +pub fn overwrite_blob( + rt: &impl Runtime, + params: OverwriteBlobParams, +) -> Result { + deserialize_block(extract_send_result(rt.send_simple( + &BLOBS_ACTOR_ADDR, + Method::OverwriteBlob as MethodNum, + IpldBlock::serialize_cbor(¶ms)?, + rt.message().value_received(), + ))?) 
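// Sketch of how another actor might combine the helpers in this module: verify a
// sponsor delegation with has_credit_approval before adding a blob on the sponsor's
// behalf. `origin`, `sponsor`, and `params` are assumed inputs built by the calling
// actor, not names defined here.
fn add_sponsored_blob(
    rt: &impl Runtime,
    origin: Address,
    sponsor: Address,
    params: AddBlobParams,
) -> Result<Subscription, ActorError> {
    if !has_credit_approval(rt, sponsor, origin)? {
        return Err(ActorError::forbidden(
            "caller has no valid credit approval from sponsor".into(),
        ));
    }
    add_blob(rt, params)
}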
+} diff --git a/fendermint/actors/blobs/src/actor.rs b/fendermint/actors/blobs/src/actor.rs new file mode 100644 index 0000000000..1187cef8f5 --- /dev/null +++ b/fendermint/actors/blobs/src/actor.rs @@ -0,0 +1,235 @@ +// Copyright 2025 Recall Contributors +// Copyright 2021-2023 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT + +use fendermint_actor_blobs_shared::{bytes::B256, method::Method}; +use fil_actors_runtime::{ + actor_dispatch, actor_error, + runtime::{ActorCode, Runtime}, + ActorError, FIRST_EXPORTED_METHOD_NUMBER, SYSTEM_ACTOR_ADDR, +}; +use fvm_ipld_encoding::ipld_block::IpldBlock; +use fvm_shared::MethodNum; +use ipc_storage_actor_sdk::evm::{InputData, InvokeContractParams, InvokeContractReturn}; + +use crate::{ + sol_facade::{blobs as sol_blobs, credit as sol_credit, AbiCall, AbiCallRuntime}, + State, BLOBS_ACTOR_NAME, +}; + +mod admin; +mod metrics; +mod system; +mod user; + +#[cfg(feature = "fil-actor")] +fil_actors_runtime::wasm_trampoline!(BlobsActor); + +/// Singleton actor for managing blob storage. +/// +/// The [`Address`]es stored in this actor's state _must_ be ID-based addresses for +/// efficient comparison with message origin and caller addresses, which are always ID-based. +/// [`Address`]es in the method params can be of any type. +/// They will be resolved to ID-based addresses. +/// +/// For simplicity, this actor currently manages both blobs and credit. +/// A future version of the protocol will likely separate them in some way. +pub struct BlobsActor; + +impl BlobsActor { + /// Creates a new [`BlobsActor`] state. + /// + /// This is only used in tests. This actor is created manually at genesis. + fn constructor(rt: &impl Runtime) -> Result<(), ActorError> { + rt.validate_immediate_caller_is(std::iter::once(&SYSTEM_ACTOR_ADDR))?; + let state = State::new(rt.store())?; + rt.create(&state) + } + + /// Invokes actor methods with EVM calldata. + fn invoke_contract( + rt: &impl Runtime, + params: InvokeContractParams, + ) -> Result { + let input_data: InputData = params.try_into()?; + if sol_blobs::can_handle(&input_data) { + let output_data = match sol_blobs::parse_input(&input_data)? { + sol_blobs::Calls::addBlob(call) => { + let params = call.params(rt)?; + Self::add_blob(rt, params)?; + call.returns(()) + } + sol_blobs::Calls::deleteBlob(call) => { + let params = call.params(rt)?; + Self::delete_blob(rt, params)?; + call.returns(()) + } + sol_blobs::Calls::getBlob(call) => { + let params = call.params()?; + let blob = Self::get_blob(rt, params)?; + call.returns(blob)? + } + sol_blobs::Calls::getStats(call) => { + let stats = Self::get_stats(rt)?; + call.returns(stats) + } + sol_blobs::Calls::overwriteBlob(call) => { + let params = call.params(rt)?; + Self::overwrite_blob(rt, params)?; + call.returns(()) + } + sol_blobs::Calls::trimBlobExpiries(call) => { + let params = call.params(); + let cursor = Self::trim_blob_expiries(rt, params)?; + call.returns(cursor) + } + }; + Ok(InvokeContractReturn { output_data }) + } else if sol_credit::can_handle(&input_data) { + let output_data = match sol_credit::parse_input(&input_data)? 
{ + sol_credit::Calls::buyCredit_0(call) => { + // function buyCredit() external payable; + let params = call.params(rt); + Self::buy_credit(rt, params)?; + call.returns(()) + } + sol_credit::Calls::buyCredit_1(call) => { + // function buyCredit(address recipient) external payable; + let params = call.params(); + Self::buy_credit(rt, params)?; + call.returns(()) + } + sol_credit::Calls::approveCredit_0(call) => { + let params = call.params(); + Self::approve_credit(rt, params)?; + call.returns(()) + } + sol_credit::Calls::approveCredit_1(call) => { + let params = call.params(); + Self::approve_credit(rt, params)?; + call.returns(()) + } + sol_credit::Calls::approveCredit_2(call) => { + let params = call.params(); + Self::approve_credit(rt, params)?; + call.returns(()) + } + sol_credit::Calls::revokeCredit_0(call) => { + let params = call.params(); + Self::revoke_credit(rt, params)?; + call.returns(()) + } + sol_credit::Calls::revokeCredit_1(call) => { + let params = call.params(); + Self::revoke_credit(rt, params)?; + call.returns(()) + } + sol_credit::Calls::setAccountSponsor(call) => { + let params = call.params(); + Self::set_account_sponsor(rt, params)?; + call.returns(()) + } + sol_credit::Calls::getAccount(call) => { + let params = call.params(); + let account_info = Self::get_account(rt, params)?; + call.returns(account_info)? + } + sol_credit::Calls::getCreditApproval(call) => { + let params = call.params(); + let credit_approval = Self::get_credit_approval(rt, params)?; + call.returns(credit_approval) + } + sol_credit::Calls::setAccountStatus(call) => { + let params = call.params()?; + Self::set_account_status(rt, params)?; + call.returns(()) + } + }; + Ok(InvokeContractReturn { output_data }) + } else { + Err(actor_error!(illegal_argument, "invalid call".to_string())) + } + } + + /// Fallback method for unimplemented method numbers. + fn fallback( + rt: &impl Runtime, + method: MethodNum, + _: Option, + ) -> Result, ActorError> { + rt.validate_immediate_caller_accept_any()?; + if method >= FIRST_EXPORTED_METHOD_NUMBER { + Ok(None) + } else { + Err(actor_error!(unhandled_message; "invalid method: {}", method)) + } + } +} + +impl ActorCode for BlobsActor { + type Methods = Method; + + fn name() -> &'static str { + BLOBS_ACTOR_NAME + } + + actor_dispatch! { + Constructor => constructor, + + // EVM interop + InvokeContract => invoke_contract, + + // User methods + BuyCredit => buy_credit, + ApproveCredit => approve_credit, + RevokeCredit => revoke_credit, + SetAccountSponsor => set_account_sponsor, + GetAccount => get_account, + GetCreditApproval => get_credit_approval, + AddBlob => add_blob, + GetBlob => get_blob, + DeleteBlob => delete_blob, + OverwriteBlob => overwrite_blob, + + // System methods + GetGasAllowance => get_gas_allowance, + UpdateGasAllowance => update_gas_allowance, + GetBlobStatus => get_blob_status, + GetAddedBlobs => get_added_blobs, + GetPendingBlobs => get_pending_blobs, + SetBlobPending => set_blob_pending, + FinalizeBlob => finalize_blob, + DebitAccounts => debit_accounts, + + // Admin methods + SetAccountStatus => set_account_status, + TrimBlobExpiries => trim_blob_expiries, + + // Metrics methods + GetStats => get_stats, + + // Node operator methods + RegisterNodeOperator => register_node_operator, + GetOperatorInfo => get_operator_info, + GetActiveOperators => get_active_operators, + + _ => fallback, + } +} + +/// Makes a syscall that will delete a blob from the underlying Iroh-based data store. 
+fn delete_from_disc(hash: B256) -> Result<(), ActorError> { + #[cfg(feature = "fil-actor")] + { + ipc_storage_actor_sdk::storage::delete_blob(hash.0).map_err(|en| { + ActorError::unspecified(format!("failed to delete blob from disc: {:?}", en)) + })?; + log::debug!("deleted blob {} from disc", hash); + Ok(()) + } + #[cfg(not(feature = "fil-actor"))] + { + log::debug!("mock deletion from disc (hash={})", hash); + Ok(()) + } +} diff --git a/fendermint/actors/blobs/src/actor/admin.rs b/fendermint/actors/blobs/src/actor/admin.rs new file mode 100644 index 0000000000..5a3b8f1613 --- /dev/null +++ b/fendermint/actors/blobs/src/actor/admin.rs @@ -0,0 +1,74 @@ +// Copyright 2025 Recall Contributors +// Copyright 2021-2023 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT + +use fendermint_actor_blobs_shared::{ + accounts::SetAccountStatusParams, blobs::TrimBlobExpiriesParams, bytes::B256, +}; +use fendermint_actor_ipc_storage_config_shared::{get_config, require_caller_is_admin}; +use fil_actors_runtime::{runtime::Runtime, ActorError}; +use ipc_storage_actor_sdk::caller::{Caller, CallerOption}; + +use crate::{ + actor::{delete_from_disc, BlobsActor}, + State, +}; + +impl BlobsActor { + /// Sets the account status for an address. + /// + /// The `subscriber` address must be delegated (only delegated addresses can use credit). + pub fn set_account_status( + rt: &impl Runtime, + params: SetAccountStatusParams, + ) -> Result<(), ActorError> { + require_caller_is_admin(rt)?; + + let caller = Caller::new_delegated(rt, params.subscriber, None, CallerOption::None)?; + let config = get_config(rt)?; + + rt.transaction(|st: &mut State, rt| { + st.set_account_status( + rt.store(), + &config, + caller.state_address(), + params.status, + rt.curr_epoch(), + ) + }) + } + + /// Trims the subscription expiries for an account based on its current maximum allowed blob TTL. + /// + /// This is used in conjunction with `set_account_status` when reducing an account's maximum + /// allowed blob TTL. + /// Returns the number of subscriptions processed and the next key to continue iteration. + /// + /// The `subscriber` address must be delegated (only delegated addresses can use credit). + pub fn trim_blob_expiries( + rt: &impl Runtime, + params: TrimBlobExpiriesParams, + ) -> Result<(u32, Option), ActorError> { + require_caller_is_admin(rt)?; + + let caller = Caller::new_delegated(rt, params.subscriber, None, CallerOption::None)?; + let config = get_config(rt)?; + + let (processed, next_key, deleted_blobs) = rt.transaction(|st: &mut State, rt| { + st.trim_blob_expiries( + &config, + rt.store(), + caller.state_address(), + rt.curr_epoch(), + params.starting_hash, + params.limit, + ) + })?; + + for hash in deleted_blobs { + delete_from_disc(hash)?; + } + + Ok((processed, next_key)) + } +} diff --git a/fendermint/actors/blobs/src/actor/metrics.rs b/fendermint/actors/blobs/src/actor/metrics.rs new file mode 100644 index 0000000000..7ce69fb82b --- /dev/null +++ b/fendermint/actors/blobs/src/actor/metrics.rs @@ -0,0 +1,23 @@ +// Copyright 2025 Recall Contributors +// Copyright 2021-2023 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT + +use fendermint_actor_blobs_shared::GetStatsReturn; +use fendermint_actor_ipc_storage_config_shared::get_config; +use fil_actors_runtime::{runtime::Runtime, ActorError}; + +use crate::{actor::BlobsActor, State}; + +impl BlobsActor { + /// Returns credit and storage usage statistics. 
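// Paging sketch for trim_blob_expiries above: the admin repeats the call, feeding the
// returned continuation hash back in as starting_hash until it comes back None.
// `subscriber` and `batch` are assumed inputs; in practice each round trip would be a
// separate admin message rather than a local loop.
fn trim_all_expiries(
    rt: &impl Runtime,
    subscriber: Address,
    batch: u32,
) -> Result<u32, ActorError> {
    let mut total = 0u32;
    let mut cursor: Option<B256> = None;
    loop {
        let (processed, next_key) = BlobsActor::trim_blob_expiries(
            rt,
            TrimBlobExpiriesParams {
                subscriber,
                starting_hash: cursor,
                limit: Some(batch),
            },
        )?;
        total += processed;
        match next_key {
            Some(hash) => cursor = Some(hash),
            None => break,
        }
    }
    Ok(total)
}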
+ pub fn get_stats(rt: &impl Runtime) -> Result { + rt.validate_immediate_caller_accept_any()?; + + let config = get_config(rt)?; + let stats = rt + .state::()? + .get_stats(&config, rt.current_balance()); + + Ok(stats) + } +} diff --git a/fendermint/actors/blobs/src/actor/system.rs b/fendermint/actors/blobs/src/actor/system.rs new file mode 100644 index 0000000000..6909d9dcc7 --- /dev/null +++ b/fendermint/actors/blobs/src/actor/system.rs @@ -0,0 +1,420 @@ +// Copyright 2025 Recall Contributors +// Copyright 2021-2023 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT + +use fendermint_actor_blobs_shared::blobs::BlobRequest; +use fendermint_actor_blobs_shared::{ + blobs::{ + BlobStatus, FinalizeBlobParams, GetAddedBlobsParams, GetBlobStatusParams, + GetPendingBlobsParams, SetBlobPendingParams, + }, + credit::{Credit, GasAllowance, GetGasAllowanceParams, UpdateGasAllowanceParams}, + operators::{ + GetActiveOperatorsReturn, GetOperatorInfoParams, OperatorInfo, RegisterNodeOperatorParams, + }, +}; +use fendermint_actor_ipc_storage_config_shared::get_config; +use fil_actors_runtime::{runtime::Runtime, ActorError, SYSTEM_ACTOR_ADDR}; +use fvm_shared::error::ExitCode; +use ipc_storage_actor_sdk::{ + caller::{Caller, CallerOption}, + evm::emit_evm_event, +}; +use num_traits::Zero; + +use crate::{ + actor::{delete_from_disc, BlobsActor}, + sol_facade::{blobs as sol_blobs, credit::CreditDebited}, + state::blobs::{FinalizeBlobStateParams, SetPendingBlobStateParams}, + State, +}; + +impl BlobsActor { + /// Returns the gas allowance from a credit purchase for an address. + /// + /// This method is called by the ipc_storage executor, and as such, cannot fail. + pub fn get_gas_allowance( + rt: &impl Runtime, + params: GetGasAllowanceParams, + ) -> Result { + rt.validate_immediate_caller_is(std::iter::once(&SYSTEM_ACTOR_ADDR))?; + + let from_caller = match Caller::new(rt, params.0, None, CallerOption::None) { + Ok(caller) => caller, + Err(e) => { + return if e.exit_code() == ExitCode::USR_FORBIDDEN { + // Disallowed actor type (this is called by all txns so we can't error) + Ok(GasAllowance::default()) + } else { + Err(e) + }; + } + }; + + let allowance = rt.state::()?.get_gas_allowance( + rt.store(), + from_caller.state_address(), + rt.curr_epoch(), + )?; + + Ok(allowance) + } + + /// Updates gas allowance for the `from` address. + /// + /// The allowance update is applied to `sponsor` if it exists. + /// The `from` address must have an approval from `sponsor`. + /// The `from` address can be any actor, including those without delegated addresses. + /// This method is called by the ipc_storage executor, and as such, cannot fail. + pub fn update_gas_allowance( + rt: &impl Runtime, + params: UpdateGasAllowanceParams, + ) -> Result<(), ActorError> { + rt.validate_immediate_caller_is(std::iter::once(&SYSTEM_ACTOR_ADDR))?; + + let caller = Caller::new(rt, params.from, params.sponsor, CallerOption::None)?; + + rt.transaction(|st: &mut State, rt| { + st.update_gas_allowance( + rt.store(), + caller.state_address(), + caller.sponsor_state_address(), + params.add_amount, + rt.curr_epoch(), + ) + }) + } + + /// Returns the current [`BlobStatus`] for a blob by hash. 
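// Conceptual sketch of the resolution queue handled by get_added_blobs and
// set_blob_pending further down in this file: a system-side driver pages blobs in the
// Added state and marks each source Pending. The direct function calls stand in for
// on-chain messages from the system actor; `batch_size` is an assumed input.
fn mark_added_batch_pending(rt: &impl Runtime, batch_size: u32) -> Result<(), ActorError> {
    let added = BlobsActor::get_added_blobs(rt, GetAddedBlobsParams(batch_size))?;
    for (hash, size, sources) in added {
        for (subscriber, id, source) in sources {
            BlobsActor::set_blob_pending(
                rt,
                SetBlobPendingParams {
                    source,
                    subscriber,
                    hash,
                    size,
                    id,
                },
            )?;
        }
    }
    Ok(())
}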
+ pub fn get_blob_status( + rt: &impl Runtime, + params: GetBlobStatusParams, + ) -> Result, ActorError> { + rt.validate_immediate_caller_accept_any()?; + + let caller = Caller::new(rt, params.subscriber, None, CallerOption::None)?; + + rt.state::()?.get_blob_status( + rt.store(), + caller.state_address(), + params.hash, + params.id, + ) + } + + /// Returns a list of [`BlobRequest`]s that are currently in the [`BlobStatus::Added`] state. + /// + /// All blobs that have been added but have not yet been picked up by validators for download + /// are in the [`BlobStatus::Added`] state. + pub fn get_added_blobs( + rt: &impl Runtime, + params: GetAddedBlobsParams, + ) -> Result, ActorError> { + rt.validate_immediate_caller_accept_any()?; + rt.state::()?.get_added_blobs(rt.store(), params.0) + } + + /// Returns a list of [`BlobRequest`]s that are currently in the [`BlobStatus::Pending`] state. + /// + /// All blobs that have been added and picked up by validators for download are in the + /// [`BlobStatus::Pending`] state. + /// These are the blobs that validators are currently coordinating to download. They will + /// vote on the final status ([`BlobStatus::Resolved`] or [`BlobStatus::Failed`]), which is + /// recorded on-chain with the `finalize_blob` method. + pub fn get_pending_blobs( + rt: &impl Runtime, + params: GetPendingBlobsParams, + ) -> Result, ActorError> { + rt.validate_immediate_caller_accept_any()?; + rt.state::()?.get_pending_blobs(rt.store(), params.0) + } + + /// Sets a blob to the [`BlobStatus::Pending`] state. + /// + /// The `subscriber` address must be delegated (only delegated addresses can use credit). + pub fn set_blob_pending( + rt: &impl Runtime, + params: SetBlobPendingParams, + ) -> Result<(), ActorError> { + rt.validate_immediate_caller_is(std::iter::once(&SYSTEM_ACTOR_ADDR))?; + + let caller = Caller::new_delegated(rt, params.subscriber, None, CallerOption::None)?; + + rt.transaction(|st: &mut State, rt| { + st.set_blob_pending( + rt.store(), + caller.state_address(), + SetPendingBlobStateParams::from_actor_params(params.clone()), + ) + })?; + + emit_evm_event( + rt, + sol_blobs::BlobPending { + subscriber: caller.event_address(), + hash: ¶ms.hash, + source: ¶ms.source, + }, + ) + } + + /// Finalizes a blob to the [`BlobStatus::Resolved`] or [`BlobStatus::Failed`] state. + /// + /// This is the final protocol step to add a blob, which is controlled by node operator consensus. + /// The [`BlobStatus::Resolved`] state means that a quorum of operators was able to download the blob. + /// The [`BlobStatus::Failed`] state means that a quorum of operators was not able to download the blob. + /// + /// # BLS Signature Verification + /// This method verifies the aggregated BLS signature from node operators to ensure: + /// 1. At least 2/3+ of operators signed the blob hash + /// 2. 
The aggregated signature is valid for the blob hash + pub fn finalize_blob(rt: &impl Runtime, params: FinalizeBlobParams) -> Result<(), ActorError> { + rt.validate_immediate_caller_accept_any()?; + + let caller = Caller::new(rt, params.subscriber, None, CallerOption::None)?; + + // Get current blob status from state + let current_status = rt.state::()?.get_blob_status( + rt.store(), + caller.state_address(), + params.hash, + params.id.clone(), + )?; + + // Only finalize blobs that are in Added or Pending status + // (Resolved blobs are already finalized, Failed blobs cannot be retried) + if !matches!( + current_status, + Some(BlobStatus::Added) | Some(BlobStatus::Pending) + ) { + return Ok(()); + } + + Self::verify_blob_signatures(rt, ¶ms)?; + + let event_resolved = matches!(params.status, BlobStatus::Resolved); + + rt.transaction(|st: &mut State, rt| { + st.finalize_blob( + rt.store(), + caller.state_address(), + FinalizeBlobStateParams::from_actor_params(params.clone(), rt.curr_epoch()), + ) + })?; + + emit_evm_event( + rt, + sol_blobs::BlobFinalized { + subscriber: caller.event_address(), + hash: ¶ms.hash, + resolved: event_resolved, + }, + ) + } + + /// Verify aggregated BLS signatures for blob finalization + fn verify_blob_signatures( + rt: &impl Runtime, + params: &FinalizeBlobParams, + ) -> Result<(), ActorError> { + use bls_signatures::{ + verify_messages, PublicKey as BlsPublicKey, Serialize as BlsSerialize, + Signature as BlsSignature, + }; + + // Parse aggregated signature + let aggregated_sig = BlsSignature::from_bytes(¶ms.aggregated_signature) + .map_err(|e| ActorError::illegal_argument(format!("Invalid BLS signature: {:?}", e)))?; + + // Get active operators from state + let state = rt.state::()?; + let active_operators = state.operators.get_active_operators(); + let total_operators = active_operators.len(); + + if total_operators == 0 { + return Err(ActorError::illegal_state( + "No active operators registered".into(), + )); + } + + // Extract signer indices from bitmap and collect their public keys + let mut signer_pubkeys = Vec::new(); + let mut signer_count = 0; + + for (index, operator_addr) in active_operators.iter().enumerate() { + if index >= 128 { + break; // u128 bitmap can only hold 128 operators + } + + // Check if this operator signed (bit is set in bitmap) + if (params.signer_bitmap & (1u128 << index)) != 0 { + signer_count += 1; + + // Get operator info to retrieve BLS public key + let operator_info = + state + .operators + .get(rt.store(), operator_addr)? 
+ .ok_or_else(|| { + ActorError::illegal_state(format!( + "Operator {} not found in state", + operator_addr + )) + })?; + + // Parse BLS public key + let pubkey = BlsPublicKey::from_bytes(&operator_info.bls_pubkey).map_err(|e| { + ActorError::illegal_state(format!( + "Invalid BLS public key for operator {}: {:?}", + operator_addr, e + )) + })?; + + signer_pubkeys.push(pubkey); + } + } + + // Check threshold: need at least 2/3+ of operators + let threshold = (total_operators * 2 + 2) / 3; // Ceiling of 2/3 + if signer_count < threshold { + return Err(ActorError::illegal_argument(format!( + "Insufficient signatures: got {}, need {} out of {}", + signer_count, threshold, total_operators + ))); + } + + if signer_pubkeys.is_empty() { + return Err(ActorError::illegal_state("No signer public keys".into())); + } + + // All operators signed the same message (the blob hash) + let hash_bytes = params.hash.0.as_slice(); + + // Create a vector of the message repeated for each signer + let messages: Vec<&[u8]> = vec![hash_bytes; signer_count]; + + // Verify the aggregated signature using verify_messages + // This verifies that the aggregated signature corresponds to the individual signatures + let verification_result = verify_messages(&aggregated_sig, &messages, &signer_pubkeys); + + if !verification_result { + return Err(ActorError::illegal_argument( + "BLS signature verification failed".into(), + )); + } + + log::info!( + "BLS signature verified: {} operators signed (threshold: {}/{})", + signer_count, + threshold, + total_operators + ); + + Ok(()) + } + + /// Debits accounts for current blob usage. + /// + /// This is called by the system actor every X blocks, where X is set in the ipc_storage config actor. + pub fn debit_accounts(rt: &impl Runtime) -> Result<(), ActorError> { + rt.validate_immediate_caller_is(std::iter::once(&SYSTEM_ACTOR_ADDR))?; + + let config = get_config(rt)?; + + let mut credit_debited = Credit::zero(); + let (deletes, num_accounts, more_accounts) = rt.transaction(|st: &mut State, rt| { + let initial_credit_debited = st.credits.credit_debited.clone(); + let (deletes, more_accounts) = + st.debit_accounts(rt.store(), &config, rt.curr_epoch())?; + credit_debited = &st.credits.credit_debited - initial_credit_debited; + let num_accounts = st.accounts.len(); + Ok((deletes, num_accounts, more_accounts)) + })?; + + for hash in deletes { + delete_from_disc(hash)?; + } + + emit_evm_event( + rt, + CreditDebited { + amount: credit_debited, + num_accounts, + more_accounts, + }, + )?; + + Ok(()) + } + + /// Register a new node operator with BLS public key and RPC URL + /// + /// The caller's address will be registered as the operator address. + /// This method can be called by anyone who wants to become a node operator. 
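// Worked sketch of the quorum arithmetic in verify_blob_signatures above: the
// threshold is the ceiling of two-thirds of the active operator count, and signers
// are read from the low bits of the u128 bitmap (one bit per operator index).
fn quorum_reached(total_operators: usize, signer_bitmap: u128) -> bool {
    // (2n + 2) / 3 with integer division is ceil(2n / 3); e.g. 3 signers out of 4.
    let threshold = (total_operators * 2 + 2) / 3;
    let signers = (0..total_operators.min(128))
        .filter(|&i| signer_bitmap & (1u128 << i) != 0)
        .count();
    signers >= threshold
}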
+ pub fn register_node_operator( + rt: &impl Runtime, + params: RegisterNodeOperatorParams, + ) -> Result { + rt.validate_immediate_caller_accept_any()?; + + // Validate BLS public key length (must be 48 bytes) + if params.bls_pubkey.len() != 48 { + return Err(ActorError::illegal_argument( + "BLS public key must be exactly 48 bytes".into(), + )); + } + + // Validate RPC URL is not empty + if params.rpc_url.is_empty() { + return Err(ActorError::illegal_argument( + "RPC URL cannot be empty".into(), + )); + } + + let operator_address = rt.message().caller(); + + let index = rt.transaction(|st: &mut State, rt| { + let node_operator_info = crate::state::operators::NodeOperatorInfo { + bls_pubkey: params.bls_pubkey, + rpc_url: params.rpc_url, + registered_epoch: rt.curr_epoch(), + active: true, + }; + + st.operators + .register(rt.store(), operator_address, node_operator_info) + })?; + + Ok(index) + } + + /// Get information about a specific node operator + pub fn get_operator_info( + rt: &impl Runtime, + params: GetOperatorInfoParams, + ) -> Result, ActorError> { + rt.validate_immediate_caller_accept_any()?; + + let state = rt.state::()?; + let info = state.operators.get(rt.store(), ¶ms.address)?; + + Ok(info.map(|i| OperatorInfo { + bls_pubkey: i.bls_pubkey, + rpc_url: i.rpc_url, + active: i.active, + })) + } + + /// Get the ordered list of all active node operators + /// + /// The order of addresses in the returned list corresponds to the bit positions + /// in the signature bitmap used for BLS signature aggregation. + pub fn get_active_operators(rt: &impl Runtime) -> Result { + rt.validate_immediate_caller_accept_any()?; + + let state = rt.state::()?; + let operators = state.operators.get_active_operators(); + + Ok(GetActiveOperatorsReturn { operators }) + } +} diff --git a/fendermint/actors/blobs/src/actor/user.rs b/fendermint/actors/blobs/src/actor/user.rs new file mode 100644 index 0000000000..c225d756fd --- /dev/null +++ b/fendermint/actors/blobs/src/actor/user.rs @@ -0,0 +1,453 @@ +// Copyright 2025 Recall Contributors +// Copyright 2021-2023 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT + +use fendermint_actor_blobs_shared::{ + accounts::{Account, GetAccountParams}, + blobs::{ + AddBlobParams, Blob, DeleteBlobParams, GetBlobParams, OverwriteBlobParams, Subscription, + }, + credit::{ + ApproveCreditParams, BuyCreditParams, Credit, CreditApproval, GetCreditApprovalParams, + RevokeCreditParams, SetSponsorParams, + }, +}; +use fendermint_actor_ipc_storage_config_shared::get_config; +use fil_actors_runtime::{extract_send_result, runtime::Runtime, ActorError}; +use fvm_shared::{econ::TokenAmount, METHOD_SEND}; +use ipc_storage_actor_sdk::{ + caller::{Caller, CallerOption}, + evm::emit_evm_event, + util::is_bucket_address, + util::to_delegated_address, +}; +use num_traits::Zero; + +use crate::{ + actor::{delete_from_disc, BlobsActor}, + caller::DelegationOptions, + sol_facade::{ + blobs as sol_blobs, + credit::{CreditApproved, CreditPurchased, CreditRevoked}, + gas::{GasSponsorSet, GasSponsorUnset}, + }, + state::blobs::{AddBlobStateParams, DeleteBlobStateParams}, + State, +}; + +impl BlobsActor { + /// Buy credit with token. + /// + /// The `to` address must be delegated (only delegated addresses can own credit). 
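Because get_active_operators above fixes the bit positions used in the signature bitmap, an off-chain aggregator would build FinalizeBlobParams::signer_bitmap from that ordered list. A hypothetical sketch of that step follows, with strings standing in for operator addresses; the names and types are illustrative and not the actor's API.

// Illustrative only: the actor stores addresses, not strings, and the real
// operator list comes from get_active_operators.
fn build_signer_bitmap(active_operators: &[String], signed: &[String]) -> u128 {
    let mut bitmap = 0u128;
    for (index, operator) in active_operators.iter().take(128).enumerate() {
        if signed.contains(operator) {
            bitmap |= 1u128 << index;
        }
    }
    bitmap
}

fn main() {
    let active = vec!["op-a".to_string(), "op-b".to_string(), "op-c".to_string()];
    let signed = vec!["op-a".to_string(), "op-c".to_string()];
    // Bits 0 and 2 are set: 0b101.
    assert_eq!(build_signer_bitmap(&active, &signed), 0b101);
}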
+ pub fn buy_credit(rt: &impl Runtime, params: BuyCreditParams) -> Result { + rt.validate_immediate_caller_accept_any()?; + + let caller = Caller::new_delegated(rt, params.0, None, CallerOption::Auth)?; + let config = get_config(rt)?; + + let mut credit_amount = Credit::zero(); + let account = rt.transaction(|st: &mut State, rt| { + let pre_buy = st.credits.credit_sold.clone(); + let account = st.buy_credit( + rt.store(), + &config, + caller.state_address(), + rt.message().value_received(), + rt.curr_epoch(), + )?; + credit_amount = &st.credits.credit_sold - &pre_buy; + Ok(account) + })?; + + emit_evm_event( + rt, + CreditPurchased::new(caller.event_address(), credit_amount), + )?; + + account.to_shared(rt) + } + + /// Approve credit and gas usage from one account to another. + /// + /// The `from` address must be delegated (only delegated addresses can own credit). + /// The `from` address must be the message origin or caller. + /// The `to` address must be delegated (only delegated addresses can use credit). + /// The `to` address will be created if it does not exist. + /// TODO: Remove the `caller_allowlist` parameter. + pub fn approve_credit( + rt: &impl Runtime, + params: ApproveCreditParams, + ) -> Result { + rt.validate_immediate_caller_accept_any()?; + + let from_caller = + Caller::new_delegated(rt, rt.message().caller(), None, CallerOption::Auth)?; + let to_caller = Caller::new_delegated(rt, params.to, None, CallerOption::Create)?; + let config = get_config(rt)?; + + let approval = rt.transaction(|st: &mut State, rt| { + let approval = st.approve_credit( + &config, + rt.store(), + from_caller.state_address(), + to_caller.state_address(), + DelegationOptions { + credit_limit: params.credit_limit, + gas_fee_limit: params.gas_fee_limit, + ttl: params.ttl, + }, + rt.curr_epoch(), + ); + + // For convenience, set the approvee's sponsor to the approver if it was created + if to_caller.created() { + st.set_account_sponsor( + &config, + rt.store(), + to_caller.state_address(), + Some(from_caller.state_address()), + rt.curr_epoch(), + )?; + } + approval + })?; + + emit_evm_event( + rt, + CreditApproved { + from: from_caller.event_address(), + to: to_caller.event_address(), + credit_limit: approval.credit_limit.clone(), + gas_fee_limit: approval.gas_allowance_limit.clone(), + expiry: approval.expiry, + }, + )?; + + Ok(approval) + } + + /// Revoke credit and gas usage from one account to another. + /// + /// The `from` address must be delegated (only delegated addresses can own credit). + /// The `from` address must be the message origin or caller. + /// The `to` address must be delegated (only delegated addresses can use credit). + pub fn revoke_credit(rt: &impl Runtime, params: RevokeCreditParams) -> Result<(), ActorError> { + rt.validate_immediate_caller_accept_any()?; + + let from_caller = + Caller::new_delegated(rt, rt.message().caller(), None, CallerOption::Auth)?; + let to_caller = Caller::new_delegated(rt, params.to, None, CallerOption::None)?; + + rt.transaction(|st: &mut State, rt| { + st.revoke_credit( + rt.store(), + from_caller.state_address(), + to_caller.state_address(), + ) + })?; + + emit_evm_event( + rt, + CreditRevoked::new(from_caller.event_address(), to_caller.event_address()), + )?; + + Ok(()) + } + + /// Sets or unsets a default credit and gas sponsor from one account to another. + /// + /// If `sponsor` does not exist, the default sponsor is unset. + /// The `from` address must be delegated (only delegated addresses can use credit). 
+ /// The `from` address must be the message origin or caller. + /// The `sponsor` address must be delegated (only delegated addresses can own credit). + pub fn set_account_sponsor( + rt: &impl Runtime, + params: SetSponsorParams, + ) -> Result<(), ActorError> { + rt.validate_immediate_caller_accept_any()?; + + let caller = + Caller::new_delegated(rt, rt.message().caller(), params.0, CallerOption::Auth)?; + let config = get_config(rt)?; + + rt.transaction(|st: &mut State, rt| { + st.set_account_sponsor( + &config, + rt.store(), + caller.state_address(), + caller.sponsor_state_address(), + rt.curr_epoch(), + ) + })?; + + if let Some(sponsor) = caller.sponsor_address() { + emit_evm_event(rt, GasSponsorSet::mew(sponsor))?; + } else { + emit_evm_event(rt, GasSponsorUnset::new())?; + } + + Ok(()) + } + + /// Returns the account for an address. + pub fn get_account( + rt: &impl Runtime, + params: GetAccountParams, + ) -> Result, ActorError> { + rt.validate_immediate_caller_accept_any()?; + + let caller = Caller::new(rt, params.0, None, CallerOption::None)?; + + let account = rt + .state::()? + .get_account(rt.store(), caller.state_address())? + .map(|mut account| { + // Resolve the credit sponsor + account.credit_sponsor = account + .credit_sponsor + .map(|sponsor| to_delegated_address(rt, sponsor)) + .transpose()?; + + account.to_shared(rt) + }); + + account.transpose() + } + + /// Returns the credit approval from one account to another if it exists. + pub fn get_credit_approval( + rt: &impl Runtime, + params: GetCreditApprovalParams, + ) -> Result, ActorError> { + rt.validate_immediate_caller_accept_any()?; + + let from_caller = Caller::new(rt, params.from, None, CallerOption::None)?; + let to_caller = Caller::new(rt, params.to, None, CallerOption::None)?; + + let approval = rt.state::()?.get_credit_approval( + rt.store(), + from_caller.state_address(), + to_caller.state_address(), + )?; + + Ok(approval) + } + + /// Adds or updates a blob subscription. + /// + /// The subscriber will only need credits for blobs that are not already covered by one of + /// their existing subscriptions. + /// + /// The `sponsor` will be the subscriber (the account responsible for payment), if it exists + /// and there is an approval from `sponsor` to `from`. + /// + /// The `from` address must be delegated (only delegated addresses can use credit). + /// The `sponsor` address must be delegated (only delegated addresses can use credit). + pub fn add_blob(rt: &impl Runtime, params: AddBlobParams) -> Result { + rt.validate_immediate_caller_accept_any()?; + + let from = if is_bucket_address(rt, rt.message().caller())? 
{ + params.from + } else { + rt.message().caller() + }; + let caller = Caller::new_delegated(rt, from, params.sponsor, CallerOption::Auth)?; + let token_amount = rt.message().value_received(); + let config = get_config(rt)?; + + let mut capacity_used = 0; + let (sub, token_rebate) = rt.transaction(|st: &mut State, rt| { + let initial_capacity_used = st.blobs.bytes_size(); + let res = st.add_blob( + rt.store(), + &config, + caller.state_address(), + caller.sponsor_state_address(), + AddBlobStateParams::from_actor_params( + params.clone(), + rt.curr_epoch(), + token_amount, + ), + )?; + capacity_used = st.blobs.bytes_size() - initial_capacity_used; + Ok(res) + })?; + + // Send back unspent tokens + if !token_rebate.is_zero() { + extract_send_result(rt.send_simple( + &caller.state_address(), + METHOD_SEND, + None, + token_rebate, + ))?; + } + + emit_evm_event( + rt, + sol_blobs::BlobAdded { + subscriber: caller.event_address(), + hash: ¶ms.hash, + size: params.size, + expiry: sub.expiry, + bytes_used: capacity_used, + }, + )?; + + Ok(sub) + } + + /// Returns a blob by hash if it exists. + pub fn get_blob(rt: &impl Runtime, params: GetBlobParams) -> Result, ActorError> { + rt.validate_immediate_caller_accept_any()?; + match rt.state::()?.get_blob(rt.store(), params.0)? { + Some(blob) => Ok(Some(blob.to_shared(rt)?)), + None => Ok(None), + } + } + + /// Deletes a blob subscription. + /// + /// The `sponsor` will be the subscriber (the account responsible for payment), if it exists + /// and there is an approval from `sponsor` to `from`. + /// + /// The `from` address must be delegated (only delegated addresses can use credit). + /// The `sponsor` address must be delegated (only delegated addresses can use credit). + pub fn delete_blob(rt: &impl Runtime, params: DeleteBlobParams) -> Result<(), ActorError> { + rt.validate_immediate_caller_accept_any()?; + + let from = if is_bucket_address(rt, rt.message().caller())? { + params.from + } else { + rt.message().caller() + }; + + let caller = Caller::new_delegated(rt, from, params.sponsor, CallerOption::Auth)?; + + let mut capacity_released = 0; + let (_, size, _) = rt.transaction(|st: &mut State, rt| { + let initial_capacity_used = st.blobs.bytes_size(); + let res = st.delete_blob( + rt.store(), + caller.state_address(), + caller.sponsor_state_address(), + DeleteBlobStateParams::from_actor_params(params.clone(), rt.curr_epoch()), + )?; + capacity_released = initial_capacity_used - st.blobs.bytes_size(); + Ok(res) + })?; + + emit_evm_event( + rt, + sol_blobs::BlobDeleted { + subscriber: caller.event_address(), + hash: ¶ms.hash, + size, + bytes_released: capacity_released, + }, + )?; + + Ok(()) + } + + /// Deletes a blob subscription and adds another in a single call. + /// + /// This method is more efficient than two separate calls to `delete_blob` and `add_blob`, + /// and is useful for some blob workflows like replacing a key in a bucket actor. + /// + /// The `sponsor` will be the subscriber (the account responsible for payment), if it exists + /// and there is an approval from `sponsor` to `from`. + /// + /// The `from` address must be delegated (only delegated addresses can use credit). + /// The `sponsor` address must be delegated (only delegated addresses can use credit). + pub fn overwrite_blob( + rt: &impl Runtime, + params: OverwriteBlobParams, + ) -> Result { + rt.validate_immediate_caller_accept_any()?; + + let from = if is_bucket_address(rt, rt.message().caller())? 
{ + params.add.from + } else { + rt.message().caller() + }; + + let caller = Caller::new_delegated(rt, from, params.add.sponsor, CallerOption::Auth)?; + let config = get_config(rt)?; + + // Determine if we need to delete an existing blob before adding the new one + let overwrite = params.old_hash != params.add.hash; + + let add_hash = params.add.hash; + let add_size = params.add.size; + let mut capacity_released = 0; + let mut capacity_used = 0; + + // To ensure atomicity, we combine the two independent calls into a single transaction. + let (delete, delete_size, sub) = rt.transaction(|st: &mut State, rt| { + let add_params = params.add; + + let initial_capacity_used = st.blobs.bytes_size(); + let (delete, delete_size, _) = if overwrite { + st.delete_blob( + rt.store(), + caller.state_address(), + caller.sponsor_state_address(), + DeleteBlobStateParams { + hash: params.old_hash, + id: add_params.id.clone(), + epoch: rt.curr_epoch(), + skip_credit_return: false, + }, + )? + } else { + (false, 0, false) + }; + capacity_released = initial_capacity_used - st.blobs.bytes_size(); + + let initial_capacity_used = st.blobs.bytes_size(); + let (subscription, _) = st.add_blob( + rt.store(), + &config, + caller.state_address(), + caller.sponsor_state_address(), + AddBlobStateParams::from_actor_params( + add_params, + rt.curr_epoch(), + TokenAmount::zero(), + ), + )?; + capacity_used = st.blobs.bytes_size() - initial_capacity_used; + + Ok((delete, delete_size, subscription)) + })?; + + if delete { + delete_from_disc(params.old_hash)?; + } + + if overwrite { + emit_evm_event( + rt, + sol_blobs::BlobDeleted { + subscriber: caller.event_address(), + hash: ¶ms.old_hash, + size: delete_size, + bytes_released: capacity_released, + }, + )?; + } + emit_evm_event( + rt, + sol_blobs::BlobAdded { + subscriber: caller.event_address(), + hash: &add_hash, + size: add_size, + expiry: sub.expiry, + bytes_used: capacity_used, + }, + )?; + + Ok(sub) + } +} diff --git a/fendermint/actors/blobs/src/caller.rs b/fendermint/actors/blobs/src/caller.rs new file mode 100644 index 0000000000..c49e361419 --- /dev/null +++ b/fendermint/actors/blobs/src/caller.rs @@ -0,0 +1,748 @@ +// Copyright 2025 Recall Contributors +// Copyright 2021-2023 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT + +use fendermint_actor_blobs_shared::credit::{ + Credit, CreditAllowance, CreditApproval, GasAllowance, +}; +use fendermint_actor_ipc_storage_config_shared::IPCStorageConfig; +use fil_actors_runtime::ActorError; +use fvm_ipld_blockstore::Blockstore; +use fvm_shared::{address::Address, clock::ChainEpoch, econ::TokenAmount}; +use ipc_storage_ipld::hamt; +use log::debug; +use num_traits::Zero; + +use crate::state::accounts::Account; + +/// Helper for managing blobs actor state caller. +#[allow(clippy::large_enum_variant)] +pub enum Caller<'a, BS: Blockstore> { + Default((Address, Account)), + Sponsored(Delegation<'a, &'a BS>), +} + +impl<'a, BS: Blockstore> Caller<'a, BS> { + /// Loads the caller and optional sponsor account with its delegation. + pub fn load( + store: &'a BS, + accounts: &hamt::map::Hamt<'a, &'a BS, Address, Account>, + caller: Address, + sponsor: Option
, + ) -> Result { + let account = accounts.get_or_err(&caller)?; + Self::load_account(store, accounts, caller, account, sponsor) + } + + /// Loads the caller and the caller's default sponsor with its delegation. + /// If the sponsor does not exist or the caller does not have an approval from + /// the default sponsor, a default caller type is returned. + pub fn load_with_default_sponsor( + store: &'a BS, + accounts: &hamt::map::Hamt<'a, &'a BS, Address, Account>, + caller: Address, + ) -> Result { + let account = accounts.get_or_err(&caller)?; + match Self::load_account( + store, + accounts, + caller, + account.clone(), + account.credit_sponsor, + ) { + Ok(caller) => Ok(caller), + Err(_) => Self::load_account(store, accounts, caller, account, None), + } + } + + /// Loads the caller and optional sponsor account with its delegation. + /// The caller account will be created if one does not exist. + pub fn load_or_create( + store: &'a BS, + accounts: &hamt::map::Hamt<'a, &'a BS, Address, Account>, + caller: Address, + sponsor: Option
, + current_epoch: ChainEpoch, + max_ttl: ChainEpoch, + ) -> Result { + let account = + accounts.get_or_create(&caller, || Account::new(store, current_epoch, max_ttl))?; + Self::load_account(store, accounts, caller, account, sponsor) + } + + /// Loads the caller and optional sponsor account with its delegation. + pub fn load_account( + store: &'a BS, + accounts: &hamt::map::Hamt<'a, &'a BS, Address, Account>, + caller: Address, + caller_account: Account, + sponsor: Option
, + ) -> Result { + let sponsor = sponsor.unwrap_or(caller); + if sponsor != caller { + let delegation = Delegation::load(store, accounts, sponsor, caller, caller_account)?; + Ok(Self::Sponsored(delegation)) + } else { + Ok(Self::Default((caller, caller_account))) + } + } + + /// Returns the caller address. + #[allow(dead_code)] + pub fn address(&self) -> Address { + match self { + Self::Default((address, _)) => *address, + Self::Sponsored(delegation) => delegation.to, + } + } + + /// Returns the subscriber address. + /// The subscriber is the account responsible for credit and gas fees. + /// The subscriber is the caller or the sponsor if one exists. + pub fn subscriber_address(&self) -> Address { + match self { + Self::Default((address, _)) => *address, + Self::Sponsored(delegation) => delegation.from, + } + } + + /// Returns the delegate address. + /// The delegate only exists if there's a sponsor. + /// If present, the delegate address will be the caller address. + pub fn delegate_address(&self) -> Option
{ + match self { + Self::Default(_) => None, + Self::Sponsored(delegation) => Some(delegation.to), + } + } + + /// Returns the underlying delegate approval. + /// The delegate only exists if there's a sponsor. + pub fn delegate_approval(&self) -> Option<&CreditApproval> { + match self { + Self::Default(_) => None, + Self::Sponsored(delegation) => Some(&delegation.approval_to), + } + } + + /// Returns the subscriber account. + /// The subscriber is the account responsible for credit and gas fees. + /// The subscriber is the caller or the sponsor if one exists. + pub fn subscriber(&self) -> &Account { + match self { + Self::Default((_, account)) => account, + Self::Sponsored(delegation) => &delegation.from_account, + } + } + + /// Returns the subscriber account as a mutable reference. + /// The subscriber is the account responsible for credit and gas fees. + /// The subscriber is the caller or the sponsor if one exists. + #[allow(dead_code)] + pub fn subscriber_mut(&mut self) -> &mut Account { + match self { + Self::Default((_, account)) => account, + Self::Sponsored(delegation) => &mut delegation.from_account, + } + } + + /// Returns whether the caller is a delegate. + pub fn is_delegate(&self) -> bool { + matches!(self, Self::Sponsored(_)) + } + + /// Sets the default sponsor for the caller or the delegate. + pub fn set_default_sponsor(&mut self, sponsor: Option
) { + match self { + Self::Default((_, account)) => account.credit_sponsor = sponsor, + Self::Sponsored(delegation) => { + delegation.to_account.credit_sponsor = sponsor; + } + } + } + + /// Adds credit and gas allowances to the subscriber. + pub fn add_allowances(&mut self, credit: &Credit, value: &TokenAmount) { + match self { + Self::Default((_, account)) => { + account.credit_free += credit; + account.gas_allowance += value; + } + Self::Sponsored(delegation) => { + delegation.from_account.credit_free += credit; + delegation.from_account.gas_allowance += value; + } + } + + debug!("added {} credits to {}", credit, self.subscriber_address()); + debug!( + "added {} gas fee allowance to {}", + value, + self.subscriber_address() + ); + } + + /// Returns the credit allowance for the subscriber. + #[allow(dead_code)] + pub fn credit_allowance(&self, current_epoch: ChainEpoch) -> CreditAllowance { + match self { + Self::Default((_, account)) => CreditAllowance { + amount: account.credit_free.clone(), + ..Default::default() + }, + Self::Sponsored(delegation) => delegation.credit_allowance(current_epoch), + } + } + + /// Returns the gas allowance for the subscriber. + pub fn gas_allowance(&self, current_epoch: ChainEpoch) -> GasAllowance { + match self { + Self::Default((_, account)) => GasAllowance { + amount: account.gas_allowance.clone(), + ..Default::default() + }, + Self::Sponsored(delegation) => delegation.gas_allowance(current_epoch), + } + } + + /// Commits new capacity for the subscriber. + pub fn commit_capacity( + &mut self, + size: u64, + cost: &Credit, + current_epoch: ChainEpoch, + ) -> Result<(), ActorError> { + // Check the subscriber's free credit + if &self.subscriber().credit_free < cost { + return Err(ActorError::insufficient_funds(format!( + "account {} has insufficient credit (available: {}; required: {})", + self.subscriber_address(), + &self.subscriber().credit_free, + cost + ))); + } + match self { + Self::Default((_, account)) => { + account.capacity_used = account.capacity_used.saturating_add(size); + account.credit_free -= cost; + account.credit_committed += cost; + } + Self::Sponsored(delegation) => { + delegation.use_credit_allowance(cost, current_epoch)?; + delegation.from_account.capacity_used = + delegation.from_account.capacity_used.saturating_add(size); + delegation.from_account.credit_free -= cost; + delegation.from_account.credit_committed += cost; + } + } + + debug!("used {} bytes from {}", size, self.subscriber_address()); + debug!( + "committed {} credits from {}", + cost, + self.subscriber_address() + ); + + Ok(()) + } + + /// Releases capacity for the subscriber. + pub fn release_capacity(&mut self, size: u64, cost: &Credit) { + match self { + Self::Default((_, account)) => { + account.capacity_used = account.capacity_used.saturating_sub(size); + account.credit_free += cost; + account.credit_committed -= cost; + } + Self::Sponsored(delegation) => { + delegation.return_credit_allowance(cost); + delegation.from_account.capacity_used = + delegation.from_account.capacity_used.saturating_sub(size); + delegation.from_account.credit_free += cost; + delegation.from_account.credit_committed -= cost; + } + } + + debug!("released {} bytes to {}", size, self.subscriber_address()); + debug!("released {} credits to {}", cost, self.subscriber_address()); + } + + /// Debit credits from the subscriber. 
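The commit_capacity and release_capacity methods above move credit between the free and committed buckets while tracking bytes used. A self-contained sketch of that bookkeeping follows, with u64 standing in for Credit; it is simplified, since the real methods also route through the delegation when a sponsor is involved.

/// Minimal sketch of the free/committed credit bookkeeping.
struct AccountSketch {
    capacity_used: u64,
    credit_free: u64,
    credit_committed: u64,
}

impl AccountSketch {
    fn commit(&mut self, size: u64, cost: u64) -> Result<(), String> {
        if self.credit_free < cost {
            return Err(format!(
                "insufficient credit (available: {}; required: {})",
                self.credit_free, cost
            ));
        }
        self.capacity_used += size;
        self.credit_free -= cost;
        self.credit_committed += cost;
        Ok(())
    }

    fn release(&mut self, size: u64, cost: u64) {
        self.capacity_used = self.capacity_used.saturating_sub(size);
        self.credit_free += cost;
        self.credit_committed -= cost;
    }
}

fn main() {
    let mut acct = AccountSketch { capacity_used: 0, credit_free: 100, credit_committed: 0 };
    acct.commit(10, 40).unwrap();
    assert_eq!((acct.credit_free, acct.credit_committed, acct.capacity_used), (60, 40, 10));
    acct.release(10, 40);
    assert_eq!((acct.credit_free, acct.credit_committed, acct.capacity_used), (100, 0, 0));
}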
+ pub fn debit_credit(&mut self, amount: &Credit, current_epoch: ChainEpoch) { + match self { + Self::Default((_, account)) => { + account.credit_committed -= amount; + account.last_debit_epoch = current_epoch; + } + Self::Sponsored(delegation) => { + delegation.from_account.credit_committed -= amount; + delegation.from_account.last_debit_epoch = current_epoch; + } + } + + debug!( + "debited {} credits from {}", + amount, + self.subscriber_address() + ); + } + + /// Refund credit to the subscriber. + pub fn refund_credit(&mut self, amount: &Credit, correction: &Credit) { + match self { + Self::Default((_, account)) => { + account.credit_free += amount - correction; + account.credit_committed += correction; + } + Self::Sponsored(delegation) => { + delegation.from_account.credit_free += amount - correction; + delegation.from_account.credit_committed += correction; + } + } + + debug!( + "refunded {} credits to {}", + amount - correction, + self.subscriber_address() + ); + } + + /// Returns committed credits to the subscriber. + pub fn return_committed_credit(&mut self, amount: &Credit) { + match self { + Self::Default((_, account)) => { + account.credit_committed += amount; + } + Self::Sponsored(delegation) => { + delegation.from_account.credit_committed += amount; + } + } + + debug!( + "returned {} committed credits to {}", + amount, + self.subscriber_address() + ); + } + + /// Updates gas allowance for the subscriber. + pub fn update_gas_allowance( + &mut self, + add_amount: &TokenAmount, + current_epoch: ChainEpoch, + ) -> Result<(), ActorError> { + match self { + Self::Default((_, account)) => { + account.gas_allowance += add_amount; + } + Self::Sponsored(delegation) => { + if add_amount.is_positive() { + delegation.return_gas_allowance(add_amount); + } else if add_amount.is_negative() { + delegation.use_gas_allowance(&-add_amount, current_epoch)?; + } + delegation.from_account.gas_allowance += add_amount; + } + } + + if add_amount.is_positive() { + debug!( + "refunded {} atto to {}", + add_amount.atto(), + self.subscriber_address() + ); + } else { + debug!( + "debited {} atto from {}", + -add_amount.atto(), + self.subscriber_address() + ); + } + Ok(()) + } + + /// Validates the delegate expiration. + pub fn validate_delegate_expiration( + &self, + current_epoch: ChainEpoch, + ) -> Result<(), ActorError> { + match self { + Self::Default(_) => Ok(()), + Self::Sponsored(delegation) => delegation.validate_expiration(current_epoch), + } + } + + /// Validates a blob TTL for the subscriber. + pub fn validate_ttl_usage( + &self, + config: &IPCStorageConfig, + ttl: Option, + ) -> Result { + let ttl = ttl.unwrap_or(config.blob_default_ttl); + if ttl < config.blob_min_ttl { + return Err(ActorError::illegal_argument(format!( + "minimum blob TTL is {}", + config.blob_min_ttl + ))); + } else if ttl > self.subscriber().max_ttl { + return Err(ActorError::forbidden(format!( + "attempt to add a blob with TTL ({}) that exceeds account's max allowed TTL ({})", + ttl, + self.subscriber().max_ttl, + ))); + } + Ok(ttl) + } + + /// Saves state to accounts. + pub fn save( + &mut self, + accounts: &mut hamt::map::Hamt<'a, &'a BS, Address, Account>, + ) -> Result<(), ActorError> { + match self { + Self::Default((address, account)) => { + accounts.set(address, account.clone())?; + Ok(()) + } + Self::Sponsored(delegation) => delegation.save(accounts), + } + } + + /// Cancels the optional delegation and converts to the default caller type. 
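validate_ttl_usage above clamps a requested TTL between the config minimum and the account's maximum, falling back to the config default when none is given. A standalone sketch of those bounds follows, using plain i64 epochs in place of ChainEpoch.

/// Sketch of the TTL bounds enforced by `validate_ttl_usage`.
fn validate_ttl(
    requested: Option<i64>,
    default_ttl: i64,
    min_ttl: i64,
    account_max_ttl: i64,
) -> Result<i64, String> {
    let ttl = requested.unwrap_or(default_ttl);
    if ttl < min_ttl {
        Err(format!("minimum blob TTL is {}", min_ttl))
    } else if ttl > account_max_ttl {
        Err(format!("TTL {} exceeds account max {}", ttl, account_max_ttl))
    } else {
        Ok(ttl)
    }
}

fn main() {
    assert_eq!(validate_ttl(None, 3_600, 600, 86_400), Ok(3_600));
    assert!(validate_ttl(Some(60), 3_600, 600, 86_400).is_err());
}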
+ pub fn cancel_delegation( + &mut self, + accounts: &mut hamt::map::Hamt<'a, &'a BS, Address, Account>, + ) -> Result<(), ActorError> { + match self { + Self::Default(_) => Ok(()), + Self::Sponsored(delegation) => { + delegation.cancel(accounts)?; + // Delegation is now invalid, convert to the default caller type + *self = Self::Default((delegation.to, delegation.to_account.clone())); + Ok(()) + } + } + } +} + +/// Helper for handling credit approvals. +pub struct Delegation<'a, BS: Blockstore> { + /// The issuer address. + from: Address, + /// The issuer account. + from_account: Account, + /// The recipient address. + to: Address, + /// The recipient account. + to_account: Account, + /// Approvals from issuer to recipient. + approvals_from: hamt::map::Hamt<'a, BS, Address, CreditApproval>, + /// Approvals to recipient from issuer. + approvals_to: hamt::map::Hamt<'a, BS, Address, CreditApproval>, + /// Approval from issuer to recipient. + approval_from: CreditApproval, + /// Approval to recipient from issuer. + approval_to: CreditApproval, +} + +/// Options for creating a new delegation. +#[derive(Debug, Default)] +pub struct DelegationOptions { + /// Optional credit limit. + pub credit_limit: Option, + /// Optional gas fee limit. + pub gas_fee_limit: Option, + /// Optional time-to-live (TTL). + pub ttl: Option, +} + +impl<'a, BS: Blockstore> Delegation<'a, &'a BS> { + /// Loads an existing delegation. + pub fn load( + store: &'a BS, + accounts: &hamt::map::Hamt<'a, &'a BS, Address, Account>, + from: Address, + to: Address, + to_account: Account, + ) -> Result { + if from == to { + return Err(ActorError::illegal_argument( + "'from' and 'to' addresses must be different".into(), + )); + } + + let from_account = accounts.get_or_err(&from)?; + let approvals_to = from_account.approvals_to.hamt(store)?; + let approval_to = approvals_to.get(&to)?.ok_or(ActorError::forbidden(format!( + "approval to {} from {} not found", + to, from + )))?; + let approvals_from = to_account.approvals_from.hamt(store)?; + let approval_from = approvals_from + .get(&from)? + .ok_or(ActorError::forbidden(format!( + "approval from {} to {} not found", + from, to + )))?; + + Ok(Self { + from, + from_account, + to, + to_account, + approvals_from, + approvals_to, + approval_from, + approval_to, + }) + } + + /// Creates a new delegation from one account to another. 
+ pub fn update_or_create( + store: &'a BS, + config: &IPCStorageConfig, + accounts: &hamt::map::Hamt<'a, &'a BS, Address, Account>, + from: Address, + to: Address, + options: DelegationOptions, + current_epoch: ChainEpoch, + ) -> Result { + if let Some(ttl) = options.ttl { + if ttl < config.blob_min_ttl { + return Err(ActorError::illegal_argument(format!( + "minimum approval TTL is {}", + config.blob_min_ttl + ))); + } + } + + let expiry = options.ttl.map(|t| i64::saturating_add(t, current_epoch)); + let approval = CreditApproval { + credit_limit: options.credit_limit.clone(), + gas_allowance_limit: options.gas_fee_limit.clone(), + expiry, + credit_used: Credit::zero(), + gas_allowance_used: TokenAmount::zero(), + }; + + // Get or create accounts + let from_account = accounts.get_or_create(&from, || { + Account::new(store, current_epoch, config.blob_default_ttl) + })?; + let to_account = accounts.get_or_create(&to, || { + Account::new(store, current_epoch, config.blob_default_ttl) + })?; + + // Get or create approvals + let approvals_to = from_account.approvals_to.hamt(store)?; + let approvals_from = to_account.approvals_from.hamt(store)?; + let mut approval_to = approvals_to.get_or_create(&to, || Ok(approval.clone()))?; + let mut approval_from = approvals_from.get_or_create(&from, || Ok(approval))?; + if approval_from != approval_to { + return Err(ActorError::illegal_state(format!( + "'from' account ({}) approval does not match 'to' account ({}) approval", + from, to, + ))); + } + + // Validate approval changes (check one of them since they are equal) + if let Some(limit) = options.credit_limit.as_ref() { + if &approval_to.credit_used > limit { + return Err(ActorError::illegal_argument(format!( + "limit cannot be less than amount of already used credits ({})", + approval_to.credit_used + ))); + } + } + if let Some(limit) = options.gas_fee_limit.as_ref() { + if &approval_to.gas_allowance_used > limit { + return Err(ActorError::illegal_argument(format!( + "limit cannot be less than amount of already used gas fees ({})", + approval_to.gas_allowance_used + ))); + } + } + + approval_from.credit_limit = options.credit_limit.clone(); + approval_from.gas_allowance_limit = options.gas_fee_limit.clone(); + approval_from.expiry = expiry; + approval_to.credit_limit = options.credit_limit; + approval_to.gas_allowance_limit = options.gas_fee_limit; + approval_to.expiry = expiry; + + debug!( + "approval created from {} to {} (credit limit: {:?}; gas fee limit: {:?}, expiry: {:?}", + from, + to, + approval_from.credit_limit, + approval_from.gas_allowance_limit, + approval_from.expiry + ); + + Ok(Self { + to, + to_account, + from, + from_account, + approvals_from, + approvals_to, + approval_from, + approval_to, + }) + } + + /// Return credit allowance to the delegation. + pub fn return_credit_allowance(&mut self, amount: &Credit) { + self.approval_from.credit_used -= amount; + self.approval_to.credit_used -= amount; + } + + /// Use credit allowance from the delegation. + pub fn use_credit_allowance( + &mut self, + amount: &Credit, + current_epoch: ChainEpoch, + ) -> Result<(), ActorError> { + self.validate_expiration(current_epoch)?; + self.validate_credit_usage(amount)?; + self.approval_from.credit_used += amount; + self.approval_to.credit_used += amount; + Ok(()) + } + + /// Return gas allowance to the delegation. 
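One detail of update_or_create above worth isolating: a revised limit may never drop below what the delegate has already consumed. A small sketch of that check follows, with u64 in place of Credit (illustrative only).

/// Sketch of the limit check in `Delegation::update_or_create`.
fn validate_new_limit(new_limit: Option<u64>, already_used: u64) -> Result<(), String> {
    match new_limit {
        Some(limit) if already_used > limit => Err(format!(
            "limit cannot be less than amount of already used credits ({})",
            already_used
        )),
        _ => Ok(()),
    }
}

fn main() {
    assert!(validate_new_limit(Some(50), 80).is_err());
    assert!(validate_new_limit(Some(100), 80).is_ok());
    assert!(validate_new_limit(None, 80).is_ok()); // no limit means unlimited
}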
+ pub fn return_gas_allowance(&mut self, amount: &TokenAmount) { + self.approval_from.gas_allowance_used -= amount; + self.approval_to.gas_allowance_used -= amount; + } + + /// Use gas allowance from the delegation. + pub fn use_gas_allowance( + &mut self, + amount: &TokenAmount, + current_epoch: ChainEpoch, + ) -> Result<(), ActorError> { + self.validate_expiration(current_epoch)?; + self.validate_gas_usage(amount)?; + self.approval_from.gas_allowance_used += amount; + self.approval_to.gas_allowance_used += amount; + Ok(()) + } + + /// Saves state to accounts. + pub fn save( + &mut self, + accounts: &mut hamt::map::Hamt<'a, &'a BS, Address, Account>, + ) -> Result<(), ActorError> { + // Save the "from" account's "to" approval + self.from_account.approvals_to.save_tracked( + self.approvals_to + .set_and_flush_tracked(&self.to, self.approval_to.clone())?, + ); + // Save the "to" account's "from" approval + self.to_account.approvals_from.save_tracked( + self.approvals_from + .set_and_flush_tracked(&self.from, self.approval_from.clone())?, + ); + // Save the "from" account + accounts.set(&self.from, self.from_account.clone())?; + // Save the "to" account + accounts.set(&self.to, self.to_account.clone())?; + Ok(()) + } + + /// Cancels the underlying approval and saves state to accounts. + pub fn cancel( + &mut self, + accounts: &mut hamt::map::Hamt<'a, &'a BS, Address, Account>, + ) -> Result<(), ActorError> { + // Remove the "from" account's "to" approval + self.from_account + .approvals_to + .save_tracked(self.approvals_to.delete_and_flush_tracked(&self.to)?.0); + // Remove the "to" account's "from" approval + self.to_account + .approvals_from + .save_tracked(self.approvals_from.delete_and_flush_tracked(&self.from)?.0); + // Save the "from" account + accounts.set(&self.from, self.from_account.clone())?; + // Save the "to" account + accounts.set(&self.to, self.to_account.clone())?; + + debug!("approval canceled from {} to {}", self.from, self.to); + Ok(()) + } + + /// Returns the underlying approval. + pub fn approval(&self) -> &CreditApproval { + &self.approval_to + } + + /// Returns the credit allowance for the subscriber. + #[allow(dead_code)] + pub fn credit_allowance(&self, current_epoch: ChainEpoch) -> CreditAllowance { + let mut allowance = CreditAllowance { + amount: self.to_account.credit_free.clone(), + sponsor: Some(self.from), + sponsored_amount: Credit::zero(), + }; + if self.validate_expiration(current_epoch).is_err() { + return allowance; + } + let approval_used = self.approval_to.credit_used.clone(); + let approval_allowance = self.from_account.credit_free.clone(); + let approval_allowance = self + .approval_to + .credit_limit + .clone() + .map_or(approval_allowance.clone(), |limit| { + (limit - approval_used).min(approval_allowance) + }); + allowance.sponsored_amount = approval_allowance; + allowance + } + + /// Returns the gas allowance for the subscriber. 
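The credit_allowance and gas_allowance computations above cap the sponsored amount at the lesser of the sponsor's free balance and the remaining approval headroom (limit minus used), with no limit meaning the whole free balance is available. A standalone numeric sketch follows, with u64 standing in for both Credit and TokenAmount.

/// Sketch of how the sponsored portion of an allowance is capped.
fn sponsored_amount(sponsor_free: u64, limit: Option<u64>, used: u64) -> u64 {
    match limit {
        Some(limit) => limit.saturating_sub(used).min(sponsor_free),
        None => sponsor_free,
    }
}

fn main() {
    // Approval limit 100 with 30 used leaves 70, but the sponsor only has 50 free.
    assert_eq!(sponsored_amount(50, Some(100), 30), 50);
    // With no limit, the whole free balance is available.
    assert_eq!(sponsored_amount(50, None, 30), 50);
    assert_eq!(sponsored_amount(500, Some(100), 30), 70);
}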
+ pub fn gas_allowance(&self, current_epoch: ChainEpoch) -> GasAllowance { + let mut allowance = GasAllowance { + amount: self.to_account.gas_allowance.clone(), + sponsor: Some(self.from), + sponsored_amount: TokenAmount::zero(), + }; + if self.validate_expiration(current_epoch).is_err() { + return allowance; + } + let approval_used = self.approval_to.gas_allowance_used.clone(); + let approval_allowance = self.from_account.gas_allowance.clone(); + let approval_allowance = self + .approval_to + .gas_allowance_limit + .clone() + .map_or(approval_allowance.clone(), |limit| { + (limit - approval_used).min(approval_allowance) + }); + allowance.sponsored_amount = approval_allowance; + allowance + } + + /// Verifies that the delegation's expiry is valid for the current epoch. + pub fn validate_expiration(&self, current_epoch: ChainEpoch) -> Result<(), ActorError> { + self.approval_from.validate_expiration(current_epoch)?; + self.approval_to.validate_expiration(current_epoch)?; + Ok(()) + } + + /// Validates whether the delegation can use the amount of credit. + pub fn validate_credit_usage(&self, amount: &Credit) -> Result<(), ActorError> { + self.approval_from.validate_credit_usage(amount)?; + self.approval_to.validate_credit_usage(amount)?; + Ok(()) + } + + /// Validates whether the delegation can use the amount of gas. + pub fn validate_gas_usage(&self, amount: &TokenAmount) -> Result<(), ActorError> { + self.approval_from.validate_gas_usage(amount)?; + self.approval_to.validate_gas_usage(amount)?; + Ok(()) + } +} diff --git a/fendermint/actors/blobs/src/lib.rs b/fendermint/actors/blobs/src/lib.rs new file mode 100644 index 0000000000..e7889e0e19 --- /dev/null +++ b/fendermint/actors/blobs/src/lib.rs @@ -0,0 +1,13 @@ +// Copyright 2025 Recall Contributors +// Copyright 2021-2023 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT + +mod actor; +mod caller; +mod shared; +mod sol_facade; +mod state; +#[cfg(test)] +mod testing; + +pub use shared::*; diff --git a/fendermint/actors/blobs/src/shared.rs b/fendermint/actors/blobs/src/shared.rs new file mode 100644 index 0000000000..d130f2a553 --- /dev/null +++ b/fendermint/actors/blobs/src/shared.rs @@ -0,0 +1,8 @@ +// Copyright 2025 Recall Contributors +// Copyright 2021-2023 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT + +pub use crate::state::State; + +/// The name of the blob actor. 
+pub const BLOBS_ACTOR_NAME: &str = "blobs"; diff --git a/fendermint/actors/blobs/src/sol_facade/blobs.rs b/fendermint/actors/blobs/src/sol_facade/blobs.rs new file mode 100644 index 0000000000..581972d550 --- /dev/null +++ b/fendermint/actors/blobs/src/sol_facade/blobs.rs @@ -0,0 +1,305 @@ +// Copyright 2022-2024 Recall Contributors +// Copyright 2022-2024 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT + +use fendermint_actor_blobs_shared::{ + blobs::{ + AddBlobParams, Blob, BlobStatus, DeleteBlobParams, GetBlobParams, OverwriteBlobParams, + TrimBlobExpiriesParams, + }, + bytes::B256, + GetStatsReturn, +}; +use fil_actors_runtime::{actor_error, runtime::Runtime, ActorError}; +use fvm_shared::{address::Address, clock::ChainEpoch}; +use ipc_storage_actor_sdk::evm::TryIntoEVMEvent; +pub use ipc_storage_sol_facade::blobs::Calls; +use ipc_storage_sol_facade::{ + blobs as sol, + primitives::U256, + types::{BigUintWrapper, SolCall, SolInterface, H160}, +}; +use num_traits::Zero; + +use crate::sol_facade::{AbiCall, AbiCallRuntime, AbiEncodeError}; + +// ----- Events ----- // + +pub struct BlobAdded<'a> { + pub subscriber: Address, + pub hash: &'a B256, + pub size: u64, + pub expiry: ChainEpoch, + pub bytes_used: u64, +} + +impl TryIntoEVMEvent for BlobAdded<'_> { + type Target = sol::Events; + + fn try_into_evm_event(self) -> Result { + let subscriber: H160 = self.subscriber.try_into()?; + Ok(sol::Events::BlobAdded(sol::BlobAdded { + subscriber: subscriber.into(), + hash: self.hash.0.into(), + size: U256::from(self.size), + expiry: U256::from(self.expiry), + bytesUsed: U256::from(self.bytes_used), + })) + } +} + +pub struct BlobPending<'a> { + pub subscriber: Address, + pub hash: &'a B256, + pub source: &'a B256, +} +impl TryIntoEVMEvent for BlobPending<'_> { + type Target = sol::Events; + fn try_into_evm_event(self) -> Result { + let subscriber: H160 = self.subscriber.try_into()?; + Ok(sol::Events::BlobPending(sol::BlobPending { + subscriber: subscriber.into(), + hash: self.hash.0.into(), + sourceId: self.source.0.into(), + })) + } +} + +pub struct BlobFinalized<'a> { + pub subscriber: Address, + pub hash: &'a B256, + pub resolved: bool, +} +impl TryIntoEVMEvent for BlobFinalized<'_> { + type Target = sol::Events; + fn try_into_evm_event(self) -> Result { + let subscriber: H160 = self.subscriber.try_into()?; + Ok(sol::Events::BlobFinalized(sol::BlobFinalized { + subscriber: subscriber.into(), + hash: self.hash.0.into(), + resolved: self.resolved, + })) + } +} + +pub struct BlobDeleted<'a> { + pub subscriber: Address, + pub hash: &'a B256, + pub size: u64, + pub bytes_released: u64, +} +impl TryIntoEVMEvent for BlobDeleted<'_> { + type Target = sol::Events; + fn try_into_evm_event(self) -> Result { + let subscriber: H160 = self.subscriber.try_into()?; + Ok(sol::Events::BlobDeleted(sol::BlobDeleted { + subscriber: subscriber.into(), + hash: self.hash.0.into(), + size: U256::from(self.size), + bytesReleased: U256::from(self.bytes_released), + })) + } +} + +// ----- Calls ----- // + +pub fn can_handle(input_data: &ipc_storage_actor_sdk::evm::InputData) -> bool { + Calls::valid_selector(input_data.selector()) +} + +pub fn parse_input(input: &ipc_storage_actor_sdk::evm::InputData) -> Result { + Calls::abi_decode_raw(input.selector(), input.calldata(), true) + .map_err(|e| actor_error!(illegal_argument, format!("invalid call: {}", e))) +} + +fn blob_status_as_solidity_enum(blob_status: BlobStatus) -> u8 { + match blob_status { + BlobStatus::Added => 0, + BlobStatus::Pending => 1, + 
BlobStatus::Resolved => 2, + BlobStatus::Failed => 3, + } +} + +impl AbiCallRuntime for sol::addBlobCall { + type Params = Result; + type Returns = (); + type Output = Vec; + fn params(&self, rt: &impl Runtime) -> Self::Params { + let sponsor: Option
= H160::from(self.sponsor).as_option().map(|a| a.into()); + let source = B256(self.source.into()); + let hash = B256(self.blobHash.into()); + let metadata_hash = B256(self.metadataHash.into()); + let subscription_id = self.subscriptionId.clone().try_into()?; + let size = self.size; + let ttl = if self.ttl.is_zero() { + None + } else { + Some(self.ttl as ChainEpoch) + }; + let from = rt.message().caller(); + Ok(AddBlobParams { + sponsor, + source, + hash, + metadata_hash, + id: subscription_id, + size, + ttl, + from, + }) + } + fn returns(&self, returns: Self::Returns) -> Self::Output { + Self::abi_encode_returns(&returns) + } +} + +impl AbiCallRuntime for sol::deleteBlobCall { + type Params = Result; + type Returns = (); + type Output = Vec; + fn params(&self, rt: &impl Runtime) -> Self::Params { + let subscriber = H160::from(self.subscriber).as_option().map(|a| a.into()); + let hash = B256(self.blobHash.into()); + let subscription_id = self.subscriptionId.clone().try_into()?; + let from = rt.message().caller(); + Ok(DeleteBlobParams { + sponsor: subscriber, + hash, + id: subscription_id, + from, + }) + } + fn returns(&self, _: Self::Returns) -> Self::Output { + Self::abi_encode_returns(&()) + } +} + +impl AbiCall for sol::getBlobCall { + type Params = Result; + type Returns = Option; + type Output = Result, AbiEncodeError>; + fn params(&self) -> Self::Params { + let blob_hash = B256(self.blobHash.into()); + Ok(GetBlobParams(blob_hash)) + } + fn returns(&self, blob: Self::Returns) -> Self::Output { + let blob = if let Some(blob) = blob { + sol::Blob { + size: blob.size, + metadataHash: blob.metadata_hash.0.into(), + status: blob_status_as_solidity_enum(blob.status), + subscriptions: blob + .subscribers + .iter() + .map(|(subscription_id, expiry)| sol::Subscription { + expiry: *expiry as u64, + subscriptionId: subscription_id.clone().into(), + }) + .collect(), + } + } else { + sol::Blob { + size: 0, + metadataHash: [0u8; 32].into(), + status: blob_status_as_solidity_enum(BlobStatus::Failed), + subscriptions: Vec::default(), + } + }; + Ok(Self::abi_encode_returns(&(blob,))) + } +} + +impl AbiCall for sol::getStatsCall { + type Params = (); + type Returns = GetStatsReturn; + type Output = Vec; + fn params(&self) -> Self::Params {} + fn returns(&self, stats: Self::Returns) -> Self::Output { + let subnet_stats = sol::SubnetStats { + balance: BigUintWrapper::from(stats.balance).into(), + capacityFree: stats.capacity_free, + capacityUsed: stats.capacity_used, + creditSold: BigUintWrapper::from(stats.credit_sold).into(), + creditCommitted: BigUintWrapper::from(stats.credit_committed).into(), + creditDebited: BigUintWrapper::from(stats.credit_debited).into(), + tokenCreditRate: BigUintWrapper(stats.token_credit_rate.rate().clone()).into(), + numAccounts: stats.num_accounts, + numBlobs: stats.num_blobs, + numAdded: stats.num_added, + bytesAdded: stats.bytes_added, + numResolving: stats.num_resolving, + bytesResolving: stats.bytes_resolving, + }; + Self::abi_encode_returns(&(subnet_stats,)) + } +} + +impl AbiCallRuntime for sol::overwriteBlobCall { + type Params = Result; + type Returns = (); + type Output = Vec; + fn params(&self, rt: &impl Runtime) -> Self::Params { + let old_hash = B256(self.oldHash.into()); + let sponsor = H160::from(self.sponsor).as_option().map(|a| a.into()); + let source = B256(self.source.into()); + let hash = B256(self.blobHash.into()); + let metadata_hash = B256(self.metadataHash.into()); + let subscription_id = self.subscriptionId.clone().try_into()?; + let size = 
self.size; + let ttl = if self.ttl.is_zero() { + None + } else { + Some(self.ttl as ChainEpoch) + }; + let from = rt.message().caller(); + Ok(OverwriteBlobParams { + old_hash, + add: AddBlobParams { + sponsor, + source, + hash, + metadata_hash, + id: subscription_id, + size, + ttl, + from, + }, + }) + } + fn returns(&self, returns: Self::Returns) -> Self::Output { + Self::abi_encode_returns(&returns) + } +} + +impl AbiCall for sol::trimBlobExpiriesCall { + type Params = TrimBlobExpiriesParams; + type Returns = (u32, Option); + type Output = Vec; + + fn params(&self) -> Self::Params { + let limit = self.limit; + let limit = if limit.is_zero() { None } else { Some(limit) }; + let hash: [u8; 32] = self.startingHash.into(); + let hash = if hash == [0; 32] { + None + } else { + Some(B256(hash)) + }; + TrimBlobExpiriesParams { + subscriber: H160::from(self.subscriber).into(), + limit, + starting_hash: hash, + } + } + + fn returns(&self, returns: Self::Returns) -> Self::Output { + let next_key = returns.1; + let next_key = next_key.unwrap_or_default(); + let cursor = sol::TrimBlobExpiries { + processed: returns.0, + nextKey: next_key.0.into(), + }; + Self::abi_encode_returns(&(cursor,)) + } +} diff --git a/fendermint/actors/blobs/src/sol_facade/credit.rs b/fendermint/actors/blobs/src/sol_facade/credit.rs new file mode 100644 index 0000000000..f0882bc285 --- /dev/null +++ b/fendermint/actors/blobs/src/sol_facade/credit.rs @@ -0,0 +1,442 @@ +// Copyright 2022-2024 Recall Contributors +// Copyright 2022-2024 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT + +use std::collections::{HashMap, HashSet}; + +use anyhow::Error; +use fendermint_actor_blobs_shared::{ + accounts::{Account, AccountStatus, GetAccountParams, SetAccountStatusParams}, + credit::{ + ApproveCreditParams, BuyCreditParams, Credit, CreditApproval, GetCreditApprovalParams, + RevokeCreditParams, SetSponsorParams, + }, +}; +use fil_actors_runtime::{actor_error, runtime::Runtime, ActorError}; +use fvm_shared::{address::Address, clock::ChainEpoch, econ::TokenAmount}; +use ipc_storage_actor_sdk::{evm::TryIntoEVMEvent, util::token_to_biguint}; +pub use ipc_storage_sol_facade::credit::Calls; +use ipc_storage_sol_facade::{ + credit as sol, + primitives::U256, + types::{BigUintWrapper, SolCall, SolInterface, H160}, +}; + +use crate::sol_facade::{AbiCall, AbiCallRuntime, AbiEncodeError}; + +pub struct CreditPurchased { + from: Address, + amount: TokenAmount, +} +impl CreditPurchased { + pub fn new(from: Address, amount: TokenAmount) -> Self { + Self { from, amount } + } +} +impl TryIntoEVMEvent for CreditPurchased { + type Target = sol::Events; + fn try_into_evm_event(self) -> Result { + let from: H160 = self.from.try_into()?; + let amount = token_to_biguint(Some(self.amount)); + Ok(sol::Events::CreditPurchased(sol::CreditPurchased { + from: from.into(), + amount: BigUintWrapper(amount).into(), + })) + } +} + +pub struct CreditApproved { + pub from: Address, + pub to: Address, + pub credit_limit: Option, + pub gas_fee_limit: Option, + pub expiry: Option, +} +impl TryIntoEVMEvent for CreditApproved { + type Target = sol::Events; + fn try_into_evm_event(self) -> Result { + let from: H160 = self.from.try_into()?; + let to: H160 = self.to.try_into()?; + let credit_limit = token_to_biguint(self.credit_limit); + let gas_fee_limit = token_to_biguint(self.gas_fee_limit); + Ok(sol::Events::CreditApproved(sol::CreditApproved { + from: from.into(), + to: to.into(), + creditLimit: BigUintWrapper(credit_limit).into(), + gasFeeLimit: 
BigUintWrapper(gas_fee_limit).into(), + expiry: U256::from(self.expiry.unwrap_or_default()), + })) + } +} + +pub struct CreditRevoked { + pub from: Address, + pub to: Address, +} +impl CreditRevoked { + pub fn new(from: Address, to: Address) -> Self { + Self { from, to } + } +} +impl TryIntoEVMEvent for CreditRevoked { + type Target = sol::Events; + fn try_into_evm_event(self) -> Result { + let from: H160 = self.from.try_into()?; + let to: H160 = self.to.try_into()?; + Ok(sol::Events::CreditRevoked(sol::CreditRevoked { + from: from.into(), + to: to.into(), + })) + } +} + +pub struct CreditDebited { + pub amount: TokenAmount, + pub num_accounts: u64, + pub more_accounts: bool, +} +impl TryIntoEVMEvent for CreditDebited { + type Target = sol::Events; + fn try_into_evm_event(self) -> Result { + let amount = token_to_biguint(Some(self.amount)); + Ok(sol::Events::CreditDebited(sol::CreditDebited { + amount: BigUintWrapper(amount).into(), + numAccounts: U256::from(self.num_accounts), + moreAccounts: self.more_accounts, + })) + } +} + +// ----- Calls ----- // + +pub fn can_handle(input_data: &ipc_storage_actor_sdk::evm::InputData) -> bool { + Calls::valid_selector(input_data.selector()) +} + +pub fn parse_input(input: &ipc_storage_actor_sdk::evm::InputData) -> Result { + Calls::abi_decode_raw(input.selector(), input.calldata(), true) + .map_err(|e| actor_error!(illegal_argument, format!("invalid call: {}", e))) +} + +/// function buyCredit() external payable; +impl AbiCallRuntime for sol::buyCredit_0Call { + type Params = BuyCreditParams; + type Returns = (); + type Output = Vec; + + fn params(&self, rt: &impl Runtime) -> Self::Params { + let recipient = rt.message().caller(); + BuyCreditParams(recipient) + } + + fn returns(&self, returns: Self::Returns) -> Self::Output { + Self::abi_encode_returns(&returns) + } +} + +/// function buyCredit(address recipient) external payable; +impl AbiCall for sol::buyCredit_1Call { + type Params = BuyCreditParams; + type Returns = (); + type Output = Vec; + + fn params(&self) -> Self::Params { + let recipient: Address = H160::from(self.recipient).into(); + BuyCreditParams(recipient) + } + + fn returns(&self, returns: Self::Returns) -> Self::Output { + Self::abi_encode_returns(&returns) + } +} + +/// function approveCredit(address to) external; +impl AbiCall for sol::approveCredit_0Call { + type Params = ApproveCreditParams; + type Returns = (); + type Output = Vec; + + fn params(&self) -> Self::Params { + let to: Address = H160::from(self.to).into(); + ApproveCreditParams { + to, + caller_allowlist: None, + credit_limit: None, + gas_fee_limit: None, + ttl: None, + } + } + + fn returns(&self, returns: Self::Returns) -> Self::Output { + Self::abi_encode_returns(&returns) + } +} + +/// function approveCredit(address to, address[] memory caller, uint256 creditLimit, uint256 gasFeeLimit, uint64 ttl) external; +impl AbiCall for sol::approveCredit_1Call { + type Params = ApproveCreditParams; + type Returns = (); + type Output = Vec; + + fn params(&self) -> Self::Params { + let to: Address = H160::from(self.to).into(); + let caller_allowlist: HashSet
= HashSet::from_iter( + self.caller + .iter() + .map(|sol_address| H160::from(*sol_address).into()), + ); + let credit_limit: Credit = BigUintWrapper::from(self.creditLimit).into(); + let gas_fee_limit: TokenAmount = BigUintWrapper::from(self.gasFeeLimit).into(); + let ttl = self.ttl; + ApproveCreditParams { + to, + caller_allowlist: Some(caller_allowlist), + credit_limit: Some(credit_limit), + gas_fee_limit: Some(gas_fee_limit), + ttl: Some(ttl as ChainEpoch), + } + } + + fn returns(&self, returns: Self::Returns) -> Self::Output { + Self::abi_encode_returns(&returns) + } +} + +/// function approveCredit(address to, address[] memory caller) external; +impl AbiCall for sol::approveCredit_2Call { + type Params = ApproveCreditParams; + type Returns = (); + type Output = Vec; + + fn params(&self) -> Self::Params { + let to: Address = H160::from(self.to).into(); + let caller_allowlist: HashSet
= HashSet::from_iter( + self.caller + .iter() + .map(|sol_address| H160::from(*sol_address).into()), + ); + ApproveCreditParams { + to, + caller_allowlist: Some(caller_allowlist), + credit_limit: None, + gas_fee_limit: None, + ttl: None, + } + } + + fn returns(&self, returns: Self::Returns) -> Self::Output { + Self::abi_encode_returns(&returns) + } +} + +/// function revokeCredit(address to, address caller) external; +impl AbiCall for sol::revokeCredit_0Call { + type Params = RevokeCreditParams; + type Returns = (); + type Output = Vec; + + fn params(&self) -> Self::Params { + let to: Address = H160::from(self.to).into(); + let caller: Address = H160::from(self.caller).into(); + RevokeCreditParams { + to, + for_caller: Some(caller), + } + } + + fn returns(&self, returns: Self::Returns) -> Self::Output { + Self::abi_encode_returns(&returns) + } +} + +/// function revokeCredit(address to) external; +impl AbiCall for sol::revokeCredit_1Call { + type Params = RevokeCreditParams; + type Returns = (); + type Output = Vec; + + fn params(&self) -> Self::Params { + let to: Address = H160::from(self.to).into(); + RevokeCreditParams { + to, + for_caller: None, + } + } + + fn returns(&self, returns: Self::Returns) -> Self::Output { + Self::abi_encode_returns(&returns) + } +} + +/// function setAccountSponsor(address from, address sponsor) external; +impl AbiCall for sol::setAccountSponsorCall { + type Params = SetSponsorParams; // FIXME SU Needs runtime for "from" + type Returns = (); + type Output = Vec; + + fn params(&self) -> Self::Params { + let sponsor = H160::from(self.sponsor); + let sponsor: Option
= if sponsor.is_null() { + None + } else { + Some(sponsor.into()) + }; + SetSponsorParams(sponsor) + } + + fn returns(&self, returns: Self::Returns) -> Self::Output { + Self::abi_encode_returns(&returns) + } +} + +fn convert_approvals( + approvals: HashMap, +) -> Result, Error> { + approvals + .iter() + .map(|(address, credit_approval)| { + let approval = sol::Approval { + addr: H160::try_from(*address)?.into(), + approval: sol::CreditApproval { + creditLimit: credit_approval + .credit_limit + .clone() + .map(BigUintWrapper::from) + .unwrap_or_default() + .into(), + gasFeeLimit: credit_approval + .gas_allowance_limit + .clone() + .map(BigUintWrapper::from) + .unwrap_or_default() + .into(), + expiry: credit_approval.expiry.unwrap_or_default() as u64, + creditUsed: BigUintWrapper::from(credit_approval.credit_used.clone()).into(), + gasFeeUsed: BigUintWrapper::from(credit_approval.gas_allowance_used.clone()) + .into(), + }, + }; + Ok(approval) + }) + .collect::, Error>>() +} + +/// function getAccount(address addr) external view returns (Account memory account); +impl AbiCall for sol::getAccountCall { + type Params = GetAccountParams; + type Returns = Option; + type Output = Result, AbiEncodeError>; + + fn params(&self) -> Self::Params { + let address: Address = H160::from(self.addr).into(); + GetAccountParams(address) + } + + fn returns(&self, returns: Self::Returns) -> Self::Output { + let sol_account = if let Some(account) = returns { + let credit_sponsor: H160 = account + .credit_sponsor + .map(H160::try_from) + .transpose()? + .unwrap_or_default(); + let approvals_from = convert_approvals(account.approvals_from)?; + let approvals_to = convert_approvals(account.approvals_to)?; + sol::Account { + capacityUsed: account.capacity_used, + creditFree: BigUintWrapper::from(account.credit_free).into(), + creditCommitted: BigUintWrapper::from(account.credit_committed).into(), + creditSponsor: credit_sponsor.into(), + lastDebitEpoch: account.last_debit_epoch as u64, + approvalsFrom: approvals_from, + approvalsTo: approvals_to, + maxTtl: account.max_ttl as u64, + gasAllowance: BigUintWrapper::from(account.gas_allowance).into(), + } + } else { + sol::Account { + capacityUsed: u64::default(), + creditFree: U256::default(), + creditCommitted: U256::default(), + creditSponsor: H160::default().into(), + lastDebitEpoch: u64::default(), + approvalsTo: Vec::default(), + approvalsFrom: Vec::default(), + maxTtl: u64::default(), + gasAllowance: U256::default(), + } + }; + Ok(Self::abi_encode_returns(&(sol_account,))) + } +} + +/// function getCreditApproval(address from, address to) external view returns (CreditApproval memory approval); +impl AbiCall for sol::getCreditApprovalCall { + type Params = GetCreditApprovalParams; + type Returns = Option; + type Output = Vec; + + fn params(&self) -> Self::Params { + let from = H160::from(self.from); + let to = H160::from(self.to); + GetCreditApprovalParams { + from: from.into(), + to: to.into(), + } + } + + fn returns(&self, value: Self::Returns) -> Self::Output { + let approval_result = if let Some(credit_approval) = value { + sol::CreditApproval { + creditLimit: credit_approval + .credit_limit + .clone() + .map(BigUintWrapper::from) + .unwrap_or_default() + .into(), + gasFeeLimit: credit_approval + .gas_allowance_limit + .clone() + .map(BigUintWrapper::from) + .unwrap_or_default() + .into(), + expiry: credit_approval.expiry.unwrap_or_default() as u64, + creditUsed: BigUintWrapper::from(credit_approval.credit_used.clone()).into(), + gasFeeUsed: 
BigUintWrapper::from(credit_approval.gas_allowance_used.clone()).into(), + } + } else { + sol::CreditApproval { + creditLimit: BigUintWrapper::default().into(), + gasFeeLimit: BigUintWrapper::default().into(), + expiry: u64::default(), + creditUsed: BigUintWrapper::default().into(), + gasFeeUsed: BigUintWrapper::default().into(), + } + }; + Self::abi_encode_returns(&(approval_result,)) + } +} + +/// function setAccountStatus(address subscriber, TtlStatus ttlStatus) external; +impl AbiCall for sol::setAccountStatusCall { + type Params = Result; + type Returns = (); + type Output = Vec; + + fn params(&self) -> Self::Params { + let subscriber = H160::from(self.subscriber); + let ttl_status = match self.ttlStatus { + 0 => AccountStatus::Default, + 1 => AccountStatus::Reduced, + 2 => AccountStatus::Extended, + _ => return Err(actor_error!(illegal_argument, "invalid account status")), + }; + Ok(SetAccountStatusParams { + subscriber: subscriber.into(), + status: ttl_status, + }) + } + + fn returns(&self, returns: Self::Returns) -> Self::Output { + Self::abi_encode_returns(&returns) + } +} diff --git a/fendermint/actors/blobs/src/sol_facade/gas.rs b/fendermint/actors/blobs/src/sol_facade/gas.rs new file mode 100644 index 0000000000..52e67132c5 --- /dev/null +++ b/fendermint/actors/blobs/src/sol_facade/gas.rs @@ -0,0 +1,40 @@ +// Copyright 2022-2024 Recall Contributors +// Copyright 2022-2024 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT + +use anyhow::Error; +use fvm_shared::address::Address; +use ipc_storage_actor_sdk::evm::TryIntoEVMEvent; +use ipc_storage_sol_facade::gas as sol; +use ipc_storage_sol_facade::types::H160; + +pub struct GasSponsorSet { + sponsor: Address, +} +impl GasSponsorSet { + pub fn mew(sponsor: Address) -> Self { + Self { sponsor } + } +} +impl TryIntoEVMEvent for GasSponsorSet { + type Target = sol::Events; + fn try_into_evm_event(self) -> Result { + let sponsor: H160 = self.sponsor.try_into()?; + Ok(sol::Events::GasSponsorSet(sol::GasSponsorSet { + sponsor: sponsor.into(), + })) + } +} + +pub struct GasSponsorUnset {} +impl GasSponsorUnset { + pub fn new() -> Self { + Self {} + } +} +impl TryIntoEVMEvent for GasSponsorUnset { + type Target = sol::Events; + fn try_into_evm_event(self) -> Result { + Ok(sol::Events::GasSponsorUnset(sol::GasSponsorUnset {})) + } +} diff --git a/fendermint/actors/blobs/src/sol_facade/mod.rs b/fendermint/actors/blobs/src/sol_facade/mod.rs new file mode 100644 index 0000000000..a4ab72de62 --- /dev/null +++ b/fendermint/actors/blobs/src/sol_facade/mod.rs @@ -0,0 +1,11 @@ +// Copyright 2022-2024 Recall Contributors +// Copyright 2022-2024 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT + +use ipc_storage_actor_sdk::declare_abi_call; + +declare_abi_call!(); + +pub mod blobs; +pub mod credit; +pub mod gas; diff --git a/fendermint/actors/blobs/src/state.rs b/fendermint/actors/blobs/src/state.rs new file mode 100644 index 0000000000..b09747d2a7 --- /dev/null +++ b/fendermint/actors/blobs/src/state.rs @@ -0,0 +1,64 @@ +// Copyright 2025 Recall Contributors +// Copyright 2021-2023 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT + +use fendermint_actor_blobs_shared::GetStatsReturn; +use fendermint_actor_ipc_storage_config_shared::IPCStorageConfig; +use fil_actors_runtime::ActorError; +use fvm_ipld_blockstore::Blockstore; +use fvm_ipld_encoding::tuple::*; +use fvm_shared::econ::TokenAmount; + +pub mod accounts; +pub mod blobs; +pub mod credit; +pub mod operators; + +use accounts::Accounts; +use blobs::{Blobs, 
DeleteBlobStateParams}; +use credit::Credits; +use operators::Operators; + +/// The state represents all accounts and stored blobs. +#[derive(Debug, Serialize_tuple, Deserialize_tuple)] +pub struct State { + /// Struct containing credit-related state. + pub credits: Credits, + /// HAMT containing all accounts keyed by actor ID address. + pub accounts: Accounts, + /// HAMT containing all blobs keyed by blob hash. + pub blobs: Blobs, + /// Registry of node operators for blob storage. + pub operators: Operators, +} + +impl State { + /// Creates a new [`State`]. + pub fn new(store: &BS) -> Result { + Ok(Self { + credits: Credits::default(), + accounts: Accounts::new(store)?, + blobs: Blobs::new(store)?, + operators: Operators::new(store)?, + }) + } + + /// Returns stats about the current actor state. + pub fn get_stats(&self, config: &IPCStorageConfig, balance: TokenAmount) -> GetStatsReturn { + GetStatsReturn { + balance, + capacity_free: self.capacity_available(config.blob_capacity), + capacity_used: self.blobs.bytes_size(), + credit_sold: self.credits.credit_sold.clone(), + credit_committed: self.credits.credit_committed.clone(), + credit_debited: self.credits.credit_debited.clone(), + token_credit_rate: config.token_credit_rate.clone(), + num_accounts: self.accounts.len(), + num_blobs: self.blobs.len(), + num_added: self.blobs.added.len(), + bytes_added: self.blobs.added.bytes_size(), + num_resolving: self.blobs.pending.len(), + bytes_resolving: self.blobs.pending.bytes_size(), + } + } +} diff --git a/fendermint/actors/blobs/src/state/accounts.rs b/fendermint/actors/blobs/src/state/accounts.rs new file mode 100644 index 0000000000..592ed8bc2e --- /dev/null +++ b/fendermint/actors/blobs/src/state/accounts.rs @@ -0,0 +1,10 @@ +// Copyright 2025 Recall Contributors +// Copyright 2021-2023 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT + +mod account; +mod methods; +#[cfg(test)] +mod tests; + +pub use account::*; diff --git a/fendermint/actors/blobs/src/state/accounts/account.rs b/fendermint/actors/blobs/src/state/accounts/account.rs new file mode 100644 index 0000000000..4c18a32ad3 --- /dev/null +++ b/fendermint/actors/blobs/src/state/accounts/account.rs @@ -0,0 +1,168 @@ +// Copyright 2025 Recall Contributors +// Copyright 2022-2024 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT + +use std::collections::HashMap; + +use fendermint_actor_blobs_shared::{self as shared, credit::Credit}; +use fil_actors_runtime::{runtime::Runtime, ActorError}; +use fvm_ipld_blockstore::Blockstore; +use fvm_ipld_encoding::tuple::*; +use fvm_shared::{address::Address, clock::ChainEpoch, econ::TokenAmount}; +use ipc_storage_actor_sdk::util::to_delegated_address; +use ipc_storage_ipld::hamt::{self, map::TrackedFlushResult, BytesKey}; + +use crate::state::credit::Approvals; + +/// The stored representation of an account. +#[derive(Clone, PartialEq, Serialize_tuple, Deserialize_tuple)] +pub struct Account { + /// Total size of all blobs managed by the account. + pub capacity_used: u64, + /// Current free credit in byte-blocks that can be used for new commitments. + pub credit_free: Credit, + /// Current committed credit in byte-blocks that will be used for debits. + pub credit_committed: Credit, + /// Optional default sponsor account address. + pub credit_sponsor: Option
, + /// The chain epoch of the last debit. + pub last_debit_epoch: ChainEpoch, + /// Credit approvals to other accounts from this account, keyed by receiver. + pub approvals_to: Approvals, + /// Credit approvals to this account from other accounts, keyed by sender. + pub approvals_from: Approvals, + /// The maximum allowed TTL for actor's blobs. + pub max_ttl: ChainEpoch, + /// The total token value an account has used to buy credits. + pub gas_allowance: TokenAmount, +} + +impl Account { + /// Returns a new [`Account`]. + pub fn new( + store: &BS, + current_epoch: ChainEpoch, + max_ttl: ChainEpoch, + ) -> Result { + Ok(Self { + capacity_used: 0, + credit_free: Credit::default(), + credit_committed: Credit::default(), + credit_sponsor: None, + last_debit_epoch: current_epoch, + approvals_to: Approvals::new(store)?, + approvals_from: Approvals::new(store)?, + max_ttl, + gas_allowance: TokenAmount::default(), + }) + } +} + +impl std::fmt::Debug for Account { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("Account") + .field("capacity_used", &self.capacity_used) + .field("credit_free", &self.credit_free) + .field("credit_committed", &self.credit_committed) + .field("credit_sponsor", &self.credit_sponsor) + .field("last_debit_epoch", &self.last_debit_epoch) + .field("max_ttl", &self.max_ttl) + .field("gas_allowance", &self.gas_allowance) + .finish() + } +} + +impl Account { + /// Returns [`shared::accounts::Account`] that is safe to return from actor methods. + pub fn to_shared(&self, rt: &impl Runtime) -> Result { + let store = rt.store(); + let mut approvals_to = HashMap::new(); + self.approvals_to + .hamt(store)? + .for_each(|address, approval| { + let external_account_address = to_delegated_address(rt, address)?; + approvals_to.insert(external_account_address, approval.clone()); + Ok(()) + })?; + + let mut approvals_from = HashMap::new(); + self.approvals_from + .hamt(store)? + .for_each(|address, approval| { + let external_account_address = to_delegated_address(rt, address)?; + approvals_from.insert(external_account_address, approval.clone()); + Ok(()) + })?; + + Ok(shared::accounts::Account { + capacity_used: self.capacity_used, + credit_free: self.credit_free.clone(), + credit_committed: self.credit_committed.clone(), + credit_sponsor: self.credit_sponsor, + last_debit_epoch: self.last_debit_epoch, + approvals_to, + approvals_from, + max_ttl: self.max_ttl, + gas_allowance: self.gas_allowance.clone(), + }) + } +} + +/// HAMT wrapper for accounts state. +#[derive(Debug, Serialize_tuple, Deserialize_tuple)] +pub struct Accounts { + /// The HAMT root. + pub root: hamt::Root, + /// The size of the collection. + size: u64, + /// The next account to debit in the current debit cycle. + /// If this is None, we have finished the debit cycle. + next_debit_address: Option
, +} + +impl Accounts { + /// Returns a new account collection. + pub fn new(store: &BS) -> Result { + let root = hamt::Root::::new(store, "accounts")?; + Ok(Self { + root, + size: 0, + next_debit_address: None, + }) + } + + /// Returns the underlying [`hamt::map::Hamt`]. + pub fn hamt<'a, BS: Blockstore>( + &self, + store: BS, + ) -> Result, ActorError> { + self.root.hamt(store, self.size) + } + + /// Saves the state from the [`TrackedFlushResult`]. + pub fn save_tracked(&mut self, tracked_flush_result: TrackedFlushResult) { + self.root = tracked_flush_result.root; + self.size = tracked_flush_result.size + } + + /// Saves the start address to be used by the next debit round. + pub fn save_debit_progress(&mut self, next_address: Option
) { + self.next_debit_address = next_address; + } + + /// Returns the start address to be used by the next debit round. + pub fn get_debit_start_address(&self) -> Option { + self.next_debit_address + .map(|address| BytesKey::from(address.to_bytes())) + } + + /// The size of the collection. + pub fn len(&self) -> u64 { + self.size + } + + /// Returns true if the collection is empty. + pub fn is_empty(&self) -> bool { + self.size == 0 + } +} diff --git a/fendermint/actors/blobs/src/state/accounts/methods.rs b/fendermint/actors/blobs/src/state/accounts/methods.rs new file mode 100644 index 0000000000..674b072fdf --- /dev/null +++ b/fendermint/actors/blobs/src/state/accounts/methods.rs @@ -0,0 +1,157 @@ +// Copyright 2025 Recall Contributors +// Copyright 2021-2023 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT + +use std::collections::HashSet; + +use fendermint_actor_blobs_shared::{accounts::AccountStatus, bytes::B256}; +use fendermint_actor_ipc_storage_config_shared::IPCStorageConfig; +use fil_actors_runtime::ActorError; +use fvm_ipld_blockstore::Blockstore; +use fvm_shared::{address::Address, clock::ChainEpoch}; +use log::{debug, warn}; + +use super::Account; +use crate::{caller::Caller, state::DeleteBlobStateParams, State}; + +impl State { + /// Returns an [`Account`] by address. + pub fn get_account( + &self, + store: &BS, + address: Address, + ) -> Result, ActorError> { + let accounts = self.accounts.hamt(store)?; + accounts.get(&address) + } + + /// Sets an account's [`TtlStatus`] by address. + /// + /// Flushes state to the blockstore. + pub fn set_account_status( + &mut self, + store: &BS, + config: &IPCStorageConfig, + address: Address, + status: AccountStatus, + current_epoch: ChainEpoch, + ) -> Result<(), ActorError> { + let mut accounts = self.accounts.hamt(store)?; + match status { + // We don't want to create an account for default TTL + AccountStatus::Default => { + if let Some(mut account) = accounts.get(&address)? { + account.max_ttl = status.get_max_ttl(config.blob_default_ttl); + self.accounts + .save_tracked(accounts.set_and_flush_tracked(&address, account)?); + } + } + _ => { + // Get or create a new account + let max_ttl = status.get_max_ttl(config.blob_default_ttl); + let mut account = accounts + .get_or_create(&address, || Account::new(store, current_epoch, max_ttl))?; + account.max_ttl = max_ttl; + self.accounts + .save_tracked(accounts.set_and_flush_tracked(&address, account)?); + } + } + Ok(()) + } + + /// Debits accounts for their blob usage and cleans up expired blob subscriptions. + /// + /// This method performs two main operations: + /// 1. Deletes expired blob subscriptions based on the current epoch + /// 2. Debits a batch of accounts for their ongoing blob storage usage + /// + /// The debiting process works in cycles, processing a subset of accounts in each call + /// to avoid excessive computation in a single pass. The number of accounts processed + /// in each batch is controlled by the subnet config parameter `account_debit_batch_size`. + /// Similarly, expired blob deletion is controlled by `blob_delete_batch_size`. + /// + /// Flushes state to the blockstore. + /// + /// TODO: Break this into two methods called by a `cron_tick` actor method. 
+ pub fn debit_accounts( + &mut self, + store: &BS, + config: &IPCStorageConfig, + current_epoch: ChainEpoch, + ) -> Result<(HashSet, bool), ActorError> { + // Delete expired subscriptions + let mut delete_from_disc = HashSet::new(); + let mut num_deleted = 0; + let mut expiries = self.blobs.expiries.clone(); + let mut credit_return_groups = HashSet::new(); + expiries.foreach_up_to_epoch( + store, + current_epoch, + Some(config.blob_delete_batch_size), + |_, subscriber, key| { + let key_tuple = (subscriber, key.hash); + match self.delete_blob( + store, + subscriber, + None, + DeleteBlobStateParams { + hash: key.hash, + id: key.id.clone(), + epoch: current_epoch, + skip_credit_return: credit_return_groups.contains(&key_tuple), + }, + ) { + Ok((from_disc, _, credit_returned)) => { + num_deleted += 1; + if from_disc { + delete_from_disc.insert(key.hash); + } + if credit_returned { + credit_return_groups.insert(key_tuple); + } + } + Err(e) => { + warn!( + "failed to delete blob {} for {} (id: {}): {}", + key.hash, subscriber, key.id, e + ) + } + } + Ok(()) + }, + )?; + + debug!("deleted {} expired subscriptions", num_deleted); + debug!( + "{} blobs marked for deletion from disc", + delete_from_disc.len() + ); + + // Debit accounts for existing usage + let reader = self.accounts.hamt(store)?; + let mut writer = self.accounts.hamt(store)?; + let start_key = self.accounts.get_debit_start_address(); + let (count, next_account) = reader.for_each_ranged( + start_key.as_ref(), + Some(config.account_debit_batch_size as usize), + |address, account| { + let mut caller = + Caller::load_account(store, &reader, address, account.clone(), None)?; + self.debit_caller(&mut caller, current_epoch); + caller.save(&mut writer)?; + Ok(true) + }, + )?; + + // Save accounts + self.accounts.save_tracked(writer.flush_tracked()?); + self.accounts.save_debit_progress(next_account); + + debug!( + "finished debiting {:#?} accounts, next account: {:#?}", + count, next_account + ); + + Ok((delete_from_disc, next_account.is_some())) + } +} diff --git a/fendermint/actors/blobs/src/state/accounts/tests.rs b/fendermint/actors/blobs/src/state/accounts/tests.rs new file mode 100644 index 0000000000..91c2636cee --- /dev/null +++ b/fendermint/actors/blobs/src/state/accounts/tests.rs @@ -0,0 +1,493 @@ +// Copyright 2025 Recall Contributors +// Copyright 2022-2024 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT + +use fendermint_actor_blobs_shared::{ + accounts::AccountStatus, + blobs::{BlobStatus, SubscriptionId}, + credit::Credit, +}; +use fendermint_actor_blobs_testing::{ + new_address, new_hash, new_metadata_hash, new_pk, setup_logs, +}; +use fendermint_actor_ipc_storage_config_shared::IPCStorageConfig; +use fvm_ipld_blockstore::{Blockstore, MemoryBlockstore}; +use fvm_shared::{address::Address, clock::ChainEpoch, econ::TokenAmount}; +use num_traits::Zero; + +use crate::state::blobs::SetPendingBlobStateParams; +use crate::{ + caller::DelegationOptions, + state::blobs::{AddBlobStateParams, FinalizeBlobStateParams}, + testing::check_approval_used, + State, +}; + +#[test] +fn test_set_account_status() { + setup_logs(); + + let config = IPCStorageConfig::default(); + + struct TestCase { + name: &'static str, + initial_ttl_status: Option, // None means don't set the initial status + new_ttl_status: AccountStatus, + expected_ttl: ChainEpoch, + } + + let test_cases = vec![ + TestCase { + name: "Setting Reduced on new account", + initial_ttl_status: None, + new_ttl_status: AccountStatus::Reduced, + expected_ttl: 0, + }, + 
TestCase { + name: "Setting Default on new account", + initial_ttl_status: None, + new_ttl_status: AccountStatus::Default, + expected_ttl: config.blob_default_ttl, + }, + TestCase { + name: "Changing from Default to Reduced", + initial_ttl_status: Some(AccountStatus::Default), + new_ttl_status: AccountStatus::Reduced, + expected_ttl: 0, + }, + TestCase { + name: "Changing from Extended to Reduced", + initial_ttl_status: Some(AccountStatus::Extended), + new_ttl_status: AccountStatus::Reduced, + expected_ttl: 0, + }, + TestCase { + name: "Changing from Reduced to Extended", + initial_ttl_status: Some(AccountStatus::Reduced), + new_ttl_status: AccountStatus::Extended, + expected_ttl: ChainEpoch::MAX, + }, + ]; + + for tc in test_cases { + let store = MemoryBlockstore::default(); + let mut state = State::new(&store).unwrap(); + let address = new_address(); + let current_epoch = ChainEpoch::from(1); + + // Initialize the account if needed + if tc.initial_ttl_status.is_some() { + state + .set_account_status( + &store, + &config, + address, + tc.initial_ttl_status.unwrap(), + current_epoch, + ) + .unwrap(); + } + + // Change TTL status + let res = + state.set_account_status(&store, &config, address, tc.new_ttl_status, current_epoch); + assert!( + res.is_ok(), + "Test case '{}' failed to set TTL status", + tc.name + ); + + // Verify max TTL + let max_ttl = state.get_account_max_ttl(&config, &store, address).unwrap(); + assert_eq!( + max_ttl, tc.expected_ttl, + "Test case '{}' failed: expected max TTL {}, got {}", + tc.name, tc.expected_ttl, max_ttl + ); + } +} + +#[test] +fn test_debit_accounts_delete_from_disc() { + setup_logs(); + let config = IPCStorageConfig::default(); + let store = MemoryBlockstore::default(); + let mut state = State::new(&store).unwrap(); + let caller = new_address(); + let current_epoch = ChainEpoch::from(1); + let token_amount = TokenAmount::from_whole(10); + state + .buy_credit(&store, &config, caller, token_amount.clone(), current_epoch) + .unwrap(); + debit_accounts_delete_from_disc( + &config, + &store, + state, + caller, + None, + current_epoch, + token_amount, + false, + ); +} + +#[test] +fn test_debit_accounts_delete_from_disc_with_approval() { + setup_logs(); + let config = IPCStorageConfig::default(); + let store = MemoryBlockstore::default(); + let mut state = State::new(&store).unwrap(); + let caller = new_address(); + let sponsor = new_address(); + let current_epoch = ChainEpoch::from(1); + let token_amount = TokenAmount::from_whole(10); + state + .buy_credit( + &store, + &config, + sponsor, + token_amount.clone(), + current_epoch, + ) + .unwrap(); + state + .approve_credit( + &config, + &store, + sponsor, + caller, + DelegationOptions::default(), + current_epoch, + ) + .unwrap(); + debit_accounts_delete_from_disc( + &config, + &store, + state, + caller, + Some(sponsor), + current_epoch, + token_amount, + true, + ); +} + +#[allow(clippy::too_many_arguments)] +fn debit_accounts_delete_from_disc( + config: &IPCStorageConfig, + store: &BS, + mut state: State, + caller: Address, + sponsor: Option
, + current_epoch: ChainEpoch, + token_amount: TokenAmount, + using_approval: bool, +) { + let subscriber = sponsor.unwrap_or(caller); + let mut credit_amount = + Credit::from_atto(token_amount.atto().clone()) * &config.token_credit_rate; + + // Add blob with default a subscription ID + let (hash, size) = new_hash(1024); + let add1_epoch = current_epoch; + let id1 = SubscriptionId::default(); + let ttl1 = ChainEpoch::from(config.blob_min_ttl); + let source = new_pk(); + let res = state.add_blob( + &store, + config, + caller, + sponsor, + AddBlobStateParams { + hash, + metadata_hash: new_metadata_hash(), + id: id1.clone(), + size, + ttl: Some(ttl1), + source, + epoch: add1_epoch, + token_amount: TokenAmount::zero(), + }, + ); + assert!(res.is_ok()); + + let stats = state.get_stats(config, TokenAmount::zero()); + // Using a credit delegation creates both the from and to account + let expected_num_accounts = if using_approval { 2 } else { 1 }; + assert_eq!(stats.num_accounts, expected_num_accounts); + assert_eq!(stats.num_blobs, 1); + assert_eq!(stats.num_resolving, 0); + assert_eq!(stats.bytes_resolving, 0); + assert_eq!(stats.num_added, 1); + assert_eq!(stats.bytes_added, size); + + // Set to status pending + let res = state.set_blob_pending( + &store, + subscriber, + SetPendingBlobStateParams { + hash, + size, + id: id1.clone(), + source, + }, + ); + assert!(res.is_ok()); + let stats = state.get_stats(config, TokenAmount::zero()); + assert_eq!(stats.num_blobs, 1); + assert_eq!(stats.num_resolving, 1); + assert_eq!(stats.bytes_resolving, size); + assert_eq!(stats.num_added, 0); + assert_eq!(stats.bytes_added, 0); + + // Finalize as resolved + let finalize_epoch = ChainEpoch::from(11); + let res = state.finalize_blob( + &store, + subscriber, + FinalizeBlobStateParams { + source, + hash, + size, + id: id1.clone(), + status: BlobStatus::Resolved, + epoch: finalize_epoch, + }, + ); + assert!(res.is_ok()); + let stats = state.get_stats(config, TokenAmount::zero()); + assert_eq!(stats.num_blobs, 1); + assert_eq!(stats.num_resolving, 0); + assert_eq!(stats.bytes_resolving, 0); + assert_eq!(stats.num_added, 0); + assert_eq!(stats.bytes_added, 0); + + // Check the account balance + let account = state.get_account(&store, subscriber).unwrap().unwrap(); + assert_eq!(account.last_debit_epoch, add1_epoch); + assert_eq!( + account.credit_committed, + Credit::from_whole(ttl1 as u64 * size) + ); + credit_amount -= &account.credit_committed; + assert_eq!(account.credit_free, credit_amount); + assert_eq!(account.capacity_used, size); + + // Add the same blob but this time uses a different subscription ID + let add2_epoch = ChainEpoch::from(21); + let ttl2 = ChainEpoch::from(config.blob_min_ttl); + let id2 = SubscriptionId::new("foo").unwrap(); + let source = new_pk(); + let res = state.add_blob( + &store, + config, + caller, + sponsor, + AddBlobStateParams { + hash, + metadata_hash: new_metadata_hash(), + id: id2.clone(), + size, + ttl: Some(ttl2), + source, + epoch: add2_epoch, + token_amount: TokenAmount::zero(), + }, + ); + assert!(res.is_ok()); + + let stats = state.get_stats(config, TokenAmount::zero()); + assert_eq!(stats.num_blobs, 1); + assert_eq!(stats.num_resolving, 0); + assert_eq!(stats.bytes_resolving, 0); + assert_eq!(stats.num_added, 0); + assert_eq!(stats.bytes_added, 0); + + // Check the account balance + let account = state.get_account(&store, subscriber).unwrap().unwrap(); + assert_eq!(account.last_debit_epoch, add2_epoch); + assert_eq!( + account.credit_committed, // stays the same 
becuase we're starting over + Credit::from_whole(ttl2 as u64 * size), + ); + credit_amount -= Credit::from_whole((add2_epoch - add1_epoch) as u64 * size); + assert_eq!(account.credit_free, credit_amount); + assert_eq!(account.capacity_used, size); // not changed + + // Check the subscription group + let blob = state.get_blob(&store, hash).unwrap().unwrap(); + let subscribers = blob.subscribers.hamt(store).unwrap(); + let group = subscribers.get(&subscriber).unwrap().unwrap(); + assert_eq!(group.len(), 2); + + // Debit all the accounts at an epoch between the two expiries (3601-3621) + let debit_epoch = ChainEpoch::from(config.blob_min_ttl + 11); + let (deletes_from_disc, _) = state.debit_accounts(&store, config, debit_epoch).unwrap(); + assert!(deletes_from_disc.is_empty()); + + // Check the account balance + let account = state.get_account(&store, subscriber).unwrap().unwrap(); + assert_eq!(account.last_debit_epoch, debit_epoch); + assert_eq!( + account.credit_committed, // debit reduces this + Credit::from_whole((ttl2 - (debit_epoch - add2_epoch)) as u64 * size), + ); + assert_eq!(account.credit_free, credit_amount); // not changed + assert_eq!(account.capacity_used, size); // not changed + + // Check the subscription group + let blob = state.get_blob(&store, hash).unwrap().unwrap(); + let subscribers = blob.subscribers.hamt(&store).unwrap(); + let group = subscribers.get(&subscriber).unwrap().unwrap(); + assert_eq!(group.len(), 1); // the first subscription was deleted + + // Debit all the accounts at an epoch greater than group expiry (3621) + let debit_epoch = ChainEpoch::from(config.blob_min_ttl + 31); + let (deletes_from_disc, _) = state.debit_accounts(&store, config, debit_epoch).unwrap(); + assert!(!deletes_from_disc.is_empty()); // blob is marked for deletion + + // Check the account balance + let account = state.get_account(&store, subscriber).unwrap().unwrap(); + assert_eq!(account.last_debit_epoch, debit_epoch); + assert_eq!( + account.credit_committed, // the second debit reduces this to zero + Credit::from_whole(0), + ); + assert_eq!(account.credit_free, credit_amount); // not changed + assert_eq!(account.capacity_used, 0); + + // Check state + assert_eq!(state.credits.credit_committed, Credit::from_whole(0)); // credit was released + assert_eq!( + state.credits.credit_debited, + token_amount * &config.token_credit_rate - &account.credit_free + ); + assert_eq!(state.blobs.bytes_size(), 0); // capacity was released + + // Check indexes + assert_eq!(state.blobs.expiries.len(store).unwrap(), 0); + assert_eq!(state.blobs.added.len(), 0); + assert_eq!(state.blobs.pending.len(), 0); + + // Check approval + if using_approval { + check_approval_used(&state, store, caller, subscriber); + } +} + +#[test] +fn test_paginated_debit_accounts() { + let config = IPCStorageConfig { + account_debit_batch_size: 5, // Process 5 accounts at a time (10 accounts total) + ..Default::default() + }; + + let store = MemoryBlockstore::default(); + let mut state = State::new(&store).unwrap(); + let current_epoch = ChainEpoch::from(1); + + // Create more than one batch worth of accounts (>5) + for i in 0..10 { + let address = Address::new_id(1000 + i); + let token_amount = TokenAmount::from_whole(10); + + // Buy credits for each account + state + .buy_credit( + &store, + &config, + address, + token_amount.clone(), + current_epoch, + ) + .unwrap(); + + // Add some storage usage + let mut accounts = state.accounts.hamt(&store).unwrap(); + let mut account = accounts.get(&address).unwrap().unwrap(); + 
account.capacity_used = 1000; + accounts.set(&address, account).unwrap(); + } + + // First batch (should process 5 accounts) + assert!(state.accounts.get_debit_start_address().is_none()); + let (deletes1, _) = state + .debit_accounts(&store, &config, current_epoch + 1) + .unwrap(); + assert!(deletes1.is_empty()); // No expired blobs + assert!(state.accounts.get_debit_start_address().is_some()); + + // Second batch (should process remaining 5 accounts and clear state) + let (deletes2, _) = state + .debit_accounts(&store, &config, current_epoch + 1) + .unwrap(); + assert!(deletes2.is_empty()); + assert!(state.accounts.get_debit_start_address().is_none()); // The state should be cleared after all accounts processed + + // Verify all accounts were processed + let reader = state.accounts.hamt(&store).unwrap(); + reader + .for_each(|_, account| { + assert_eq!(account.last_debit_epoch, current_epoch + 1); + Ok(()) + }) + .unwrap(); +} + +#[test] +fn test_multiple_debit_cycles() { + let config = IPCStorageConfig { + account_debit_batch_size: 5, // Process 5 accounts at a time (10 accounts total) + ..Default::default() + }; + + let store = MemoryBlockstore::default(); + let mut state = State::new(&store).unwrap(); + let current_epoch = ChainEpoch::from(1); + + // Create accounts + for i in 0..10 { + let address = Address::new_id(1000 + i); + let token_amount = TokenAmount::from_whole(10); + state + .buy_credit( + &store, + &config, + address, + token_amount.clone(), + current_epoch, + ) + .unwrap(); + + let mut accounts = state.accounts.hamt(&store).unwrap(); + let mut account = accounts.get(&address).unwrap().unwrap(); + account.capacity_used = 1000; + accounts.set(&address, account).unwrap(); + } + + // First cycle + let (deletes1, _) = state + .debit_accounts(&store, &config, current_epoch + 1) + .unwrap(); + assert!(deletes1.is_empty()); + assert!(state.accounts.get_debit_start_address().is_some()); + + let (deletes2, _) = state + .debit_accounts(&store, &config, current_epoch + 1) + .unwrap(); + assert!(deletes2.is_empty()); + assert!(state.accounts.get_debit_start_address().is_none()); // First cycle complete + + // Second cycle + let (deletes3, _) = state + .debit_accounts(&store, &config, current_epoch + 2) + .unwrap(); + assert!(deletes3.is_empty()); + assert!(state.accounts.get_debit_start_address().is_some()); + + let (deletes4, _) = state + .debit_accounts(&store, &config, current_epoch + 2) + .unwrap(); + assert!(deletes4.is_empty()); + assert!(state.accounts.get_debit_start_address().is_none()); // Second cycle complete +} diff --git a/fendermint/actors/blobs/src/state/blobs.rs b/fendermint/actors/blobs/src/state/blobs.rs new file mode 100644 index 0000000000..5c7c90875c --- /dev/null +++ b/fendermint/actors/blobs/src/state/blobs.rs @@ -0,0 +1,20 @@ +// Copyright 2025 Recall Contributors +// Copyright 2021-2023 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT + +mod blob; +mod expiries; +mod methods; +mod params; +mod queue; +mod subscribers; +mod subscriptions; +#[cfg(test)] +mod tests; + +pub use blob::*; +pub use expiries::*; +pub use params::*; +pub use queue::*; +pub use subscribers::*; +pub use subscriptions::*; diff --git a/fendermint/actors/blobs/src/state/blobs/blob.rs b/fendermint/actors/blobs/src/state/blobs/blob.rs new file mode 100644 index 0000000000..efa8221ce3 --- /dev/null +++ b/fendermint/actors/blobs/src/state/blobs/blob.rs @@ -0,0 +1,454 @@ +// Copyright 2025 Recall Contributors +// Copyright 2022-2024 Protocol Labs +// SPDX-License-Identifier: 
Apache-2.0, MIT + +use std::collections::HashMap; + +use fendermint_actor_blobs_shared::blobs::SubscriptionId; +use fendermint_actor_blobs_shared::{ + self as shared, + blobs::{BlobStatus, Subscription}, + bytes::B256, +}; +use fil_actors_runtime::{runtime::Runtime, ActorError}; +use fvm_ipld_blockstore::Blockstore; +use fvm_ipld_encoding::tuple::*; +use fvm_shared::address::Address; +use fvm_shared::clock::ChainEpoch; +use ipc_storage_ipld::hamt::{self, map::TrackedFlushResult}; +use log::debug; + +use super::{ + AddBlobStateParams, BlobSource, Expiries, ExpiryUpdate, Queue, Subscribers, Subscriptions, +}; +use crate::caller::Caller; + +/// Represents the result of a blob upsert. +#[derive(Debug, Clone)] +pub struct UpsertBlobResult { + /// New or updated subscription. + pub subscription: Subscription, + /// New capacity used by the caller. + pub capacity_used: u64, + /// Duration for the new credit commitment. + pub commit_duration: ChainEpoch, + /// Duration for the returned credit commitment. + pub return_duration: ChainEpoch, +} + +/// The stored representation of a blob. +#[derive(Clone, PartialEq, Debug, Serialize_tuple, Deserialize_tuple)] +pub struct Blob { + /// The size of the content. + pub size: u64, + /// Blob metadata that contains information for blob recovery. + pub metadata_hash: B256, + /// Active subscribers (accounts) that are paying for the blob. + pub subscribers: Subscribers, + /// Blob status. + pub status: BlobStatus, +} + +impl Blob { + /// Returns a new [`Blob`]. + pub fn new( + store: &BS, + size: u64, + metadata_hash: B256, + ) -> Result { + Ok(Self { + size, + metadata_hash, + subscribers: Subscribers::new(store)?, + status: BlobStatus::Added, + }) + } + + /// Returns a [`shared::blobs::Blob`] that is safe to return from actor methods. + /// TODO: HAMTs should carry max expiry such that we don't full scan here. + pub fn to_shared(&self, rt: &impl Runtime) -> Result { + let store = rt.store(); + let mut subscribers = HashMap::new(); + self.subscribers.hamt(store)?.for_each(|_, group| { + group.hamt(store)?.for_each(|id, sub| { + subscribers.insert(id, sub.expiry); + Ok(()) + })?; + Ok(()) + })?; + Ok(shared::blobs::Blob { + size: self.size, + metadata_hash: self.metadata_hash, + subscribers, + status: self.status.clone(), + }) + } +} + +/// HAMT wrapper for blobs state. +#[derive(Debug, Serialize_tuple, Deserialize_tuple)] +pub struct Blobs { + /// The HAMT root. + pub root: hamt::Root, + /// Map of expiries to blob hashes. + pub expiries: Expiries, + /// Map of currently added blob hashes to account and source Iroh node IDs. + pub added: Queue, + /// Map of currently pending blob hashes to account and source Iroh node IDs. + pub pending: Queue, + /// Number of blobs in the collection. + /// A blob with multiple subscribers and/or subscriptions is only counted once. + size: u64, + /// Number of blob bytes in the collection. + /// A blob with multiple subscribers and/or subscriptions is only counted once. + bytes_size: u64, +} + +/// Return type used when getting and hydrating a blob. +#[derive(Debug)] +pub struct GetBlobResult { + /// The blob that was retrieved. + pub blob: Blob, + /// The blob's subscriber subscriptions. + pub subscriptions: Subscriptions, + /// The blob subscription. + pub subscription: Subscription, +} + +impl Blobs { + /// Returns a blob collection. 
+ pub fn new(store: &BS) -> Result { + let root = hamt::Root::::new(store, "blobs")?; + Ok(Self { + root, + expiries: Expiries::new(store)?, + added: Queue::new(store, "added blobs queue")?, + pending: Queue::new(store, "pending blobs queue")?, + size: 0, + bytes_size: 0, + }) + } + + /// Returns the underlying [`hamt::map::Hamt`]. + pub fn hamt<'a, BS: Blockstore>( + &self, + store: BS, + ) -> Result, ActorError> { + self.root.hamt(store, self.size) + } + + /// Saves the state from the [`TrackedFlushResult`]. + pub fn save_tracked(&mut self, tracked_flush_result: TrackedFlushResult) { + self.root = tracked_flush_result.root; + self.size = tracked_flush_result.size; + } + + /// Number of blobs in the collection. + /// A blob with multiple subscribers and/or subscriptions is only counted once. + pub fn len(&self) -> u64 { + self.size + } + + /// Returns true if the collection is empty. + pub fn is_empty(&self) -> bool { + self.size == 0 + } + + /// Returns the number of blob bytes in the collection. + /// A blob with multiple subscribers and/or subscriptions is only counted once. + pub fn bytes_size(&self) -> u64 { + self.bytes_size + } + + /// Sets subnet bytes capacity. + pub fn set_capacity(&mut self, size: u64) { + self.bytes_size = size; + } + + /// Releases subnet bytes capacity. + pub fn release_capacity(&mut self, size: u64) { + self.bytes_size = self.bytes_size.saturating_sub(size); + + debug!("released {} bytes to subnet", size); + } + + /// Retrieves a blob and subscription information for a given subscriber, blob hash, + /// and subscription ID. + /// + /// This function performs a series of lookups to locate both the requested blob and the + /// specific subscription to that blob for the subscriber: + /// 1. Retrieve the blob using its hash + /// 2. Confirm the subscriber is a valid subscriber to blob + /// 3. Locate the specific subscription by its ID + pub fn get_and_hydrate( + &self, + store: &BS, + subscriber: Address, + hash: B256, + id: &SubscriptionId, + ) -> Result, ActorError> { + let blobs_hamt = self.hamt(store)?; + + // Early return if the blob doesn't exist + let blob = match blobs_hamt.get(&hash)? { + Some(blob) => blob, + None => return Ok(None), + }; + + // Get subscriber's subscriptions + let subscribers_hamt = blob.subscribers.hamt(store)?; + let subscriptions = match subscribers_hamt.get(&subscriber)? { + Some(subscriptions) => subscriptions, + None => { + return Err(ActorError::forbidden(format!( + "subscriber {} is not subscribed to blob {}", + subscriber, hash + ))); + } + }; + + // Get the subscription by ID + let subscriptions_hamt = subscriptions.hamt(store)?; + let subscription = match subscriptions_hamt.get(id)? { + Some(subscription) => subscription, + None => { + return Err(ActorError::not_found(format!( + "subscription id {} not found", + id + ))); + } + }; + + Ok(Some(GetBlobResult { + blob, + subscriptions, + subscription, + })) + } + + /// Creates or updates a blob and subscription, managing all related state changes. + /// + /// This function performs several operations: + /// 1. Check if the blob exists and create it if not + /// 2. Add or update the caller's subscription to blob + /// 3. Update the blob's status to "Added" if it's not already resolved + /// 4. Update the blob source in the "added" queue + /// 5. Update expiry indexes for subscription + /// 6. 
Save all changes to storage + /// + /// The function handles both the creation of new blobs and updates to existing ones, + /// as well as managing subscriptions, expiries, and status tracking. + pub fn upsert( + &mut self, + store: &BS, + caller: &Caller, + params: &AddBlobStateParams, + expiry: ChainEpoch, + ) -> Result { + let mut blobs = self.hamt(store)?; + let (mut blob, blob_added) = if let Some(blob) = blobs.get(¶ms.hash)? { + (blob, false) + } else { + (Blob::new(store, params.size, params.metadata_hash)?, true) + }; + + // Add/update subscriber and the subscription + let result = blob.subscribers.upsert(store, caller, params, expiry)?; + + // Update blob status and added index if the blob is not already resolved + if !matches!(blob.status, BlobStatus::Resolved) { + // If failed, reset to added state + if matches!(blob.status, BlobStatus::Failed) { + blob.status = BlobStatus::Added; + } + + // Add to or update the source in the added queue + self.added.upsert( + store, + params.hash, + BlobSource::new( + caller.subscriber_address(), + params.id.clone(), + params.source, + ), + blob.size, + )?; + } + + // Update expiry index + let mut expiry_updates = vec![]; + if let Some(previous_expiry) = result.previous_subscription_expiry { + if previous_expiry != expiry { + expiry_updates.push(ExpiryUpdate::Remove(previous_expiry)); + expiry_updates.push(ExpiryUpdate::Add(expiry)); + } + } else { + expiry_updates.push(ExpiryUpdate::Add(expiry)); + } + self.expiries.update( + store, + caller.subscriber_address(), + params.hash, + ¶ms.id, + expiry_updates, + )?; + + self.save_tracked(blobs.set_and_flush_tracked(¶ms.hash, blob)?); + + // Update global state + if blob_added { + self.bytes_size = self.bytes_size.saturating_add(params.size); + + debug!("used {} bytes from subnet", params.size); + debug!("created new blob {}", params.hash); + } else { + debug!("used 0 bytes from subnet"); + } + + Ok(UpsertBlobResult { + subscription: result.subscription, + capacity_used: if result.subscriber_added { + params.size + } else { + 0 + }, + commit_duration: result.commit_duration, + return_duration: result.return_duration, + }) + } + + /// Saves all state changes from a blob retrieval operation. + /// + /// This function updates multiple related data structures after a blob has been retrieved: + /// 1. Update the subscription state in subscriptions collection + /// 2. Update the subscription list for subscriber + /// 3. Update the blob entry in the blobs HAMT + /// + /// This function ensures that all state changes from a blob retrieval operation are + /// saved atomically, maintaining data consistency across the different collections. + pub fn save_result( + &mut self, + store: &BS, + subscriber: Address, + hash: B256, + id: &SubscriptionId, + blob: &mut GetBlobResult, + ) -> Result<(), ActorError> { + blob.subscriptions + .save_subscription(store, id, blob.subscription.clone())?; + + blob.blob + .subscribers + .save_subscriptions(store, subscriber, blob.subscriptions.clone())?; + + let mut blobs = self.hamt(store)?; + self.save_tracked(blobs.set_and_flush_tracked(&hash, blob.blob.clone())?); + + Ok(()) + } + + /// Deletes a subscription to a blob for a specific caller and returns whether the blob was + /// also deleted. + /// + /// This function removes a specific subscription identified by `id` for the given `caller` to + /// the blob identified by `hash`. It performs multiple cleanup operations: + /// 1. Update the expiry index by removing the subscription's expiry entry + /// 2. 
Remove the blob source from the "added" queue + /// 3. Remove the blob source from the "pending" queue + /// 4. Delete the subscription from the subscriber's subscriptions + /// 5. If the subscriber has no remaining subscriptions to the blob, remove subscriber + /// 6. If no subscribers remain for the blob, delete the blob entirely + pub fn delete_subscription( + &mut self, + store: &BS, + caller: &Caller, + hash: B256, + id: SubscriptionId, + blob_result: &mut GetBlobResult, + ) -> Result { + // Update expiry index + self.expiries.update( + store, + caller.subscriber_address(), + hash, + &id, + vec![ExpiryUpdate::Remove(blob_result.subscription.expiry)], + )?; + + // Remove the source from the added queue + self.added.remove_source( + store, + &hash, + blob_result.blob.size, + BlobSource::new( + caller.subscriber_address(), + id.clone(), + blob_result.subscription.source, + ), + )?; + + // Remove the source from the pending queue + self.pending.remove_source( + store, + &hash, + blob_result.blob.size, + BlobSource::new( + caller.subscriber_address(), + id.clone(), + blob_result.subscription.source, + ), + )?; + + // Delete subscription + let mut subscriptions_hamt = blob_result.subscriptions.hamt(store)?; + blob_result + .subscriptions + .save_tracked(subscriptions_hamt.delete_and_flush_tracked(&id)?.0); + debug!( + "deleted subscription to blob {} for {} (key: {})", + hash, + caller.subscriber_address(), + id + ); + + // Delete the group if empty + let mut blobs_hamt = self.hamt(store)?; + let mut subscribers_hamt = blob_result.blob.subscribers.hamt(store)?; + let blob_deleted = if blob_result.subscriptions.is_empty() { + blob_result.blob.subscribers.save_tracked( + subscribers_hamt + .delete_and_flush_tracked(&caller.subscriber_address())? + .0, + ); + debug!( + "deleted subscriber {} to blob {}", + caller.subscriber_address(), + hash + ); + + // Delete or update blob + let blob_deleted = blob_result.blob.subscribers.is_empty(); + if blob_deleted { + self.save_tracked(blobs_hamt.delete_and_flush_tracked(&hash)?.0); + debug!("deleted blob {}", hash); + } else { + self.save_tracked( + blobs_hamt.set_and_flush_tracked(&hash, blob_result.blob.clone())?, + ); + } + blob_deleted + } else { + blob_result + .blob + .subscribers + .save_tracked(subscribers_hamt.set_and_flush_tracked( + &caller.subscriber_address(), + blob_result.subscriptions.clone(), + )?); + self.save_tracked(blobs_hamt.set_and_flush_tracked(&hash, blob_result.blob.clone())?); + false + }; + + Ok(blob_deleted) + } +} diff --git a/fendermint/actors/blobs/src/state/blobs/expiries.rs b/fendermint/actors/blobs/src/state/blobs/expiries.rs new file mode 100644 index 0000000000..5756e65592 --- /dev/null +++ b/fendermint/actors/blobs/src/state/blobs/expiries.rs @@ -0,0 +1,572 @@ +// Copyright 2025 Recall Contributors +// Copyright 2022-2024 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT + +use std::fmt::Display; + +use fendermint_actor_blobs_shared::{blobs::SubscriptionId, bytes::B256}; +use fil_actors_runtime::ActorError; +use fvm_ipld_blockstore::Blockstore; +use fvm_ipld_encoding::{tuple::*, RawBytes}; +use fvm_shared::{address::Address, clock::ChainEpoch}; +use ipc_storage_ipld::{ + amt::{self, vec::TrackedFlushResult}, + hamt::{self, MapKey}, +}; +use log::debug; + +/// Key used to namespace subscriptions in the expiry index. +#[derive(Clone, Debug, Hash, PartialEq, Eq, Serialize_tuple, Deserialize_tuple)] +pub struct ExpiryKey { + /// Key hash. + pub hash: B256, + /// Key subscription ID. 
+ pub id: SubscriptionId, +} + +impl Display for ExpiryKey { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "ExpiryKey(hash: {}, id: {})", self.hash, self.id) + } +} + +impl MapKey for ExpiryKey { + fn from_bytes(b: &[u8]) -> Result { + let raw_bytes = RawBytes::from(b.to_vec()); + fil_actors_runtime::cbor::deserialize(&raw_bytes, "ExpiryKey") + .map_err(|e| format!("Failed to deserialize ExpiryKey {}", e)) + } + + fn to_bytes(&self) -> Result, String> { + let raw_bytes = fil_actors_runtime::cbor::serialize(self, "ExpiryKey") + .map_err(|e| format!("Failed to serialize ExpiryKey {}", e))?; + Ok(raw_bytes.to_vec()) + } +} + +impl ExpiryKey { + /// Create a new expiry key. + pub fn new(hash: B256, id: &SubscriptionId) -> Self { + Self { + hash, + id: id.clone(), + } + } +} + +/// Type used as the root of [`Expiries`]. +type ExpiriesRoot = hamt::Root>; + +/// AMT wrapper for expiry index state. +#[derive(Debug, Clone, Serialize_tuple, Deserialize_tuple)] +pub struct Expiries { + /// The AMT root. + pub root: amt::Root, + /// Index marker for pagination. + /// When present, iteration starts from this index. + /// Otherwise, iteration begins from the first entry. + /// Used for efficient traversal during blob expiration. + next_index: Option, +} + +impl Expiries { + /// Returns a new expiry collection. + pub fn new(store: &BS) -> Result { + let root = amt::Root::::new(store)?; + Ok(Self { + root, + next_index: None, + }) + } + + /// Returns the underlying [`amt::vec::Amt`]. + pub fn amt<'a, BS: Blockstore>( + &self, + store: BS, + ) -> Result, ActorError> { + self.root.amt(store) + } + + /// Saves the state from the [`TrackedFlushResult`]. + pub fn save_tracked(&mut self, tracked_flush_result: TrackedFlushResult) { + self.root = tracked_flush_result.root; + } + + /// The size of the collection. + pub fn len(&self, store: BS) -> Result { + Ok(self.root.amt(store)?.count()) + } + + /// Iterates the collection up to the given epoch. + pub fn foreach_up_to_epoch( + &mut self, + store: BS, + epoch: ChainEpoch, + batch_size: Option, + mut f: F, + ) -> Result<(), ActorError> + where + F: FnMut(ChainEpoch, Address, ExpiryKey) -> Result<(), ActorError>, + { + let expiries = self.amt(&store)?; + + debug!( + "walking blobs up to epoch {} (next_index: {:?})", + epoch, self.next_index + ); + + let (_, next_idx) = expiries.for_each_while_ranged( + self.next_index, + batch_size, + |index, per_chain_epoch_root| { + if index > epoch as u64 { + return Ok(false); + } + let per_chain_epoch_hamt = per_chain_epoch_root.hamt(&store, 0)?; + per_chain_epoch_hamt.for_each(|address, per_address_root| { + let per_address_hamt = per_address_root.hamt(&store, 0)?; + per_address_hamt.for_each(|expiry_key, _| f(index as i64, address, expiry_key)) + })?; + Ok(true) + }, + )?; + self.next_index = next_idx.filter(|&idx| idx <= epoch as u64); + + debug!("walked blobs (next_index: {:?})", self.next_index,); + + Ok(()) + } + + /// Updates the collection by applying the list of [`ExpiryUpdate`]s. + pub fn update( + &mut self, + store: BS, + subscriber: Address, + hash: B256, + id: &SubscriptionId, + updates: Vec, + ) -> Result<(), ActorError> { + if updates.is_empty() { + return Ok(()); + } + + let mut expiries = self.amt(&store)?; + for update in updates { + match update { + ExpiryUpdate::Add(chain_epoch) => { + // You cannot do get_or_create here: it expects value, we give it Result> + let per_chain_epoch_root = + if let Some(per_chain_epoch_root) = expiries.get(chain_epoch as u64)? 
{ + per_chain_epoch_root + } else { + hamt::Root::>::new( + &store, + &Expiries::store_name_per_root(chain_epoch), + )? + }; + // The size does not matter + let mut per_chain_epoch_hamt = per_chain_epoch_root.hamt(&store, 1)?; + // You cannot do get_or_create here: it expects value, we give it Result> + let per_address_root = + if let Some(per_address_root) = per_chain_epoch_hamt.get(&subscriber)? { + per_address_root + } else { + hamt::Root::::new( + &store, + &Expiries::store_name_per_address(chain_epoch, &subscriber), + )? + }; + let mut per_address_hamt = per_address_root.hamt(&store, 1)?; // The size does not matter here + let expiry_key = ExpiryKey::new(hash, id); + let per_address_root = per_address_hamt.set_and_flush(&expiry_key, ())?; + let per_chain_epoch_root = + per_chain_epoch_hamt.set_and_flush(&subscriber, per_address_root)?; + self.save_tracked( + expiries.set_and_flush_tracked(chain_epoch as u64, per_chain_epoch_root)?, + ); + } + ExpiryUpdate::Remove(chain_epoch) => { + if let Some(mut per_chain_epoch_root) = expiries.get(chain_epoch as u64)? { + let mut per_chain_epoch_hamt = per_chain_epoch_root.hamt(&store, 1)?; // The size does not matter here + if let Some(mut per_address_root) = per_chain_epoch_hamt.get(&subscriber)? { + let mut per_address_hamt = per_address_root.hamt(&store, 1)?; // The size does not matter here + let expiry_key = ExpiryKey::new(hash, id); + (per_address_root, _) = + per_address_hamt.delete_and_flush(&expiry_key)?; + if per_address_hamt.is_empty() { + (per_chain_epoch_root, _) = + per_chain_epoch_hamt.delete_and_flush(&subscriber)?; + } else { + per_chain_epoch_root = per_chain_epoch_hamt + .set_and_flush(&subscriber, per_address_root)?; + } + } + if per_chain_epoch_hamt.is_empty() { + self.save_tracked( + expiries.delete_and_flush_tracked(chain_epoch as u64)?, + ); + } else { + self.save_tracked( + expiries.set_and_flush_tracked( + chain_epoch as u64, + per_chain_epoch_root, + )?, + ); + } + } + } + } + } + Ok(()) + } + + /// Returns the store display name. + fn store_name() -> String { + "expiries".to_string() + } + + /// Returns the store display name for a root. + fn store_name_per_root(chain_epoch: ChainEpoch) -> String { + format!("{}.{}", Expiries::store_name(), chain_epoch) + } + + /// Returns the store display name for an address. + fn store_name_per_address(chain_epoch: ChainEpoch, address: &Address) -> String { + format!("{}.{}", Expiries::store_name_per_root(chain_epoch), address) + } +} + +/// Helper enum for expiry updates. +pub enum ExpiryUpdate { + /// Entry to add. + Add(ChainEpoch), + /// Entry to remove. 
+ Remove(ChainEpoch), +} + +#[cfg(test)] +mod tests { + use super::*; + + use fendermint_actor_blobs_testing::{new_address, new_hash}; + use fvm_ipld_blockstore::MemoryBlockstore; + + #[test] + fn test_expiries_foreach_up_to_epoch() { + let store = MemoryBlockstore::default(); + let mut state = Expiries::new(&store).unwrap(); + + let addr = new_address(); + let mut hashes = vec![]; + for i in 1..=100 { + let (hash, _) = new_hash(1024); + state + .update( + &store, + addr, + hash, + &SubscriptionId::default(), + vec![ExpiryUpdate::Add(i)], + ) + .unwrap(); + hashes.push(hash); + } + assert_eq!(state.len(&store).unwrap(), 100); + + let mut range = vec![]; + state + .foreach_up_to_epoch(&store, 10, None, |chain_epoch, _, _| { + range.push(chain_epoch); + Ok(()) + }) + .unwrap(); + assert_eq!(range.len(), 10); + + // Remove an element to test against a sparse state + let remove_epoch = 5; + let hash = hashes[remove_epoch - 1]; + state + .update( + &store, + addr, + hash, + &SubscriptionId::default(), + vec![ExpiryUpdate::Remove(remove_epoch as ChainEpoch)], + ) + .unwrap(); + assert_eq!(state.len(&store).unwrap(), 99); + + let mut range = vec![]; + state + .foreach_up_to_epoch(&store, 10, None, |chain_epoch, _, _| { + range.push(chain_epoch); + Ok(()) + }) + .unwrap(); + assert_eq!(range.len(), 9); + } + + #[test] + fn test_expiries_pagination() { + let store = MemoryBlockstore::default(); + let mut state = Expiries::new(&store).unwrap(); + let addr = new_address(); + + // Create expiries at epochs 1,2,4,7,8,10 + for i in &[1, 2, 4, 7, 8, 10] { + let (hash, _) = new_hash(1024); + state + .update( + &store, + addr, + hash, + &SubscriptionId::default(), + vec![ExpiryUpdate::Add(*i as ChainEpoch)], + ) + .unwrap(); + } + + // Process with batch size 2 + let mut processed = vec![]; + let mut done = false; + while !done { + state + .foreach_up_to_epoch(&store, 10, Some(2), |epoch, _, _| { + processed.push(epoch); + Ok(()) + }) + .unwrap(); + done = state.next_index.is_none(); + } + + // Should get all epochs in order, despite gaps + assert_eq!(processed, vec![1, 2, 4, 7, 8, 10]); + } + + #[test] + fn test_expiries_pagination_with_mutations() { + let store = MemoryBlockstore::default(); + let mut state = Expiries::new(&store).unwrap(); + let addr = new_address(); + let current_epoch = 100; + + // Initial set: 110,120,130,140,150 + let mut hashes = vec![]; + for ttl in (10..=50).step_by(10) { + let (hash, _) = new_hash(1024); + state + .update( + &store, + addr, + hash, + &SubscriptionId::default(), + vec![ExpiryUpdate::Add(current_epoch + ttl)], + ) + .unwrap(); + hashes.push(hash); + } + + let mut processed = vec![]; + + // Process first batch (110,120) + state + .foreach_up_to_epoch(&store, 150, Some(2), |epoch, _, _| { + processed.push(epoch); + Ok(()) + }) + .unwrap(); + assert_eq!(processed, vec![110, 120]); + + // Add new expiry at 135 + let (hash, _) = new_hash(1024); + state + .update( + &store, + addr, + hash, + &SubscriptionId::default(), + vec![ExpiryUpdate::Add(current_epoch + 35)], + ) + .unwrap(); + + // Remove expiry at 140 + let hash = hashes[3]; + state + .update( + &store, + addr, + hash, + &SubscriptionId::default(), + vec![ExpiryUpdate::Remove(current_epoch + 40)], + ) + .unwrap(); + + // Process remaining epochs + while state.next_index.is_some() { + state + .foreach_up_to_epoch(&store, 150, Some(2), |epoch, _, _| { + processed.push(epoch); + Ok(()) + }) + .unwrap(); + } + + // Should get all expiries in order, with 140 removed and 135 added + assert_eq!(processed, vec![110, 
120, 130, 135, 150]); + } + + #[test] + fn test_expiries_pagination_with_expiry_update() { + let store = MemoryBlockstore::default(); + let mut state = Expiries::new(&store).unwrap(); + let addr = new_address(); + let current_epoch = 100; + + // Initial set: add blobs with ttl 10,20,30,40,50 + let mut hashes = vec![]; + for ttl in (10..=50).step_by(10) { + let (hash, _) = new_hash(1024); + let expiry = current_epoch + ttl; + state + .update( + &store, + addr, + hash, + &SubscriptionId::default(), + vec![ExpiryUpdate::Add(expiry)], + ) + .unwrap(); + hashes.push(hash); + } + + let mut processed = vec![]; + + // Process the first two expiries (110,120) + state + .foreach_up_to_epoch(&store, 150, Some(2), |epoch, _, _| { + processed.push(epoch); + Ok(()) + }) + .unwrap(); + assert_eq!(processed, vec![110, 120]); + + // Extend the expiry of the blob at 130 to 145 (can only extend, not reduce) + let hash = hashes[2]; // blob with ttl 30 + state + .update( + &store, + addr, + hash, + &SubscriptionId::default(), + vec![ + ExpiryUpdate::Remove(current_epoch + 30), // remove 130 + ExpiryUpdate::Add(current_epoch + 45), // add 145 (extended) + ], + ) + .unwrap(); + + // Process remaining epochs - should see updated expiry + while state.next_index.is_some() { + state + .foreach_up_to_epoch(&store, 150, Some(2), |epoch, _, _| { + processed.push(epoch); + Ok(()) + }) + .unwrap(); + } + + // Should get all expiries in chronological order, with 130 replaced by 145 + assert_eq!(processed, vec![110, 120, 140, 145, 150]); + } + + #[test] + fn test_expiries_pagination_with_multiple_subscribers() { + let store = MemoryBlockstore::default(); + let mut state = Expiries::new(&store).unwrap(); + let addr1 = new_address(); + let addr2 = new_address(); + + // Add multiple blobs expiring at the same epochs + // addr1: two blobs expiring at 110, one at 120 + // addr2: one blob expiring at 110, two at 130 + let mut entries = vec![]; + + // addr1's blobs + for _ in 0..2 { + let (hash, _) = new_hash(1024); + state + .update( + &store, + addr1, + hash, + &SubscriptionId::default(), + vec![ExpiryUpdate::Add(110)], + ) + .unwrap(); + entries.push((110, addr1, hash)); + } + let (hash, _) = new_hash(1024); + state + .update( + &store, + addr1, + hash, + &SubscriptionId::default(), + vec![ExpiryUpdate::Add(120)], + ) + .unwrap(); + entries.push((120, addr1, hash)); + + // addr2's blobs + let (hash, _) = new_hash(1024); + state + .update( + &store, + addr2, + hash, + &SubscriptionId::default(), + vec![ExpiryUpdate::Add(110)], + ) + .unwrap(); + entries.push((110, addr2, hash)); + + for _ in 0..2 { + let (hash, _) = new_hash(1024); + state + .update( + &store, + addr2, + hash, + &SubscriptionId::default(), + vec![ExpiryUpdate::Add(130)], + ) + .unwrap(); + entries.push((130, addr2, hash)); + } + + let mut processed = vec![]; + let mut done = false; + + // Process all entries with batch size 2 + while !done { + state + .foreach_up_to_epoch(&store, 150, Some(2), |epoch, subscriber, key| { + processed.push((epoch, subscriber, key.hash)); + Ok(()) + }) + .unwrap(); + done = state.next_index.is_none(); + } + + // Should get all entries, with multiple entries per epoch + assert_eq!(processed.len(), 6); // Total number of blob expiries + + // Verify we got all entries at epoch 110 + let epoch_110 = processed.iter().filter(|(e, _, _)| *e == 110).count(); + assert_eq!(epoch_110, 3); // 2 from addr1, 1 from addr2 + + // Verify we got all entries at epoch 130 + let epoch_130 = processed.iter().filter(|(e, _, _)| *e == 130).count(); + 
assert_eq!(epoch_130, 2); // Both from addr2 + } +} diff --git a/fendermint/actors/blobs/src/state/blobs/methods.rs b/fendermint/actors/blobs/src/state/blobs/methods.rs new file mode 100644 index 0000000000..64b5c30823 --- /dev/null +++ b/fendermint/actors/blobs/src/state/blobs/methods.rs @@ -0,0 +1,748 @@ +// Copyright 2025 Recall Contributors +// Copyright 2021-2023 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT + +use std::error::Error; +use std::str::from_utf8; + +use fendermint_actor_blobs_shared::{ + blobs::{BlobRequest, BlobStatus, Subscription, SubscriptionId}, + bytes::B256, + credit::Credit, +}; +use fendermint_actor_ipc_storage_config_shared::IPCStorageConfig; +use fil_actors_runtime::ActorError; +use fvm_ipld_blockstore::Blockstore; +use fvm_shared::{ + address::Address, bigint::BigInt, clock::ChainEpoch, econ::TokenAmount, error::ExitCode, +}; +use ipc_storage_ipld::hamt::BytesKey; +use log::debug; +use num_traits::Zero; + +use super::{ + AddBlobStateParams, Blob, BlobSource, DeleteBlobStateParams, FinalizeBlobStateParams, + SetPendingBlobStateParams, +}; +use crate::{caller::Caller, state::credit::CommitCapacityParams, State}; + +/// Return type for blob queues. +type BlobSourcesResult = Result, ActorError>; + +impl State { + /// Adds or updates a blob subscription. + /// + /// This method handles the entire process of adding a new blob or updating an existing + /// blob subscription, including + /// - Managing subscriber and sponsorship relationships + /// - Handling blob creation or update + /// - Processing subscription groups and expiry tracking + /// - Managing capacity accounting and credit commitments + /// - Updating blob status and indexing + /// + /// Flushes state to the blockstore. + pub fn add_blob( + &mut self, + store: &BS, + config: &IPCStorageConfig, + caller: Address, + sponsor: Option
<Address>,
+        params: AddBlobStateParams,
+    ) -> Result<(Subscription, TokenAmount), ActorError> {
+        self.ensure_capacity(config.blob_capacity)?;
+
+        // Get or create a new account
+        let mut accounts = self.accounts.hamt(store)?;
+        let mut caller = Caller::load_or_create(
+            store,
+            &accounts,
+            caller,
+            sponsor,
+            params.epoch,
+            config.blob_default_ttl,
+        )?;
+
+        // Validate the TTL
+        let ttl = caller.validate_ttl_usage(config, params.ttl)?;
+        let expiry = params.epoch.saturating_add(ttl);
+
+        // Get or create a new blob
+        let result = self.blobs.upsert(store, &caller, &params, expiry)?;
+
+        // Determine credit commitments
+        let credit_return = self.get_storage_cost(result.return_duration, &params.size);
+        if credit_return.is_positive() {
+            self.return_committed_credit_for_caller(&mut caller, &credit_return);
+        }
+        let credit_required = self.get_storage_cost(result.commit_duration, &params.size);
+
+        // Account capacity is changing, debit for existing usage
+        self.debit_caller(&mut caller, params.epoch);
+
+        // Account for new size and commit credit
+        let token_rebate = if credit_required.is_positive() {
+            self.commit_capacity_for_caller(
+                &mut caller,
+                config,
+                CommitCapacityParams {
+                    size: result.capacity_used,
+                    cost: credit_required,
+                    value: params.token_amount,
+                    epoch: params.epoch,
+                },
+            )?
+        } else if credit_required.is_negative() {
+            self.release_capacity_for_caller(&mut caller, 0, &-credit_required);
+            params.token_amount
+        } else {
+            params.token_amount
+        };
+
+        // Save caller
+        self.save_caller(&mut caller, &mut accounts)?;
+
+        Ok((result.subscription, token_rebate))
+    }
+
+    /// Returns a [`Blob`] by hash.
+    pub fn get_blob<BS: Blockstore>(
+        &self,
+        store: &BS,
+        hash: B256,
+    ) -> Result<Option<Blob>, ActorError> {
+        let blobs = self.blobs.hamt(store)?;
+        blobs.get(&hash)
+    }
+
+    /// Returns [`BlobStatus`] by hash.
+    pub fn get_blob_status<BS: Blockstore>(
+        &self,
+        store: &BS,
+        subscriber: Address,
+        hash: B256,
+        id: SubscriptionId,
+    ) -> Result<Option<BlobStatus>, ActorError> {
+        let blob = if let Some(blob) = self
+            .blobs
+            .hamt(store)
+            .ok()
+            .and_then(|blobs| blobs.get(&hash).ok())
+            .flatten()
+        {
+            blob
+        } else {
+            return Ok(None);
+        };
+
+        let subscribers = blob.subscribers.hamt(store)?;
+        if subscribers.contains_key(&subscriber)? {
+            match blob.status {
+                BlobStatus::Added => Ok(Some(BlobStatus::Added)),
+                BlobStatus::Pending => Ok(Some(BlobStatus::Pending)),
+                BlobStatus::Resolved => Ok(Some(BlobStatus::Resolved)),
+                BlobStatus::Failed => {
+                    // The blob state's status may have been finalized as failed by another
+                    // subscription.
+                    // We need to see if this specific subscription failed.
+                    let subscriptions = subscribers.get(&subscriber)?.unwrap(); // safe here
+                    if let Some(sub) = subscriptions.hamt(store)?.get(&id)? {
+                        if sub.failed {
+                            Ok(Some(BlobStatus::Failed))
+                        } else {
+                            Ok(Some(BlobStatus::Pending))
+                        }
+                    } else {
+                        Ok(None)
+                    }
+                }
+            }
+        } else {
+            Ok(None)
+        }
+    }
+
+    /// Retrieves a page of newly added blobs that need to be resolved.
+    ///
+    /// This method fetches blobs from the "added" queue, which contains blobs that have been
+    /// added to the system but haven't yet been successfully resolved and stored.
+    pub fn get_added_blobs<BS: Blockstore>(&self, store: &BS, size: u32) -> BlobSourcesResult {
+        let blobs = self.blobs.hamt(store)?;
+        self.blobs
+            .added
+            .take_page(store, size)?
+            .into_iter()
+            .map(|(hash, sources)| {
+                let blob = blobs
+                    .get(&hash)?
+ .ok_or_else(|| ActorError::not_found(format!("blob {} not found", hash)))?; + Ok((hash, blob.size, sources)) + }) + .collect() + } + + /// Retrieves a page of blobs that are pending resolve. + /// + /// This method fetches blobs from the "pending" queue, which contains blobs that are + /// actively being resolved but are still in a pending state. + pub fn get_pending_blobs(&self, store: &BS, size: u32) -> BlobSourcesResult { + let blobs = self.blobs.hamt(store)?; + self.blobs + .pending + .take_page(store, size)? + .into_iter() + .map(|(hash, sources)| { + let blob = blobs + .get(&hash)? + .ok_or_else(|| ActorError::not_found(format!("blob {} not found", hash)))?; + Ok((hash, blob.size, sources)) + }) + .collect() + } + + /// Marks a blob as being in the pending resolution state. + /// + /// This method transitions a blob from 'added' to 'pending' state, indicating that its + /// resolution process has started. It updates the blob's status and moves it from the + /// 'added' queue to the 'pending' queue. + /// + /// Flushes state to the blockstore. + pub fn set_blob_pending( + &mut self, + store: &BS, + subscriber: Address, + params: SetPendingBlobStateParams, + ) -> Result<(), ActorError> { + // Get the blob + let mut blob = match self + .blobs + .get_and_hydrate(store, subscriber, params.hash, ¶ms.id) + { + Ok(Some(result)) => result, + Ok(None) => { + // Blob might have been deleted already + // Remove the entire blob entry from the added queue + self.blobs + .added + .remove_entry(store, ¶ms.hash, params.size)?; + return Ok(()); + } + Err(err) + if err.exit_code() == ExitCode::USR_FORBIDDEN + || err.exit_code() == ExitCode::USR_NOT_FOUND => + { + // Blob might not be accessible (forbidden or not found) + // Remove the source from the added queue + self.blobs.added.remove_source( + store, + ¶ms.hash, + params.size, + BlobSource::new(subscriber, params.id.clone(), params.source), + )?; + return Ok(()); + } + Err(err) => return Err(err), + }; + + // Check the current status + match blob.blob.status { + BlobStatus::Resolved => { + // Blob is already finalized as resolved. + // Remove the entire blob entry from the added queue + self.blobs + .added + .remove_entry(store, ¶ms.hash, params.size)?; + return Ok(()); + } + BlobStatus::Failed => { + return Err(ActorError::illegal_state(format!( + "blob {} cannot be set to pending from status failed", + params.hash + ))); + } + _ => {} + } + + // Check if the blob's size matches the size provided when it was added + if blob.blob.size != params.size { + return Err(ActorError::assertion_failed(format!( + "blob {} size mismatch (expected: {}; actual: {})", + params.hash, params.size, blob.blob.size + ))); + } + + // Update status + blob.blob.status = BlobStatus::Pending; + + // Add the source to the pending queue + self.blobs.pending.upsert( + store, + params.hash, + BlobSource::new(subscriber, params.id.clone(), params.source), + params.size, + )?; + + // Remove the source from the added queue + self.blobs.added.remove_source( + store, + ¶ms.hash, + params.size, + BlobSource::new(subscriber, params.id.clone(), params.source), + )?; + + // Save blob + self.blobs + .save_result(store, subscriber, params.hash, ¶ms.id, &mut blob)?; + + debug!("set blob {} to pending", params.hash); + + Ok(()) + } + + /// Finalizes a blob's resolution process with a success or failure status. + /// + /// This method completes the blob resolution process by setting its final status + /// (resolved or failed). 
For failed blobs, it handles refunding of credits and capacity + /// reclamation as needed. The method also removes the blob from the pending queue. + /// + /// Flushes state to the blockstore. + pub fn finalize_blob( + &mut self, + store: &BS, + subscriber: Address, + params: FinalizeBlobStateParams, + ) -> Result { + // Validate incoming status + if matches!(params.status, BlobStatus::Added | BlobStatus::Pending) { + return Err(ActorError::illegal_state(format!( + "cannot finalize blob {} as added or pending", + params.hash + ))); + } + + // Get the blob + let mut blob = match self + .blobs + .get_and_hydrate(store, subscriber, params.hash, ¶ms.id) + { + Ok(Some(result)) => result, + Ok(None) => { + debug!("blob not found {} (id: {})", params.hash, params.id); + // Blob might have been deleted already + // Remove the entire blob entry from the pending queue + self.blobs + .pending + .remove_entry(store, ¶ms.hash, params.size)?; + return Ok(false); + } + Err(err) + if err.exit_code() == ExitCode::USR_FORBIDDEN + || err.exit_code() == ExitCode::USR_NOT_FOUND => + { + debug!("blob error {} {} (id: {})", params.hash, err, params.id); + // Blob might not be accessible (forbidden or not found) + // Remove the entire blob entry from the pending queue + self.blobs.pending.remove_source( + store, + ¶ms.hash, + params.size, + BlobSource::new(subscriber, params.id.clone(), params.source), + )?; + return Ok(false); + } + Err(err) => return Err(err), + }; + + // Check the current status + if blob.blob.status == BlobStatus::Resolved { + // Blob is already finalized as resolved. + // We can ignore later finalizations, even if they are failed. + // Remove from any queue it might be in + self.blobs + .added + .remove_entry(store, ¶ms.hash, blob.blob.size)?; + self.blobs + .pending + .remove_entry(store, ¶ms.hash, blob.blob.size)?; + return Ok(false); + } + + // Check if the blob's size matches the size provided when it was added + if blob.blob.size != params.size { + return Err(ActorError::assertion_failed(format!( + "blob {} size mismatch (expected: {}; actual: {})", + params.hash, params.size, blob.blob.size + ))); + } + + // Load the caller account and delegation. + let mut accounts = self.accounts.hamt(store)?; + let mut caller = Caller::load( + store, + &accounts, + blob.subscription.delegate.unwrap_or(subscriber), + blob.subscription.delegate.map(|_| subscriber), + )?; + + // Update blob status + blob.blob.status = params.status.clone(); + // if matches!(blob.blob.status, BlobStatus::Failed) && !blob.subscription.failed { + // // Mark the subscription as failed + // blob.subscription.failed = true; + + // // We're not going to make a debit, but we need to refund any spent credits that may + // // have been used on this group in the event the last debit is later than the + // // added epoch. + // let (group_expiry, new_group_expiry) = + // blob.subscriptions + // .max_expiries(store, ¶ms.id, Some(0))?; + // let (sub_is_min_added, next_min_added) = + // blob.subscriptions.is_min_added(store, ¶ms.id)?; + // let last_debit_epoch = caller.subscriber().last_debit_epoch; + // if last_debit_epoch > blob.subscription.added && sub_is_min_added { + // // The refund extends up to either the next minimum added epoch that is less + // // than the last debit epoch, or the last debit epoch. 
+ // let refund_end = if let Some(next_min_added) = next_min_added { + // next_min_added.min(blob.subscription.expiry) + // } else { + // last_debit_epoch + // }; + // let refund_credits = self.get_storage_cost( + // refund_end - (blob.subscription.added - blob.subscription.overlap), + // &blob.blob.size, + // ); + // let group_expiry = group_expiry.unwrap(); // safe here + // let correction_credits = if refund_end > group_expiry { + // self.get_storage_cost(refund_end - group_expiry, &blob.blob.size) + // } else { + // Credit::zero() + // }; + // self.refund_caller(&mut caller, &refund_credits, &correction_credits); + // } + + // // Account for reclaimed size and move committed credit to free credit + // self.release_capacity_for_subnet_and_caller( + // &mut caller, + // group_expiry, + // new_group_expiry, + // blob.blob.size, + // blob.blob.subscribers.len(), + // ); + // } + + // Remove the source from both added and pending queues + // (blob may be finalized directly from added status without going through pending) + // Use blob.subscription.source (what was stored) not params.source (what gateway sends) + self.blobs.added.remove_source( + store, + ¶ms.hash, + blob.blob.size, + BlobSource::new(subscriber, params.id.clone(), blob.subscription.source), + )?; + self.blobs.pending.remove_source( + store, + ¶ms.hash, + blob.blob.size, + BlobSource::new(subscriber, params.id.clone(), blob.subscription.source), + )?; + + // Save blob + self.blobs.save_result( + store, + caller.subscriber_address(), + params.hash, + ¶ms.id, + &mut blob, + )?; + + // Save accounts + self.save_caller(&mut caller, &mut accounts)?; + + debug!("finalized blob {} to status {}", params.hash, params.status); + + Ok(true) + } + + /// Deletes a blob subscription or the entire blob if it has no remaining subscriptions. + /// + /// This method handles the process of deleting a blob subscription for a specific caller, + /// which may include: + /// - Removing the caller's subscription from the blob's subscriber list + /// - Refunding unused storage credits to the subscriber + /// - Releasing committed capacity from the subscriber's account + /// - Removing the blob entirely if no subscriptions remain + /// - Cleaning up related queue entries and indexes + /// + /// Flushes state to the blockstore. + pub fn delete_blob( + &mut self, + store: &BS, + caller: Address, + sponsor: Option
, + params: DeleteBlobStateParams, + ) -> Result<(bool, u64, bool), ActorError> { + // Load the caller account and delegation. + let mut accounts = self.accounts.hamt(store)?; + let mut caller = Caller::load(store, &accounts, caller, sponsor)?; + caller.validate_delegate_expiration(params.epoch)?; + + // Get the blob + let mut blob = match self.blobs.get_and_hydrate( + store, + caller.subscriber_address(), + params.hash, + ¶ms.id, + )? { + Some(result) => result, + None => { + // We could error here, but since this method is called from other actors, + // they would need to be able to identify this specific case. + // For example, the bucket actor may need to delete a blob while overwriting + // an existing key. + // However, the system may have already deleted the blob due to expiration or + // insufficient funds. + // We could use a custom error code, but this is easier. + return Ok((false, 0, false)); + } + }; + + // Do not allow deletion if the status is added or pending. + // This would cause issues with deletion from disc. + if matches!(blob.blob.status, BlobStatus::Added) + || matches!(blob.blob.status, BlobStatus::Pending) + { + return Err(ActorError::forbidden(format!( + "blob {} pending finalization; please wait", + params.hash + ))); + } + + // Since the charge will be for all the account's blobs, we can only + // account for capacity up to this blob's expiry if it is less than + // the current epoch. + // If the subscription is failed, there may be no group expiry. + let mut return_duration = 0; + if !blob.subscription.failed { + let (group_expiry, new_group_expiry) = + blob.subscriptions + .max_expiries(store, ¶ms.id, Some(0))?; + if let Some(group_expiry) = group_expiry { + let debit_epoch = group_expiry.min(params.epoch); + // Account capacity is changing, debit for existing usage. + // It could be possible that the debit epoch is less than the last debit, + // in which case we need to refund for that duration. + let last_debit_epoch = caller.subscriber().last_debit_epoch; + if last_debit_epoch < debit_epoch { + self.debit_caller(&mut caller, debit_epoch); + } else if last_debit_epoch != debit_epoch && !params.skip_credit_return { + // The account was debited after this blob's expiry + // Return over-debited credit + return_duration = last_debit_epoch - group_expiry; + let return_credits = self.get_storage_cost(return_duration, &blob.blob.size); + self.return_committed_credit_for_caller(&mut caller, &return_credits); + } + } + + // Account for reclaimed size and move committed credit to free credit + self.release_capacity_for_subnet_and_caller( + &mut caller, + group_expiry, + new_group_expiry, + blob.blob.size, + blob.blob.subscribers.len(), + ); + } + + let blob_deleted = self.blobs.delete_subscription( + store, + &caller, + params.hash, + params.id.clone(), + &mut blob, + )?; + + if blob.subscription.failed && blob_deleted { + self.blobs.release_capacity(blob.blob.size); + } + + // Save accounts + self.save_caller(&mut caller, &mut accounts)?; + + Ok((blob_deleted, blob.blob.size, return_duration > 0)) + } + + /// Adjusts all subscriptions for `account` according to its max TTL. + /// + /// Returns the number of subscriptions processed and the next key to continue iteration. + /// If `starting_hash` is `None`, iteration starts from the beginning. + /// If `limit` is `None`, all subscriptions are processed. + /// If `limit` is not `None`, iteration stops after examining `limit` blobs. + /// + /// Flushes state to the blockstore. 
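+    ///
+    /// # Example (editorial sketch, not from the original patch)
+    ///
+    /// A minimal sketch of driving the paginated trim loop. It assumes `state`, `config`,
+    /// `store`, `subscriber`, and `current_epoch` are in scope, that the continuation key
+    /// returned here can be fed back as the next `starting_hash`, and uses an arbitrary
+    /// page size of 10.
+    ///
+    /// ```ignore
+    /// let mut next_start = None;
+    /// loop {
+    ///     let (processed, next_key, deleted) = state.trim_blob_expiries(
+    ///         &config, &store, subscriber, current_epoch, next_start, Some(10),
+    ///     )?;
+    ///     log::debug!("trimmed {} subscriptions, deleted {} blobs", processed, deleted.len());
+    ///     if next_key.is_none() {
+    ///         break; // all blobs examined
+    ///     }
+    ///     next_start = next_key; // resume from where the previous page stopped
+    /// }
+    /// ```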
+ pub fn trim_blob_expiries( + &mut self, + config: &IPCStorageConfig, + store: &BS, + subscriber: Address, + current_epoch: ChainEpoch, + starting_hash: Option, + limit: Option, + ) -> Result<(u32, Option, Vec), ActorError> { + let new_ttl = self.get_account_max_ttl(config, store, subscriber)?; + let mut deleted_blobs = Vec::new(); + let mut processed = 0; + let blobs = self.blobs.hamt(store)?; + let starting_key = starting_hash.map(|h| BytesKey::from(h.0.as_slice())); + + fn err_map(e: E) -> ActorError + where + E: Error, + { + ActorError::illegal_state(format!( + "subscriptions group cannot be iterated over: {}", + e + )) + } + + // Walk blobs + let (_, next_key) = blobs.for_each_ranged( + starting_key.as_ref(), + limit.map(|l| l as usize), + |hash, blob| -> Result { + let subscribers = blob.subscribers.hamt(store)?; + if let Some(subscriptions) = subscribers.get(&subscriber)? { + let subscriptions_hamt = subscriptions.hamt(store)?; + for val in subscriptions_hamt.iter() { + let (id_bytes, subscription) = val.map_err(err_map)?; + let id = from_utf8(id_bytes).map_err(err_map)?; + + // Skip expired subscriptions, they will be handled by cron tick + let expired = subscription.expiry <= current_epoch; + if !expired && subscription.expiry - subscription.added > new_ttl { + if new_ttl == 0 { + // Delete subscription + let (from_disc, _, _) = self.delete_blob( + store, + subscriber, + None, + DeleteBlobStateParams { + epoch: current_epoch, + hash, + id: SubscriptionId::new(id)?, + skip_credit_return: false, + }, + )?; + if from_disc { + deleted_blobs.push(hash); + }; + } else { + // Reduce subscription TTL + self.add_blob( + store, + config, + subscriber, + None, + AddBlobStateParams { + hash, + metadata_hash: blob.metadata_hash, + id: SubscriptionId::new(id)?, + size: blob.size, + ttl: Some(new_ttl), + source: subscription.source, + epoch: current_epoch, + token_amount: TokenAmount::zero(), + }, + )?; + } + processed += 1; + } + } + } + Ok(true) + }, + )?; + + Ok((processed, next_key, deleted_blobs)) + } + + /// Returns an error if the subnet storage is at capacity. + pub(crate) fn ensure_capacity(&self, capacity: u64) -> Result<(), ActorError> { + if self.capacity_available(capacity).is_zero() { + return Err(ActorError::forbidden( + "subnet has reached storage capacity".into(), + )); + } + Ok(()) + } + + /// Return available capacity as a difference between `blob_capacity_total` and `capacity_used`. + pub(crate) fn capacity_available(&self, blob_capacity_total: u64) -> u64 { + // Prevent underflow. We only care if free capacity is > 0 anyway. + blob_capacity_total.saturating_sub(self.blobs.bytes_size()) + } + + /// Returns the [`Credit`] storage cost for the given duration and size. + pub(crate) fn get_storage_cost(&self, duration: i64, size: &u64) -> Credit { + Credit::from_whole(duration * BigInt::from(*size)) + } + + /// Returns the current [`Credit`] debit amount based on the caller's current capacity used + /// and the given duration. + pub(crate) fn get_debit_for_caller( + &self, + caller: &Caller, + epoch: ChainEpoch, + ) -> Credit { + let debit_duration = epoch.saturating_sub(caller.subscriber().last_debit_epoch); + Credit::from_whole(BigInt::from(caller.subscriber().capacity_used) * debit_duration) + } + + /// Returns an account's current max allowed blob TTL by address. + pub(crate) fn get_account_max_ttl( + &self, + config: &IPCStorageConfig, + store: &BS, + address: Address, + ) -> Result { + let accounts = self.accounts.hamt(store)?; + Ok(accounts + .get(&address)? 
+ .map_or(config.blob_default_ttl, |account| account.max_ttl)) + } + + /// Releases capacity for the subnet and caller. + /// Does NOT flush the state to the blockstore. + fn release_capacity_for_subnet_and_caller( + &mut self, + caller: &mut Caller, + group_expiry: Option, + new_group_expiry: Option, + size: u64, + num_subscribers: u64, + ) { + // If there's no new group expiry, we can reclaim capacity. + let reclaim_capacity = if new_group_expiry.is_none() { size } else { 0 }; + + // Only reclaim subnet capacity if this was the last subscriber + if num_subscribers == 1 { + self.blobs.release_capacity(reclaim_capacity); + } + + // We can release credits if the new group expiry is in the future, + // considering other subscriptions may still be active. + let reclaim_credits = group_expiry + .map(|group_expiry| { + let last_debit_epoch = caller.subscriber().last_debit_epoch; + if last_debit_epoch < group_expiry { + // let reclaim_start = new_group_expiry.unwrap_or(last_debit_epoch); + let reclaim_start = + new_group_expiry.map_or(last_debit_epoch, |e| e.max(last_debit_epoch)); + self.get_storage_cost(group_expiry - reclaim_start, &size) + } else { + Credit::zero() + } + }) + .unwrap_or_default(); + + self.release_capacity_for_caller(caller, reclaim_capacity, &reclaim_credits); + } +} diff --git a/fendermint/actors/blobs/src/state/blobs/params.rs b/fendermint/actors/blobs/src/state/blobs/params.rs new file mode 100644 index 0000000000..5d55fcf87f --- /dev/null +++ b/fendermint/actors/blobs/src/state/blobs/params.rs @@ -0,0 +1,138 @@ +// Copyright 2025 Recall Contributors +// Copyright 2022-2024 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT + +use fendermint_actor_blobs_shared::{ + blobs::{BlobStatus, SubscriptionId}, + bytes::B256, +}; +use fvm_shared::{clock::ChainEpoch, econ::TokenAmount}; + +/// Params for adding a blob. +#[derive(Clone, Debug)] +pub struct AddBlobStateParams { + /// Source Iroh node ID used for ingestion. + pub source: B256, + /// Blob blake3 hash. + pub hash: B256, + /// Blake3 hash of the metadata to use for blob recovery. + pub metadata_hash: B256, + /// Identifier used to differentiate blob additions for the same subscriber. + pub id: SubscriptionId, + /// Blob size. + pub size: u64, + /// Blob time-to-live epochs. + /// If not specified, the current default TTL from the config actor is used. + pub ttl: Option, + /// Chain epoch. + pub epoch: ChainEpoch, + /// Token amount sent with the transaction. + pub token_amount: TokenAmount, +} + +impl AddBlobStateParams { + pub fn from_actor_params( + params: fendermint_actor_blobs_shared::blobs::AddBlobParams, + epoch: ChainEpoch, + token_amount: TokenAmount, + ) -> Self { + Self { + source: params.source, + hash: params.hash, + metadata_hash: params.metadata_hash, + id: params.id, + size: params.size, + ttl: params.ttl, + epoch, + token_amount, + } + } +} + +/// Params for deleting a blob. +#[derive(Clone, Debug)] +pub struct DeleteBlobStateParams { + /// Blob blake3 hash. + pub hash: B256, + /// Identifier used to differentiate blob additions for the same subscriber. + pub id: SubscriptionId, + /// Chain epoch. + pub epoch: ChainEpoch, + /// Whether to skip returning credit for an over-debit. + /// This is needed to handle cases where multiple subscriptions are being expired in the same + /// epoch for the same subscriber. 
+ pub skip_credit_return: bool, +} + +impl DeleteBlobStateParams { + pub fn from_actor_params( + params: fendermint_actor_blobs_shared::blobs::DeleteBlobParams, + epoch: ChainEpoch, + ) -> Self { + Self { + hash: params.hash, + id: params.id, + epoch, + skip_credit_return: false, + } + } +} + +/// Params for setting a blob to pending state. +#[derive(Clone, Debug)] +pub struct SetPendingBlobStateParams { + /// Source Iroh node ID used for ingestion. + pub source: B256, + /// Blob blake3 hash. + pub hash: B256, + /// Blob size. + pub size: u64, + /// Identifier used to differentiate blob additions for the same subscriber. + pub id: SubscriptionId, +} + +impl SetPendingBlobStateParams { + pub fn from_actor_params( + params: fendermint_actor_blobs_shared::blobs::SetBlobPendingParams, + ) -> Self { + Self { + source: params.source, + hash: params.hash, + size: params.size, + id: params.id, + } + } +} + +/// Params for finalizing a blob. +#[derive(Clone, Debug)] +pub struct FinalizeBlobStateParams { + /// Source Iroh node ID used for ingestion. + pub source: B256, + /// Blob blake3 hash. + pub hash: B256, + /// Blob size. + pub size: u64, + /// Identifier used to differentiate blob additions for the same subscriber. + pub id: SubscriptionId, + /// Finalized status. + pub status: BlobStatus, + /// Chain epoch. + pub epoch: ChainEpoch, +} + +impl FinalizeBlobStateParams { + pub fn from_actor_params( + params: fendermint_actor_blobs_shared::blobs::FinalizeBlobParams, + epoch: ChainEpoch, + ) -> Self { + Self { + source: params.source, + hash: params.hash, + size: params.size, + id: params.id, + status: params.status, + epoch, + } + } +} diff --git a/fendermint/actors/blobs/src/state/blobs/queue.rs b/fendermint/actors/blobs/src/state/blobs/queue.rs new file mode 100644 index 0000000000..5a39d034e1 --- /dev/null +++ b/fendermint/actors/blobs/src/state/blobs/queue.rs @@ -0,0 +1,210 @@ +// Copyright 2025 Recall Contributors +// Copyright 2022-2024 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT + +use std::collections::HashSet; + +use fendermint_actor_blobs_shared::{self as shared, blobs::SubscriptionId, bytes::B256}; +use fil_actors_runtime::ActorError; +use fvm_ipld_blockstore::Blockstore; +use fvm_ipld_encoding::{tuple::*, RawBytes}; +use fvm_shared::address::Address; +use ipc_storage_ipld::hamt::{self, map::TrackedFlushResult, MapKey}; + +/// Key used to namespace a blob source set. +#[derive(Clone, Debug, Hash, PartialEq, Eq, Serialize_tuple, Deserialize_tuple)] +pub struct BlobSource { + /// Blob subscriber. + pub subscriber: Address, + /// Subscription ID. + pub id: SubscriptionId, + /// Source Iroh node ID. + pub source: B256, +} + +impl BlobSource { + /// Create a new blob source. 
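+    ///
+    /// # Example (editorial sketch, not from the original patch)
+    ///
+    /// A minimal sketch, assuming the `new_address` and `new_pk` testing helpers used
+    /// elsewhere in this crate: a source round-trips through the CBOR-based [`MapKey`]
+    /// encoding implemented below.
+    ///
+    /// ```ignore
+    /// let source = BlobSource::new(new_address(), SubscriptionId::default(), new_pk());
+    /// let key_bytes = source.to_bytes().unwrap();
+    /// assert_eq!(BlobSource::from_bytes(&key_bytes).unwrap(), source);
+    /// ```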
+ pub fn new(subscriber: Address, id: SubscriptionId, source: B256) -> Self { + Self { + subscriber, + id, + source, + } + } +} + +impl std::fmt::Display for BlobSource { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!( + f, + "BlobSource(subscriber: {}, id: {}, source: {})", + self.subscriber, self.id, self.source + ) + } +} + +impl MapKey for BlobSource { + fn from_bytes(b: &[u8]) -> Result { + let raw_bytes = RawBytes::from(b.to_vec()); + fil_actors_runtime::cbor::deserialize(&raw_bytes, "BlobSource") + .map_err(|e| format!("Failed to deserialize BlobSource {}", e)) + } + + fn to_bytes(&self) -> Result, String> { + let raw_bytes = fil_actors_runtime::cbor::serialize(self, "BlobSource") + .map_err(|e| format!("Failed to serialize BlobSource {}", e))?; + Ok(raw_bytes.to_vec()) + } +} + +/// A set of [`shared::blobs::BlobSource`]s. +/// A blob in the collection may have multiple sources. +type BlobSourceSet = HashSet; + +/// A collection of blobs used for progress queues. +#[derive(Debug, Serialize_tuple, Deserialize_tuple)] +pub struct Queue { + /// The HAMT root. + pub root: hamt::Root>, + /// Number of sources in the collection. + size: u64, + /// Number of blob bytes in the collection. + /// A blob with multiple sources is only counted once. + bytes_size: u64, +} + +impl Queue { + /// Returns a new progress collection. + pub fn new(store: &BS, name: &str) -> Result { + let root = hamt::Root::>::new(store, name)?; + Ok(Self { + root, + size: 0, + bytes_size: 0, + }) + } + + /// Returns a store name for the inner root. + fn store_name_per_hash(&self, hash: B256) -> String { + format!("{}.{}", self.root.name(), hash) + } + + /// Returns the underlying [`hamt::map::Hamt`]. + pub fn hamt<'a, BS: Blockstore>( + &self, + store: BS, + ) -> Result>, ActorError> { + self.root.hamt(store, self.size) + } + + /// Saves the state from the [`TrackedFlushResult`]. + pub fn save_tracked( + &mut self, + tracked_flush_result: TrackedFlushResult>, + ) { + self.root = tracked_flush_result.root; + self.size = tracked_flush_result.size; + } + + /// Number of sources in the collection. + pub fn len(&self) -> u64 { + self.size + } + + /// Returns true if the collection is empty. + pub fn is_empty(&self) -> bool { + self.size == 0 + } + + /// Returns the number of blob bytes in the collection. + /// A blob with multiple sources is only counted once. + pub fn bytes_size(&self) -> u64 { + self.bytes_size + } + + /// Adds/updates an entry in the collection. + pub fn upsert( + &mut self, + store: BS, + hash: B256, + source: BlobSource, + blob_size: u64, + ) -> Result<(), ActorError> { + let mut collection = self.hamt(&store)?; + let sources_root = if let Some(sources_root) = collection.get(&hash)? { + // Modify the existing entry + let mut sources = sources_root.hamt(&store, 0)?; + sources.set_and_flush(&source, ())? + } else { + // Entry did not exist, add and increase tracked bytes size + let sources_root = + hamt::Root::::new(&store, &self.store_name_per_hash(hash))?; + let mut sources = sources_root.hamt(&store, 0)?; + self.bytes_size = self.bytes_size.saturating_add(blob_size); + sources.set_and_flush(&source, ())? + }; + self.save_tracked(collection.set_and_flush_tracked(&hash, sources_root)?); + Ok(()) + } + + /// Returns a page of entries from the collection. 
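+    ///
+    /// # Example (editorial sketch, not from the original patch)
+    ///
+    /// A minimal sketch, assuming a `MemoryBlockstore` and the `new_hash`, `new_address`,
+    /// and `new_pk` testing helpers used elsewhere in this crate; the queue name "added"
+    /// is arbitrary. A blob queued from two sources yields a single page entry, and its
+    /// bytes are only counted once.
+    ///
+    /// ```ignore
+    /// let store = MemoryBlockstore::default();
+    /// let mut queue = Queue::new(&store, "added").unwrap();
+    /// let (hash, _) = new_hash(1024);
+    /// let id = SubscriptionId::default();
+    /// queue.upsert(&store, hash, BlobSource::new(new_address(), id.clone(), new_pk()), 1024).unwrap();
+    /// queue.upsert(&store, hash, BlobSource::new(new_address(), id, new_pk()), 1024).unwrap();
+    /// assert_eq!(queue.bytes_size(), 1024); // one blob, two sources
+    ///
+    /// let page = queue.take_page(&store, 10).unwrap();
+    /// assert_eq!(page.len(), 1);      // a single hash entry
+    /// assert_eq!(page[0].1.len(), 2); // both sources returned for that hash
+    /// ```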
+ pub fn take_page( + &self, + store: BS, + size: u32, + ) -> Result, ActorError> { + let collection = self.hamt(&store)?; + let mut page = Vec::with_capacity(size as usize); + collection.for_each_ranged(None, Some(size as usize), |hash, sources_root| { + let sources = sources_root.hamt(&store, 0)?; + let mut set = HashSet::new(); + sources.for_each(|source, _| { + set.insert((source.subscriber, source.id, source.source)); + Ok(()) + })?; + page.push((hash, set)); + Ok(true) + })?; + page.shrink_to_fit(); + Ok(page) + } + + /// Removes a source from an entry in the collection. + /// If the entry is empty after removing the source, the entry is also removed. + pub fn remove_source( + &mut self, + store: BS, + hash: &B256, + size: u64, + source: BlobSource, + ) -> Result<(), ActorError> { + let mut collection = self.hamt(&store)?; + if let Some(mut source_root) = collection.get(hash)? { + let mut sources = source_root.hamt(&store, 1)?; + (source_root, _) = sources.delete_and_flush(&source)?; + if sources.is_empty() { + self.save_tracked(collection.delete_and_flush_tracked(hash)?.0); + self.bytes_size = self.bytes_size.saturating_sub(size); + } else { + self.save_tracked(collection.set_and_flush_tracked(hash, source_root)?); + } + } + Ok(()) + } + + /// Removes an entry from the collection. + pub fn remove_entry( + &mut self, + store: BS, + hash: &B256, + size: u64, + ) -> Result<(), ActorError> { + let mut collection = self.hamt(&store)?; + let (res, deleted) = collection.delete_and_flush_tracked(hash)?; + self.save_tracked(res); + if deleted.is_some() { + self.bytes_size = self.bytes_size.saturating_sub(size); + } + Ok(()) + } +} diff --git a/fendermint/actors/blobs/src/state/blobs/subscribers.rs b/fendermint/actors/blobs/src/state/blobs/subscribers.rs new file mode 100644 index 0000000000..b02b5bcbcd --- /dev/null +++ b/fendermint/actors/blobs/src/state/blobs/subscribers.rs @@ -0,0 +1,142 @@ +// Copyright 2025 Recall Contributors +// Copyright 2021-2023 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT + +use fendermint_actor_blobs_shared::blobs::Subscription; +use fil_actors_runtime::ActorError; +use fvm_ipld_blockstore::Blockstore; +use fvm_ipld_encoding::tuple::*; +use fvm_shared::{address::Address, clock::ChainEpoch}; +use ipc_storage_ipld::{hamt, hamt::map::TrackedFlushResult}; + +use super::{AddBlobStateParams, Subscriptions}; +use crate::caller::Caller; + +/// Represents the result of a subscriber upsert. +#[derive(Debug, Clone)] +pub struct UpsertSubscriberResult { + /// New or updated subscription. + pub subscription: Subscription, + /// Whether the subscriber was added or updated. + pub subscriber_added: bool, + /// Previous subscription expiry if the subscription was updated. + pub previous_subscription_expiry: Option, + /// Duration for the new credit commitment. + pub commit_duration: ChainEpoch, + /// Duration for the returned credit commitment. + pub return_duration: ChainEpoch, +} + +/// HAMT wrapper tracking blob [`Subscriptions`]s by subscriber address. +#[derive(Debug, Clone, PartialEq, Serialize_tuple, Deserialize_tuple)] +pub struct Subscribers { + /// The HAMT root. + pub root: hamt::Root, + /// The size of the collection. + size: u64, +} + +impl Subscribers { + /// Returns a subscriber collection. + pub fn new(store: &BS) -> Result { + let root = hamt::Root::::new(store, "blob_subscribers")?; + Ok(Self { root, size: 0 }) + } + + /// Returns the underlying [`hamt::map::Hamt`]. 
+ pub fn hamt<'a, BS: Blockstore>( + &self, + store: BS, + ) -> Result, ActorError> { + self.root.hamt(store, self.size) + } + + /// Saves the state from the [`TrackedFlushResult`]. + pub fn save_tracked( + &mut self, + tracked_flush_result: TrackedFlushResult, + ) { + self.root = tracked_flush_result.root; + self.size = tracked_flush_result.size; + } + + /// The size of the collection. + pub fn len(&self) -> u64 { + self.size + } + + /// Returns true if the collection is empty. + pub fn is_empty(&self) -> bool { + self.size == 0 + } + + /// Creates or updates a subscriber's subscription to a blob, managing all related state + /// changes. + /// + /// This function handles both the creation of new subscribers and updating existing + /// subscribers' subscriptions. It calculates credit commitment and return durations based on + /// the subscription's expiry and the group's maximum expiry. + pub fn upsert( + &mut self, + store: &BS, + caller: &Caller, + params: &AddBlobStateParams, + expiry: ChainEpoch, + ) -> Result { + let mut subscribers = self.hamt(store)?; + let mut subscriptions = + if let Some(subscriptions) = subscribers.get(&caller.subscriber_address())? { + subscriptions + } else { + Subscriptions::new(store)? + }; + + // If the subscriber has been debited after the group's max expiry, we need to + // determine the duration for which credits will be returned. + // The return duration can only extend up to the current epoch. + let (group_expiry, new_group_expiry) = + subscriptions.max_expiries(store, ¶ms.id, Some(expiry))?; + let return_duration = group_expiry + .filter(|&expiry| params.epoch > expiry) + .map_or(0, |expiry| params.epoch - expiry); + + // Determine the duration for which credits will be committed, considering the subscription + // group may have expiries that cover a portion of the added duration. + // Duration can be negative if the subscriber is reducing expiry. + let new_group_expiry = new_group_expiry.unwrap(); // safe here + let commit_start = group_expiry.map_or(params.epoch, |e| e.max(params.epoch)); + let commit_duration = new_group_expiry - commit_start; + let overlap = commit_start - group_expiry.unwrap_or(params.epoch); + + // Add/update subscription + let result = subscriptions.upsert(store, caller, params, overlap, expiry)?; + + self.save_tracked( + subscribers.set_and_flush_tracked(&caller.subscriber_address(), subscriptions)?, + ); + + Ok(UpsertSubscriberResult { + subscription: result.subscription, + subscriber_added: group_expiry.is_none(), + previous_subscription_expiry: result.previous_expiry, + commit_duration, + return_duration, + }) + } + + /// Saves a subscriber's subscriptions to the blockstore. + /// + /// This is a helper function that simplifies the process of saving a subscriber's subscription + /// data by handling the HAMT operations internally. It creates or updates the subscriber entry + /// in the HAMT and saves the changes to the blockstore. 
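+    ///
+    /// # Worked example (editorial sketch, not from the original patch)
+    ///
+    /// The commit/return durations computed by [`Self::upsert`] above, worked for one case
+    /// with hypothetical numbers: the group currently expires at epoch 90, the add happens
+    /// at epoch 100, and the group's new maximum expiry becomes 150.
+    ///
+    /// ```ignore
+    /// let epoch = 100;              // params.epoch
+    /// let group_expiry = Some(90);  // current max expiry in the group
+    /// let new_group_expiry = 150;   // max expiry after the upsert
+    ///
+    /// let return_duration = group_expiry.filter(|&e| epoch > e).map_or(0, |e| epoch - e);
+    /// let commit_start = group_expiry.map_or(epoch, |e| e.max(epoch));
+    /// let commit_duration = new_group_expiry - commit_start;
+    /// let overlap = commit_start - group_expiry.unwrap_or(epoch);
+    ///
+    /// assert_eq!((return_duration, commit_duration, overlap), (10, 50, 10));
+    /// ```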
+ pub fn save_subscriptions( + &mut self, + store: &BS, + subscriber: Address, + subscriptions: Subscriptions, + ) -> Result<(), ActorError> { + let mut subscribers = self.hamt(store)?; + self.save_tracked(subscribers.set_and_flush_tracked(&subscriber, subscriptions)?); + Ok(()) + } +} diff --git a/fendermint/actors/blobs/src/state/blobs/subscriptions.rs b/fendermint/actors/blobs/src/state/blobs/subscriptions.rs new file mode 100644 index 0000000000..b39d688072 --- /dev/null +++ b/fendermint/actors/blobs/src/state/blobs/subscriptions.rs @@ -0,0 +1,697 @@ +// Copyright 2025 Recall Contributors +// Copyright 2021-2023 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT + +use std::str::from_utf8; + +use fendermint_actor_blobs_shared::blobs::{Subscription, SubscriptionId}; +use fil_actors_runtime::ActorError; +use fvm_ipld_blockstore::Blockstore; +use fvm_ipld_encoding::tuple::*; +use fvm_shared::clock::ChainEpoch; +use ipc_storage_ipld::{hamt, hamt::map::TrackedFlushResult}; +use log::debug; + +use super::AddBlobStateParams; +use crate::caller::Caller; + +/// Represents the result of a subscription upsert. +#[derive(Debug, Clone)] +pub struct UpsertSubscriptionResult { + /// New or updated subscription. + pub subscription: Subscription, + /// Previous subscription expiry if the subscription was updated. + pub previous_expiry: Option, +} + +/// HAMT wrapper tracking blob [`Subscription`]s by subscription ID. +#[derive(Debug, Clone, PartialEq, Serialize_tuple, Deserialize_tuple)] +pub struct Subscriptions { + /// The HAMT root. + pub root: hamt::Root, + /// The size of the collection. + size: u64, +} + +impl Subscriptions { + /// Returns a subscription collection. + pub fn new(store: &BS) -> Result { + let root = hamt::Root::::new(store, "subscription_group")?; + Ok(Self { root, size: 0 }) + } + + /// Returns the underlying [`hamt::map::Hamt`]. + pub fn hamt( + &self, + store: BS, + ) -> Result, ActorError> { + self.root.hamt(store, self.size) + } + + /// Saves the state from the [`TrackedFlushResult`]. + pub fn save_tracked( + &mut self, + tracked_flush_result: TrackedFlushResult, + ) { + self.root = tracked_flush_result.root; + self.size = tracked_flush_result.size; + } + + /// The size of the collection. + pub fn len(&self) -> u64 { + self.size + } + + /// Returns true if the collection is empty. + pub fn is_empty(&self) -> bool { + self.size == 0 + } + + /// Calculates the current maximum expiry and the new maximum expiry after a potential update. + /// + /// This function serves two purposes: + /// 1. It finds the current maximum expiry among all non-failed subscriptions + /// 2. It calculates what the new maximum expiry would be if the subscription with `target_id` + /// had its expiry updated to `new_value` + /// + /// This is particularly useful for determining if group expiry boundaries need to be updated + /// when a single subscription's expiry changes. 
+ pub fn max_expiries( + &self, + store: &BS, + target_id: &SubscriptionId, + new_value: Option, + ) -> Result<(Option, Option), ActorError> { + let mut max = None; + let mut new_max = None; + let subscriptions = self.hamt(store)?; + for val in subscriptions.iter() { + let (id, sub) = deserialize_iter_sub(val)?; + if sub.failed { + continue; + } + if sub.expiry > max.unwrap_or(0) { + max = Some(sub.expiry); + } + let new_value = if &id == target_id { + new_value.unwrap_or_default() + } else { + sub.expiry + }; + if new_value > new_max.unwrap_or(0) { + new_max = Some(new_value); + } + } + // Target ID may not be in the current group + if let Some(new_value) = new_value { + if new_value > new_max.unwrap_or(0) { + new_max = Some(new_value); + } + } + Ok((max, new_max)) + } + + /// Determines if a subscription has the earliest added timestamp and finds the next earliest + /// timestamp. + /// + /// This function checks if the subscription identified by `trim_id` has the earliest "added" + /// timestamp among all active, non-failed subscriptions. It also identifies what would be the + /// new earliest timestamp if this subscription were removed. + /// + /// This is typically used when deciding if a subscription can be safely removed without + /// affecting the overall data retention requirements of the system. + pub fn is_min_added( + &self, + store: &BS, + trim_id: &SubscriptionId, + ) -> Result<(bool, Option), ActorError> { + let subscriptions = self.hamt(store)?; + let trim = subscriptions + .get(trim_id)? + .ok_or(ActorError::not_found(format!( + "subscription id {} not found", + trim_id + )))?; + + let mut next_min = None; + for val in subscriptions.iter() { + let (id, sub) = deserialize_iter_sub(val)?; + if sub.failed || &id == trim_id { + continue; + } + if sub.added < trim.added { + return Ok((false, None)); + } + if sub.added < next_min.unwrap_or(ChainEpoch::MAX) { + next_min = Some(sub.added); + } + } + Ok((true, next_min)) + } + + /// Creates a new subscription or updates an existing one with the provided parameters. + /// + /// This function handles both the creation and update cases for blob subscriptions: + /// - If a subscription with the given ID already exists, it updates its properties + /// - If no subscription exists with the ID, it creates a new one + /// + /// When updating an existing subscription, it preserves the original subscription's + /// added timestamp but updates the expiry, source, delegate, and resets the failed flag. + pub fn upsert( + &mut self, + store: &BS, + caller: &Caller, + params: &AddBlobStateParams, + overlap: ChainEpoch, + expiry: ChainEpoch, + ) -> Result { + let mut subscriptions = self.hamt(store)?; + if let Some(mut subscription) = subscriptions.get(¶ms.id)? 
{ + let previous_expiry = subscription.expiry; + subscription.expiry = expiry; + subscription.source = params.source; // subscriber can retry from a different source + subscription.delegate = caller.delegate_address(); + subscription.failed = false; + + self.save_tracked( + subscriptions.set_and_flush_tracked(¶ms.id, subscription.clone())?, + ); + + debug!( + "updated subscription to blob {} for {} (key: {})", + params.hash, + caller.subscriber_address(), + params.id + ); + + Ok(UpsertSubscriptionResult { + subscription, + previous_expiry: Some(previous_expiry), + }) + } else { + let subscription = Subscription { + added: params.epoch, + overlap, + expiry, + source: params.source, + delegate: caller.delegate_address(), + failed: false, + }; + + self.save_tracked( + subscriptions.set_and_flush_tracked(¶ms.id, subscription.clone())?, + ); + + debug!( + "created new subscription to blob {} for {} (key: {})", + params.hash, + caller.subscriber_address(), + params.id + ); + + Ok(UpsertSubscriptionResult { + subscription, + previous_expiry: None, + }) + } + } + + /// Saves a subscription with the given ID to the blockstore. + /// + /// This is a helper function that simplifies the process of saving a subscription + /// by handling the HAMT operations internally. It creates or updates the subscription + /// in the HAMT and saves the changes to the blockstore. + pub fn save_subscription( + &mut self, + store: &BS, + id: &SubscriptionId, + subscription: Subscription, + ) -> Result<(), ActorError> { + let mut subscriptions = self.hamt(store)?; + self.save_tracked(subscriptions.set_and_flush_tracked(id, subscription)?); + Ok(()) + } +} + +fn deserialize_iter_sub<'a>( + val: Result<(&hamt::BytesKey, &'a Subscription), hamt::Error>, +) -> Result<(SubscriptionId, &'a Subscription), ActorError> { + let (id_bytes, sub) = val.map_err(|e| { + ActorError::illegal_state(format!( + "failed to deserialize subscription from iter: {}", + e + )) + })?; + let id = from_utf8(id_bytes).map_err(|e| { + ActorError::illegal_state(format!( + "failed to deserialize subscription ID from iter: {}", + e + )) + })?; + let subscription_id = SubscriptionId::new(id).map_err(|e| { + ActorError::illegal_state(format!("failed to decode subscription ID from iter: {}", e)) + })?; + Ok((subscription_id, sub)) +} + +#[cfg(test)] +mod tests { + use super::*; + use fendermint_actor_blobs_shared::blobs::{Subscription, SubscriptionId}; + use fendermint_actor_blobs_testing::new_pk; + use fvm_ipld_blockstore::MemoryBlockstore; + use fvm_shared::clock::ChainEpoch; + + fn create_test_subscription( + id: &str, + added: ChainEpoch, + expiry: ChainEpoch, + failed: bool, + ) -> (SubscriptionId, Subscription) { + let subscription_id = SubscriptionId::new(id).unwrap(); + let subscription = Subscription { + added, + overlap: 0, + expiry, + source: new_pk(), + delegate: None, + failed, + }; + (subscription_id, subscription) + } + + #[test] + fn test_max_expiries_empty_group() { + let store = MemoryBlockstore::default(); + let subscriptions = Subscriptions::new(&store).unwrap(); + + let target_id = SubscriptionId::new("not-exists").unwrap(); + let (max, new_max) = subscriptions + .max_expiries(&store, &target_id, Some(100)) + .unwrap(); + + assert_eq!(max, None, "Max expiry should be None for empty group"); + assert_eq!( + new_max, + Some(100), + "New max should be the new value when group is empty" + ); + } + + #[test] + fn test_max_expiries_single_subscription() { + let store = MemoryBlockstore::default(); + let mut subscriptions = 
Subscriptions::new(&store).unwrap(); + + // Add a single subscription + let (id, subscription) = create_test_subscription("test1", 0, 50, false); + subscriptions + .save_subscription(&store, &id, subscription) + .unwrap(); + + // Test with existing ID + let (max, new_max) = subscriptions.max_expiries(&store, &id, Some(100)).unwrap(); + assert_eq!( + max, + Some(50), + "Max should be the existing subscription's expiry" + ); + assert_eq!(new_max, Some(100), "New max should be the new value"); + + // Test with non-existing ID + let non_existing_id = SubscriptionId::new("not-exists").unwrap(); + let (max, new_max) = subscriptions + .max_expiries(&store, &non_existing_id, Some(80)) + .unwrap(); + assert_eq!( + max, + Some(50), + "Max should be the existing subscription's expiry" + ); + assert_eq!( + new_max, + Some(80), + "New max should be the new value for non-existing ID" + ); + } + + #[test] + fn test_max_expiries_multiple_subscriptions() { + let store = MemoryBlockstore::default(); + let mut subscriptions = Subscriptions::new(&store).unwrap(); + + // Add multiple subscriptions with different expiries + let (id1, sub1) = create_test_subscription("test1", 0, 50, false); + let (id2, sub2) = create_test_subscription("test2", 0, 70, false); + let (id3, sub3) = create_test_subscription("test3", 0, 30, false); + subscriptions.save_subscription(&store, &id1, sub1).unwrap(); + subscriptions.save_subscription(&store, &id2, sub2).unwrap(); + subscriptions.save_subscription(&store, &id3, sub3).unwrap(); + + // Test updating the middle expiry + let (max, new_max) = subscriptions.max_expiries(&store, &id1, Some(60)).unwrap(); + assert_eq!(max, Some(70), "Max should be the highest expiry (70)"); + assert_eq!( + new_max, + Some(70), + "New max should still be 70 after update to 60" + ); + + // Test updating to the new highest expiry + let (max, new_max) = subscriptions.max_expiries(&store, &id1, Some(100)).unwrap(); + assert_eq!(max, Some(70), "Max should be the highest expiry (70)"); + assert_eq!(new_max, Some(100), "New max should be 100 after update"); + + // Test with non-existing ID + let non_existing_id = SubscriptionId::new("not-exists").unwrap(); + let (max, new_max) = subscriptions + .max_expiries(&store, &non_existing_id, Some(120)) + .unwrap(); + assert_eq!(max, Some(70), "Max should be the highest expiry (70)"); + assert_eq!( + new_max, + Some(120), + "New max should be 120 for non-existing ID" + ); + } + + #[test] + fn test_max_expiries_with_failed_subscriptions() { + let store = MemoryBlockstore::default(); + let mut subscriptions = Subscriptions::new(&store).unwrap(); + + // Add a mix of failed and non-failed subscriptions + let (id1, sub1) = create_test_subscription("test1", 0, 50, true); // Failed + let (id2, sub2) = create_test_subscription("test2", 0, 70, false); // Not failed + let (id3, sub3) = create_test_subscription("test3", 0, 90, true); // Failed (highest) + subscriptions.save_subscription(&store, &id1, sub1).unwrap(); + subscriptions.save_subscription(&store, &id2, sub2).unwrap(); + subscriptions.save_subscription(&store, &id3, sub3).unwrap(); + + // Failed subscriptions should be ignored in max calculation + let (max, new_max) = subscriptions.max_expiries(&store, &id2, Some(60)).unwrap(); + assert_eq!( + max, + Some(70), + "Max should only consider non-failed subscriptions (70)" + ); + assert_eq!(new_max, Some(60), "New max should be 60 after update"); + + // Test updating a failed subscription + let (max, new_max) = subscriptions.max_expiries(&store, &id1, Some(100)).unwrap(); 
+ assert_eq!( + max, + Some(70), + "Max should only consider non-failed subscriptions (70)" + ); + assert_eq!( + new_max, + Some(100), + "New max should be 100 after updating a failed subscription" + ); + } + + #[test] + fn test_max_expiries_with_none_new_value() { + let store = MemoryBlockstore::default(); + let mut subscriptions = Subscriptions::new(&store).unwrap(); + + // Add subscriptions + let (id1, sub1) = create_test_subscription("test1", 0, 50, false); + let (id2, sub2) = create_test_subscription("test2", 0, 70, false); + subscriptions.save_subscription(&store, &id1, sub1).unwrap(); + subscriptions.save_subscription(&store, &id2, sub2).unwrap(); + + // Test with None as new_value - should calculate without modifying + let (max, new_max) = subscriptions.max_expiries(&store, &id1, None).unwrap(); + assert_eq!(max, Some(70), "Max should be the highest expiry (70)"); + assert_eq!( + new_max, + Some(70), + "New max should remain 70 when target expiry is None" + ); + + // Test with target_id that doesn't exist and None as new_value + let non_existing_id = SubscriptionId::new("not-exists").unwrap(); + let (max, new_max) = subscriptions + .max_expiries(&store, &non_existing_id, None) + .unwrap(); + assert_eq!(max, Some(70), "Max should be the highest expiry (70)"); + assert_eq!( + new_max, + Some(70), + "New max should remain 70 for non-existing ID with None value" + ); + } + + #[test] + fn test_max_expiries_with_zero_new_value() { + let store = MemoryBlockstore::default(); + let mut subscriptions = Subscriptions::new(&store).unwrap(); + + // Add subscriptions + let (id1, sub1) = create_test_subscription("test1", 0, 50, false); + let (id2, sub2) = create_test_subscription("test2", 0, 70, false); + subscriptions.save_subscription(&store, &id1, sub1).unwrap(); + subscriptions.save_subscription(&store, &id2, sub2).unwrap(); + + // Test with zero as new_value for the highest expiry + let (max, new_max) = subscriptions.max_expiries(&store, &id2, Some(0)).unwrap(); + assert_eq!(max, Some(70), "Max should be the highest expiry (70)"); + assert_eq!( + new_max, + Some(50), + "New max should be 50 after setting highest to 0" + ); + + // Test with zero as new_value for the lowest expiry + let (max, new_max) = subscriptions.max_expiries(&store, &id1, Some(0)).unwrap(); + assert_eq!(max, Some(70), "Max should be the highest expiry (70)"); + assert_eq!( + new_max, + Some(70), + "New max should be the highest expiry (70)" + ); + } + + #[test] + fn test_max_expiries_with_one_zero_new_value() { + let store = MemoryBlockstore::default(); + let mut subscriptions = Subscriptions::new(&store).unwrap(); + + // Add subscriptions + let (id1, sub1) = create_test_subscription("test1", 0, 50, true); + let (id2, sub2) = create_test_subscription("test2", 0, 70, false); + subscriptions.save_subscription(&store, &id1, sub1).unwrap(); + subscriptions.save_subscription(&store, &id2, sub2).unwrap(); + + // Test with zero as new_value for the highest expiry + let (max, new_max) = subscriptions.max_expiries(&store, &id2, Some(0)).unwrap(); + assert_eq!(max, Some(70), "Max should be the highest expiry (70)"); + assert_eq!( + new_max, None, + "New max should be None after setting highest to 0" + ); + + // Test with zero as new_value for the lowest expiry + let (max, new_max) = subscriptions.max_expiries(&store, &id1, Some(0)).unwrap(); + assert_eq!(max, Some(70), "Max should be the highest expiry (70)"); + assert_eq!( + new_max, + Some(70), + "New max should be the highest expiry (70)" + ); + } + + #[test] + fn 
test_is_min_added_empty_group() { + let store = MemoryBlockstore::default(); + let subscriptions = Subscriptions::new(&store).unwrap(); + + let target_id = SubscriptionId::new("nonexistent").unwrap(); + let result = subscriptions.is_min_added(&store, &target_id); + + // This should return not found error since no subscription exists + assert!(result.is_err()); + + // Verify it's the expected error type + match result { + Err(e) => { + assert!(e.to_string().contains("not found")); + assert!(e.to_string().contains("nonexistent")); + } + _ => panic!("Expected not found error"), + } + } + + #[test] + fn test_is_min_added_single_subscription() { + let store = MemoryBlockstore::default(); + let mut subscriptions = Subscriptions::new(&store).unwrap(); + + // Add a single subscription + let (id, subscription) = create_test_subscription("test1", 100, 200, false); + subscriptions + .save_subscription(&store, &id, subscription) + .unwrap(); + + // Check if it's the minimum (it should be since it's the only one) + let (is_min, next_min) = subscriptions.is_min_added(&store, &id).unwrap(); + assert!(is_min, "Single subscription should be minimum"); + assert_eq!(next_min, None, "No next minimum should exist"); + } + + #[test] + fn test_is_min_added_multiple_subscriptions_is_min() { + let store = MemoryBlockstore::default(); + let mut subscriptions = Subscriptions::new(&store).unwrap(); + + // Add multiple subscriptions with the first having the earliest added timestamp + let (id1, sub1) = create_test_subscription("test1", 100, 200, false); + let (id2, sub2) = create_test_subscription("test2", 150, 250, false); + let (id3, sub3) = create_test_subscription("test3", 200, 300, false); + subscriptions.save_subscription(&store, &id1, sub1).unwrap(); + subscriptions.save_subscription(&store, &id2, sub2).unwrap(); + subscriptions.save_subscription(&store, &id3, sub3).unwrap(); + + // Check if id1 is the minimum (it should be) + let (is_min, next_min) = subscriptions.is_min_added(&store, &id1).unwrap(); + assert!( + is_min, + "Subscription with earliest added timestamp should be minimum" + ); + assert_eq!(next_min, Some(150), "Next minimum should be 150 (from id2)"); + } + + #[test] + fn test_is_min_added_multiple_subscriptions_not_min() { + let store = MemoryBlockstore::default(); + let mut subscriptions = Subscriptions::new(&store).unwrap(); + + // Add multiple subscriptions with the second one not being the earliest + let (id1, sub1) = create_test_subscription("test1", 100, 200, false); + let (id2, sub2) = create_test_subscription("test2", 150, 250, false); + let (id3, sub3) = create_test_subscription("test3", 200, 300, false); + subscriptions.save_subscription(&store, &id1, sub1).unwrap(); + subscriptions.save_subscription(&store, &id2, sub2).unwrap(); + subscriptions.save_subscription(&store, &id3, sub3).unwrap(); + + // Check if id2 is the minimum (it shouldn't be) + let (is_min, next_min) = subscriptions.is_min_added(&store, &id2).unwrap(); + assert!( + !is_min, + "Subscription with later added timestamp should not be minimum" + ); + assert_eq!( + next_min, None, + "Next minimum should be None when not the minimum" + ); + } + + #[test] + fn test_is_min_added_equal_timestamps() { + let store = MemoryBlockstore::default(); + let mut subscriptions = Subscriptions::new(&store).unwrap(); + + // Add multiple subscriptions with equal earliest timestamps + let (id1, sub1) = create_test_subscription("test1", 100, 200, false); + let (id2, sub2) = create_test_subscription("test2", 100, 250, false); + let (id3, sub3) 
= create_test_subscription("test3", 200, 300, false); + subscriptions.save_subscription(&store, &id1, sub1).unwrap(); + subscriptions.save_subscription(&store, &id2, sub2).unwrap(); + subscriptions.save_subscription(&store, &id3, sub3).unwrap(); + + // Check id1 - both id1 and id2 have the same timestamp + let (is_min, next_min) = subscriptions.is_min_added(&store, &id1).unwrap(); + assert!( + is_min, + "Subscription with equal earliest timestamp should be minimum" + ); + assert_eq!(next_min, Some(100), "Next minimum should be 100 (from id2)"); + + // Check id2 - both id1 and id2 have the same timestamp + let (is_min, next_min) = subscriptions.is_min_added(&store, &id2).unwrap(); + assert!( + is_min, + "Subscription with equal earliest timestamp should be minimum" + ); + assert_eq!(next_min, Some(100), "Next minimum should be 100 (from id1)"); + } + + #[test] + fn test_is_min_added_with_failed_subscriptions() { + let store = MemoryBlockstore::default(); + let mut subscriptions = Subscriptions::new(&store).unwrap(); + + // Add multiple subscriptions with failed ones having earlier timestamps + let (id1, sub1) = create_test_subscription("test1", 50, 150, true); // Failed (earliest) + let (id2, sub2) = create_test_subscription("test2", 100, 200, false); // Not failed (should be min) + let (id3, sub3) = create_test_subscription("test3", 75, 175, true); // Failed (between id1 and id2) + let (id4, sub4) = create_test_subscription("test4", 150, 250, false); // Not failed (later) + subscriptions.save_subscription(&store, &id1, sub1).unwrap(); + subscriptions.save_subscription(&store, &id2, sub2).unwrap(); + subscriptions.save_subscription(&store, &id3, sub3).unwrap(); + subscriptions.save_subscription(&store, &id4, sub4).unwrap(); + + // Check if id2 is the minimum (it should be since failed ones are ignored) + let (is_min, next_min) = subscriptions.is_min_added(&store, &id2).unwrap(); + assert!( + is_min, + "Non-failed subscription with earliest timestamp should be minimum" + ); + assert_eq!(next_min, Some(150), "Next minimum should be 150 (from id4)"); + + // Check a failed subscription + let (is_min, next_min) = subscriptions.is_min_added(&store, &id1).unwrap(); + assert!(is_min, "Failed subscription is checked against itself"); // This is somewhat counterintuitive + assert_eq!(next_min, Some(100), "Next minimum should be 100 (from id2)"); + } + + #[test] + fn test_is_min_added_all_other_subscriptions_are_failed() { + let store = MemoryBlockstore::default(); + let mut subscriptions = Subscriptions::new(&store).unwrap(); + + // Add multiple subscriptions where all others are failed + let (id1, sub1) = create_test_subscription("test1", 100, 200, true); // Failed + let (id2, sub2) = create_test_subscription("test2", 150, 250, false); // Only non-failed subscription + let (id3, sub3) = create_test_subscription("test3", 50, 150, true); // Failed, earliest + subscriptions.save_subscription(&store, &id1, sub1).unwrap(); + subscriptions.save_subscription(&store, &id2, sub2).unwrap(); + subscriptions.save_subscription(&store, &id3, sub3).unwrap(); + + // Check if id2 is the minimum (it should be since all others are failed) + let (is_min, next_min) = subscriptions.is_min_added(&store, &id2).unwrap(); + assert!(is_min, "Only non-failed subscription should be minimum"); + assert_eq!( + next_min, None, + "No next minimum should exist when all others are failed" + ); + } + + #[test] + fn test_is_min_added_with_nonexistent_id() { + let store = MemoryBlockstore::default(); + let mut subscriptions = 
Subscriptions::new(&store).unwrap(); + + // Add some subscriptions + let (id1, sub1) = create_test_subscription("test1", 100, 200, false); + let (id2, sub2) = create_test_subscription("test2", 150, 250, false); + subscriptions.save_subscription(&store, &id1, sub1).unwrap(); + subscriptions.save_subscription(&store, &id2, sub2).unwrap(); + + // Check with nonexistent ID + let nonexistent_id = SubscriptionId::new("nonexistent").unwrap(); + let result = subscriptions.is_min_added(&store, &nonexistent_id); + + // Should return a "not found" error + assert!(result.is_err()); + match result { + Err(e) => { + assert!(e.to_string().contains("not found")); + assert!(e.to_string().contains("nonexistent")); + } + _ => panic!("Expected not found error"), + } + } +} diff --git a/fendermint/actors/blobs/src/state/blobs/tests.rs b/fendermint/actors/blobs/src/state/blobs/tests.rs new file mode 100644 index 0000000000..ad4b1bf5f9 --- /dev/null +++ b/fendermint/actors/blobs/src/state/blobs/tests.rs @@ -0,0 +1,1057 @@ +// Copyright 2025 Recall Contributors +// Copyright 2022-2024 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT + +use fendermint_actor_blobs_shared::{ + accounts::AccountStatus, + blobs::{BlobStatus, SubscriptionId}, + credit::Credit, +}; +use fendermint_actor_blobs_testing::{ + new_address, new_hash, new_metadata_hash, new_pk, setup_logs, +}; +use fendermint_actor_ipc_storage_config_shared::IPCStorageConfig; +use fvm_ipld_blockstore::{Blockstore, MemoryBlockstore}; +use fvm_shared::{address::Address, bigint::BigInt, clock::ChainEpoch, econ::TokenAmount}; +use num_traits::Zero; + +use super::{ + AddBlobStateParams, DeleteBlobStateParams, FinalizeBlobStateParams, SetPendingBlobStateParams, +}; +use crate::{caller::DelegationOptions, testing::check_approval_used, State}; + +#[test] +fn test_add_blob_refund() { + setup_logs(); + let config = IPCStorageConfig::default(); + let store = MemoryBlockstore::default(); + let mut state = State::new(&store).unwrap(); + let caller = new_address(); + let current_epoch = ChainEpoch::from(1); + let token_amount = TokenAmount::from_whole(10); + state + .buy_credit(&store, &config, caller, token_amount.clone(), current_epoch) + .unwrap(); + add_blob_refund( + &config, + &store, + state, + caller, + None, + current_epoch, + token_amount, + false, + ); +} + +#[test] +fn test_add_blob_refund_with_approval() { + setup_logs(); + let config = IPCStorageConfig::default(); + let store = MemoryBlockstore::default(); + let mut state = State::new(&store).unwrap(); + let caller = new_address(); + let sponsor = new_address(); + let current_epoch = ChainEpoch::from(1); + let token_amount = TokenAmount::from_whole(10); + state + .buy_credit( + &store, + &config, + sponsor, + token_amount.clone(), + current_epoch, + ) + .unwrap(); + state + .approve_credit( + &config, + &store, + sponsor, + caller, + DelegationOptions::default(), + current_epoch, + ) + .unwrap(); + add_blob_refund( + &config, + &store, + state, + caller, + Some(sponsor), + current_epoch, + token_amount, + true, + ); +} + +#[allow(clippy::too_many_arguments)] +fn add_blob_refund( + config: &IPCStorageConfig, + store: &BS, + mut state: State, + caller: Address, + sponsor: Option
, + current_epoch: ChainEpoch, + token_amount: TokenAmount, + using_approval: bool, +) { + let subscriber = sponsor.unwrap_or(caller); + let token_credit_rate = BigInt::from(1_000_000_000_000_000_000u64); + let mut credit_amount = token_amount.clone() * &config.token_credit_rate; + + // Add blob with default a subscription ID + let (hash1, size1) = new_hash(1024); + let add1_epoch = current_epoch; + let id1 = SubscriptionId::default(); + let source = new_pk(); + let res = state.add_blob( + &store, + config, + caller, + sponsor, + AddBlobStateParams { + hash: hash1, + metadata_hash: new_metadata_hash(), + id: id1.clone(), + size: size1, + ttl: Some(config.blob_min_ttl), + source, + epoch: add1_epoch, + token_amount: TokenAmount::zero(), + }, + ); + assert!(res.is_ok()); + + // Check stats + let stats = state.get_stats(config, TokenAmount::zero()); + assert_eq!(stats.num_blobs, 1); + assert_eq!(stats.num_resolving, 0); + assert_eq!(stats.bytes_resolving, 0); + assert_eq!(stats.num_added, 1); + assert_eq!(stats.bytes_added, size1); + + // Check the account balance + let account = state.get_account(&store, subscriber).unwrap().unwrap(); + assert_eq!(account.last_debit_epoch, add1_epoch); + assert_eq!( + account.credit_committed, + Credit::from_whole(config.blob_min_ttl as u64 * size1), + ); + credit_amount -= &account.credit_committed; + assert_eq!(account.credit_free, credit_amount); + assert_eq!(account.capacity_used, size1); + + assert!(state + .set_account_status( + &store, + config, + subscriber, + AccountStatus::Extended, + current_epoch + ) + .is_ok()); + + // Add another blob past the first blob's expiry + let (hash2, size2) = new_hash(2048); + let add2_epoch = ChainEpoch::from(config.blob_min_ttl + 11); + let id2 = SubscriptionId::new("foo").unwrap(); + let source = new_pk(); + let res = state.add_blob( + &store, + config, + caller, + sponsor, + AddBlobStateParams { + hash: hash2, + metadata_hash: new_metadata_hash(), + id: id2.clone(), + size: size2, + ttl: Some(config.blob_min_ttl), + source, + epoch: add2_epoch, + token_amount: TokenAmount::zero(), + }, + ); + assert!(res.is_ok()); + + // Check stats + let stats = state.get_stats(config, TokenAmount::zero()); + assert_eq!(stats.num_blobs, 2); + assert_eq!(stats.num_resolving, 0); + assert_eq!(stats.bytes_resolving, 0); + assert_eq!(stats.num_added, 2); + assert_eq!(stats.bytes_added, size1 + size2); + + // Check the account balance + let account = state.get_account(&store, subscriber).unwrap().unwrap(); + assert_eq!(account.last_debit_epoch, add2_epoch); + let blob1_expiry = ChainEpoch::from(config.blob_min_ttl + add1_epoch); + let overcharge = BigInt::from((add2_epoch - blob1_expiry) as u64 * size1); + assert_eq!( + account.credit_committed, // this includes an overcharge that needs to be refunded + Credit::from_whole(config.blob_min_ttl as u64 * size2 - overcharge), + ); + credit_amount -= Credit::from_whole(config.blob_min_ttl as u64 * size2); + assert_eq!(account.credit_free, credit_amount); + assert_eq!(account.capacity_used, size1 + size2); + + // Check state + assert_eq!(state.credits.credit_committed, account.credit_committed); + assert_eq!( + state.credits.credit_debited, + (token_amount.clone() * &token_credit_rate) + - (&account.credit_free + &account.credit_committed) + ); + assert_eq!(state.blobs.bytes_size(), account.capacity_used); + + // Check indexes + assert_eq!(state.blobs.expiries.len(store).unwrap(), 2); + assert_eq!(state.blobs.added.len(), 2); + assert_eq!(state.blobs.pending.len(), 0); + + // Add the first 
(now expired) blob again + let add3_epoch = ChainEpoch::from(config.blob_min_ttl + 21); + let id1 = SubscriptionId::default(); + let source = new_pk(); + let res = state.add_blob( + &store, + config, + caller, + sponsor, + AddBlobStateParams { + hash: hash1, + metadata_hash: new_metadata_hash(), + id: id1.clone(), + size: size1, + ttl: Some(config.blob_min_ttl), + source, + epoch: add3_epoch, + token_amount: TokenAmount::zero(), + }, + ); + assert!(res.is_ok()); + + // Check stats + let stats = state.get_stats(config, TokenAmount::zero()); + assert_eq!(stats.num_blobs, 2); + assert_eq!(stats.num_resolving, 0); + assert_eq!(stats.bytes_resolving, 0); + assert_eq!(stats.num_added, 2); + assert_eq!(stats.bytes_added, size1 + size2); + + // Check the account balance + let account = state.get_account(&store, subscriber).unwrap().unwrap(); + assert_eq!(account.last_debit_epoch, add3_epoch); + assert_eq!( + account.credit_committed, // should not include overcharge due to refund + Credit::from_whole( + (config.blob_min_ttl - (add3_epoch - add2_epoch)) as u64 * size2 + + config.blob_min_ttl as u64 * size1 + ), + ); + credit_amount -= Credit::from_whole(config.blob_min_ttl as u64 * size1); + assert_eq!(account.credit_free, credit_amount); + assert_eq!(account.capacity_used, size1 + size2); + + // Check state + assert_eq!(state.credits.credit_committed, account.credit_committed); + assert_eq!( + state.credits.credit_debited, + token_amount.clone() * &token_credit_rate + - (&account.credit_free + &account.credit_committed) + ); + assert_eq!(state.blobs.bytes_size(), account.capacity_used); + + // Check indexes + assert_eq!(state.blobs.expiries.len(store).unwrap(), 2); + assert_eq!(state.blobs.added.len(), 2); + assert_eq!(state.blobs.pending.len(), 0); + + // Check approval + if using_approval { + check_approval_used(&state, store, caller, subscriber); + } +} + +#[test] +fn test_add_blob_same_hash_same_account() { + setup_logs(); + let config = IPCStorageConfig::default(); + let store = MemoryBlockstore::default(); + let mut state = State::new(&store).unwrap(); + let caller = new_address(); + let current_epoch = ChainEpoch::from(1); + let token_amount = TokenAmount::from_whole(10); + state + .buy_credit(&store, &config, caller, token_amount.clone(), current_epoch) + .unwrap(); + add_blob_same_hash_same_account( + &config, + &store, + state, + caller, + None, + current_epoch, + token_amount, + false, + ); +} + +#[test] +fn test_add_blob_same_hash_same_account_with_approval() { + setup_logs(); + let config = IPCStorageConfig::default(); + let store = MemoryBlockstore::default(); + let mut state = State::new(&store).unwrap(); + let caller = new_address(); + let sponsor = new_address(); + let current_epoch = ChainEpoch::from(1); + let token_amount = TokenAmount::from_whole(10); + state + .buy_credit( + &store, + &config, + sponsor, + token_amount.clone(), + current_epoch, + ) + .unwrap(); + state + .approve_credit( + &config, + &store, + sponsor, + caller, + DelegationOptions::default(), + current_epoch, + ) + .unwrap(); + add_blob_same_hash_same_account( + &config, + &store, + state, + caller, + Some(sponsor), + current_epoch, + token_amount, + true, + ); +} + +#[allow(clippy::too_many_arguments)] +fn add_blob_same_hash_same_account( + config: &IPCStorageConfig, + store: &BS, + mut state: State, + caller: Address, + sponsor: Option
, + current_epoch: ChainEpoch, + token_amount: TokenAmount, + using_approval: bool, +) { + let subscriber = sponsor.unwrap_or(caller); + let mut credit_amount = + Credit::from_atto(token_amount.atto().clone()) * &config.token_credit_rate; + + assert!(state + .set_account_status( + &store, + config, + subscriber, + AccountStatus::Extended, + current_epoch + ) + .is_ok()); + + // Add a blob with a default subscription ID + let (hash, size) = new_hash(1024); + let add1_epoch = current_epoch; + let id1 = SubscriptionId::default(); + let source = new_pk(); + let res = state.add_blob( + &store, + config, + caller, + sponsor, + AddBlobStateParams { + hash, + metadata_hash: new_metadata_hash(), + id: id1.clone(), + size, + ttl: Some(config.blob_min_ttl), + source, + epoch: add1_epoch, + token_amount: TokenAmount::zero(), + }, + ); + assert!(res.is_ok()); + let (sub, _) = res.unwrap(); + assert_eq!(sub.added, add1_epoch); + assert_eq!(sub.expiry, add1_epoch + config.blob_min_ttl); + assert_eq!(sub.source, source); + assert!(!sub.failed); + if subscriber != caller { + assert_eq!(sub.delegate, Some(caller)); + } + + // Check stats + let stats = state.get_stats(config, TokenAmount::zero()); + assert_eq!(stats.num_blobs, 1); + assert_eq!(stats.num_resolving, 0); + assert_eq!(stats.bytes_resolving, 0); + assert_eq!(stats.num_added, 1); + assert_eq!(stats.bytes_added, size); + + // Check the blob status + assert_eq!( + state + .get_blob_status(&store, subscriber, hash, id1.clone()) + .unwrap(), + Some(BlobStatus::Added) + ); + + // Check the blob + let blob = state.get_blob(&store, hash).unwrap().unwrap(); + let subscribers = blob.subscribers.hamt(store).unwrap(); + assert_eq!(blob.subscribers.len(), 1); + assert_eq!(blob.status, BlobStatus::Added); + assert_eq!(blob.size, size); + + // Check the subscription group + let group = subscribers.get(&subscriber).unwrap().unwrap(); + let group_hamt = group.hamt(store).unwrap(); + assert_eq!(group.len(), 1); + let got_sub = group_hamt.get(&id1.clone()).unwrap().unwrap(); + assert_eq!(got_sub, sub); + + // Check the account balance + let account = state.get_account(&store, subscriber).unwrap().unwrap(); + assert_eq!(account.last_debit_epoch, add1_epoch); + assert_eq!( + account.credit_committed, + Credit::from_whole(config.blob_min_ttl as u64 * size), + ); + credit_amount -= &account.credit_committed; + assert_eq!(account.credit_free, credit_amount); + assert_eq!(account.capacity_used, size); + + // Set to status pending + let res = state.set_blob_pending( + &store, + subscriber, + SetPendingBlobStateParams { + hash, + size, + id: id1.clone(), + source, + }, + ); + assert!(res.is_ok()); + + // Check stats + let stats = state.get_stats(config, TokenAmount::zero()); + assert_eq!(stats.num_blobs, 1); + assert_eq!(stats.num_resolving, 1); + assert_eq!(stats.bytes_resolving, size); + assert_eq!(stats.num_added, 0); + assert_eq!(stats.bytes_added, 0); + + // Finalize as resolved + let finalize_epoch = ChainEpoch::from(11); + let res = state.finalize_blob( + &store, + subscriber, + FinalizeBlobStateParams { + source, + hash, + size, + id: id1.clone(), + status: BlobStatus::Resolved, + epoch: finalize_epoch, + }, + ); + assert!(res.is_ok()); + assert_eq!( + state + .get_blob_status(&store, subscriber, hash, id1.clone()) + .unwrap(), + Some(BlobStatus::Resolved) + ); + + // Check stats + let stats = state.get_stats(config, TokenAmount::zero()); + assert_eq!(stats.num_blobs, 1); + assert_eq!(stats.num_resolving, 0); + assert_eq!(stats.bytes_resolving, 0); + 
assert_eq!(stats.num_added, 0); + assert_eq!(stats.bytes_added, 0); + + // Add the same blob again with a default subscription ID + let add2_epoch = ChainEpoch::from(21); + let source = new_pk(); + let res = state.add_blob( + &store, + config, + caller, + sponsor, + AddBlobStateParams { + hash, + metadata_hash: new_metadata_hash(), + id: id1.clone(), + size, + ttl: Some(config.blob_min_ttl), + source, + epoch: add2_epoch, + token_amount: TokenAmount::zero(), + }, + ); + assert!(res.is_ok()); + let (sub, _) = res.unwrap(); + assert_eq!(sub.added, add1_epoch); // added should not change + assert_eq!(sub.expiry, add2_epoch + config.blob_min_ttl); + assert_eq!(sub.source, source); + assert!(!sub.failed); + if subscriber != caller { + assert_eq!(sub.delegate, Some(caller)); + } + + // Check the blob status + // Should already be resolved + assert_eq!( + state + .get_blob_status(&store, subscriber, hash, id1.clone()) + .unwrap(), + Some(BlobStatus::Resolved) + ); + + // Check the blob + let blob = state.get_blob(&store, hash).unwrap().unwrap(); + let subscribers = blob.subscribers.hamt(store).unwrap(); + assert_eq!(blob.subscribers.len(), 1); + assert_eq!(blob.status, BlobStatus::Resolved); + assert_eq!(blob.size, size); + + // Check the subscription group + let group = subscribers.get(&subscriber).unwrap().unwrap(); + let group_hamt = group.hamt(store).unwrap(); + assert_eq!(group.len(), 1); // Still only one subscription + let got_sub = group_hamt.get(&id1.clone()).unwrap().unwrap(); + assert_eq!(got_sub, sub); + + // Check the account balance + let account = state.get_account(&store, subscriber).unwrap().unwrap(); + assert_eq!(account.last_debit_epoch, add2_epoch); + assert_eq!( + account.credit_committed, // stays the same because we're starting over + Credit::from_whole(config.blob_min_ttl as u64 * size), + ); + credit_amount -= Credit::from_whole((add2_epoch - add1_epoch) as u64 * size); + assert_eq!(account.credit_free, credit_amount); + assert_eq!(account.capacity_used, size); // not changed + + assert_eq!(state.blobs.expiries.len(store).unwrap(), 1); + assert_eq!(state.blobs.added.len(), 0); + assert_eq!(state.blobs.pending.len(), 0); + + // Add the same blob again but use a different subscription ID + let add3_epoch = ChainEpoch::from(31); + let id2 = SubscriptionId::new("foo").unwrap(); + let source = new_pk(); + let res = state.add_blob( + &store, + config, + caller, + sponsor, + AddBlobStateParams { + hash, + metadata_hash: new_metadata_hash(), + id: id2.clone(), + size, + ttl: Some(config.blob_min_ttl), + source, + epoch: add3_epoch, + token_amount: TokenAmount::zero(), + }, + ); + assert!(res.is_ok()); + let (sub, _) = res.unwrap(); + assert_eq!(sub.added, add3_epoch); + assert_eq!(sub.expiry, add3_epoch + config.blob_min_ttl); + assert_eq!(sub.source, source); + assert!(!sub.failed); + if subscriber != caller { + assert_eq!(sub.delegate, Some(caller)); + } + + // Check stats + let stats = state.get_stats(config, TokenAmount::zero()); + assert_eq!(stats.num_blobs, 1); + assert_eq!(stats.num_resolving, 0); + assert_eq!(stats.bytes_resolving, 0); + assert_eq!(stats.num_added, 0); + assert_eq!(stats.bytes_added, 0); + + // Check the blob status + // Should already be resolved + assert_eq!( + state + .get_blob_status(&store, subscriber, hash, id2.clone()) + .unwrap(), + Some(BlobStatus::Resolved) + ); + + // Check the blob + let blob = state.get_blob(&store, hash).unwrap().unwrap(); + let subscribers = blob.subscribers.hamt(store).unwrap(); + assert_eq!(blob.subscribers.len(), 1); // still only one subscriber
+ assert_eq!(blob.status, BlobStatus::Resolved); + assert_eq!(blob.size, size); + + // Check the subscription group + let group = subscribers.get(&subscriber).unwrap().unwrap(); + let group_hamt = group.hamt(store).unwrap(); + assert_eq!(group.len(), 2); + let got_sub = group_hamt.get(&id2.clone()).unwrap().unwrap(); + assert_eq!(got_sub, sub); + + // Check the account balance + let account = state.get_account(&store, subscriber).unwrap().unwrap(); + assert_eq!(account.last_debit_epoch, add3_epoch); + assert_eq!( + account.credit_committed, // stays the same because we're starting over + Credit::from_whole(config.blob_min_ttl as u64 * size), + ); + credit_amount -= Credit::from_whole((add3_epoch - add2_epoch) as u64 * size); + assert_eq!(account.credit_free, credit_amount); + assert_eq!(account.capacity_used, size); // not changed + + // Debit all accounts + let debit_epoch = ChainEpoch::from(41); + let (deletes_from_disc, _) = state.debit_accounts(&store, config, debit_epoch).unwrap(); + assert!(deletes_from_disc.is_empty()); + + // Check the account balance + let account = state.get_account(&store, subscriber).unwrap().unwrap(); + assert_eq!(account.last_debit_epoch, debit_epoch); + assert_eq!( + account.credit_committed, // debit reduces this + Credit::from_whole((config.blob_min_ttl - (debit_epoch - add3_epoch)) as u64 * size), + ); + assert_eq!(account.credit_free, credit_amount); // not changed + assert_eq!(account.capacity_used, size); // not changed + + // Check indexes + assert_eq!(state.blobs.expiries.len(store).unwrap(), 2); + assert_eq!(state.blobs.added.len(), 0); + assert_eq!(state.blobs.pending.len(), 0); + + // Delete the default subscription ID + let delete_epoch = ChainEpoch::from(51); + let res = state.delete_blob( + &store, + caller, + sponsor, + DeleteBlobStateParams { + hash, + id: id1.clone(), + epoch: delete_epoch, + skip_credit_return: false, + }, + ); + + assert!(res.is_ok()); + let (delete_from_disk, deleted_size, _) = res.unwrap(); + assert!(!delete_from_disk); + assert_eq!(deleted_size, size); + + // Check the blob + let blob = state.get_blob(&store, hash).unwrap().unwrap(); + let subscribers = blob.subscribers.hamt(store).unwrap(); + + assert_eq!(blob.subscribers.len(), 1); // still one subscriber + assert_eq!(blob.status, BlobStatus::Resolved); + assert_eq!(blob.size, size); + + // Check the subscription group + let group = subscribers.get(&subscriber).unwrap().unwrap(); + let group_hamt = group.hamt(store).unwrap(); + assert_eq!(group.len(), 1); + let sub = group_hamt.get(&id2.clone()).unwrap().unwrap(); + assert_eq!(sub.added, add3_epoch); + assert_eq!(sub.expiry, add3_epoch + config.blob_min_ttl); + + // Check the account balance + let account = state.get_account(&store, subscriber).unwrap().unwrap(); + assert_eq!(account.last_debit_epoch, delete_epoch); + assert_eq!( + account.credit_committed, // debit reduces this + Credit::from_whole((config.blob_min_ttl - (delete_epoch - add3_epoch)) as u64 * size), + ); + assert_eq!(account.credit_free, credit_amount); // not changed + assert_eq!(account.capacity_used, size); // not changed + + // Check state + assert_eq!(state.credits.credit_committed, account.credit_committed); + assert_eq!( + state.credits.credit_debited, + (token_amount.clone() * &config.token_credit_rate) + - (&account.credit_free + &account.credit_committed) + ); + assert_eq!(state.blobs.bytes_size(), size); + + // Check indexes + assert_eq!(state.blobs.expiries.len(store).unwrap(), 1); + 
assert_eq!(state.blobs.added.len(), 0); + assert_eq!(state.blobs.pending.len(), 0); + + // Check approval + if using_approval { + check_approval_used(&state, store, caller, subscriber); + } +} + +#[test] +fn test_add_blob_ttl_exceeds_account_max_ttl() { + setup_logs(); + + let config = IPCStorageConfig::default(); + const YEAR: ChainEpoch = 365 * 24 * 60 * 60; + + // Test cases structure + struct TestCase { + name: &'static str, + account_ttl_status: AccountStatus, + blob_ttl: Option, + should_succeed: bool, + expected_account_ttl: ChainEpoch, + expected_blob_ttl: ChainEpoch, + } + + // Define test cases + let test_cases = vec![ + TestCase { + name: "Reduced status rejects even minimum TTL", + account_ttl_status: AccountStatus::Reduced, + blob_ttl: Some(config.blob_min_ttl), + should_succeed: false, + expected_account_ttl: 0, + expected_blob_ttl: 0, + }, + TestCase { + name: "Reduced status rejects no TTL", + account_ttl_status: AccountStatus::Reduced, + blob_ttl: Some(config.blob_min_ttl), + should_succeed: false, + expected_account_ttl: 0, + expected_blob_ttl: 0, + }, + TestCase { + name: "Default status allows default TTL", + account_ttl_status: AccountStatus::Default, + blob_ttl: Some(config.blob_default_ttl), + should_succeed: true, + expected_account_ttl: config.blob_default_ttl, + expected_blob_ttl: config.blob_default_ttl, + }, + TestCase { + name: "Default status sets no TTL to default without auto renew", + account_ttl_status: AccountStatus::Default, + blob_ttl: None, + should_succeed: true, + expected_account_ttl: config.blob_default_ttl, + expected_blob_ttl: config.blob_default_ttl, + }, + TestCase { + name: "Default status preserves given TTL if it's less than default", + account_ttl_status: AccountStatus::Default, + blob_ttl: Some(config.blob_default_ttl - 1), + should_succeed: true, + expected_account_ttl: config.blob_default_ttl, + expected_blob_ttl: config.blob_default_ttl - 1, + }, + TestCase { + name: "Default status rejects TTLs higher than default", + account_ttl_status: AccountStatus::Default, + blob_ttl: Some(config.blob_default_ttl + 1), + should_succeed: false, + expected_account_ttl: config.blob_default_ttl, + expected_blob_ttl: 0, + }, + TestCase { + name: "Extended status allows any TTL", + account_ttl_status: AccountStatus::Extended, + blob_ttl: Some(YEAR), + should_succeed: true, + expected_account_ttl: ChainEpoch::MAX, + expected_blob_ttl: YEAR, + }, + ]; + + // Run all test cases + for tc in test_cases { + let config = IPCStorageConfig::default(); + let store = MemoryBlockstore::default(); + let mut state = State::new(&store).unwrap(); + let caller = new_address(); + let current_epoch = ChainEpoch::from(1); + let amount = TokenAmount::from_whole(10); + + state + .buy_credit(&store, &config, caller, amount.clone(), current_epoch) + .unwrap(); + state + .set_account_status( + &store, + &config, + caller, + tc.account_ttl_status, + current_epoch, + ) + .unwrap(); + + let (hash, size) = new_hash(1024); + let res = state.add_blob( + &store, + &config, + caller, + None, + AddBlobStateParams { + hash, + metadata_hash: new_metadata_hash(), + id: SubscriptionId::default(), + size, + ttl: tc.blob_ttl, + source: new_pk(), + epoch: current_epoch, + token_amount: TokenAmount::zero(), + }, + ); + + let account_ttl = state.get_account_max_ttl(&config, &store, caller).unwrap(); + assert_eq!( + account_ttl, tc.expected_account_ttl, + "Test case '{}' has unexpected account TTL (expected {}, got {})", + tc.name, tc.expected_account_ttl, account_ttl + ); + + if 
tc.should_succeed { + assert!( + res.is_ok(), + "Test case '{}' should succeed but failed: {:?}", + tc.name, + res.err() + ); + + let res = state.get_blob(&store, hash); + assert!(res.is_ok(), "Failed to get blob: {:?}", res.err()); + let blob = res.unwrap().unwrap(); + let subscribers = blob.subscribers.hamt(&store).unwrap(); + subscribers + .for_each(|_, group| { + let group_hamt = group.hamt(&store).unwrap(); + for val in group_hamt.iter() { + let (_, sub) = val.unwrap(); + assert_eq!( + sub.expiry, + current_epoch + tc.expected_blob_ttl, + "Test case '{}' has unexpected blob expiry", + tc.name + ); + } + Ok(()) + }) + .unwrap(); + } else { + assert!( + res.is_err(), + "Test case '{}' should fail but succeeded", + tc.name + ); + assert_eq!( + res.err().unwrap().msg(), + format!( + "attempt to add a blob with TTL ({}) that exceeds account's max allowed TTL ({})", + tc.blob_ttl.map_or_else(|| "none".to_string(), |ttl| ttl.to_string()), tc.account_ttl_status.get_max_ttl(config.blob_default_ttl), + ), + "Test case '{}' failed with unexpected error message", + tc.name + ); + } + } +} + +#[test] +fn test_add_blob_with_overflowing_ttl() { + setup_logs(); + let config = IPCStorageConfig::default(); + let store = MemoryBlockstore::default(); + let mut state = State::new(&store).unwrap(); + let caller = new_address(); + let current_epoch = ChainEpoch::from(1); + let amount = TokenAmount::from_whole(1000000); + state + .buy_credit(&store, &config, caller, amount.clone(), current_epoch) + .unwrap(); + + let res = state.set_account_status( + &store, + &config, + caller, + AccountStatus::Extended, + current_epoch, + ); + assert!(res.is_ok()); + + let (hash, size) = new_hash(1024); + let res = state.add_blob( + &store, + &config, + caller, + None, + AddBlobStateParams { + hash, + metadata_hash: new_metadata_hash(), + id: SubscriptionId::default(), + size, + ttl: Some(ChainEpoch::MAX), + source: new_pk(), + epoch: current_epoch, + token_amount: TokenAmount::zero(), + }, + ); + assert!(res.is_ok()); + let (sub, _) = res.unwrap(); + assert_eq!(sub.expiry, ChainEpoch::MAX); +} + +#[test] +fn test_finalize_blob_from_bad_state() { + setup_logs(); + let config = IPCStorageConfig::default(); + let store = MemoryBlockstore::default(); + let mut state = State::new(&store).unwrap(); + let caller = new_address(); + let current_epoch = ChainEpoch::from(1); + let amount = TokenAmount::from_whole(10); + state + .buy_credit(&store, &config, caller, amount.clone(), current_epoch) + .unwrap(); + + // Add a blob + let (hash, size) = new_hash(1024); + let source = new_pk(); + let res = state.add_blob( + &store, + &config, + caller, + None, + AddBlobStateParams { + hash, + metadata_hash: new_metadata_hash(), + id: SubscriptionId::default(), + size, + ttl: None, + source, + epoch: current_epoch, + token_amount: TokenAmount::zero(), + }, + ); + assert!(res.is_ok()); + + // Finalize as pending + let finalize_epoch = ChainEpoch::from(11); + let res = state.finalize_blob( + &store, + caller, + FinalizeBlobStateParams { + source, + hash, + size, + id: SubscriptionId::default(), + status: BlobStatus::Pending, + epoch: finalize_epoch, + }, + ); + assert!(res.is_err()); + assert_eq!( + res.err().unwrap().msg(), + format!("cannot finalize blob {} as added or pending", hash) + ); +} + +#[test] +fn test_finalize_blob_resolved() { + setup_logs(); + let config = IPCStorageConfig::default(); + let store = MemoryBlockstore::default(); + let mut state = State::new(&store).unwrap(); + let caller = new_address(); + let current_epoch = 
ChainEpoch::from(1); + let amount = TokenAmount::from_whole(10); + state + .buy_credit(&store, &config, caller, amount.clone(), current_epoch) + .unwrap(); + + // Add a blob + let (hash, size) = new_hash(1024); + let source = new_pk(); + let res = state.add_blob( + &store, + &config, + caller, + None, + AddBlobStateParams { + hash, + metadata_hash: new_metadata_hash(), + id: SubscriptionId::default(), + size, + ttl: None, + source, + epoch: current_epoch, + token_amount: TokenAmount::zero(), + }, + ); + assert!(res.is_ok()); + + // Set to status pending + let res = state.set_blob_pending( + &store, + caller, + SetPendingBlobStateParams { + hash, + size, + id: SubscriptionId::default(), + source, + }, + ); + assert!(res.is_ok()); + + // Finalize as resolved + let finalize_epoch = ChainEpoch::from(11); + let res = state.finalize_blob( + &store, + caller, + FinalizeBlobStateParams { + source, + hash, + size, + id: SubscriptionId::default(), + status: BlobStatus::Resolved, + epoch: finalize_epoch, + }, + ); + assert!(res.is_ok()); + + // Check status + let status = state + .get_blob_status(&store, caller, hash, SubscriptionId::default()) + .unwrap() + .unwrap(); + assert!(matches!(status, BlobStatus::Resolved)); + + // Check indexes + assert_eq!(state.blobs.expiries.len(&store).unwrap(), 1); + assert_eq!(state.blobs.added.len(), 0); + assert_eq!(state.blobs.pending.len(), 0); +} diff --git a/fendermint/actors/blobs/src/state/credit.rs b/fendermint/actors/blobs/src/state/credit.rs new file mode 100644 index 0000000000..9201a386d6 --- /dev/null +++ b/fendermint/actors/blobs/src/state/credit.rs @@ -0,0 +1,26 @@ +// Copyright 2025 Recall Contributors +// Copyright 2021-2023 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT + +use fendermint_actor_blobs_shared::credit::Credit; +use fvm_ipld_encoding::tuple::*; + +mod approvals; +mod methods; +mod params; +#[cfg(test)] +mod tests; + +pub use approvals::*; +pub use params::*; + +/// Global credit-related state. +#[derive(Debug, Clone, Default, Serialize_tuple, Deserialize_tuple)] +pub struct Credits { + /// The total number of credits sold in the subnet. + pub credit_sold: Credit, + /// The total number of credits committed to active storage in the subnet. + pub credit_committed: Credit, + /// The total number of credits debited in the subnet. + pub credit_debited: Credit, +} diff --git a/fendermint/actors/blobs/src/state/credit/approvals.rs b/fendermint/actors/blobs/src/state/credit/approvals.rs new file mode 100644 index 0000000000..8a777cd538 --- /dev/null +++ b/fendermint/actors/blobs/src/state/credit/approvals.rs @@ -0,0 +1,54 @@ +// Copyright 2025 Recall Contributors +// Copyright 2021-2023 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT + +use fendermint_actor_blobs_shared::credit::CreditApproval; +use fil_actors_runtime::ActorError; +use fvm_ipld_blockstore::Blockstore; +use fvm_ipld_encoding::tuple::*; +use fvm_shared::address::Address; +use ipc_storage_ipld::{hamt, hamt::map::TrackedFlushResult}; + +/// HAMT wrapper tracking [`CreditApproval`]s by account address. +#[derive(Debug, Clone, PartialEq, Serialize_tuple, Deserialize_tuple)] +pub struct Approvals { + /// The HAMT root. + pub root: hamt::Root, + /// The size of the collection. + size: u64, +} + +impl Approvals { + /// Returns a approval collection. + pub fn new(store: &BS) -> Result { + let root = hamt::Root::::new(store, "credit_approvals")?; + Ok(Self { root, size: 0 }) + } + + /// Returns the underlying [`hamt::map::Hamt`]. 
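+ ///
+ /// Illustrative usage sketch (not compiled; a blockstore `store` and a
+ /// delegate `Address` named `to` are assumed to be in scope):
+ ///
+ /// ```ignore
+ /// let approvals = Approvals::new(&store)?;
+ /// let map = approvals.hamt(&store)?;
+ /// let maybe_approval = map.get(&to)?; // -> Option<CreditApproval>
+ /// ```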
+ pub fn hamt<'a, BS: Blockstore>( + &self, + store: BS, + ) -> Result, ActorError> { + self.root.hamt(store, self.size) + } + + /// Saves the state from the [`TrackedFlushResult`]. + pub fn save_tracked( + &mut self, + tracked_flush_result: TrackedFlushResult, + ) { + self.root = tracked_flush_result.root; + self.size = tracked_flush_result.size + } + + /// The size of the collection. + pub fn len(&self) -> u64 { + self.size + } + + /// Returns true if the collection is empty. + pub fn is_empty(&self) -> bool { + self.size == 0 + } +} diff --git a/fendermint/actors/blobs/src/state/credit/methods.rs b/fendermint/actors/blobs/src/state/credit/methods.rs new file mode 100644 index 0000000000..e0ce13a8e4 --- /dev/null +++ b/fendermint/actors/blobs/src/state/credit/methods.rs @@ -0,0 +1,300 @@ +// Copyright 2025 Recall Contributors +// Copyright 2021-2023 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT + +use fendermint_actor_blobs_shared::credit::{Credit, CreditApproval, GasAllowance}; +use fendermint_actor_ipc_storage_config_shared::IPCStorageConfig; +use fil_actors_runtime::ActorError; +use fvm_ipld_blockstore::Blockstore; +use fvm_shared::{address::Address, clock::ChainEpoch, econ::TokenAmount, error::ExitCode}; +use ipc_storage_ipld::hamt; + +use super::CommitCapacityParams; +use crate::{ + caller::{Caller, Delegation, DelegationOptions}, + state::accounts::Account, + State, +}; + +/// Returns an error if the amount is negative. +pub fn ensure_positive_amount(amount: &TokenAmount) -> Result<(), ActorError> { + if amount.is_negative() { + return Err(ActorError::illegal_argument( + "amount must be positive".into(), + )); + } + Ok(()) +} + +impl State { + /// Buys credit for an account. + /// Flushes state to the blockstore. + pub fn buy_credit( + &mut self, + store: &BS, + config: &IPCStorageConfig, + to: Address, + value: TokenAmount, + current_epoch: ChainEpoch, + ) -> Result { + self.ensure_capacity(config.blob_capacity)?; + ensure_positive_amount(&value)?; + + let mut accounts = self.accounts.hamt(store)?; + let mut caller = Caller::load_or_create( + store, + &accounts, + to, + None, + current_epoch, + config.blob_default_ttl, + )?; + + let amount: Credit = value.clone() * &config.token_credit_rate; + caller.add_allowances(&amount, &value); + + // Update global state + self.credits.credit_sold += &amount; + + // Save caller + self.save_caller(&mut caller, &mut accounts)?; + + Ok(caller.subscriber().clone()) + } + + /// Sets the default credit and gas fee sponsor for an account. + /// Flushes state to the blockstore. + pub fn set_account_sponsor( + &mut self, + config: &IPCStorageConfig, + store: &BS, + from: Address, + sponsor: Option
, + current_epoch: ChainEpoch, + ) -> Result<(), ActorError> { + let mut accounts = self.accounts.hamt(store)?; + let mut caller = Caller::load_or_create( + store, + &accounts, + from, + None, + current_epoch, + config.blob_default_ttl, + )?; + + caller.set_default_sponsor(sponsor); + + // Save caller + self.save_caller(&mut caller, &mut accounts) + } + + /// Updates (adds/removes) gas allowance for an account. + /// Flushes state to the blockstore. + pub fn update_gas_allowance( + &mut self, + store: &BS, + from: Address, + sponsor: Option
, + add_amount: TokenAmount, + current_epoch: ChainEpoch, + ) -> Result<(), ActorError> { + let mut accounts = self.accounts.hamt(store)?; + let mut caller = Caller::load(store, &accounts, from, sponsor)?; + + caller.update_gas_allowance(&add_amount, current_epoch)?; + + // Save caller + self.save_caller(&mut caller, &mut accounts) + } + + /// Approves credit and gas allowance spend from one account to another. + /// Flushes state to the blockstore. + pub fn approve_credit( + &mut self, + config: &IPCStorageConfig, + store: &BS, + from: Address, + to: Address, + options: DelegationOptions, + current_epoch: ChainEpoch, + ) -> Result { + let mut accounts = self.accounts.hamt(store)?; + let mut delegation = Delegation::update_or_create( + store, + config, + &accounts, + from, + to, + options, + current_epoch, + )?; + + // Save delegation + self.save_delegation(&mut delegation, &mut accounts)?; + + Ok(delegation.approval().clone()) + } + + /// Revokes credit and gas allowance spend from one account to another. + /// Flushes state to the blockstore. + pub fn revoke_credit( + &mut self, + store: &BS, + from: Address, + to: Address, + ) -> Result<(), ActorError> { + let mut accounts = self.accounts.hamt(store)?; + let mut caller = Caller::load(store, &accounts, to, Some(from))?; + + caller.cancel_delegation(&mut accounts)?; + + // Save caller + self.save_caller(&mut caller, &mut accounts) + } + + /// Returns a [`CreditApproval`] from the given address to the given address + /// or [`None`] if no approval exists. + pub fn get_credit_approval( + &self, + store: &BS, + from: Address, + to: Address, + ) -> Result, ActorError> { + let accounts = self.accounts.hamt(store)?; + let caller = Caller::load(store, &accounts, to, Some(from))?; + Ok(caller.delegate_approval().cloned()) + } + + /// Returns the gas allowance for the given address, including an amount from a default sponsor. + /// An error returned from this method would be fatal, as it's called from the FVM executor. + pub fn get_gas_allowance( + &self, + store: &BS, + from: Address, + current_epoch: ChainEpoch, + ) -> Result { + let accounts = self.accounts.hamt(store)?; + let allowance = Caller::load_with_default_sponsor(store, &accounts, from) + .map(|caller| caller.gas_allowance(current_epoch)) + .unwrap_or_default(); + Ok(allowance) + } + + /// Debits credit from the caller. + /// Does NOT flush the state to the blockstore. + pub(crate) fn debit_caller( + &mut self, + caller: &mut Caller, + current_epoch: ChainEpoch, + ) { + let amount = self.get_debit_for_caller(caller, current_epoch); + caller.debit_credit(&amount, current_epoch); + + // Update global state + self.credits.credit_debited += &amount; + self.credits.credit_committed -= &amount; + } + + /// Commits new capacity for the caller. + /// The caller may pay for capacity with free credit or token value. + /// Does NOT flush the state to the blockstore. 
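+ ///
+ /// If the subscriber's free credit cannot cover the cost and a non-zero token
+ /// value was supplied, the shortfall is bought automatically (this is rejected
+ /// when the caller is acting as a delegate for a sponsor), and the unspent
+ /// token value is returned. Roughly, using the same names as the body below
+ /// (illustrative sketch, not compiled):
+ ///
+ /// ```ignore
+ /// let remainder = &params.cost - &caller.subscriber().credit_free; // credit shortfall
+ /// let value_required = &remainder / &config.token_credit_rate;     // tokens needed to buy it
+ /// let value_remaining = &params.value - &value_required;           // must not be negative
+ /// ```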
+ pub(crate) fn commit_capacity_for_caller( + &mut self, + caller: &mut Caller, + config: &IPCStorageConfig, + params: CommitCapacityParams, + ) -> Result { + ensure_positive_amount(¶ms.cost)?; + ensure_positive_amount(¶ms.value)?; + + let value_remaining = match caller.commit_capacity(params.size, ¶ms.cost, params.epoch) + { + Ok(()) => Ok(params.value.clone()), + Err(e) => { + // Buy credit to cover the amount + if e.exit_code() == ExitCode::USR_INSUFFICIENT_FUNDS && !params.value.is_zero() { + if caller.is_delegate() { + return Err(ActorError::forbidden( + "cannot auto-buy credits for a sponsor".into(), + )); + } + + let remainder: Credit = ¶ms.cost - &caller.subscriber().credit_free; + let value_required = &remainder / &config.token_credit_rate; + let value_remaining = ¶ms.value - &value_required; + if value_remaining.is_negative() { + return Err(ActorError::insufficient_funds(format!( + "insufficient value (received: {}; required: {})", + params.value, value_required + ))); + } + caller.add_allowances(&remainder, &value_required); + + // Update global state + self.credits.credit_sold += &remainder; + + // Try again + caller.commit_capacity(params.size, ¶ms.cost, params.epoch)?; + Ok(value_remaining) + } else { + Err(e) + } + } + }?; + + // Update global state + self.credits.credit_committed += ¶ms.cost; + + Ok(value_remaining) + } + + /// Releases capacity for the caller. + /// Does NOT flush the state to the blockstore. + pub(crate) fn release_capacity_for_caller( + &mut self, + caller: &mut Caller, + size: u64, + cost: &Credit, + ) { + caller.release_capacity(size, cost); + + // Update global state + self.credits.credit_committed -= cost; + } + + /// Returns committed credit to the caller. + /// Does NOT flush the state to the blockstore. + pub(crate) fn return_committed_credit_for_caller( + &mut self, + caller: &mut Caller, + amount: &Credit, + ) { + caller.return_committed_credit(amount); + + // Update global state + self.credits.credit_debited -= amount; + self.credits.credit_committed += amount; + } + + /// Save the caller state to the accounts HAMT. + pub(crate) fn save_caller<'a, BS: Blockstore>( + &mut self, + caller: &mut Caller<'a, BS>, + accounts: &mut hamt::map::Hamt<'a, &'a BS, Address, Account>, + ) -> Result<(), ActorError> { + caller.save(accounts)?; + self.accounts.save_tracked(accounts.flush_tracked()?); + Ok(()) + } + + /// Save the delegation state to the accounts HAMT. + pub(crate) fn save_delegation<'a, BS: Blockstore>( + &mut self, + delegation: &mut Delegation<'a, &'a BS>, + accounts: &mut hamt::map::Hamt<'a, &'a BS, Address, Account>, + ) -> Result<(), ActorError> { + delegation.save(accounts)?; + self.accounts.save_tracked(accounts.flush_tracked()?); + Ok(()) + } +} diff --git a/fendermint/actors/blobs/src/state/credit/params.rs b/fendermint/actors/blobs/src/state/credit/params.rs new file mode 100644 index 0000000000..a38d0647ee --- /dev/null +++ b/fendermint/actors/blobs/src/state/credit/params.rs @@ -0,0 +1,19 @@ +// Copyright 2025 Recall Contributors +// Copyright 2021-2023 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT + +use fendermint_actor_blobs_shared::credit::Credit; +use fvm_shared::{clock::ChainEpoch, econ::TokenAmount}; + +/// Params for committing capacity. +#[derive(Debug)] +pub struct CommitCapacityParams { + /// Commitment size for caller. + pub size: u64, + /// Commitment cost. + pub cost: Credit, + /// Token amount available to commitment. + pub value: TokenAmount, + /// Commitment chain epoch. 
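+ /// Assumed to be the epoch at which the commitment is made; it is passed
+ /// through unchanged to the caller state (see `commit_capacity_for_caller`).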
+ pub epoch: ChainEpoch, +} diff --git a/fendermint/actors/blobs/src/state/credit/tests.rs b/fendermint/actors/blobs/src/state/credit/tests.rs new file mode 100644 index 0000000000..696f944595 --- /dev/null +++ b/fendermint/actors/blobs/src/state/credit/tests.rs @@ -0,0 +1,377 @@ +// Copyright 2025 Recall Contributors +// Copyright 2022-2024 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT + +use fendermint_actor_blobs_shared::{ + blobs::SubscriptionId, + credit::{Credit, CreditApproval}, +}; +use fendermint_actor_blobs_testing::{ + new_address, new_hash, new_metadata_hash, new_pk, setup_logs, +}; +use fendermint_actor_ipc_storage_config_shared::IPCStorageConfig; +use fvm_ipld_blockstore::MemoryBlockstore; +use fvm_shared::{address::Address, clock::ChainEpoch, econ::TokenAmount}; +use num_traits::Zero; + +use crate::{caller::DelegationOptions, state::blobs::AddBlobStateParams, State}; + +fn check_approvals_match( + state: &State, + store: &MemoryBlockstore, + from: Address, + to: Address, + expected: CreditApproval, +) { + let from_account = state.get_account(&store, from).unwrap().unwrap(); + assert_eq!( + from_account + .approvals_to + .hamt(store) + .unwrap() + .get(&to) + .unwrap() + .unwrap(), + expected + ); + let to_account = state.get_account(&store, to).unwrap().unwrap(); + assert_eq!( + to_account + .approvals_from + .hamt(store) + .unwrap() + .get(&from) + .unwrap() + .unwrap(), + expected + ); +} + +#[test] +fn test_buy_credit_success() { + setup_logs(); + let config = IPCStorageConfig::default(); + let store = MemoryBlockstore::default(); + let mut state = State::new(&store).unwrap(); + let to = new_address(); + let amount = TokenAmount::from_whole(1); + + let res = state.buy_credit(&store, &config, to, amount.clone(), 1); + assert!(res.is_ok()); + let account = res.unwrap(); + let credit_sold = amount.clone() * &config.token_credit_rate; + assert_eq!(account.credit_free, credit_sold); + assert_eq!(account.gas_allowance, amount); + assert_eq!(state.credits.credit_sold, credit_sold); + let account_back = state.get_account(&store, to).unwrap().unwrap(); + assert_eq!(account, account_back); +} + +#[test] +fn test_buy_credit_negative_amount() { + setup_logs(); + let config = IPCStorageConfig::default(); + let store = MemoryBlockstore::default(); + let mut state = State::new(&store).unwrap(); + let to = new_address(); + let amount = TokenAmount::from_whole(-1); + + let res = state.buy_credit(&store, &config, to, amount, 1); + assert!(res.is_err()); + assert_eq!(res.err().unwrap().msg(), "amount must be positive"); +} + +#[test] +fn test_buy_credit_at_capacity() { + setup_logs(); + let config = IPCStorageConfig::default(); + let store = MemoryBlockstore::default(); + let mut state = State::new(&store).unwrap(); + let to = new_address(); + let amount = TokenAmount::from_whole(1); + + state.blobs.set_capacity(config.blob_capacity); + let res = state.buy_credit(&store, &config, to, amount, 1); + assert!(res.is_err()); + assert_eq!( + res.err().unwrap().msg(), + "subnet has reached storage capacity" + ); +} + +#[test] +fn test_approve_credit_success() { + setup_logs(); + let store = MemoryBlockstore::default(); + let mut state = State::new(&store).unwrap(); + let from = new_address(); + let to = new_address(); + let current_epoch = 1; + + let config = IPCStorageConfig::default(); + + // No limit or expiry + let res = state.approve_credit( + &config, + &store, + from, + to, + DelegationOptions::default(), + current_epoch, + ); + assert!(res.is_ok()); + let approval = 
res.unwrap(); + assert_eq!(approval.credit_limit, None); + assert_eq!(approval.gas_allowance_limit, None); + assert_eq!(approval.expiry, None); + check_approvals_match(&state, &store, from, to, approval); + + // Add credit limit + let limit = 1_000_000_000_000_000_000u64; + let res = state.approve_credit( + &config, + &store, + from, + to, + DelegationOptions { + credit_limit: Some(Credit::from_whole(limit)), + ..Default::default() + }, + current_epoch, + ); + assert!(res.is_ok()); + let approval = res.unwrap(); + assert_eq!(approval.credit_limit, Some(Credit::from_whole(limit))); + assert_eq!(approval.gas_allowance_limit, None); + assert_eq!(approval.expiry, None); + check_approvals_match(&state, &store, from, to, approval); + + // Add gas fee limit + let limit = 1_000_000_000_000_000_000u64; + let res = state.approve_credit( + &config, + &store, + from, + to, + DelegationOptions { + gas_fee_limit: Some(TokenAmount::from_atto(limit)), + ..Default::default() + }, + current_epoch, + ); + assert!(res.is_ok()); + let approval = res.unwrap(); + assert_eq!(approval.credit_limit, None); + assert_eq!( + approval.gas_allowance_limit, + Some(TokenAmount::from_atto(limit)) + ); + assert_eq!(approval.expiry, None); + check_approvals_match(&state, &store, from, to, approval); + + // Add ttl + let ttl = ChainEpoch::from(config.blob_min_ttl); + let res = state.approve_credit( + &config, + &store, + from, + to, + DelegationOptions { + credit_limit: Some(Credit::from_whole(limit)), + ttl: Some(ttl), + ..Default::default() + }, + current_epoch, + ); + assert!(res.is_ok()); + let approval = res.unwrap(); + assert_eq!(approval.credit_limit, Some(Credit::from_whole(limit))); + assert_eq!(approval.gas_allowance_limit, None); + assert_eq!(approval.expiry, Some(ttl + current_epoch)); + check_approvals_match(&state, &store, from, to, approval); +} + +#[test] +fn test_approve_credit_invalid_ttl() { + setup_logs(); + let store = MemoryBlockstore::default(); + let mut state = State::new(&store).unwrap(); + let from = new_address(); + let to = new_address(); + let current_epoch = 1; + + let config = IPCStorageConfig::default(); + let ttl = ChainEpoch::from(config.blob_min_ttl - 1); + let res = state.approve_credit( + &config, + &store, + from, + to, + DelegationOptions { + ttl: Some(ttl), + ..Default::default() + }, + current_epoch, + ); + assert!(res.is_err()); + assert_eq!( + res.err().unwrap().msg(), + format!("minimum approval TTL is {}", config.blob_min_ttl) + ); +} + +#[test] +fn test_approve_credit_overflowing_ttl() { + setup_logs(); + let store = MemoryBlockstore::default(); + let mut state = State::new(&store).unwrap(); + let from = new_address(); + let to = new_address(); + let current_epoch = 1; + + let config = IPCStorageConfig::default(); + + let res = state.approve_credit( + &config, + &store, + from, + to, + DelegationOptions { + ttl: Some(ChainEpoch::MAX), + ..Default::default() + }, + current_epoch, + ); + assert!(res.is_ok()); + let approval = res.unwrap(); + assert_eq!(approval.expiry, Some(i64::MAX)); +} + +#[test] +fn test_approve_credit_insufficient_credit() { + setup_logs(); + let config = IPCStorageConfig::default(); + let store = MemoryBlockstore::default(); + let mut state = State::new(&store).unwrap(); + let from = new_address(); + let to = new_address(); + let current_epoch = 1; + + let amount = TokenAmount::from_whole(10); + state + .buy_credit(&store, &config, from, amount.clone(), current_epoch) + .unwrap(); + let res = state.approve_credit( + &config, + &store, + from, + to, + 
DelegationOptions::default(), + current_epoch, + ); + assert!(res.is_ok()); + + // Add a blob + let (hash, size) = new_hash(1024); + let res = state.add_blob( + &store, + &config, + to, + Some(from), + AddBlobStateParams { + hash, + metadata_hash: new_metadata_hash(), + id: SubscriptionId::default(), + size, + ttl: None, + source: new_pk(), + epoch: current_epoch, + token_amount: TokenAmount::zero(), + }, + ); + assert!(res.is_ok()); + + // Check approval + let account = state.get_account(&store, from).unwrap().unwrap(); + let approval = account + .approvals_to + .hamt(&store) + .unwrap() + .get(&to) + .unwrap() + .unwrap(); + assert_eq!(account.credit_committed, approval.credit_used); + + // Try to update approval with a limit below what's already been committed + let limit = 1_000u64; + let res = state.approve_credit( + &config, + &store, + from, + to, + DelegationOptions { + credit_limit: Some(Credit::from_whole(limit)), + ..Default::default() + }, + current_epoch, + ); + assert!(res.is_err()); + assert_eq!( + res.err().unwrap().msg(), + format!( + "limit cannot be less than amount of already used credits ({})", + approval.credit_used + ) + ); +} + +#[test] +fn test_revoke_credit_success() { + setup_logs(); + let store = MemoryBlockstore::default(); + let mut state = State::new(&store).unwrap(); + let from = new_address(); + let to = new_address(); + let current_epoch = 1; + + let config = IPCStorageConfig::default(); + let res = state.approve_credit( + &config, + &store, + from, + to, + DelegationOptions::default(), + current_epoch, + ); + assert!(res.is_ok()); + + // Check the account approvals + let from_account = state.get_account(&store, from).unwrap().unwrap(); + assert_eq!(from_account.approvals_to.len(), 1); + let to_account = state.get_account(&store, to).unwrap().unwrap(); + assert_eq!(to_account.approvals_from.len(), 1); + + // Remove the approval + let res = state.revoke_credit(&store, from, to); + assert!(res.is_ok()); + let from_account = state.get_account(&store, from).unwrap().unwrap(); + assert_eq!(from_account.approvals_to.len(), 0); + let to_account = state.get_account(&store, to).unwrap().unwrap(); + assert_eq!(to_account.approvals_from.len(), 0); +} + +#[test] +fn test_revoke_credit_account_not_found() { + setup_logs(); + let store = MemoryBlockstore::default(); + let mut state = State::new(&store).unwrap(); + let from = new_address(); + let to = new_address(); + + let res = state.revoke_credit(&store, from, to); + assert!(res.is_err()); + assert_eq!( + res.err().unwrap().msg(), + format!("{} not found in accounts", to) + ); +} diff --git a/fendermint/actors/blobs/src/state/operators.rs b/fendermint/actors/blobs/src/state/operators.rs new file mode 100644 index 0000000000..a7f8d962fc --- /dev/null +++ b/fendermint/actors/blobs/src/state/operators.rs @@ -0,0 +1,401 @@ +// Copyright 2025 Recall Contributors +// Copyright 2021-2023 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT + +use fil_actors_runtime::fvm_ipld_hamt::{BytesKey, Config, Hamt, Sha256}; +use fil_actors_runtime::ActorError; +use fvm_ipld_blockstore::Blockstore; +use fvm_ipld_encoding::tuple::*; +use fvm_shared::address::Address; +use fvm_shared::clock::ChainEpoch; +use ipc_storage_ipld::hamt::{self, map::TrackedFlushResult}; + +pub use cid::Cid; + +/// Default HAMT configuration for pubkey mapping +const PUBKEY_HAMT_CONFIG: Config = Config { + bit_width: 5, + min_data_depth: 0, + max_array_width: 3, +}; + +/// Information about a registered node operator +#[derive(Clone, Debug, PartialEq, 
Serialize_tuple, Deserialize_tuple)] +pub struct NodeOperatorInfo { + /// BLS public key (48 bytes) + pub bls_pubkey: Vec, + + /// RPC URL for gateway to query signatures + pub rpc_url: String, + + /// Epoch when operator registered + pub registered_epoch: ChainEpoch, + + /// Whether operator is active + pub active: bool, +} + +/// Registry of node operators +#[derive(Debug, Serialize_tuple, Deserialize_tuple)] +pub struct Operators { + /// HAMT root: Address → NodeOperatorInfo + pub root: hamt::Root, + + /// HAMT root CID: BLS public key (BytesKey) → Address + /// Used for fast uniqueness check during registration + /// Uses fvm_ipld_hamt directly to avoid Display constraint + pub pubkey_to_addr: Cid, + + /// Ordered list of active operator addresses + /// Index in this vec = bit position in bitmap for signature aggregation + pub active_list: Vec
, + + /// Total number of registered operators + size: u64, + + /// Total number of entries in pubkey_to_addr HAMT + pubkey_size: u64, +} + +impl Operators { + /// Creates a new empty [`Operators`] registry + pub fn new(store: &BS) -> Result { + let root = hamt::Root::::new(store, "operators")?; + // Create empty pubkey HAMT using fvm_ipld_hamt directly with explicit config + let mut pubkey_hamt: Hamt<&BS, Address, BytesKey, Sha256> = + Hamt::new_with_config(store, PUBKEY_HAMT_CONFIG); + let pubkey_to_addr = pubkey_hamt.flush().map_err(|e| { + ActorError::illegal_state(format!("failed to flush pubkey HAMT: {}", e)) + })?; + Ok(Self { + root, + pubkey_to_addr, + active_list: Vec::new(), + size: 0, + pubkey_size: 0, + }) + } + + /// Returns the underlying [`hamt::map::Hamt`] for operators + pub fn hamt<'a, BS: Blockstore>( + &self, + store: BS, + ) -> Result, ActorError> { + self.root.hamt(store, self.size) + } + + /// Returns the underlying fvm_ipld_hamt for pubkey → address mapping + pub fn pubkey_hamt( + &self, + store: BS, + ) -> Result, ActorError> { + Hamt::load_with_config(&self.pubkey_to_addr, store, PUBKEY_HAMT_CONFIG) + .map_err(|e| ActorError::illegal_state(format!("failed to load pubkey HAMT: {}", e))) + } + + /// Saves the state from the [`TrackedFlushResult`] for operators + pub fn save_tracked( + &mut self, + tracked_flush_result: TrackedFlushResult, + ) { + self.root = tracked_flush_result.root; + self.size = tracked_flush_result.size; + } + + /// Saves the pubkey HAMT root CID and updates size + pub fn save_pubkey(&mut self, cid: Cid, size_delta: i64) { + self.pubkey_to_addr = cid; + self.pubkey_size = (self.pubkey_size as i64 + size_delta) as u64; + } + + /// Returns the number of registered operators + pub fn len(&self) -> u64 { + self.size + } + + /// Returns true if there are no registered operators + pub fn is_empty(&self) -> bool { + self.size == 0 + } + + /// Register a new operator (adds to end of active_list) + /// Returns the operator's index in the active_list + pub fn register( + &mut self, + store: BS, + address: Address, + info: NodeOperatorInfo, + ) -> Result { + let mut hamt = self.hamt(store.clone())?; + + // Check if operator already exists + if hamt.get(&address)?.is_some() { + return Err(ActorError::illegal_argument( + "Operator already registered".into(), + )); + } + + // Check if BLS public key is already registered (O(log n) lookup) + let mut pubkey_hamt = self.pubkey_hamt(store)?; + let pubkey_key = BytesKey::from(info.bls_pubkey.clone()); + if pubkey_hamt + .get(&pubkey_key) + .map_err(|e| ActorError::illegal_state(format!("failed to get pubkey: {}", e)))? 
+ .is_some() + { + return Err(ActorError::illegal_argument( + "BLS public key already registered by another operator".into(), + )); + } + + // Add pubkey → address mapping + pubkey_hamt + .set(pubkey_key, address) + .map_err(|e| ActorError::illegal_state(format!("failed to set pubkey: {}", e)))?; + let pubkey_cid = pubkey_hamt.flush().map_err(|e| { + ActorError::illegal_state(format!("failed to flush pubkey HAMT: {}", e)) + })?; + self.save_pubkey(pubkey_cid, 1); + + // Add to operator HAMT + self.save_tracked(hamt.set_and_flush_tracked(&address, info)?); + + // Add to active list (gets next available index) + let index = self.active_list.len(); + self.active_list.push(address); + + Ok(index) + } + + /// Get operator info by address + pub fn get( + &self, + store: BS, + address: &Address, + ) -> Result, ActorError> { + self.hamt(store)?.get(address) + } + + /// Get operator index in active_list (for bitmap generation) + /// Returns None if operator is not in the active list + pub fn get_index(&self, address: &Address) -> Option { + self.active_list.iter().position(|a| a == address) + } + + /// Get all active operators in order + pub fn get_active_operators(&self) -> Vec
{ + self.active_list.clone() + } + + /// Update operator info (e.g., to change RPC URL or deactivate) + pub fn update( + &mut self, + store: BS, + address: &Address, + info: NodeOperatorInfo, + ) -> Result<(), ActorError> { + let mut hamt = self.hamt(store)?; + + // Check if operator exists + if hamt.get(address)?.is_none() { + return Err(ActorError::not_found("Operator not found".into())); + } + + // Update in HAMT + self.save_tracked(hamt.set_and_flush_tracked(address, info)?); + + Ok(()) + } + + /// Deactivate an operator (removes from active_list but keeps in HAMT) + /// Note: This will change indices of all operators after the removed one + pub fn deactivate( + &mut self, + store: BS, + address: &Address, + ) -> Result<(), ActorError> { + let mut hamt = self.hamt(store.clone())?; + + // Get existing info + let mut info = hamt + .get(address)? + .ok_or_else(|| ActorError::not_found("Operator not found".into()))?; + + // Remove pubkey → address mapping to allow re-registration with same pubkey + let mut pubkey_hamt = self.pubkey_hamt(store)?; + let pubkey_key = BytesKey::from(info.bls_pubkey.clone()); + pubkey_hamt + .delete(&pubkey_key) + .map_err(|e| ActorError::illegal_state(format!("failed to delete pubkey: {}", e)))?; + let pubkey_cid = pubkey_hamt.flush().map_err(|e| { + ActorError::illegal_state(format!("failed to flush pubkey HAMT: {}", e)) + })?; + self.save_pubkey(pubkey_cid, -1); + + // Mark as inactive + info.active = false; + self.save_tracked(hamt.set_and_flush_tracked(address, info)?); + + // Remove from active_list + if let Some(pos) = self.active_list.iter().position(|a| a == address) { + self.active_list.remove(pos); + } + + Ok(()) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use fvm_ipld_blockstore::MemoryBlockstore; + + fn new_test_address(id: u64) -> Address { + Address::new_id(id) + } + + fn new_test_operator(pubkey: u8) -> NodeOperatorInfo { + NodeOperatorInfo { + bls_pubkey: vec![pubkey; 48], + rpc_url: format!("http://operator{}.example.com:8080", pubkey), + registered_epoch: 0, + active: true, + } + } + + #[test] + fn test_register_operator() { + let store = MemoryBlockstore::default(); + let mut operators = Operators::new(&store).unwrap(); + + let addr1 = new_test_address(100); + let info1 = new_test_operator(1); + + let index = operators.register(&store, addr1, info1.clone()).unwrap(); + assert_eq!(index, 0); + assert_eq!(operators.len(), 1); + + let retrieved = operators.get(&store, &addr1).unwrap().unwrap(); + assert_eq!(retrieved, info1); + } + + #[test] + fn test_active_list_ordering() { + let store = MemoryBlockstore::default(); + let mut operators = Operators::new(&store).unwrap(); + + let addr1 = new_test_address(100); + let addr2 = new_test_address(101); + let addr3 = new_test_address(102); + + operators + .register(&store, addr1, new_test_operator(1)) + .unwrap(); + operators + .register(&store, addr2, new_test_operator(2)) + .unwrap(); + operators + .register(&store, addr3, new_test_operator(3)) + .unwrap(); + + assert_eq!(operators.get_index(&addr1), Some(0)); + assert_eq!(operators.get_index(&addr2), Some(1)); + assert_eq!(operators.get_index(&addr3), Some(2)); + + let active = operators.get_active_operators(); + assert_eq!(active, vec![addr1, addr2, addr3]); + } + + #[test] + fn test_duplicate_registration() { + let store = MemoryBlockstore::default(); + let mut operators = Operators::new(&store).unwrap(); + + let addr1 = new_test_address(100); + operators + .register(&store, addr1, new_test_operator(1)) + .unwrap(); + + let result = 
operators.register(&store, addr1, new_test_operator(2)); + assert!(result.is_err()); + } + + #[test] + fn test_duplicate_pubkey_registration() { + let store = MemoryBlockstore::default(); + let mut operators = Operators::new(&store).unwrap(); + + let addr1 = new_test_address(100); + let addr2 = new_test_address(101); + + // Register first operator with pubkey 1 + operators + .register(&store, addr1, new_test_operator(1)) + .unwrap(); + + // Try to register second operator with same pubkey - should fail + let result = operators.register(&store, addr2, new_test_operator(1)); + assert!(result.is_err()); + assert!(result + .unwrap_err() + .msg() + .contains("BLS public key already registered")); + } + + #[test] + fn test_pubkey_reuse_after_deactivation() { + let store = MemoryBlockstore::default(); + let mut operators = Operators::new(&store).unwrap(); + + let addr1 = new_test_address(100); + let addr2 = new_test_address(101); + + // Register first operator with pubkey 1 + operators + .register(&store, addr1, new_test_operator(1)) + .unwrap(); + + // Deactivate operator 1 + operators.deactivate(&store, &addr1).unwrap(); + + // Now registering with same pubkey from different address should succeed + let result = operators.register(&store, addr2, new_test_operator(1)); + assert!(result.is_ok()); + } + + #[test] + fn test_deactivate_operator() { + let store = MemoryBlockstore::default(); + let mut operators = Operators::new(&store).unwrap(); + + let addr1 = new_test_address(100); + let addr2 = new_test_address(101); + let addr3 = new_test_address(102); + + operators + .register(&store, addr1, new_test_operator(1)) + .unwrap(); + operators + .register(&store, addr2, new_test_operator(2)) + .unwrap(); + operators + .register(&store, addr3, new_test_operator(3)) + .unwrap(); + + // Deactivate middle operator + operators.deactivate(&store, &addr2).unwrap(); + + // Check active list updated + let active = operators.get_active_operators(); + assert_eq!(active, vec![addr1, addr3]); + + // Check indices shifted + assert_eq!(operators.get_index(&addr1), Some(0)); + assert_eq!(operators.get_index(&addr2), None); + assert_eq!(operators.get_index(&addr3), Some(1)); + + // Check still in HAMT but marked inactive + let info = operators.get(&store, &addr2).unwrap().unwrap(); + assert!(!info.active); + } +} diff --git a/fendermint/actors/blobs/src/testing.rs b/fendermint/actors/blobs/src/testing.rs new file mode 100644 index 0000000000..21df45bbef --- /dev/null +++ b/fendermint/actors/blobs/src/testing.rs @@ -0,0 +1,41 @@ +// Copyright 2025 Recall Contributors +// Copyright 2021-2023 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT + +use crate::State; +use fvm_ipld_blockstore::Blockstore; +use fvm_shared::address::Address; + +pub fn check_approval_used( + state: &State, + store: &BS, + caller: Address, + sponsor: Address, +) { + assert_ne!(caller, sponsor); + let subscriber_account = state.get_account(&store, sponsor).unwrap().unwrap(); + let subscriber_approval = subscriber_account + .approvals_to + .hamt(store) + .unwrap() + .get(&caller) + .unwrap() + .unwrap(); + assert_eq!( + subscriber_approval.credit_used, + state.credits.credit_debited.clone() + subscriber_account.credit_committed.clone() + ); + let origin_account = state.get_account(&store, caller).unwrap().unwrap(); + let origin_approval = origin_account + .approvals_from + .hamt(store) + .unwrap() + .get(&sponsor) + .unwrap() + .unwrap(); + assert_eq!( + subscriber_approval.credit_used, + &state.credits.credit_debited + 
&subscriber_account.credit_committed + ); + assert_eq!(subscriber_approval.credit_used, origin_approval.credit_used); +} diff --git a/fendermint/actors/blobs/testing/Cargo.toml b/fendermint/actors/blobs/testing/Cargo.toml new file mode 100644 index 0000000000..9c2ef0dbd3 --- /dev/null +++ b/fendermint/actors/blobs/testing/Cargo.toml @@ -0,0 +1,20 @@ +[package] +name = "fendermint_actor_blobs_testing" +description = "Test utils for blobs" +license.workspace = true +edition.workspace = true +authors.workspace = true +version = "0.1.0" + +[lib] +## lib is necessary for integration tests +## cdylib is necessary for Wasm build +crate-type = ["cdylib", "lib"] + +[dependencies] +fvm_shared = { workspace = true } +iroh-blobs = { workspace = true } +rand = { workspace = true } +tracing-subscriber = { workspace = true, features = ["env-filter"] } + +fendermint_actor_blobs_shared = { path = "../shared" } diff --git a/fendermint/actors/blobs/testing/src/lib.rs b/fendermint/actors/blobs/testing/src/lib.rs new file mode 100644 index 0000000000..a9cc46ea1e --- /dev/null +++ b/fendermint/actors/blobs/testing/src/lib.rs @@ -0,0 +1,66 @@ +// Copyright 2025 Recall Contributors +// Copyright 2021-2023 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT + +use fendermint_actor_blobs_shared::{blobs::SubscriptionId, bytes::B256}; +use fvm_shared::address::Address; +use rand::{distributions::Alphanumeric, Rng, RngCore}; + +pub fn setup_logs() { + use tracing_subscriber::layer::SubscriberExt; + use tracing_subscriber::util::SubscriberInitExt; + use tracing_subscriber::EnvFilter; + tracing_subscriber::registry() + .with( + tracing_subscriber::fmt::layer() + .event_format(tracing_subscriber::fmt::format().with_line_number(true)) + .with_writer(std::io::stdout), + ) + .with(EnvFilter::from_default_env()) + .try_init() + .ok(); +} + +pub fn new_hash(size: usize) -> (B256, u64) { + let mut rng = rand::thread_rng(); + let mut data = vec![0u8; size]; + rng.fill_bytes(&mut data); + (B256(*iroh_blobs::Hash::new(&data).as_bytes()), size as u64) +} + +pub fn new_hash_from_vec(buf: Vec) -> (B256, u64) { + ( + B256(*iroh_blobs::Hash::new(&buf).as_bytes()), + buf.len() as u64, + ) +} + +pub fn new_metadata_hash() -> B256 { + let mut rng = rand::thread_rng(); + let mut data = vec![0u8; 8]; + rng.fill_bytes(&mut data); + B256(*iroh_blobs::Hash::new(&data).as_bytes()) +} + +pub fn new_pk() -> B256 { + let mut rng = rand::thread_rng(); + let mut data = [0u8; 32]; + rng.fill_bytes(&mut data); + B256(data) +} + +pub fn new_address() -> Address { + let mut rng = rand::thread_rng(); + let mut data = vec![0u8; 32]; + rng.fill_bytes(&mut data); + Address::new_actor(&data) +} + +pub fn new_subscription_id(length: usize) -> SubscriptionId { + let str: String = rand::thread_rng() + .sample_iter(&Alphanumeric) + .take(length) + .map(char::from) + .collect(); + SubscriptionId::try_from(str).unwrap() +} diff --git a/fendermint/actors/bucket/Cargo.toml b/fendermint/actors/bucket/Cargo.toml new file mode 100644 index 0000000000..ea7a61933f --- /dev/null +++ b/fendermint/actors/bucket/Cargo.toml @@ -0,0 +1,43 @@ +[package] +name = "fendermint_actor_bucket" +description = "Actor for bucket object storage" +license.workspace = true +edition.workspace = true +authors.workspace = true +version = "0.1.0" + +[lib] +## lib is necessary for integration tests +## cdylib is necessary for Wasm build +crate-type = ["cdylib", "lib"] + +[dependencies] +anyhow = { workspace = true } +blake3 = { workspace = true } +cid = { workspace = true, 
default-features = false } +fil_actors_runtime = { workspace = true } +frc42_dispatch = { workspace = true } +fvm_ipld_blockstore = { workspace = true } +fvm_ipld_encoding = { workspace = true } +fvm_shared = { workspace = true } +num-derive = { workspace = true } +num-traits = { workspace = true } +ipc_storage_sol_facade = { workspace = true, features = ["bucket"] } +serde = { workspace = true, features = ["derive"] } + +fendermint_actor_blobs_shared = { path = "../blobs/shared" } +fendermint_actor_machine = { path = "../machine" } +ipc_storage_actor_sdk = { path = "../../../ipc-storage/actor_sdk" } +ipc_storage_ipld = { path = "../../../ipc-storage/ipld" } + +[dev-dependencies] +fil_actors_evm_shared = { workspace = true } +fil_actors_runtime = { workspace = true, features = ["test_utils"] } +hex-literal = { workspace = true } +quickcheck = { workspace = true } +quickcheck_macros = { workspace = true } + +fendermint_actor_blobs_testing = { path = "../blobs/testing" } + +[features] +fil-actor = ["fil_actors_runtime/fil-actor"] diff --git a/fendermint/actors/bucket/src/actor.rs b/fendermint/actors/bucket/src/actor.rs new file mode 100644 index 0000000000..8cacd059ca --- /dev/null +++ b/fendermint/actors/bucket/src/actor.rs @@ -0,0 +1,478 @@ +// Copyright 2025 Recall Contributors +// Copyright 2021-2023 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT + +use std::collections::HashMap; + +use fendermint_actor_blobs_shared::{ + blobs::{ + AddBlobParams, Blob, BlobStatus, DeleteBlobParams, GetBlobParams, OverwriteBlobParams, + SubscriptionId, + }, + sdk::{add_blob, delete_blob, get_blob, has_credit_approval, overwrite_blob}, +}; +use fendermint_actor_machine::MachineActor; +use fil_actors_runtime::{ + actor_dispatch_unrestricted, actor_error, + runtime::{ActorCode, Runtime}, + ActorError, +}; +use fvm_shared::address::Address; +use ipc_storage_actor_sdk::evm::{ + emit_evm_event, InputData, InvokeContractParams, InvokeContractReturn, +}; +use ipc_storage_ipld::hamt::BytesKey; + +use crate::shared::{ + AddParams, DeleteParams, GetParams, ListObjectsReturn, ListParams, Method, Object, + BUCKET_ACTOR_NAME, +}; +use crate::sol_facade as sol; +use crate::sol_facade::AbiCall; +use crate::state::{ObjectState, State}; +use crate::{ + UpdateObjectMetadataParams, MAX_METADATA_ENTRIES, MAX_METADATA_KEY_SIZE, + MAX_METADATA_VALUE_SIZE, +}; + +#[cfg(feature = "fil-actor")] +fil_actors_runtime::wasm_trampoline!(Actor); + +pub struct Actor; + +impl Actor { + /// Adds an object to a bucket. + /// + /// Access control will be enforced by the Blobs actor. + /// We will pass the bucket owner as the `subscriber`, + /// and the Blobs actor will enforce that the `from` address is either + /// the `subscriber` or has a valid credit delegation from the `subscriber`. + /// The `from` address must be the origin or the caller. + fn add_object(rt: &impl Runtime, params: AddParams) -> Result { + rt.validate_immediate_caller_accept_any()?; + + let from = rt.message().caller(); + + let state = rt.state::()?; + let sub_id = get_blob_id(&state, ¶ms.key)?; + let key = BytesKey(params.key.clone()); + + validate_metadata(¶ms.metadata)?; + + let sub = if let Some(object) = state.get(rt.store(), &key)? 
{ + // If we have existing blob and it's not expired + let expired = object.expiry <= rt.curr_epoch(); + if params.overwrite || expired { + // Overwrite if the flag is passed + overwrite_blob( + rt, + OverwriteBlobParams { + old_hash: object.hash, + add: AddBlobParams { + from, + sponsor: Some(state.owner), + source: params.source, + hash: params.hash, + metadata_hash: params.recovery_hash, + id: sub_id, + size: params.size, + ttl: params.ttl, + }, + }, + )? + } else { + // Return an error if no overwrite flag gets passed + return Err(ActorError::illegal_state( + "key exists; use overwrite".into(), + )); + } + } else { + // No object found, just a new blob + add_blob( + rt, + AddBlobParams { + from, + sponsor: Some(state.owner), + source: params.source, + hash: params.hash, + metadata_hash: params.recovery_hash, + id: sub_id, + size: params.size, + ttl: params.ttl, + }, + )? + }; + + rt.transaction(|st: &mut State, rt| { + st.add( + rt.store(), + key, + params.hash, + params.size, + sub.expiry, + params.metadata.clone(), + params.overwrite, + ) + })?; + + emit_evm_event( + rt, + sol::ObjectAdded::new(¶ms.key, ¶ms.hash, ¶ms.metadata), + )?; + + Ok(Object { + hash: params.hash, + recovery_hash: params.recovery_hash, + size: params.size, + expiry: sub.expiry, + metadata: params.metadata, + }) + } + + /// Deletes an object from a bucket. + /// + /// Access control will be enforced by the Blobs actor. + /// We will pass the bucket owner as the `subscriber`, + /// and the Blobs actor will enforce that the `from` address is either + /// the `subscriber` or has a valid credit delegation from the `subscriber`. + /// The `from` address must be the origin or the caller. + fn delete_object(rt: &impl Runtime, params: DeleteParams) -> Result<(), ActorError> { + rt.validate_immediate_caller_accept_any()?; + + let from = rt.message().caller(); + + let state = rt.state::()?; + let sub_id = get_blob_id(&state, ¶ms.0)?; + let key = BytesKey(params.0); + let object = state + .get(rt.store(), &key)? + .ok_or(ActorError::illegal_state("object not found".into()))?; + + // Delete blob for object + delete_blob( + rt, + DeleteBlobParams { + from, + sponsor: Some(state.owner), + hash: object.hash, + id: sub_id, + }, + )?; + + rt.transaction(|st: &mut State, rt| st.delete(rt.store(), &key))?; + + emit_evm_event(rt, sol::ObjectDeleted::new(&key, &object.hash))?; + + Ok(()) + } + + /// Returns an object. + fn get_object(rt: &impl Runtime, params: GetParams) -> Result, ActorError> { + rt.validate_immediate_caller_accept_any()?; + + let state = rt.state::()?; + let owner = state.owner; + let sub_id = get_blob_id(&state, ¶ms.0)?; + let key = BytesKey(params.0); + if let Some(object_state) = state.get(rt.store(), &key)? { + if let Some(blob) = get_blob(rt, GetBlobParams(object_state.hash))? { + let object = build_object(&blob, &object_state, sub_id, owner)?; + Ok(object) + } else { + Ok(None) + } + } else { + Ok(None) + } + } + + /// Lists bucket objects. 
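+    ///
+    /// Results are filtered by `prefix`, grouped by `delimiter` into common
+    /// prefixes, and paginated via `start_key` and `limit`. Objects whose expiry
+    /// is at or before the current epoch are skipped, and a `limit` of zero falls
+    /// back to the state-level maximum page size.
+    ///
+    /// Illustrative parameters (hypothetical values):
+    /// ```ignore
+    /// let params = ListParams {
+    ///     prefix: b"foo/".to_vec(),
+    ///     delimiter: b"/".to_vec(),
+    ///     start_key: None,
+    ///     limit: 0, // 0 means "use the default page size"
+    /// };
+    /// ```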
+ fn list_objects( + rt: &impl Runtime, + params: ListParams, + ) -> Result { + rt.validate_immediate_caller_accept_any()?; + + let current_epoch = rt.curr_epoch(); + let mut objects = Vec::new(); + let start_key = params.start_key.map(BytesKey::from); + let state = rt.state::()?; + let (prefixes, next_key) = state.list( + rt.store(), + params.prefix, + params.delimiter, + start_key.as_ref(), + params.limit, + |key: Vec, object_state: ObjectState| -> Result<(), ActorError> { + if object_state.expiry > current_epoch { + objects.push((key, object_state)); + } + Ok(()) + }, + )?; + + let next_key = next_key.map(|key| key.0); + + Ok(ListObjectsReturn { + objects, + next_key, + common_prefixes: prefixes, + }) + } + + /// Updates object metadata. + /// + /// Only the bucket owner or an account with a credit delegation + /// from the bucket owner can update object metadata. + /// The `from` address must be the origin or the caller. + fn update_object_metadata( + rt: &impl Runtime, + params: UpdateObjectMetadataParams, + ) -> Result<(), ActorError> { + rt.validate_immediate_caller_accept_any()?; + + let from = rt.message().caller(); + + let key = BytesKey(params.key.clone()); + let state = rt.state::()?; + let mut object = state + .get(rt.store(), &key)? + .ok_or(ActorError::illegal_state("object not found".into()))?; + + let bucket_owner = state.owner; + if !has_credit_approval(rt, bucket_owner, from)? { + return Err(actor_error!( + forbidden; + format!("Unauthorized: missing delegation from bucket owner {} to {}", bucket_owner, from))); + } + + validate_metadata_optional(¶ms.metadata)?; + + let metadata = rt.transaction(|st: &mut State, rt| { + for (key, val) in params.metadata { + match val { + Some(v) => { + object + .metadata + .entry(key) + .and_modify(|s| *s = v.clone()) + .or_insert(v); + } + None => { + object.metadata.remove(&key); + } + } + } + + if object.metadata.len() as u32 > MAX_METADATA_ENTRIES { + return Err(ActorError::illegal_state(format!( + "the maximum metadata entries allowed is {}", + MAX_METADATA_ENTRIES + ))); + } + + st.add( + rt.store(), + key, + object.hash, + object.size, + object.expiry, + object.metadata.clone(), + true, + )?; + + Ok(object.metadata) + })?; + + emit_evm_event(rt, sol::ObjectMetadataUpdated::new(¶ms.key, &metadata))?; + + Ok(()) + } + + fn invoke_contract( + rt: &impl Runtime, + params: InvokeContractParams, + ) -> Result { + let input_data: InputData = params.try_into()?; + if sol::can_handle(&input_data) { + let output_data = match sol::parse_input(&input_data)? 
{ + sol::Calls::addObject_0(call) => { + // function addObject(bytes32 source, string memory key, bytes32 hash, bytes32 recoveryHash, uint64 size) external; + let params = call.params(); + Self::add_object(rt, params)?; + call.returns(()) + } + sol::Calls::addObject_1(call) => { + // function addObject(AddObjectParams memory params) external; + let params = call.params(); + Self::add_object(rt, params)?; + call.returns(()) + } + sol::Calls::deleteObject(call) => { + // function deleteObject(string memory key) external; + let params = call.params(); + Self::delete_object(rt, params)?; + call.returns(()) + } + sol::Calls::getObject(call) => { + // function getObject(string memory key) external view returns (ObjectValue memory); + let params = call.params(); + let object = Self::get_object(rt, params)?; + call.returns(object) + } + sol::Calls::queryObjects_0(call) => { + // function queryObjects(string memory prefix, string memory delimiter, string memory startKey, uint64 limit) external view returns (Query memory); + let params = call.params(); + let list = Self::list_objects(rt, params)?; + call.returns(list) + } + sol::Calls::queryObjects_1(call) => { + // function queryObjects(string memory prefix, string memory delimiter, string memory startKey) external view returns (Query memory); + let params = call.params(); + let list = Self::list_objects(rt, params)?; + call.returns(list) + } + sol::Calls::queryObjects_2(call) => { + // function queryObjects(string memory prefix) external view returns (Query memory); + let params = call.params(); + let list = Self::list_objects(rt, params)?; + call.returns(list) + } + sol::Calls::queryObjects_3(call) => { + // function queryObjects() external view returns (Query memory); + let params = call.params(); + let list = Self::list_objects(rt, params)?; + call.returns(list) + } + sol::Calls::queryObjects_4(call) => { + // function queryObjects(string memory prefix, string memory delimiter) external view returns (Query memory); + let params = call.params(); + let list = Self::list_objects(rt, params)?; + call.returns(list) + } + sol::Calls::updateObjectMetadata(call) => { + // function updateObjectMetadata(string memory key, KeyValue[] memory metadata) external; + let params = call.params(); + Self::update_object_metadata(rt, params)?; + call.returns(()) + } + }; + Ok(InvokeContractReturn { output_data }) + } else { + Err(actor_error!(illegal_argument, "invalid call".to_string())) + } + } +} + +/// Returns a blob subscription ID specific to this machine and object key. +fn get_blob_id(state: &State, key: &[u8]) -> Result { + let mut data = state.address.get()?.payload_bytes(); + data.extend(key); + let id = blake3::hash(&data).to_hex().to_string(); + SubscriptionId::new(&id) +} + +/// Build an object from its state and blob. 
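+///
+/// Returns `Some(Object)` only when the blob status is `Resolved`; blobs that are
+/// still `Added` or `Pending`, or that have `Failed`, yield `None`. For resolved
+/// blobs, this also verifies that the bucket owner is subscribed to the blob under
+/// the derived subscription ID.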
+fn build_object( + blob: &Blob, + object_state: &ObjectState, + sub_id: SubscriptionId, + subscriber: Address, +) -> Result, ActorError> { + match blob.status { + BlobStatus::Resolved => { + blob.subscribers.get(&sub_id).cloned().ok_or_else(|| { + ActorError::illegal_state(format!( + "owner {} is not subscribed to blob {}; this should not happen", + subscriber, object_state.hash + )) + })?; + Ok(Some(Object { + hash: object_state.hash, + recovery_hash: blob.metadata_hash, + size: blob.size, + expiry: object_state.expiry, + metadata: object_state.metadata.clone(), + })) + } + BlobStatus::Added | BlobStatus::Pending | BlobStatus::Failed => Ok(None), + } +} + +fn validate_metadata(metadata: &HashMap) -> Result<(), ActorError> { + if metadata.len() as u32 > MAX_METADATA_ENTRIES { + return Err(ActorError::illegal_state(format!( + "the maximum metadata entries allowed is {}", + MAX_METADATA_ENTRIES + ))); + } + + for (key, value) in metadata { + if key.len() as u32 > MAX_METADATA_KEY_SIZE { + return Err(ActorError::illegal_state(format!( + "key must be less than or equal to {}", + MAX_METADATA_KEY_SIZE + ))); + } + + if value.is_empty() || value.len() as u32 > MAX_METADATA_VALUE_SIZE { + return Err(ActorError::illegal_state(format!( + "value must non-empty and less than or equal to {}", + MAX_METADATA_VALUE_SIZE + ))); + } + } + + Ok(()) +} + +fn validate_metadata_optional( + metadata: &HashMap>, +) -> Result<(), ActorError> { + for (key, value) in metadata { + if key.len() as u32 > MAX_METADATA_KEY_SIZE { + return Err(ActorError::illegal_state(format!( + "key must be less than or equal to {}", + MAX_METADATA_KEY_SIZE + ))); + } + + if let Some(value) = value { + if value.is_empty() || value.len() as u32 > MAX_METADATA_VALUE_SIZE { + return Err(ActorError::illegal_state(format!( + "value must non-empty and less than or equal to {}", + MAX_METADATA_VALUE_SIZE + ))); + } + } + } + + Ok(()) +} + +impl MachineActor for Actor { + type State = State; +} + +impl ActorCode for Actor { + type Methods = Method; + + fn name() -> &'static str { + BUCKET_ACTOR_NAME + } + + actor_dispatch_unrestricted! 
{ + Constructor => constructor, + Init => init, + GetAddress => get_address, + GetMetadata => get_metadata, + AddObject => add_object, + DeleteObject => delete_object, + GetObject => get_object, + ListObjects => list_objects, + UpdateObjectMetadata => update_object_metadata, + // EVM interop + InvokeContract => invoke_contract, + _ => fallback, + } +} diff --git a/fendermint/actors/bucket/src/lib.rs b/fendermint/actors/bucket/src/lib.rs new file mode 100644 index 0000000000..a784389323 --- /dev/null +++ b/fendermint/actors/bucket/src/lib.rs @@ -0,0 +1,10 @@ +// Copyright 2025 Recall Contributors +// Copyright 2021-2023 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT + +mod actor; +mod shared; +mod sol_facade; +mod state; + +pub use shared::*; diff --git a/fendermint/actors/bucket/src/shared.rs b/fendermint/actors/bucket/src/shared.rs new file mode 100644 index 0000000000..ad7f597b00 --- /dev/null +++ b/fendermint/actors/bucket/src/shared.rs @@ -0,0 +1,123 @@ +// Copyright 2025 Recall Contributors +// Copyright 2021-2023 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT + +use std::collections::HashMap; + +use fendermint_actor_blobs_shared::bytes::B256; +use fendermint_actor_machine::{ + GET_ADDRESS_METHOD, GET_METADATA_METHOD, INIT_METHOD, METHOD_CONSTRUCTOR, +}; +use fvm_ipld_encoding::{strict_bytes, tuple::*}; +use fvm_shared::clock::ChainEpoch; +use num_derive::FromPrimitive; +use serde::{Deserialize, Serialize}; + +pub use crate::state::{ObjectState, State}; + +pub const BUCKET_ACTOR_NAME: &str = "bucket"; +pub const MAX_METADATA_ENTRIES: u32 = 20; +pub const MAX_METADATA_KEY_SIZE: u32 = 32; +pub const MAX_METADATA_VALUE_SIZE: u32 = 128; + +#[derive(FromPrimitive)] +#[repr(u64)] +pub enum Method { + Constructor = METHOD_CONSTRUCTOR, + Init = INIT_METHOD, + GetAddress = GET_ADDRESS_METHOD, + GetMetadata = GET_METADATA_METHOD, + AddObject = frc42_dispatch::method_hash!("AddObject"), + DeleteObject = frc42_dispatch::method_hash!("DeleteObject"), + GetObject = frc42_dispatch::method_hash!("GetObject"), + ListObjects = frc42_dispatch::method_hash!("ListObjects"), + UpdateObjectMetadata = frc42_dispatch::method_hash!("UpdateObjectMetadata"), + // EVM Interop + InvokeContract = frc42_dispatch::method_hash!("InvokeEVM"), +} + +/// Params for adding an object. +#[derive(Clone, Debug, Serialize_tuple, Deserialize_tuple)] +pub struct AddParams { + /// Source Iroh node ID used for ingestion. + pub source: B256, + /// Object key. + #[serde(with = "strict_bytes")] + pub key: Vec, + /// Object blake3 hash. + pub hash: B256, + /// Blake3 hash of the metadata to use for object recovery. + pub recovery_hash: B256, + /// Object size. + pub size: u64, + /// Object time-to-live epochs. + /// If not specified, the current default TTL from the config actor is used. + pub ttl: Option, + /// Object metadata. + pub metadata: HashMap, + /// Whether to overwrite a key if it already exists. + pub overwrite: bool, +} + +/// Key of the object to delete from a bucket. +#[derive(Clone, Debug, Serialize, Deserialize)] +#[serde(transparent)] +pub struct DeleteParams(#[serde(with = "strict_bytes")] pub Vec); + +/// Params for getting an object. +#[derive(Clone, Debug, Serialize, Deserialize)] +#[serde(transparent)] +pub struct GetParams(#[serde(with = "strict_bytes")] pub Vec); + +/// Params for listing objects. +#[derive(Default, Debug, Serialize_tuple, Deserialize_tuple)] +pub struct ListParams { + /// The prefix to filter objects by. 
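+    /// An empty prefix matches all keys; otherwise only keys starting with these
+    /// exact bytes are returned.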
+ #[serde(with = "strict_bytes")] + pub prefix: Vec, + /// The delimiter used to define object hierarchy. + #[serde(with = "strict_bytes")] + pub delimiter: Vec, + /// The key to start listing objects from. + pub start_key: Option>, + /// The maximum number of objects to list. + pub limit: u64, +} + +/// The stored representation of an object in the bucket. +#[derive(Clone, Debug, PartialEq, Serialize_tuple, Deserialize_tuple)] +pub struct Object { + /// The object blake3 hash. + pub hash: B256, + /// Blake3 hash of the metadata to use for object recovery. + pub recovery_hash: B256, + /// The object size. + pub size: u64, + /// Expiry block. + pub expiry: ChainEpoch, + /// User-defined object metadata (e.g., last modified timestamp, etc.). + pub metadata: HashMap, +} + +/// A list of objects and their common prefixes. +#[derive(Default, Debug, Serialize_tuple, Deserialize_tuple)] +pub struct ListObjectsReturn { + /// List of key-values matching the list query. + pub objects: Vec<(Vec, ObjectState)>, + /// When a delimiter is used in the list query, this contains common key prefixes. + pub common_prefixes: Vec>, + /// Next key to use for paginating when there are more objects to list. + pub next_key: Option>, +} + +#[derive(Clone, Debug, Serialize_tuple, Deserialize_tuple)] +pub struct UpdateObjectMetadataParams { + /// Object key. + #[serde(with = "strict_bytes")] + pub key: Vec, + /// Object metadata to be inserted/updated/deleted. + /// + /// If a key-value is present, we'll update the entry (or insert if it does not exist) + /// If only the key is present, we will delete the metadata entry + pub metadata: HashMap>, +} diff --git a/fendermint/actors/bucket/src/sol_facade.rs b/fendermint/actors/bucket/src/sol_facade.rs new file mode 100644 index 0000000000..5c1fa00184 --- /dev/null +++ b/fendermint/actors/bucket/src/sol_facade.rs @@ -0,0 +1,413 @@ +// Copyright 2022-2024 Recall Contributors +// Copyright 2022-2024 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT + +use std::collections::HashMap; +use std::string::ToString; + +use anyhow::Error; +use fendermint_actor_blobs_shared::bytes::B256; +use fil_actors_runtime::{actor_error, ActorError}; +use fvm_shared::clock::ChainEpoch; +use ipc_storage_actor_sdk::{declare_abi_call, evm::TryIntoEVMEvent}; +pub use ipc_storage_sol_facade::bucket::Calls; +use ipc_storage_sol_facade::{ + bucket as sol, + types::{SolCall, SolInterface}, +}; +use num_traits::Zero; + +use crate::{ + AddParams, DeleteParams, GetParams, ListObjectsReturn, ListParams, Object, + UpdateObjectMetadataParams, +}; + +declare_abi_call!(); + +// ----- Events ----- // + +pub struct ObjectAdded<'a> { + pub key: &'a Vec, + pub blob_hash: &'a B256, + pub metadata: &'a HashMap, +} +impl<'a> ObjectAdded<'a> { + pub fn new( + key: &'a Vec, + blob_hash: &'a B256, + metadata: &'a HashMap, + ) -> Self { + Self { + key, + blob_hash, + metadata, + } + } +} +impl TryIntoEVMEvent for ObjectAdded<'_> { + type Target = sol::Events; + + fn try_into_evm_event(self) -> Result { + let metadata = fvm_ipld_encoding::to_vec(self.metadata)?; + Ok(sol::Events::ObjectAdded(sol::ObjectAdded { + key: self.key.clone().into(), + blobHash: self.blob_hash.0.into(), + metadata: metadata.into(), + })) + } +} + +pub struct ObjectMetadataUpdated<'a> { + pub key: &'a Vec, + pub metadata: &'a HashMap, +} +impl<'a> ObjectMetadataUpdated<'a> { + pub fn new(key: &'a Vec, metadata: &'a HashMap) -> Self { + Self { key, metadata } + } +} +impl TryIntoEVMEvent for ObjectMetadataUpdated<'_> { + type Target = 
sol::Events; + fn try_into_evm_event(self) -> Result { + let metadata = fvm_ipld_encoding::to_vec(self.metadata)?; + Ok(sol::Events::ObjectMetadataUpdated( + sol::ObjectMetadataUpdated { + key: self.key.clone().into(), + metadata: metadata.into(), + }, + )) + } +} + +pub struct ObjectDeleted<'a> { + pub key: &'a Vec, + pub blob_hash: &'a B256, +} +impl<'a> ObjectDeleted<'a> { + pub fn new(key: &'a Vec, blob_hash: &'a B256) -> Self { + Self { key, blob_hash } + } +} +impl TryIntoEVMEvent for ObjectDeleted<'_> { + type Target = sol::Events; + fn try_into_evm_event(self) -> Result { + Ok(sol::Events::ObjectDeleted(sol::ObjectDeleted { + key: self.key.clone().into(), + blobHash: self.blob_hash.0.into(), + })) + } +} + +// ----- Calls ----- // + +pub fn can_handle(input_data: &ipc_storage_actor_sdk::evm::InputData) -> bool { + Calls::valid_selector(input_data.selector()) +} + +pub fn parse_input(input: &ipc_storage_actor_sdk::evm::InputData) -> Result { + Calls::abi_decode_raw(input.selector(), input.calldata(), true) + .map_err(|e| actor_error!(illegal_argument, format!("invalid call: {}", e))) +} + +impl AbiCall for sol::addObject_0Call { + type Params = AddParams; + type Returns = (); + type Output = Vec; + + fn params(&self) -> Self::Params { + let source = B256(self.source.into()); + let key: Vec = self.key.clone().into_bytes(); + let hash = B256(self.hash.into()); + let recovery_hash = B256(self.recoveryHash.into()); + let size = self.size; + AddParams { + source, + key, + hash, + recovery_hash, + size, + ttl: None, + metadata: HashMap::default(), + overwrite: false, + } + } + + fn returns(&self, returns: Self::Returns) -> Self::Output { + Self::abi_encode_returns(&returns) + } +} + +impl AbiCall for sol::addObject_1Call { + type Params = AddParams; + type Returns = (); + type Output = Vec; + fn params(&self) -> Self::Params { + let source = B256(self.source.into()); + let key: Vec = self.key.clone().into_bytes(); + let hash = B256(self.hash.into()); + let recovery_hash = B256(self.recoveryHash.into()); + let size = self.size; + let ttl = if self.ttl.clone().is_zero() { + None + } else { + Some(self.ttl as ChainEpoch) + }; + let mut metadata: HashMap = HashMap::with_capacity(self.metadata.len()); + for kv in self.metadata.iter().cloned() { + metadata.insert(kv.key, kv.value); + } + let overwrite = self.overwrite; + AddParams { + source, + key, + hash, + recovery_hash, + size, + ttl, + metadata, + overwrite, + } + } + fn returns(&self, returns: Self::Returns) -> Self::Output { + Self::abi_encode_returns(&returns) + } +} + +impl AbiCall for sol::deleteObjectCall { + type Params = DeleteParams; + type Returns = (); + type Output = Vec; + + fn params(&self) -> Self::Params { + let key: Vec = self.key.clone().into_bytes(); + DeleteParams(key) + } + + fn returns(&self, returns: Self::Returns) -> Self::Output { + Self::abi_encode_returns(&returns) + } +} + +impl AbiCall for sol::getObjectCall { + type Params = GetParams; + type Returns = Option; + type Output = Vec; + + fn params(&self) -> Self::Params { + let key = self.key.clone().into_bytes(); + GetParams(key) + } + + fn returns(&self, returns: Self::Returns) -> Self::Output { + let object = returns + .map(|object| sol::ObjectValue { + blobHash: object.hash.0.into(), + recoveryHash: object.recovery_hash.0.into(), + size: object.size, + expiry: object.expiry as u64, + metadata: sol_metadata(object.metadata), + }) + .unwrap_or(sol::ObjectValue { + blobHash: [0u8; 32].into(), + recoveryHash: [0u8; 32].into(), + size: 0, + expiry: 0, + metadata: 
vec![], + }); + Self::abi_encode_returns(&(object,)) + } +} + +fn sol_metadata(metadata: HashMap) -> Vec { + metadata + .iter() + .map(|(k, v)| sol::KeyValue { + key: k.clone(), + value: v.clone(), + }) + .collect() +} + +fn sol_query(list: ListObjectsReturn) -> sol::Query { + sol::Query { + objects: list + .objects + .iter() + .map(|(key, object_state)| sol::Object { + key: String::from_utf8_lossy(key.as_slice()).to_string(), + state: sol::ObjectState { + blobHash: object_state.hash.0.into(), + size: object_state.size, + expiry: object_state.expiry as u64, + metadata: sol_metadata(object_state.metadata.clone()), + }, + }) + .collect(), + commonPrefixes: list + .common_prefixes + .iter() + .map(|prefix| String::from_utf8_lossy(prefix.as_slice()).to_string()) + .collect(), + nextKey: list + .next_key + .map(|k| String::from_utf8_lossy(k.as_slice()).to_string()) + .unwrap_or_default(), + } +} + +const DEFAULT_DELIMITER: &[u8] = b"/"; // "/" in ASCII and UTF-8 +const DEFAULT_START_KEY: Option> = None; //= "" +const DEFAULT_PREFIX: Vec = vec![]; //= "" +const DEFAULT_LIMIT: u64 = 0; + +impl AbiCall for sol::queryObjects_0Call { + type Params = ListParams; + type Returns = ListObjectsReturn; + type Output = Vec; + + fn params(&self) -> Self::Params { + let prefix = self.prefix.clone().into_bytes(); + let delimiter = self.delimiter.clone().into_bytes(); + let start_key = if self.startKey.is_empty() { + None + } else { + Some(self.startKey.clone().into_bytes()) + }; + let limit = self.limit; + ListParams { + prefix, + delimiter, + start_key, + limit, + } + } + + fn returns(&self, returns: Self::Returns) -> Self::Output { + let query = sol_query(returns); + Self::abi_encode_returns(&(query,)) + } +} + +impl AbiCall for sol::queryObjects_1Call { + type Params = ListParams; + type Returns = ListObjectsReturn; + type Output = Vec; + + fn params(&self) -> Self::Params { + let prefix = self.prefix.clone().into_bytes(); + let delimiter = self.delimiter.clone().into_bytes(); + let start_key = if self.startKey.is_empty() { + None + } else { + Some(self.startKey.clone().into_bytes()) + }; + let limit = DEFAULT_LIMIT; + ListParams { + prefix, + delimiter, + start_key, + limit, + } + } + + fn returns(&self, returns: Self::Returns) -> Self::Output { + let query = sol_query(returns); + Self::abi_encode_returns(&(query,)) + } +} + +impl AbiCall for sol::queryObjects_2Call { + type Params = ListParams; + type Returns = ListObjectsReturn; + type Output = Vec; + + fn params(&self) -> Self::Params { + let prefix = self.prefix.clone().into_bytes(); + let delimiter = DEFAULT_DELIMITER.to_vec(); + let start_key = DEFAULT_START_KEY; + let limit = DEFAULT_LIMIT; + ListParams { + prefix, + delimiter, + start_key, + limit, + } + } + + fn returns(&self, returns: Self::Returns) -> Self::Output { + let query = sol_query(returns); + Self::abi_encode_returns(&(query,)) + } +} + +impl AbiCall for sol::queryObjects_3Call { + type Params = ListParams; + type Returns = ListObjectsReturn; + type Output = Vec; + + fn params(&self) -> Self::Params { + let prefix = DEFAULT_PREFIX; + let delimiter = DEFAULT_DELIMITER.to_vec(); + let start_key = DEFAULT_START_KEY; + let limit = 0; + ListParams { + prefix, + delimiter, + start_key, + limit, + } + } + + fn returns(&self, returns: Self::Returns) -> Self::Output { + let query = sol_query(returns); + Self::abi_encode_returns(&(query,)) + } +} + +impl AbiCall for sol::queryObjects_4Call { + type Params = ListParams; + type Returns = ListObjectsReturn; + type Output = Vec; + + fn params(&self) 
-> Self::Params { + let prefix = self.prefix.clone().into_bytes(); + let delimiter = self.delimiter.clone().into_bytes(); + let start_key = DEFAULT_START_KEY; + let limit = DEFAULT_LIMIT; + ListParams { + prefix, + delimiter, + start_key, + limit, + } + } + + fn returns(&self, returns: Self::Returns) -> Self::Output { + let query = sol_query(returns); + Self::abi_encode_returns(&(query,)) + } +} + +impl AbiCall for sol::updateObjectMetadataCall { + type Params = UpdateObjectMetadataParams; + type Returns = (); + type Output = Vec; + + fn params(&self) -> Self::Params { + let mut metadata: HashMap> = HashMap::default(); + for kv in self.metadata.iter().cloned() { + let key = kv.key; + let value = kv.value; + let value = if value.is_empty() { None } else { Some(value) }; + metadata.insert(key, value); + } + UpdateObjectMetadataParams { + key: self.key.clone().into_bytes(), + metadata, + } + } + + fn returns(&self, returns: Self::Returns) -> Self::Output { + Self::abi_encode_returns(&returns) + } +} diff --git a/fendermint/actors/bucket/src/state.rs b/fendermint/actors/bucket/src/state.rs new file mode 100644 index 0000000000..20df164519 --- /dev/null +++ b/fendermint/actors/bucket/src/state.rs @@ -0,0 +1,790 @@ +// Copyright 2025 Recall Contributors +// Copyright 2021-2023 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT + +use std::collections::HashMap; +use std::fmt::{Debug, Display, Formatter}; +use std::string::FromUtf8Error; + +use cid::Cid; +use fendermint_actor_blobs_shared::bytes::B256; +use fendermint_actor_machine::{Kind, MachineAddress, MachineState}; +use fil_actors_runtime::ActorError; +use fvm_ipld_blockstore::Blockstore; +use fvm_ipld_encoding::tuple::*; +use fvm_shared::{address::Address, clock::ChainEpoch}; +use ipc_storage_ipld::hamt::{self, map::TrackedFlushResult, BytesKey, MapKey}; +use serde::{Deserialize, Serialize}; + +const MAX_LIST_LIMIT: usize = 1000; + +fn utf8_error(e: FromUtf8Error) -> ActorError { + ActorError::illegal_argument(e.to_string()) +} + +/// The state represents a bucket backed by a Hamt. +#[derive(Serialize_tuple, Deserialize_tuple)] +pub struct State { + /// The machine address set by the init actor. + pub address: MachineAddress, + /// The machine robust owner address. + pub owner: Address, + /// The objects Hamt. + pub objects: ObjectsState, + /// User-defined metadata (e.g., bucket name, etc.). + pub metadata: HashMap, +} +impl MachineState for State { + fn new( + store: &BS, + owner: Address, + metadata: HashMap, + ) -> Result { + Ok(Self { + address: Default::default(), + objects: ObjectsState::new(store)?, + owner, + metadata, + }) + } + + fn init(&mut self, address: Address) -> Result<(), ActorError> { + self.address.set(address) + } + + fn address(&self) -> MachineAddress { + self.address.clone() + } + + fn kind(&self) -> Kind { + Kind::Bucket + } + + fn owner(&self) -> Address { + self.owner + } + + fn metadata(&self) -> HashMap { + self.metadata.clone() + } +} + +/// The stored representation of an object in the bucket. +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +pub struct ObjectState { + /// The object blake3 hash. + pub hash: B256, + /// The object size. + pub size: u64, + /// Expiry block. + pub expiry: ChainEpoch, + /// User-defined object metadata (e.g., last modified timestamp, etc.). + pub metadata: HashMap, +} + +/// A list of objects and their common prefixes. +#[derive(Default, Debug, Serialize_tuple, Deserialize_tuple)] +pub struct ObjectList { + /// List of key-values matching the list query. 
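+    /// Each entry pairs the raw object key bytes with the stored object state.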
+ pub objects: Vec<(Vec, ObjectState)>, + /// When a delimiter is used in the list query, this contains common key prefixes. + pub common_prefixes: Vec>, +} + +impl State { + #[allow(clippy::too_many_arguments)] + pub fn add( + &mut self, + store: &BS, + key: BytesKey, + hash: B256, + size: u64, + expiry: ChainEpoch, + metadata: HashMap, + overwrite: bool, + ) -> Result { + let object_key = ObjectKey(key.clone()); + let mut objects = self.objects.hamt(store)?; + let object = ObjectState { + hash, + size, + expiry, + metadata, + }; + if overwrite { + objects.set(&object_key, object)?; + } else { + objects.set_if_absent(&object_key, object)?; + } + self.objects.save_tracked(objects.flush_tracked()?); + Ok(*self.objects.root.cid()) + } + + pub fn delete( + &mut self, + store: &BS, + key: &BytesKey, + ) -> Result<(ObjectState, Cid), ActorError> { + let mut objects = self.objects.hamt(store)?; + let object_key = ObjectKey(key.clone()); + let (tracked_result, object) = objects.delete_and_flush_tracked(&object_key)?; + self.objects.save_tracked(tracked_result); + + match object { + Some(object) => Ok((object, self.objects.root.cid().to_owned())), + None => Err(ActorError::not_found("key not found".into())), + } + } + + pub fn get( + &self, + store: &BS, + key: &BytesKey, + ) -> Result, ActorError> { + let object_key = ObjectKey(key.clone()); + let object = self.objects.hamt(store)?.get(&object_key)?; + Ok(object) + } + + pub fn list( + &self, + store: &BS, + prefix: Vec, + delimiter: Vec, + start_key: Option<&BytesKey>, + limit: u64, + mut collector: F, + ) -> Result<(Vec>, Option), ActorError> + where + F: FnMut(Vec, ObjectState) -> Result<(), ActorError>, + { + let objects = self.objects.hamt(store)?; + let mut common_prefixes = std::collections::BTreeSet::>::new(); + let limit = if limit == 0 { + MAX_LIST_LIMIT + } else { + (limit as usize).min(MAX_LIST_LIMIT) + }; + + let (_, next_key) = objects.for_each_ranged(start_key, Some(limit), |k, v| { + let key = k.0 .0.clone(); + if !prefix.is_empty() && !key.starts_with(&prefix) { + return Ok(false); + } + if !delimiter.is_empty() { + let utf8_prefix = String::from_utf8(prefix.clone()).map_err(utf8_error)?; + let prefix_length = utf8_prefix.len(); + let utf8_key = String::from_utf8(key.clone()).map_err(utf8_error)?; + let utf8_delimiter = String::from_utf8(delimiter.clone()).map_err(utf8_error)?; + if let Some(index) = utf8_key[prefix_length..].find(&utf8_delimiter) { + let subset = utf8_key[..=(index + prefix_length)].as_bytes().to_owned(); + common_prefixes.insert(subset); + return Ok(false); + } + } + collector(key, v.to_owned())?; + Ok(true) + })?; + + let common_prefixes = common_prefixes.into_iter().collect(); + Ok((common_prefixes, next_key.map(|key| key.0))) + } +} + +#[derive(Debug, PartialEq)] +pub struct ObjectKey(pub BytesKey); + +impl MapKey for ObjectKey { + fn from_bytes(b: &[u8]) -> Result { + Ok(ObjectKey(BytesKey(b.to_vec()))) + } + + fn to_bytes(&self) -> Result, String> { + Ok(self.0 .0.to_vec()) + } +} + +impl Display for ObjectKey { + fn fmt(&self, f: &mut Formatter) -> std::fmt::Result { + write!(f, "{}", String::from_utf8_lossy(&self.0 .0)) + } +} + +#[derive(Debug, Serialize_tuple, Deserialize_tuple)] +pub struct ObjectsState { + pub root: hamt::Root, + size: u64, +} + +impl ObjectsState { + pub fn new(store: &BS) -> Result { + let root = hamt::Root::::new(store, "objects")?; + Ok(Self { root, size: 0 }) + } + + pub fn hamt( + &self, + store: BS, + ) -> Result, ActorError> { + self.root.hamt(store, self.size) + } + + pub fn 
save_tracked( + &mut self, + tracked_flush_result: TrackedFlushResult, + ) { + self.root = tracked_flush_result.root; + self.size = tracked_flush_result.size + } + + pub fn len(&self) -> u64 { + self.size + } +} + +#[cfg(test)] +mod tests { + use super::*; + + use fendermint_actor_blobs_testing::{new_hash, new_hash_from_vec}; + use fvm_ipld_blockstore::MemoryBlockstore; + use quickcheck::Arbitrary; + use quickcheck_macros::quickcheck; + use std::str::FromStr; + + impl Arbitrary for ObjectState { + fn arbitrary(g: &mut quickcheck::Gen) -> Self { + let hash = new_hash(u16::arbitrary(g) as usize); + ObjectState { + hash: hash.0, + expiry: i64::arbitrary(g), + size: u64::arbitrary(g), + metadata: HashMap::arbitrary(g), + } + } + } + + fn object_one() -> ObjectState { + let (hash, size) = new_hash_from_vec([1, 2, 3, 4, 5].to_vec()); + let mut metadata = HashMap::::new(); + metadata.insert("_created".to_string(), String::from("1718464344")); + metadata.insert("_modified".to_string(), String::from("1718464345")); + ObjectState { + hash, + size, + expiry: 123456789, + metadata, + } + } + + const OBJECT_ONE_CID: &str = "bafy2bzacea5tbd4x6okckdkb2yl7wbyjqpxkow6whr46dswwv5xj7va4uro2g"; + + fn object_two() -> ObjectState { + let (hash, size) = new_hash_from_vec([6, 7, 8, 9, 10, 11].to_vec()); + let mut metadata = HashMap::::new(); + metadata.insert("_created".to_string(), String::from("1718464456")); + metadata.insert("_modified".to_string(), String::from("1718480987")); + ObjectState { + hash, + size, + expiry: 123456789, + metadata, + } + } + + fn object_three() -> ObjectState { + let (hash, size) = new_hash_from_vec([11, 12, 13, 14, 15, 16, 17].to_vec()); + let mut metadata = HashMap::::new(); + metadata.insert("_created".to_string(), String::from("1718465678")); + metadata.insert("_modified".to_string(), String::from("1718512346")); + ObjectState { + hash, + size, + expiry: 123456789, + metadata, + } + } + + #[allow(clippy::type_complexity)] + fn list( + state: &State, + store: &BS, + prefix: Vec, + delimiter: Vec, + start_key: Option<&BytesKey>, + limit: u64, + ) -> Result<(Vec<(Vec, ObjectState)>, Vec>, Option), ActorError> { + let mut objects = Vec::new(); + let (prefixes, next_key) = state.list( + store, + prefix, + delimiter, + start_key, + limit, + |key: Vec, object: ObjectState| -> Result<(), ActorError> { + objects.push((key, object)); + Ok(()) + }, + )?; + Ok((objects, prefixes, next_key)) + } + + fn get_lex_sequence(start: Vec, count: usize) -> Vec> { + let mut current = start; + let mut sequence = Vec::with_capacity(count); + for _ in 0..count { + sequence.push(current.clone()); + for i in (0..current.len()).rev() { + if current[i] < 255 { + current[i] += 1; + break; + } else { + current[i] = 0; // Reset this byte to 0 and carry to the next byte + } + } + } + sequence + } + + #[test] + fn test_constructor() { + let store = MemoryBlockstore::default(); + let state = State::new(&store, Address::new_id(100), HashMap::new()); + assert!(state.is_ok()); + assert_eq!( + *state.unwrap().objects.root.cid(), + Cid::from_str("bafy2bzaceamp42wmmgr2g2ymg46euououzfyck7szknvfacqscohrvaikwfay") + .unwrap() + ); + } + + #[test] + fn test_add() { + let store = MemoryBlockstore::default(); + let mut state = State::new(&store, Address::new_id(100), HashMap::new()).unwrap(); + let object = object_one(); + assert!(state + .add( + &store, + BytesKey(vec![1, 2, 3]), + object.hash, + object.size, + object.expiry, + object.metadata, + true, + ) + .is_ok()); + + assert_eq!( + *state.objects.root.cid(), + 
Cid::from_str(OBJECT_ONE_CID).unwrap() + ); + } + + #[quickcheck] + fn test_delete(object: ObjectState) { + let store = MemoryBlockstore::default(); + let mut state = State::new(&store, Address::new_id(100), HashMap::new()).unwrap(); + let key = BytesKey(vec![1, 2, 3]); + state + .add( + &store, + key.clone(), + object.hash, + object.size, + object.expiry, + object.metadata, + true, + ) + .unwrap(); + assert!(state.delete(&store, &key).is_ok()); + + let result = state.get(&store, &key); + assert!(result.is_ok()); + assert_eq!(result.unwrap(), None); + } + + #[quickcheck] + fn test_get(object: ObjectState) { + let store = MemoryBlockstore::default(); + let mut state = State::new(&store, Address::new_id(100), HashMap::new()).unwrap(); + let key = BytesKey(vec![1, 2, 3]); + let md = object.metadata.clone(); + state + .add( + &store, + key.clone(), + object.hash, + object.size, + object.expiry, + md, + true, + ) + .unwrap(); + let result = state.get(&store, &key); + + assert!(result.is_ok()); + assert_eq!(result.unwrap().unwrap(), object); + } + + fn create_and_put_objects( + state: &mut State, + store: &MemoryBlockstore, + ) -> anyhow::Result<(BytesKey, BytesKey, BytesKey)> { + let baz_key = BytesKey("foo/baz.png".as_bytes().to_vec()); // index 0 + let object = object_one(); + state.add( + store, + baz_key.clone(), + object.hash, + object.size, + object.expiry, + object.metadata, + false, + )?; + let bar_key = BytesKey("foo/bar.png".as_bytes().to_vec()); // index 1 + let object = object_two(); + state.add( + store, + bar_key.clone(), + object.hash, + object.size, + object.expiry, + object.metadata, + false, + )?; + // We'll mostly ignore this one + let other_key = BytesKey("zzzz/image.png".as_bytes().to_vec()); // index 2 + let hash = new_hash(256); + state.add( + &store, + other_key.clone(), + hash.0, + 8, + 123456789, + HashMap::::new(), + false, + )?; + let jpeg_key = BytesKey("foo.jpeg".as_bytes().to_vec()); // index 3 + let object = object_three(); + state.add( + store, + jpeg_key.clone(), + object.hash, + object.size, + object.expiry, + object.metadata, + false, + )?; + Ok((baz_key, bar_key, jpeg_key)) + } + + #[test] + fn test_list_all_keys() { + let store = MemoryBlockstore::default(); + let mut state = State::new(&store, Address::new_id(100), HashMap::new()).unwrap(); + + let (baz_key, _, _) = create_and_put_objects(&mut state, &store).unwrap(); + + // List all keys with a limit + let result = list(&state, &store, vec![], vec![], None, 0); + assert!(result.is_ok()); + let result = result.unwrap(); + assert_eq!(result.0.len(), 4); + assert_eq!(result.0.first(), Some(&(baz_key.0, object_one()))); + assert_eq!(result.2, None); + } + + #[test] + fn test_list_more_than_max_limit() { + let store = MemoryBlockstore::default(); + let mut state = State::new(&store, Address::new_id(100), HashMap::new()).unwrap(); + + let sequence = get_lex_sequence(vec![0, 0, 0], MAX_LIST_LIMIT + 10); + for key in sequence { + let key = BytesKey(key); + let object = object_one(); + state + .add( + &store, + key.clone(), + object.hash, + object.size, + object.expiry, + object.metadata, + false, + ) + .unwrap(); + } + + // List all keys but has more + let result = list(&state, &store, vec![], vec![], None, 0); + assert!(result.is_ok()); + let result = result.unwrap(); + assert_eq!(result.0.len(), MAX_LIST_LIMIT); + // Note: This isn't the element at MAX_LIST_LIMIT + 1 as one might expect. + // The ordering is deterministic but depends on the HAMT structure. 
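+        // Keys are traversed in hashed-key order (not insertion or lexicographic
+        // order), so the next key is simply whichever key sits at position
+        // MAX_LIST_LIMIT in that traversal.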
+ assert_eq!(result.2, Some(BytesKey(vec![0, 3, 86]))); + + let next_key = result.2.unwrap(); + + // List remaining objects + let result = list(&state, &store, vec![], vec![], Some(&next_key), 0); + assert!(result.is_ok()); + let result = result.unwrap(); + assert_eq!(result.0.len(), 10); + assert_eq!(result.2, None); + } + + #[test] + fn test_list_at_max_limit() { + let store = MemoryBlockstore::default(); + let mut state = State::new(&store, Address::new_id(100), HashMap::new()).unwrap(); + + for i in 0..MAX_LIST_LIMIT { + let key = BytesKey(format!("{}.txt", i).as_bytes().to_vec()); + let object = object_one(); + state + .add( + &store, + key.clone(), + object.hash, + object.size, + object.expiry, + object.metadata, + false, + ) + .unwrap(); + } + + // List all keys + let result = list(&state, &store, vec![], vec![], None, 0); + assert!(result.is_ok()); + let result = result.unwrap(); + assert_eq!(result.0.len(), MAX_LIST_LIMIT); + assert_eq!(result.2, None); + } + + #[test] + fn test_list_keys_with_prefix() { + let store = MemoryBlockstore::default(); + let mut state = State::new(&store, Address::new_id(100), HashMap::new()).unwrap(); + + let (baz_key, bar_key, _) = create_and_put_objects(&mut state, &store).unwrap(); + + let foo_key = BytesKey("foo".as_bytes().to_vec()); + let result = list(&state, &store, foo_key.0.clone(), vec![], None, 0); + assert!(result.is_ok()); + let result = result.unwrap(); + assert_eq!(result.0.len(), 3); + assert_eq!(result.0[0], (baz_key.0, object_one())); + assert_eq!(result.0[1], (bar_key.0, object_two())); + } + + #[test] + fn test_list_keys_with_delimiter() { + let store = MemoryBlockstore::default(); + let mut state = State::new(&store, Address::new_id(100), HashMap::new()).unwrap(); + + let (_, _, jpeg_key) = create_and_put_objects(&mut state, &store).unwrap(); + + let foo_key = BytesKey("foo".as_bytes().to_vec()); + let delimiter_key = BytesKey("/".as_bytes().to_vec()); + let full_key = [foo_key.clone(), delimiter_key.clone()].concat(); + let result = list( + &state, + &store, + foo_key.0.clone(), + delimiter_key.0.clone(), + None, + 4, + ); + assert!(result.is_ok()); + let result = result.unwrap(); + assert_eq!(result.0.len(), 1); + assert_eq!(result.0[0], (jpeg_key.0, object_three())); + assert_eq!(result.1[0], full_key); + } + + #[test] + fn test_list_keys_with_nested_delimiter() { + let store = MemoryBlockstore::default(); + let mut state = State::new(&store, Address::new_id(100), HashMap::new()).unwrap(); + + let jpeg_key = BytesKey("foo.jpeg".as_bytes().to_vec()); + let hash = new_hash(256); + state + .add( + &store, + jpeg_key.clone(), + hash.0, + 8, + 123456789, + HashMap::::new(), + false, + ) + .unwrap(); + let bar_key = BytesKey("bin/foo/bar.png".as_bytes().to_vec()); + let hash = new_hash(256); + state + .add( + &store, + bar_key.clone(), + hash.0, + 8, + 123456789, + HashMap::::new(), + false, + ) + .unwrap(); + let baz_key = BytesKey("bin/foo/baz.png".as_bytes().to_vec()); + let hash = new_hash(256); + state + .add( + &store, + baz_key.clone(), + hash.0, + 8, + 123456789, + HashMap::::new(), + false, + ) + .unwrap(); + + let bin_key = BytesKey("bin/".as_bytes().to_vec()); + let full_key = BytesKey("bin/foo/".as_bytes().to_vec()); + let delimiter_key = BytesKey("/".as_bytes().to_vec()); + let result = list( + &state, + &store, + bin_key.0.clone(), + delimiter_key.0.clone(), + None, + 0, + ); + assert!(result.is_ok()); + let result = result.unwrap(); + assert_eq!(result.0.len(), 0); + assert_eq!(result.1.len(), 1); + 
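+        // Both nested objects roll up into the single common prefix "bin/foo/".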
assert_eq!(result.1[0], full_key.0); + } + + #[test] + fn test_list_with_start_key_and_limit() { + let store = MemoryBlockstore::default(); + let mut state = State::new(&store, Address::new_id(100), HashMap::new()).unwrap(); + + let (_, bar_key, _) = create_and_put_objects(&mut state, &store).unwrap(); + + // List all keys with a limit and start key + let result = list(&state, &store, vec![], vec![], Some(&bar_key), 1); + assert!(result.is_ok()); + let result = result.unwrap(); + assert_eq!(result.0.len(), 1); + // Note that baz is listed first in order + assert_eq!(result.0.first(), Some(&(bar_key.0, object_two()))); + } + + #[test] + fn test_list_with_prefix_delimiter_and_start_key_and_limit() { + let store = MemoryBlockstore::default(); + let mut state = State::new(&store, Address::new_id(100), HashMap::new()).unwrap(); + + let one = BytesKey("hello/world".as_bytes().to_vec()); + let hash = new_hash(256); + state + .add( + &store, + one.clone(), + hash.0, + 8, + 123456789, + HashMap::::new(), + false, + ) + .unwrap(); + let two = BytesKey("hello/again".as_bytes().to_vec()); + let hash = new_hash(256); + state + .add( + &store, + two.clone(), + hash.0, + 8, + 123456789, + HashMap::::new(), + false, + ) + .unwrap(); + + // List all keys with a limit and start key + let result = list( + &state, + &store, + "hello/".as_bytes().to_vec(), + "/".as_bytes().to_vec(), + Some(&two), + 0, + ); + assert!(result.is_ok()); + let result = result.unwrap(); + assert_eq!(result.0.len(), 1); + } + + #[test] + fn test_list_with_prefix_and_without_and_limit() { + let store = MemoryBlockstore::default(); + let mut state = State::new(&store, Address::new_id(100), HashMap::new()).unwrap(); + + let one = BytesKey("test/hello".as_bytes().to_vec()); + let hash = new_hash(256); + state + .add( + &store, + one.clone(), + hash.0, + 8, + 123456789, + HashMap::::new(), + false, + ) + .unwrap(); + let two = BytesKey("hello".as_bytes().to_vec()); + let hash = new_hash(256); + state + .add( + &store, + two.clone(), + hash.0, + 8, + 123456789, + HashMap::::new(), + false, + ) + .unwrap(); + + // List with prefix and limit 1 + let result = list( + &state, + &store, + "test/".as_bytes().to_vec(), + "/".as_bytes().to_vec(), + None, + 1, + ); + assert!(result.is_ok()); + let result = result.unwrap(); + assert_eq!(result.0.len(), 1); + assert_eq!( + result.0.first().unwrap().0, + "test/hello".as_bytes().to_vec(), + ); + + // List without a prefix and limit 1 + let result = list(&state, &store, vec![], "/".as_bytes().to_vec(), None, 1); + assert!(result.is_ok()); + let result = result.unwrap(); + assert_eq!(result.0.len(), 1); + assert_eq!(result.0.first().unwrap().0, "hello".as_bytes().to_vec()); + } +} diff --git a/fendermint/actors/init/Cargo.toml b/fendermint/actors/init/Cargo.toml new file mode 100644 index 0000000000..7776738cdd --- /dev/null +++ b/fendermint/actors/init/Cargo.toml @@ -0,0 +1,30 @@ +[package] +name = "fendermint_actor_init" +description = "Builtin Init actor replacement for IPC with ADM support" +license.workspace = true +edition.workspace = true +authors.workspace = true +version = "0.1.0" + +[lib] +## lib is necessary for integration tests +## cdylib is necessary for Wasm build +crate-type = ["cdylib", "lib"] + +[dependencies] +anyhow = { workspace = true } +cid = { workspace = true } +fil_actors_runtime = { workspace = true } +fvm_ipld_blockstore = { workspace = true } +fvm_ipld_encoding = { workspace = true } +fvm_shared = { workspace = true } +log = { workspace = true } +num-derive = { workspace = 
true } +num-traits = { workspace = true } +serde = { workspace = true } + +[dev-dependencies] +fil_actors_runtime = { workspace = true, features = ["test_utils"] } + +[features] +fil-actor = ["fil_actors_runtime/fil-actor"] diff --git a/fendermint/actors/init/src/lib.rs b/fendermint/actors/init/src/lib.rs new file mode 100644 index 0000000000..d3e8ce6e0c --- /dev/null +++ b/fendermint/actors/init/src/lib.rs @@ -0,0 +1,184 @@ +// Copyright 2022-2024 Protocol Labs +// Copyright 2025 Recall Contributors +// Copyright 2019-2022 ChainSafe Systems +// SPDX-License-Identifier: Apache-2.0, MIT + +//! Custom Init actor for IPC that allows the ADM actor to spawn any actor type. + +use cid::Cid; +use fil_actors_runtime::runtime::builtins::Type; +use fil_actors_runtime::runtime::{ActorCode, Runtime}; +use fil_actors_runtime::{ + actor_dispatch_unrestricted, actor_error, extract_send_result, ActorContext, ActorError, + AsActorError, SYSTEM_ACTOR_ADDR, +}; +use fvm_shared::address::Address; +use fvm_shared::{ActorID, METHOD_CONSTRUCTOR}; +use num_derive::FromPrimitive; + +pub use fil_actors_runtime::INIT_ACTOR_ADDR; + +mod state; +mod types; + +pub use state::State; +pub use types::*; + +#[cfg(feature = "fil-actor")] +fil_actors_runtime::wasm_trampoline!(IPCInitActor); + +/// ADM Actor ID - hardcoded to match fendermint_vm_actor_interface::adm::ADM_ACTOR_ID +pub const ADM_ACTOR_ID: ActorID = 17; + +/// Custom Init actor name for the manifest +pub const IPC_INIT_ACTOR_NAME: &str = "init"; + +/// Init actor methods +#[derive(FromPrimitive)] +#[repr(u64)] +pub enum Method { + Constructor = METHOD_CONSTRUCTOR, + Exec = 2, + Exec4 = 3, +} + +/// IPC Init actor with ADM support +pub struct IPCInitActor; + +impl IPCInitActor { + pub fn constructor(rt: &impl Runtime, params: ConstructorParams) -> Result<(), ActorError> { + rt.validate_immediate_caller_is(std::iter::once(&SYSTEM_ACTOR_ADDR))?; + let state = State::new(rt.store(), params.network_name)?; + rt.create(&state)?; + Ok(()) + } + + pub fn exec(rt: &impl Runtime, params: ExecParams) -> Result { + rt.validate_immediate_caller_accept_any()?; + + let caller_code = rt + .get_actor_code_cid(&rt.message().caller().id().unwrap()) + .ok_or_else(|| { + actor_error!( + illegal_state, + "no code for caller as {}", + rt.message().caller() + ) + })?; + + if !can_exec(rt, &caller_code, ¶ms.code_cid) { + return Err(actor_error!(forbidden; + "caller type {} cannot exec actor type {}", + &caller_code, ¶ms.code_cid + )); + } + + let robust_address = rt.new_actor_address()?; + + let (id_address, existing): (ActorID, bool) = rt.transaction(|s: &mut State, rt| { + s.map_addresses_to_id(rt.store(), &robust_address, None) + .context("failed to allocate ID address") + })?; + + if existing { + return Err(actor_error!( + forbidden, + "cannot exec over existing actor {}", + id_address + )); + } + + rt.create_actor(params.code_cid, id_address, None)?; + + extract_send_result(rt.send_simple( + &Address::new_id(id_address), + METHOD_CONSTRUCTOR, + params.constructor_params.into(), + rt.message().value_received(), + )) + .context("constructor failed")?; + + Ok(ExecReturn { + id_address: Address::new_id(id_address), + robust_address, + }) + } + + pub fn exec4(rt: &impl Runtime, params: Exec4Params) -> Result { + rt.validate_immediate_caller_is(std::iter::once(&fil_actors_runtime::EAM_ACTOR_ADDR))?; + + let caller_id = rt.message().caller().id().unwrap(); + let delegated_address = Address::new_delegated(caller_id, ¶ms.subaddress.to_vec()) + .map_err(|e| { + 
ActorError::illegal_argument(format!("invalid delegated address: {}", e)) + })?; + + let robust_address = rt.new_actor_address()?; + + let (id_address, existing): (ActorID, bool) = rt.transaction(|s: &mut State, rt| { + s.map_addresses_to_id(rt.store(), &robust_address, Some(&delegated_address)) + .context("failed to map addresses to ID") + })?; + + if existing { + let code_cid = rt.get_actor_code_cid(&id_address).context_code( + fvm_shared::error::ExitCode::USR_FORBIDDEN, + "cannot redeploy a deleted actor", + )?; + let placeholder_cid = rt.get_code_cid_for_type(Type::Placeholder); + if code_cid != placeholder_cid { + return Err(ActorError::forbidden(format!( + "cannot replace existing non-placeholder actor with code: {code_cid}" + ))); + } + } + + rt.create_actor(params.code_cid, id_address, Some(delegated_address))?; + + extract_send_result(rt.send_simple( + &Address::new_id(id_address), + METHOD_CONSTRUCTOR, + params.constructor_params.into(), + rt.message().value_received(), + )) + .context("constructor failed")?; + + Ok(Exec4Return { + id_address: Address::new_id(id_address), + robust_address, + }) + } +} + +impl ActorCode for IPCInitActor { + type Methods = Method; + + fn name() -> &'static str { + IPC_INIT_ACTOR_NAME + } + + actor_dispatch_unrestricted! { + Constructor => constructor, + Exec => exec, + Exec4 => exec4, + } +} + +/// Key modification: Allow ADM actor to exec any actor type +fn can_exec(rt: &impl Runtime, caller: &Cid, exec: &Cid) -> bool { + let caller_id = rt.message().caller().id(); + + // Allow ADM actor (ID 17) to create any actor type + if caller_id == Ok(ADM_ACTOR_ID) { + return true; + } + + // Standard builtin actor checks + rt.resolve_builtin_actor_type(exec) + .map(|typ| match typ { + Type::Multisig | Type::PaymentChannel => true, + Type::Miner if rt.resolve_builtin_actor_type(caller) == Some(Type::Power) => true, + _ => false, + }) + .unwrap_or(false) +} diff --git a/fendermint/actors/init/src/state.rs b/fendermint/actors/init/src/state.rs new file mode 100644 index 0000000000..4d2b795889 --- /dev/null +++ b/fendermint/actors/init/src/state.rs @@ -0,0 +1,101 @@ +// Copyright 2022-2024 Protocol Labs +// Copyright 2019-2022 ChainSafe Systems +// SPDX-License-Identifier: Apache-2.0, MIT + +use cid::Cid; +use fvm_ipld_blockstore::Blockstore; +use fvm_ipld_encoding::tuple::*; +use fvm_shared::address::{Address, Protocol}; +use fvm_shared::ActorID; + +use fil_actors_runtime::{ + actor_error, ActorError, Map2, DEFAULT_HAMT_CONFIG, FIRST_NON_SINGLETON_ADDR, +}; + +#[derive(Serialize_tuple, Deserialize_tuple, Clone, Debug)] +pub struct State { + /// HAMT[Address]ActorID + pub address_map: Cid, + pub next_id: ActorID, + pub network_name: String, +} + +pub type AddressMap = Map2; + +impl State { + pub fn new(store: &BS, network_name: String) -> Result { + let empty = AddressMap::flush_empty(store, DEFAULT_HAMT_CONFIG)?; + Ok(Self { + address_map: empty, + next_id: FIRST_NON_SINGLETON_ADDR, + network_name, + }) + } + + /// Maps argument addresses to to a new or existing actor ID. + /// With no delegated address, or if the delegated address is not already mapped, + /// allocates a new ID address and maps both to it. + /// If the delegated address is already present, maps the robust address to that actor ID. + /// Fails if the robust address is already mapped. The assignment of an ID to an address is one-time-only, even if the actor at that ID is deleted. + /// Returns the actor ID and a boolean indicating whether or not the actor already exists. 
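A minimal sketch of the two mapping paths (illustrative only, assuming a `MemoryBlockstore` and synthetic addresses; `Exec` passes no delegated address, while `Exec4` passes the f4 address derived from the EAM caller):

    let store = fvm_ipld_blockstore::MemoryBlockstore::default();
    let mut st = State::new(&store, "test-net".to_string()).unwrap();

    // Exec path: no delegated address, so a fresh ID is allocated every time.
    let (id_a, existed_a) = st
        .map_addresses_to_id(&store, &Address::new_actor(b"robust-a"), None)
        .unwrap();
    assert!(!existed_a);

    // Exec4 path: the delegated (f4) address is the stable key. A second robust address
    // mapped against the same delegated address reuses the ID and reports `existing = true`,
    // which exec4 then checks against the placeholder code CID before redeploying.
    let delegated = Address::new_delegated(10, &[0u8; 20]).unwrap();
    let (id_b, existed_b) = st
        .map_addresses_to_id(&store, &Address::new_actor(b"robust-b"), Some(&delegated))
        .unwrap();
    let (id_c, existed_c) = st
        .map_addresses_to_id(&store, &Address::new_actor(b"robust-c"), Some(&delegated))
        .unwrap();
    assert!(!existed_b && existed_c);
    assert_eq!(id_b, id_c);
    assert_ne!(id_a, id_b);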
+ pub fn map_addresses_to_id( + &mut self, + store: &BS, + robust_addr: &Address, + delegated_addr: Option<&Address>, + ) -> Result<(ActorID, bool), ActorError> { + let mut map = AddressMap::load(store, &self.address_map, DEFAULT_HAMT_CONFIG, "addresses")?; + let (id, existing) = if let Some(delegated_addr) = delegated_addr { + // If there's a delegated address, either ipc_storage the already-mapped actor ID or + // create and map a new one. + if let Some(existing_id) = map.get(delegated_addr)? { + (*existing_id, true) + } else { + let new_id = self.next_id; + self.next_id += 1; + map.set(delegated_addr, new_id)?; + (new_id, false) + } + } else { + // With no delegated address, always create a new actor ID. + let new_id = self.next_id; + self.next_id += 1; + (new_id, false) + }; + + // Map the robust address to the ID, failing if it's already mapped to anything. + let is_new = map.set_if_absent(robust_addr, id)?; + if !is_new { + return Err(actor_error!( + forbidden, + "robust address {} is already allocated in the address map", + robust_addr + )); + } + self.address_map = map.flush()?; + Ok((id, existing)) + } + + /// ResolveAddress resolves an address to an ID-address, if possible. + /// If the provided address is an ID address, it is returned as-is. + /// This means that mapped ID-addresses (which should only appear as values, not keys) and + /// singleton actor addresses (which are not in the map) pass through unchanged. + /// + /// Returns an ID-address and `true` if the address was already an ID-address or was resolved + /// in the mapping. + /// Returns an undefined address and `false` if the address was not an ID-address and not found + /// in the mapping. + /// Returns an error only if state was inconsistent. + pub fn resolve_address( + &self, + store: &BS, + addr: &Address, + ) -> Result, ActorError> { + if addr.protocol() == Protocol::ID { + return Ok(Some(*addr)); + } + let map = AddressMap::load(store, &self.address_map, DEFAULT_HAMT_CONFIG, "addresses")?; + let found = map.get(addr)?; + Ok(found.copied().map(Address::new_id)) + } +} diff --git a/fendermint/actors/init/src/types.rs b/fendermint/actors/init/src/types.rs new file mode 100644 index 0000000000..5d1895481a --- /dev/null +++ b/fendermint/actors/init/src/types.rs @@ -0,0 +1,41 @@ +// Copyright 2022-2024 Protocol Labs +// Copyright 2019-2022 ChainSafe Systems +// SPDX-License-Identifier: Apache-2.0, MIT + +use cid::Cid; +use fvm_ipld_encoding::tuple::*; +use fvm_ipld_encoding::RawBytes; +use fvm_shared::address::Address; + +/// Init actor Constructor parameters +#[derive(Serialize_tuple, Deserialize_tuple)] +pub struct ConstructorParams { + pub network_name: String, +} + +/// Init actor Exec Params +#[derive(Serialize_tuple, Deserialize_tuple)] +pub struct ExecParams { + pub code_cid: Cid, + pub constructor_params: RawBytes, +} + +/// Init actor Exec Return value +#[derive(Debug, Serialize_tuple, Deserialize_tuple)] +pub struct ExecReturn { + /// ID based address for created actor + pub id_address: Address, + /// Reorg safe address for actor + pub robust_address: Address, +} + +/// Init actor Exec4 Params +#[derive(Serialize_tuple, Deserialize_tuple)] +pub struct Exec4Params { + pub code_cid: Cid, + pub constructor_params: RawBytes, + pub subaddress: RawBytes, +} + +/// Init actor Exec4 Return value +pub type Exec4Return = ExecReturn; diff --git a/fendermint/actors/ipc_storage_config/Cargo.toml b/fendermint/actors/ipc_storage_config/Cargo.toml new file mode 100644 index 0000000000..ecc1c5838b --- /dev/null +++ 
b/fendermint/actors/ipc_storage_config/Cargo.toml @@ -0,0 +1,33 @@ +[package] +name = "fendermint_actor_ipc_storage_config" +description = "Singleton actor for updateable ipc storage network parameters" +license.workspace = true +edition.workspace = true +authors.workspace = true +version = "0.1.0" + +[lib] +## lib is necessary for integration tests +## cdylib is necessary for Wasm build +crate-type = ["cdylib", "lib"] + +[dependencies] +anyhow = { workspace = true } +fendermint_actor_blobs_shared = { path = "../blobs/shared" } +fil_actors_runtime = { workspace = true } +fvm_ipld_encoding = { workspace = true } +fvm_shared = { workspace = true } +num-traits = { workspace = true } +ipc_storage_sol_facade = { workspace = true, features = ["config"] } +serde = { workspace = true, features = ["derive"] } + +fendermint_actor_ipc_storage_config_shared = { path = "../ipc_storage_config/shared" } +ipc_storage_actor_sdk = { path = "../../../ipc-storage/actor_sdk" } + +[dev-dependencies] +fil_actors_evm_shared = { workspace = true } +fil_actors_runtime = { workspace = true, features = ["test_utils"] } +hex-literal = { workspace = true } + +[features] +fil-actor = ["fil_actors_runtime/fil-actor"] diff --git a/fendermint/actors/ipc_storage_config/shared/Cargo.toml b/fendermint/actors/ipc_storage_config/shared/Cargo.toml new file mode 100644 index 0000000000..51b365f413 --- /dev/null +++ b/fendermint/actors/ipc_storage_config/shared/Cargo.toml @@ -0,0 +1,25 @@ +[package] +name = "fendermint_actor_ipc_storage_config_shared" +description = "Shared resources for the ipc_storage config" +license.workspace = true +edition.workspace = true +authors.workspace = true +version = "0.1.0" + +[lib] +## lib is necessary for integration tests +## cdylib is necessary for Wasm build +crate-type = ["cdylib", "lib"] + +[dependencies] +fendermint_actor_blobs_shared = { path = "../../blobs/shared" } +fil_actors_runtime = { workspace = true } +frc42_dispatch = { workspace = true } +fvm_ipld_encoding = { workspace = true } +fvm_shared = { workspace = true } +num-derive = { workspace = true } +num-traits = { workspace = true } +serde = { workspace = true, features = ["derive"] } + +[features] +fil-actor = ["fil_actors_runtime/fil-actor"] diff --git a/fendermint/actors/ipc_storage_config/shared/src/lib.rs b/fendermint/actors/ipc_storage_config/shared/src/lib.rs new file mode 100644 index 0000000000..1a72dd72bd --- /dev/null +++ b/fendermint/actors/ipc_storage_config/shared/src/lib.rs @@ -0,0 +1,103 @@ +// Copyright 2025 Recall Contributors +// Copyright 2021-2023 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT + +use fendermint_actor_blobs_shared::credit::TokenCreditRate; +use fil_actors_runtime::{deserialize_block, extract_send_result, runtime::Runtime, ActorError}; +use fvm_ipld_encoding::tuple::*; +use fvm_shared::{ + address::Address, clock::ChainEpoch, econ::TokenAmount, sys::SendFlags, ActorID, MethodNum, + METHOD_CONSTRUCTOR, +}; +use num_derive::FromPrimitive; +use num_traits::Zero; +use serde::{Deserialize, Serialize}; + +pub const IPC_STORAGE_CONFIG_ACTOR_ID: ActorID = 70; +pub const IPC_STORAGE_CONFIG_ACTOR_ADDR: Address = Address::new_id(IPC_STORAGE_CONFIG_ACTOR_ID); + +/// The updatable config. +#[derive(Serialize_tuple, Deserialize_tuple, Debug, Clone)] +pub struct IPCStorageConfig { + /// The total storage capacity of the subnet. + pub blob_capacity: u64, + /// The token to credit rate. + pub token_credit_rate: TokenCreditRate, + /// Epoch interval at which to debit all credit accounts. 
+ pub blob_credit_debit_interval: ChainEpoch, + /// The minimum epoch duration a blob can be stored. + pub blob_min_ttl: ChainEpoch, + /// The default epoch duration a blob is stored. + pub blob_default_ttl: ChainEpoch, + /// Maximum number of blobs to delete in a single batch during debit. + pub blob_delete_batch_size: u64, + /// Maximum number of accounts to process in a single batch during debit. + pub account_debit_batch_size: u64, +} + +impl Default for IPCStorageConfig { + fn default() -> Self { + Self { + blob_capacity: 10 * 1024 * 1024 * 1024 * 1024, // 10 TiB + // 1 RECALL buys 1e18 credits ~ 1 RECALL buys 1e36 atto credits. + token_credit_rate: TokenCreditRate::from(10u128.pow(36)), + // This needs to be low enough to avoid out-of-gas errors. + // TODO: Stress test with max-throughput (~100 blobs/s) + blob_credit_debit_interval: ChainEpoch::from(60 * 10), // ~10 min + blob_min_ttl: ChainEpoch::from(60 * 60), // ~1 hour + blob_default_ttl: ChainEpoch::from(60 * 60 * 24), // ~1 day + blob_delete_batch_size: 100, + account_debit_batch_size: 1000, + } + } +} + +#[derive(Clone, Debug, Serialize, Deserialize)] +#[serde(transparent)] +pub struct SetAdminParams(pub Address); + +pub type SetConfigParams = IPCStorageConfig; + +#[derive(FromPrimitive)] +#[repr(u64)] +pub enum Method { + Constructor = METHOD_CONSTRUCTOR, + SetAdmin = frc42_dispatch::method_hash!("SetAdmin"), + GetAdmin = frc42_dispatch::method_hash!("GetAdmin"), + SetConfig = frc42_dispatch::method_hash!("SetConfig"), + GetConfig = frc42_dispatch::method_hash!("GetConfig"), +} + +pub fn get_admin(rt: &impl Runtime) -> Result, ActorError> { + deserialize_block(extract_send_result(rt.send( + &IPC_STORAGE_CONFIG_ACTOR_ADDR, + Method::GetAdmin as MethodNum, + None, + TokenAmount::zero(), + None, + SendFlags::READ_ONLY, + ))?) +} + +/// Requires caller is the ipc storage admin. +pub fn require_caller_is_admin(rt: &impl Runtime) -> Result<(), ActorError> { + let admin = get_admin(rt)?; + if admin.is_none() { + Err(ActorError::illegal_state( + "admin address not set".to_string(), + )) + } else { + Ok(rt.validate_immediate_caller_is(std::iter::once(&admin.unwrap()))?) + } +} + +pub fn get_config(rt: &impl Runtime) -> Result { + deserialize_block(extract_send_result(rt.send( + &IPC_STORAGE_CONFIG_ACTOR_ADDR, + Method::GetConfig as MethodNum, + None, + TokenAmount::zero(), + None, + SendFlags::READ_ONLY, + ))?) 
+} diff --git a/fendermint/actors/ipc_storage_config/src/lib.rs b/fendermint/actors/ipc_storage_config/src/lib.rs new file mode 100644 index 0000000000..1c32da639a --- /dev/null +++ b/fendermint/actors/ipc_storage_config/src/lib.rs @@ -0,0 +1,623 @@ +// Copyright 2024 Textile +// Copyright 2021-2023 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT + +use fendermint_actor_blobs_shared::credit::TokenCreditRate; +use fendermint_actor_ipc_storage_config_shared::{ + IPCStorageConfig, Method, SetAdminParams, SetConfigParams, +}; +use fil_actors_runtime::{ + actor_dispatch, actor_error, + runtime::{ActorCode, Runtime}, + ActorError, SYSTEM_ACTOR_ADDR, +}; +use fvm_ipld_encoding::tuple::*; +use fvm_shared::{address::Address, bigint::BigUint, clock::ChainEpoch}; +use ipc_storage_actor_sdk::{ + evm::emit_evm_event, + util::{to_delegated_address, to_id_and_delegated_address}, +}; +use num_traits::Zero; + +use crate::sol_facade::{ConfigAdminSet, ConfigSet}; + +mod sol_facade; + +#[cfg(feature = "fil-actor")] +fil_actors_runtime::wasm_trampoline!(Actor); + +pub const ACTOR_NAME: &str = "ipc_storage_config"; + +#[derive(Serialize_tuple, Deserialize_tuple, Debug, Clone)] +pub struct State { + /// The admin address that is allowed to update the config. + pub admin: Option<Address>
, + /// The network configuration. + pub config: IPCStorageConfig, +} + +#[derive(Serialize_tuple, Deserialize_tuple, Debug, Clone)] +pub struct ConstructorParams { + initial_blob_capacity: u64, + initial_token_credit_rate: TokenCreditRate, + initial_blob_credit_debit_interval: ChainEpoch, + initial_blob_min_ttl: ChainEpoch, + initial_blob_default_ttl: ChainEpoch, + initial_blob_delete_batch_size: u64, + initial_account_debit_batch_size: u64, +} + +pub struct Actor {} + +impl Actor { + /// Creates the actor + pub fn constructor(rt: &impl Runtime, params: ConstructorParams) -> Result<(), ActorError> { + rt.validate_immediate_caller_is(std::iter::once(&SYSTEM_ACTOR_ADDR))?; + let st = State { + admin: None, + config: IPCStorageConfig { + blob_capacity: params.initial_blob_capacity, + token_credit_rate: params.initial_token_credit_rate, + blob_credit_debit_interval: params.initial_blob_credit_debit_interval, + blob_min_ttl: params.initial_blob_min_ttl, + blob_default_ttl: params.initial_blob_default_ttl, + blob_delete_batch_size: params.initial_blob_delete_batch_size, + account_debit_batch_size: params.initial_account_debit_batch_size, + }, + }; + rt.create(&st) + } + + fn set_admin(rt: &impl Runtime, params: SetAdminParams) -> Result<(), ActorError> { + Self::ensure_update_allowed(rt)?; + + let (admin_id_addr, admin_delegated_addr) = to_id_and_delegated_address(rt, params.0)?; + + rt.transaction(|st: &mut State, _rt| { + st.admin = Some(admin_id_addr); + Ok(()) + })?; + + emit_evm_event(rt, ConfigAdminSet::new(admin_delegated_addr))?; + + Ok(()) + } + + fn get_admin(rt: &impl Runtime) -> Result, ActorError> { + rt.validate_immediate_caller_accept_any()?; + match rt.state::().map(|s| s.admin)? { + Some(admin) => { + let admin = to_delegated_address(rt, admin)?; + Ok(Some(admin)) + } + None => Ok(None), + } + } + + fn set_config(rt: &impl Runtime, params: SetConfigParams) -> Result<(), ActorError> { + let admin_exists = Self::ensure_update_allowed(rt)?; + + if params.token_credit_rate.rate() <= &BigUint::zero() { + return Err(actor_error!( + illegal_argument, + "token credit rate must be positive" + )); + } + if params.blob_capacity == 0 { + return Err(actor_error!( + illegal_argument, + "blob capacity must be positive" + )); + } + if params.blob_credit_debit_interval <= 0 { + return Err(actor_error!( + illegal_argument, + "credit debit interval must be positive" + )); + } + if params.blob_min_ttl <= 0 { + return Err(actor_error!( + illegal_argument, + "minimum TTL must be positive" + )); + } + if params.blob_default_ttl <= 0 { + return Err(actor_error!( + illegal_argument, + "default TTL must be positive" + )); + } + if params.blob_default_ttl < params.blob_min_ttl { + return Err(actor_error!( + illegal_argument, + "default TTL must be greater than or equal to minimum TTL" + )); + } + if params.blob_delete_batch_size == 0 { + return Err(actor_error!( + illegal_argument, + "blob delete batch size must be positive" + )); + } + if params.account_debit_batch_size == 0 { + return Err(actor_error!( + illegal_argument, + "account debit batch size must be positive" + )); + } + + let (admin_id_addr, admin_delegated_addr) = if !admin_exists { + // The first caller becomes admin + let addrs = to_id_and_delegated_address(rt, rt.message().caller())?; + (Some(addrs.0), Some(addrs.1)) + } else { + (None, None) + }; + + rt.transaction(|st: &mut State, _rt| { + if let Some(admin) = admin_id_addr { + st.admin = Some(admin); + } + st.config = params.clone(); + Ok(()) + })?; + + if let Some(admin) = 
admin_delegated_addr { + emit_evm_event(rt, ConfigAdminSet::new(admin))?; + } + emit_evm_event( + rt, + ConfigSet { + blob_capacity: params.blob_capacity, + token_credit_rate: params.token_credit_rate, + blob_credit_debit_interval: params.blob_credit_debit_interval, + blob_min_ttl: params.blob_min_ttl, + blob_default_ttl: params.blob_default_ttl, + blob_delete_batch_size: params.blob_delete_batch_size, + account_debit_batch_size: params.account_debit_batch_size, + }, + )?; + + Ok(()) + } + + fn get_config(rt: &impl Runtime) -> Result { + rt.validate_immediate_caller_accept_any()?; + rt.state::().map(|s| s.config) + } + + /// Ensures that immediate caller is allowed to update the config. + /// Returns whether the admin exists. + fn ensure_update_allowed(rt: &impl Runtime) -> Result { + let st = rt.state::()?; + let admin_exists = if let Some(admin) = st.admin { + if let Some(admin_id) = rt.resolve_address(&admin) { + rt.validate_immediate_caller_is(std::iter::once(&Address::new_id(admin_id)))? + } else { + // This should not happen. + return Err(ActorError::forbidden(String::from( + "failed to resolve config admin id", + ))); + } + true + } else { + // The first caller becomes the admin + rt.validate_immediate_caller_accept_any()?; + false + }; + Ok(admin_exists) + } +} + +impl ActorCode for Actor { + type Methods = Method; + + fn name() -> &'static str { + ACTOR_NAME + } + + actor_dispatch! { + Constructor => constructor, + SetAdmin => set_admin, + GetAdmin => get_admin, + SetConfig => set_config, + GetConfig => get_config, + } +} + +#[cfg(test)] +mod tests { + use super::*; + + use fendermint_actor_ipc_storage_config_shared::{ + IPCStorageConfig, IPC_STORAGE_CONFIG_ACTOR_ID, + }; + use fil_actors_evm_shared::address::EthAddress; + use fil_actors_runtime::test_utils::{ + expect_empty, MockRuntime, ETHACCOUNT_ACTOR_CODE_ID, SYSTEM_ACTOR_CODE_ID, + }; + use fvm_ipld_encoding::ipld_block::IpldBlock; + use fvm_shared::error::ExitCode; + use ipc_storage_actor_sdk::evm::to_actor_event; + + pub fn construct_and_verify( + blob_capacity: u64, + token_credit_rate: TokenCreditRate, + blob_credit_debit_interval: i32, + initial_blob_min_ttl: ChainEpoch, + initial_blob_default_ttl: ChainEpoch, + ) -> MockRuntime { + let rt = MockRuntime { + receiver: Address::new_id(IPC_STORAGE_CONFIG_ACTOR_ID), + ..Default::default() + }; + + rt.set_caller(*SYSTEM_ACTOR_CODE_ID, SYSTEM_ACTOR_ADDR); + rt.expect_validate_caller_addr(vec![SYSTEM_ACTOR_ADDR]); + + let result = rt + .call::( + Method::Constructor as u64, + IpldBlock::serialize_cbor(&ConstructorParams { + initial_blob_capacity: blob_capacity, + initial_token_credit_rate: token_credit_rate, + initial_blob_credit_debit_interval: ChainEpoch::from( + blob_credit_debit_interval, + ), + initial_blob_min_ttl, + initial_blob_default_ttl, + initial_blob_delete_batch_size: 100, + initial_account_debit_batch_size: 100, + }) + .unwrap(), + ) + .unwrap(); + expect_empty(result); + rt.verify(); + rt.reset(); + + rt + } + + #[test] + fn test_get_initial_admin() { + let rt = construct_and_verify(1024, TokenCreditRate::from(5usize), 3600, 3600, 3600); + + rt.expect_validate_caller_any(); + let admin = rt + .call::(Method::GetAdmin as u64, None) + .unwrap() + .unwrap() + .deserialize::>() + .unwrap(); + rt.verify(); + + assert!(admin.is_none()); + } + + #[test] + fn test_set_admin() { + let rt = construct_and_verify(1024, TokenCreditRate::from(5usize), 3600, 3600, 3600); + + let id_addr = Address::new_id(110); + let eth_addr = EthAddress(hex_literal::hex!( + 
"CAFEB0BA00000000000000000000000000000000" + )); + let f4_eth_addr = Address::new_delegated(10, ð_addr.0).unwrap(); + rt.set_delegated_address(id_addr.id().unwrap(), f4_eth_addr); + + rt.set_caller(*ETHACCOUNT_ACTOR_CODE_ID, id_addr); + rt.expect_validate_caller_any(); + let event = to_actor_event(ConfigAdminSet::new(f4_eth_addr)).unwrap(); + rt.expect_emitted_event(event); + let result = rt.call::( + Method::SetAdmin as u64, + IpldBlock::serialize_cbor(&SetAdminParams(f4_eth_addr)).unwrap(), + ); + assert!(result.is_ok()); + rt.verify(); + + rt.expect_validate_caller_any(); + let admin = rt + .call::(Method::GetAdmin as u64, None) + .unwrap() + .unwrap() + .deserialize::>() + .unwrap(); + rt.verify(); + + assert_eq!(admin, Some(f4_eth_addr)); + + // Reset admin + let new_id_addr = Address::new_id(111); + let new_eth_addr = EthAddress(hex_literal::hex!( + "CAFEB0BA00000000000000000000000000000001" + )); + let new_f4_eth_addr = Address::new_delegated(10, &new_eth_addr.0).unwrap(); + rt.set_delegated_address(new_id_addr.id().unwrap(), new_f4_eth_addr); + + rt.set_caller(*ETHACCOUNT_ACTOR_CODE_ID, id_addr); // current admin + rt.expect_validate_caller_addr(vec![id_addr]); + let event = to_actor_event(ConfigAdminSet::new(new_f4_eth_addr)).unwrap(); + rt.expect_emitted_event(event); + let result = rt.call::( + Method::SetAdmin as u64, + IpldBlock::serialize_cbor(&SetAdminParams(new_f4_eth_addr)).unwrap(), + ); + assert!(result.is_ok()); + rt.verify(); + + rt.expect_validate_caller_any(); + let admin = rt + .call::(Method::GetAdmin as u64, None) + .unwrap() + .unwrap() + .deserialize::>() + .unwrap(); + rt.verify(); + + assert_eq!(admin, Some(new_f4_eth_addr)); + } + + #[test] + fn test_set_admin_unauthorized() { + let rt = construct_and_verify(1024, TokenCreditRate::from(5usize), 3600, 3600, 3600); + + let id_addr = Address::new_id(110); + let eth_addr = EthAddress(hex_literal::hex!( + "CAFEB0BA00000000000000000000000000000000" + )); + let f4_eth_addr = Address::new_delegated(10, ð_addr.0).unwrap(); + rt.set_delegated_address(id_addr.id().unwrap(), f4_eth_addr); + + rt.set_caller(*ETHACCOUNT_ACTOR_CODE_ID, id_addr); + rt.expect_validate_caller_any(); + let event = to_actor_event(ConfigAdminSet::new(f4_eth_addr)).unwrap(); + rt.expect_emitted_event(event); + let result = rt.call::( + Method::SetAdmin as u64, + IpldBlock::serialize_cbor(&SetAdminParams(f4_eth_addr)).unwrap(), + ); + assert!(result.is_ok()); + rt.verify(); + + // Try to set again with a different caller + let unauthorized_id_addr = Address::new_id(111); + let unauthorized_eth_addr = EthAddress(hex_literal::hex!( + "CAFEB0BA00000000000000000000000000000001" + )); + let unauthorized_f4_eth_addr = + Address::new_delegated(10, &unauthorized_eth_addr.0).unwrap(); + rt.set_delegated_address(unauthorized_id_addr.id().unwrap(), unauthorized_f4_eth_addr); + + rt.set_caller(*ETHACCOUNT_ACTOR_CODE_ID, unauthorized_id_addr); // unauthorized caller + rt.expect_validate_caller_addr(vec![id_addr]); // expect current admin + let result = rt.call::( + Method::SetAdmin as u64, + IpldBlock::serialize_cbor(&SetAdminParams(unauthorized_f4_eth_addr)).unwrap(), + ); + rt.verify(); + + assert!(result.is_err()); + assert_eq!(result.unwrap_err().exit_code(), ExitCode::USR_FORBIDDEN); + } + + #[test] + fn test_set_config() { + let rt = construct_and_verify(1024, TokenCreditRate::from(5usize), 3600, 3600, 3600); + + let id_addr = Address::new_id(110); + let eth_addr = EthAddress(hex_literal::hex!( + "CAFEB0BA00000000000000000000000000000000" + )); + let 
f4_eth_addr = Address::new_delegated(10, ð_addr.0).unwrap(); + rt.set_delegated_address(id_addr.id().unwrap(), f4_eth_addr); + + rt.set_caller(*ETHACCOUNT_ACTOR_CODE_ID, id_addr); + rt.expect_validate_caller_any(); + + let admin_event = to_actor_event(ConfigAdminSet::new(f4_eth_addr)).unwrap(); + rt.expect_emitted_event(admin_event); + + let config = IPCStorageConfig { + blob_capacity: 2048, + token_credit_rate: TokenCreditRate::from(10usize), + blob_credit_debit_interval: ChainEpoch::from(1800), + blob_min_ttl: ChainEpoch::from(2 * 60 * 60), + blob_default_ttl: ChainEpoch::from(24 * 60 * 60), + blob_delete_batch_size: 100, + account_debit_batch_size: 100, + }; + let config_event = to_actor_event(ConfigSet { + blob_capacity: config.blob_capacity, + token_credit_rate: config.token_credit_rate.clone(), + blob_credit_debit_interval: config.blob_credit_debit_interval, + blob_min_ttl: config.blob_min_ttl, + blob_default_ttl: config.blob_default_ttl, + blob_delete_batch_size: config.blob_delete_batch_size, + account_debit_batch_size: config.account_debit_batch_size, + }) + .unwrap(); + rt.expect_emitted_event(config_event); + + let result = rt.call::( + Method::SetConfig as u64, + IpldBlock::serialize_cbor(&config).unwrap(), + ); + assert!(result.is_ok()); + rt.verify(); + + rt.expect_validate_caller_any(); + let ipc_storage_config = rt + .call::(Method::GetConfig as u64, None) + .unwrap() + .unwrap() + .deserialize::() + .unwrap(); + rt.verify(); + + assert_eq!(ipc_storage_config.blob_capacity, 2048); + assert_eq!( + ipc_storage_config.token_credit_rate, + TokenCreditRate::from(10usize) + ); + assert_eq!(ipc_storage_config.blob_credit_debit_interval, 1800); + assert_eq!( + ipc_storage_config.blob_min_ttl, + ChainEpoch::from(2 * 60 * 60) + ); + assert_eq!( + ipc_storage_config.blob_default_ttl, + ChainEpoch::from(24 * 60 * 60) + ); + + rt.expect_validate_caller_any(); + let admin = rt + .call::(Method::GetAdmin as u64, None) + .unwrap() + .unwrap() + .deserialize::>() + .unwrap(); + rt.verify(); + + assert_eq!(admin, Some(f4_eth_addr)); + } + + #[test] + fn test_set_invalid_config() { + struct TestCase { + name: &'static str, + config: IPCStorageConfig, + } + + let valid_config = IPCStorageConfig { + blob_capacity: 2048, + token_credit_rate: TokenCreditRate::from(10usize), + blob_credit_debit_interval: ChainEpoch::from(1800), + blob_min_ttl: ChainEpoch::from(2 * 60 * 60), + blob_default_ttl: ChainEpoch::from(24 * 60 * 60), + blob_delete_batch_size: 100, + account_debit_batch_size: 100, + }; + + let test_cases = vec![ + // Token credit rate validation + TestCase { + name: "token credit rate cannot be zero", + config: IPCStorageConfig { + token_credit_rate: TokenCreditRate::from(0usize), + ..valid_config.clone() + }, + }, + // Blob capacity validation + TestCase { + name: "blob capacity cannot be zero", + config: IPCStorageConfig { + blob_capacity: 0, + ..valid_config.clone() + }, + }, + // Credit debit interval validation + TestCase { + name: "blob credit debit interval cannot be zero", + config: IPCStorageConfig { + blob_credit_debit_interval: 0, + ..valid_config.clone() + }, + }, + TestCase { + name: "blob credit debit interval cannot be negative", + config: IPCStorageConfig { + blob_credit_debit_interval: -1, + ..valid_config.clone() + }, + }, + // TTL validations + TestCase { + name: "blob min ttl cannot be negative", + config: IPCStorageConfig { + blob_min_ttl: -1, + ..valid_config.clone() + }, + }, + TestCase { + name: "blob min ttl cannot be zero", + config: IPCStorageConfig { + 
blob_min_ttl: 0, + ..valid_config.clone() + }, + }, + TestCase { + name: "blob default ttl must be greater than or equal to min ttl", + config: IPCStorageConfig { + blob_min_ttl: 4 * 60 * 60, + blob_default_ttl: 2 * 60 * 60, + ..valid_config.clone() + }, + }, + TestCase { + name: "blob default ttl cannot be zero", + config: IPCStorageConfig { + blob_default_ttl: 0, + ..valid_config.clone() + }, + }, + TestCase { + name: "blob default ttl cannot be negative", + config: IPCStorageConfig { + blob_default_ttl: -1, + ..valid_config.clone() + }, + }, + ]; + + let rt = construct_and_verify(1024, TokenCreditRate::from(5usize), 3600, 3600, 3600); + + let id_addr = Address::new_id(110); + let eth_addr = EthAddress(hex_literal::hex!( + "CAFEB0BA00000000000000000000000000000000" + )); + let f4_eth_addr = Address::new_delegated(10, ð_addr.0).unwrap(); + rt.set_delegated_address(id_addr.id().unwrap(), f4_eth_addr); + + rt.set_caller(*ETHACCOUNT_ACTOR_CODE_ID, id_addr); + + // Now test all invalid configurations + for test_case in test_cases { + rt.expect_validate_caller_any(); + let result = rt.call::( + Method::SetConfig as u64, + IpldBlock::serialize_cbor(&test_case.config).unwrap(), + ); + rt.verify(); + assert!( + result.is_err(), + "expected case \"{}\" to fail but it succeeded", + test_case.name + ); + } + } + + #[test] + fn test_get_config() { + let rt = construct_and_verify(1024, TokenCreditRate::from(5usize), 3600, 3600, 3600); + + rt.expect_validate_caller_any(); + let ipc_storage_config = rt + .call::(Method::GetConfig as u64, None) + .unwrap() + .unwrap() + .deserialize::() + .unwrap(); + rt.verify(); + + assert_eq!(ipc_storage_config.blob_capacity, 1024); + assert_eq!( + ipc_storage_config.token_credit_rate, + TokenCreditRate::from(5usize) + ); + assert_eq!(ipc_storage_config.blob_credit_debit_interval, 3600); + assert_eq!(ipc_storage_config.blob_min_ttl, 3600); + assert_eq!(ipc_storage_config.blob_default_ttl, 3600); + } +} diff --git a/fendermint/actors/ipc_storage_config/src/sol_facade.rs b/fendermint/actors/ipc_storage_config/src/sol_facade.rs new file mode 100644 index 0000000000..3f633d7e7e --- /dev/null +++ b/fendermint/actors/ipc_storage_config/src/sol_facade.rs @@ -0,0 +1,54 @@ +// Copyright 2022-2024 Recall Contributors +// Copyright 2022-2024 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT + +use fendermint_actor_blobs_shared::credit::TokenCreditRate; +use fvm_shared::{address::Address, clock::ChainEpoch}; +use ipc_storage_actor_sdk::evm::TryIntoEVMEvent; +use ipc_storage_sol_facade::{ + config as sol, + primitives::U256, + types::{BigUintWrapper, H160}, +}; + +pub struct ConfigAdminSet { + pub admin: Address, +} +impl ConfigAdminSet { + pub fn new(admin: Address) -> Self { + Self { admin } + } +} +impl TryIntoEVMEvent for ConfigAdminSet { + type Target = sol::Events; + fn try_into_evm_event(self) -> Result { + let admin: H160 = self.admin.try_into()?; + Ok(sol::Events::ConfigAdminSet(sol::ConfigAdminSet { + admin: admin.into(), + })) + } +} + +pub struct ConfigSet { + pub blob_capacity: u64, + pub token_credit_rate: TokenCreditRate, + pub blob_credit_debit_interval: ChainEpoch, + pub blob_min_ttl: ChainEpoch, + pub blob_default_ttl: ChainEpoch, + pub blob_delete_batch_size: u64, + pub account_debit_batch_size: u64, +} +impl TryIntoEVMEvent for ConfigSet { + type Target = sol::Events; + fn try_into_evm_event(self) -> Result { + Ok(sol::Events::ConfigSet(sol::ConfigSet { + blobCapacity: U256::from(self.blob_capacity), + tokenCreditRate: 
BigUintWrapper(self.token_credit_rate.rate().clone()).into(), + blobCreditDebitInterval: U256::from(self.blob_credit_debit_interval), + blobMinTtl: U256::from(self.blob_min_ttl), + blobDefaultTtl: U256::from(self.blob_default_ttl), + blobDeleteBatchSize: U256::from(self.blob_delete_batch_size), + accountDebitBatchSize: U256::from(self.account_debit_batch_size), + })) + } +} diff --git a/fendermint/actors/machine/Cargo.toml b/fendermint/actors/machine/Cargo.toml new file mode 100644 index 0000000000..33eda82eec --- /dev/null +++ b/fendermint/actors/machine/Cargo.toml @@ -0,0 +1,29 @@ +[package] +name = "fendermint_actor_machine" +description = "Shared types for ADM machine actors" +license.workspace = true +edition.workspace = true +authors.workspace = true +version = "0.1.0" + +[lib] +crate-type = ["cdylib", "lib"] + +[dependencies] +anyhow = { workspace = true } +fil_actors_runtime = { workspace = true } +fil_actor_adm = { workspace = true } +frc42_dispatch = { workspace = true } +fvm_ipld_blockstore = { workspace = true } +fvm_ipld_encoding = { workspace = true } +fvm_shared = { workspace = true } +ipc_storage_sol_facade = { workspace = true, features = ["machine"] } +serde = { workspace = true, features = ["derive"] } + +ipc_storage_actor_sdk = { path = "../../../ipc-storage/actor_sdk" } + +[dev-dependencies] +fil_actors_runtime = { workspace = true, features = ["test_utils"] } + +[features] +fil-actor = ["fil_actors_runtime/fil-actor"] diff --git a/fendermint/actors/machine/src/lib.rs b/fendermint/actors/machine/src/lib.rs new file mode 100644 index 0000000000..ae92f69c93 --- /dev/null +++ b/fendermint/actors/machine/src/lib.rs @@ -0,0 +1,167 @@ +// Copyright 2025 Recall Contributors +// Copyright 2022-2024 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT + +use std::collections::HashMap; + +pub use fil_actor_adm::Kind; +use fil_actors_runtime::{ + actor_error, runtime::Runtime, ActorError, FIRST_EXPORTED_METHOD_NUMBER, INIT_ACTOR_ADDR, +}; +use fvm_ipld_blockstore::Blockstore; +use fvm_ipld_encoding::{ipld_block::IpldBlock, tuple::*}; +pub use fvm_shared::METHOD_CONSTRUCTOR; +use fvm_shared::{address::Address, MethodNum}; +use ipc_storage_actor_sdk::constants::ADM_ACTOR_ADDR; +use ipc_storage_actor_sdk::{ + evm::emit_evm_event, + util::{to_delegated_address, to_id_address, to_id_and_delegated_address}, +}; +use serde::{de::DeserializeOwned, Serialize}; + +use crate::sol_facade::{MachineCreated, MachineInitialized}; + +pub mod sol_facade; + +/// Params for creating a machine. +#[derive(Debug, Serialize_tuple, Deserialize_tuple)] +pub struct ConstructorParams { + /// The machine owner ID address. + pub owner: Address, + /// User-defined metadata. + pub metadata: HashMap, +} + +/// Params for initializing a machine. +#[derive(Debug, Serialize_tuple, Deserialize_tuple)] +pub struct InitParams { + /// The machine ID address. + pub address: Address, +} + +/// Machine initialization method number. +pub const INIT_METHOD: MethodNum = 2; +/// Get machine address method number. +pub const GET_ADDRESS_METHOD: MethodNum = frc42_dispatch::method_hash!("GetAddress"); +/// Get machine metadata method number. +pub const GET_METADATA_METHOD: MethodNum = frc42_dispatch::method_hash!("GetMetadata"); + +// TODO: Add method for changing owner from ADM actor. +pub trait MachineActor { + type State: MachineState + Serialize + DeserializeOwned; + + /// Machine actor constructor. 
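Machines are created in two phases: the Init actor calls `constructor` below (recording the owner and metadata), and the ADM actor then calls `init` to record the machine's own ID address. The address cell backing `init` is write-once; a small illustrative check (not part of the patch):

    let mut addr = MachineAddress::default();
    assert!(addr.get().is_err()); // unset until `init` runs
    addr.set(Address::new_id(1000)).unwrap(); // the first set succeeds
    assert!(addr.set(Address::new_id(1001)).is_err()); // later sets are forbidden
    assert_eq!(addr.get().unwrap(), Address::new_id(1000));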
+ fn constructor(rt: &impl Runtime, params: ConstructorParams) -> Result<(), ActorError> { + rt.validate_immediate_caller_is(std::iter::once(&INIT_ACTOR_ADDR))?; + + let (id_addr, delegated_addr) = to_id_and_delegated_address(rt, params.owner)?; + + let state = Self::State::new(rt.store(), id_addr, params.metadata)?; + rt.create(&state)?; + + emit_evm_event( + rt, + MachineCreated::new(state.kind(), delegated_addr, &state.metadata()), + ) + } + + /// Initializes the machine with its ID address. + fn init(rt: &impl Runtime, params: InitParams) -> Result<(), ActorError> { + rt.validate_immediate_caller_is(std::iter::once(&ADM_ACTOR_ADDR))?; + + let id_addr = to_id_address(rt, params.address, false)?; + + let kind = rt.transaction(|st: &mut Self::State, _| { + st.init(id_addr)?; + Ok(st.kind()) + })?; + + emit_evm_event(rt, MachineInitialized::new(kind, id_addr)) + } + + /// Get machine robust address. + fn get_address(rt: &impl Runtime) -> Result { + rt.validate_immediate_caller_accept_any()?; + let st = rt.state::()?; + st.address().get() + } + + /// Get machine metadata. + fn get_metadata(rt: &impl Runtime) -> Result { + rt.validate_immediate_caller_accept_any()?; + let st = rt.state::()?; + let owner = st.owner(); + let address = to_delegated_address(rt, owner).unwrap_or(owner); + Ok(Metadata { + owner: address, + kind: st.kind(), + metadata: st.metadata(), + }) + } + + fn fallback( + rt: &impl Runtime, + method: MethodNum, + _: Option, + ) -> Result, ActorError> { + rt.validate_immediate_caller_accept_any()?; + if method >= FIRST_EXPORTED_METHOD_NUMBER { + Ok(None) + } else { + Err(actor_error!(unhandled_message; "invalid method: {}", method)) + } + } +} + +/// Machine metadata. +#[derive(Debug, Clone, PartialEq, Serialize_tuple, Deserialize_tuple)] +pub struct Metadata { + /// Machine kind. + pub kind: Kind, + /// Machine owner ID address. + pub owner: Address, + /// User-defined data. + pub metadata: HashMap, +} + +/// Trait that must be implemented by machine state. +pub trait MachineState { + fn new( + store: &BS, + owner: Address, + metadata: HashMap, + ) -> Result + where + Self: Sized; + fn init(&mut self, address: Address) -> Result<(), ActorError>; + fn address(&self) -> MachineAddress; + fn kind(&self) -> Kind; + fn owner(&self) -> Address; + fn metadata(&self) -> HashMap; +} + +/// Machine address wrapper. +#[derive(Debug, Clone, Default, Serialize_tuple, Deserialize_tuple)] +pub struct MachineAddress { + address: Option
, +} + +impl MachineAddress { + /// Get machine address. + pub fn get(&self) -> Result { + self.address.ok_or(ActorError::illegal_state(String::from( + "machine address not set", + ))) + } + + /// Set machine address. This can only be called once. + pub fn set(&mut self, address: Address) -> Result<(), ActorError> { + if self.address.is_some() { + return Err(ActorError::forbidden(String::from( + "machine address already set", + ))); + } + self.address = Some(address); + Ok(()) + } +} diff --git a/fendermint/actors/machine/src/sol_facade.rs b/fendermint/actors/machine/src/sol_facade.rs new file mode 100644 index 0000000000..02b0f781d7 --- /dev/null +++ b/fendermint/actors/machine/src/sol_facade.rs @@ -0,0 +1,60 @@ +// Copyright 2022-2024 Recall Contributors +// Copyright 2022-2024 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT + +use std::collections::HashMap; + +use fil_actor_adm::Kind; +use fvm_shared::address::Address; +use ipc_storage_actor_sdk::evm::TryIntoEVMEvent; +use ipc_storage_sol_facade::{machine as sol, types::H160}; + +pub struct MachineCreated<'a> { + kind: Kind, + owner: Address, + metadata: &'a HashMap, +} +impl<'a> MachineCreated<'a> { + pub fn new(kind: Kind, owner: Address, metadata: &'a HashMap) -> Self { + Self { + kind, + owner, + metadata, + } + } +} +impl TryIntoEVMEvent for MachineCreated<'_> { + type Target = sol::Events; + fn try_into_evm_event(self) -> Result { + let owner: H160 = self.owner.try_into()?; + let metadata = fvm_ipld_encoding::to_vec(self.metadata)?; + Ok(sol::Events::MachineCreated(sol::MachineCreated { + kind: self.kind as u8, + owner: owner.into(), + metadata: metadata.into(), + })) + } +} + +pub struct MachineInitialized { + kind: Kind, + machine_address: Address, +} +impl MachineInitialized { + pub fn new(kind: Kind, machine_address: Address) -> Self { + Self { + kind, + machine_address, + } + } +} +impl TryIntoEVMEvent for MachineInitialized { + type Target = sol::Events; + fn try_into_evm_event(self) -> Result { + let machine_address: H160 = self.machine_address.try_into()?; + Ok(sol::Events::MachineInitialized(sol::MachineInitialized { + kind: self.kind as u8, + machineAddress: machine_address.into(), + })) + } +} diff --git a/fendermint/actors/timehub/Cargo.toml b/fendermint/actors/timehub/Cargo.toml new file mode 100644 index 0000000000..16a027cff0 --- /dev/null +++ b/fendermint/actors/timehub/Cargo.toml @@ -0,0 +1,40 @@ +[package] +name = "fendermint_actor_timehub" +description = "Actor for timestamping data hashes" +license.workspace = true +edition.workspace = true +authors.workspace = true +version = "0.1.0" + +[lib] +## lib is necessary for integration tests +## cdylib is necessary for Wasm build +crate-type = ["cdylib", "lib"] + +[dependencies] +anyhow = { workspace = true } +cid = { workspace = true, default-features = false } +multihash-codetable = { workspace = true } +fil_actors_runtime = { workspace = true } +frc42_dispatch = { workspace = true } +fvm_ipld_amt = { workspace = true } +fvm_ipld_blockstore = { workspace = true } +fvm_ipld_encoding = { workspace = true } +fvm_shared = { workspace = true } +num-derive = { workspace = true } +num-traits = { workspace = true } +ipc_storage_sol_facade = { workspace = true, features = ["timehub"] } +serde = { workspace = true, features = ["derive"] } +tracing = { workspace = true, features = ["log"] } + +fendermint_actor_blobs_shared = { path = "../blobs/shared" } +fendermint_actor_machine = { path = "../machine" } +ipc_storage_actor_sdk = { path = 
"../../../ipc-storage/actor_sdk" } + +[dev-dependencies] +fil_actors_runtime = { workspace = true, features = ["test_utils"] } +fil_actors_evm_shared = { workspace = true } +hex-literal = { workspace = true } + +[features] +fil-actor = ["fil_actors_runtime/fil-actor"] diff --git a/fendermint/actors/timehub/src/actor.rs b/fendermint/actors/timehub/src/actor.rs new file mode 100644 index 0000000000..1832d88e04 --- /dev/null +++ b/fendermint/actors/timehub/src/actor.rs @@ -0,0 +1,162 @@ +// Copyright 2025 Recall Contributors +// Copyright 2021-2023 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT + +use cid::Cid; +use fendermint_actor_blobs_shared::sdk::has_credit_approval; +use fendermint_actor_machine::MachineActor; +use fil_actors_runtime::{ + actor_dispatch_unrestricted, actor_error, + runtime::{ActorCode, Runtime}, + ActorError, +}; +use ipc_storage_actor_sdk::evm::emit_evm_event; +use ipc_storage_actor_sdk::evm::{InputData, InvokeContractParams, InvokeContractReturn}; +use ipc_storage_sol_facade::timehub::Calls; +use tracing::debug; + +use crate::sol_facade::{AbiCall, EventPushed}; +use crate::{sol_facade, Leaf, Method, PushParams, PushReturn, State, TIMEHUB_ACTOR_NAME}; + +#[cfg(feature = "fil-actor")] +fil_actors_runtime::wasm_trampoline!(TimehubActor); + +pub struct TimehubActor; + +// Raw type persisted in the store. +// This avoids using CID so that the store does not try to validate or resolve it. +type RawLeaf = (u64, Vec); + +impl TimehubActor { + fn push(rt: &impl Runtime, params: PushParams) -> Result { + rt.validate_immediate_caller_accept_any()?; + + // Check access control. + // Either the caller needs to be the Timehub owner, or the owner needs to have given a + // credit approval to the caller. + let state = rt.state::()?; + let owner = state.owner; + let from = rt.message().caller(); + + let actor_address = state.address.get()?; + if !has_credit_approval(rt, owner, from)? { + return Err(actor_error!( + forbidden; + format!("Unauthorized: missing credit approval from Timehub owner {} to {} for Timehub {}", owner, from, actor_address))); + } + + // Decode the raw bytes as a Cid and report any errors. + // However, we pass opaque bytes to the store as it tries to validate and resolve any CID + // it stores. + let cid = Cid::try_from(params.0.as_slice()).map_err(|_err| { + actor_error!(illegal_argument; + "data must be valid CID bytes") + })?; + let timestamp = rt.tipset_timestamp(); + let data: RawLeaf = (timestamp, params.0); + + let ret = rt.transaction(|st: &mut State, rt| st.push(rt.store(), data))?; + + emit_evm_event(rt, EventPushed::new(ret.index, timestamp, cid))?; + + Ok(ret) + } + + fn get_leaf_at(rt: &impl Runtime, index: u64) -> Result, ActorError> { + debug!(index, "get_leaf_at"); + rt.validate_immediate_caller_accept_any()?; + let st: State = rt.state()?; + // Decode leaf as timestamp and raw bytes. 
Then decode as a CID + let leaf: Option = st.get_leaf_at(rt.store(), index)?; + leaf.map(|(timestamp, bytes)| -> Result { + Ok(Leaf { + timestamp, + witnessed: Cid::try_from(bytes).map_err( + |_err| actor_error!(illegal_argument; "internal bytes are not a valid CID"), + )?, + }) + }) + .transpose() + } + + fn get_root(rt: &impl Runtime) -> Result { + rt.validate_immediate_caller_accept_any()?; + let st: State = rt.state()?; + st.get_root(rt.store()) + } + + fn get_peaks(rt: &impl Runtime) -> Result, ActorError> { + rt.validate_immediate_caller_accept_any()?; + let st: State = rt.state()?; + st.get_peaks(rt.store()) + } + + fn get_count(rt: &impl Runtime) -> Result { + rt.validate_immediate_caller_accept_any()?; + let st: State = rt.state()?; + Ok(st.leaf_count) + } + + fn invoke_contract( + rt: &impl Runtime, + params: InvokeContractParams, + ) -> Result { + let input_data: InputData = params.try_into()?; + if sol_facade::can_handle(&input_data) { + let output_data: Vec = match sol_facade::parse_input(&input_data)? { + Calls::getCount(call) => { + let count = Self::get_count(rt)?; + call.returns(count) + } + Calls::getLeafAt(call) => { + let params = call.params(); + let push_return = Self::get_leaf_at(rt, params)?; + call.returns(push_return) + } + Calls::getPeaks(call) => { + let peaks = Self::get_peaks(rt)?; + call.returns(peaks) + } + Calls::getRoot(call) => { + let root = Self::get_root(rt)?; + call.returns(root) + } + Calls::push(call) => { + let params = call.params(); + let push_return = Self::push(rt, params)?; + call.returns(push_return) + } + }; + Ok(InvokeContractReturn { output_data }) + } else { + Err(actor_error!(illegal_argument, "invalid call".to_string())) + } + } +} + +impl MachineActor for TimehubActor { + type State = State; +} + +impl ActorCode for TimehubActor { + type Methods = Method; + + fn name() -> &'static str { + TIMEHUB_ACTOR_NAME + } + + actor_dispatch_unrestricted! 
{ + Constructor => constructor, + Init => init, + GetAddress => get_address, + GetMetadata => get_metadata, + Push => push, + Get => get_leaf_at, + Root => get_root, + Peaks => get_peaks, + Count => get_count, + // EVM interop + InvokeContract => invoke_contract, + _ => fallback, + } +} diff --git a/fendermint/actors/timehub/src/lib.rs b/fendermint/actors/timehub/src/lib.rs new file mode 100644 index 0000000000..8bf738f1dd --- /dev/null +++ b/fendermint/actors/timehub/src/lib.rs @@ -0,0 +1,9 @@ +// Copyright 2025 Recall Contributors +// Copyright 2021-2023 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT + +mod actor; +mod shared; +mod sol_facade; + +pub use shared::*; diff --git a/fendermint/actors/timehub/src/shared.rs b/fendermint/actors/timehub/src/shared.rs new file mode 100644 index 0000000000..c9b30eeadd --- /dev/null +++ b/fendermint/actors/timehub/src/shared.rs @@ -0,0 +1,528 @@ +// Copyright 2025 Recall Contributors +// Copyright 2021-2023 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT + +use std::collections::HashMap; + +use cid::Cid; +use fendermint_actor_machine::{ + Kind, MachineAddress, MachineState, GET_ADDRESS_METHOD, GET_METADATA_METHOD, INIT_METHOD, + METHOD_CONSTRUCTOR, +}; +use fil_actors_runtime::ActorError; +use fvm_ipld_amt::Amt; +use fvm_ipld_blockstore::Blockstore; +use fvm_ipld_encoding::{strict_bytes, to_vec, tuple::*, CborStore, DAG_CBOR}; +use fvm_shared::address::Address; +use multihash_codetable::{Code, MultihashDigest}; +use num_derive::FromPrimitive; +use serde::{de::DeserializeOwned, Deserialize, Serialize}; + +pub const TIMEHUB_ACTOR_NAME: &str = "timehub"; +const BIT_WIDTH: u32 = 3; + +fn state_error(e: fvm_ipld_amt::Error) -> ActorError { + ActorError::illegal_state(e.to_string()) +} + +fn store_error(e: anyhow::Error) -> ActorError { + ActorError::illegal_state(e.to_string()) +} + +#[derive(FromPrimitive)] +#[repr(u64)] +pub enum Method { + Constructor = METHOD_CONSTRUCTOR, + Init = INIT_METHOD, + GetAddress = GET_ADDRESS_METHOD, + GetMetadata = GET_METADATA_METHOD, + Push = frc42_dispatch::method_hash!("Push"), + Get = frc42_dispatch::method_hash!("Get"), + Root = frc42_dispatch::method_hash!("Root"), + Peaks = frc42_dispatch::method_hash!("Peaks"), + Count = frc42_dispatch::method_hash!("Count"), + // EVM Interop + InvokeContract = frc42_dispatch::method_hash!("InvokeEVM"), +} + +/// Bytes of a CID to add. +#[derive(Serialize, Deserialize)] +#[serde(transparent)] +pub struct PushParams(#[serde(with = "strict_bytes")] pub Vec); + +#[derive(Serialize_tuple, Deserialize_tuple)] +pub struct PushReturn { + /// The new root of the timehub MMR after the object was pushed into it. + pub root: Cid, + /// The index of the object that was just pushed into the timehub. + pub index: u64, +} + +#[derive(Debug, Serialize_tuple, Deserialize_tuple)] +pub struct Leaf { + /// Timestamp of the witness in seconds since the UNIX epoch + pub timestamp: u64, + /// Witnessed CID + pub witnessed: Cid, +} + +/// Compute the hash of a pair of CIDs. +/// The hash is the CID of a new block containing the concatenation of the two CIDs. +/// We do not include the index of the element(s) because incoming data should already be "nonced". 
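Both pair helpers below derive the parent CID from the DAG-CBOR encoding of the `[left, right]` pair; `hash_and_put_pair` additionally persists that block. An illustrative equivalence check (not part of the patch, assuming a `MemoryBlockstore` with the `CborStore` trait in scope):

    let store = fvm_ipld_blockstore::MemoryBlockstore::default();
    let left = store.put_cbor(&vec![1u8, 2, 3], Code::Blake2b256).unwrap();
    let right = store.put_cbor(&vec![4u8, 5, 6], Code::Blake2b256).unwrap();
    let computed = hash_pair(Some(&left), Some(&right)).unwrap();
    let stored = hash_and_put_pair(&store, Some(&left), Some(&right)).unwrap();
    // Same parent CID either way; the second call also wrote the [left, right] block.
    assert_eq!(computed, stored);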
+fn hash_pair(left: Option<&Cid>, right: Option<&Cid>) -> anyhow::Result { + if let (Some(left), Some(right)) = (left, right) { + // Encode the CIDs into a binary format + let data = to_vec(&[left, right])?; + // Compute the CID for the block + let mh_code = Code::Blake2b256; + let mh = mh_code.digest(&data); + let cid = Cid::new_v1(DAG_CBOR, mh); + Ok(cid) + } else { + Err(ActorError::illegal_argument( + "hash_pair requires two CIDs".into(), + )) + } +} + +/// Compute and store the hash of a pair of CIDs. +/// The hash is the CID of a new block containing the concatenation of the two CIDs. +/// We do not include the index of the element(s) because incoming data should already be "nonced". +fn hash_and_put_pair( + store: &BS, + left: Option<&Cid>, + right: Option<&Cid>, +) -> anyhow::Result { + if let (Some(left), Some(right)) = (left, right) { + // Compute the CID for the block + store + .put_cbor(&[left, right], Code::Blake2b256) + .map_err(store_error) + } else { + Err(ActorError::illegal_argument( + "hash_pair requires two CIDs".into(), + )) + } +} + +/// Return the new peaks of the timehub after adding `new_leaf`. +fn push( + store: &BS, + leaf_count: u64, + peaks: &mut Amt, + obj: S, +) -> anyhow::Result { + // Create new leaf + let leaf = store + .put_cbor(&obj, Code::Blake2b256) + .map_err(store_error)?; + // Push the new leaf onto the peaks + peaks.set(peaks.count(), leaf).map_err(state_error)?; + // Count trailing ones in binary representation of the previous leaf_count + // This works because adding a leaf fills the next available spot, + // and the binary representation of this index will have trailing ones + // where merges are required. + let mut new_peaks = (!leaf_count).trailing_zeros(); + while new_peaks > 0 { + // Pop the last two peaks and push their hash + let right = peaks.delete(peaks.count() - 1).map_err(state_error)?; + let left = peaks.delete(peaks.count() - 1).map_err(state_error)?; + // Push the new peak onto the peak array + peaks + .set( + peaks.count(), + hash_and_put_pair(store, left.as_ref(), right.as_ref())?, + ) + .map_err(state_error)?; + new_peaks -= 1; + } + peaks.flush().map_err(state_error) +} + +/// Collect the peaks and combine to compute the root commitment. +fn bag_peaks(peaks: &Amt) -> anyhow::Result { + let peaks_count = peaks.count(); + // Handle special cases where we have no peaks or only one peak + if peaks_count == 0 { + return Ok(Cid::default()); + } + // If there is only one leaf element, we simply "promote" that to the root peak + if peaks_count == 1 { + return Ok(peaks.get(0).map_err(state_error)?.unwrap().to_owned()); + } + // Walk backward through the peaks, combining them pairwise + let mut root = hash_pair( + peaks.get(peaks_count - 2).map_err(state_error)?, + peaks.get(peaks_count - 1).map_err(state_error)?, + )?; + for i in 2..peaks_count { + root = hash_pair( + peaks.get(peaks_count - 1 - i).map_err(state_error)?, + Some(&root), + )?; + } + Ok(root) +} + +/// Given the size of the MMR and an index into the MMR, returns a tuple where the first element +/// represents the path through the subtree that the leaf node lives in. +/// The second element represents the index of the peak containing the subtree that the leaf node +/// lives in. +fn path_for_eigen_root(leaf_index: u64, leaf_count: u64) -> anyhow::Result> { + // Ensure `leaf_index` is within bounds. 
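    // Worked example (illustrative, not part of the patch):
    // * push (above): with leaf_count = 3 (0b011) before a push, (!3).trailing_zeros() == 2,
    //   so the new leaf triggers two merges and the four leaves collapse into a single peak;
    //   with leaf_count = 4 (0b100) there are no trailing ones, so the new leaf simply
    //   becomes a second peak.
    // * this function: for leaf_index = 2 and leaf_count = 5 (0b101), diff = 2 ^ 5 = 7,
    //   so eigentree_height = 2, merge_height = 4, bitmask = 3; offset = (5 & 3).count_ones() = 1,
    //   eigen_index = 2 - 1 - 1 = 0 (the 4-leaf eigentree), and local_path = (2 & 3) + 4 = 0b110,
    //   which get_at later walks as right child, then left child, reaching leaf 2.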
+ if leaf_index >= leaf_count { + return Ok(None); + } + // XOR turns matching bits into zeros and differing bits into ones, so to determine when + // the two "paths" converge, we simply look for the most significant 1 bit... + let diff = leaf_index ^ leaf_count; + // ...and then merge height of `leaf_index` and `leaf_count` occurs at ⌊log2(x ⊕ y)⌋ + let eigentree_height = u64::BITS - diff.leading_zeros() - 1; + let merge_height = 1 << eigentree_height; + // Compute a bitmask (all the lower bits set to 1) + let bitmask = merge_height - 1; + // The Hamming weight of leaf_count is the number of eigentrees in the structure. + let eigentree_count = leaf_count.count_ones(); + // Isolates the lower bits of leaf_count up to the merge_height, and count the one-bits. + // This is essentially the offset to the eigentree containing leaf_index + let offset = (leaf_count & bitmask).count_ones(); + // The index is simply the total eigentree count minus the offset (minus one) + let eigen_index = eigentree_count - offset - 1; + // Now that we have the offset, we need to determine the path within the local eigentree + let local_offset = leaf_index & bitmask; + // The local_index is the local_offset plus the merge_height for the local eigentree + let local_path = local_offset + merge_height; + Ok(Some((local_path, eigen_index as u64))) +} + +/// Returns None when the index doesn't point to a leaf. +/// If the index is valid, it will return a value or error. +fn get_at( + store: &BS, + leaf_index: u64, + leaf_count: u64, + peaks: &Amt, +) -> anyhow::Result> { + let (path, eigen_index) = match path_for_eigen_root(leaf_index, leaf_count)? { + None => return Ok(None), + Some(res) => res, + }; + let cid = match peaks.get(eigen_index)? { + Some(cid) => cid, + None => return Ok(None), + }; + // Special case where eigentree has a height of one + if path == 1 { + return Ok(Some(store.get_cbor::(cid)?.ok_or_else(|| { + anyhow::anyhow!("failed to get leaf for cid {}", cid) + })?)); + } + + let mut pair = match store.get_cbor::<[Cid; 2]>(cid)? { + Some(value) => value, + None => anyhow::bail!("failed to get eigentree root node for cid {}", cid), + }; + + let leading_zeros = path.leading_zeros(); + let significant_bits = 64 - leading_zeros; + + // Iterate over each bit from the most significant bit to the least + for i in 1..(significant_bits - 1) { + let bit = ((path >> (significant_bits - i - 1)) & 1) as usize; + let cid = &pair[bit]; + pair = store.get_cbor(cid)?.ok_or_else(|| { + anyhow::anyhow!("failed to get eigentree intermediate node for cid {}", cid) + })?; + } + + let bit = (path & 1) as usize; + let cid = &pair[bit]; + let leaf = store + .get_cbor::(cid)? + .ok_or_else(|| anyhow::anyhow!("failed to get leaf for cid {}", cid))?; + + Ok(Some(leaf)) +} + +/// The state represents an MMR with peaks stored in an AMT +#[derive(Serialize_tuple, Deserialize_tuple)] +pub struct State { + /// The machine address set by the init actor. + pub address: MachineAddress, + /// The machine rubust owner address. + pub owner: Address, + /// Root of the AMT that is storing the peaks of the MMR + pub peaks: Cid, + /// Number of leaf nodes in the timehub MMR. + pub leaf_count: u64, + /// User-defined metadata. 
+ pub metadata: HashMap, +} + +impl MachineState for State { + fn new( + store: &BS, + owner: Address, + metadata: HashMap, + ) -> anyhow::Result { + let peaks = match Amt::<(), _>::new_with_bit_width(store, BIT_WIDTH).flush() { + Ok(cid) => cid, + Err(e) => { + return Err(ActorError::illegal_state(format!( + "timehub actor failed to create empty Amt: {}", + e + ))); + } + }; + Ok(Self { + address: Default::default(), + owner, + peaks, + leaf_count: 0, + metadata, + }) + } + + fn init(&mut self, address: Address) -> anyhow::Result<(), ActorError> { + self.address.set(address) + } + + fn address(&self) -> MachineAddress { + self.address.clone() + } + + fn kind(&self) -> Kind { + Kind::Timehub + } + + fn owner(&self) -> Address { + self.owner + } + + fn metadata(&self) -> HashMap { + self.metadata.clone() + } +} + +impl State { + pub fn peak_count(&self) -> u32 { + self.leaf_count.count_ones() + } + + pub fn leaf_count(&self) -> u64 { + self.leaf_count + } + + pub fn push( + &mut self, + store: &BS, + obj: S, + ) -> anyhow::Result { + let mut amt = Amt::::load(&self.peaks, store).map_err(state_error)?; + self.peaks = push(store, self.leaf_count, &mut amt, obj)?; + self.leaf_count += 1; + + let root = bag_peaks(&amt)?; + Ok(PushReturn { + root, + index: self.leaf_count - 1, + }) + } + + pub fn get_root(&self, store: &BS) -> anyhow::Result { + let amt = Amt::::load(&self.peaks, store).map_err(state_error)?; + bag_peaks(&amt) + } + + pub fn get_peaks(&self, store: &BS) -> anyhow::Result, ActorError> { + let amt = Amt::::load(&self.peaks, store).map_err(state_error)?; + let mut peaks = Vec::new(); + amt.for_each(|_, cid| { + peaks.push(cid.to_owned()); + Ok(()) + }) + .map_err(state_error)?; + Ok(peaks) + } + + pub fn get_leaf_at( + &self, + store: &BS, + index: u64, + ) -> anyhow::Result, ActorError> { + let amt = Amt::::load(&self.peaks, store).map_err(state_error)?; + get_at::(store, index, self.leaf_count, &amt) + .map_err(|e| ActorError::serialization(e.to_string())) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use std::str::FromStr; + + #[test] + fn test_constructor() { + let store = fvm_ipld_blockstore::MemoryBlockstore::default(); + let state = State::new(&store, Address::new_id(100), HashMap::new()); + assert!(state.is_ok()); + let state = state.unwrap(); + assert_eq!( + state.peaks, + Cid::from_str("bafy2bzacedijw74yui7otvo63nfl3hdq2vdzuy7wx2tnptwed6zml4vvz7wee") + .unwrap() + ); + assert_eq!(state.leaf_count(), 0); + } + + #[test] + fn test_hash_and_put_pair() { + let store = fvm_ipld_blockstore::MemoryBlockstore::default(); + let mut state = State::new(&store, Address::new_id(100), HashMap::new()).unwrap(); + + let obj1 = vec![1, 2, 3]; + let obj2 = vec![1, 2, 3]; + let cid1 = state.push(&store, obj1).expect("push1 failed").root; + let cid2 = state.push(&store, obj2).expect("push2 failed").root; + + let pair_cid = + hash_and_put_pair(&store, Some(&cid1), Some(&cid2)).expect("hash_and_put_pair failed"); + let merkle_node = store + .get_cbor::<[Cid; 2]>(&pair_cid) + .expect("get_cbor failed") + .expect("get_cbor returned None"); + let expected = [cid1, cid2]; + assert_eq!(merkle_node, expected); + } + + #[test] + fn test_hash_pair() { + let store = fvm_ipld_blockstore::MemoryBlockstore::default(); + let mut state = State::new(&store, Address::new_id(100), HashMap::new()).unwrap(); + + let obj1 = vec![1, 2, 3]; + let obj2 = vec![1, 2, 3]; + let cid1 = state.push(&store, obj1).expect("push1 failed").root; + let cid2 = state.push(&store, obj2).expect("push2 failed").root; + + // 
Compare hash_pair and hash_and_put_pair and make sure they result in the same CID. + let hash1 = hash_pair(Some(&cid1), Some(&cid2)).expect("hash_pair failed"); + let hash2 = + hash_and_put_pair(&store, Some(&cid1), Some(&cid2)).expect("hash_and_put_pair failed"); + assert_eq!(hash1, hash2); + } + + #[test] + fn test_push_simple() { + let store = fvm_ipld_blockstore::MemoryBlockstore::default(); + let mut state = State::new(&store, Address::new_id(100), HashMap::new()).unwrap(); + let obj = vec![1, 2, 3]; + let res = state.push(&store, obj).expect("push failed"); + assert_eq!(res.root, state.get_root(&store).expect("get_root failed")); + assert_eq!(res.index, 0); + assert_eq!(state.leaf_count(), 1); + } + + #[test] + fn test_get_peaks() { + let store = fvm_ipld_blockstore::MemoryBlockstore::default(); + let mut state = State::new(&store, Address::new_id(100), HashMap::new()).unwrap(); + let obj = vec![1, 2, 3]; + assert!(state.push(&store, obj).is_ok()); + assert_eq!(state.leaf_count(), 1); + let peaks = state.get_peaks(&store); + assert!(peaks.is_ok()); + let peaks = peaks.unwrap(); + assert_eq!(peaks.len(), 1); + assert_eq!( + peaks[0], + Cid::from_str("bafy2bzacebltuz74cvzod3x7cx3eledj4gn5vjcer7znymoq56htf2e3cclok") + .unwrap() + ); + } + + #[test] + fn test_bag_peaks() { + let store = fvm_ipld_blockstore::MemoryBlockstore::default(); + let mut state = State::new(&store, Address::new_id(100), HashMap::new()).unwrap(); + let mut root = Cid::default(); + for i in 1..=11 { + let res = state.push(&store, vec![i]).unwrap(); + root = res.root; + assert_eq!(res.index, i - 1); + } + let peaks = state.get_peaks(&store).unwrap(); + assert_eq!(peaks.len(), 3); + assert_eq!(state.leaf_count(), 11); + assert_eq!(root, state.get_root(&store).expect("get_root failed")); + } + + #[test] + fn test_get_obj_basic() { + let store = fvm_ipld_blockstore::MemoryBlockstore::default(); + let mut state = State::new(&store, Address::new_id(100), HashMap::new()).unwrap(); + + state.push(&store, vec![0]).unwrap(); + assert_eq!(state.peak_count(), 1); + assert_eq!(state.leaf_count(), 1); + let item0 = state + .get_leaf_at::<_, Vec>(&store, 0u64) + .unwrap() + .unwrap(); + assert_eq!(item0, vec![0]); + + state.push(&store, vec![1]).unwrap(); + assert_eq!(state.peak_count(), 1); + assert_eq!(state.leaf_count(), 2); + let item0 = state + .get_leaf_at::<_, Vec>(&store, 0u64) + .unwrap() + .unwrap(); + let item1 = state + .get_leaf_at::<_, Vec>(&store, 1u64) + .unwrap() + .unwrap(); + assert_eq!(item0, vec![0]); + assert_eq!(item1, vec![1]); + + state.push(&store, vec![2]).unwrap(); + assert_eq!(state.peak_count(), 2); + assert_eq!(state.leaf_count(), 3); + let item0 = state + .get_leaf_at::<_, Vec>(&store, 0u64) + .unwrap() + .unwrap(); + let item1 = state + .get_leaf_at::<_, Vec>(&store, 1u64) + .unwrap() + .unwrap(); + let item2 = state + .get_leaf_at::<_, Vec>(&store, 2u64) + .unwrap() + .unwrap(); + assert_eq!(item0, vec![0]); + assert_eq!(item1, vec![1]); + assert_eq!(item2, vec![2]); + } + + #[test] + fn test_get_obj() { + let store = fvm_ipld_blockstore::MemoryBlockstore::default(); + let mut state = State::new(&store, Address::new_id(100), HashMap::new()).unwrap(); + for i in 0..31 { + state.push(&store, vec![i]).unwrap(); + assert_eq!(state.leaf_count(), i + 1); + + // As more items are added to the timehub, ensure each item remains gettable at + // each phase of the growth of the inner tree structures. 
+ for j in 0..i { + let item = state + .get_leaf_at::<_, Vec>(&store, j) + .unwrap() + .unwrap(); + assert_eq!(item, vec![j]); + } + } + assert_eq!(state.peak_count(), 5); + } +} diff --git a/fendermint/actors/timehub/src/sol_facade.rs b/fendermint/actors/timehub/src/sol_facade.rs new file mode 100644 index 0000000000..616f4a536d --- /dev/null +++ b/fendermint/actors/timehub/src/sol_facade.rs @@ -0,0 +1,115 @@ +// Copyright 2022-2024 Recall Contributors +// Copyright 2022-2024 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT + +use anyhow::Error; +use cid::Cid; +use fil_actors_runtime::{actor_error, ActorError}; +use ipc_storage_actor_sdk::declare_abi_call; +use ipc_storage_actor_sdk::evm::{InputData, TryIntoEVMEvent}; +use ipc_storage_sol_facade::primitives::U256; +use ipc_storage_sol_facade::timehub as sol; +use ipc_storage_sol_facade::types::{SolCall, SolInterface}; + +use crate::{Leaf, PushParams, PushReturn}; + +pub struct EventPushed { + index: u64, + timestamp: u64, + cid: Cid, +} +impl EventPushed { + pub fn new(index: u64, timestamp: u64, cid: Cid) -> Self { + Self { + index, + timestamp, + cid, + } + } +} +impl TryIntoEVMEvent for EventPushed { + type Target = sol::Events; + + fn try_into_evm_event(self) -> Result { + Ok(sol::Events::EventPushed(sol::EventPushed { + index: U256::from(self.index), + timestamp: U256::from(self.timestamp), + cid: self.cid.to_bytes().into(), + })) + } +} + +// ----- Calls ----- // + +declare_abi_call!(); + +pub fn can_handle(input_data: &InputData) -> bool { + sol::Calls::valid_selector(input_data.selector()) +} + +pub fn parse_input(input: &InputData) -> Result { + sol::Calls::abi_decode_raw(input.selector(), input.calldata(), true) + .map_err(|e| actor_error!(illegal_argument, format!("invalid call: {}", e))) +} + +impl AbiCall for sol::pushCall { + type Params = PushParams; + type Returns = PushReturn; + type Output = Vec; + fn params(&self) -> Self::Params { + PushParams(self.cid.0.iter().as_slice().to_vec()) + } + fn returns(&self, returns: Self::Returns) -> Self::Output { + let root = returns.root.to_bytes(); + let index = returns.index; + Self::abi_encode_returns(&(root, index)) + } +} + +impl AbiCall for sol::getLeafAtCall { + type Params = u64; + type Returns = Option; + type Output = Vec; + fn params(&self) -> Self::Params { + self.index + } + fn returns(&self, returns: Self::Returns) -> Self::Output { + let (timestamp, witnessed) = if let Some(leaf) = returns { + (leaf.timestamp, leaf.witnessed.to_bytes()) + } else { + (u64::default(), Vec::default()) + }; + Self::abi_encode_returns(&(timestamp, witnessed)) + } +} + +impl AbiCall for sol::getCountCall { + type Params = (); + type Returns = u64; + type Output = Vec; + fn params(&self) -> Self::Params {} + fn returns(&self, returns: Self::Returns) -> Self::Output { + Self::abi_encode_returns(&(returns,)) + } +} + +impl AbiCall for sol::getPeaksCall { + type Params = (); + type Returns = Vec; + type Output = Vec; + fn params(&self) -> Self::Params {} + fn returns(&self, returns: Self::Returns) -> Self::Output { + let cids = returns.iter().map(|cid| cid.to_bytes()).collect::>(); + Self::abi_encode_returns(&(cids,)) + } +} + +impl AbiCall for sol::getRootCall { + type Params = (); + type Returns = Cid; + type Output = Vec; + fn params(&self) -> Self::Params {} + fn returns(&self, returns: Self::Returns) -> Self::Output { + Self::abi_encode_returns(&(returns.to_bytes(),)) + } +} diff --git a/fendermint/app/Cargo.toml b/fendermint/app/Cargo.toml index 01c8a95803..3cb9cf48c9 100644 --- 
a/fendermint/app/Cargo.toml +++ b/fendermint/app/Cargo.toml @@ -103,4 +103,4 @@ fendermint_vm_snapshot = { path = "../vm/snapshot", features = ["arb"] } # Using a single binary to run the application as well as to execute client commands. [[bin]] name = "fendermint" -path = "src/main.rs" +path = "src/main.rs" \ No newline at end of file diff --git a/fendermint/rpc/Cargo.toml b/fendermint/rpc/Cargo.toml index 834a591802..8748c5a0a0 100644 --- a/fendermint/rpc/Cargo.toml +++ b/fendermint/rpc/Cargo.toml @@ -24,6 +24,8 @@ cid = { workspace = true } fvm_ipld_encoding = { workspace = true } fvm_shared = { workspace = true } +fendermint_actor_blobs_shared = { path = "../actors/blobs/shared" } +fendermint_actor_bucket = { path = "../actors/bucket" } fendermint_crypto = { path = "../crypto" } fendermint_vm_actor_interface = { path = "../vm/actor_interface" } fendermint_vm_message = { path = "../vm/message" } diff --git a/fendermint/rpc/src/message.rs b/fendermint/rpc/src/message.rs index a3996f76aa..08389c39a9 100644 --- a/fendermint/rpc/src/message.rs +++ b/fendermint/rpc/src/message.rs @@ -6,6 +6,7 @@ use std::path::Path; use anyhow::Context; use base64::Engine; use bytes::Bytes; +use fendermint_actor_bucket::{GetParams, Method::GetObject}; use fendermint_crypto::SecretKey; use fendermint_vm_actor_interface::{eam, evm}; use fendermint_vm_message::{chain::ChainMessage, signed::SignedMessage}; @@ -116,6 +117,33 @@ impl MessageFactory { Ok(msg) } + + /// Get an object from a bucket. + pub fn os_get( + &mut self, + address: Address, + params: GetParams, + value: TokenAmount, + gas_params: GasParams, + ) -> anyhow::Result { + let params = RawBytes::serialize(params)?; + Ok(self.transaction(address, GetObject as u64, params, value, gas_params)) + } + + pub fn blob_get( + &mut self, + blob_hash: fendermint_actor_blobs_shared::bytes::B256, + value: TokenAmount, + gas_params: GasParams, + ) -> anyhow::Result { + use fendermint_actor_blobs_shared::blobs::GetBlobParams; + use fendermint_actor_blobs_shared::method::Method::GetBlob; + use fendermint_actor_blobs_shared::BLOBS_ACTOR_ADDR; + + let params = GetBlobParams(blob_hash); + let params = RawBytes::serialize(params)?; + Ok(self.transaction(BLOBS_ACTOR_ADDR, GetBlob as u64, params, value, gas_params)) + } } /// Wrapper for MessageFactory which generates signed messages /// diff --git a/fendermint/rpc/src/query.rs b/fendermint/rpc/src/query.rs index 930606229e..ffdd3dea0c 100644 --- a/fendermint/rpc/src/query.rs +++ b/fendermint/rpc/src/query.rs @@ -19,7 +19,11 @@ use fendermint_vm_message::query::{ ActorState, BuiltinActors, FvmQuery, FvmQueryHeight, GasEstimate, StateParams, }; -use crate::response::encode_data; +use crate::message::{GasParams, MessageFactory}; +use crate::response::{decode_blob_get, decode_os_get, encode_data}; +use fendermint_actor_bucket::{GetParams, Object}; +use fendermint_vm_actor_interface::system; +use fvm_shared::econ::TokenAmount; #[derive(Serialize, Debug, Clone)] /// The parsed value from a query, along with the height at which the query was performed. @@ -128,6 +132,50 @@ pub trait QueryClient: Sync { Ok(QueryResponse { height, value }) } + /// Get an object in a bucket without including a transaction on the blockchain. 
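///
/// # Example (editor's sketch)
///
/// `bucket_addr`, `params`, and `gas_params` are placeholders assumed to be in
/// scope; `GetParams` comes from `fendermint_actor_bucket`, and the height can
/// be left at `FvmQueryHeight::default()` as elsewhere in this change.
///
/// ```ignore
/// let maybe_object = client
///     .os_get_call(
///         bucket_addr,
///         params,
///         TokenAmount::from_atto(0),
///         gas_params,
///         FvmQueryHeight::default(),
///     )
///     .await?;
/// if let Some(object) = maybe_object {
///     // `object` is a `fendermint_actor_bucket::Object`.
/// }
/// ```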
+ async fn os_get_call( + &mut self, + address: Address, + params: GetParams, + value: TokenAmount, + gas_params: GasParams, + height: FvmQueryHeight, + ) -> anyhow::Result> { + let msg = MessageFactory::new(system::SYSTEM_ACTOR_ADDR, 0) + .os_get(address, params, value, gas_params)?; + + let response = self.call(msg, height).await?; + if response.value.code.is_err() { + return Err(anyhow!("{}", response.value.info)); + } + + let return_data = decode_os_get(&response.value) + .context("error decoding data from deliver_tx in call")?; + + Ok(return_data) + } + + /// Get a blob from the blobs actor without including a transaction on the blockchain. + async fn blob_get_call( + &mut self, + blob_hash: fendermint_actor_blobs_shared::bytes::B256, + value: TokenAmount, + gas_params: GasParams, + height: FvmQueryHeight, + ) -> anyhow::Result> { + let msg = MessageFactory::new(system::SYSTEM_ACTOR_ADDR, 0) + .blob_get(blob_hash, value, gas_params)?; + + let response = self.call(msg, height).await?; + if response.value.code.is_err() { + return Err(anyhow!("{}", response.value.info)); + } + let return_data = decode_blob_get(&response.value) + .context("error decoding blob data from deliver_tx in call")?; + + Ok(return_data) + } + /// Run an ABCI query. async fn perform(&self, query: FvmQuery, height: FvmQueryHeight) -> anyhow::Result; } diff --git a/fendermint/rpc/src/response.rs b/fendermint/rpc/src/response.rs index f6ed6d567d..6f356513d0 100644 --- a/fendermint/rpc/src/response.rs +++ b/fendermint/rpc/src/response.rs @@ -3,6 +3,7 @@ use anyhow::{anyhow, Context}; use base64::Engine; use bytes::Bytes; +use fendermint_actor_bucket::Object; use fendermint_vm_actor_interface::eam::{self, CreateReturn}; use fvm_ipld_encoding::{BytesDe, RawBytes}; use tendermint::abci::response::DeliverTx; @@ -58,3 +59,18 @@ pub fn decode_fevm_return_data(data: RawBytes) -> anyhow::Result> { .map(|bz| bz.0) .map_err(|e| anyhow!("failed to deserialize bytes returned by FEVM method invocation: {e}")) } + +/// Decode the result of a bucket GetObject call. 
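///
/// # Example (editor's sketch)
///
/// ```ignore
/// // `deliver_tx` comes from a successful read-only call such as `os_get_call`.
/// let object: Option<Object> = decode_os_get(&deliver_tx)?;
/// // `None` is taken to mean the bucket holds nothing under the queried key.
/// ```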
+pub fn decode_os_get(deliver_tx: &DeliverTx) -> anyhow::Result> { + let data = decode_data(&deliver_tx.data)?; + fvm_ipld_encoding::from_slice::>(&data) + .map_err(|e| anyhow!("error parsing as Option: {e}")) +} + +pub fn decode_blob_get( + deliver_tx: &DeliverTx, +) -> anyhow::Result> { + let data = decode_data(&deliver_tx.data)?; + fvm_ipld_encoding::from_slice::>(&data) + .map_err(|e| anyhow!("error parsing as Option: {e}")) +} diff --git a/fendermint/testing/materializer/tests/docker_tests/benches.rs b/fendermint/testing/materializer/tests/docker_tests/benches.rs index c64a68d5c5..26caf87a70 100644 --- a/fendermint/testing/materializer/tests/docker_tests/benches.rs +++ b/fendermint/testing/materializer/tests/docker_tests/benches.rs @@ -168,7 +168,7 @@ async fn test_contract_deployment() -> Result<(), anyhow::Error> { let (testnet, cleanup) = make_testnet(MANIFEST, |_| {}).await?; let block_gas_limit = U256::from(10_000_000_000u64); - let max_tx_gas_limit = U256::from(50_000_000u64); + let max_tx_gas_limit = U256::from(500_000_000u64); let pangea = testnet.node(&testnet.root().node("pangea"))?; let provider = pangea @@ -225,6 +225,10 @@ async fn test_contract_deployment() -> Result<(), anyhow::Error> { let gas_estimation = middleware.estimate_gas(&deploy_tx, None).await.unwrap(); deploy_tx.set_gas(gas_estimation); + println!( + "gas estimation {} vs max {}", + gas_estimation, max_tx_gas_limit + ); assert!(gas_estimation <= max_tx_gas_limit); input.bencher.start(); diff --git a/fendermint/vm/actor_interface/src/adm.rs b/fendermint/vm/actor_interface/src/adm.rs new file mode 100644 index 0000000000..4f08d564c8 --- /dev/null +++ b/fendermint/vm/actor_interface/src/adm.rs @@ -0,0 +1,76 @@ +// Copyright 2022-2024 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT + +use fvm_ipld_encoding::tuple::{Deserialize_tuple, Serialize_tuple}; +use fvm_shared::{address::Address, ActorID, METHOD_CONSTRUCTOR}; +use serde::{Deserialize, Serialize}; +use std::collections::HashMap; +use std::fmt::Display; + +define_singleton!(ADM { + id: 17, + code_id: 17 +}); + +pub const ADM_ACTOR_NAME: &str = "adm"; + +/// ADM actor methods available. +#[repr(u64)] +pub enum Method { + Constructor = METHOD_CONSTRUCTOR, + CreateExternal = 1214262202, + UpdateDeployers = 1768606754, + ListMetadata = 2283215593, + GetMachineCode = 2892692559, +} + +/// The kinds of machines available. +#[derive(Debug, Serialize, Deserialize)] +pub enum Kind { + /// A bucket with S3-like key semantics. + Bucket, + /// An MMR accumulator, used for timestamping data. + Timehub, +} + +impl Display for Kind { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + let str = match self { + Self::Bucket => "bucket", + Self::Timehub => "timehub", + }; + write!(f, "{}", str) + } +} + +/// Machine metadata. +#[derive(Debug, Serialize_tuple, Deserialize_tuple)] +pub struct Metadata { + /// Machine kind. + pub kind: Kind, + /// Machine ID address. + pub address: Address, + /// User-defined metadata. + pub metadata: HashMap, +} + +/// Helper for machine creation. +#[derive(Debug, Serialize_tuple, Deserialize_tuple)] +pub struct CreateExternalParams { + pub owner: Address, + pub kind: Kind, + pub metadata: HashMap, +} + +/// Helper to read return value from machine creation. +#[derive(Debug, Clone, Serialize_tuple, Deserialize_tuple)] +pub struct CreateExternalReturn { + pub actor_id: ActorID, + pub robust_address: Option
, +} + +/// Helper for listing machine metadata by owner. +#[derive(Debug, Serialize_tuple, Deserialize_tuple)] +pub struct ListMetadataParams { + pub owner: Address, +} diff --git a/fendermint/vm/actor_interface/src/blob_reader.rs b/fendermint/vm/actor_interface/src/blob_reader.rs new file mode 100644 index 0000000000..94bce68b41 --- /dev/null +++ b/fendermint/vm/actor_interface/src/blob_reader.rs @@ -0,0 +1,4 @@ +// Copyright 2021-2023 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT + +define_id!(BLOB_READER { id: 67 }); diff --git a/fendermint/vm/actor_interface/src/blobs.rs b/fendermint/vm/actor_interface/src/blobs.rs new file mode 100644 index 0000000000..7eaf992bca --- /dev/null +++ b/fendermint/vm/actor_interface/src/blobs.rs @@ -0,0 +1,4 @@ +// Copyright 2022-2024 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT + +define_id!(BLOBS { id: 66 }); diff --git a/fendermint/vm/actor_interface/src/bucket.rs b/fendermint/vm/actor_interface/src/bucket.rs new file mode 100644 index 0000000000..4353840af6 --- /dev/null +++ b/fendermint/vm/actor_interface/src/bucket.rs @@ -0,0 +1,5 @@ +// Copyright 2022-2024 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT + +// Note: See this thread about choosing the ids https://filecoinproject.slack.com/archives/C04JR5R1UL8/p1706638112395409 +define_code!(BUCKET { code_id: 68 }); diff --git a/fendermint/vm/actor_interface/src/init.rs b/fendermint/vm/actor_interface/src/init.rs index 858a13faf8..b6245c9953 100644 --- a/fendermint/vm/actor_interface/src/init.rs +++ b/fendermint/vm/actor_interface/src/init.rs @@ -18,6 +18,8 @@ pub const FIRST_NON_SINGLETON_ADDR: ActorID = 100; define_singleton!(INIT { id: 1, code_id: 2 }); +pub const INIT_ACTOR_NAME: &str = "init"; + pub type AddressMap = BTreeMap; /// Delegated address of an Ethereum built-in actor. diff --git a/fendermint/vm/actor_interface/src/ipc_storage_config.rs b/fendermint/vm/actor_interface/src/ipc_storage_config.rs new file mode 100644 index 0000000000..f9c28d9696 --- /dev/null +++ b/fendermint/vm/actor_interface/src/ipc_storage_config.rs @@ -0,0 +1,4 @@ +// Copyright 2022-2024 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT + +define_id!(IPC_STORAGE_CONFIG { id: 70 }); diff --git a/fendermint/vm/actor_interface/src/lib.rs b/fendermint/vm/actor_interface/src/lib.rs index dea7cd1b70..8549c5baf4 100644 --- a/fendermint/vm/actor_interface/src/lib.rs +++ b/fendermint/vm/actor_interface/src/lib.rs @@ -44,6 +44,9 @@ macro_rules! 
define_singleton { pub mod account; pub mod activity; +pub mod adm; +pub mod blob_reader; +pub mod blobs; pub mod burntfunds; pub mod chainmetadata; pub mod cron; @@ -55,6 +58,7 @@ pub mod f3_light_client; pub mod gas_market; pub mod init; pub mod ipc; +pub mod ipc_storage_config; pub mod multisig; pub mod placeholder; pub mod reward; diff --git a/fendermint/vm/interpreter/Cargo.toml b/fendermint/vm/interpreter/Cargo.toml index b364e3c5f0..9cdfd22054 100644 --- a/fendermint/vm/interpreter/Cargo.toml +++ b/fendermint/vm/interpreter/Cargo.toml @@ -29,9 +29,19 @@ fendermint_actor_activity_tracker = { path = "../../actors/activity-tracker" } fendermint_actor_f3_light_client = { path = "../../actors/f3-light-client" } fendermint_actor_gas_market_eip1559 = { path = "../../actors/gas_market/eip1559" } fendermint_actor_eam = { path = "../../actors/eam" } +fendermint_actor_init = { path = "../../actors/init" } +fendermint_actor_adm = { path = "../../actors/adm" } +fendermint_actor_blobs = { path = "../../actors/blobs" } +fendermint_actor_blobs_shared = { path = "../../actors/blobs/shared" } +fendermint_actor_blob_reader = { path = "../../actors/blob_reader" } +fendermint_actor_ipc_storage_config = { path = "../../actors/ipc_storage_config" } +fendermint_actor_ipc_storage_config_shared = { path = "../../actors/ipc_storage_config/shared" } +fil_actor_adm = { workspace = true } fil_actor_evm = { workspace = true } fendermint_testing = { path = "../../testing", optional = true } ipc_actors_abis = { path = "../../../contract-bindings" } +iroh = { workspace = true } +iroh-blobs = { workspace = true } fil_actor_eam = { workspace = true } ipc-api = { path = "../../../ipc/api" } ipc-observability = { path = "../../../ipc/observability" } diff --git a/fendermint/vm/interpreter/src/genesis.rs b/fendermint/vm/interpreter/src/genesis.rs index 581c75d492..3597dc5bc4 100644 --- a/fendermint/vm/interpreter/src/genesis.rs +++ b/fendermint/vm/interpreter/src/genesis.rs @@ -5,6 +5,7 @@ use std::collections::{BTreeSet, HashMap}; use std::io::{Cursor, Read, Write}; use std::marker::PhantomData; use std::path::{Path, PathBuf}; +use std::str::FromStr; use std::sync::Arc; use anyhow::{anyhow, Context}; @@ -18,8 +19,8 @@ use fendermint_eth_hardhat::{ContractSourceAndName, Hardhat, FQN}; use fendermint_vm_actor_interface::diamond::{EthContract, EthContractMap}; use fendermint_vm_actor_interface::eam::EthAddress; use fendermint_vm_actor_interface::{ - account, activity, burntfunds, chainmetadata, cron, eam, f3_light_client, gas_market, init, - ipc, reward, system, EMPTY_ARR, + account, activity, adm, blob_reader, blobs, burntfunds, chainmetadata, cron, eam, + f3_light_client, gas_market, init, ipc, ipc_storage_config, reward, system, EMPTY_ARR, }; use fendermint_vm_core::Timestamp; use fendermint_vm_genesis::{ActorMeta, Collateral, Genesis, Power, PowerScale, Validator}; @@ -302,14 +303,17 @@ impl<'a> GenesisBuilder<'a> { .context("failed to create system actor")?; // Init actor + // Add Blobs actor ID to eth_builtin_ids so its delegated address is registered + let mut eth_builtin_ids: BTreeSet<_> = + ipc_entrypoints.values().map(|c| c.actor_id).collect(); + eth_builtin_ids.insert(blobs::BLOBS_ACTOR_ID); + eth_builtin_ids.insert(adm::ADM_ACTOR_ID); + let (init_state, addr_to_id) = init::State::new( state.store(), genesis.chain_name.clone(), &genesis.accounts, - &ipc_entrypoints - .values() - .map(|c| c.actor_id) - .collect::>(), + ð_builtin_ids, all_ipc_contracts.len() as u64, ) .context("failed to create init state")?; @@ 
-376,6 +380,34 @@ impl<'a> GenesisBuilder<'a> { ) .context("failed to create reward actor")?; + // ADM Address Manager (ADM) actor + let mut machine_codes = std::collections::HashMap::new(); + for machine_name in &["bucket", "timehub"] { + if let Some(cid) = state.custom_actor_manifest.code_by_name(machine_name) { + let kind = fendermint_actor_adm::Kind::from_str(machine_name) + .expect("failed to parse adm machine name"); + tracing::info!(machine_name, cid = cid.to_string(), "registered machine"); + machine_codes.insert(kind, *cid); + } + } + let adm_state = fendermint_actor_adm::State::new( + state.store(), + machine_codes, + fendermint_actor_adm::PermissionModeParams::Unrestricted, + )?; + let eth_addr = init::builtin_actor_eth_addr(adm::ADM_ACTOR_ID); + let f4_addr = fvm_shared::address::Address::from(eth_addr); + tracing::info!("!!!!!!!! SETUP adm ACTOR !!!!!!!!: {eth_addr}, {eth_addr:?}"); + state + .create_custom_actor( + fendermint_vm_actor_interface::adm::ADM_ACTOR_NAME, + adm::ADM_ACTOR_ID, + &adm_state, + TokenAmount::zero(), + Some(f4_addr), + ) + .context("failed to create adm actor")?; + // STAGE 1b: Then we initialize the in-repo custom actors. // Initialize the chain metadata actor which handles saving metadata about the chain @@ -394,6 +426,47 @@ impl<'a> GenesisBuilder<'a> { ) .context("failed to create chainmetadata actor")?; + // Initialize the ipc_storage config actor. + let ipc_storage_config_state = fendermint_actor_ipc_storage_config::State { + admin: None, + config: fendermint_actor_ipc_storage_config_shared::IPCStorageConfig::default(), + }; + state + .create_custom_actor( + fendermint_actor_ipc_storage_config::ACTOR_NAME, + ipc_storage_config::IPC_STORAGE_CONFIG_ACTOR_ID, + &ipc_storage_config_state, + TokenAmount::zero(), + None, + ) + .context("failed to create ipc_storage config actor")?; + + // Initialize the blob actor with delegated address for Ethereum/Solidity access. + let blobs_state = fendermint_actor_blobs::State::new(&state.store())?; + let blobs_eth_addr = init::builtin_actor_eth_addr(blobs::BLOBS_ACTOR_ID); + let blobs_f4_addr = fvm_shared::address::Address::from(blobs_eth_addr); + state + .create_custom_actor( + fendermint_actor_blobs::BLOBS_ACTOR_NAME, + blobs::BLOBS_ACTOR_ID, + &blobs_state, + TokenAmount::zero(), + Some(blobs_f4_addr), + ) + .context("failed to create blobs actor")?; + tracing::info!("!!!!!!!! SETUP BLOB ACTOR !!!!!!!!: {blobs_eth_addr}, {blobs_eth_addr:?}"); + + // Initialize the blob reader actor. + state + .create_custom_actor( + fendermint_actor_blob_reader::BLOB_READER_ACTOR_NAME, + blob_reader::BLOB_READER_ACTOR_ID, + &fendermint_actor_blob_reader::State::new(&state.store())?, + TokenAmount::zero(), + None, + ) + .context("failed to create blob reader actor")?; + let eam_state = fendermint_actor_eam::State::new( state.store(), PermissionModeParams::from(genesis.eam_permission_mode), @@ -409,6 +482,18 @@ impl<'a> GenesisBuilder<'a> { ) .context("failed to replace built in eam actor")?; + // Replace Init actor with our custom version that allows ADM to spawn actors + state + .replace_builtin_actor( + init::INIT_ACTOR_NAME, + init::INIT_ACTOR_ID, + fendermint_actor_init::IPC_INIT_ACTOR_NAME, + &init_state, + TokenAmount::zero(), + None, + ) + .context("failed to replace built in init actor")?; + // Currently hardcoded for now, once genesis V2 is implemented, should be taken // from genesis parameters. 
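// (Editor's note) For reference, the storage-related actors wired up in this
// genesis path use the following IDs, as declared elsewhere in this change:
//   ADM singleton        -> actor ID 17 (code ID 17)
//   Blobs                -> actor ID 66
//   Blob reader          -> actor ID 67
//   Bucket               -> code ID 68 (machine code, instantiated per bucket)
//   IPC storage config   -> actor ID 70
// Blobs and ADM are also inserted into `eth_builtin_ids` above so the init
// actor registers delegated (Ethereum-style) addresses for them at genesis.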
// diff --git a/infra/fendermint/scripts/genesis.toml b/infra/fendermint/scripts/genesis.toml index a182836be7..f1fccfbfc1 100644 --- a/infra/fendermint/scripts/genesis.toml +++ b/infra/fendermint/scripts/genesis.toml @@ -42,4 +42,4 @@ env = { "CMD" = "genesis --genesis-file /data/genesis.json ipc seal-genesis --bu [tasks.genesis-write] extend = "fendermint-tool" env = { "CMD" = "genesis --genesis-file /data/genesis.json into-tendermint --app-state ${SEALED_GENESIS} --out /data/genesis.committed.json" } -script.post = "cp ${BASE_DIR}/genesis.committed.json ${CMT_DIR}/config/genesis.json" +script.post = "cp ${BASE_DIR}/genesis.committed.json ${CMT_DIR}/config/genesis.json" \ No newline at end of file diff --git a/ipc-storage/README.md b/ipc-storage/README.md new file mode 100644 index 0000000000..e653738b8a --- /dev/null +++ b/ipc-storage/README.md @@ -0,0 +1,267 @@ +# Bucket Storage Guide (Path-Based Access) + +## Configuration + +```bash +# From RECALL_RUN.md +export TENDERMINT_RPC=http://localhost:26657 +export OBJECTS_LISTEN_ADDR=http://localhost:8080 +export NODE_OPERATION_OBJECT_API=http://localhost:8081 +export ETH_RPC=http://localhost:8545 +export BLOBS_ACTOR=0x6d342defae60f6402aee1f804653bbae4e66ae46 +export ADM_ACTOR=0x7caec36fc8a3a867ca5b80c6acb5e5871d05aa28 + +# Your credentials +export USER_SK= +export USER_ADDR= +``` + +## 6. Start Gateway +```bash +cargo build --release -p ipc-decentralized-storage --bin gateway --bin node + +# prepare to start node +export FM_NETWORK=test +# validator bls key file in hex format +export BLS_KEY_FILE=./test-network/bls_key.hex +# fendermint secret key file +export SECRET_KEY_FILE=./test-network/keys/alice.sk + +# register as a storage node operator +./target/release/node register-operator --bls-key-file $BLS_KEY_FILE --secret-key-file $SECRET_KEY_FILE --operator-rpc-url $NODE_OPERATION_OBJECT_API + +# start the node +./target/release/node run \ + --secret-key-file ./test-network/bls_key.hex \ + --iroh-path ./iroh_node \ + --iroh-v4-addr 0.0.0.0:11204 \ + --rpc-url http://localhost:26657 \ + --batch-size 10 \ + --poll-interval-secs 5 \ + --max-concurrent-downloads 10 \ + --rpc-bind-addr 127.0.0.1:8081 + +./target/release/gateway --bls-key-file $BLS_KEY_FILE --secret-key-file $SECRET_KEY_FILE --iroh-path ./iroh_gateway --objects-listen-addr 0.0.0.0:8080 + +``` +## 6. Download the Blob + +Download via HTTP API: + +```bash +# Download the blob +curl $NODE_OPERATION_OBJECT_API/v1/blobs/${BLOB_HASH#0x}/content +# You should see the original file +``` + +--- + +## 1. Create a Bucket + +First, create a bucket via the ADM (Actor Deployment Manager): + +```bash +# Buy 1 FIL worth of credits +cast send $BLOBS_ACTOR "buyCredit()" \ + --value 0.1ether \ + --private-key $USER_SK \ + --rpc-url http://localhost:8545 + +# Create a new bucket (caller becomes owner) +TX_RESULT=$(cast send $ADM_ACTOR "createBucket()" \ + --private-key $USER_SK \ + --rpc-url $ETH_RPC \ + --json) + +echo $TX_RESULT | jq '.' + +# Extract bucket address from MachineInitialized event +# Event signature: MachineInitialized(uint8 indexed kind, address machineAddress) +BUCKET_ADDR=$(echo $TX_RESULT | jq -r '.logs[] | select(.topics[0] == "0x8f7252642373d5f0b89a0c5cd9cd242e5cd5bb1a36aec623756e4f52a8c1ea6e") | .data' | cut -c27-66) +BUCKET_ADDR="0x$BUCKET_ADDR" + +echo "Bucket created at: $BUCKET_ADDR" +export BUCKET_ADDR +``` + +## 2. Upload and Register an Object + +### Step 2a: Upload file to Iroh (same as basic flow) + +```bash +# Create a test file +echo "Hello from bucket storage!" 
> myfile.txt + +# Get file size +BLOB_SIZE=$(stat -f%z myfile.txt 2>/dev/null || stat -c%s myfile.txt) + +# Upload to Iroh +UPLOAD_RESPONSE=$(curl -s -X POST $OBJECTS_API/v1/objects \ + -F "size=${BLOB_SIZE}" \ + -F "data=@myfile.txt") + +echo $UPLOAD_RESPONSE | jq '.' + +# Extract hashes +BLOB_HASH_B32=$(echo $UPLOAD_RESPONSE | jq -r '.hash') +METADATA_HASH_B32=$(echo $UPLOAD_RESPONSE | jq -r '.metadata_hash // .metadataHash') +NODE_ID_BASE32=$(curl -s $OBJECTS_API/v1/node | jq -r '.node_id') + +# Convert to hex (same as RECALL_RUN.md) +export BLOB_HASH=$(python3 -c " +import base64 +h = '$BLOB_HASH_B32'.upper() +padding = (8 - len(h) % 8) % 8 +h = h + '=' * padding +decoded = base64.b32decode(h) +if len(decoded) > 32: + decoded = decoded[:32] +elif len(decoded) < 32: + decoded = decoded + b'\x00' * (32 - len(decoded)) +print('0x' + decoded.hex()) +") + +export METADATA_HASH=$(python3 -c " +import base64 +h = '$METADATA_HASH_B32'.upper() +padding = (8 - len(h) % 8) % 8 +h = h + '=' * padding +decoded = base64.b32decode(h) +if len(decoded) > 32: + decoded = decoded[:32] +elif len(decoded) < 32: + decoded = decoded + b'\x00' * (32 - len(decoded)) +print('0x' + decoded.hex()) +") + +export SOURCE_NODE="0x$NODE_ID_BASE32" + +echo "Blob Hash: $BLOB_HASH" +echo "Metadata Hash: $METADATA_HASH" +echo "Source Node: $SOURCE_NODE" +``` + +### Step 2b: Register object in bucket with a path + +```bash +# Add object with a path-based key +# Signature: addObject(bytes32 source, string key, bytes32 hash, bytes32 recoveryHash, uint64 size) +cast send $BUCKET_ADDR "addObject(bytes32,string,bytes32,bytes32,uint64)" \ + $SOURCE_NODE \ + "documents/myfile.txt" \ + $BLOB_HASH \ + $METADATA_HASH \ + $BLOB_SIZE \ + --private-key $USER_SK \ + --rpc-url $ETH_RPC +``` + +## 3. Query Objects + +### Get a single object by path + +```bash +# Get object by exact path +# Returns: ObjectValue(bytes32 blobHash, bytes32 recoveryHash, uint64 size, uint64 expiry, (string,string)[] metadata) +cast call $BUCKET_ADDR "getObject(string)((bytes32,bytes32,uint64,uint64,(string,string)[]))" "documents/myfile.txt" --rpc-url $ETH_RPC +``` + +### List all objects (no filter) + +```bash +# List all objects in bucket +cast call $BUCKET_ADDR "queryObjects()(((string,(bytes32,uint64,uint64,(string,string)[]))[],string[],string))" \ + --rpc-url $ETH_RPC +``` + +### List with prefix (folder-like) + +```bash +# List everything under "documents/" +cast call $BUCKET_ADDR "queryObjects(string)(((string,(bytes32,uint64,uint64,(string,string)[]))[],string[],string))" "documents/" --rpc-url $ETH_RPC +``` + +### List with delimiter (S3-style folder simulation) + +```bash +# List top-level "folders" and files +# Returns: Query((string,ObjectState)[] objects, string[] commonPrefixes, string nextKey) +# Where ObjectState = (bytes32 blobHash, uint64 size, uint64 expiry, (string,string)[] metadata) +cast call $BUCKET_ADDR "queryObjects(string,string)(((string,(bytes32,uint64,uint64,(string,string)[]))[],string[],string))" "" "/" \ + --rpc-url $ETH_RPC + +# Example response: +# ([], ["documents/", "images/"], "") +# ^objects at root ^"folders" ^nextKey (empty = no more pages) + +# Extract blob hash from first object: +# BLOB_HASH=$(cast call ... 
| jq -r '.[0][0][1][0]') + +# List contents of "documents/" folder +cast call $BUCKET_ADDR "queryObjects(string,string)(((string,(bytes32,uint64,uint64,(string,string)[]))[],string[],string))" "documents/" "/" \ + --rpc-url $ETH_RPC +``` + +### Paginated queries + +```bash +# Query with pagination +# queryObjects(prefix, delimiter, startKey, limit) +cast call $BUCKET_ADDR "queryObjects(string,string,string,uint64)" \ + "documents/" \ + "/" \ + "" \ + 100 \ + --rpc-url $ETH_RPC + +# If nextKey is returned, use it for the next page +cast call $BUCKET_ADDR "queryObjects(string,string,string,uint64)" \ + "documents/" \ + "/" \ + "documents/page2start.txt" \ + 100 \ + --rpc-url $ETH_RPC +``` + +--- + +## 4. Update Object Metadata + +```bash +# Update metadata for an existing object +# Set value to empty string to delete a metadata key +cast send $BUCKET_ADDR "updateObjectMetadata(string,(string,string)[])" \ + "documents/myfile.txt" \ + '[("content-type","text/markdown"),("version","2")]' \ + --private-key $USER_SK \ + --rpc-url $ETH_RPC +``` + +--- + +## 5. Delete an Object + +```bash +# Delete object by path +cast send $BUCKET_ADDR "deleteObject(string)" "documents/myfile.txt" \ + --private-key $USER_SK \ + --rpc-url $ETH_RPC +``` + +--- + +## 6. Download Content + +Downloads still go through the Iroh/Objects API using the blob hash: + +```bash +# First get the object to retrieve its blob hash +OBJECT_INFO=$(cast call $BUCKET_ADDR "getObject(string)" "documents/myfile.txt" \ + --rpc-url $ETH_RPC) + +# Extract blob hash from response and download +# (The blob hash is the first bytes32 in the response) +curl $NODE_OPERATION_OBJECT_API/v1/blobs/${BLOB_HASH#0x}/content +``` + +--- \ No newline at end of file diff --git a/ipc-storage/actor_sdk/Cargo.toml b/ipc-storage/actor_sdk/Cargo.toml new file mode 100644 index 0000000000..f378489e4d --- /dev/null +++ b/ipc-storage/actor_sdk/Cargo.toml @@ -0,0 +1,21 @@ +[package] +name = "ipc_storage_actor_sdk" +version = "0.1.0" +authors.workspace = true +edition.workspace = true +license.workspace = true + +[lib] +crate-type = ["cdylib", "lib"] + +[dependencies] +fvm_shared = { workspace = true } +fvm_sdk = { workspace = true } +num-traits = { workspace = true } +fil_actors_runtime = { workspace = true } +fil_actor_adm = { workspace = true } +ipc_storage_sol_facade = { workspace = true, features = [] } +anyhow = { workspace = true } +fvm_ipld_encoding = { workspace = true } +serde = { workspace = true } +cid = { workspace = true } diff --git a/ipc-storage/actor_sdk/src/caller.rs b/ipc-storage/actor_sdk/src/caller.rs new file mode 100644 index 0000000000..45c06703d1 --- /dev/null +++ b/ipc-storage/actor_sdk/src/caller.rs @@ -0,0 +1,162 @@ +// Copyright 2025 Recall Contributors +// Copyright 2022-2024 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT + +use fil_actors_runtime::{extract_send_result, runtime::Runtime, ActorError}; +use fvm_shared::{address::Address, bigint::Zero, econ::TokenAmount, error::ExitCode, METHOD_SEND}; + +use crate::util::{to_id_address, to_id_and_delegated_address}; + +/// Helper stuct for managing actor message caller and sponsor addresses. +#[derive(Debug)] +pub struct Caller { + /// Caller ID-address. + id_addr: Address, + /// Caller delegated address. + delegated_addr: Option
<Address>, + /// Caller's sponsor ID-address. + sponsor_id_addr: Option<Address>
, + /// Caller's sponsor delegated address. + sponsor_delegated_addr: Option<Address>
, + /// Whether the caller actor was created. + created: bool, +} + +/// Caller option (authenticate or create). +#[derive(Debug, Default)] +pub enum CallerOption { + #[default] + None, + /// The target address must be the runtime's message origin or caller. + Auth, + /// Create the target address if it's not found. + Create, +} + +impl Caller { + /// Returns a new caller. + /// TODO: Remove origin authentication after the solidity facades are complete. + pub fn new( + rt: &impl Runtime, + address: Address, + sponsor: Option
, + option: CallerOption, + ) -> Result { + let mut created = false; + let id_addr = match to_id_address(rt, address, false) { + Ok(addr) => Ok(addr), + Err(e) + if matches!(option, CallerOption::Create) + && e.exit_code() == ExitCode::USR_NOT_FOUND => + { + create_actor(rt, address)?; + created = true; + to_id_address(rt, address, false) + } + Err(e) => Err(e), + }?; + + let caller = match sponsor { + Some(sponsor) => { + let sponsor_id_addr = to_id_address(rt, sponsor, false)?; + Self { + id_addr, + delegated_addr: None, + sponsor_id_addr: Some(sponsor_id_addr), + sponsor_delegated_addr: None, + created, + } + } + None => Self { + id_addr, + delegated_addr: None, + sponsor_id_addr: None, + sponsor_delegated_addr: None, + created, + }, + }; + Ok(caller) + } + + /// Returns a new caller. + /// Caller and sponsor must have a delegated address. + /// TODO: Remove origin authentication after the solidity facades are complete. + pub fn new_delegated( + rt: &impl Runtime, + address: Address, + sponsor: Option
, + option: CallerOption, + ) -> Result { + let mut created = false; + let (id_addr, delegated_addr) = match to_id_and_delegated_address(rt, address) { + Ok(addrs) => Ok(addrs), + Err(e) + if matches!(option, CallerOption::Create) + && e.exit_code() == ExitCode::USR_NOT_FOUND => + { + create_actor(rt, address)?; + created = true; + to_id_and_delegated_address(rt, address) + } + Err(e) => Err(e), + }?; + + let caller = match sponsor { + Some(sponsor) => { + let (sponsor_id_addr, sponsor_delegated_addr) = + to_id_and_delegated_address(rt, sponsor)?; + Self { + id_addr, + delegated_addr: Some(delegated_addr), + sponsor_id_addr: Some(sponsor_id_addr), + sponsor_delegated_addr: Some(sponsor_delegated_addr), + created, + } + } + None => Self { + id_addr, + delegated_addr: Some(delegated_addr), + sponsor_id_addr: None, + sponsor_delegated_addr: None, + created, + }, + }; + Ok(caller) + } + + /// Returns the caller delegated address. + pub fn address(&self) -> Address { + self.delegated_addr.unwrap_or(self.id_addr) + } + + /// Returns the caller address that should be used with actor state methods. + pub fn state_address(&self) -> Address { + self.id_addr + } + + /// Returns the sponsor address that should be used with actor state methods. + pub fn sponsor_state_address(&self) -> Option
<Address> { + self.sponsor_id_addr + } + + /// Returns the sponsor delegated address. + pub fn sponsor_address(&self) -> Option<Address>
{ + self.sponsor_delegated_addr + } + + /// Returns the address that should be used with events. + pub fn event_address(&self) -> Address { + self.sponsor_delegated_addr.unwrap_or(self.address()) + } + + /// Returns whether the caller actor was created. + pub fn created(&self) -> bool { + self.created + } +} + +/// Creates a new placeholder actor by sending zero tokens to the address. +fn create_actor(rt: &impl Runtime, address: Address) -> Result<(), ActorError> { + extract_send_result(rt.send_simple(&address, METHOD_SEND, None, TokenAmount::zero()))?; + Ok(()) +} diff --git a/ipc-storage/actor_sdk/src/constants.rs b/ipc-storage/actor_sdk/src/constants.rs new file mode 100644 index 0000000000..6d4bdcfbf5 --- /dev/null +++ b/ipc-storage/actor_sdk/src/constants.rs @@ -0,0 +1,11 @@ +// Copyright 2025 Recall Contributors +// Copyright 2022-2024 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT + +//! Constants for actors + +use fvm_shared::address::Address; + +/// ADM (Autonomous Data Management) actor address +/// Actor ID 17 is reserved for ADM in ipc storage networks +pub const ADM_ACTOR_ADDR: Address = Address::new_id(17); diff --git a/ipc-storage/actor_sdk/src/evm.rs b/ipc-storage/actor_sdk/src/evm.rs new file mode 100644 index 0000000000..564c1e0829 --- /dev/null +++ b/ipc-storage/actor_sdk/src/evm.rs @@ -0,0 +1,152 @@ +// Copyright 2025 Recall Contributors +// Copyright 2022-2024 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT + +use fil_actors_runtime::{actor_error, runtime::Runtime, ActorError}; +use fvm_ipld_encoding::{strict_bytes, tuple::*}; +use fvm_shared::event::{ActorEvent, Entry, Flags}; +use fvm_shared::IPLD_RAW; +use ipc_storage_sol_facade::primitives::IntoLogData; + +/// The event key prefix for the Ethereum log topics. +const EVENT_TOPIC_KEY_PREFIX: &str = "t"; + +/// The event key for the Ethereum log data. +const EVENT_DATA_KEY: &str = "d"; + +pub trait TryIntoEVMEvent { + type Target: IntoLogData; + fn try_into_evm_event(self) -> Result; +} + +/// Returns an [`ActorEvent`] from an EVM event. +pub fn to_actor_event(event: T) -> Result { + let event = event + .try_into_evm_event() + .map_err(|e| actor_error!(illegal_argument; "failed to build evm event: {}", e))?; + let log = event.to_log_data(); + let num_entries = log.topics().len() + 1; // +1 for log data + + let mut entries: Vec = Vec::with_capacity(num_entries); + for (i, topic) in log.topics().iter().enumerate() { + let key = format!("{}{}", EVENT_TOPIC_KEY_PREFIX, i + 1); + entries.push(Entry { + flags: Flags::FLAG_INDEXED_ALL, + key, + codec: IPLD_RAW, + value: topic.to_vec(), + }); + } + entries.push(Entry { + flags: Flags::FLAG_INDEXED_ALL, + key: EVENT_DATA_KEY.to_owned(), + codec: IPLD_RAW, + value: log.data.to_vec(), + }); + + Ok(entries.into()) +} + +/// Emits an [`ActorEvent`] from an EVM event. +pub fn emit_evm_event(rt: &impl Runtime, event: T) -> Result<(), ActorError> { + let actor_event = to_actor_event(event)?; + rt.emit_event(&actor_event) +} + +/// Params for invoking a contract. +#[derive(Default, Serialize_tuple, Deserialize_tuple)] +#[serde(transparent)] +pub struct InvokeContractParams { + #[serde(with = "strict_bytes")] + pub input_data: Vec, +} + +/// EVM call with selector (first 4 bytes) and calldata (remaining bytes). +pub struct InputData(Vec); + +impl InputData { + /// Returns the selector bytes. + pub fn selector(&self) -> [u8; 4] { + let mut selector = [0u8; 4]; + selector.copy_from_slice(&self.0[0..4]); + selector + } + + /// Returns the calldata bytes. 
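///
/// # Example (editor's sketch)
///
/// An EVM call is a 4-byte selector followed by the ABI-encoded arguments;
/// `InputData` simply splits the raw bytes at that boundary. Illustrative only.
///
/// ```ignore
/// let input = InputData::try_from(InvokeContractParams {
///     input_data: vec![0xAA, 0xBB, 0xCC, 0xDD, 0x01, 0x02],
/// })?;
/// assert_eq!(input.selector(), [0xAA, 0xBB, 0xCC, 0xDD]);
/// assert_eq!(input.calldata(), &[0x01, 0x02][..]);
/// ```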
+ pub fn calldata(&self) -> &[u8] { + &self.0[4..] + } +} + +impl TryFrom for InputData { + type Error = ActorError; + + fn try_from(value: InvokeContractParams) -> Result { + if value.input_data.len() < 4 { + return Err(ActorError::illegal_argument("input too short".to_string())); + } + Ok(InputData(value.input_data)) + } +} + +#[macro_export] +macro_rules! declare_abi_call { + () => { + pub trait AbiCall { + type Params; + type Returns; + type Output; + fn params(&self) -> Self::Params; + fn returns(&self, returns: Self::Returns) -> Self::Output; + } + + pub trait AbiCallRuntime { + type Params; + type Returns; + type Output; + fn params(&self, rt: &impl fil_actors_runtime::runtime::Runtime) -> Self::Params; + fn returns(&self, returns: Self::Returns) -> Self::Output; + } + + #[derive(Debug, Clone)] + pub struct AbiEncodeError { + message: String, + } + + impl From for AbiEncodeError { + fn from(error: anyhow::Error) -> Self { + Self { + message: format!("failed to abi encode {}", error), + } + } + } + + impl From for AbiEncodeError { + fn from(message: String) -> Self { + Self { message } + } + } + + impl From for AbiEncodeError { + fn from(error: fil_actors_runtime::ActorError) -> Self { + Self { + message: format!("{}", error), + } + } + } + + impl From for fil_actors_runtime::ActorError { + fn from(error: AbiEncodeError) -> Self { + fil_actors_runtime::actor_error!(serialization, error.message) + } + } + }; +} + +/// Returned when invoking a contract. +#[derive(Serialize_tuple, Deserialize_tuple)] +#[serde(transparent)] +pub struct InvokeContractReturn { + #[serde(with = "strict_bytes")] + pub output_data: Vec, +} diff --git a/ipc-storage/actor_sdk/src/lib.rs b/ipc-storage/actor_sdk/src/lib.rs new file mode 100644 index 0000000000..67d3ab6cb2 --- /dev/null +++ b/ipc-storage/actor_sdk/src/lib.rs @@ -0,0 +1,9 @@ +// Copyright 2025 Recall Contributors +// Copyright 2022-2024 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT + +pub mod caller; +pub mod constants; +pub mod evm; +pub mod storage; +pub mod util; diff --git a/ipc-storage/actor_sdk/src/storage.rs b/ipc-storage/actor_sdk/src/storage.rs new file mode 100644 index 0000000000..c1aa4a1b83 --- /dev/null +++ b/ipc-storage/actor_sdk/src/storage.rs @@ -0,0 +1,21 @@ +// Copyright 2025 Recall Contributors +// Copyright 2022-2024 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT + +use fvm_shared::error::ErrorNumber; + +/// Deletes a blob by hash from backing storage. +pub fn delete_blob(hash: [u8; 32]) -> Result<(), ErrorNumber> { + unsafe { sys::delete_blob(hash.as_ptr()) } +} + +mod sys { + use fvm_sdk::sys::fvm_syscalls; + + fvm_syscalls! { + module = "ipc_storage"; + + /// Deletes a blob by hash from backing storage. 
+ pub fn delete_blob(hash_ptr: *const u8) -> Result<()>; + } +} diff --git a/ipc-storage/actor_sdk/src/util.rs b/ipc-storage/actor_sdk/src/util.rs new file mode 100644 index 0000000000..c8acabe036 --- /dev/null +++ b/ipc-storage/actor_sdk/src/util.rs @@ -0,0 +1,105 @@ +// Copyright 2025 Recall Contributors +// Copyright 2022-2024 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT + +use cid::Cid; +use fil_actors_runtime::{ + deserialize_block, extract_send_result, + runtime::{builtins::Type, Runtime}, + ActorError, +}; +use fvm_ipld_encoding::ipld_block::IpldBlock; +use fvm_shared::sys::SendFlags; +use fvm_shared::{address::Address, bigint::BigUint, econ::TokenAmount, MethodNum}; +use num_traits::Zero; + +use crate::constants::ADM_ACTOR_ADDR; +pub use fil_actor_adm::Kind; + +/// Resolves ID address of an actor. +/// If `require_delegated` is `true`, the address must be of type +/// EVM (a Solidity contract), EthAccount (an Ethereum-style EOA), or Placeholder (a yet to be +/// determined EOA or Solidity contract). +pub fn to_id_address( + rt: &impl Runtime, + address: Address, + require_delegated: bool, +) -> Result { + let actor_id = rt + .resolve_address(&address) + .ok_or(ActorError::not_found(format!( + "actor {} not found", + address + )))?; + if require_delegated { + let code_cid = rt.get_actor_code_cid(&actor_id).ok_or_else(|| { + ActorError::not_found(format!("actor {} code cid not found", address)) + })?; + if !matches!( + rt.resolve_builtin_actor_type(&code_cid), + Some(Type::Placeholder | Type::EVM | Type::EthAccount) + ) { + return Err(ActorError::forbidden(format!( + "invalid address: address {} is not delegated", + address, + ))); + } + } + Ok(Address::new_id(actor_id)) +} + +/// Resolves an address to its external delegated address. +pub fn to_delegated_address(rt: &impl Runtime, address: Address) -> Result { + Ok(to_id_and_delegated_address(rt, address)?.1) +} + +/// Resolves an address to its ID address and external delegated address. +pub fn to_id_and_delegated_address( + rt: &impl Runtime, + address: Address, +) -> Result<(Address, Address), ActorError> { + let actor_id = rt + .resolve_address(&address) + .ok_or(ActorError::not_found(format!( + "actor {} not found", + address + )))?; + let delegated = rt + .lookup_delegated_address(actor_id) + .ok_or(ActorError::forbidden(format!( + "invalid address: actor {} is not delegated", + address + )))?; + Ok((Address::new_id(actor_id), delegated)) +} + +/// Returns the [`TokenAmount`] as a [`BigUint`]. +/// If the given amount is negative, the value returned will be zero. +pub fn token_to_biguint(amount: Option) -> BigUint { + amount + .unwrap_or_default() + .atto() + .to_biguint() + .unwrap_or_default() +} + +/// Checks if an address is a bucket actor by comparing its code CID +/// with the bucket code CID registered in the ADM actor. 
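/// (Editor's note) The raw method number 2892692559 used below corresponds to
/// `Method::GetMachineCode` in `fendermint/vm/actor_interface/src/adm.rs` from
/// this change set; the ADM lookup is performed as a read-only send
/// (`SendFlags::READ_ONLY`), so it cannot mutate ADM state.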
+pub fn is_bucket_address(rt: &impl Runtime, address: Address) -> Result { + let caller_code_cid = rt + .resolve_address(&address) + .and_then(|actor_id| rt.get_actor_code_cid(&actor_id)); + if let Some(caller_code_cid) = caller_code_cid { + let bucket_code_cid = deserialize_block::(extract_send_result(rt.send( + &ADM_ACTOR_ADDR, + 2892692559 as MethodNum, + IpldBlock::serialize_cbor(&Kind::Bucket)?, + TokenAmount::zero(), + None, + SendFlags::READ_ONLY, + ))?)?; + Ok(caller_code_cid.eq(&bucket_code_cid)) + } else { + Ok(false) + } +} diff --git a/ipc-storage/ipc-decentralized-storage/Cargo.toml b/ipc-storage/ipc-decentralized-storage/Cargo.toml new file mode 100644 index 0000000000..0fbef6023e --- /dev/null +++ b/ipc-storage/ipc-decentralized-storage/Cargo.toml @@ -0,0 +1,81 @@ +[package] +name = "ipc-decentralized-storage" +version = "0.1.0" +authors.workspace = true +edition.workspace = true +license.workspace = true + +[dependencies] +anyhow.workspace = true +async-trait.workspace = true +thiserror.workspace = true +serde.workspace = true +serde_json.workspace = true +tokio.workspace = true +tracing.workspace = true +futures.workspace = true +futures-util.workspace = true +bytes.workspace = true + +# HTTP server dependencies +warp.workspace = true +hex.workspace = true +lazy_static.workspace = true +prometheus.workspace = true +prometheus_exporter.workspace = true +uuid.workspace = true +mime_guess.workspace = true +urlencoding.workspace = true + +# Entanglement dependencies +entangler.workspace = true +entangler_storage.workspace = true + +# HTTP client dependencies +reqwest = { version = "0.11", features = ["json"] } + +# CLI dependencies +clap = { workspace = true, features = ["derive"] } +tracing-subscriber = { workspace = true, features = ["env-filter"] } + +# Iroh dependencies for decentralized storage +iroh.workspace = true +iroh-base.workspace = true +iroh-blobs.workspace = true +iroh_manager = { path = "../../ipc-storage/iroh_manager" } + +# Fendermint dependencies for RPC client +fendermint_rpc = { path = "../../fendermint/rpc" } +fendermint_vm_message = { path = "../../fendermint/vm/message" } +fendermint_vm_actor_interface = { path = "../../fendermint/vm/actor_interface" } +fendermint_actor_blobs_shared = { path = "../../fendermint/actors/blobs/shared" } +fendermint_actor_bucket = { path = "../../fendermint/actors/bucket" } +fendermint_crypto = { path = "../../fendermint/crypto" } + +# IPC dependencies for address parsing +ipc-api = { path = "../../ipc/api" } +ethers.workspace = true + +# FVM dependencies +fvm_shared.workspace = true +fvm_ipld_encoding.workspace = true + +# Tendermint +tendermint-rpc.workspace = true + +# BLS signatures +bls-signatures = { version = "0.13.1", default-features = false, features = ["blst"] } +blake2b_simd.workspace = true +rand = "0.8" + +[[bin]] +name = "gateway" +path = "src/bin/gateway.rs" + +[[bin]] +name = "node" +path = "src/bin/node.rs" + +[dev-dependencies] +tokio = { workspace = true, features = ["test-util"] } +tempfile.workspace = true diff --git a/ipc-storage/ipc-decentralized-storage/src/bin/gateway.rs b/ipc-storage/ipc-decentralized-storage/src/bin/gateway.rs new file mode 100644 index 0000000000..7b33dc992b --- /dev/null +++ b/ipc-storage/ipc-decentralized-storage/src/bin/gateway.rs @@ -0,0 +1,250 @@ +// Copyright 2022-2024 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT + +//! 
CLI for running the blob gateway with objects API + +use anyhow::{anyhow, Context, Result}; +use bls_signatures::{PrivateKey as BlsPrivateKey, Serialize as BlsSerialize}; +use clap::Parser; +use fendermint_rpc::message::SignedMessageFactory; +use fendermint_rpc::FendermintClient; +use fendermint_rpc::QueryClient; +use fendermint_vm_message::query::FvmQueryHeight; +use fvm_shared::address::{set_current_network, Address, Network}; +use fvm_shared::chainid::ChainID; +use ipc_decentralized_storage::gateway::objects_service; +use ipc_decentralized_storage::gateway::BlobGateway; +use ipc_decentralized_storage::objects::ObjectsConfig; +use iroh_manager::IrohNode; +use std::net::{SocketAddr, SocketAddrV4, SocketAddrV6}; +use std::path::PathBuf; +use std::time::Duration; +use tendermint_rpc::Url; +use tracing_subscriber::{layer::SubscriberExt, util::SubscriberInitExt, EnvFilter}; + +#[derive(Parser, Debug)] +#[command(name = "gateway")] +#[command( + about = "Run the blob gateway with objects API to query pending blobs and handle object uploads" +)] +struct Args { + /// Set the FVM Address Network: "mainnet" (f) or "testnet" (t) + #[arg(short, long, default_value = "testnet", env = "FM_NETWORK")] + network: String, + + /// Path to file containing the secp256k1 secret key in Base64 format (for signing transactions) + #[arg(long, env = "SECRET_KEY_FILE", required = true)] + secret_key_file: PathBuf, + + /// Path to file containing BLS private key in hex format (96 characters) + /// If not provided, a new key will be generated and saved to this path + #[arg(long, env = "BLS_KEY_FILE")] + bls_key_file: Option, + + /// Tendermint RPC URL + #[arg(short, long, default_value = "http://localhost:26657")] + rpc_url: Url, + + /// Number of pending blobs to fetch per query + #[arg(short, long, default_value = "10")] + batch_size: u32, + + /// Polling interval in seconds + #[arg(short = 'i', long, default_value = "5")] + poll_interval_secs: u64, + + // Objects service arguments + /// Enable objects HTTP API service + #[arg(long, default_value = "true")] + enable_objects: bool, + + /// Objects service listen address + #[arg(long, default_value = "127.0.0.1:8080", env = "OBJECTS_LISTEN_ADDR")] + objects_listen_addr: SocketAddr, + + /// Maximum object size in bytes (default 100MB) + #[arg(long, default_value = "104857600", env = "MAX_OBJECT_SIZE")] + max_object_size: u64, + + /// Path to Iroh data directory + #[arg(long, env = "IROH_PATH")] + iroh_path: PathBuf, + + /// Iroh IPv4 bind address + #[arg(long, env = "IROH_V4_ADDR")] + iroh_v4_addr: Option, + + /// Iroh IPv6 bind address + #[arg(long, env = "IROH_V6_ADDR")] + iroh_v6_addr: Option, +} + +/// Get the next sequence number (nonce) of an account. 
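+///
+/// Note that this fails if the address has no actor in state yet (for example, a
+/// fresh key that has never received funds), so the gateway's sender account must
+/// already exist on chain before startup.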
+async fn get_sequence(client: &impl QueryClient, addr: &Address) -> Result { + let state = client + .actor_state(addr, FvmQueryHeight::default()) + .await + .context("failed to get actor state")?; + + match state.value { + Some((_id, state)) => Ok(state.sequence), + None => Err(anyhow!("cannot find actor {addr}")), + } +} + +#[tokio::main] +async fn main() -> Result<()> { + // Initialize tracing + tracing_subscriber::registry() + .with(EnvFilter::try_from_default_env().unwrap_or_else(|_| EnvFilter::new("info"))) + .with(tracing_subscriber::fmt::layer()) + .init(); + + let args = Args::parse(); + + // Set the network for address display (f for mainnet, t for testnet) + let network = match args.network.to_lowercase().as_str() { + "main" | "mainnet" | "f" => Network::Mainnet, + "test" | "testnet" | "t" => Network::Testnet, + _ => { + anyhow::bail!( + "Invalid network: {}. Use 'mainnet' or 'testnet'", + args.network + ); + } + }; + set_current_network(network); + tracing::info!("Using network: {:?}", network); + + // Read secp256k1 secret key for signing transactions + tracing::info!( + "Reading secret key from: {}", + args.secret_key_file.display() + ); + let sk = SignedMessageFactory::read_secret_key(&args.secret_key_file) + .context("failed to read secret key")?; + + let pk = sk.public_key(); + // Use f1 address (secp256k1) for signing native FVM actor transactions + let from_addr = + Address::new_secp256k1(&pk.serialize()).context("failed to create f1 address")?; + tracing::info!("Gateway sender address: {}", from_addr); + + // Parse or generate BLS private key if provided + let _bls_private_key = if let Some(key_file) = &args.bls_key_file { + if key_file.exists() { + tracing::info!("Reading BLS private key from: {}", key_file.display()); + let key_hex = std::fs::read_to_string(key_file) + .context("failed to read BLS private key file")? 
+ .trim() + .to_string(); + + let key_bytes = hex::decode(&key_hex) + .context("failed to decode BLS private key hex string from file")?; + + let key = BlsPrivateKey::from_bytes(&key_bytes) + .map_err(|e| anyhow::anyhow!("failed to parse BLS private key: {:?}", e))?; + + tracing::info!("Loaded BLS private key successfully"); + tracing::info!( + "BLS Public key: {}", + hex::encode(key.public_key().as_bytes()) + ); + Some(key) + } else { + tracing::info!("BLS key file not found, generating a new BLS private key"); + let key = BlsPrivateKey::generate(&mut rand::thread_rng()); + let key_hex = hex::encode(key.as_bytes()); + + // Save the key to the file + std::fs::write(key_file, &key_hex) + .context("failed to write BLS private key to file")?; + + tracing::info!( + "Generated and saved new BLS private key to: {}", + key_file.display() + ); + tracing::info!( + "BLS Public key: {}", + hex::encode(key.public_key().as_bytes()) + ); + Some(key) + } + } else { + tracing::info!("No BLS private key file provided"); + None + }; + + tracing::info!("Starting blob gateway"); + tracing::info!("RPC URL: {}", args.rpc_url); + tracing::info!("Batch size: {}", args.batch_size); + tracing::info!("Poll interval: {}s", args.poll_interval_secs); + + // Start Iroh node for objects service + tracing::info!("Starting Iroh node at: {}", args.iroh_path.display()); + let iroh_node = IrohNode::persistent(args.iroh_v4_addr, args.iroh_v6_addr, &args.iroh_path) + .await + .context("failed to start Iroh node")?; + + let node_addr = iroh_node.endpoint().node_addr().await?; + tracing::info!("Iroh node started: {}", node_addr.node_id); + + // Start objects service if enabled (upload only) + if args.enable_objects { + let objects_config = ObjectsConfig { + listen_addr: args.objects_listen_addr, + tendermint_url: args.rpc_url.clone(), + max_object_size: args.max_object_size, + metrics_enabled: false, + metrics_listen: None, + }; + + // Use the gateway's own Iroh blobs client for uploads + let iroh_blobs = iroh_node.blobs_client().clone(); + + let _objects_handle = + objects_service::start_objects_service(objects_config, iroh_node.clone(), iroh_blobs); + tracing::info!( + "Objects upload service started on {}", + args.objects_listen_addr + ); + } + + // Create the Fendermint RPC client + let client = FendermintClient::new_http(args.rpc_url, None) + .context("failed to create Fendermint client")?; + + // Query the account nonce from the state + let sequence = get_sequence(&client, &from_addr) + .await + .context("failed to get account sequence")?; + + // Query the chain ID + let chain_id = client + .state_params(FvmQueryHeight::default()) + .await + .context("failed to get state params")? 
+ .value + .chain_id; + + tracing::info!("Chain ID: {}", chain_id); + tracing::info!("Account sequence: {}", sequence); + + // Create signed message factory + let mf = SignedMessageFactory::new(sk, from_addr, sequence, ChainID::from(chain_id)); + + // Bind the client with the message factory for transaction signing + let bound_client = client.bind(mf); + + // Create the gateway with the bound client + let mut gateway = BlobGateway::new( + bound_client, + args.batch_size, + Duration::from_secs(args.poll_interval_secs), + ); + + // Run the gateway + gateway.run().await?; + + Ok(()) +} diff --git a/ipc-storage/ipc-decentralized-storage/src/bin/node.rs b/ipc-storage/ipc-decentralized-storage/src/bin/node.rs new file mode 100644 index 0000000000..2359766dfc --- /dev/null +++ b/ipc-storage/ipc-decentralized-storage/src/bin/node.rs @@ -0,0 +1,598 @@ +// Copyright 2022-2024 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT + +//! Binary for running a decentralized storage node + +use anyhow::{anyhow, Context, Result}; +use bls_signatures::{PrivateKey as BlsPrivateKey, Serialize as BlsSerialize}; +use clap::{Parser, Subcommand}; +use ethers::types::Address as EthAddress; +use fendermint_actor_blobs_shared::method::Method; +use fendermint_actor_blobs_shared::operators::RegisterNodeOperatorParams; +use fendermint_actor_blobs_shared::BLOBS_ACTOR_ADDR; +use fendermint_rpc::message::{GasParams, SignedMessageFactory}; +use fendermint_rpc::tx::{TxClient, TxCommit}; +use fendermint_rpc::FendermintClient; +use fendermint_rpc::QueryClient; +use fendermint_vm_message::query::FvmQueryHeight; +use fvm_ipld_encoding::RawBytes; +use fvm_shared::address::{set_current_network, Address, Network}; +use fvm_shared::chainid::ChainID; +use fvm_shared::econ::TokenAmount; +use ipc_decentralized_storage::node::{launch, NodeConfig}; +use std::net::{SocketAddr, SocketAddrV4, SocketAddrV6}; +use std::path::PathBuf; +use std::str::FromStr; +use std::time::Duration; +use tendermint_rpc::Url; +use tracing::info; + +#[derive(Parser, Debug)] +#[command(name = "ipc-storage-node")] +#[command(about = "Decentralized storage node CLI", long_about = None)] +struct Cli { + /// Set the FVM Address Network: "mainnet" (f) or "testnet" (t) + #[arg(short, long, default_value = "testnet", env = "FM_NETWORK")] + network: String, + + #[command(subcommand)] + command: Commands, +} + +#[derive(Subcommand, Debug)] +enum Commands { + /// Run the storage node + Run(RunArgs), + /// Register as a node operator + RegisterOperator(RegisterOperatorArgs), + /// Generate a new BLS private key + GenerateBlsKey(GenerateBlsKeyArgs), + /// Query a blob by its hash + QueryBlob(QueryBlobArgs), + /// Query an object from a bucket by key + QueryObject(QueryObjectArgs), +} + +#[derive(Parser, Debug)] +struct RunArgs { + /// Path to file containing BLS private key in hex format (96 characters) + /// If not provided, a new key will be generated and saved to this path + #[arg(long, env = "BLS_KEY_FILE")] + secret_key_file: Option, + + /// Path to store Iroh data + #[arg(long, default_value = "./iroh_data")] + iroh_path: PathBuf, + + /// IPv4 bind address for Iroh (e.g., 0.0.0.0:11204) + #[arg(long)] + iroh_v4_addr: Option, + + /// IPv6 bind address for Iroh (e.g., [::]:11204) + #[arg(long)] + iroh_v6_addr: Option, + + /// Tendermint RPC URL + #[arg(long, default_value = "http://localhost:26657")] + rpc_url: String, + + /// Ethereum JSON-RPC URL (Fendermint ETH API endpoint) + #[arg(long, default_value = "http://localhost:8545")] + eth_rpc_url: String, + + /// 
Blobs actor address for event filtering (hex format with 0x prefix) + #[arg(long, default_value = "0xff00000000000000000000000000000000000064")] + blobs_actor_address: String, + + /// Number of blobs to fetch per query + #[arg(long, default_value = "10")] + batch_size: u32, + + /// Polling interval in seconds + #[arg(long, default_value = "5")] + poll_interval_secs: u64, + + /// Maximum concurrent blob downloads + #[arg(long, default_value = "10")] + max_concurrent_downloads: usize, + + /// Address to bind the RPC server for signature queries + #[arg(long, default_value = "127.0.0.1:8080")] + rpc_bind_addr: SocketAddr, +} + +#[derive(Parser, Debug)] +struct RegisterOperatorArgs { + /// Path to file containing BLS private key in hex format (96 characters) + #[arg(long, env = "BLS_KEY_FILE", required = true)] + bls_key_file: PathBuf, + + /// Path to file containing the secp256k1 secret key in Base64 format (for signing transactions) + #[arg(long, env = "SECRET_KEY_FILE", required = true)] + secret_key_file: PathBuf, + + /// RPC URL where this operator's node will be listening (e.g., http://my-node.example.com:8080) + #[arg(long, required = true)] + operator_rpc_url: String, + + /// Tendermint RPC URL for the chain + #[arg(long, default_value = "http://localhost:26657")] + chain_rpc_url: String, +} + +#[derive(Parser, Debug)] +struct GenerateBlsKeyArgs { + /// Path to save the generated BLS private key (hex format) + #[arg(long, short = 'o', default_value = "./bls_key.hex")] + output: PathBuf, + + /// Overwrite existing file if it exists + #[arg(long, short = 'f')] + force: bool, +} + +#[derive(Parser, Debug)] +struct QueryBlobArgs { + /// Blob hash to query (hex string, with or without 0x prefix) + #[arg(long, required = true)] + hash: String, + + /// Tendermint RPC URL for the chain + #[arg(long, default_value = "http://localhost:26657")] + rpc_url: String, + + /// Block height to query at (default: latest committed) + #[arg(long)] + height: Option, +} + +#[derive(Parser, Debug)] +struct QueryObjectArgs { + /// Bucket address (f-address or eth-address format) + #[arg(long, required = true)] + bucket: String, + + /// Object key/path within the bucket + #[arg(long, required = true)] + key: String, + + /// Tendermint RPC URL for the chain + #[arg(long, default_value = "http://localhost:26657")] + rpc_url: String, + + /// Block height to query at (default: latest committed) + #[arg(long)] + height: Option, +} + +#[tokio::main] +async fn main() -> Result<()> { + // Initialize tracing + tracing_subscriber::fmt() + .with_env_filter( + tracing_subscriber::EnvFilter::try_from_default_env() + .unwrap_or_else(|_| tracing_subscriber::EnvFilter::new("info")), + ) + .init(); + + let cli = Cli::parse(); + + // Set the network for address display (f for mainnet, t for testnet) + let network = match cli.network.to_lowercase().as_str() { + "main" | "mainnet" | "f" => Network::Mainnet, + "test" | "testnet" | "t" => Network::Testnet, + _ => { + anyhow::bail!( + "Invalid network: {}. 
Use 'mainnet' or 'testnet'", + cli.network + ); + } + }; + set_current_network(network); + info!("Using network: {:?}", network); + + match cli.command { + Commands::Run(args) => run_node(args).await, + Commands::RegisterOperator(args) => register_operator(args).await, + Commands::GenerateBlsKey(args) => generate_bls_key(args), + Commands::QueryBlob(args) => query_blob(args).await, + Commands::QueryObject(args) => query_object(args).await, + } +} + +async fn run_node(args: RunArgs) -> Result<()> { + // Parse or generate BLS private key + let bls_private_key = if let Some(key_file) = &args.secret_key_file { + if key_file.exists() { + info!("Reading BLS private key from: {}", key_file.display()); + let key_hex = std::fs::read_to_string(key_file) + .context("failed to read BLS private key file")? + .trim() + .to_string(); + + let key_bytes = hex::decode(&key_hex) + .context("failed to decode BLS private key hex string from file")?; + + BlsPrivateKey::from_bytes(&key_bytes) + .map_err(|e| anyhow::anyhow!("failed to parse BLS private key: {:?}", e))? + } else { + info!("Key file not found, generating a new BLS private key"); + let key = BlsPrivateKey::generate(&mut rand::thread_rng()); + let key_hex = hex::encode(key.as_bytes()); + + // Save the key to the file + std::fs::write(key_file, &key_hex) + .context("failed to write BLS private key to file")?; + + info!( + "Generated and saved new BLS private key to: {}", + key_file.display() + ); + info!("Public key: {}", hex::encode(key.public_key().as_bytes())); + + key + } + } else { + info!( + "No private key file provided, generating a new temporary key (will not be persisted)" + ); + let key = BlsPrivateKey::generate(&mut rand::thread_rng()); + info!("Generated temporary BLS private key"); + info!("Public key: {}", hex::encode(key.public_key().as_bytes())); + info!("WARNING: This key will not be saved and will be lost when the node stops!"); + key + }; + + // Parse RPC URL + let rpc_url = Url::from_str(&args.rpc_url).context("failed to parse RPC URL")?; + + // Parse blobs actor address + let blobs_actor_address: EthAddress = args + .blobs_actor_address + .parse() + .context("failed to parse blobs actor address")?; + + // Create node configuration + let config = NodeConfig { + iroh_path: args.iroh_path, + iroh_v4_addr: args.iroh_v4_addr, + iroh_v6_addr: args.iroh_v6_addr, + rpc_url, + eth_rpc_url: args.eth_rpc_url, + batch_size: args.batch_size, + poll_interval: Duration::from_secs(args.poll_interval_secs), + max_concurrent_downloads: args.max_concurrent_downloads, + bls_private_key, + rpc_bind_addr: args.rpc_bind_addr, + blobs_actor_address, + }; + + info!("Starting node with configuration: {:?}", config); + + // Launch the node + launch(config).await +} + +async fn register_operator(args: RegisterOperatorArgs) -> Result<()> { + info!("Registering as node operator"); + + // Read BLS private key + info!( + "Reading BLS private key from: {}", + args.bls_key_file.display() + ); + let key_hex = std::fs::read_to_string(&args.bls_key_file) + .context("failed to read BLS private key file")? 
+ .trim() + .to_string(); + + let key_bytes = + hex::decode(&key_hex).context("failed to decode BLS private key hex string from file")?; + + let bls_private_key = BlsPrivateKey::from_bytes(&key_bytes) + .map_err(|e| anyhow::anyhow!("failed to parse BLS private key: {:?}", e))?; + + // Get BLS public key + let bls_pubkey = bls_private_key.public_key().as_bytes().to_vec(); + + info!("BLS public key: {}", hex::encode(&bls_pubkey)); + info!("Operator RPC URL: {}", args.operator_rpc_url); + + // Read secp256k1 secret key for signing + info!( + "Reading secret key from: {}", + args.secret_key_file.display() + ); + let sk = SignedMessageFactory::read_secret_key(&args.secret_key_file) + .context("failed to read secret key")?; + + let pk = sk.public_key(); + // Use f1 address (secp256k1) instead of f410 (delegated/ethereum) because we're calling + // a native FVM actor with CBOR params, not an EVM contract with calldata + let from_addr = + Address::new_secp256k1(&pk.serialize()).context("failed to create f1 address")?; + info!("Sender address: {}", from_addr); + + // Parse chain RPC URL + let chain_rpc_url = + Url::from_str(&args.chain_rpc_url).context("failed to parse chain RPC URL")?; + + // Create Fendermint client + let client = FendermintClient::new_http(chain_rpc_url, None) + .context("failed to create Fendermint client")?; + + // Query the account nonce from the state + let sequence = get_sequence(&client, &from_addr) + .await + .context("failed to get account sequence")?; + + // Query the chain ID + let chain_id = client + .state_params(FvmQueryHeight::default()) + .await + .context("failed to get state params")? + .value + .chain_id; + + info!("Chain ID: {}", chain_id); + info!("Account sequence: {}", sequence); + + // Create signed message factory + let mf = SignedMessageFactory::new(sk, from_addr, sequence, ChainID::from(chain_id)); + + // Bind the client with the message factory + let mut client = client.bind(mf); + + // Prepare registration parameters + let params = RegisterNodeOperatorParams { + bls_pubkey: bls_pubkey.clone(), + rpc_url: args.operator_rpc_url.clone(), + }; + + let params_bytes = + RawBytes::serialize(params).context("failed to serialize RegisterNodeOperatorParams")?; + + // Gas params + let gas_params = GasParams { + gas_limit: 10_000_000_000, + gas_fee_cap: TokenAmount::from_atto(100), + gas_premium: TokenAmount::from_atto(100), + }; + + info!("Sending RegisterNodeOperator transaction..."); + + // Send the transaction + let res = TxClient::::transaction( + &mut client, + BLOBS_ACTOR_ADDR, + Method::RegisterNodeOperator as u64, + params_bytes, + TokenAmount::from_atto(0), + gas_params, + ) + .await + .context("failed to send RegisterNodeOperator transaction")?; + + if res.response.check_tx.code.is_err() { + anyhow::bail!( + "RegisterNodeOperator check_tx failed: {}", + res.response.check_tx.log + ); + } + + if res.response.deliver_tx.code.is_err() { + anyhow::bail!( + "RegisterNodeOperator deliver_tx failed: {}", + res.response.deliver_tx.log + ); + } + + info!("✓ Successfully registered as node operator!"); + info!( + " BLS Public key: {}", + hex::encode(bls_private_key.public_key().as_bytes()) + ); + info!(" RPC URL: {}", args.operator_rpc_url); + info!(" Tx hash: {}", res.response.hash); + + Ok(()) +} + +/// Get the next sequence number (nonce) of an account. 
+async fn get_sequence(client: &impl QueryClient, addr: &Address) -> Result { + let state = client + .actor_state(addr, FvmQueryHeight::default()) + .await + .context("failed to get actor state")?; + + match state.value { + Some((_id, state)) => Ok(state.sequence), + None => Err(anyhow!("cannot find actor {addr}")), + } +} + +/// Generate a new BLS private key and save it to a file. +fn generate_bls_key(args: GenerateBlsKeyArgs) -> Result<()> { + // Check if file already exists + if args.output.exists() && !args.force { + anyhow::bail!( + "File {} already exists. Use --force to overwrite.", + args.output.display() + ); + } + + info!("Generating new BLS private key..."); + + // Generate the key + let key = BlsPrivateKey::generate(&mut rand::thread_rng()); + let key_hex = hex::encode(key.as_bytes()); + let pubkey_hex = hex::encode(key.public_key().as_bytes()); + + // Save the key to the file + std::fs::write(&args.output, &key_hex).context("failed to write BLS private key to file")?; + + info!("✓ BLS private key generated successfully!"); + info!(" Private key saved to: {}", args.output.display()); + info!(" Public key: {}", pubkey_hex); + + Ok(()) +} + +/// Query a blob by its hash from the blobs actor. +async fn query_blob(args: QueryBlobArgs) -> Result<()> { + use fendermint_actor_blobs_shared::bytes::B256; + use fendermint_rpc::message::GasParams; + use fvm_shared::econ::TokenAmount; + + info!("Querying blob with hash: {}", args.hash); + + // Parse blob hash - strip 0x prefix if present + let blob_hash_hex = args.hash.strip_prefix("0x").unwrap_or(&args.hash); + + let blob_hash_bytes = + hex::decode(blob_hash_hex).context("failed to decode blob hash hex string")?; + + if blob_hash_bytes.len() != 32 { + anyhow::bail!( + "blob hash must be 32 bytes, got {} bytes", + blob_hash_bytes.len() + ); + } + + let mut hash_array = [0u8; 32]; + hash_array.copy_from_slice(&blob_hash_bytes); + let blob_hash = B256(hash_array); + + // Parse RPC URL + let rpc_url = Url::from_str(&args.rpc_url).context("failed to parse RPC URL")?; + + // Create Fendermint client + let mut client = + FendermintClient::new_http(rpc_url, None).context("failed to create Fendermint client")?; + + // Set query height + let height = args + .height + .map(FvmQueryHeight::from) + .unwrap_or(FvmQueryHeight::Committed); + + // Gas params for the query call + let gas_params = GasParams { + gas_limit: Default::default(), + gas_fee_cap: Default::default(), + gas_premium: Default::default(), + }; + + // Query the blob + let maybe_blob = client + .blob_get_call(blob_hash, TokenAmount::default(), gas_params, height) + .await + .context("failed to query blob")?; + + match maybe_blob { + Some(blob) => { + println!("Blob found!"); + println!(" Hash: 0x{}", hex::encode(blob_hash.0)); + println!(" Size: {} bytes", blob.size); + println!(" Metadata hash: 0x{}", hex::encode(blob.metadata_hash.0)); + println!(" Status: {:?}", blob.status); + println!(" Subscribers: {}", blob.subscribers.len()); + + // Print subscriber details (subscription_id -> expiry epoch) + for (subscription_id, expiry) in &blob.subscribers { + println!(" - Subscription ID: {}", subscription_id); + println!(" Expiry epoch: {}", expiry); + } + } + None => { + println!("Blob not found with hash: 0x{}", hex::encode(blob_hash.0)); + } + } + + Ok(()) +} + +/// Query an object from a bucket by its key. 
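+///
+/// Example invocation (the bucket address and key are placeholders):
+///
+/// ```text
+/// ipc-storage-node query-object \
+///     --bucket t2abc... \
+///     --key my/object/key \
+///     --rpc-url http://localhost:26657
+/// ```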
+async fn query_object(args: QueryObjectArgs) -> Result<()> { + use fendermint_actor_bucket::GetParams; + use fendermint_rpc::message::GasParams; + use fvm_shared::address::{Error as NetworkError, Network}; + use fvm_shared::econ::TokenAmount; + use ipc_api::ethers_address_to_fil_address; + + info!( + "Querying object from bucket: {} with key: {}", + args.bucket, args.key + ); + + // Parse bucket address (supports both f-address and eth-address formats) + let bucket_address = Network::Mainnet + .parse_address(&args.bucket) + .or_else(|e| match e { + NetworkError::UnknownNetwork => Network::Testnet.parse_address(&args.bucket), + _ => Err(e), + }) + .or_else(|_| { + let addr = ethers::types::Address::from_str(&args.bucket) + .context("failed to parse as eth address")?; + ethers_address_to_fil_address(&addr) + }) + .context("failed to parse bucket address")?; + + info!("Parsed bucket address: {}", bucket_address); + + // Parse RPC URL + let rpc_url = Url::from_str(&args.rpc_url).context("failed to parse RPC URL")?; + + // Create Fendermint client + let mut client = + FendermintClient::new_http(rpc_url, None).context("failed to create Fendermint client")?; + + // Set query height + let height = args + .height + .map(FvmQueryHeight::from) + .unwrap_or(FvmQueryHeight::Committed); + + // Gas params for the query call + let gas_params = GasParams { + gas_limit: Default::default(), + gas_fee_cap: Default::default(), + gas_premium: Default::default(), + }; + + // Query the object + let params = GetParams(args.key.as_bytes().to_vec()); + let maybe_object = client + .os_get_call( + bucket_address, + params, + TokenAmount::default(), + gas_params, + height, + ) + .await + .context("failed to query object")?; + + match maybe_object { + Some(object) => { + println!("Object found!"); + println!(" Key: {}", args.key); + println!(" Hash: 0x{}", hex::encode(object.hash.0)); + println!(" Recovery hash: 0x{}", hex::encode(object.recovery_hash.0)); + println!(" Size: {} bytes", object.size); + println!(" Expiry epoch: {}", object.expiry); + if !object.metadata.is_empty() { + println!(" Metadata:"); + for (key, value) in &object.metadata { + println!(" {}: {}", key, value); + } + } + } + None => { + println!("Object not found with key: {}", args.key); + } + } + + Ok(()) +} diff --git a/ipc-storage/ipc-decentralized-storage/src/gateway/mod.rs b/ipc-storage/ipc-decentralized-storage/src/gateway/mod.rs new file mode 100644 index 0000000000..6295a6dc04 --- /dev/null +++ b/ipc-storage/ipc-decentralized-storage/src/gateway/mod.rs @@ -0,0 +1,761 @@ +// Copyright 2022-2024 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT + +//! Gateway module for querying pending blobs from the FVM blockchain +//! +//! This module provides a polling gateway that constantly queries the blobs actor +//! for pending blobs that need to be resolved. 
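+//!
+//! At a high level, each polling iteration:
+//! 1. refreshes the cached operator set if it is stale (roughly every five minutes),
+//! 2. queries `GetAddedBlobs` on the blobs actor and starts tracking any new blobs,
+//! 3. fetches BLS signatures for tracked blobs from operator RPC endpoints in parallel, and
+//! 4. once a 2/3 threshold of operators has signed, aggregates the signatures and
+//!    submits a `FinalizeBlob` transaction.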
+ +pub mod objects_service; + +use anyhow::{Context, Result}; +use bls_signatures::{aggregate, Serialize as BlsSerialize, Signature as BlsSignature}; +use fendermint_actor_blobs_shared::blobs::{ + BlobStatus, FinalizeBlobParams, GetAddedBlobsParams, SubscriptionId, +}; +use fendermint_actor_blobs_shared::bytes::B256; +use fendermint_actor_blobs_shared::method::Method::{ + FinalizeBlob, GetActiveOperators, GetAddedBlobs, GetOperatorInfo, +}; +use fendermint_actor_blobs_shared::operators::{ + GetActiveOperatorsReturn, GetOperatorInfoParams, OperatorInfo, +}; +use fendermint_actor_blobs_shared::BLOBS_ACTOR_ADDR; +use fendermint_rpc::message::GasParams; +use fendermint_rpc::tx::{BoundClient, TxClient, TxCommit}; +use fendermint_vm_actor_interface::system; +use fendermint_vm_message::query::FvmQueryHeight; +use fvm_ipld_encoding::RawBytes; +use fvm_shared::address::Address; +use fvm_shared::bigint::Zero; +use fvm_shared::econ::TokenAmount; +use fvm_shared::message::Message; +use std::collections::{HashMap, HashSet}; +use std::time::{Duration, Instant}; +use tokio::time::sleep; +use tracing::{debug, error, info, warn}; + +/// A blob item with its hash, size, and subscribers. +/// Note: We use B256 for both hash and source to match the actor's return type exactly. +/// The actor returns BlobRequest = (B256, u64, HashSet<(Address, SubscriptionId, B256)>) +pub type BlobItem = (B256, u64, HashSet<(Address, SubscriptionId, B256)>); + +/// Cached operator information +struct OperatorCache { + /// List of active operator addresses in order (for bitmap indexing) + operators: Vec<Address>, + /// Operator info by address (BLS pubkey, RPC URL) + operator_info: HashMap<Address, OperatorInfo>, + /// When this cache was last refreshed + last_refresh: Instant, +} + +impl OperatorCache { + fn new() -> Self { + Self { + operators: Vec::new(), + operator_info: HashMap::new(), + // Set to a time far in the past to force refresh on first use + last_refresh: Instant::now() - Duration::from_secs(3600), + } + } + + fn is_stale(&self, max_age: Duration) -> bool { + self.last_refresh.elapsed() > max_age + } +} + +/// Signature collection state for a single blob +struct BlobSignatureCollection { + /// When we first saw this blob + first_seen: Instant, + /// Number of collection attempts + retry_count: u32, + /// Signatures already collected: operator_index -> signature + collected_signatures: HashMap<usize, BlsSignature>, + /// Operator indices we've already attempted (to avoid re-querying) + attempted_operators: HashSet<usize>, + /// Blob metadata needed for finalization + blob_metadata: BlobMetadata, +} + +/// Metadata about a blob needed for finalization +#[derive(Clone)] +pub struct BlobMetadata { + /// Subscriber address that requested the blob + subscriber: Address, + /// Blob size in bytes + size: u64, + /// Subscription ID + subscription_id: SubscriptionId, + /// Source Iroh node ID + source: B256, +} + +impl BlobSignatureCollection { + fn new(metadata: BlobMetadata) -> Self { + Self { + first_seen: Instant::now(), + retry_count: 0, + collected_signatures: HashMap::new(), + attempted_operators: HashSet::new(), + blob_metadata: metadata, + } + } +} + +/// Default gas parameters for transactions +fn default_gas_params() -> GasParams { + GasParams { + gas_limit: 10_000_000_000, + gas_fee_cap: TokenAmount::from_atto(100), + gas_premium: TokenAmount::from_atto(100), + } +} + +/// Gateway for polling added blobs from the chain +/// +/// Uses the fendermint RPC client to query the blobs actor for newly added blobs +/// and submit finalization transactions.
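+///
+/// A minimal wiring sketch (client construction elided; see the `gateway` binary
+/// for the full setup):
+///
+/// ```ignore
+/// let mut gateway = BlobGateway::new(bound_client, 10, Duration::from_secs(5));
+/// gateway.run().await?;
+/// ```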
+pub struct BlobGateway<C> { + client: C, + /// How many added blobs to fetch per query + batch_size: u32, + /// Polling interval + poll_interval: Duration, + /// Cached operator data (refreshed periodically) + operator_cache: OperatorCache, + /// Track blobs awaiting signature collection and finalization + pending_finalization: HashMap<B256, BlobSignatureCollection>, +} + +impl<C> BlobGateway<C> +where + C: fendermint_rpc::QueryClient + Send + Sync, +{ + /// Create a new blob gateway + pub fn new(client: C, batch_size: u32, poll_interval: Duration) -> Self { + Self { + client, + batch_size, + poll_interval, + operator_cache: OperatorCache::new(), + pending_finalization: HashMap::new(), + } + } + + /// Query added blobs from the chain once + pub async fn query_added_blobs(&self) -> Result<Vec<BlobItem>> { + debug!("Querying added blobs (batch_size: {})", self.batch_size); + + // Create the query message to the blobs actor + let params = GetAddedBlobsParams(self.batch_size); + let params = + RawBytes::serialize(params).context("failed to serialize GetAddedBlobsParams")?; + + let msg = Message { + version: Default::default(), + from: system::SYSTEM_ACTOR_ADDR, + to: BLOBS_ACTOR_ADDR, + sequence: 0, + value: TokenAmount::zero(), + method_num: GetAddedBlobs as u64, + params, + gas_limit: 10_000_000_000, // High gas limit for read-only query + gas_fee_cap: TokenAmount::zero(), + gas_premium: TokenAmount::zero(), + }; + + // Execute the query using the FendermintClient + let response = self + .client + .call(msg, FvmQueryHeight::default()) + .await + .context("failed to execute GetAddedBlobs call")?; + + if response.value.code.is_err() { + anyhow::bail!("GetAddedBlobs query failed: {}", response.value.info); + } + + // Decode the return data + let return_data = fendermint_rpc::response::decode_data(&response.value.data) + .context("failed to decode response data")?; + + let blobs = fvm_ipld_encoding::from_slice::<Vec<BlobItem>>(&return_data) + .context("failed to decode added blobs response")?; + + info!("Found {} added blobs", blobs.len()); + Ok(blobs) + } +} + +/// Implementation for transaction-capable clients (can submit finalization transactions) +impl<C> BlobGateway<C> +where + C: fendermint_rpc::QueryClient + BoundClient + TxClient<TxCommit> + Send + Sync, +{ + /// Main entry point: run the gateway to monitor and finalize blobs + /// + /// This is an alias for run_signature_collection() + pub async fn run(&mut self) -> Result<()> { + self.run_signature_collection().await + } + + /// Main entry point: collect signatures and finalize blobs + /// + /// This monitors pending blobs, collects signatures from operators, + /// aggregates them, and calls finalize_blob on-chain.
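+    ///
+    /// The signing threshold is the ceiling of two thirds of the active operator set:
+    /// with 4 operators, `(4 * 2 + 2) / 3 = 3` signatures are required; with 7 operators,
+    /// `(7 * 2 + 2) / 3 = 5`. The signer bitmap sets bit `i` for the operator at index `i`
+    /// in the cached operator list, so signatures from operators 0 and 2 produce `0b101`.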
+ pub async fn run_signature_collection(&mut self) -> Result<()> { + info!( + "Starting signature collection loop (interval: {:?})", + self.poll_interval + ); + + loop { + if let Err(e) = self.signature_collection_loop().await { + error!("Signature collection error: {}", e); + } + + sleep(self.poll_interval).await; + } + } + + async fn signature_collection_loop(&mut self) -> Result<()> { + debug!("Starting signature collection loop iteration"); + + // Step 1: Refresh operator cache if stale (every 5 minutes) + let cache_refresh_interval = Duration::from_secs(300); + let needs_refresh = self.operator_cache.is_stale(cache_refresh_interval); + debug!( + "Operator cache status: {} operators, stale: {}", + self.operator_cache.operators.len(), + needs_refresh + ); + + if needs_refresh { + info!("Refreshing operator cache..."); + match self.query_active_operators().await { + Ok(operators) => { + self.operator_cache.operators = operators.clone(); + self.operator_cache.operator_info.clear(); + + // Fetch operator info for each operator + for operator_addr in &operators { + match self.get_operator_info(*operator_addr).await { + Ok(info) => { + self.operator_cache + .operator_info + .insert(*operator_addr, info); + } + Err(e) => { + warn!("Failed to get info for operator {}: {}", operator_addr, e); + } + } + } + + self.operator_cache.last_refresh = Instant::now(); + info!("Operator cache refreshed: {} operators", operators.len()); + } + Err(e) => { + warn!("Failed to refresh operator cache: {}", e); + } + } + } + + // Step 2: Query added blobs and track them + match self.query_added_blobs().await { + Ok(added_blobs) => { + for (hash, size, sources) in added_blobs { + // Extract metadata from sources (pick first source) + if let Some((subscriber, subscription_id, source)) = sources.iter().next() { + // Skip if already tracked + if self.pending_finalization.contains_key(&hash) { + continue; + } + + let metadata = BlobMetadata { + subscriber: *subscriber, + size, + subscription_id: subscription_id.clone(), + source: *source, + }; + + // Track the blob for signature collection + // (blob will be finalized directly from Added status) + self.pending_finalization + .insert(hash, BlobSignatureCollection::new(metadata)); + } else { + warn!("Blob {} has no sources, skipping", hash); + } + } + } + Err(e) => { + warn!("Failed to query added blobs: {}", e); + } + } + + // Step 3: Try to collect signatures for tracked blobs + let tracked_blobs: Vec = self.pending_finalization.keys().copied().collect(); + + debug!( + "Checking {} blobs for signature collection", + tracked_blobs.len() + ); + + for hash in tracked_blobs { + // Get collection once and check if we should skip + let Some(collection) = self.pending_finalization.get_mut(&hash) else { + continue; + }; + + // Skip if we just added this blob (give operators time to download) + // Use 10 seconds for faster testing + let elapsed = collection.first_seen.elapsed(); + if elapsed < Duration::from_secs(10) { + debug!( + "Blob {} waiting for operators to download ({:.1}s / 10s)", + hash, + elapsed.as_secs_f64() + ); + continue; + } + + info!( + "Blob {} ready for signature collection (waited {:.1}s)", + hash, + elapsed.as_secs_f64() + ); + + // Get operators from cache + let (operators, total_operators) = ( + self.operator_cache.operators.clone(), + self.operator_cache.operators.len(), + ); + + if total_operators == 0 { + debug!("No operators available, skipping signature collection"); + continue; + } + + let threshold = (total_operators * 2 + 2) / 3; // Ceiling of 
2/3 + + // Collect signatures that aren't already attempted + let attempted_operators = collection.attempted_operators.clone(); + + // Build list of (index, operator_addr, rpc_url) for operators we need to query + let mut fetch_tasks = Vec::new(); + for (index, operator_addr) in operators.iter().enumerate() { + // Skip if already collected + if attempted_operators.contains(&index) { + continue; + } + + // Get operator RPC URL from cache - skip if not found + let Some(operator_info) = self.operator_cache.operator_info.get(operator_addr) + else { + warn!("Operator {} not found in cache, skipping", operator_addr); + continue; + }; + + fetch_tasks.push((index, *operator_addr, operator_info.rpc_url.clone())); + } + + // Fetch signatures from all operators in parallel + let fetch_futures: Vec<_> = fetch_tasks + .into_iter() + .map(|(index, operator_addr, rpc_url)| async move { + let result = Self::fetch_signature_static(&rpc_url, hash).await; + (index, operator_addr, result) + }) + .collect(); + + // Wait for all fetches to complete + let fetch_results = futures::future::join_all(fetch_futures).await; + + // Collect successful signatures + let mut new_signatures: Vec<(usize, BlsSignature)> = Vec::new(); + for (index, operator_addr, result) in fetch_results { + match result { + Ok(signature) => { + info!( + "Got signature from operator {} (index {})", + operator_addr, index + ); + new_signatures.push((index, signature)); + } + Err(e) => { + warn!( + "Failed to get signature from operator {}: {}", + operator_addr, e + ); + // Don't mark as attempted - we'll retry next iteration + } + } + } + + // Apply all collected signatures at once + let collection = self.pending_finalization.get_mut(&hash).unwrap(); + for (index, signature) in new_signatures { + collection.collected_signatures.insert(index, signature); + collection.attempted_operators.insert(index); + } + + // Get collection reference for final checks + let num_collected = collection.collected_signatures.len(); + + if num_collected >= threshold { + // Collect signatures and build bitmap + let sigs_vec: Vec<(usize, BlsSignature)> = collection + .collected_signatures + .iter() + .map(|(idx, sig)| (*idx, *sig)) + .collect(); + + let mut bitmap: u128 = 0; + for idx in collection.collected_signatures.keys() { + bitmap |= 1u128 << idx; + } + + info!( + "Collected {}/{} signatures for blob {} (threshold: {})", + num_collected, total_operators, hash, threshold + ); + + // Get metadata before calling finalize_blob + let metadata = collection.blob_metadata.clone(); + + // Aggregate signatures + match self.aggregate_signatures(sigs_vec) { + Ok(aggregated_sig) => { + info!("Successfully aggregated signature for blob {}", hash); + info!("Bitmap: 0b{:b}", bitmap); + + // Call finalize_blob with aggregated signature and bitmap + match self + .finalize_blob(hash, &metadata, aggregated_sig, bitmap) + .await + { + Ok(()) => { + // Remove from tracking after successful finalization + self.pending_finalization.remove(&hash); + info!("Blob {} finalized on-chain and removed from tracking", hash); + } + Err(e) => { + warn!("Failed to finalize blob {} on-chain: {}", hash, e); + // Keep in tracking to retry later + } + } + } + Err(e) => { + warn!("Failed to aggregate signatures for {}: {}", hash, e); + } + } + } else { + // Update retry count + collection.retry_count += 1; + + // Give up after too many retries or too much time + if collection.retry_count > 20 + || collection.first_seen.elapsed() > Duration::from_secs(600) + { + warn!( + "Giving up on blob {} after {} 
retries / {:?} (collected {}/{})", + hash, + collection.retry_count, + collection.first_seen.elapsed(), + num_collected, + threshold + ); + } else { + debug!( + "Blob {} progress: {}/{} signatures (threshold: {})", + hash, num_collected, total_operators, threshold + ); + } + } + } + + Ok(()) + } +} + +/// Additional query methods for all clients (read-only operations) +impl BlobGateway +where + C: fendermint_rpc::QueryClient + Send + Sync, +{ + /// Query the list of active node operators from the chain + pub async fn query_active_operators(&self) -> Result> { + debug!("Querying active operators"); + + let msg = Message { + version: Default::default(), + from: system::SYSTEM_ACTOR_ADDR, + to: BLOBS_ACTOR_ADDR, + sequence: 0, + value: TokenAmount::zero(), + method_num: GetActiveOperators as u64, + params: RawBytes::default(), + gas_limit: 10_000_000_000, + gas_fee_cap: TokenAmount::zero(), + gas_premium: TokenAmount::zero(), + }; + + let response = self + .client + .call(msg, FvmQueryHeight::default()) + .await + .context("failed to execute GetActiveOperators call")?; + + if response.value.code.is_err() { + anyhow::bail!("GetActiveOperators query failed: {}", response.value.info); + } + + let return_data = fendermint_rpc::response::decode_data(&response.value.data) + .context("failed to decode response data")?; + + let result = fvm_ipld_encoding::from_slice::(&return_data) + .context("failed to decode active operators response")?; + + info!("Found {} active operators", result.operators.len()); + Ok(result.operators) + } + + /// Get operator info by address + pub async fn get_operator_info(&self, address: Address) -> Result { + debug!("Querying operator info for {}", address); + + let params = GetOperatorInfoParams { address }; + let params = + RawBytes::serialize(params).context("failed to serialize GetOperatorInfoParams")?; + + let msg = Message { + version: Default::default(), + from: system::SYSTEM_ACTOR_ADDR, + to: BLOBS_ACTOR_ADDR, + sequence: 0, + value: TokenAmount::zero(), + method_num: GetOperatorInfo as u64, + params, + gas_limit: 10_000_000_000, + gas_fee_cap: TokenAmount::zero(), + gas_premium: TokenAmount::zero(), + }; + + let response = self + .client + .call(msg, FvmQueryHeight::default()) + .await + .context("failed to execute GetOperatorInfo call")?; + + if response.value.code.is_err() { + anyhow::bail!("GetOperatorInfo query failed: {}", response.value.info); + } + + let return_data = fendermint_rpc::response::decode_data(&response.value.data) + .context("failed to decode response data")?; + + let result = fvm_ipld_encoding::from_slice::>(&return_data) + .context("failed to decode operator info response")?; + + result.ok_or_else(|| anyhow::anyhow!("Operator not found")) + } + + /// Collect signatures from all active operators for a given blob hash + /// + /// Returns a tuple of (signatures_with_index, bitmap) where: + /// - signatures_with_index: Vec of (operator_index, BLS signature) + /// - bitmap: u128 bitmap indicating which operators signed + pub async fn collect_signatures( + &self, + blob_hash: B256, + ) -> Result<(Vec<(usize, BlsSignature)>, u128)> { + info!("Collecting signatures for blob {}", blob_hash); + + // Get active operators + let operators = self.query_active_operators().await?; + + if operators.is_empty() { + anyhow::bail!("No active operators found"); + } + + let mut signatures = Vec::new(); + let mut bitmap: u128 = 0; + + // Query each operator's RPC for the signature + for (index, operator_addr) in operators.iter().enumerate() { + match 
self.get_operator_info(*operator_addr).await { + Ok(operator_info) => { + match self + .fetch_signature_from_operator(&operator_info.rpc_url, blob_hash) + .await + { + Ok(signature) => { + signatures.push((index, signature)); + bitmap |= 1u128 << index; + info!( + "Got signature from operator {} (index {})", + operator_addr, index + ); + } + Err(e) => { + warn!( + "Failed to get signature from operator {} ({}): {}", + operator_addr, operator_info.rpc_url, e + ); + } + } + } + Err(e) => { + warn!("Failed to get info for operator {}: {}", operator_addr, e); + } + } + } + + if signatures.is_empty() { + anyhow::bail!("No signatures collected from any operator"); + } + + info!( + "Collected {} signatures out of {} operators", + signatures.len(), + operators.len() + ); + + Ok((signatures, bitmap)) + } + + /// Fetch a signature from an operator's RPC endpoint + async fn fetch_signature_from_operator( + &self, + rpc_url: &str, + blob_hash: B256, + ) -> Result { + Self::fetch_signature_static(rpc_url, blob_hash).await + } + + /// Static version of fetch_signature_from_operator for parallel execution + async fn fetch_signature_static(rpc_url: &str, blob_hash: B256) -> Result { + let url = format!("{}/signature/{}", rpc_url, blob_hash); + debug!("Fetching signature from {}", url); + + let client = reqwest::Client::builder() + .timeout(Duration::from_secs(10)) + .build() + .context("failed to create HTTP client")?; + + let response = client + .get(&url) + .send() + .await + .context("failed to send HTTP request")?; + + if !response.status().is_success() { + anyhow::bail!("HTTP request failed with status: {}", response.status()); + } + + let json: serde_json::Value = response + .json() + .await + .context("failed to parse JSON response")?; + + let signature_hex = json["signature"] + .as_str() + .ok_or_else(|| anyhow::anyhow!("Missing 'signature' field in response"))?; + + let signature_bytes = + hex::decode(signature_hex).context("failed to decode signature hex")?; + + let signature = BlsSignature::from_bytes(&signature_bytes) + .map_err(|e| anyhow::anyhow!("Failed to parse BLS signature: {:?}", e))?; + + Ok(signature) + } + + /// Aggregate BLS signatures into a single signature + pub fn aggregate_signatures( + &self, + signatures: Vec<(usize, BlsSignature)>, + ) -> Result { + if signatures.is_empty() { + anyhow::bail!("Cannot aggregate empty signature list"); + } + + info!("Aggregating {} signatures", signatures.len()); + + let sigs: Vec = signatures.into_iter().map(|(_, sig)| sig).collect(); + let aggregated = aggregate(&sigs) + .map_err(|e| anyhow::anyhow!("Failed to aggregate signatures: {:?}", e))?; + + Ok(aggregated) + } +} + +/// Transaction methods for clients that can submit transactions +impl BlobGateway +where + C: fendermint_rpc::QueryClient + BoundClient + TxClient + Send + Sync, +{ + /// Call finalize_blob on-chain with aggregated signature and bitmap + /// + /// This submits a real transaction to the blockchain (not just a query). 
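+    ///
+    /// Both `check_tx` and `deliver_tx` results are checked; if either fails the error is
+    /// propagated and the caller leaves the blob in its tracking map so finalization is
+    /// retried on a later iteration.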
+ pub async fn finalize_blob( + &mut self, + blob_hash: B256, + metadata: &BlobMetadata, + aggregated_signature: BlsSignature, + signer_bitmap: u128, + ) -> Result<()> { + info!("Finalizing blob {} on-chain", blob_hash); + + // Serialize aggregated signature + let signature_bytes = aggregated_signature.as_bytes().to_vec(); + + // Create finalize blob params + let params = FinalizeBlobParams { + source: metadata.source, + subscriber: metadata.subscriber, + hash: blob_hash, + size: metadata.size, + id: metadata.subscription_id.clone(), + status: BlobStatus::Resolved, + aggregated_signature: signature_bytes, + signer_bitmap, + }; + + let params_bytes = + RawBytes::serialize(params).context("failed to serialize FinalizeBlobParams")?; + + // Submit actual transaction using TxClient + let res = TxClient::::transaction( + &mut self.client, + BLOBS_ACTOR_ADDR, + FinalizeBlob as u64, + params_bytes, + TokenAmount::zero(), + default_gas_params(), + ) + .await + .context("failed to send FinalizeBlob transaction")?; + + if res.response.check_tx.code.is_err() { + anyhow::bail!( + "FinalizeBlob check_tx failed: {}", + res.response.check_tx.log + ); + } + + if res.response.deliver_tx.code.is_err() { + anyhow::bail!( + "FinalizeBlob deliver_tx failed: {}", + res.response.deliver_tx.log + ); + } + + info!( + "Successfully finalized blob {} on-chain (tx: {})", + blob_hash, res.response.hash + ); + Ok(()) + } +} diff --git a/ipc-storage/ipc-decentralized-storage/src/gateway/objects_service.rs b/ipc-storage/ipc-decentralized-storage/src/gateway/objects_service.rs new file mode 100644 index 0000000000..a1b72fe9d3 --- /dev/null +++ b/ipc-storage/ipc-decentralized-storage/src/gateway/objects_service.rs @@ -0,0 +1,64 @@ +// Copyright 2022-2024 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT + +//! Objects service integration for the gateway +//! +//! This module provides functionality to start the objects HTTP service +//! alongside the gateway's blob polling functionality. + +use anyhow::Result; +use iroh_manager::{BlobsClient, IrohNode}; +use std::net::SocketAddr; +use tracing::info; + +use crate::objects::{self, ObjectsConfig}; + +/// Configuration for the gateway with objects service +#[derive(Clone, Debug, Default)] +pub struct GatewayWithObjectsConfig { + /// Objects service configuration + pub objects_config: ObjectsConfig, +} + +/// Start the objects HTTP service in a background task +/// +/// This spawns the objects service which handles: +/// - POST /v1/objects - Upload objects +/// - GET /v1/objects/{address}/{key} - Download objects from buckets +/// - GET /v1/blobs/{hash} - Download blobs directly +/// +/// Returns a handle to the spawned task. +pub fn start_objects_service( + config: ObjectsConfig, + iroh_node: IrohNode, + iroh_resolver_blobs: BlobsClient, +) -> tokio::task::JoinHandle> { + let listen_addr = config.listen_addr; + info!(listen_addr = %listen_addr, "starting objects service in background"); + + tokio::spawn(async move { + objects::run_objects_service(config, iroh_node, iroh_resolver_blobs).await + }) +} + +/// Start only the objects HTTP service (blocking) +/// +/// This is a convenience function that runs the objects service directly +/// without the gateway's blob polling functionality. 
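+///
+/// A minimal sketch (the listen address, RPC URL, and size limit are examples; the
+/// Iroh node is assumed to already be running):
+///
+/// ```ignore
+/// run_objects_service_standalone(
+///     "127.0.0.1:8080".parse()?,
+///     "http://localhost:26657".parse()?,
+///     iroh_node.clone(),
+///     iroh_node.blobs_client().clone(),
+///     100 * 1024 * 1024,
+/// )
+/// .await?;
+/// ```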
+pub async fn run_objects_service_standalone( + listen_addr: SocketAddr, + tendermint_url: tendermint_rpc::Url, + iroh_node: IrohNode, + iroh_resolver_blobs: BlobsClient, + max_object_size: u64, +) -> Result<()> { + let config = ObjectsConfig { + listen_addr, + tendermint_url, + max_object_size, + metrics_enabled: false, + metrics_listen: None, + }; + + objects::run_objects_service(config, iroh_node, iroh_resolver_blobs).await +} diff --git a/ipc-storage/ipc-decentralized-storage/src/lib.rs b/ipc-storage/ipc-decentralized-storage/src/lib.rs new file mode 100644 index 0000000000..a73f28b639 --- /dev/null +++ b/ipc-storage/ipc-decentralized-storage/src/lib.rs @@ -0,0 +1,11 @@ +// Copyright 2022-2024 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT + +//! IPC Decentralized Storage +//! +//! This crate provides decentralized storage abstractions and implementations +//! for the IPC (Inter-Planetary Consensus) system. + +pub mod gateway; +pub mod node; +pub mod objects; diff --git a/ipc-storage/ipc-decentralized-storage/src/node/mod.rs b/ipc-storage/ipc-decentralized-storage/src/node/mod.rs new file mode 100644 index 0000000000..f5ec75053c --- /dev/null +++ b/ipc-storage/ipc-decentralized-storage/src/node/mod.rs @@ -0,0 +1,314 @@ +// Copyright 2022-2024 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT + +//! Node module for running a decentralized storage node +//! +//! This module provides functionality to run a complete storage node that: +//! - Starts an Iroh instance for P2P storage +//! - Polls the chain for newly added blobs +//! - Resolves blobs by downloading them from the source nodes + +mod resolver; +mod rpc; +pub mod store; + +use anyhow::{Context, Result}; +use bls_signatures::{PrivateKey as BlsPrivateKey, Serialize as BlsSerialize}; +use ethers::types::Address; +use fendermint_actor_blobs_shared::bytes::B256; +use fendermint_rpc::FendermintClient; +use iroh_blobs::Hash; +use iroh_manager::IrohNode; +use std::collections::HashMap; +use std::net::{SocketAddr, SocketAddrV4, SocketAddrV6}; +use std::str::FromStr; +use std::sync::{Arc, RwLock}; +use std::time::Duration; +use tendermint_rpc::Url; +use tokio::sync::Mutex; +use tokio::time::sleep; +use tracing::{debug, error, info, warn}; + +use crate::gateway::BlobGateway; +use resolver::EventPollerConfig; +use store::InMemoryStore; + +/// Configuration for the storage node +#[derive(Clone)] +pub struct NodeConfig { + /// Path to store Iroh data + pub iroh_path: std::path::PathBuf, + /// IPv4 bind address for Iroh (optional, uses default if None) + pub iroh_v4_addr: Option, + /// IPv6 bind address for Iroh (optional, uses default if None) + pub iroh_v6_addr: Option, + /// Tendermint RPC URL + pub rpc_url: Url, + /// Ethereum JSON-RPC URL (Fendermint ETH API endpoint) + pub eth_rpc_url: String, + /// Number of blobs to fetch per query + pub batch_size: u32, + /// Polling interval for querying added blobs + pub poll_interval: Duration, + /// Maximum concurrent blob downloads + pub max_concurrent_downloads: usize, + /// BLS private key for signing blob hashes + pub bls_private_key: BlsPrivateKey, + /// Address to bind the RPC server for signature queries + pub rpc_bind_addr: SocketAddr, + /// Blobs actor address for event filtering + pub blobs_actor_address: Address, +} + +impl std::fmt::Debug for NodeConfig { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("NodeConfig") + .field("iroh_path", &self.iroh_path) + .field("iroh_v4_addr", &self.iroh_v4_addr) + .field("iroh_v6_addr", 
&self.iroh_v6_addr) + .field("rpc_url", &self.rpc_url) + .field("eth_rpc_url", &self.eth_rpc_url) + .field("batch_size", &self.batch_size) + .field("poll_interval", &self.poll_interval) + .field("max_concurrent_downloads", &self.max_concurrent_downloads) + .field("bls_private_key", &"") + .field("rpc_bind_addr", &self.rpc_bind_addr) + .field("blobs_actor_address", &self.blobs_actor_address) + .finish() + } +} + +/// Storage for BLS signatures of resolved blobs +/// Maps blob hash -> BLS signature +pub type SignatureStorage = Arc>>>; + +/// Shared Fendermint client wrapped in Arc for async access +pub type SharedFendermintClient = Arc>; + +impl NodeConfig { + /// Create a new NodeConfig with a generated BLS key + pub fn new_with_generated_key() -> Self { + let bls_private_key = BlsPrivateKey::generate(&mut rand::thread_rng()); + Self { + iroh_path: std::env::current_dir().unwrap().join("iroh_data"), + iroh_v4_addr: None, + iroh_v6_addr: None, + rpc_url: Url::from_str("http://localhost:26657").unwrap(), + eth_rpc_url: "http://localhost:8545".to_string(), + batch_size: 10, + poll_interval: Duration::from_secs(5), + max_concurrent_downloads: 10, + bls_private_key, + rpc_bind_addr: "127.0.0.1:8080".parse().unwrap(), + blobs_actor_address: Address::zero(), // Should be configured + } + } +} + +/// Launch a storage node that polls for added blobs and downloads them +/// +/// This function: +/// 1. Starts an Iroh node for P2P storage +/// 2. Creates an RPC client to query the chain +/// 3. Polls for newly added blobs +/// 4. Downloads blobs from their source nodes using Iroh +/// 5. Polls for blob finalized/deleted events +pub async fn launch(config: NodeConfig) -> Result<()> { + info!("Starting decentralized storage node"); + info!("Iroh path: {}", config.iroh_path.display()); + info!("RPC URL: {}", config.rpc_url); + info!("ETH RPC URL: {}", config.eth_rpc_url); + info!("Poll interval: {:?}", config.poll_interval); + + // Start Iroh node + info!("Starting Iroh node..."); + let iroh_node = + IrohNode::persistent(config.iroh_v4_addr, config.iroh_v6_addr, &config.iroh_path) + .await + .context("failed to start Iroh node")?; + + let node_addr = iroh_node.endpoint().node_addr().await?; + info!("Iroh node started: {}", node_addr.node_id); + + // Create RPC client + info!("Connecting to Fendermint RPC..."); + let client = FendermintClient::new_http(config.rpc_url.clone(), None) + .context("failed to create Fendermint client")?; + + // Create gateway + let gateway = BlobGateway::new(client, config.batch_size, config.poll_interval); + + // Track blobs currently being downloaded (keyed by B256 hash from chain) + let mut in_progress: HashMap>> = HashMap::new(); + // Track blobs that have been downloaded but not yet finalized on-chain + let mut downloaded: HashMap = HashMap::new(); + + // Storage for BLS signatures of downloaded blobs + let signatures: SignatureStorage = Arc::new(RwLock::new(HashMap::new())); + + // Create in-memory store for tracking polled heights + let store = Arc::new(InMemoryStore::new()); + + // Create a separate client for RPC server queries + let rpc_client = FendermintClient::new_http(config.rpc_url.clone(), None) + .context("failed to create RPC server Fendermint client")?; + let rpc_client = Arc::new(Mutex::new(rpc_client)); + + // Start RPC server for signature queries and blob downloads + let signatures_for_rpc = signatures.clone(); + let rpc_bind_addr = config.rpc_bind_addr; + let rpc_client_for_server = rpc_client.clone(); + let iroh_for_rpc = iroh_node.clone(); + 
tokio::spawn(async move { + if let Err(e) = rpc::start_rpc_server( + rpc_bind_addr, + signatures_for_rpc, + rpc_client_for_server, + iroh_for_rpc, + ) + .await + { + error!("RPC server error: {}", e); + } + }); + + // Start event poller for blob finalization and deletion + let signatures_for_events = signatures.clone(); + let store_for_events = store.clone(); + let iroh_for_events = iroh_node.clone(); + let event_poller_config = EventPollerConfig { + eth_rpc_url: config.eth_rpc_url.clone(), + poll_interval: config.poll_interval, + blobs_actor_address: config.blobs_actor_address, + }; + tokio::spawn(async move { + if let Err(e) = resolver::poll_for_blob_events( + event_poller_config, + signatures_for_events, + store_for_events, + iroh_for_events, + ) + .await + { + error!("Event poller error: {}", e); + } + }); + + info!("Starting blob resolution loop"); + info!( + "BLS public key: {:?}", + hex::encode(config.bls_private_key.public_key().as_bytes()) + ); + info!("RPC server listening on: {}", config.rpc_bind_addr); + + loop { + // Check completed downloads and move them to the downloaded set + // Collect finished tasks to process + let mut finished = Vec::new(); + in_progress.retain(|hash, handle| { + if handle.is_finished() { + finished.push(*hash); + false // Remove from in_progress + } else { + true // Keep in in_progress + } + }); + + // Process finished downloads + for hash in finished { + // Note: The task has finished, but we mark it as downloaded + // The actual result checking would require more complex handling + // For now, we assume successful completion if the task finished + info!("Blob {} download completed, waiting for finalization", hash); + downloaded.insert(hash, std::time::Instant::now()); + } + + // TODO: Query on-chain blob status to check if downloaded blobs are finalized + // For now, just log the downloaded blobs waiting for finalization + if !downloaded.is_empty() { + debug!("Blobs waiting for finalization: {}", downloaded.len()); + // Clean up old entries (older than 5 minutes) to prevent memory leaks + let cutoff = std::time::Instant::now() - Duration::from_secs(300); + downloaded.retain(|hash, timestamp| { + if *timestamp < cutoff { + warn!("Blob {} has been waiting for finalization for >5 minutes, removing from tracking", hash); + false + } else { + true + } + }); + } + + // Query for added blobs + match gateway.query_added_blobs().await { + Ok(blobs) => { + if !blobs.is_empty() { + info!("Found {} added blobs to resolve", blobs.len()); + + for blob_item in blobs { + let (hash, size, sources) = blob_item; + + // Skip if already downloading + if in_progress.contains_key(&hash) { + debug!("Blob {} already in progress, skipping", hash); + continue; + } + + // Check if we're at the concurrency limit + if in_progress.len() >= config.max_concurrent_downloads { + warn!( + "Max concurrent downloads ({}) reached, deferring blob {}", + config.max_concurrent_downloads, hash + ); + continue; + } + + // Skip if already downloaded and waiting for finalization + if downloaded.contains_key(&hash) { + debug!("Blob {} already downloaded, waiting for finalization", hash); + continue; + } + + // Spawn a task to download this blob + let iroh_clone = iroh_node.clone(); + let bls_key = config.bls_private_key; + let sigs = signatures.clone(); + + // Convert B256 hash to iroh_blobs::Hash + let iroh_hash = Hash::from_bytes(hash.0); + + // Convert sources from B256 to iroh::NodeId + let iroh_sources: std::collections::HashSet<_> = sources + .into_iter() + .map(|(addr, sub_id, 
source_b256)| { + let node_id = iroh::NodeId::from_bytes(&source_b256.0) + .expect("B256 should be valid NodeId bytes"); + (addr, sub_id, node_id) + }) + .collect(); + + let handle = tokio::spawn(async move { + resolver::resolve_blob( + iroh_clone, + iroh_hash, + size, + iroh_sources, + bls_key, + sigs, + ) + .await + }); + + in_progress.insert(hash, handle); + } + } + } + Err(e) => { + error!("Failed to query added blobs: {}", e); + } + } + + // Wait before the next poll + sleep(config.poll_interval).await; + } +} diff --git a/ipc-storage/ipc-decentralized-storage/src/node/resolver.rs b/ipc-storage/ipc-decentralized-storage/src/node/resolver.rs new file mode 100644 index 0000000000..68a588cf18 --- /dev/null +++ b/ipc-storage/ipc-decentralized-storage/src/node/resolver.rs @@ -0,0 +1,460 @@ +// Copyright 2022-2024 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT + +//! Blob resolution and event handling for the storage node +//! +//! This module provides: +//! - Blob resolution by downloading from source nodes +//! - Event polling for blob finalization and deletion using ethers-rs + +use std::sync::Arc; +use std::time::Duration; + +use anyhow::{Context, Result}; +use bls_signatures::{PrivateKey as BlsPrivateKey, Serialize as BlsSerialize}; +use ethers::prelude::*; +use ethers::providers::{Http, Provider}; +use iroh_blobs::Hash; +use iroh_manager::IrohNode; +use tracing::{debug, error, info, warn}; + +use super::store::Store; +use super::SignatureStorage; + +// Event signatures for blob events (keccak256 of the event signature) +// BlobFinalized(address indexed subscriber, bytes32 hash, bool resolved) +const BLOB_FINALIZED_TOPIC: &str = + "0x3f5b99de731555264580d7e2f00e46919de0d4f067a01d28aed55632a9068595"; +// BlobDeleted(address indexed subscriber, bytes32 hash, uint256 size, uint256 bytesReleased) +const BLOB_DELETED_TOPIC: &str = + "0x1ebbc934d9a1e5c0c9bcb94c6a7c55bfa2b66fca0a5d8ed66f0b43a5c8e3c0d8"; + +/// Configuration for the event poller +#[derive(Clone)] +pub struct EventPollerConfig { + /// Ethereum JSON-RPC URL (Fendermint ETH API endpoint) + pub eth_rpc_url: String, + /// Polling interval + pub poll_interval: Duration, + /// Blobs actor address to filter events from + pub blobs_actor_address: Address, +} + +/// Events that the poller can detect +#[derive(Debug, Clone)] +pub enum BlobEvent { + /// A blob has been finalized + Finalized { hash: Hash }, + /// A blob has been deleted + Deleted { hash: Hash }, +} + +/// Poll for blob events (finalized and deleted) using ethers-rs get_logs +/// +/// This function polls the chain for new blocks and processes events +/// related to blob finalization and deletion. 
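Before the function definition that follows, here is a compact, standalone sketch of the ethers-rs `get_logs` polling pattern it implements. The contract address, topic, and block bookkeeping are placeholders; only the call shapes mirror the code below.

```rust
use ethers::prelude::*;

/// One polling step: scan [from_block, latest] for a single event topic and
/// return the new height the caller should persist as "last polled".
async fn poll_logs_once(
    provider: &Provider<Http>,
    contract: Address,
    topic0: H256,
    from_block: u64,
) -> anyhow::Result<u64> {
    let latest = provider.get_block_number().await?.as_u64();
    if latest < from_block {
        return Ok(from_block.saturating_sub(1)); // nothing new yet
    }
    let filter = Filter::new()
        .address(contract)
        .topic0(topic0)
        .from_block(from_block)
        .to_block(latest);
    for log in provider.get_logs(&filter).await? {
        // Indexed params live in log.topics, non-indexed params in log.data.
        println!(
            "matched log in block {:?} ({} data bytes)",
            log.block_number,
            log.data.len()
        );
    }
    Ok(latest)
}
```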
+pub async fn poll_for_blob_events( + config: EventPollerConfig, + signatures: SignatureStorage, + store: Arc, + iroh: IrohNode, +) -> Result<()> { + info!("Starting event poller for BlobFinalized and BlobDeleted events"); + info!("ETH RPC URL: {}", config.eth_rpc_url); + info!("Poll interval: {:?}", config.poll_interval); + info!("Blobs actor address: {:?}", config.blobs_actor_address); + + // Create ethers HTTP provider + let provider = Provider::::try_from(&config.eth_rpc_url) + .context("failed to create HTTP provider")?; + + loop { + if let Err(e) = poll_once(&provider, &config, &signatures, &store, &iroh).await { + error!("Error during event polling: {}", e); + } + + tokio::time::sleep(config.poll_interval).await; + } +} + +/// Perform a single poll iteration +async fn poll_once( + provider: &Provider, + config: &EventPollerConfig, + signatures: &SignatureStorage, + store: &Arc, + iroh: &IrohNode, +) -> Result<()> { + // Get the latest block number + let latest_block = provider + .get_block_number() + .await + .context("failed to get block number")?; + let latest_height = latest_block.as_u64(); + + // Get the last polled height from store + let last_polled = store.get_last_polled_height()?.unwrap_or(0); + + if latest_height <= last_polled { + debug!( + "No new blocks to process (latest: {}, last polled: {})", + latest_height, last_polled + ); + return Ok(()); + } + + let from_block = last_polled + 1; + debug!("Processing blocks from {} to {}", from_block, latest_height); + + // Build filter for BlobFinalized events + let finalized_filter = Filter::new() + .address(config.blobs_actor_address) + .topic0(BLOB_FINALIZED_TOPIC.parse::().unwrap()) + .from_block(from_block) + .to_block(latest_height); + + // Build filter for BlobDeleted events + let deleted_filter = Filter::new() + .address(config.blobs_actor_address) + .topic0(BLOB_DELETED_TOPIC.parse::().unwrap()) + .from_block(from_block) + .to_block(latest_height); + + // Query for BlobFinalized events + let finalized_logs = provider + .get_logs(&finalized_filter) + .await + .context("failed to get BlobFinalized logs")?; + + for log in finalized_logs { + if let Some(event) = parse_blob_finalized_log(&log) { + handle_blob_event(event, signatures, iroh).await; + } + } + + // Query for BlobDeleted events + let deleted_logs = provider + .get_logs(&deleted_filter) + .await + .context("failed to get BlobDeleted logs")?; + + for log in deleted_logs { + if let Some(event) = parse_blob_deleted_log(&log) { + handle_blob_event(event, signatures, iroh).await; + } + } + + // Update the last polled height + store.set_last_polled_height(latest_height)?; + debug!("Updated last polled height to {}", latest_height); + + Ok(()) +} + +/// Parse a BlobFinalized event from a log +/// Event: BlobFinalized(address indexed subscriber, bytes32 hash, bool resolved) +fn parse_blob_finalized_log(log: &Log) -> Option { + // The hash is the second topic (first non-indexed param in data, but hash is in data) + // Actually, looking at the event signature: + // event BlobFinalized(address indexed subscriber, bytes32 hash, bool resolved); + // - subscriber is indexed (topic1) + // - hash is not indexed (in data) + // - resolved is not indexed (in data) + + if log.data.len() < 64 { + debug!("BlobFinalized log data too short: {} bytes", log.data.len()); + return None; + } + + // First 32 bytes of data is the hash + let hash_bytes: [u8; 32] = log.data[0..32].try_into().ok()?; + let hash = Hash::from(hash_bytes); + + Some(BlobEvent::Finalized { hash }) +} + +/// Parse a 
BlobDeleted event from a log +/// Event: BlobDeleted(address indexed subscriber, bytes32 hash, uint256 size, uint256 bytesReleased) +fn parse_blob_deleted_log(log: &Log) -> Option { + // - subscriber is indexed (topic1) + // - hash is not indexed (in data, first 32 bytes) + // - size is not indexed (in data) + // - bytesReleased is not indexed (in data) + + if log.data.len() < 96 { + debug!("BlobDeleted log data too short: {} bytes", log.data.len()); + return None; + } + + // First 32 bytes of data is the hash + let hash_bytes: [u8; 32] = log.data[0..32].try_into().ok()?; + let hash = Hash::from(hash_bytes); + + Some(BlobEvent::Deleted { hash }) +} + +/// Handle a blob event +async fn handle_blob_event(event: BlobEvent, signatures: &SignatureStorage, iroh: &IrohNode) { + match event { + BlobEvent::Finalized { hash } => { + // Remove signature from memory for finalized blobs + let mut sigs = signatures.write().unwrap(); + if sigs.remove(&hash).is_some() { + info!("Removed signature for finalized blob {} from memory", hash); + } else { + debug!( + "Blob {} was finalized but no signature found in memory", + hash + ); + } + } + BlobEvent::Deleted { hash } => { + // Remove signature from memory + { + let mut sigs = signatures.write().unwrap(); + if sigs.remove(&hash).is_some() { + info!("Removed signature for deleted blob {} from memory", hash); + } + } + + // Optionally delete the blob from Iroh storage + // Note: This is a best-effort cleanup, failures are logged but not fatal + match delete_blob_from_iroh(iroh, hash).await { + Ok(deleted) => { + if deleted { + info!("Deleted blob {} from Iroh storage", hash); + } else { + debug!("Blob {} was not found in Iroh storage", hash); + } + } + Err(e) => { + warn!("Failed to delete blob {} from Iroh storage: {}", hash, e); + } + } + } + } +} + +/// Delete a blob and its associated content from Iroh storage +async fn delete_blob_from_iroh(iroh: &IrohNode, hash: Hash) -> Result { + use iroh_blobs::hashseq::HashSeq; + + // First, try to read the hash sequence to get all associated hashes + let hash_seq_bytes = match iroh.blobs_client().read_to_bytes(hash).await { + Ok(bytes) => bytes, + Err(_) => { + // Blob not found, nothing to delete + return Ok(false); + } + }; + + // Parse the hash sequence + let content_hashes: Vec = match HashSeq::try_from(hash_seq_bytes) { + Ok(seq) => seq.iter().collect(), + Err(e) => { + warn!("Failed to parse hash sequence for {}: {}", hash, e); + // Still try to delete the main hash + vec![] + } + }; + + // Delete the hash sequence blob tag + let seq_tag = iroh_blobs::Tag(format!("blob-seq-{}", hash).into()); + let _ = iroh.blobs_client().tags().delete(seq_tag).await; + + // Delete content blob tags + for content_hash in &content_hashes { + let content_tag = iroh_blobs::Tag(format!("blob-{}-{}", hash, content_hash).into()); + let _ = iroh.blobs_client().tags().delete(content_tag).await; + } + + Ok(true) +} + +/// Resolve a blob by downloading it from one of its sources +/// +/// Downloads the hash sequence and all blobs referenced within it (including original content). +/// Returns Ok(()) if the blob was successfully downloaded, Err otherwise. 
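The resolver defined below leans on a fixed ordering inside the hash sequence: original content first, then entanglement metadata, then parity blobs. A small sketch of pulling that layout apart, reusing the same `HashSeq` parsing this module already relies on (the function name is illustrative only):

```rust
use iroh_blobs::{hashseq::HashSeq, Hash};

/// Split a downloaded hash-sequence blob into (original, metadata, parity...),
/// following the ordering produced by the upload path.
fn split_hash_seq(raw: bytes::Bytes) -> anyhow::Result<(Hash, Hash, Vec<Hash>)> {
    let seq = HashSeq::try_from(raw)
        .map_err(|e| anyhow::anyhow!("invalid hash sequence: {}", e))?;
    let mut iter = seq.iter();
    let orig = iter
        .next()
        .ok_or_else(|| anyhow::anyhow!("hash sequence is empty"))?;
    let metadata = iter
        .next()
        .ok_or_else(|| anyhow::anyhow!("missing metadata hash"))?;
    let parity: Vec<Hash> = iter.collect();
    Ok((orig, metadata, parity))
}
```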
+pub async fn resolve_blob( + iroh: IrohNode, + hash: Hash, + size: u64, + sources: std::collections::HashSet<( + fvm_shared::address::Address, + fendermint_actor_blobs_shared::blobs::SubscriptionId, + iroh::NodeId, + )>, + bls_private_key: BlsPrivateKey, + signatures: SignatureStorage, +) -> Result<()> { + use iroh_blobs::hashseq::HashSeq; + + info!("Resolving blob: {} (size: {})", hash, size); + debug!("Sources: {} available", sources.len()); + + // Try each source until one succeeds + for (_subscriber, _id, source_node_id) in sources { + debug!("Attempting download from source: {}", source_node_id); + + // Create a NodeAddr from the source + let source_addr = iroh::NodeAddr::new(source_node_id); + + // Step 1: Download the hash sequence blob + match iroh + .blobs_client() + .download_with_opts( + hash, + iroh_blobs::rpc::client::blobs::DownloadOptions { + format: iroh_blobs::BlobFormat::Raw, + nodes: vec![source_addr.clone()], + tag: iroh_blobs::util::SetTagOption::Named(iroh_blobs::Tag( + format!("blob-seq-{}", hash).into(), + )), + mode: iroh_blobs::rpc::client::blobs::DownloadMode::Queued, + }, + ) + .await + { + Ok(progress) => { + match progress.finish().await { + Ok(outcome) => { + let downloaded_size = outcome.local_size + outcome.downloaded_size; + info!( + "Downloaded hash sequence {} (downloaded: {} bytes, local: {} bytes)", + hash, outcome.downloaded_size, outcome.local_size + ); + + // Step 2: Read and parse the hash sequence to get all referenced blobs + let hash_seq_bytes = match iroh.blobs_client().read_to_bytes(hash).await { + Ok(bytes) => bytes, + Err(e) => { + warn!("Failed to read hash sequence {}: {}", hash, e); + continue; + } + }; + + let hash_seq = match HashSeq::try_from(hash_seq_bytes) { + Ok(seq) => seq, + Err(e) => { + warn!("Failed to parse hash sequence {}: {}", hash, e); + continue; + } + }; + + let content_hashes: Vec = hash_seq.iter().collect(); + info!( + "Hash sequence {} contains {} blobs to download", + hash, + content_hashes.len() + ); + + // Step 3: Download all blobs in the hash sequence + let mut all_downloaded = true; + for (idx, content_hash) in content_hashes.iter().enumerate() { + let blob_type = if idx == 0 { + "original content" + } else if idx == 1 { + "metadata" + } else { + "parity" + }; + + debug!( + "Downloading {} blob {} ({}/{}): {}", + blob_type, + content_hash, + idx + 1, + content_hashes.len(), + content_hash + ); + + match iroh + .blobs_client() + .download_with_opts( + *content_hash, + iroh_blobs::rpc::client::blobs::DownloadOptions { + format: iroh_blobs::BlobFormat::Raw, + nodes: vec![source_addr.clone()], + tag: iroh_blobs::util::SetTagOption::Named( + iroh_blobs::Tag( + format!("blob-{}-{}", hash, content_hash).into(), + ), + ), + mode: iroh_blobs::rpc::client::blobs::DownloadMode::Queued, + }, + ) + .await + { + Ok(content_progress) => match content_progress.finish().await { + Ok(content_outcome) => { + debug!( + "Downloaded {} blob {} (downloaded: {} bytes, local: {} bytes)", + blob_type, + content_hash, + content_outcome.downloaded_size, + content_outcome.local_size + ); + } + Err(e) => { + warn!( + "Failed to complete {} blob {} download: {}", + blob_type, content_hash, e + ); + all_downloaded = false; + } + }, + Err(e) => { + warn!( + "Failed to start {} blob {} download: {}", + blob_type, content_hash, e + ); + all_downloaded = false; + } + } + } + + if !all_downloaded { + warn!( + "Not all content blobs downloaded for {}, trying next source", + hash + ); + continue; + } + + info!( + "Successfully resolved blob {} with 
all {} content blobs (expected original size: {} bytes)", + hash, content_hashes.len(), size + ); + + // Generate BLS signature for the blob hash + let hash_bytes = hash.as_bytes(); + let signature = bls_private_key.sign(hash_bytes); + let signature_bytes = signature.as_bytes(); + + // Store signature in memory + { + let mut sigs = signatures.write().unwrap(); + sigs.insert(hash, signature_bytes.clone()); + } + + info!("Generated BLS signature for blob {}", hash); + debug!("Signature: {}", hex::encode(&signature_bytes)); + debug!("Hash sequence blob size: {} bytes", downloaded_size); + + // Blob downloaded successfully + // It will now wait for validator signatures before finalization + return Ok(()); + } + Err(e) => { + warn!("Failed to complete download from {}: {}", source_node_id, e); + } + } + } + Err(e) => { + warn!("Failed to start download from {}: {}", source_node_id, e); + } + } + } + + anyhow::bail!("Failed to resolve blob {} from any source", hash) +} diff --git a/ipc-storage/ipc-decentralized-storage/src/node/rpc.rs b/ipc-storage/ipc-decentralized-storage/src/node/rpc.rs new file mode 100644 index 0000000000..7a242ddeba --- /dev/null +++ b/ipc-storage/ipc-decentralized-storage/src/node/rpc.rs @@ -0,0 +1,427 @@ +// Copyright 2022-2024 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT + +//! RPC server for the decentralized storage node +//! +//! Provides HTTP endpoints for: +//! - Signature queries +//! - Blob metadata queries +//! - Blob content retrieval + +use std::convert::Infallible; +use std::net::SocketAddr; + +use anyhow::Result; +use fendermint_actor_blobs_shared::bytes::B256; +use fendermint_rpc::message::GasParams; +use fendermint_rpc::QueryClient; +use fendermint_vm_message::query::FvmQueryHeight; +use fvm_shared::econ::TokenAmount; +use iroh_blobs::Hash; +use iroh_manager::IrohNode; +use tracing::info; +use warp::Filter; + +use super::{SharedFendermintClient, SignatureStorage}; + +/// Start the RPC server for signature queries and blob queries +pub async fn start_rpc_server( + bind_addr: SocketAddr, + signatures: SignatureStorage, + client: SharedFendermintClient, + iroh: IrohNode, +) -> Result<()> { + // GET /signature/{hash} + let get_signature = warp::path!("signature" / String) + .and(warp::get()) + .and(with_signatures(signatures)) + .and_then(handle_get_signature); + + // GET /health + let health = warp::path("health") + .and(warp::get()) + .map(|| warp::reply::json(&serde_json::json!({"status": "ok"}))); + + // GET /v1/blobs/{hash} - returns blob metadata as JSON + let client_for_meta = client.clone(); + let get_blob = warp::path!("v1" / "blobs" / String) + .and(warp::get()) + .and(warp::query::()) + .and(with_client(client_for_meta)) + .and_then(handle_get_blob); + + // GET /v1/blobs/{hash}/content - returns blob content as binary stream + let get_blob_content = warp::path!("v1" / "blobs" / String / "content") + .and(warp::get()) + .and(warp::query::()) + .and(with_client(client)) + .and(with_iroh(iroh)) + .and_then(handle_get_blob_content); + + // CORS configuration - allow all origins for development + let cors = warp::cors() + .allow_any_origin() + .allow_methods(vec!["GET", "POST", "OPTIONS"]) + .allow_headers(vec!["Content-Type", "Authorization"]); + + let routes = get_signature + .or(health) + .or(get_blob_content) + .or(get_blob) + .with(cors); + + info!("RPC server starting on {}", bind_addr); + warp::serve(routes).run(bind_addr).await; + Ok(()) +} + +/// Warp filter to inject signature storage +fn with_signatures( + signatures: 
SignatureStorage, +) -> impl Filter + Clone { + warp::any().map(move || signatures.clone()) +} + +/// Response for signature query +#[derive(serde::Serialize)] +struct SignatureResponse { + hash: String, + signature: String, +} + +/// Handle GET /signature/{hash} +async fn handle_get_signature( + hash_str: String, + signatures: SignatureStorage, +) -> Result { + use std::str::FromStr; + + // Parse hash from hex string + let hash = Hash::from_str(&hash_str).map_err(|_| warp::reject::not_found())?; + + // Look up signature + let signature = { + let sigs = signatures.read().unwrap(); + sigs.get(&hash).cloned() + }; + + match signature { + Some(sig) => { + let response = SignatureResponse { + hash: hash_str, + signature: hex::encode(&sig), + }; + Ok(warp::reply::json(&response)) + } + None => Err(warp::reject::not_found()), + } +} + +/// Query parameter for optional block height +#[derive(serde::Deserialize)] +struct HeightQuery { + pub height: Option, +} + +/// Warp filter to inject Fendermint client +fn with_client( + client: SharedFendermintClient, +) -> impl Filter + Clone { + warp::any().map(move || client.clone()) +} + +/// Response for blob query +#[derive(serde::Serialize)] +struct BlobResponse { + hash: String, + size: u64, + metadata_hash: String, + status: String, + subscribers: Vec, +} + +/// Subscriber info for blob response +#[derive(serde::Serialize)] +struct BlobSubscriberInfo { + subscription_id: String, + expiry: i64, +} + +/// Error response +#[derive(serde::Serialize)] +struct ErrorResponse { + error: String, +} + +/// Handle GET /v1/blobs/{hash} +async fn handle_get_blob( + hash_str: String, + height_query: HeightQuery, + client: SharedFendermintClient, +) -> Result { + // Parse blob hash - strip 0x prefix if present + let blob_hash_hex = hash_str.strip_prefix("0x").unwrap_or(&hash_str); + + let blob_hash_bytes = match hex::decode(blob_hash_hex) { + Ok(bytes) => bytes, + Err(_) => { + return Ok(warp::reply::with_status( + warp::reply::json(&ErrorResponse { + error: "invalid hex string".to_string(), + }), + warp::http::StatusCode::BAD_REQUEST, + )); + } + }; + + if blob_hash_bytes.len() != 32 { + return Ok(warp::reply::with_status( + warp::reply::json(&ErrorResponse { + error: format!("blob hash must be 32 bytes, got {}", blob_hash_bytes.len()), + }), + warp::http::StatusCode::BAD_REQUEST, + )); + } + + let mut hash_array = [0u8; 32]; + hash_array.copy_from_slice(&blob_hash_bytes); + let blob_hash = B256(hash_array); + + // Set query height + let height = height_query + .height + .map(FvmQueryHeight::from) + .unwrap_or(FvmQueryHeight::Committed); + + // Gas params for the query call + let gas_params = GasParams { + gas_limit: Default::default(), + gas_fee_cap: Default::default(), + gas_premium: Default::default(), + }; + + // Query the blob + let maybe_blob = { + let mut client_guard = client.lock().await; + client_guard + .blob_get_call(blob_hash, TokenAmount::default(), gas_params, height) + .await + }; + + match maybe_blob { + Ok(Some(blob)) => { + let subscribers: Vec = blob + .subscribers + .iter() + .map(|(sub_id, expiry)| BlobSubscriberInfo { + subscription_id: sub_id.to_string(), + expiry: *expiry, + }) + .collect(); + + let response = BlobResponse { + hash: format!("0x{}", hex::encode(blob_hash.0)), + size: blob.size, + metadata_hash: format!("0x{}", hex::encode(blob.metadata_hash.0)), + status: format!("{:?}", blob.status), + subscribers, + }; + Ok(warp::reply::with_status( + warp::reply::json(&response), + warp::http::StatusCode::OK, + )) + } + Ok(None) => 
Ok(warp::reply::with_status( + warp::reply::json(&ErrorResponse { + error: "blob not found".to_string(), + }), + warp::http::StatusCode::NOT_FOUND, + )), + Err(e) => Ok(warp::reply::with_status( + warp::reply::json(&ErrorResponse { + error: format!("query failed: {}", e), + }), + warp::http::StatusCode::INTERNAL_SERVER_ERROR, + )), + } +} + +/// Warp filter to inject Iroh node +fn with_iroh(iroh: IrohNode) -> impl Filter + Clone { + warp::any().map(move || iroh.clone()) +} + +/// Handle GET /v1/blobs/{hash}/content - returns the actual blob content +async fn handle_get_blob_content( + hash_str: String, + height_query: HeightQuery, + client: SharedFendermintClient, + iroh: IrohNode, +) -> Result { + use futures::TryStreamExt; + use iroh_blobs::hashseq::HashSeq; + use warp::hyper::Body; + + // Parse blob hash - strip 0x prefix if present + let blob_hash_hex = hash_str.strip_prefix("0x").unwrap_or(&hash_str); + + let blob_hash_bytes = match hex::decode(blob_hash_hex) { + Ok(bytes) => bytes, + Err(_) => { + return Ok(warp::reply::with_status( + warp::reply::Response::new(Body::from( + serde_json::to_string(&ErrorResponse { + error: "invalid hex string".to_string(), + }) + .unwrap(), + )), + warp::http::StatusCode::BAD_REQUEST, + )); + } + }; + + if blob_hash_bytes.len() != 32 { + return Ok(warp::reply::with_status( + warp::reply::Response::new(Body::from( + serde_json::to_string(&ErrorResponse { + error: format!("blob hash must be 32 bytes, got {}", blob_hash_bytes.len()), + }) + .unwrap(), + )), + warp::http::StatusCode::BAD_REQUEST, + )); + } + + let mut hash_array = [0u8; 32]; + hash_array.copy_from_slice(&blob_hash_bytes); + let blob_hash = B256(hash_array); + + // Set query height + let height = height_query + .height + .map(FvmQueryHeight::from) + .unwrap_or(FvmQueryHeight::Committed); + + // Gas params for the query call + let gas_params = GasParams { + gas_limit: Default::default(), + gas_fee_cap: Default::default(), + gas_premium: Default::default(), + }; + + // First query the blobs actor to verify the blob exists + let maybe_blob = { + let mut client_guard = client.lock().await; + client_guard + .blob_get_call(blob_hash, TokenAmount::default(), gas_params, height) + .await + }; + + match maybe_blob { + Ok(Some(blob)) => { + // The blob hash is actually a hash sequence hash + let hash_seq_hash = Hash::from_bytes(blob_hash.0); + let size = blob.size; + + // Read the hash sequence from Iroh to get the original content hash + let hash_seq_bytes = match iroh.blobs_client().read_to_bytes(hash_seq_hash).await { + Ok(bytes) => bytes, + Err(e) => { + return Ok(warp::reply::with_status( + warp::reply::Response::new(Body::from( + serde_json::to_string(&ErrorResponse { + error: format!("failed to read hash sequence: {}", e), + }) + .unwrap(), + )), + warp::http::StatusCode::INTERNAL_SERVER_ERROR, + )); + } + }; + + let hash_seq = match HashSeq::try_from(hash_seq_bytes) { + Ok(seq) => seq, + Err(e) => { + return Ok(warp::reply::with_status( + warp::reply::Response::new(Body::from( + serde_json::to_string(&ErrorResponse { + error: format!("failed to parse hash sequence: {}", e), + }) + .unwrap(), + )), + warp::http::StatusCode::INTERNAL_SERVER_ERROR, + )); + } + }; + + // First hash in the sequence is the original content + let orig_hash = match hash_seq.iter().next() { + Some(hash) => hash, + None => { + return Ok(warp::reply::with_status( + warp::reply::Response::new(Body::from( + serde_json::to_string(&ErrorResponse { + error: "hash sequence is empty".to_string(), + }) + .unwrap(), + )), + 
warp::http::StatusCode::INTERNAL_SERVER_ERROR, + )); + } + }; + + // Read the actual content from Iroh + let reader = match iroh.blobs_client().read(orig_hash).await { + Ok(reader) => reader, + Err(e) => { + return Ok(warp::reply::with_status( + warp::reply::Response::new(Body::from( + serde_json::to_string(&ErrorResponse { + error: format!("failed to read blob content: {}", e), + }) + .unwrap(), + )), + warp::http::StatusCode::INTERNAL_SERVER_ERROR, + )); + } + }; + + // Stream the content as the response body + let bytes_stream = + reader.map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e)); + let body = Body::wrap_stream(bytes_stream); + + let mut response = warp::reply::Response::new(body); + response.headers_mut().insert( + "Content-Type", + warp::http::HeaderValue::from_static("application/octet-stream"), + ); + response + .headers_mut() + .insert("Content-Length", warp::http::HeaderValue::from(size)); + + Ok(warp::reply::with_status( + response, + warp::http::StatusCode::OK, + )) + } + Ok(None) => Ok(warp::reply::with_status( + warp::reply::Response::new(Body::from( + serde_json::to_string(&ErrorResponse { + error: "blob not found".to_string(), + }) + .unwrap(), + )), + warp::http::StatusCode::NOT_FOUND, + )), + Err(e) => Ok(warp::reply::with_status( + warp::reply::Response::new(Body::from( + serde_json::to_string(&ErrorResponse { + error: format!("query failed: {}", e), + }) + .unwrap(), + )), + warp::http::StatusCode::INTERNAL_SERVER_ERROR, + )), + } +} diff --git a/ipc-storage/ipc-decentralized-storage/src/node/store.rs b/ipc-storage/ipc-decentralized-storage/src/node/store.rs new file mode 100644 index 0000000000..4f4c97708e --- /dev/null +++ b/ipc-storage/ipc-decentralized-storage/src/node/store.rs @@ -0,0 +1,96 @@ +// Copyright 2022-2024 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT + +//! Storage trait and implementations for the storage node +//! +//! This module provides: +//! - A trait for storing node state (e.g., last polled height) +//! - An in-memory implementation for development/testing + +use anyhow::Result; +use std::sync::RwLock; + +/// Storage trait for persisting node state +pub trait Store: Send + Sync { + /// Get the last polled block height + fn get_last_polled_height(&self) -> Result>; + + /// Store the last polled block height + fn set_last_polled_height(&self, height: u64) -> Result<()>; +} + +/// In-memory implementation of the Store trait +/// +/// This implementation stores state in memory and is suitable for +/// development and testing. State is lost when the node restarts. 
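Because the in-memory store defined below forgets its height on restart, a persistent implementation only has to provide the same two methods. A hypothetical file-backed variant is sketched here to show that shape; the file path and text format are assumptions, not part of this change, and wiring it in is a one-line `impl Store for FileStore`.

```rust
use anyhow::Result;
use std::path::PathBuf;

/// Hypothetical file-backed store: a single u64 height persisted as text.
pub struct FileStore {
    path: PathBuf,
}

impl FileStore {
    pub fn new(path: PathBuf) -> Self {
        Self { path }
    }

    pub fn get_last_polled_height(&self) -> Result<Option<u64>> {
        match std::fs::read_to_string(&self.path) {
            Ok(s) => Ok(Some(s.trim().parse()?)),
            Err(e) if e.kind() == std::io::ErrorKind::NotFound => Ok(None),
            Err(e) => Err(e.into()),
        }
    }

    pub fn set_last_polled_height(&self, height: u64) -> Result<()> {
        std::fs::write(&self.path, height.to_string())?;
        Ok(())
    }
}
```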
+pub struct InMemoryStore { + last_polled_height: RwLock<Option<u64>>, +} + +impl InMemoryStore { + /// Create a new in-memory store + pub fn new() -> Self { + Self { + last_polled_height: RwLock::new(None), + } + } + + /// Create a new in-memory store with an initial height + pub fn with_initial_height(height: u64) -> Self { + Self { + last_polled_height: RwLock::new(Some(height)), + } + } +} + +impl Default for InMemoryStore { + fn default() -> Self { + Self::new() + } +} + +impl Store for InMemoryStore { + fn get_last_polled_height(&self) -> Result<Option<u64>> { + let guard = self + .last_polled_height + .read() + .map_err(|e| anyhow::anyhow!("failed to acquire read lock: {}", e))?; + Ok(*guard) + } + + fn set_last_polled_height(&self, height: u64) -> Result<()> { + let mut guard = self + .last_polled_height + .write() + .map_err(|e| anyhow::anyhow!("failed to acquire write lock: {}", e))?; + *guard = Some(height); + Ok(()) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_in_memory_store() { + let store = InMemoryStore::new(); + + // Initially None + assert_eq!(store.get_last_polled_height().unwrap(), None); + + // Set and get + store.set_last_polled_height(100).unwrap(); + assert_eq!(store.get_last_polled_height().unwrap(), Some(100)); + + // Update + store.set_last_polled_height(200).unwrap(); + assert_eq!(store.get_last_polled_height().unwrap(), Some(200)); + } + + #[test] + fn test_in_memory_store_with_initial_height() { + let store = InMemoryStore::with_initial_height(50); + assert_eq!(store.get_last_polled_height().unwrap(), Some(50)); + } +} diff --git a/ipc-storage/ipc-decentralized-storage/src/objects.rs b/ipc-storage/ipc-decentralized-storage/src/objects.rs new file mode 100644 index 0000000000..c2ca3cab0f --- /dev/null +++ b/ipc-storage/ipc-decentralized-storage/src/objects.rs @@ -0,0 +1,1204 @@ +// Copyright 2025 Recall Contributors +// Copyright 2022-2024 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT + +//! Objects API service for handling object upload and download +//! +//! This module provides HTTP endpoints for: +//! - Uploading objects to Iroh storage with entanglement +//! - Downloading objects from buckets +//! 

- Downloading blobs directly + +use std::{ + convert::Infallible, net::SocketAddr, num::ParseIntError, path::Path, str::FromStr, + time::Instant, +}; + +use anyhow::{anyhow, Context, Result}; +use bytes::Buf; +use entangler::{ChunkRange, Config, EntanglementResult, Entangler}; +use entangler_storage::iroh::IrohStorage as EntanglerIrohStorage; +use fendermint_actor_bucket::{GetParams, Object}; +use fendermint_rpc::{client::FendermintClient, message::GasParams, QueryClient}; +use fendermint_vm_message::query::FvmQueryHeight; +use futures_util::{StreamExt, TryStreamExt}; +use fvm_shared::address::{Address, Error as NetworkError, Network}; +use fvm_shared::econ::TokenAmount; +use ipc_api::ethers_address_to_fil_address; +use iroh::NodeAddr; +use iroh_blobs::{hashseq::HashSeq, rpc::client::blobs::BlobStatus, util::SetTagOption, Hash}; +use iroh_manager::{get_blob_hash_and_size, BlobsClient, IrohNode}; +use lazy_static::lazy_static; +use mime_guess::get_mime_extensions_str; +use prometheus::{register_histogram, register_int_counter, Histogram, IntCounter}; +use serde::{Deserialize, Serialize}; +use thiserror::Error; +use tracing::{debug, info}; +use uuid::Uuid; +use warp::path::Tail; +use warp::{ + filters::multipart::Part, + http::{HeaderMap, HeaderValue, StatusCode}, + hyper::body::Body, + Filter, Rejection, Reply, +}; + +/// The alpha parameter for alpha entanglement determines the number of parity blobs to generate +/// for the original blob. +const ENTANGLER_ALPHA: u8 = 3; +/// The s parameter for alpha entanglement determines the number of horizontal strands in the grid. +const ENTANGLER_S: u8 = 5; +/// Chunk size used by the entangler. +const CHUNK_SIZE: u64 = 1024; + +/// Configuration for the objects service +#[derive(Clone, Debug)] +pub struct ObjectsConfig { + /// Listen address for the HTTP server + pub listen_addr: SocketAddr, + /// Tendermint RPC URL for FendermintClient + pub tendermint_url: tendermint_rpc::Url, + /// Maximum object size in bytes + pub max_object_size: u64, + /// Enable metrics + pub metrics_enabled: bool, + /// Metrics listen address + pub metrics_listen: Option, +} + +impl Default for ObjectsConfig { + fn default() -> Self { + Self { + listen_addr: "127.0.0.1:8080".parse().unwrap(), + tendermint_url: "http://localhost:26657".parse().unwrap(), + max_object_size: 100 * 1024 * 1024, // 100MB + metrics_enabled: false, + metrics_listen: None, + } + } +} + +/// Run the objects service +/// +/// This starts an HTTP server with endpoints for object upload/download. 
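One detail worth calling out before the service wiring below: range requests are served by mapping byte offsets onto entangler chunks of `CHUNK_SIZE` bytes and then trimming the first and last chunk. A self-contained sketch of that arithmetic, mirroring what the download handler does later in this file (the helper name is illustrative):

```rust
const CHUNK_SIZE: u64 = 1024;

/// Map an inclusive byte range onto chunk indices plus trim offsets for the
/// first and last chunk, as used when serving HTTP Range requests.
fn byte_range_to_chunks(first_byte: u64, last_byte: u64) -> (u64, u64, usize, usize) {
    let first_chunk = first_byte / CHUNK_SIZE;
    let last_chunk = last_byte / CHUNK_SIZE;
    let offset = (first_byte % CHUNK_SIZE) as usize; // bytes to skip in the first chunk
    let end_offset = (last_byte % CHUNK_SIZE + 1) as usize; // bytes to keep of the last chunk
    (first_chunk, last_chunk, offset, end_offset)
}

#[cfg(test)]
mod range_math_tests {
    use super::*;

    #[test]
    fn maps_bytes_to_chunks() {
        // Bytes 1000..=3000 span chunks 0..=2: drop the first 1000 bytes of
        // chunk 0 and keep only the first 953 bytes of chunk 2.
        assert_eq!(byte_range_to_chunks(1000, 3000), (0, 2, 1000, 953));
    }
}
```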
+pub async fn run_objects_service( + config: ObjectsConfig, + iroh_node: IrohNode, + iroh_resolver_blobs: BlobsClient, +) -> Result<()> { + if config.metrics_enabled { + if let Some(metrics_listen) = config.metrics_listen { + info!(listen_addr = %metrics_listen, "serving metrics"); + let builder = prometheus_exporter::Builder::new(metrics_listen); + let _ = builder.start().context("failed to start metrics server")?; + } + } else { + info!("metrics disabled"); + } + + let client = FendermintClient::new_http(config.tendermint_url, None)?; + + // Admin routes + let health = warp::path!("health") + .and(warp::get()) + .and_then(handle_health); + let node_addr = warp::path!("v1" / "node") + .and(warp::get()) + .and(with_iroh(iroh_node.clone())) + .and_then(handle_node_addr); + + // Objects routes + let objects_upload = warp::path!("v1" / "objects") + .and(warp::post()) + .and(with_iroh(iroh_node.clone())) + .and(warp::multipart::form().max_length(config.max_object_size + 1024 * 1024)) + .and(with_max_size(config.max_object_size)) + .and_then(handle_object_upload); + + let objects_download = warp::path!("v1" / "objects" / String / ..) + .and(warp::path::tail()) + .and( + warp::get() + .map(|| "GET".to_string()) + .or(warp::head().map(|| "HEAD".to_string())) + .unify(), + ) + .and(warp::header::optional::("Range")) + .and(warp::query::()) + .and(with_client(client.clone())) + .and(with_iroh_blobs(iroh_resolver_blobs.clone())) + .and_then(handle_object_download); + + let blobs_download = warp::path!("v1" / "blobs" / String) + .and( + warp::get() + .map(|| "GET".to_string()) + .or(warp::head().map(|| "HEAD".to_string())) + .unify(), + ) + .and(warp::header::optional::("Range")) + .and(warp::query::()) + .and(with_client(client.clone())) + .and(with_iroh_blobs(iroh_resolver_blobs.clone())) + .and_then(handle_blob_download); + + let router = health + .or(node_addr) + .or(objects_upload) + .or(blobs_download) + .or(objects_download) + .with( + warp::cors() + .allow_any_origin() + .allow_headers(vec!["Content-Type"]) + .allow_methods(vec!["POST", "DEL", "GET", "HEAD"]), + ) + .recover(handle_rejection); + + info!(listen_addr = %config.listen_addr, "starting objects service"); + warp::serve(router).run(config.listen_addr).await; + + Ok(()) +} + +/// Create the objects service routes (for integration into existing servers) +pub fn objects_routes( + client: FendermintClient, + iroh_node: IrohNode, + iroh_resolver_blobs: BlobsClient, + max_object_size: u64, +) -> impl Filter + Clone { + let health = warp::path!("health") + .and(warp::get()) + .and_then(handle_health); + let node_addr = warp::path!("v1" / "node") + .and(warp::get()) + .and(with_iroh(iroh_node.clone())) + .and_then(handle_node_addr); + + let objects_upload = warp::path!("v1" / "objects") + .and(warp::post()) + .and(with_iroh(iroh_node.clone())) + .and(warp::multipart::form().max_length(max_object_size + 1024 * 1024)) + .and(with_max_size(max_object_size)) + .and_then(handle_object_upload); + + let objects_download = warp::path!("v1" / "objects" / String / ..) 
+ .and(warp::path::tail()) + .and( + warp::get() + .map(|| "GET".to_string()) + .or(warp::head().map(|| "HEAD".to_string())) + .unify(), + ) + .and(warp::header::optional::("Range")) + .and(warp::query::()) + .and(with_client(client.clone())) + .and(with_iroh_blobs(iroh_resolver_blobs.clone())) + .and_then(handle_object_download); + + let blobs_download = warp::path!("v1" / "blobs" / String) + .and( + warp::get() + .map(|| "GET".to_string()) + .or(warp::head().map(|| "HEAD".to_string())) + .unify(), + ) + .and(warp::header::optional::("Range")) + .and(warp::query::()) + .and(with_client(client.clone())) + .and(with_iroh_blobs(iroh_resolver_blobs.clone())) + .and_then(handle_blob_download); + + health + .or(node_addr) + .or(objects_upload) + .or(blobs_download) + .or(objects_download) +} + +fn with_client( + client: FendermintClient, +) -> impl Filter + Clone { + warp::any().map(move || client.clone()) +} + +fn with_iroh(client: IrohNode) -> impl Filter + Clone { + warp::any().map(move || client.clone()) +} + +fn with_iroh_blobs( + client: BlobsClient, +) -> impl Filter + Clone { + warp::any().map(move || client.clone()) +} + +fn with_max_size(max_size: u64) -> impl Filter + Clone { + warp::any().map(move || max_size) +} + +#[derive(Serialize, Deserialize)] +struct HeightQuery { + pub height: Option, +} + +#[derive(Debug, Error)] +enum ObjectsError { + #[error("error parsing range header: `{0}`")] + RangeHeaderParseError(ParseIntError), + #[error("invalid range header")] + RangeHeaderInvalid, +} + +impl From for ObjectsError { + fn from(err: ParseIntError) -> Self { + ObjectsError::RangeHeaderParseError(err) + } +} + +#[derive(Default)] +struct ObjectParser { + hash: Option, + size: Option, + source: Option, + data_part: Option, +} + +impl ObjectParser { + async fn read_part(&mut self, part: Part) -> anyhow::Result> { + let value = part + .stream() + .fold(Vec::new(), |mut vec, data| async move { + if let Ok(data) = data { + vec.extend_from_slice(data.chunk()); + } + vec + }) + .await; + Ok(value) + } + + async fn read_hash(&mut self, form_part: Part) -> anyhow::Result<()> { + let value = self.read_part(form_part).await?; + let text = String::from_utf8(value).map_err(|_| anyhow!("cannot parse hash"))?; + let hash: Hash = text.parse().map_err(|_| anyhow!("cannot parse hash"))?; + self.hash = Some(hash); + Ok(()) + } + + async fn read_size(&mut self, form_part: Part) -> anyhow::Result<()> { + let value = self.read_part(form_part).await?; + let text = String::from_utf8(value).map_err(|_| anyhow!("cannot parse size"))?; + let size: u64 = text.parse().map_err(|_| anyhow!("cannot parse size"))?; + self.size = Some(size); + Ok(()) + } + + async fn read_source(&mut self, form_part: Part) -> anyhow::Result<()> { + let value = self.read_part(form_part).await?; + let text = String::from_utf8(value).map_err(|_| anyhow!("cannot parse source"))?; + let source: NodeAddr = + serde_json::from_str(&text).map_err(|_| anyhow!("cannot parse source"))?; + self.source = Some(source); + Ok(()) + } + + async fn read_form(mut form_data: warp::multipart::FormData) -> anyhow::Result { + let mut object_parser = ObjectParser::default(); + while let Some(part) = form_data.next().await { + let part = part.map_err(|e| anyhow!("cannot read form data: {}", e))?; + match part.name() { + "hash" => { + object_parser.read_hash(part).await?; + } + "size" => { + object_parser.read_size(part).await?; + } + "source" => { + object_parser.read_source(part).await?; + } + "data" => { + object_parser.data_part = Some(part); + // This 
early return was added to avoid the "failed to lock multipart state" error. + // It implies that the data field must be the last one sent in the multipart form. + return Ok(object_parser); + } + // Ignore but accept signature-related fields for backward compatibility + "chain_id" | "msg" => { + // Read and discard the data + let _ = object_parser.read_part(part).await?; + } + _ => { + return Err(anyhow!("unknown form field")); + } + } + } + Ok(object_parser) + } +} + +lazy_static! { + static ref COUNTER_BLOBS_UPLOADED: IntCounter = register_int_counter!( + "objects_blobs_uploaded_total", + "Number of successfully uploaded blobs" + ) + .unwrap(); + static ref COUNTER_BYTES_UPLOADED: IntCounter = register_int_counter!( + "objects_bytes_uploaded_total", + "Number of successfully uploaded bytes" + ) + .unwrap(); + static ref HISTOGRAM_UPLOAD_TIME: Histogram = register_histogram!( + "objects_upload_time_seconds", + "Time spent uploading an object in seconds" + ) + .unwrap(); + static ref COUNTER_BLOBS_DOWNLOADED: IntCounter = register_int_counter!( + "objects_blobs_downloaded_total", + "Number of successfully downloaded blobs" + ) + .unwrap(); + static ref COUNTER_BYTES_DOWNLOADED: IntCounter = register_int_counter!( + "objects_bytes_downloaded_total", + "Number of successfully downloaded bytes" + ) + .unwrap(); + static ref HISTOGRAM_DOWNLOAD_TIME: Histogram = register_histogram!( + "objects_download_time_seconds", + "Time spent downloading an object in seconds" + ) + .unwrap(); +} + +async fn handle_health() -> Result { + Ok(warp::reply::reply()) +} + +async fn handle_node_addr(iroh: IrohNode) -> Result { + let node_addr = iroh.endpoint().node_addr().await.map_err(|e| { + Rejection::from(BadRequest { + message: format!("failed to get iroh node address info: {}", e), + }) + })?; + Ok(warp::reply::json(&node_addr)) +} + +#[derive(Serialize)] +struct UploadResponse { + hash: String, // Hash sequence hash (for bucket storage) + orig_hash: String, // Original blob content hash (for addBlob) + metadata_hash: String, +} + +async fn handle_object_upload( + iroh: IrohNode, + form_data: warp::multipart::FormData, + max_size: u64, +) -> Result { + let start_time = Instant::now(); + let parser = ObjectParser::read_form(form_data).await.map_err(|e| { + Rejection::from(BadRequest { + message: format!("failed to read form: {}", e), + }) + })?; + + let size = match parser.size { + Some(size) => size, + None => { + return Err(Rejection::from(BadRequest { + message: "missing size in form".to_string(), + })) + } + }; + if size > max_size { + return Err(Rejection::from(BadRequest { + message: format!("blob size exceeds maximum of {}", max_size), + })); + } + + let upload_id = Uuid::new_v4(); + + // Handle the two upload cases + let hash = match (parser.source, parser.data_part) { + // Case 1: Source node provided - download from the source + (Some(source), None) => { + let hash = match parser.hash { + Some(hash) => hash, + None => { + return Err(Rejection::from(BadRequest { + message: "missing hash in form".to_string(), + })) + } + }; + + let tag = iroh_blobs::Tag(format!("temp-{hash}-{upload_id}").into()); + let progress = iroh + .blobs_client() + .download_with_opts( + hash, + iroh_blobs::rpc::client::blobs::DownloadOptions { + format: iroh_blobs::BlobFormat::Raw, + nodes: vec![source], + tag: SetTagOption::Named(tag), + mode: iroh_blobs::rpc::client::blobs::DownloadMode::Queued, + }, + ) + .await + .map_err(|e| { + Rejection::from(BadRequest { + message: format!("failed to fetch blob {}: {}", hash, e), + }) + 
})?; + let outcome = progress.finish().await.map_err(|e| { + Rejection::from(BadRequest { + message: format!("failed to fetch blob {}: {}", hash, e), + }) + })?; + let outcome_size = outcome.local_size + outcome.downloaded_size; + if outcome_size != size { + return Err(Rejection::from(BadRequest { + message: format!( + "blob size and given size do not match (expected {}, got {})", + size, outcome_size + ), + })); + } + + debug!( + "downloaded blob {} in {:?} (size: {}; local_size: {}; downloaded_size: {})", + hash, outcome.stats.elapsed, size, outcome.local_size, outcome.downloaded_size, + ); + COUNTER_BYTES_UPLOADED.inc_by(outcome.downloaded_size); + hash + } + + // Case 2: Direct upload - store the provided data + (None, Some(data_part)) => { + let stream = data_part.stream().map(|result| { + result + .map(|mut buf| buf.copy_to_bytes(buf.remaining())) + .map_err(|e| { + std::io::Error::new(std::io::ErrorKind::Other, format!("Warp error: {}", e)) + }) + }); + + let batch = iroh.blobs_client().batch().await.map_err(|e| { + Rejection::from(BadRequest { + message: format!("failed to store blob: {}", e), + }) + })?; + let temp_tag = batch.add_stream(stream).await.map_err(|e| { + Rejection::from(BadRequest { + message: format!("failed to store blob: {}", e), + }) + })?; + + let hash = *temp_tag.hash(); + let new_tag = iroh_blobs::Tag(format!("temp-{hash}-{upload_id}").into()); + batch.persist_to(temp_tag, new_tag).await.map_err(|e| { + Rejection::from(BadRequest { + message: format!("failed to persist blob: {}", e), + }) + })?; + + drop(batch); + + let status = iroh.blobs_client().status(hash).await.map_err(|e| { + Rejection::from(BadRequest { + message: format!("failed to check blob status: {}", e), + }) + })?; + let BlobStatus::Complete { size } = status else { + return Err(Rejection::from(BadRequest { + message: "failed to store data".to_string(), + })); + }; + COUNTER_BYTES_UPLOADED.inc_by(size); + debug!("stored uploaded blob {} (size: {})", hash, size); + + hash + } + + (Some(_), Some(_)) => { + return Err(Rejection::from(BadRequest { + message: "cannot provide both source and data".to_string(), + })); + } + + (None, None) => { + return Err(Rejection::from(BadRequest { + message: "must provide either source or data".to_string(), + })); + } + }; + + debug!("raw uploaded hash: {}", hash); + + let ent = new_entangler(iroh.blobs_client()).map_err(|e| { + Rejection::from(BadRequest { + message: format!("failed to create entangler: {}", e), + }) + })?; + let ent_result = ent.entangle_uploaded(hash.to_string()).await.map_err(|e| { + Rejection::from(BadRequest { + message: format!("failed to entangle uploaded data: {}", e), + }) + })?; + + debug!( + "entanglement result: orig_hash={}, metadata_hash={}, upload_results_count={}", + ent_result.orig_hash, + ent_result.metadata_hash, + ent_result.upload_results.len() + ); + + let hash_seq_hash = tag_entangled_data(&iroh, &ent_result, upload_id) + .await + .map_err(|e| { + Rejection::from(BadRequest { + message: format!("failed to tag entangled data: {}", e), + }) + })?; + + debug!("hash_seq_hash: {}", hash_seq_hash); + + COUNTER_BLOBS_UPLOADED.inc(); + HISTOGRAM_UPLOAD_TIME.observe(start_time.elapsed().as_secs_f64()); + + let response = UploadResponse { + hash: hash_seq_hash.to_string(), + orig_hash: ent_result.orig_hash.clone(), + metadata_hash: ent_result.metadata_hash, + }; + Ok(warp::reply::json(&response)) +} + +async fn tag_entangled_data( + iroh: &IrohNode, + ent_result: &EntanglementResult, + upload_id: Uuid, +) -> Result { + let orig_hash = 
Hash::from_str(ent_result.orig_hash.as_str())?; + let metadata_hash = Hash::from_str(ent_result.metadata_hash.as_str())?; + + // collect all hashes related to the blob, but ignore the metadata hash, as we want to make + // sure that the metadata hash is the second hash in the sequence after the original hash + let upload_hashes = ent_result + .upload_results + .iter() + .map(|r| Hash::from_str(&r.hash)) + .collect::, _>>()? + .into_iter() + .filter(|h| h != &metadata_hash) + .collect::>(); + + let mut hashes = vec![orig_hash, metadata_hash]; + hashes.extend(upload_hashes); + + let hashes_str = hashes + .iter() + .map(|h| h.to_string()) + .collect::>() + .join(", "); + + let batch = iroh.blobs_client().batch().await?; + + // make a hash sequence object from the hashes and upload it to iroh + let hash_seq = hashes.into_iter().collect::(); + + let temp_tag = batch + .add_bytes_with_opts(hash_seq, iroh_blobs::BlobFormat::HashSeq) + .await?; + let hash_seq_hash = *temp_tag.hash(); + + debug!( + "storing hash sequence: {} ({})", + hash_seq_hash.to_string(), + hashes_str + ); + + // this tag will be replaced later by the validator to "stored-seq-{hash_seq_hash}" + let hash_seq_tag = iroh_blobs::Tag(format!("temp-seq-{hash_seq_hash}").into()); + batch.persist_to(temp_tag, hash_seq_tag).await?; + + drop(batch); + + // delete all tags returned by the entangler + for ent_upload_result in &ent_result.upload_results { + let tag_value = ent_upload_result + .info + .get("tag") + .ok_or_else(|| anyhow!("Missing tag in entanglement upload result"))?; + let tag = iroh_blobs::Tag::from(tag_value.clone()); + iroh.blobs_client().tags().delete(tag).await?; + } + + // remove upload tags + let orig_tag = iroh_blobs::Tag(format!("temp-{orig_hash}-{upload_id}").into()); + iroh.blobs_client().tags().delete(orig_tag).await?; + + Ok(hash_seq_hash) +} + +fn new_entangler(iroh: &BlobsClient) -> Result, entangler::Error> { + Entangler::new( + EntanglerIrohStorage::from_client(iroh.clone()), + Config::new(ENTANGLER_ALPHA, ENTANGLER_S), + ) +} + +fn get_range_params(range: String, size: u64) -> Result<(u64, u64), ObjectsError> { + let range: Vec = range + .replace("bytes=", "") + .split('-') + .map(|n| n.to_string()) + .collect(); + if range.len() != 2 { + return Err(ObjectsError::RangeHeaderInvalid); + } + let (first, mut last): (u64, u64) = match (!range[0].is_empty(), !range[1].is_empty()) { + (true, true) => (range[0].parse::()?, range[1].parse::()?), + (true, false) => (range[0].parse::()?, size - 1), + (false, true) => { + let last = range[1].parse::()?; + if last > size { + (0, size - 1) + } else { + (size - last, size - 1) + } + } + (false, false) => (0, size - 1), + }; + if first > last || first >= size { + return Err(ObjectsError::RangeHeaderInvalid); + } + if last >= size { + last = size - 1; + } + Ok((first, last)) +} + +struct ObjectRange { + start: u64, + end: u64, + len: u64, + size: u64, + body: Body, +} + +async fn handle_object_download( + address: String, + tail: Tail, + method: String, + range: Option, + height_query: HeightQuery, + client: F, + iroh: BlobsClient, +) -> Result { + let address = parse_address(&address).map_err(|e| { + Rejection::from(BadRequest { + message: format!("invalid address {}: {}", address, e), + }) + })?; + let height = height_query + .height + .unwrap_or(FvmQueryHeight::Committed.into()); + + let path = urlencoding::decode(tail.as_str()) + .map_err(|e| { + Rejection::from(BadRequest { + message: format!("invalid address {}: {}", address, e), + }) + })? 
+ .to_string(); + + let key: Vec = path.into(); + let start_time = Instant::now(); + let maybe_object = os_get(client, address, GetParams(key.clone()), height) + .await + .map_err(|e| { + Rejection::from(BadRequest { + message: format!("bucket get error: {}", e), + }) + })?; + + match maybe_object { + Some(object) => { + let seq_hash = Hash::from_bytes(object.hash.0); + let (hash, size) = get_blob_hash_and_size(&iroh, seq_hash).await.map_err(|e| { + Rejection::from(BadRequest { + message: e.to_string(), + }) + })?; + + let ent = new_entangler(&iroh).map_err(|e| { + Rejection::from(BadRequest { + message: format!("failed to create entangler: {}", e), + }) + })?; + let recovery_hash = Hash::from_bytes(object.recovery_hash.0); + + let object_range = match range { + Some(range) => { + let (first_byte, last_byte) = get_range_params(range, size).map_err(|e| { + Rejection::from(BadRequest { + message: e.to_string(), + }) + })?; + let len = (last_byte - first_byte) + 1; + + let first_chunk = first_byte / CHUNK_SIZE; + let last_chunk = last_byte / CHUNK_SIZE; + + let bytes_stream = ent + .download_range( + &hash.to_string(), + ChunkRange::Between(first_chunk, last_chunk), + Some(recovery_hash.to_string()), + ) + .await + .map_err(|e| { + Rejection::from(BadRequest { + message: format!("failed to download object: {} {}", hash, e), + }) + })?; + + let offset = (first_byte % CHUNK_SIZE) as usize; + let end_offset = (last_byte % CHUNK_SIZE + 1) as usize; + + let bytes_stream = bytes_stream.enumerate().map(move |(i, chunk)| { + let chunk = chunk?; + let result = if first_chunk == last_chunk { + // Single chunk case - slice with both offsets + chunk.slice(offset..end_offset) + } else if i == 0 { + // First of multiple chunks + chunk.slice(offset..) + } else if i == (last_chunk - first_chunk) as usize { + // Last of multiple chunks + chunk.slice(..end_offset) + } else { + // Middle chunks + chunk + }; + Ok::<_, anyhow::Error>(result) + }); + + let body = Body::wrap_stream(bytes_stream); + ObjectRange { + start: first_byte, + end: last_byte, + len, + size, + body, + } + } + None => { + let bytes_stream = ent + .download(&hash.to_string(), Some(&recovery_hash.to_string())) + .await + .map_err(|e| { + Rejection::from(BadRequest { + message: format!("failed to download object: {} {}", hash, e), + }) + })?; + let body = Body::wrap_stream(bytes_stream.map_err(|e| anyhow::anyhow!(e))); + ObjectRange { + start: 0, + end: size - 1, + len: size, + size, + body, + } + } + }; + + // If it is a HEAD request, we don't need to send the body, + // but we still need to send the Content-Length header + if method == "HEAD" { + let mut response = warp::reply::Response::new(Body::empty()); + let mut header_map = HeaderMap::new(); + header_map.insert("Content-Length", HeaderValue::from(object_range.len)); + let headers = response.headers_mut(); + headers.extend(header_map); + return Ok(response); + } + + let mut response = warp::reply::Response::new(object_range.body); + let mut header_map = HeaderMap::new(); + if object_range.len < object_range.size { + *response.status_mut() = StatusCode::PARTIAL_CONTENT; + header_map.insert( + "Content-Range", + HeaderValue::from_str(&format!( + "bytes {}-{}/{}", + object_range.start, object_range.end, object_range.size + )) + .unwrap(), + ); + } else { + header_map.insert("Accept-Ranges", HeaderValue::from_str("bytes").unwrap()); + } + header_map.insert("Content-Length", HeaderValue::from(object_range.len)); + + let content_type = object + .metadata + .get("content-type") + .cloned() + 
.unwrap_or_else(|| "application/octet-stream".to_string()); + header_map.insert( + "Content-Type", + HeaderValue::from_str(&content_type).unwrap(), + ); + + let key_str = String::from_utf8_lossy(&key); + if let Some(val) = get_filename_with_extension(&key_str, &content_type) { + let disposition = format!("attachment; filename=\"{}\"", val); + header_map.insert( + "Content-Disposition", + HeaderValue::from_str(&disposition).unwrap(), + ); + } + + let headers = response.headers_mut(); + headers.extend(header_map); + + COUNTER_BLOBS_DOWNLOADED.inc(); + COUNTER_BYTES_DOWNLOADED.inc_by(object_range.len); + HISTOGRAM_DOWNLOAD_TIME.observe(start_time.elapsed().as_secs_f64()); + + Ok(response) + } + None => Err(Rejection::from(NotFound)), + } +} + +/// Handle direct blob download by querying the blobs actor. +async fn handle_blob_download( + blob_hash_str: String, + method: String, + range: Option, + height_query: HeightQuery, + client: F, + iroh: BlobsClient, +) -> Result { + // Strip 0x prefix if present + let blob_hash_hex = blob_hash_str.strip_prefix("0x").unwrap_or(&blob_hash_str); + + let blob_hash_bytes = hex::decode(blob_hash_hex).map_err(|e| { + Rejection::from(BadRequest { + message: format!("invalid blob hash {}: {}", blob_hash_str, e), + }) + })?; + + if blob_hash_bytes.len() != 32 { + return Err(Rejection::from(BadRequest { + message: format!("blob hash must be 32 bytes, got {}", blob_hash_bytes.len()), + })); + } + + let mut hash_array = [0u8; 32]; + hash_array.copy_from_slice(&blob_hash_bytes); + let blob_hash = fendermint_actor_blobs_shared::bytes::B256(hash_array); + + let height = height_query + .height + .unwrap_or(FvmQueryHeight::Committed.into()); + + let start_time = Instant::now(); + + // Query the blobs actor to get blob info + let maybe_blob = blob_get(client, blob_hash, height).await.map_err(|e| { + Rejection::from(BadRequest { + message: format!("blobs actor query error: {}", e), + }) + })?; + + match maybe_blob { + Some(blob) => { + // The blob hash from blobs actor is the hash sequence hash + // We need to parse it to get the original content hash + let hash_seq_hash = Hash::from_bytes(blob_hash.0); + let size = blob.size; + + debug!( + "blob download: hash_seq_hash={}, size={}", + hash_seq_hash, size + ); + + // Read the hash sequence to get the original content hash + let hash_seq_bytes = iroh.read_to_bytes(hash_seq_hash).await.map_err(|e| { + Rejection::from(BadRequest { + message: format!("failed to read hash sequence: {} {}", hash_seq_hash, e), + }) + })?; + + let hash_seq = HashSeq::try_from(hash_seq_bytes).map_err(|e| { + Rejection::from(BadRequest { + message: format!("failed to parse hash sequence: {}", e), + }) + })?; + + // First hash in the sequence is the original content + let orig_hash = hash_seq.iter().next().ok_or_else(|| { + Rejection::from(BadRequest { + message: "hash sequence is empty".to_string(), + }) + })?; + + debug!("parsed orig_hash from hash sequence: {}", orig_hash); + + let object_range = match range { + Some(range) => { + let (first_byte, last_byte) = get_range_params(range, size).map_err(|e| { + Rejection::from(BadRequest { + message: e.to_string(), + }) + })?; + let len = (last_byte - first_byte) + 1; + + // Use read_at for range requests on the original content + use iroh_blobs::rpc::client::blobs::ReadAtLen; + let read_len = ReadAtLen::AtMost(len); + let bytes = iroh + .read_at_to_bytes(orig_hash, first_byte, read_len) + .await + .map_err(|e| { + Rejection::from(BadRequest { + message: format!( + "failed to read blob at range: {} {}", 
+ orig_hash, e + ), + }) + })?; + + let body = Body::from(bytes); + ObjectRange { + start: first_byte, + end: last_byte, + len, + size, + body, + } + } + None => { + // Read the entire original content blob directly from Iroh + debug!("reading original content with hash: {}", orig_hash); + + let reader = iroh.read(orig_hash).await.map_err(|e| { + Rejection::from(BadRequest { + message: format!("failed to read blob: {} {}", orig_hash, e), + }) + })?; + + let bytes_stream = reader.map(move |chunk_result: Result| { + chunk_result.map_err(|e: std::io::Error| anyhow::anyhow!(e)) + }); + + let body = Body::wrap_stream(bytes_stream); + ObjectRange { + start: 0, + end: size - 1, + len: size, + size, + body, + } + } + }; + + // If it is a HEAD request, we don't need to send the body + if method == "HEAD" { + let mut response = warp::reply::Response::new(Body::empty()); + let mut header_map = HeaderMap::new(); + header_map.insert("Content-Length", HeaderValue::from(object_range.len)); + let headers = response.headers_mut(); + headers.extend(header_map); + return Ok(response); + } + + let mut response = warp::reply::Response::new(object_range.body); + let mut header_map = HeaderMap::new(); + if object_range.len < object_range.size { + *response.status_mut() = StatusCode::PARTIAL_CONTENT; + header_map.insert( + "Content-Range", + HeaderValue::from_str(&format!( + "bytes {}-{}/{}", + object_range.start, object_range.end, object_range.size + )) + .unwrap(), + ); + } else { + header_map.insert("Accept-Ranges", HeaderValue::from_str("bytes").unwrap()); + } + header_map.insert("Content-Length", HeaderValue::from(object_range.len)); + header_map.insert( + "Content-Type", + HeaderValue::from_str("application/octet-stream").unwrap(), + ); + + let headers = response.headers_mut(); + headers.extend(header_map); + + COUNTER_BLOBS_DOWNLOADED.inc(); + COUNTER_BYTES_DOWNLOADED.inc_by(object_range.len); + HISTOGRAM_DOWNLOAD_TIME.observe(start_time.elapsed().as_secs_f64()); + + Ok(response) + } + None => Err(Rejection::from(NotFound)), + } +} + +/// Parse an f/eth-address from string. +pub fn parse_address(s: &str) -> anyhow::Result
{ + let addr = Network::Mainnet + .parse_address(s) + .or_else(|e| match e { + NetworkError::UnknownNetwork => Network::Testnet.parse_address(s), + _ => Err(e), + }) + .or_else(|_| { + let addr = ethers::types::Address::from_str(s)?; + ethers_address_to_fil_address(&addr) + })?; + Ok(addr) +} + +// Rejection handlers + +#[derive(Clone, Debug)] +struct BadRequest { + message: String, +} + +impl warp::reject::Reject for BadRequest {} + +#[derive(Debug)] +struct NotFound; + +impl warp::reject::Reject for NotFound {} + +#[derive(Clone, Debug, Serialize)] +struct ErrorMessage { + code: u16, + message: String, +} + +async fn handle_rejection(err: Rejection) -> Result { + let (code, message) = if err.is_not_found() || err.find::().is_some() { + (StatusCode::NOT_FOUND, "Not Found".to_string()) + } else if let Some(e) = err.find::() { + let err = e.to_owned(); + (StatusCode::BAD_REQUEST, err.message) + } else if err.find::().is_some() { + ( + StatusCode::PAYLOAD_TOO_LARGE, + "Payload too large".to_string(), + ) + } else { + (StatusCode::INTERNAL_SERVER_ERROR, format!("{:?}", err)) + }; + + let reply = warp::reply::json(&ErrorMessage { + code: code.as_u16(), + message, + }); + let reply = warp::reply::with_header(reply, "Access-Control-Allow-Origin", "*"); + Ok(warp::reply::with_status(reply, code)) +} + +// RPC methods + +async fn os_get( + mut client: F, + address: Address, + params: GetParams, + height: u64, +) -> anyhow::Result> { + let gas_params = GasParams { + gas_limit: Default::default(), + gas_fee_cap: Default::default(), + gas_premium: Default::default(), + }; + let h = FvmQueryHeight::from(height); + + let return_data = client + .os_get_call(address, params, TokenAmount::default(), gas_params, h) + .await?; + + Ok(return_data) +} + +async fn blob_get( + mut client: F, + blob_hash: fendermint_actor_blobs_shared::bytes::B256, + height: u64, +) -> anyhow::Result> { + let gas_params = GasParams { + gas_limit: Default::default(), + gas_fee_cap: Default::default(), + gas_premium: Default::default(), + }; + let h = FvmQueryHeight::from(height); + + let return_data = client + .blob_get_call(blob_hash, TokenAmount::default(), gas_params, h) + .await?; + + Ok(return_data) +} + +fn get_filename_with_extension(filename: &str, content_type: &str) -> Option { + let path = Path::new(filename); + + // Checks if filename already has extension + if path.extension().and_then(|ext| ext.to_str()).is_some() { + return Some(filename.to_string()); + } + + get_mime_extensions_str(content_type)? 
+ .first() + .map(|ext| format!("{}.{}", filename, ext)) +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_get_range_params() { + // bad formats + assert!(get_range_params("bytes=0,50".into(), 100).is_err()); + assert!(get_range_params("bytes=-0-50".into(), 100).is_err()); + assert!(get_range_params("bytes=-50-".into(), 100).is_err()); + // first > last + assert!(get_range_params("bytes=50-0".into(), 100).is_err()); + // first >= size + assert!(get_range_params("bytes=100-".into(), 100).is_err()); + // first == last + let (first, last) = get_range_params("bytes=0-0".into(), 100).unwrap(); + assert_eq!(first, 0); + assert_eq!(last, 0); + // exact range given + let (first, last) = get_range_params("bytes=0-50".into(), 100).unwrap(); + assert_eq!(first, 0); + assert_eq!(last, 50); + // only end given, this means "give me the last 50 bytes" + let (first, last) = get_range_params("bytes=-50".into(), 100).unwrap(); + assert_eq!(first, 50); + assert_eq!(last, 99); + // only start given, this means "give me everything but the first 50 bytes" + let (first, last) = get_range_params("bytes=50-".into(), 100).unwrap(); + assert_eq!(first, 50); + assert_eq!(last, 99); + // neither given, this means "give me everything" + let (first, last) = get_range_params("bytes=-".into(), 100).unwrap(); + assert_eq!(first, 0); + assert_eq!(last, 99); + // last >= size + let (first, last) = get_range_params("bytes=50-100".into(), 100).unwrap(); + assert_eq!(first, 50); + assert_eq!(last, 99); + } +} diff --git a/ipc-storage/ipc-dropbox/.env.example b/ipc-storage/ipc-dropbox/.env.example new file mode 100644 index 0000000000..9c9059842d --- /dev/null +++ b/ipc-storage/ipc-dropbox/.env.example @@ -0,0 +1,8 @@ +# IPC Network Configuration +VITE_TENDERMINT_RPC=http://localhost:26657 +VITE_OBJECTS_LISTEN_ADDR=http://localhost:8080 +VITE_NODE_OPERATION_OBJECT_API=http://localhost:8081 +VITE_ETH_RPC=http://localhost:8545 +VITE_BLOBS_ACTOR=0x6d342defae60f6402aee1f804653bbae4e66ae46 +VITE_ADM_ACTOR=0x7caec36fc8a3a867ca5b80c6acb5e5871d05aa28 +VITE_CHAIN_ID=1023102 diff --git a/ipc-storage/ipc-dropbox/README.md b/ipc-storage/ipc-dropbox/README.md new file mode 100644 index 0000000000..1cb15f41f8 --- /dev/null +++ b/ipc-storage/ipc-dropbox/README.md @@ -0,0 +1,89 @@ +# IPC Decentralized Dropbox + +A Dropbox-like web application for storing and managing files on the IPC network. + +## Prerequisites + +- Node.js 18+ +- MetaMask browser extension +- Running IPC network services: + - Gateway (port 8080) + - Node (port 8081) + - Tendermint RPC (port 26657) + - Ethereum RPC (port 8545) + +## Setup + +1. Install dependencies: + +```bash
npm install
``` + +2. Copy the environment file and configure: + +```bash
cp .env.example .env
``` + +Edit `.env` with your service URLs if they differ from the defaults. + +3. Start the development server: + +```bash
npm run dev
``` + +4. 
Open http://localhost:3000 in your browser + +## Configuration + +The following environment variables can be configured: + +| Variable | Default | Description | +|----------|---------|-------------| +| `VITE_TENDERMINT_RPC` | `http://localhost:26657` | Tendermint RPC endpoint | +| `VITE_OBJECTS_LISTEN_ADDR` | `http://localhost:8080` | Gateway objects API | +| `VITE_NODE_OPERATION_OBJECT_API` | `http://localhost:8081` | Node operation API | +| `VITE_ETH_RPC` | `http://localhost:8545` | Ethereum RPC endpoint | +| `VITE_BLOBS_ACTOR` | `0x6d342...` | Blobs actor contract address | +| `VITE_ADM_ACTOR` | `0x7caec...` | ADM actor contract address | + +## Usage Flow + +1. **Connect Wallet**: Click "Connect MetaMask" to connect your wallet. The app will attempt to switch to the IPC network automatically. + +2. **Buy Credit**: If you don't have credit, purchase some using FIL. This is required for storage. + +3. **Create Bucket**: Create a storage bucket to hold your files. Each bucket is an on-chain smart contract. + +4. **Upload Files**: Once you have credit and a bucket, you can: + - Upload files using the "Upload File" button + - Create folders for organization + - Navigate through folders using breadcrumbs + +5. **Download Files**: Click the "Download" button next to any file to retrieve it. + +## Features + +- MetaMask wallet integration +- Credit balance display and purchase +- Bucket creation and management +- File upload to gateway + on-chain registration +- Folder-based navigation (S3-style) +- File download from node + +## Tech Stack + +- React 18 +- TypeScript +- Vite +- ethers.js v6 + +## Building for Production + +```bash +npm run build +``` + +The built files will be in the `dist` directory. diff --git a/ipc-storage/ipc-dropbox/index.html b/ipc-storage/ipc-dropbox/index.html new file mode 100644 index 0000000000..0fce51b4a2 --- /dev/null +++ b/ipc-storage/ipc-dropbox/index.html @@ -0,0 +1,13 @@ + + + + + + + IPC Decentralized Dropbox + + +
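For reference, the usage flow described in the README boils down to two HTTP calls plus one `addObject` transaction. Below is a minimal sketch, assuming the helpers and endpoints added elsewhere in this PR (`getBucketContract`, `base32ToHex`, `POST /v1/objects`, `GET /v1/node`, `GET /v1/blobs/{hash}/content`); the function name, import paths, and standalone-script shape are illustrative only, not part of the app.

```ts
// Illustrative sketch of the upload → register → download flow implemented by
// useUpload.ts and useDownload.ts. Endpoint paths, form fields, and the
// addObject signature are taken from this PR; everything else is hypothetical.
import { ethers } from 'ethers';
import { getBucketContract } from './src/utils/contracts';
import { base32ToHex } from './src/utils/base32';

async function storeAndFetch(signer: ethers.Signer, bucketAddress: string, file: File) {
  const gateway = 'http://localhost:8080'; // VITE_OBJECTS_LISTEN_ADDR
  const node = 'http://localhost:8081';    // VITE_NODE_OPERATION_OBJECT_API

  // 1. Stage the bytes with the gateway; it replies with base32 blob hashes.
  const form = new FormData();
  form.append('size', file.size.toString());
  form.append('data', file);
  const staged = await (await fetch(`${gateway}/v1/objects`, { method: 'POST', body: form })).json();
  const nodeInfo = await (await fetch(`${gateway}/v1/node`)).json();

  // 2. Register the object on-chain so the bucket (and blobs actor) tracks it.
  const bucket = getBucketContract(bucketAddress, signer);
  const blobHash = base32ToHex(staged.hash);
  const tx = await bucket.addObject(
    '0x' + nodeInfo.node_id,                  // source node that already holds the bytes
    file.name,                                // object key
    blobHash,
    base32ToHex(staged.metadata_hash ?? ''),  // recovery/metadata hash
    BigInt(file.size),
  );
  await tx.wait();

  // 3. Read the bytes back through the node's blobs API (hash without 0x prefix).
  const res = await fetch(`${node}/v1/blobs/${blobHash.slice(2)}/content`);
  return res.blob();
}
```

The point of the split is that file bytes never touch the chain: they travel through the gateway and node APIs, while only hashes and sizes are recorded on-chain via the bucket contract.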
+ + + diff --git a/ipc-storage/ipc-dropbox/package.json b/ipc-storage/ipc-dropbox/package.json new file mode 100644 index 0000000000..2874854609 --- /dev/null +++ b/ipc-storage/ipc-dropbox/package.json @@ -0,0 +1,23 @@ +{ + "name": "ipc-dropbox", + "version": "1.0.0", + "private": true, + "type": "module", + "scripts": { + "dev": "vite", + "build": "tsc && vite build", + "preview": "vite preview" + }, + "dependencies": { + "ethers": "^6.9.0", + "react": "^18.2.0", + "react-dom": "^18.2.0" + }, + "devDependencies": { + "@types/react": "^18.2.43", + "@types/react-dom": "^18.2.17", + "@vitejs/plugin-react": "^4.2.1", + "typescript": "^5.3.3", + "vite": "^5.0.10" + } +} diff --git a/ipc-storage/ipc-dropbox/src/App.tsx b/ipc-storage/ipc-dropbox/src/App.tsx new file mode 100644 index 0000000000..e708aeb517 --- /dev/null +++ b/ipc-storage/ipc-dropbox/src/App.tsx @@ -0,0 +1,132 @@ +import React from 'react'; +import { useWallet } from './hooks/useWallet'; +import { useCredit } from './hooks/useCredit'; +import { useBucket, useFileExplorer } from './hooks/useBucket'; +import { useUpload } from './hooks/useUpload'; +import { useDownload } from './hooks/useDownload'; +import { WalletConnect } from './components/WalletConnect'; +import { CreditManager } from './components/CreditManager'; +import { BucketManager } from './components/BucketManager'; +import { FileExplorer } from './components/FileExplorer'; + +function App() { + const wallet = useWallet(); + const credit = useCredit(wallet.signer, wallet.address); + const bucket = useBucket(wallet.signer, wallet.address); + const fileExplorer = useFileExplorer(wallet.signer, bucket.bucketAddress); + const upload = useUpload(wallet.signer, bucket.bucketAddress); + const download = useDownload(); + + return ( +
+
+

IPC Decentralized Dropbox

+ +
+ +
+ {!wallet.isConnected ? ( +
+

Welcome to IPC Decentralized Dropbox

+

Connect your wallet to start storing files on the IPC network.

+ +
+ ) : !credit.hasCredit ? ( +
+

Step 1: Get Storage Credit

+ +
+ ) : !bucket.hasBucket ? ( +
+

Step 2: Create a Storage Bucket

+
+ +
+ +
+ ) : ( +
+
+ + +
+
+ +
+
+ )} +
+ +
+

Powered by IPC Network

+
+
+ ); +} + +export default App; diff --git a/ipc-storage/ipc-dropbox/src/components/BucketManager.tsx b/ipc-storage/ipc-dropbox/src/components/BucketManager.tsx new file mode 100644 index 0000000000..4cab36ef0d --- /dev/null +++ b/ipc-storage/ipc-dropbox/src/components/BucketManager.tsx @@ -0,0 +1,59 @@ +import React, { useEffect } from 'react'; + +interface BucketManagerProps { + bucketAddress: string | null; + hasBucket: boolean; + isLoading: boolean; + isCreating: boolean; + error: string | null; + onFetchBuckets: () => Promise; + onCreateBucket: () => Promise; +} + +export function BucketManager({ + bucketAddress, + hasBucket, + isLoading, + isCreating, + error, + onFetchBuckets, + onCreateBucket, +}: BucketManagerProps) { + useEffect(() => { + onFetchBuckets(); + }, [onFetchBuckets]); + + const shortenAddress = (addr: string) => + `${addr.slice(0, 10)}...${addr.slice(-8)}`; + + if (isLoading) { + return
Checking for buckets...
; + } + + return ( +
+

Storage Bucket

+ {hasBucket ? ( +
+

+ Bucket Address:{' '} + {shortenAddress(bucketAddress!)} +

+
+ ) : ( +
+

You need a bucket to store files.

+ +
+ )} + + {error &&

{error}

} +
+ ); +} diff --git a/ipc-storage/ipc-dropbox/src/components/CreditManager.tsx b/ipc-storage/ipc-dropbox/src/components/CreditManager.tsx new file mode 100644 index 0000000000..ee071bebc0 --- /dev/null +++ b/ipc-storage/ipc-dropbox/src/components/CreditManager.tsx @@ -0,0 +1,83 @@ +import React, { useEffect, useState } from 'react'; +import { ethers } from 'ethers'; +import { CreditInfo } from '../types'; + +interface CreditManagerProps { + credit: CreditInfo | null; + hasCredit: boolean; + isLoading: boolean; + isPurchasing: boolean; + error: string | null; + onFetchCredit: () => void; + onBuyCredit: (amount: string) => Promise; +} + +export function CreditManager({ + credit, + hasCredit, + isLoading, + isPurchasing, + error, + onFetchCredit, + onBuyCredit, +}: CreditManagerProps) { + const [amount, setAmount] = useState('0.1'); + + useEffect(() => { + onFetchCredit(); + }, [onFetchCredit]); + + const formatCredit = (value: bigint) => { + return ethers.formatEther(value); + }; + + const handleBuyCredit = async () => { + await onBuyCredit(amount); + }; + + if (isLoading) { + return
Loading credit info...
; + } + + return ( +
+

Credit Balance

+ {credit && ( +
+

+ Current Credit: {formatCredit(credit.balance)} FIL +

+

+ Free Credit: {formatCredit(credit.freeCredit)} FIL +

+
+ )} + + {!hasCredit && ( +
+

You need credit to use IPC storage.

+
+ setAmount(e.target.value)} + step="0.1" + min="0.01" + className="input" + /> + FIL + +
+
+ )} + + {error &&

{error}

} +
+ ); +} diff --git a/ipc-storage/ipc-dropbox/src/components/FileExplorer.tsx b/ipc-storage/ipc-dropbox/src/components/FileExplorer.tsx new file mode 100644 index 0000000000..51301ed70f --- /dev/null +++ b/ipc-storage/ipc-dropbox/src/components/FileExplorer.tsx @@ -0,0 +1,237 @@ +import React, { useEffect, useRef, useState } from 'react'; +import { FileItem } from '../types'; + +interface FileExplorerProps { + files: FileItem[]; + currentPath: string; + isLoading: boolean; + isUploading: boolean; + isDeleting: boolean; + uploadProgress: string; + error: string | null; + uploadError: string | null; + deleteError: string | null; + onNavigateToFolder: (path: string) => void; + onNavigateUp: () => void; + onRefresh: () => void; + onUpload: (file: File, targetPath: string) => Promise; + onDownload: (blobHash: string, fileName: string) => Promise; + onDelete: (key: string) => Promise; + onFetchFiles: (prefix: string) => void; +} + +export function FileExplorer({ + files, + currentPath, + isLoading, + isUploading, + isDeleting, + uploadProgress, + error, + uploadError, + deleteError, + onNavigateToFolder, + onNavigateUp, + onRefresh, + onUpload, + onDownload, + onDelete, + onFetchFiles, +}: FileExplorerProps) { + const fileInputRef = useRef(null); + const [newFolderName, setNewFolderName] = useState(''); + const [showNewFolderInput, setShowNewFolderInput] = useState(false); + + useEffect(() => { + onFetchFiles(currentPath); + }, [onFetchFiles, currentPath]); + + const handleFileSelect = async (e: React.ChangeEvent) => { + const file = e.target.files?.[0]; + if (file) { + const success = await onUpload(file, currentPath); + if (success) { + onRefresh(); + } + } + // Reset input + if (fileInputRef.current) { + fileInputRef.current.value = ''; + } + }; + + const handleCreateFolder = () => { + if (newFolderName.trim()) { + const folderPath = currentPath + newFolderName.trim() + '/'; + onNavigateToFolder(folderPath); + setNewFolderName(''); + setShowNewFolderInput(false); + } + }; + + const formatSize = (size?: bigint) => { + if (!size) return '-'; + const bytes = Number(size); + if (bytes < 1024) return `${bytes} B`; + if (bytes < 1024 * 1024) return `${(bytes / 1024).toFixed(1)} KB`; + if (bytes < 1024 * 1024 * 1024) return `${(bytes / (1024 * 1024)).toFixed(1)} MB`; + return `${(bytes / (1024 * 1024 * 1024)).toFixed(1)} GB`; + }; + + const getBreadcrumbs = () => { + const parts = currentPath.split('/').filter(Boolean); + const crumbs = [{ name: 'Home', path: '' }]; + let path = ''; + for (const part of parts) { + path += part + '/'; + crumbs.push({ name: part, path }); + } + return crumbs; + }; + + return ( +
+
+
+ {getBreadcrumbs().map((crumb, index, arr) => ( + + + {index < arr.length - 1 && /} + + ))} +
+ +
+ + + + + +
+
+ + {showNewFolderInput && ( +
+ setNewFolderName(e.target.value)} + placeholder="Folder name" + className="input" + onKeyDown={(e) => e.key === 'Enter' && handleCreateFolder()} + /> + + +
+ )} + + {(error || uploadError || deleteError) && ( +

{error || uploadError || deleteError}

+ )} + + {isLoading ? ( +
Loading files...
+ ) : files.length === 0 ? ( +
+

This folder is empty

+

Upload a file or create a folder to get started

+
+ ) : ( +
+
+ Name + Size + Actions +
+ {files.map((file) => ( +
+ + {file.isFolder ? ( + + ) : ( + + File + {file.name} + + )} + + {formatSize(file.size)} + + {!file.isFolder && file.blobHash && ( + <> + + + + )} + +
+ ))} +
+ )} +
+ ); +} diff --git a/ipc-storage/ipc-dropbox/src/components/WalletConnect.tsx b/ipc-storage/ipc-dropbox/src/components/WalletConnect.tsx new file mode 100644 index 0000000000..8be4cc4e8a --- /dev/null +++ b/ipc-storage/ipc-dropbox/src/components/WalletConnect.tsx @@ -0,0 +1,42 @@ +import React from 'react'; + +interface WalletConnectProps { + address: string | null; + isConnecting: boolean; + error: string | null; + onConnect: () => void; + onDisconnect: () => void; +} + +export function WalletConnect({ + address, + isConnecting, + error, + onConnect, + onDisconnect, +}: WalletConnectProps) { + const shortenAddress = (addr: string) => + `${addr.slice(0, 6)}...${addr.slice(-4)}`; + + return ( +
+ {address ? ( +
+ {shortenAddress(address)} + +
+ ) : ( + + )} + {error &&

{error}

} +
+ ); +} diff --git a/ipc-storage/ipc-dropbox/src/hooks/useBucket.ts b/ipc-storage/ipc-dropbox/src/hooks/useBucket.ts new file mode 100644 index 0000000000..2eaee998dd --- /dev/null +++ b/ipc-storage/ipc-dropbox/src/hooks/useBucket.ts @@ -0,0 +1,252 @@ +import { useState, useCallback } from 'react'; +import { ethers } from 'ethers'; +import { getConfig } from '../utils/config'; +import { getAdmContract, getBucketContract, MACHINE_INITIALIZED_TOPIC } from '../utils/contracts'; +import { QueryResult, ObjectEntry, FileItem } from '../types'; + +export function useBucket(signer: ethers.Signer | null, address: string | null) { + const [bucketAddress, setBucketAddress] = useState(null); + const [isLoading, setIsLoading] = useState(false); + const [isCreating, setIsCreating] = useState(false); + const [error, setError] = useState(null); + + const fetchBuckets = useCallback(async () => { + if (!signer || !address) return []; + + setIsLoading(true); + setError(null); + + try { + const config = getConfig(); + // Use provider for view calls to avoid MetaMask issues + const provider = await signer.provider; + if (!provider) throw new Error('No provider available'); + const contract = getAdmContract(config.admActor, provider); + // listBuckets returns array of (kind, addr, metadata[]) + const machines = await contract.listBuckets(address); + + console.log('listBuckets raw result:', machines); + + // ethers.js v6 returns tuples as arrays, access by index + // Machine = [kind, addr, metadata[]] + const buckets: string[] = []; + for (const m of machines) { + // Access as array: m[0] = kind, m[1] = addr, m[2] = metadata + const kind = typeof m.kind !== 'undefined' ? m.kind : m[0]; + const addr = typeof m.addr !== 'undefined' ? m.addr : m[1]; + console.log('Machine:', { kind, addr }); + if (Number(kind) === 0) { + buckets.push(addr); + } + } + + console.log('Filtered buckets:', buckets); + + if (buckets.length > 0) { + setBucketAddress(buckets[0]); // Use the first bucket + } + + return buckets; + } catch (err: unknown) { + const error = err as Error; + console.error('fetchBuckets error:', err); + setError(error.message || 'Failed to fetch buckets'); + return []; + } finally { + setIsLoading(false); + } + }, [signer, address]); + + const createBucket = useCallback(async () => { + if (!signer) { + setError('Wallet not connected'); + return null; + } + + setIsCreating(true); + setError(null); + + try { + const config = getConfig(); + const contract = getAdmContract(config.admActor, signer); + const tx = await contract.createBucket(); + const receipt = await tx.wait(); + + // Extract bucket address from MachineInitialized event + let newBucketAddress: string | null = null; + for (const log of receipt.logs) { + if (log.topics[0] === MACHINE_INITIALIZED_TOPIC) { + // The address is in the data field (last 20 bytes of 32-byte word) + const data = log.data; + newBucketAddress = '0x' + data.slice(26, 66); + break; + } + } + + if (newBucketAddress) { + setBucketAddress(newBucketAddress); + } + + return newBucketAddress; + } catch (err: unknown) { + const error = err as Error; + setError(error.message || 'Failed to create bucket'); + return null; + } finally { + setIsCreating(false); + } + }, [signer]); + + const selectBucket = useCallback((address: string) => { + setBucketAddress(address); + }, []); + + return { + bucketAddress, + isLoading, + isCreating, + error, + fetchBuckets, + createBucket, + selectBucket, + hasBucket: !!bucketAddress, + }; +} + +export function useFileExplorer(signer: ethers.Signer | null, 
bucketAddress: string | null) { + const [files, setFiles] = useState([]); + const [currentPath, setCurrentPath] = useState(''); + const [isLoading, setIsLoading] = useState(false); + const [error, setError] = useState(null); + + const fetchFiles = useCallback(async (prefix: string = '') => { + if (!signer || !bucketAddress) return; + + setIsLoading(true); + setError(null); + + try { + // Use provider for view calls to avoid MetaMask issues + const provider = await signer.provider; + if (!provider) throw new Error('No provider available'); + const contract = getBucketContract(bucketAddress, provider); + + let result: QueryResult; + if (prefix) { + result = await contract['queryObjects(string,string)'](prefix, '/'); + } else { + result = await contract['queryObjects(string,string)']('', '/'); + } + + const fileItems: FileItem[] = []; + + // Add folders from commonPrefixes + for (const folderPath of result.commonPrefixes) { + const name = folderPath.slice(prefix.length).replace(/\/$/, ''); + if (name) { + fileItems.push({ + name, + fullPath: folderPath, + isFolder: true, + }); + } + } + + // Add files from objects + console.log('queryObjects result:', result); + console.log('objects:', result.objects); + for (const obj of result.objects) { + console.log('Raw object:', obj); + const objEntry = obj as unknown as ObjectEntry; + const key = objEntry.key || (obj as unknown as { 0: string })[0]; + const state = objEntry.state || (obj as unknown as { 1: { 0: string; 1: bigint; 2: bigint } })[1]; + + console.log('Parsed object:', { key, state }); + + const name = key.slice(prefix.length); + if (name && !name.includes('/')) { + const fileItem = { + name, + fullPath: key, + isFolder: false, + size: state.size ?? (state as unknown as { 1: bigint })[1], + expiry: state.expiry ?? (state as unknown as { 2: bigint })[2], + blobHash: state.blobHash ?? (state as unknown as { 0: string })[0], + }; + console.log('FileItem:', fileItem); + fileItems.push(fileItem); + } + } + + console.log('Final fileItems:', fileItems); + setFiles(fileItems); + setCurrentPath(prefix); + } catch (err: unknown) { + const error = err as Error; + console.error('fetchFiles error:', err); + setError(error.message || 'Failed to fetch files'); + } finally { + setIsLoading(false); + } + }, [signer, bucketAddress]); + + const navigateToFolder = useCallback((folderPath: string) => { + fetchFiles(folderPath); + }, [fetchFiles]); + + const navigateUp = useCallback(() => { + if (!currentPath) return; + const parts = currentPath.split('/').filter(Boolean); + parts.pop(); + const newPath = parts.length > 0 ? 
parts.join('/') + '/' : ''; + fetchFiles(newPath); + }, [currentPath, fetchFiles]); + + const refresh = useCallback(() => { + fetchFiles(currentPath); + }, [fetchFiles, currentPath]); + + const [isDeleting, setIsDeleting] = useState(false); + const [deleteError, setDeleteError] = useState(null); + + const deleteObject = useCallback(async (key: string) => { + if (!signer || !bucketAddress) { + setDeleteError('Wallet or bucket not connected'); + return false; + } + + setIsDeleting(true); + setDeleteError(null); + + try { + const contract = getBucketContract(bucketAddress, signer); + const tx = await contract.deleteObject(key); + await tx.wait(); + + // Refresh the file list after deletion + await fetchFiles(currentPath); + return true; + } catch (err: unknown) { + const error = err as Error; + console.error('deleteObject error:', err); + setDeleteError(error.message || 'Failed to delete object'); + return false; + } finally { + setIsDeleting(false); + } + }, [signer, bucketAddress, fetchFiles, currentPath]); + + return { + files, + currentPath, + isLoading, + error, + fetchFiles, + navigateToFolder, + navigateUp, + refresh, + deleteObject, + isDeleting, + deleteError, + }; +} diff --git a/ipc-storage/ipc-dropbox/src/hooks/useCredit.ts b/ipc-storage/ipc-dropbox/src/hooks/useCredit.ts new file mode 100644 index 0000000000..1ef9352dbc --- /dev/null +++ b/ipc-storage/ipc-dropbox/src/hooks/useCredit.ts @@ -0,0 +1,88 @@ +import { useState, useCallback } from 'react'; +import { ethers } from 'ethers'; +import { getConfig } from '../utils/config'; +import { getBlobsContract } from '../utils/contracts'; +import { CreditInfo } from '../types'; + +export function useCredit(signer: ethers.Signer | null, address: string | null) { + const [credit, setCredit] = useState(null); + const [isLoading, setIsLoading] = useState(false); + const [isPurchasing, setIsPurchasing] = useState(false); + const [error, setError] = useState(null); + + const fetchCredit = useCallback(async () => { + if (!signer || !address) return; + + setIsLoading(true); + setError(null); + + try { + const config = getConfig(); + // Use provider for view calls to avoid MetaMask issues + const provider = await signer.provider; + if (!provider) throw new Error('No provider available'); + const contract = getBlobsContract(config.blobsActor, provider); + const account = await contract.getAccount(address); + + console.log('getAccount raw result:', account); + + // Access by property name or index (ethers v6 returns both) + const creditFree = account.creditFree ?? account[1]; + const creditCommitted = account.creditCommitted ?? account[2]; + const lastDebitEpoch = account.lastDebitEpoch ?? 
account[4]; + + console.log('Parsed credit:', { creditFree, creditCommitted, lastDebitEpoch }); + + setCredit({ + balance: creditFree + creditCommitted, + freeCredit: creditFree, + lastDebitEpoch: BigInt(lastDebitEpoch), + }); + } catch (err: unknown) { + const error = err as Error; + console.error('fetchCredit error:', err); + setError(error.message || 'Failed to fetch credit'); + } finally { + setIsLoading(false); + } + }, [signer, address]); + + const buyCredit = useCallback(async (amountEther: string) => { + if (!signer) { + setError('Wallet not connected'); + return false; + } + + setIsPurchasing(true); + setError(null); + + try { + const config = getConfig(); + const contract = getBlobsContract(config.blobsActor, signer); + const tx = await contract.buyCredit({ + value: ethers.parseEther(amountEther), + }); + await tx.wait(); + await fetchCredit(); + return true; + } catch (err: unknown) { + const error = err as Error; + setError(error.message || 'Failed to buy credit'); + return false; + } finally { + setIsPurchasing(false); + } + }, [signer, fetchCredit]); + + const hasCredit = credit && (credit.balance > 0n || credit.freeCredit > 0n); + + return { + credit, + isLoading, + isPurchasing, + error, + fetchCredit, + buyCredit, + hasCredit, + }; +} diff --git a/ipc-storage/ipc-dropbox/src/hooks/useDownload.ts b/ipc-storage/ipc-dropbox/src/hooks/useDownload.ts new file mode 100644 index 0000000000..8326f34acd --- /dev/null +++ b/ipc-storage/ipc-dropbox/src/hooks/useDownload.ts @@ -0,0 +1,58 @@ +import { useState, useCallback } from 'react'; +import { getConfig } from '../utils/config'; + +export function useDownload() { + const [isDownloading, setIsDownloading] = useState(false); + const [error, setError] = useState(null); + + const downloadFile = useCallback(async (blobHash: string, fileName: string) => { + console.log('downloadFile called:', { blobHash, fileName }); + setIsDownloading(true); + setError(null); + + try { + const config = getConfig(); + + // Remove 0x prefix if present + const hash = blobHash.startsWith('0x') ? 
blobHash.slice(2) : blobHash; + console.log('Fetching from:', `${config.nodeOperationObjectApi}/v1/blobs/${hash}/content`); + + const response = await fetch(`${config.nodeOperationObjectApi}/v1/blobs/${hash}/content`); + + if (!response.ok) { + throw new Error(`Download failed: ${response.statusText}`); + } + + const blob = await response.blob(); + + // Create download link + const url = URL.createObjectURL(blob); + const a = document.createElement('a'); + a.href = url; + a.download = fileName; + document.body.appendChild(a); + a.click(); + document.body.removeChild(a); + URL.revokeObjectURL(url); + + return true; + } catch (err: unknown) { + const error = err as Error; + setError(error.message || 'Download failed'); + return false; + } finally { + setIsDownloading(false); + } + }, []); + + const clearError = useCallback(() => { + setError(null); + }, []); + + return { + isDownloading, + error, + downloadFile, + clearError, + }; +} diff --git a/ipc-storage/ipc-dropbox/src/hooks/useUpload.ts b/ipc-storage/ipc-dropbox/src/hooks/useUpload.ts new file mode 100644 index 0000000000..4b389e173f --- /dev/null +++ b/ipc-storage/ipc-dropbox/src/hooks/useUpload.ts @@ -0,0 +1,145 @@ +import { useState, useCallback } from 'react'; +import { ethers } from 'ethers'; +import { getConfig } from '../utils/config'; +import { getBucketContract, getBlobsContract, BlobStatus } from '../utils/contracts'; +import { base32ToHex } from '../utils/base32'; +import { UploadResponse, NodeInfo } from '../types'; + +export function useUpload(signer: ethers.Signer | null, bucketAddress: string | null) { + const [isUploading, setIsUploading] = useState(false); + const [uploadProgress, setUploadProgress] = useState(''); + const [blobStatus, setBlobStatus] = useState(null); + const [error, setError] = useState(null); + + const pollBlobStatus = useCallback(async (blobHash: string, maxAttempts: number = 60) => { + const config = getConfig(); + const provider = signer?.provider; + if (!provider) return; + + const blobsContract = getBlobsContract(config.blobsActor, provider); + + for (let i = 0; i < maxAttempts; i++) { + try { + const blob = await blobsContract.getBlob(blobHash); + const status = Number(blob.status ?? blob[3]); + + if (status === BlobStatus.Resolved) { + setBlobStatus('Resolved'); + setUploadProgress('Upload complete! Blob resolved.'); + return true; + } else if (status === BlobStatus.Failed) { + setBlobStatus('Failed'); + setUploadProgress('Blob resolution failed.'); + return false; + } else { + setBlobStatus('Pending'); + setUploadProgress(`Waiting for resolution... (${i + 1}/${maxAttempts})`); + } + } catch (err) { + console.log('Blob not yet registered, waiting...', err); + setUploadProgress(`Waiting for blob registration... 
(${i + 1}/${maxAttempts})`); + } + + // Wait 2 seconds before next poll + await new Promise(resolve => setTimeout(resolve, 2000)); + } + + setUploadProgress('Timeout waiting for blob resolution'); + return false; + }, [signer]); + + const uploadFile = useCallback(async (file: File, targetPath: string) => { + if (!signer || !bucketAddress) { + setError('Wallet or bucket not connected'); + return false; + } + + setIsUploading(true); + setUploadProgress('Preparing upload...'); + setBlobStatus(null); + setError(null); + + try { + const config = getConfig(); + + // Step 1: Upload to gateway + setUploadProgress('Uploading to gateway...'); + const formData = new FormData(); + formData.append('size', file.size.toString()); + formData.append('data', file); + + const uploadResponse = await fetch(`${config.objectsListenAddr}/v1/objects`, { + method: 'POST', + body: formData, + }); + + if (!uploadResponse.ok) { + throw new Error(`Upload failed: ${uploadResponse.statusText}`); + } + + const uploadResult: UploadResponse = await uploadResponse.json(); + console.log('Upload result:', uploadResult); + + // Get node info + const nodeResponse = await fetch(`${config.objectsListenAddr}/v1/node`); + const nodeInfo: NodeInfo = await nodeResponse.json(); + + // Convert base32 hashes to hex + const blobHash = base32ToHex(uploadResult.hash); + const metadataHash = base32ToHex(uploadResult.metadata_hash || uploadResult.metadataHash || ''); + const sourceNode = '0x' + nodeInfo.node_id; + + console.log('Blob hash (hex):', blobHash); + console.log('Metadata hash (hex):', metadataHash); + console.log('Source node:', sourceNode); + + // Step 2: Register in bucket + setUploadProgress('Registering in bucket...'); + const contract = getBucketContract(bucketAddress, signer); + + // Build the full path + let fullPath = targetPath; + if (!fullPath.endsWith('/') && fullPath !== '') { + fullPath += '/'; + } + fullPath += file.name; + + const tx = await contract.addObject( + sourceNode, + fullPath, + blobHash, + metadataHash, + BigInt(file.size) + ); + + setUploadProgress('Waiting for transaction confirmation...'); + await tx.wait(); + + // Step 3: Poll for blob status + setUploadProgress('Checking blob status...'); + await pollBlobStatus(blobHash); + + return true; + } catch (err: unknown) { + const error = err as Error; + console.error('Upload error:', err); + setError(error.message || 'Upload failed'); + return false; + } finally { + setIsUploading(false); + } + }, [signer, bucketAddress, pollBlobStatus]); + + const clearError = useCallback(() => { + setError(null); + }, []); + + return { + isUploading, + uploadProgress, + blobStatus, + error, + uploadFile, + clearError, + }; +} diff --git a/ipc-storage/ipc-dropbox/src/hooks/useWallet.ts b/ipc-storage/ipc-dropbox/src/hooks/useWallet.ts new file mode 100644 index 0000000000..59b9fd4190 --- /dev/null +++ b/ipc-storage/ipc-dropbox/src/hooks/useWallet.ts @@ -0,0 +1,130 @@ +import { useState, useCallback, useEffect } from 'react'; +import { ethers } from 'ethers'; +import { getConfig } from '../utils/config'; + +declare global { + interface Window { + ethereum?: ethers.Eip1193Provider & { + on: (event: string, callback: (...args: unknown[]) => void) => void; + removeListener: (event: string, callback: (...args: unknown[]) => void) => void; + }; + } +} + +export interface WalletState { + address: string | null; + signer: ethers.Signer | null; + provider: ethers.BrowserProvider | null; + isConnecting: boolean; + error: string | null; +} + +export function useWallet() { + const 
[state, setState] = useState({ + address: null, + signer: null, + provider: null, + isConnecting: false, + error: null, + }); + + const connect = useCallback(async () => { + if (!window.ethereum) { + setState(s => ({ ...s, error: 'MetaMask not found. Please install MetaMask.' })); + return; + } + + setState(s => ({ ...s, isConnecting: true, error: null })); + + try { + const config = getConfig(); + const provider = new ethers.BrowserProvider(window.ethereum); + + // Request accounts + await provider.send('eth_requestAccounts', []); + + // Try to switch to the correct network + try { + const chainId = await provider.send('eth_chainId', []); + const targetChainId = '0x' + BigInt(config.chainId).toString(16); + + if (chainId !== targetChainId) { + try { + await provider.send('wallet_switchEthereumChain', [{ chainId: targetChainId }]); + } catch (switchError: unknown) { + const err = switchError as { code?: number }; + // Chain not added, try to add it + if (err.code === 4902) { + await provider.send('wallet_addEthereumChain', [{ + chainId: targetChainId, + chainName: 'IPC Local', + rpcUrls: [config.ethRpc], + nativeCurrency: { + name: 'FIL', + symbol: 'FIL', + decimals: 18, + }, + }]); + } + } + } + } catch { + // Ignore network switch errors + } + + const signer = await provider.getSigner(); + const address = await signer.getAddress(); + + setState({ + address, + signer, + provider, + isConnecting: false, + error: null, + }); + } catch (err: unknown) { + const error = err as Error; + setState(s => ({ + ...s, + isConnecting: false, + error: error.message || 'Failed to connect wallet', + })); + } + }, []); + + const disconnect = useCallback(() => { + setState({ + address: null, + signer: null, + provider: null, + isConnecting: false, + error: null, + }); + }, []); + + // Listen for account changes + useEffect(() => { + if (!window.ethereum) return; + + const handleAccountsChanged = (accounts: unknown) => { + const accs = accounts as string[]; + if (accs.length === 0) { + disconnect(); + } else if (state.address && accs[0].toLowerCase() !== state.address.toLowerCase()) { + connect(); + } + }; + + window.ethereum.on('accountsChanged', handleAccountsChanged); + return () => { + window.ethereum?.removeListener('accountsChanged', handleAccountsChanged); + }; + }, [state.address, connect, disconnect]); + + return { + ...state, + connect, + disconnect, + isConnected: !!state.address, + }; +} diff --git a/ipc-storage/ipc-dropbox/src/index.css b/ipc-storage/ipc-dropbox/src/index.css new file mode 100644 index 0000000000..3aedc0fa09 --- /dev/null +++ b/ipc-storage/ipc-dropbox/src/index.css @@ -0,0 +1,509 @@ +* { + box-sizing: border-box; + margin: 0; + padding: 0; +} + +:root { + --primary: #4f46e5; + --primary-hover: #4338ca; + --secondary: #6b7280; + --secondary-hover: #4b5563; + --success: #10b981; + --warning: #f59e0b; + --error: #ef4444; + --background: #f9fafb; + --surface: #ffffff; + --border: #e5e7eb; + --text: #111827; + --text-secondary: #6b7280; + --radius: 8px; + --shadow: 0 1px 3px rgba(0, 0, 0, 0.1); + --shadow-lg: 0 4px 6px rgba(0, 0, 0, 0.1); +} + +body { + font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, sans-serif; + background-color: var(--background); + color: var(--text); + line-height: 1.5; +} + +.app { + min-height: 100vh; + display: flex; + flex-direction: column; +} + +/* Header */ +.header { + background: var(--surface); + border-bottom: 1px solid var(--border); + padding: 1rem 2rem; + display: flex; + justify-content: space-between; + align-items: center; + 
box-shadow: var(--shadow); +} + +.header h1 { + font-size: 1.5rem; + font-weight: 700; + color: var(--primary); +} + +/* Wallet Connect */ +.wallet-connect { + display: flex; + align-items: center; + gap: 1rem; +} + +.wallet-info { + display: flex; + align-items: center; + gap: 0.75rem; +} + +.wallet-address { + font-family: monospace; + background: var(--background); + padding: 0.5rem 0.75rem; + border-radius: var(--radius); + font-size: 0.875rem; +} + +/* Buttons */ +.btn { + padding: 0.5rem 1rem; + border: none; + border-radius: var(--radius); + font-size: 0.875rem; + font-weight: 500; + cursor: pointer; + transition: all 0.2s; +} + +.btn:disabled { + opacity: 0.5; + cursor: not-allowed; +} + +.btn-primary { + background: var(--primary); + color: white; +} + +.btn-primary:hover:not(:disabled) { + background: var(--primary-hover); +} + +.btn-secondary { + background: var(--secondary); + color: white; +} + +.btn-secondary:hover:not(:disabled) { + background: var(--secondary-hover); +} + +.btn-icon { + background: var(--background); + color: var(--text); + border: 1px solid var(--border); +} + +.btn-icon:hover:not(:disabled) { + background: var(--border); +} + +.btn-small { + padding: 0.25rem 0.5rem; + font-size: 0.75rem; +} + +.btn-danger { + background: var(--error); + color: white; +} + +.btn-danger:hover:not(:disabled) { + background: #dc2626; +} + +.btn-large { + padding: 0.75rem 1.5rem; + font-size: 1rem; +} + +/* Main Content */ +.main { + flex: 1; + padding: 2rem; + max-width: 1400px; + margin: 0 auto; + width: 100%; +} + +/* Welcome Screen */ +.welcome { + text-align: center; + padding: 4rem 2rem; +} + +.welcome h2 { + font-size: 2rem; + margin-bottom: 1rem; +} + +.welcome p { + color: var(--text-secondary); + margin-bottom: 2rem; +} + +/* Setup Steps */ +.setup-step { + max-width: 600px; + margin: 0 auto; + background: var(--surface); + padding: 2rem; + border-radius: var(--radius); + box-shadow: var(--shadow); +} + +.setup-step h2 { + font-size: 1.5rem; + margin-bottom: 1.5rem; + text-align: center; +} + +.credit-summary { + margin-bottom: 2rem; + padding-bottom: 2rem; + border-bottom: 1px solid var(--border); +} + +/* Credit Manager */ +.credit-manager h3, +.bucket-manager h3 { + font-size: 1rem; + margin-bottom: 1rem; + color: var(--text-secondary); +} + +.credit-info p, +.bucket-info p { + margin-bottom: 0.5rem; +} + +.credit-info strong, +.bucket-info strong { + color: var(--text); +} + +.buy-credit { + margin-top: 1rem; +} + +.buy-form { + display: flex; + align-items: center; + gap: 0.5rem; + margin-top: 0.75rem; +} + +.input { + padding: 0.5rem 0.75rem; + border: 1px solid var(--border); + border-radius: var(--radius); + font-size: 0.875rem; + width: 100px; +} + +.unit { + color: var(--text-secondary); + font-size: 0.875rem; +} + +/* Dashboard Layout */ +.dashboard { + display: grid; + grid-template-columns: 280px 1fr; + gap: 2rem; +} + +.sidebar { + display: flex; + flex-direction: column; + gap: 1.5rem; +} + +.sidebar > div { + background: var(--surface); + padding: 1.25rem; + border-radius: var(--radius); + box-shadow: var(--shadow); +} + +.content { + background: var(--surface); + border-radius: var(--radius); + box-shadow: var(--shadow); + overflow: hidden; +} + +/* File Explorer */ +.file-explorer { + min-height: 500px; +} + +.explorer-toolbar { + padding: 1rem 1.25rem; + border-bottom: 1px solid var(--border); + display: flex; + justify-content: space-between; + align-items: center; + flex-wrap: wrap; + gap: 1rem; +} + +.breadcrumbs { + display: flex; + 
align-items: center; + gap: 0.25rem; + flex-wrap: wrap; +} + +.breadcrumb { + background: none; + border: none; + color: var(--primary); + cursor: pointer; + padding: 0.25rem 0.5rem; + border-radius: 4px; + font-size: 0.875rem; +} + +.breadcrumb:hover:not(:disabled) { + background: var(--background); +} + +.breadcrumb:disabled { + color: var(--text); + cursor: default; + font-weight: 500; +} + +.separator { + color: var(--text-secondary); +} + +.toolbar-actions { + display: flex; + align-items: center; + gap: 0.5rem; +} + +/* New Folder Input */ +.new-folder-input { + padding: 1rem 1.25rem; + border-bottom: 1px solid var(--border); + display: flex; + align-items: center; + gap: 0.5rem; + background: var(--background); +} + +.new-folder-input .input { + flex: 1; + max-width: 300px; +} + +/* File List */ +.file-list { + overflow-x: auto; +} + +.file-header, +.file-row { + display: grid; + grid-template-columns: 1fr 100px 180px; + padding: 0.75rem 1.25rem; + gap: 1rem; + align-items: center; +} + +.file-header { + background: var(--background); + font-size: 0.75rem; + font-weight: 600; + text-transform: uppercase; + color: var(--text-secondary); + border-bottom: 1px solid var(--border); +} + +.file-row { + border-bottom: 1px solid var(--border); +} + +.file-row:hover { + background: var(--background); +} + +.file-row:last-child { + border-bottom: none; +} + +.col-name { + min-width: 0; + overflow: hidden; + text-overflow: ellipsis; + white-space: nowrap; +} + +.col-size { + text-align: right; + font-size: 0.875rem; + color: var(--text-secondary); +} + +.col-actions { + text-align: right; + display: flex; + justify-content: flex-end; + gap: 0.5rem; +} + +.folder-link { + background: none; + border: none; + color: var(--primary); + cursor: pointer; + font-size: inherit; + display: flex; + align-items: center; + gap: 0.5rem; + padding: 0; + text-align: left; +} + +.folder-link:hover { + text-decoration: underline; +} + +.file-name { + display: flex; + align-items: center; + gap: 0.5rem; +} + +.icon { + font-size: 0.75rem; + padding: 0.25rem 0.5rem; + background: var(--background); + border-radius: 4px; + color: var(--text-secondary); +} + +.folder-icon { + background: #fef3c7; + color: #d97706; +} + +.file-icon { + background: #dbeafe; + color: #2563eb; +} + +/* Empty State */ +.empty-state { + padding: 4rem 2rem; + text-align: center; + color: var(--text-secondary); +} + +.empty-state .hint { + font-size: 0.875rem; + margin-top: 0.5rem; +} + +/* Loading */ +.loading { + padding: 2rem; + text-align: center; + color: var(--text-secondary); +} + +/* Messages */ +.error { + color: var(--error); + font-size: 0.875rem; + margin-top: 0.75rem; +} + +.warning { + color: var(--warning); + font-size: 0.875rem; + margin-bottom: 0.75rem; +} + +/* Footer */ +.footer { + text-align: center; + padding: 1rem; + color: var(--text-secondary); + font-size: 0.875rem; + border-top: 1px solid var(--border); +} + +/* Code */ +code { + font-family: monospace; + background: var(--background); + padding: 0.25rem 0.5rem; + border-radius: 4px; + font-size: 0.875rem; +} + +/* Responsive */ +@media (max-width: 900px) { + .dashboard { + grid-template-columns: 1fr; + } + + .sidebar { + flex-direction: row; + flex-wrap: wrap; + } + + .sidebar > div { + flex: 1; + min-width: 250px; + } +} + +@media (max-width: 600px) { + .header { + flex-direction: column; + gap: 1rem; + } + + .explorer-toolbar { + flex-direction: column; + align-items: stretch; + } + + .toolbar-actions { + flex-wrap: wrap; + justify-content: flex-start; + } + + 
.file-header, + .file-row { + grid-template-columns: 1fr 80px; + } + + .col-actions { + display: none; + } +} diff --git a/ipc-storage/ipc-dropbox/src/main.tsx b/ipc-storage/ipc-dropbox/src/main.tsx new file mode 100644 index 0000000000..964aeb4c7e --- /dev/null +++ b/ipc-storage/ipc-dropbox/src/main.tsx @@ -0,0 +1,10 @@ +import React from 'react' +import ReactDOM from 'react-dom/client' +import App from './App' +import './index.css' + +ReactDOM.createRoot(document.getElementById('root')!).render( + + + , +) diff --git a/ipc-storage/ipc-dropbox/src/types.ts b/ipc-storage/ipc-dropbox/src/types.ts new file mode 100644 index 0000000000..a645946e96 --- /dev/null +++ b/ipc-storage/ipc-dropbox/src/types.ts @@ -0,0 +1,57 @@ +export interface Config { + tendermintRpc: string; + objectsListenAddr: string; + nodeOperationObjectApi: string; + ethRpc: string; + blobsActor: string; + admActor: string; + chainId: number; +} + +export interface ObjectMetadata { + key: string; + value: string; +} + +export interface ObjectState { + blobHash: string; + size: bigint; + expiry: bigint; + metadata: ObjectMetadata[]; +} + +export interface ObjectEntry { + key: string; + state: ObjectState; +} + +export interface QueryResult { + objects: ObjectEntry[]; + commonPrefixes: string[]; + nextKey: string; +} + +export interface UploadResponse { + hash: string; + metadata_hash?: string; + metadataHash?: string; +} + +export interface NodeInfo { + node_id: string; +} + +export interface CreditInfo { + balance: bigint; + freeCredit: bigint; + lastDebitEpoch: bigint; +} + +export interface FileItem { + name: string; + fullPath: string; + isFolder: boolean; + size?: bigint; + expiry?: bigint; + blobHash?: string; +} diff --git a/ipc-storage/ipc-dropbox/src/utils/base32.ts b/ipc-storage/ipc-dropbox/src/utils/base32.ts new file mode 100644 index 0000000000..559d6dbb40 --- /dev/null +++ b/ipc-storage/ipc-dropbox/src/utils/base32.ts @@ -0,0 +1,34 @@ +const BASE32_ALPHABET = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ234567'; + +export function base32ToHex(base32: string): string { + // Normalize: uppercase and add padding + let input = base32.toUpperCase(); + const padding = (8 - (input.length % 8)) % 8; + input = input + '='.repeat(padding); + + // Decode base32 + let bits = ''; + for (const char of input) { + if (char === '=') break; + const index = BASE32_ALPHABET.indexOf(char); + if (index === -1) continue; + bits += index.toString(2).padStart(5, '0'); + } + + // Convert bits to bytes + const bytes: number[] = []; + for (let i = 0; i + 8 <= bits.length; i += 8) { + bytes.push(parseInt(bits.slice(i, i + 8), 2)); + } + + // Ensure exactly 32 bytes for hash + while (bytes.length < 32) { + bytes.push(0); + } + if (bytes.length > 32) { + bytes.length = 32; + } + + // Convert to hex + return '0x' + bytes.map(b => b.toString(16).padStart(2, '0')).join(''); +} diff --git a/ipc-storage/ipc-dropbox/src/utils/config.ts b/ipc-storage/ipc-dropbox/src/utils/config.ts new file mode 100644 index 0000000000..cbfbaa02e6 --- /dev/null +++ b/ipc-storage/ipc-dropbox/src/utils/config.ts @@ -0,0 +1,13 @@ +import { Config } from '../types'; + +export function getConfig(): Config { + return { + tendermintRpc: import.meta.env.VITE_TENDERMINT_RPC || 'http://localhost:26657', + objectsListenAddr: import.meta.env.VITE_OBJECTS_LISTEN_ADDR || 'http://localhost:8080', + nodeOperationObjectApi: import.meta.env.VITE_NODE_OPERATION_OBJECT_API || 'http://localhost:8081', + ethRpc: import.meta.env.VITE_ETH_RPC || 'http://localhost:8545', + blobsActor: 
import.meta.env.VITE_BLOBS_ACTOR || '0x6d342defae60f6402aee1f804653bbae4e66ae46', + admActor: import.meta.env.VITE_ADM_ACTOR || '0x7caec36fc8a3a867ca5b80c6acb5e5871d05aa28', + chainId: parseInt(import.meta.env.VITE_CHAIN_ID || '1023102'), + }; +} diff --git a/ipc-storage/ipc-dropbox/src/utils/contracts.ts b/ipc-storage/ipc-dropbox/src/utils/contracts.ts new file mode 100644 index 0000000000..dba564594b --- /dev/null +++ b/ipc-storage/ipc-dropbox/src/utils/contracts.ts @@ -0,0 +1,50 @@ +import { ethers } from 'ethers'; + +// ABI for Blobs Actor +export const BLOBS_ABI = [ + 'function buyCredit() payable', + 'function getAccount(address addr) view returns (tuple(uint64 capacityUsed, uint256 creditFree, uint256 creditCommitted, address creditSponsor, uint64 lastDebitEpoch, tuple(address addr, tuple(uint256 creditLimit, uint256 gasFeeLimit, uint64 expiry, uint256 creditUsed, uint256 gasFeeUsed) approval)[] approvalsTo, tuple(address addr, tuple(uint256 creditLimit, uint256 gasFeeLimit, uint64 expiry, uint256 creditUsed, uint256 gasFeeUsed) approval)[] approvalsFrom, uint64 maxTtl, uint256 gasAllowance))', + 'function getBlob(bytes32 blobHash) view returns (tuple(uint64 size, bytes32 metadataHash, tuple(string id, int64 expiry)[] subscriptions, uint8 status))', +]; + +// Blob status enum values +export enum BlobStatus { + Pending = 0, + Resolved = 1, + Failed = 2, +} + +// ABI for ADM Actor +export const ADM_ABI = [ + 'function createBucket() returns (address)', + 'function listBuckets(address owner) view returns (tuple(uint8 kind, address addr, tuple(string key, string value)[] metadata)[])', + 'event MachineInitialized(uint8 indexed kind, address machineAddress)', +]; + +// ABI for Bucket Actor +export const BUCKET_ABI = [ + 'function addObject(bytes32 source, string key, bytes32 hash, bytes32 recoveryHash, uint64 size)', + 'function getObject(string key) view returns (tuple(bytes32 blobHash, bytes32 recoveryHash, uint64 size, uint64 expiry, tuple(string key, string value)[] metadata))', + 'function deleteObject(string key)', + 'function updateObjectMetadata(string key, tuple(string key, string value)[] metadata)', + 'function queryObjects() view returns (tuple(tuple(string key, tuple(bytes32 blobHash, uint64 size, uint64 expiry, tuple(string key, string value)[] metadata) state)[] objects, string[] commonPrefixes, string nextKey))', + 'function queryObjects(string prefix) view returns (tuple(tuple(string key, tuple(bytes32 blobHash, uint64 size, uint64 expiry, tuple(string key, string value)[] metadata) state)[] objects, string[] commonPrefixes, string nextKey))', + 'function queryObjects(string prefix, string delimiter) view returns (tuple(tuple(string key, tuple(bytes32 blobHash, uint64 size, uint64 expiry, tuple(string key, string value)[] metadata) state)[] objects, string[] commonPrefixes, string nextKey))', + 'function queryObjects(string prefix, string delimiter, string startKey, uint64 limit) view returns (tuple(tuple(string key, tuple(bytes32 blobHash, uint64 size, uint64 expiry, tuple(string key, string value)[] metadata) state)[] objects, string[] commonPrefixes, string nextKey))', + 'function owner() view returns (address)', +]; + +export function getBlobsContract(address: string, signer: ethers.Signer | ethers.Provider) { + return new ethers.Contract(address, BLOBS_ABI, signer); +} + +export function getAdmContract(address: string, signer: ethers.Signer | ethers.Provider) { + return new ethers.Contract(address, ADM_ABI, signer); +} + +export function getBucketContract(address: 
string, signer: ethers.Signer | ethers.Provider) { + return new ethers.Contract(address, BUCKET_ABI, signer); +} + +// Event topic for MachineInitialized +export const MACHINE_INITIALIZED_TOPIC = '0x8f7252642373d5f0b89a0c5cd9cd242e5cd5bb1a36aec623756e4f52a8c1ea6e'; diff --git a/ipc-storage/ipc-dropbox/src/vite-env.d.ts b/ipc-storage/ipc-dropbox/src/vite-env.d.ts new file mode 100644 index 0000000000..bc52dafec7 --- /dev/null +++ b/ipc-storage/ipc-dropbox/src/vite-env.d.ts @@ -0,0 +1,15 @@ +/// + +interface ImportMetaEnv { + readonly VITE_TENDERMINT_RPC: string; + readonly VITE_OBJECTS_LISTEN_ADDR: string; + readonly VITE_NODE_OPERATION_OBJECT_API: string; + readonly VITE_ETH_RPC: string; + readonly VITE_BLOBS_ACTOR: string; + readonly VITE_ADM_ACTOR: string; + readonly VITE_CHAIN_ID: string; +} + +interface ImportMeta { + readonly env: ImportMetaEnv; +} diff --git a/ipc-storage/ipc-dropbox/tsconfig.json b/ipc-storage/ipc-dropbox/tsconfig.json new file mode 100644 index 0000000000..3934b8f6d6 --- /dev/null +++ b/ipc-storage/ipc-dropbox/tsconfig.json @@ -0,0 +1,21 @@ +{ + "compilerOptions": { + "target": "ES2020", + "useDefineForClassFields": true, + "lib": ["ES2020", "DOM", "DOM.Iterable"], + "module": "ESNext", + "skipLibCheck": true, + "moduleResolution": "bundler", + "allowImportingTsExtensions": true, + "resolveJsonModule": true, + "isolatedModules": true, + "noEmit": true, + "jsx": "react-jsx", + "strict": true, + "noUnusedLocals": true, + "noUnusedParameters": true, + "noFallthroughCasesInSwitch": true + }, + "include": ["src"], + "references": [{ "path": "./tsconfig.node.json" }] +} diff --git a/ipc-storage/ipc-dropbox/tsconfig.node.json b/ipc-storage/ipc-dropbox/tsconfig.node.json new file mode 100644 index 0000000000..42872c59f5 --- /dev/null +++ b/ipc-storage/ipc-dropbox/tsconfig.node.json @@ -0,0 +1,10 @@ +{ + "compilerOptions": { + "composite": true, + "skipLibCheck": true, + "module": "ESNext", + "moduleResolution": "bundler", + "allowSyntheticDefaultImports": true + }, + "include": ["vite.config.ts"] +} diff --git a/ipc-storage/ipc-dropbox/vite.config.ts b/ipc-storage/ipc-dropbox/vite.config.ts new file mode 100644 index 0000000000..184cd3c58d --- /dev/null +++ b/ipc-storage/ipc-dropbox/vite.config.ts @@ -0,0 +1,24 @@ +import { defineConfig } from 'vite' +import react from '@vitejs/plugin-react' + +export default defineConfig({ + plugins: [react()], + server: { + port: 3000, + proxy: { + '/api/gateway': { + target: 'http://localhost:8080', + changeOrigin: true, + rewrite: (path) => path.replace(/^\/api\/gateway/, ''), + }, + '/api/node': { + target: 'http://localhost:8081', + changeOrigin: true, + rewrite: (path) => path.replace(/^\/api\/node/, ''), + }, + }, + }, + define: { + 'process.env': {} + } +}) diff --git a/ipc-storage/ipld/Cargo.toml b/ipc-storage/ipld/Cargo.toml new file mode 100644 index 0000000000..f08b697b76 --- /dev/null +++ b/ipc-storage/ipld/Cargo.toml @@ -0,0 +1,25 @@ +[package] +name = "ipc_storage_ipld" +version = "0.1.0" +authors.workspace = true +edition.workspace = true +license.workspace = true + +[lib] +crate-type = ["cdylib", "lib"] + +[dependencies] +anyhow = { workspace = true } +cid = { workspace = true } +fil_actors_runtime = { workspace = true } +fvm_ipld_amt = { workspace = true } +fvm_ipld_blockstore = { workspace = true } +fvm_ipld_encoding = { workspace = true } +fvm_ipld_hamt = { workspace = true } +fvm_shared = { workspace = true } +fvm_sdk = { workspace = true } +integer-encoding = { workspace = true } +serde = { workspace = true, features 
= ["derive"] } + +[features] +fil-actor = ["fil_actors_runtime/fil-actor"] \ No newline at end of file diff --git a/ipc-storage/ipld/src/amt.rs b/ipc-storage/ipld/src/amt.rs new file mode 100644 index 0000000000..f3116c91ef --- /dev/null +++ b/ipc-storage/ipld/src/amt.rs @@ -0,0 +1,9 @@ +// Copyright 2025 Recall Contributors +// Copyright 2022-2024 Protocol Labs +// Copyright 2019-2022 ChainSafe Systems +// SPDX-License-Identifier: Apache-2.0, MIT + +mod core; +pub mod vec; + +pub use vec::Root; diff --git a/ipc-storage/ipld/src/amt/core.rs b/ipc-storage/ipld/src/amt/core.rs new file mode 100644 index 0000000000..2048d7ee39 --- /dev/null +++ b/ipc-storage/ipld/src/amt/core.rs @@ -0,0 +1,162 @@ +// Copyright 2025 Recall Contributors +// Copyright 2022-2024 Protocol Labs +// Copyright 2019-2022 ChainSafe Systems +// SPDX-License-Identifier: Apache-2.0, MIT + +use std::fmt::Debug; + +use anyhow::anyhow; +use cid::Cid; +use fil_actors_runtime::{ActorError, AsActorError}; +use fvm_ipld_amt as amt; +use fvm_ipld_blockstore::Blockstore; +use fvm_shared::error::ExitCode; +use serde::de::DeserializeOwned; +use serde::Serialize; + +/// Wraps a HAMT to provide a convenient map API. +/// Any errors are returned with exit code indicating illegal state. +/// The name is not persisted in state, but adorns any error messages. +pub struct Vec +where + BS: Blockstore, + V: DeserializeOwned + Serialize, +{ + amt: amt::Amt, +} + +/// Configuration options for an AMT instance. +#[derive(Debug, Clone)] +pub struct Config { + /// The `bit_width` drives how wide and high the tree is going to be. + /// Each node in the tree will have `2^bit_width` number of slots for child nodes, + /// and consume `bit_width` number of bits from the hashed keys at each level. + pub bit_width: u32, +} + +impl Default for Config { + fn default() -> Self { + Self { + bit_width: AMT_BIT_WIDTH, + } + } +} + +pub const AMT_BIT_WIDTH: u32 = 5; + +pub const DEFAULT_AMT_CONFIG: Config = Config { + bit_width: AMT_BIT_WIDTH, +}; + +impl Vec +where + BS: Blockstore, + V: DeserializeOwned + Serialize, +{ + /// Creates a new, empty vec. + pub fn empty(store: BS, config: Config) -> Self { + Self { + amt: amt::Amt::new_with_bit_width(store, config.bit_width), + } + } + + /// Creates a new empty vec and flushes it to the store. + /// Returns the CID of the empty vec root. + pub fn flush_empty(store: BS, config: Config) -> Result { + Self::empty(store, config).flush() + } + + /// Loads a vec from the store. + pub fn load(store: BS, root: &Cid) -> Result { + Ok(Self { + amt: amt::Amt::load(root, store) + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to load AMT with root '{}'", root) + })?, + }) + } + + /// Flushes the vec's contents to the store. + /// Returns the root node CID. + pub fn flush(&mut self) -> Result { + self.amt + .flush() + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || "failed to flush AMT") + } + + /// Returns a reference to the value at the given index, if present. + pub fn get(&self, index: u64) -> Result, ActorError> { + self.amt + .get(index) + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to get from AMT at index {}", index) + }) + } + + /// Inserts a value into the vec at the given index. 
+ pub fn set(&mut self, index: u64, value: V) -> Result<(), ActorError> + where + V: PartialEq, + { + self.amt + .set(index, value) + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to set AMT at index {}", index) + }) + } + + /// Deletes a value from the vec at the given index. + pub fn delete(&mut self, index: u64) -> Result, ActorError> { + self.amt + .delete(index) + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to delete from AMT at index {}", index) + }) + } + + /// Returns the height of the vec. + pub fn height(&self) -> u32 { + self.amt.height() + } + + /// Returns count of elements in the vec. + pub fn count(&self) -> u64 { + self.amt.count() + } + + /// Iterates and runs a function over values in the vec starting at an index up to a limit. + /// Returns the index if there are more items. + pub fn for_each_while_ranged( + &self, + start_at: Option, + limit: Option, + mut f: F, + ) -> Result<(u64, Option), ActorError> + where + F: FnMut(u64, &V) -> Result, + { + match self + .amt + .for_each_while_ranged(start_at, limit, |i, v| f(i, v).map_err(|e| anyhow!(e))) + { + Ok((traversed, next)) => Ok((traversed, next)), + Err(amt_err) => self.map_amt_error(amt_err), + } + } + + fn map_amt_error(&self, amt_err: amt::Error) -> Result { + match amt_err { + amt::Error::Dynamic(e) => match e.downcast::() { + Ok(actor_error) => Err(actor_error), + Err(e) => Err(ActorError::illegal_state(format!( + "error in callback traversing AMT: {}", + e + ))), + }, + e => Err(ActorError::illegal_state(format!( + "error traversing AMT: {}", + e + ))), + } + } +} diff --git a/ipc-storage/ipld/src/amt/vec.rs b/ipc-storage/ipld/src/amt/vec.rs new file mode 100644 index 0000000000..5d0030c242 --- /dev/null +++ b/ipc-storage/ipld/src/amt/vec.rs @@ -0,0 +1,155 @@ +// Copyright 2025 Recall Contributors +// Copyright 2022-2024 Protocol Labs +// Copyright 2019-2022 ChainSafe Systems +// SPDX-License-Identifier: Apache-2.0, MIT + +use cid::Cid; +use fil_actors_runtime::ActorError; +use fvm_ipld_blockstore::Blockstore; +use fvm_ipld_encoding::tuple::*; +use serde::de::DeserializeOwned; +use serde::Serialize; +use std::marker::PhantomData; + +use super::core::{Vec, DEFAULT_AMT_CONFIG}; + +#[derive(Clone, PartialEq, Debug, Serialize_tuple, Deserialize_tuple)] +pub struct Root +where + V: DeserializeOwned + Serialize + PartialEq + Clone, +{ + cid: Cid, + #[serde(skip)] + value_type: PhantomData, +} + +impl Root +where + V: DeserializeOwned + Serialize + PartialEq + Clone, +{ + pub fn new(store: BS) -> Result { + Amt::::flush_empty(store) + } + + pub fn from_cid(cid: Cid) -> Self { + Self { + cid, + value_type: Default::default(), + } + } + + pub fn amt<'a, BS: Blockstore>(&self, store: BS) -> Result, ActorError> { + Amt::load(store, &self.cid) + } + + pub fn cid(&self) -> &Cid { + &self.cid + } +} + +pub struct Amt<'a, BS, V> +where + BS: Blockstore, + V: DeserializeOwned + Serialize + PartialEq + Clone, +{ + vec: Vec, + _marker: PhantomData<&'a BS>, +} + +#[derive(Debug, Clone)] +pub struct TrackedFlushResult +where + V: DeserializeOwned + Serialize + PartialEq + Clone, +{ + pub root: Root, +} + +impl Amt<'_, BS, V> +where + BS: Blockstore, + V: DeserializeOwned + Serialize + PartialEq + Clone, +{ + fn load(store: BS, root: &Cid) -> Result { + let vec = Vec::::load(store, root)?; + Ok(Self { + vec, + _marker: Default::default(), + }) + } + + pub fn get(&self, index: u64) -> Result, ActorError> { + self.vec.get(index).map(|value| value.cloned()) + } + + pub fn 
get_or_err(&self, index: u64) -> Result { + self.get(index)? + .ok_or_else(|| ActorError::not_found(format!("value at index {} not found", index))) + } + + pub fn set(&mut self, index: u64, value: V) -> Result<(), ActorError> { + self.vec.set(index, value) + } + + pub fn set_and_flush(&mut self, index: u64, value: V) -> Result, ActorError> { + self.set(index, value)?; + let cid = self.vec.flush()?; + Ok(Root::from_cid(cid)) + } + + pub fn set_and_flush_tracked( + &mut self, + index: u64, + value: V, + ) -> Result, ActorError> { + let root = self.set_and_flush(index, value)?; + Ok(TrackedFlushResult { root }) + } + + pub fn delete(&mut self, index: u64) -> Result, ActorError> { + self.vec.delete(index) + } + + pub fn delete_and_flush(&mut self, index: u64) -> Result, ActorError> { + self.delete(index)?; + let cid = self.vec.flush()?; + Ok(Root::from_cid(cid)) + } + + pub fn delete_and_flush_tracked( + &mut self, + index: u64, + ) -> Result, ActorError> { + let root = self.delete_and_flush(index)?; + Ok(TrackedFlushResult { root }) + } + + pub fn flush(&mut self) -> Result, ActorError> { + let cid = self.vec.flush()?; + Ok(Root::from_cid(cid)) + } + + pub fn flush_empty(store: BS) -> Result, ActorError> { + let cid = Vec::::flush_empty(store, DEFAULT_AMT_CONFIG)?; + Ok(Root::from_cid(cid)) + } + + pub fn height(&self) -> u32 { + self.vec.height() + } + + pub fn count(&self) -> u64 { + self.vec.count() + } + + pub fn for_each_while_ranged( + &self, + start_at: Option, + limit: Option, + mut f: F, + ) -> Result<(u64, Option), ActorError> + where + F: FnMut(u64, &V) -> Result, + { + self.vec.for_each_while_ranged(start_at, limit, &mut f) + } +} diff --git a/ipc-storage/ipld/src/hamt.rs b/ipc-storage/ipld/src/hamt.rs new file mode 100644 index 0000000000..1cb241d348 --- /dev/null +++ b/ipc-storage/ipld/src/hamt.rs @@ -0,0 +1,13 @@ +// Copyright 2025 Recall Contributors +// Copyright 2022-2024 Protocol Labs +// Copyright 2019-2022 ChainSafe Systems +// SPDX-License-Identifier: Apache-2.0, MIT + +mod core; +pub mod map; + +pub use core::Map; +pub use core::MapKey; +pub use core::DEFAULT_HAMT_CONFIG; +pub use fvm_ipld_hamt::{BytesKey, Error}; +pub use map::Root; diff --git a/ipc-storage/ipld/src/hamt/core.rs b/ipc-storage/ipld/src/hamt/core.rs new file mode 100644 index 0000000000..c09029fa2f --- /dev/null +++ b/ipc-storage/ipld/src/hamt/core.rs @@ -0,0 +1,416 @@ +// Copyright 2025 Recall Contributors +// Copyright 2022-2024 Protocol Labs +// Copyright 2019-2022 ChainSafe Systems +// SPDX-License-Identifier: Apache-2.0, MIT + +use std::fmt::Debug; +use std::marker::PhantomData; + +use crate::hamt::BytesKey; +use crate::Hasher; +use anyhow::anyhow; +use cid::Cid; +use fil_actors_runtime::{ActorError, AsActorError}; +use fvm_ipld_blockstore::Blockstore; +use fvm_ipld_hamt as hamt; +use fvm_ipld_hamt::Error; +use fvm_shared::address::Address; +use fvm_shared::error::ExitCode; +use integer_encoding::VarInt; +use serde::de::DeserializeOwned; +use serde::Serialize; + +/// Wraps a HAMT to provide a convenient map API. +/// Any errors are returned with exit code indicating illegal state. +/// The name is not persisted in state, but adorns any error messages. 
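The AMT wrapper above pairs a typed `Root` (a CID tagged with the value type) with an `Amt` handle loaded from a blockstore. A minimal usage sketch, assuming the crate is consumed as `ipc_storage_ipld` and using `MemoryBlockstore` purely for illustration; the generic parameter order follows the where-clauses above:

```rust
use fil_actors_runtime::ActorError;
use fvm_ipld_blockstore::MemoryBlockstore;
use ipc_storage_ipld::amt::Root;

fn amt_round_trip() -> Result<(), ActorError> {
    let store = MemoryBlockstore::new();
    // Create an empty AMT and persist its typed root.
    let root: Root<String> = Root::new(&store)?;
    // Open a handle, write index 0, and flush to obtain the new root.
    let mut amt = root.amt(&store)?;
    let root = amt.set_and_flush(0, "hello".to_string())?;
    // Re-open from the returned root and read the value back.
    let amt = root.amt(&store)?;
    assert_eq!(amt.get(0)?, Some("hello".to_string()));
    assert_eq!(amt.count(), 1);
    Ok(())
}
```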
+pub struct Map +where + BS: Blockstore, + K: MapKey, + V: DeserializeOwned + Serialize, +{ + hamt: hamt::Hamt, + name: String, + key_type: PhantomData, +} + +pub trait MapKey: Sized + Debug { + fn from_bytes(b: &[u8]) -> Result; + fn to_bytes(&self) -> Result, String>; +} + +pub type Config = hamt::Config; + +pub const DEFAULT_HAMT_CONFIG: Config = Config { + bit_width: 5, + min_data_depth: 2, + max_array_width: 1, +}; + +impl Map +where + BS: Blockstore, + K: MapKey, + V: DeserializeOwned + Serialize, +{ + pub fn name(&self) -> String { + self.name.clone() + } + + /// Creates a new, empty map. + pub fn empty(store: BS, config: Config, name: String) -> Self { + Self { + hamt: hamt::Hamt::new_with_config(store, config), + name, + key_type: Default::default(), + } + } + + /// Creates a new empty map and flushes it to the store. + /// Returns the CID of the empty map root. + pub fn flush_empty(store: BS, config: Config) -> Result { + // This CID is constant regardless of the HAMT's configuration, so as an optimization, + // we could hard-code it and merely check it is already stored. + Self::empty(store, config, "empty".into()).flush() + } + + /// Loads a map from the store. + // There is no version of this method that doesn't take an explicit config parameter. + // The caller must know the configuration to interpret the HAMT correctly. + // Forcing them to provide it makes it harder to accidentally use an incorrect default. + pub fn load(store: BS, root: &Cid, config: Config, name: String) -> Result { + Ok(Self { + hamt: hamt::Hamt::load_with_config(root, store, config) + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to load HAMT '{}'", name) + })?, + name, + key_type: Default::default(), + }) + } + + /// Flushes the map's contents to the store. + /// Returns the root node CID. + pub fn flush(&mut self) -> Result { + self.hamt + .flush() + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to flush HAMT '{}'", self.name) + }) + } + + /// Returns a reference to the value associated with a key, if present. + pub fn get(&self, key: &K) -> Result, ActorError> { + let k = key + .to_bytes() + .context_code(ExitCode::USR_ASSERTION_FAILED, "invalid key")?; + self.hamt + .get(&k) + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to get key {key:?} from HAMT '{}'", self.name) + }) + } + + pub fn contains_key(&self, key: &K) -> Result { + let k = key + .to_bytes() + .context_code(ExitCode::USR_ASSERTION_FAILED, "invalid key")?; + self.hamt + .contains_key(&k) + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to check key {key:?} in HAMT '{}'", self.name) + }) + } + + /// Inserts a key-value pair into the map. + /// Returns any value previously associated with the key. + pub fn set(&mut self, key: &K, value: V) -> Result, ActorError> + where + V: PartialEq, + { + let k = key + .to_bytes() + .context_code(ExitCode::USR_ASSERTION_FAILED, "invalid key")?; + self.hamt + .set(k.into(), value) + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to set key {key:?} in HAMT '{}'", self.name) + }) + } + + /// Inserts a key-value pair only if the key does not already exist. + /// Returns whether the map was modified (i.e. key was absent). 
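The `MapKey` trait above is the extension point for key encodings; impls for `Vec<u8>`, `String`, `u64`, `i64`, `Address` and `Cid` appear further down in this file. As a hedged sketch, a fixed-width custom key could be added the same way (`BlobId` is a hypothetical type, not part of this change):

```rust
use ipc_storage_ipld::hamt::MapKey;

// Hypothetical 32-byte key type, e.g. a raw hash.
#[derive(Debug, Clone, PartialEq)]
pub struct BlobId(pub [u8; 32]);

impl MapKey for BlobId {
    fn from_bytes(b: &[u8]) -> Result<Self, String> {
        let bytes: [u8; 32] = b
            .try_into()
            .map_err(|_| format!("expected 32 bytes, got {}", b.len()))?;
        Ok(BlobId(bytes))
    }

    fn to_bytes(&self) -> Result<Vec<u8>, String> {
        Ok(self.0.to_vec())
    }
}
```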
+ pub fn set_if_absent(&mut self, key: &K, value: V) -> Result + where + V: PartialEq, + { + let k = key + .to_bytes() + .context_code(ExitCode::USR_ASSERTION_FAILED, "invalid key")?; + self.hamt + .set_if_absent(k.into(), value) + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to set key {key:?} in HAMT '{}'", self.name) + }) + } + + pub fn delete(&mut self, key: &K) -> Result, ActorError> { + let k = key + .to_bytes() + .with_context_code(ExitCode::USR_ASSERTION_FAILED, || { + format!("invalid key {key:?}") + })?; + self.hamt + .delete(&k) + .map(|delete_result| delete_result.map(|(_k, v)| v)) + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to delete key {key:?} from HAMT '{}'", self.name) + }) + } + + /// Iterates over all key-value pairs in the map. + #[allow(clippy::blocks_in_conditions)] + pub fn for_each(&self, mut f: F) -> Result<(), ActorError> + where + // Note the result type of F uses ActorError. + // The implementation will extract and propagate any ActorError + // wrapped in a hamt::Error::Dynamic. + F: FnMut(K, &V) -> Result<(), ActorError>, + { + match self.hamt.for_each(|k, v| { + let key = K::from_bytes(k).context_code(ExitCode::USR_ILLEGAL_STATE, "invalid key")?; + f(key, v).map_err(|e| anyhow!(e)) + }) { + Ok(_) => Ok(()), + Err(hamt_err) => self.map_hamt_error(hamt_err), + } + } + + /// Iterates over key-value pairs in the map starting at a key up to a max. + /// Returns the next key if there are more items in the map. + #[allow(clippy::blocks_in_conditions)] + pub fn for_each_ranged( + &self, + starting_key: Option<&hamt::BytesKey>, + max: Option, + mut f: F, + ) -> Result<(usize, Option), ActorError> + where + // Note the result type of F uses ActorError. + // The implementation will extract and propagate any ActorError + // wrapped in a hamt::Error::Dynamic. + F: FnMut(K, &V) -> Result, + { + match self.inner_for_each_ranged(starting_key, max, |k, v| { + let key = K::from_bytes(k).context_code(ExitCode::USR_ILLEGAL_STATE, "invalid key")?; + f(key, v).map_err(|e| anyhow!(e)) + }) { + Ok((traversed, next)) => { + let next = if let Some(next) = next { + Some( + K::from_bytes(&next) + .context_code(ExitCode::USR_ILLEGAL_STATE, "invalid key")?, + ) + } else { + None + }; + Ok((traversed, next)) + } + Err(hamt_err) => self.map_hamt_error(hamt_err), + } + } + + fn inner_for_each_ranged( + &self, + starting_key: Option<&hamt::BytesKey>, + max: Option, + mut f: F, + ) -> Result<(usize, Option), Error> + where + F: FnMut(&hamt::BytesKey, &V) -> anyhow::Result, + { + let mut iter = match starting_key { + Some(key) => self.hamt.iter_from(key)?, + None => self.hamt.iter(), + } + .fuse(); + + let mut traversed = 0usize; + let limit = max.unwrap_or(usize::MAX); + loop { + if traversed >= limit { + break; + } + + match iter.next() { + Some(res) => { + let (k, v) = res?; + if !(f)(k, v)? { + continue; + } + traversed += 1; + } + None => break, + } + } + let next = iter.next().transpose()?.map(|kv| kv.0).cloned(); + Ok((traversed, next)) + } + + /// Iterates over key-value pairs in the map starting at a key up to an ending_key (included). 
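`for_each_ranged` above is the building block for paginated reads: it visits at most `max` entries and hands back the next key when the map holds more. A hedged pagination sketch; the imports, the concrete key/value types, and the conversion back to a `BytesKey` are assumptions that mirror the unit test further down:

```rust
use fil_actors_runtime::ActorError;
use fvm_ipld_blockstore::MemoryBlockstore;
use fvm_ipld_hamt::BytesKey;
use ipc_storage_ipld::hamt::Map;

fn collect_keys_in_pages(
    map: &Map<MemoryBlockstore, u64, String>,
    page_size: usize,
) -> Result<Vec<u64>, ActorError> {
    let mut keys = Vec::new();
    let mut start: Option<BytesKey> = None;
    loop {
        let (_traversed, next) = map.for_each_ranged(start.as_ref(), Some(page_size), |k, _v| {
            keys.push(k);
            Ok(true) // `true` counts this entry toward the page limit
        })?;
        match next {
            // Convert the typed key back into the raw byte key for the next page.
            Some(k) => start = Some(BytesKey::from(k.to_bytes().map_err(ActorError::illegal_state)?)),
            None => break,
        }
    }
    Ok(keys)
}
```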
+ #[allow(clippy::blocks_in_conditions)] + pub fn for_each_until( + &self, + starting_key: Option<&hamt::BytesKey>, + ending_key: &hamt::BytesKey, + mut f: F, + ) -> Result<(), ActorError> + where + F: FnMut(K, &V) -> Result<(), ActorError>, + { + let iter = match starting_key { + Some(key) => self.hamt.iter_from(key).map_err(|error| { + ActorError::illegal_state(format!("error traversing HAMT {}: {}", self.name, error)) + })?, + None => self.hamt.iter(), + }; + for res in iter.fuse().by_ref() { + match res { + Ok((k, v)) => { + if k.le(ending_key) { + let k = K::from_bytes(k) + .context_code(ExitCode::USR_ILLEGAL_STATE, "invalid key")?; + f(k, v)?; + } + } + Err(hamt_err) => { + return self.map_hamt_error(hamt_err); + } + } + } + Ok(()) + } + + pub fn iter(&self) -> hamt::Iter { + self.hamt.iter() + } + + pub fn is_empty(&self) -> bool { + self.hamt.is_empty() + } + + fn map_hamt_error(&self, hamt_err: hamt::Error) -> Result { + match hamt_err { + hamt::Error::Dynamic(e) => match e.downcast::() { + Ok(actor_error) => Err(actor_error), + Err(e) => Err(ActorError::illegal_state(format!( + "error in callback traversing HAMT {}: {}", + self.name, e + ))), + }, + e => Err(ActorError::illegal_state(format!( + "error traversing HAMT {}: {}", + self.name, e + ))), + } + } +} + +impl MapKey for Vec { + fn from_bytes(b: &[u8]) -> Result { + Ok(b.to_vec()) + } + + fn to_bytes(&self) -> Result, String> { + Ok(self.clone()) + } +} + +impl MapKey for String { + fn from_bytes(b: &[u8]) -> Result { + String::from_utf8(b.to_vec()).map_err(|e| e.to_string()) + } + + fn to_bytes(&self) -> Result, String> { + Ok(self.as_bytes().to_vec()) + } +} + +impl MapKey for u64 { + fn from_bytes(b: &[u8]) -> Result { + if let Some((result, size)) = VarInt::decode_var(b) { + if size != b.len() { + return Err(format!("trailing bytes after varint in {:?}", b)); + } + Ok(result) + } else { + Err(format!("failed to decode varint in {:?}", b)) + } + } + + fn to_bytes(&self) -> Result, String> { + Ok(self.encode_var_vec()) + } +} + +impl MapKey for i64 { + fn from_bytes(b: &[u8]) -> Result { + if let Some((result, size)) = VarInt::decode_var(b) { + if size != b.len() { + return Err(format!("trailing bytes after varint in {:?}", b)); + } + Ok(result) + } else { + Err(format!("failed to decode varint in {:?}", b)) + } + } + + fn to_bytes(&self) -> Result, String> { + Ok(self.encode_var_vec()) + } +} + +impl MapKey for Address { + fn from_bytes(b: &[u8]) -> Result { + Address::from_bytes(b).map_err(|e| e.to_string()) + } + + fn to_bytes(&self) -> Result, String> { + Ok(Address::to_bytes(*self)) + } +} + +impl MapKey for Cid { + fn from_bytes(b: &[u8]) -> Result { + Cid::try_from(b).map_err(|e| e.to_string()) + } + + fn to_bytes(&self) -> Result, String> { + Ok(self.to_bytes()) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use fvm_ipld_blockstore::MemoryBlockstore; + + #[test] + fn basic_put_get() { + let bs = MemoryBlockstore::new(); + let mut m = Map::<_, u64, String>::empty(bs, DEFAULT_HAMT_CONFIG, "empty".into()); + m.set(&1234, "1234".to_string()).unwrap(); + assert!(m.get(&2222).unwrap().is_none()); + assert_eq!(&"1234".to_string(), m.get(&1234).unwrap().unwrap()); + } + + #[test] + fn for_each_callback_exitcode_propagates() { + let bs = MemoryBlockstore::new(); + let mut m = Map::<_, u64, String>::empty(bs, DEFAULT_HAMT_CONFIG, "empty".into()); + m.set(&1234, "1234".to_string()).unwrap(); + let res = m.for_each(|_, _| Err(ActorError::forbidden("test".to_string()))); + assert!(res.is_err()); + 
assert_eq!(res.unwrap_err(), ActorError::forbidden("test".to_string())); + } +} diff --git a/ipc-storage/ipld/src/hamt/map.rs b/ipc-storage/ipld/src/hamt/map.rs new file mode 100644 index 0000000000..10ecb3608a --- /dev/null +++ b/ipc-storage/ipld/src/hamt/map.rs @@ -0,0 +1,248 @@ +// Copyright 2025 Recall Contributors +// Copyright 2022-2024 Protocol Labs +// Copyright 2019-2022 ChainSafe Systems +// SPDX-License-Identifier: Apache-2.0, MIT + +use std::fmt::Display; +use std::marker::PhantomData; + +use cid::Cid; +use fil_actors_runtime::ActorError; +use fvm_ipld_blockstore::Blockstore; +use fvm_ipld_encoding::tuple::*; +use fvm_ipld_hamt::{BytesKey, Iter}; +use serde::de::DeserializeOwned; +use serde::Serialize; + +use super::core::{Map, MapKey, DEFAULT_HAMT_CONFIG}; +use crate::Hasher; + +#[derive(Clone, PartialEq, Debug, Serialize_tuple, Deserialize_tuple)] +pub struct Root +where + K: MapKey + Display, + V: DeserializeOwned + Serialize + PartialEq + Clone, +{ + cid: Cid, + name: String, + #[serde(skip)] + key_type: PhantomData, + #[serde(skip)] + value_type: PhantomData, +} + +impl Root +where + K: MapKey + Display, + V: DeserializeOwned + Serialize + PartialEq + Clone, +{ + pub fn new(store: BS, name: &str) -> Result { + Hamt::::flush_empty(store, name.to_owned()) + } + + pub fn from_cid(cid: Cid, name: String) -> Self { + Self { + cid, + name, + key_type: Default::default(), + value_type: Default::default(), + } + } + + pub fn hamt<'a, BS: Blockstore>( + &self, + store: BS, + size: u64, + ) -> Result, ActorError> { + Hamt::load(store, &self.cid, self.name.clone(), size) + } + + pub fn cid(&self) -> &Cid { + &self.cid + } + + pub fn name(&self) -> &str { + &self.name + } +} + +pub struct Hamt<'a, BS, K, V> +where + BS: Blockstore, + K: MapKey + Display, + V: DeserializeOwned + Serialize + PartialEq + Clone, +{ + map: Map, + size: u64, + _marker: PhantomData<&'a BS>, +} + +#[derive(Debug, Clone)] +pub struct TrackedFlushResult +where + K: MapKey + Display, + V: DeserializeOwned + Serialize + PartialEq + Clone, +{ + pub root: Root, + pub size: u64, +} + +impl Hamt<'_, BS, K, V> +where + BS: Blockstore, + K: MapKey + Display, + V: DeserializeOwned + Serialize + PartialEq + Clone, +{ + fn load(store: BS, root: &Cid, name: String, size: u64) -> Result { + let map = Map::::load(store, root, DEFAULT_HAMT_CONFIG, name)?; + Ok(Self { + map, + size, + _marker: Default::default(), + }) + } + + pub fn get(&self, key: &K) -> Result, ActorError> { + self.map.get(key).map(|value| value.cloned()) + } + + pub fn set(&mut self, key: &K, value: V) -> Result, ActorError> { + let previous = self.map.set(key, value)?; + if previous.is_none() { + self.size = self.size.saturating_add(1); + } + Ok(previous) + } + + pub fn set_if_absent(&mut self, key: &K, value: V) -> Result { + let was_absent = self.map.set_if_absent(key, value.clone())?; + if was_absent { + self.size = self.size.saturating_add(1); + } + Ok(was_absent) + } + + pub fn set_and_flush(&mut self, key: &K, value: V) -> Result, ActorError> { + self.set(key, value)?; + let cid = self.map.flush()?; + Ok(Root::from_cid(cid, self.map.name())) + } + + pub fn set_and_flush_tracked( + &mut self, + key: &K, + value: V, + ) -> Result, ActorError> { + let root = self.set_and_flush(key, value)?; + Ok(TrackedFlushResult { + root, + size: self.size, + }) + } + + pub fn get_or_err(&self, key: &K) -> Result { + self.get(key)?.ok_or_else(|| { + ActorError::not_found(format!("{} not found in {}", key, self.map.name())) + }) + } + + pub fn get_or_create(&self, key: 
&K, create_fn: F) -> Result + where + F: FnOnce() -> Result, + { + if let Some(value) = self.map.get(key)? { + Ok(value.clone()) + } else { + Ok(create_fn()?) + } + } + + pub fn contains_key(&self, key: &K) -> Result { + self.map.contains_key(key) + } + + pub fn delete(&mut self, key: &K) -> Result, ActorError> { + let deleted = self.map.delete(key)?; + if deleted.is_some() { + self.size = self.size.saturating_sub(1); + } + Ok(deleted) + } + + pub fn delete_and_flush(&mut self, key: &K) -> Result<(Root, Option), ActorError> { + let deleted = self.delete(key)?; + let cid = self.map.flush()?; + Ok((Root::from_cid(cid, self.map.name()), deleted)) + } + + pub fn delete_and_flush_tracked( + &mut self, + key: &K, + ) -> Result<(TrackedFlushResult, Option), ActorError> { + let (root, deleted) = self.delete_and_flush(key)?; + Ok(( + TrackedFlushResult { + root, + size: self.size, + }, + deleted, + )) + } + + pub fn flush(&mut self) -> Result, ActorError> { + let cid = self.map.flush()?; + Ok(Root::from_cid(cid, self.map.name())) + } + + pub fn flush_empty(store: BS, name: String) -> Result, ActorError> { + let cid = Map::::flush_empty(store, DEFAULT_HAMT_CONFIG)?; + Ok(Root::from_cid(cid, name)) + } + + pub fn flush_tracked(&mut self) -> Result, ActorError> { + let root = self.flush()?; + Ok(TrackedFlushResult { + root, + size: self.size, + }) + } + + pub fn is_empty(&self) -> bool { + self.map.is_empty() + } + + pub fn for_each(&self, mut f: F) -> Result<(), ActorError> + where + F: FnMut(K, &V) -> Result<(), ActorError>, + { + self.map.for_each(&mut f) + } + + pub fn for_each_ranged( + &self, + starting_key: Option<&BytesKey>, + max: Option, + mut f: F, + ) -> Result<(usize, Option), ActorError> + where + F: FnMut(K, &V) -> Result, + { + self.map.for_each_ranged(starting_key, max, &mut f) + } + + pub fn for_each_until( + &self, + starting_key: Option<&BytesKey>, + ending_key: &BytesKey, + mut f: F, + ) -> Result<(), ActorError> + where + F: FnMut(K, &V) -> Result<(), ActorError>, + { + self.map.for_each_until(starting_key, ending_key, &mut f) + } + + pub fn iter(&self) -> Iter { + self.map.iter() + } +} diff --git a/ipc-storage/ipld/src/hash_algorithm.rs b/ipc-storage/ipld/src/hash_algorithm.rs new file mode 100644 index 0000000000..a72e58166d --- /dev/null +++ b/ipc-storage/ipld/src/hash_algorithm.rs @@ -0,0 +1,44 @@ +// Copyright 2022-2024 Protocol Labs +// Copyright 2019-2022 ChainSafe Systems +// SPDX-License-Identifier: Apache-2.0, MIT + +// use fvm_ipld_hamt::{Hash, HashAlgorithm, HashedKey}; +use fvm_ipld_hamt::{Hash, HashAlgorithm}; +use fvm_sdk as fvm; +use fvm_shared::crypto::hash::SupportedHashes; +use std::hash::Hasher; + +pub type HashedKey = [u8; 32]; + +#[derive(Default)] +struct RuntimeHasherWrapper(pub Vec); + +/// This Hasher impl only intercepts key bytes. Is used only together with FvmHashSha256 below. 
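The `Root`/`Hamt` pair defined in hamt/map.rs above adds size tracking on top of the core map: mutating calls with the `_tracked` suffix return both the new root and the updated entry count. A minimal sketch under the same assumptions as the earlier examples:

```rust
use fil_actors_runtime::ActorError;
use fvm_ipld_blockstore::MemoryBlockstore;
use ipc_storage_ipld::hamt::Root;

fn tracked_insert() -> Result<(), ActorError> {
    let store = MemoryBlockstore::new();
    // Create an empty, named HAMT and keep its typed root.
    let root: Root<String, u64> = Root::new(&store, "balances")?;
    // Load a handle, telling it the current entry count (0 for a fresh map).
    let mut map = root.hamt(&store, 0)?;
    // Insert and flush in one step; the result carries the new root and size.
    let key = "alice".to_string();
    let tracked = map.set_and_flush_tracked(&key, 42u64)?;
    assert_eq!(tracked.size, 1);
    assert_ne!(tracked.root.cid(), root.cid());
    Ok(())
}
```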
+impl Hasher for RuntimeHasherWrapper { + fn finish(&self) -> u64 { + // u64 hash not used in hamt + 0 + } + + fn write(&mut self, bytes: &[u8]) { + self.0.extend_from_slice(bytes); + } +} + +#[derive(Default, Debug)] +pub struct FvmHashSha256; + +impl HashAlgorithm for FvmHashSha256 { + fn hash(key: &X) -> HashedKey + where + X: Hash + ?Sized, + { + let mut rval_digest: HashedKey = Default::default(); + let mut hasher = RuntimeHasherWrapper::default(); + key.hash(&mut hasher); + + fvm::crypto::hash_into(SupportedHashes::Sha2_256, &hasher.0, &mut rval_digest); + + rval_digest + } +} diff --git a/ipc-storage/ipld/src/lib.rs b/ipc-storage/ipld/src/lib.rs new file mode 100644 index 0000000000..b6aef499aa --- /dev/null +++ b/ipc-storage/ipld/src/lib.rs @@ -0,0 +1,19 @@ +// Copyright 2025 Recall Contributors +// Copyright 2022-2024 Protocol Labs +// Copyright 2019-2022 ChainSafe Systems +// SPDX-License-Identifier: Apache-2.0, MIT + +#[cfg(feature = "fil-actor")] +use crate::hash_algorithm::FvmHashSha256; +#[cfg(not(feature = "fil-actor"))] +use fvm_ipld_hamt::Sha256; + +pub mod amt; +pub mod hamt; +mod hash_algorithm; + +#[cfg(feature = "fil-actor")] +type Hasher = FvmHashSha256; + +#[cfg(not(feature = "fil-actor"))] +type Hasher = Sha256; diff --git a/ipc-storage/iroh_manager/Cargo.toml b/ipc-storage/iroh_manager/Cargo.toml new file mode 100644 index 0000000000..623d4ed6ed --- /dev/null +++ b/ipc-storage/iroh_manager/Cargo.toml @@ -0,0 +1,23 @@ +[package] +name = "iroh_manager" +version = "0.1.0" +authors.workspace = true +edition.workspace = true +license.workspace = true + +[dependencies] +anyhow = { workspace = true } +iroh = { workspace = true } +iroh-blobs = { workspace = true } +iroh-quinn = { workspace = true } +iroh-relay = { workspace = true } +n0-future = { workspace = true } +num-traits = { workspace = true } +quic-rpc = { workspace = true, features = ["quinn-transport", "test-utils"] } +tokio = { workspace = true } +tracing = { workspace = true } +url = { workspace = true } + +[dev-dependencies] +tempfile = { workspace = true } +tracing-subscriber = { workspace = true } diff --git a/ipc-storage/iroh_manager/src/lib.rs b/ipc-storage/iroh_manager/src/lib.rs new file mode 100644 index 0000000000..10becf887c --- /dev/null +++ b/ipc-storage/iroh_manager/src/lib.rs @@ -0,0 +1,70 @@ +// Copyright 2025 Recall Contributors +// Copyright 2022-2024 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT + +use anyhow::{anyhow, Result}; +use iroh_blobs::hashseq::HashSeq; +use iroh_blobs::rpc::client::blobs::BlobStatus; +use iroh_blobs::Hash; +use num_traits::Zero; + +mod manager; +mod node; + +pub use self::manager::{connect as connect_rpc, BlobsRpcClient, IrohManager}; +pub use self::node::IrohNode; +pub use quic_rpc::Connector; + +pub type BlobsClient = iroh_blobs::rpc::client::blobs::Client; + +/// Returns the user blob hash and size from the hash sequence. +/// The user blob hash is the first hash in the sequence. 
+pub async fn get_blob_hash_and_size( + iroh: &BlobsClient, + seq_hash: Hash, +) -> Result<(Hash, u64), anyhow::Error> { + // Get the hash sequence status (it needs to be available) + let status = iroh.status(seq_hash).await.map_err(|e| { + anyhow!( + "failed to get status for hash sequence object: {} {}", + seq_hash, + e + ) + })?; + let BlobStatus::Complete { size } = status else { + return Err(anyhow!( + "hash sequence object {} is not available", + seq_hash + )); + }; + if size.is_zero() { + return Err(anyhow!("hash sequence object {} has zero size", seq_hash)); + } + + // Read the bytes and create a hash sequence + let res = iroh + .read_to_bytes(seq_hash) + .await + .map_err(|e| anyhow!("failed to read hash sequence object: {} {}", seq_hash, e))?; + let hash_seq = HashSeq::try_from(res) + .map_err(|e| anyhow!("failed to parse hash sequence object: {} {}", seq_hash, e))?; + + // Get the user blob status at index 0 (it needs to be available) + let blob_hash = hash_seq.get(0).ok_or_else(|| { + anyhow!( + "failed to get hash with index 0 from hash sequence object: {}", + seq_hash + ) + })?; + let status = iroh + .status(blob_hash) + .await + .map_err(|e| anyhow!("failed to read object: {} {}", blob_hash, e))?; + + // Finally, get the size from the status + let BlobStatus::Complete { size } = status else { + return Err(anyhow!("object {} is not available", blob_hash)); + }; + + Ok((blob_hash, size)) +} diff --git a/ipc-storage/iroh_manager/src/manager.rs b/ipc-storage/iroh_manager/src/manager.rs new file mode 100644 index 0000000000..af206e3be1 --- /dev/null +++ b/ipc-storage/iroh_manager/src/manager.rs @@ -0,0 +1,140 @@ +// Copyright 2025 Recall Contributors +// Copyright 2022-2024 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT + +use std::net::{SocketAddr, SocketAddrV4, SocketAddrV6}; +use std::path::Path; + +use anyhow::Result; +use iroh_blobs::rpc::proto::RpcService; +use n0_future::task::AbortOnDropHandle; +use quic_rpc::client::QuinnConnector; +use tracing::info; + +use crate::{BlobsClient, IrohNode}; + +#[derive(Debug)] +pub struct IrohManager { + client: IrohNode, + server_key: Vec, + rpc_addr: SocketAddr, + _rpc_task: AbortOnDropHandle<()>, +} + +impl IrohManager { + pub async fn new( + v4_addr: Option, + v6_addr: Option, + path: impl AsRef, + rpc_addr: Option, + ) -> Result { + let storage_path = path.as_ref().to_path_buf(); + let client = IrohNode::persistent(v4_addr, v6_addr, &storage_path).await?; + + // setup an RPC listener + let rpc_addr = rpc_addr.unwrap_or_else(|| "127.0.0.1:0".parse().unwrap()); + + let (config, server_key) = quic_rpc::transport::quinn::configure_server()?; + let endpoint = iroh_quinn::Endpoint::server(config, rpc_addr)?; + let local_addr = endpoint.local_addr()?; + + info!("Iroh RPC listening on {} ({})", local_addr, rpc_addr); + let rpc_server = quic_rpc::transport::quinn::QuinnListener::new(endpoint)?; + let rpc_server = quic_rpc::RpcServer::::new(rpc_server); + let blobs = client.blobs.clone(); + let rpc_task = rpc_server + .spawn_accept_loop(move |msg, chan| blobs.clone().handle_rpc_request(msg, chan)); + + Ok(Self { + client, + server_key, + rpc_addr: local_addr, + _rpc_task: rpc_task, + }) + } + + /// Retrives a blob client, and starts the node if it has not started yet. + pub fn blobs_client(&self) -> BlobsClient { + self.client.blobs_client().boxed() + } + + /// Returns the key for the RPC client. 
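For context, a hedged sketch of how a caller might use the `get_blob_hash_and_size` helper from lib.rs above; `blobs` and `seq_hash` are assumed inputs supplied by the surrounding service:

```rust
use iroh_blobs::Hash;
use iroh_manager::{get_blob_hash_and_size, BlobsClient};

async fn resolve_user_blob(blobs: &BlobsClient, seq_hash: Hash) -> anyhow::Result<()> {
    // Index 0 of the hash sequence is the user blob; the helper validates availability.
    let (user_hash, size) = get_blob_hash_and_size(blobs, seq_hash).await?;
    println!("user blob {user_hash} is {size} bytes");
    Ok(())
}
```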
+ pub fn rpc_key(&self) -> &[u8] { + &self.server_key + } + + pub fn rpc_addr(&self) -> SocketAddr { + self.rpc_addr + } +} + +pub type BlobsRpcClient = iroh_blobs::rpc::client::blobs::Client>; + +/// Connect to the given rpc listening on this address, with this key. +pub async fn connect(remote_addr: SocketAddr) -> Result { + info!("iroh RPC connecting to {}", remote_addr); + let bind_addr: SocketAddr = "0.0.0.0:0".parse()?; + let client = quic_rpc::transport::quinn::make_insecure_client_endpoint(bind_addr)?; + let client = QuinnConnector::::new(client, remote_addr, "localhost".to_string()); + let client = quic_rpc::RpcClient::::new(client); + let client = iroh_blobs::rpc::client::blobs::Client::new(client); + Ok(client.boxed()) +} + +#[cfg(test)] +mod tests { + use n0_future::StreamExt; + + use super::*; + + #[tokio::test] + async fn test_append_delete() -> Result<()> { + tracing_subscriber::fmt().init(); + let dir = tempfile::tempdir()?; + + let iroh = IrohManager::new(None, None, dir.path(), None).await?; + + let tags: Vec<_> = (0..10).map(|i| format!("tag-{i}")).collect(); + + for tag in &tags { + iroh.blobs_client() + .add_bytes_named(format!("content-for-{tag}"), tag.as_bytes()) + .await?; + } + + let existing_tags: Vec<_> = iroh + .blobs_client() + .tags() + .list() + .await? + .try_collect() + .await?; + assert_eq!(existing_tags.len(), 10); + + let t = tags.clone(); + let rpc_addr = iroh.rpc_addr(); + let task = tokio::task::spawn(async move { + let client = connect(rpc_addr).await?; + + for tag in t { + client.tags().delete(tag).await?; + } + + anyhow::Ok(()) + }); + + task.await??; + + let existing_tags: Vec<_> = iroh + .blobs_client() + .tags() + .list() + .await? + .try_collect() + .await?; + dbg!(&existing_tags); + assert_eq!(existing_tags.len(), 0); + + Ok(()) + } +} diff --git a/ipc-storage/iroh_manager/src/node.rs b/ipc-storage/iroh_manager/src/node.rs new file mode 100644 index 0000000000..56775a757a --- /dev/null +++ b/ipc-storage/iroh_manager/src/node.rs @@ -0,0 +1,208 @@ +// Copyright 2025 Recall Contributors +// Copyright 2022-2024 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT + +use std::net::{Ipv4Addr, Ipv6Addr, SocketAddrV4, SocketAddrV6}; +use std::path::Path; +use std::time::Duration; + +use anyhow::Result; +use iroh::{ + defaults::DEFAULT_STUN_PORT, protocol::Router, Endpoint, RelayMap, RelayMode, RelayNode, +}; +use iroh_blobs::{ + net_protocol::Blobs, rpc::proto::RpcService, store::GcConfig, util::fs::load_secret_key, +}; +use iroh_relay::RelayQuicConfig; +use quic_rpc::server::{ChannelTypes, RpcChannel, RpcServerError}; +use tracing::info; +use url::Url; + +use crate::BlobsClient; + +/// Wrapper around and iroh `Endpoint` and the functionality +/// to handle blobs. +#[derive(Debug, Clone)] +pub struct IrohNode { + router: Router, + pub(crate) blobs: BlobsWrapper, +} + +#[derive(Debug, Clone)] +pub(crate) enum BlobsWrapper { + Mem { + blobs: Blobs, + client: BlobsClient, + }, + Fs { + blobs: Blobs, + client: BlobsClient, + }, +} + +impl BlobsWrapper { + fn client(&self) -> &BlobsClient { + match self { + BlobsWrapper::Mem { ref client, .. } => client, + BlobsWrapper::Fs { ref client, .. } => client, + } + } + + pub(crate) async fn handle_rpc_request( + self, + msg: iroh_blobs::rpc::proto::Request, + chan: RpcChannel, + ) -> std::result::Result<(), RpcServerError> + where + C: ChannelTypes, + { + match self { + BlobsWrapper::Mem { blobs, .. } => blobs.handle_rpc_request(msg, chan).await, + BlobsWrapper::Fs { blobs, .. 
} => blobs.handle_rpc_request(msg, chan).await, + } + } +} + +/// GC interval duration. +const GC_DURATION: Duration = Duration::from_secs(300); + +const DEFAULT_PORT_V4: u16 = 11204; +const DEFAULT_PORT_V6: u16 = 11205; + +/// Hostname of the default USE relay. +pub const USE_RELAY_HOSTNAME: &str = "use1-1.relay.recallnet.recall.iroh.link."; +/// Hostname of the default USW relay. +pub const USW_RELAY_HOSTNAME: &str = "usw1-1.relay.recallnet.recall.iroh.link."; +/// Hostname of the default EUC relay. +pub const EUC_RELAY_HOSTNAME: &str = "euc1-1.relay.recallnet.recall.iroh.link."; + +/// Get the default [`RelayMap`]. +pub fn default_relay_map() -> RelayMap { + RelayMap::from_iter([ + default_use_relay_node(), + default_usw_relay_node(), + default_euc_relay_node(), + ]) +} + +/// Get the default [`RelayNode`] for USE. +pub fn default_use_relay_node() -> RelayNode { + let url: Url = format!("https://{USE_RELAY_HOSTNAME}") + .parse() + .expect("default url"); + RelayNode { + url: url.into(), + stun_only: false, + stun_port: DEFAULT_STUN_PORT, + quic: Some(RelayQuicConfig::default()), + } +} + +/// Get the default [`RelayNode`] for USW. +pub fn default_usw_relay_node() -> RelayNode { + let url: Url = format!("https://{USW_RELAY_HOSTNAME}") + .parse() + .expect("default_url"); + RelayNode { + url: url.into(), + stun_only: false, + stun_port: DEFAULT_STUN_PORT, + quic: Some(RelayQuicConfig::default()), + } +} + +/// Get the default [`RelayNode`] for EUC. +pub fn default_euc_relay_node() -> RelayNode { + // The default Europe Central relay server. + let url: Url = format!("https://{EUC_RELAY_HOSTNAME}") + .parse() + .expect("default_url"); + RelayNode { + url: url.into(), + stun_only: false, + stun_port: DEFAULT_STUN_PORT, + quic: Some(RelayQuicConfig::default()), + } +} + +impl IrohNode { + /// Creates a new persistent iroh node in the specified location. + /// + /// If the addrs are set to `None`, it will bind to the unspecified network addr + /// on the default ports (`11204`/`11205`). + pub async fn persistent( + v4_addr: Option<SocketAddrV4>, + v6_addr: Option<SocketAddrV6>, + path: impl AsRef<Path>, + ) -> Result<Self> { + // TODO: enable metrics + + let root = path.as_ref(); + info!("creating persistent iroh node in {}", root.display()); + + let blobs_path = root.join("blobs"); + let secret_key_path = root.join("iroh_key"); + + tokio::fs::create_dir_all(&blobs_path).await?; + let secret_key = load_secret_key(secret_key_path).await?; + + let v4 = + v4_addr.unwrap_or_else(|| SocketAddrV4::new(Ipv4Addr::UNSPECIFIED, DEFAULT_PORT_V4)); + let v6 = v6_addr + .unwrap_or_else(|| SocketAddrV6::new(Ipv6Addr::UNSPECIFIED, DEFAULT_PORT_V6, 0, 0)); + + let endpoint = Endpoint::builder() + .discovery_n0() + .relay_mode(RelayMode::Custom(default_relay_map())) + .secret_key(secret_key) + .bind_addr_v4(v4) + .bind_addr_v6(v6) + .bind() + .await?; + let blobs = Blobs::persistent(path).await?.build(&endpoint); + blobs.start_gc(GcConfig { + period: GC_DURATION, + done_callback: None, + })?; + + let router = Router::builder(endpoint) + .accept(iroh_blobs::ALPN, blobs.clone()) + .spawn(); + + let client = blobs.client().boxed(); + Ok(Self { + router, + blobs: BlobsWrapper::Fs { blobs, client }, + }) + } + + /// Creates a new in-memory iroh node.
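A short usage sketch for the persistent constructor above, assuming a tokio context; `add_bytes` and the outcome fields come from the iroh-blobs client API and are written from memory here, so treat them as illustrative rather than authoritative:

```rust
use iroh_manager::IrohNode;

async fn demo_persistent_node() -> anyhow::Result<()> {
    // Passing None for both addrs binds the default ports on the unspecified address.
    let dir = tempfile::tempdir()?;
    let node = IrohNode::persistent(None, None, dir.path()).await?;

    // Store a small blob through the node's blobs client.
    let outcome = node.blobs_client().add_bytes(b"hello world".to_vec()).await?;
    println!("stored blob {} ({} bytes)", outcome.hash, outcome.size);
    Ok(())
}
```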
+ pub async fn memory() -> Result { + info!("creating inmemory iroh node"); + let endpoint = Endpoint::builder().discovery_n0().bind().await?; + let blobs = Blobs::memory().build(&endpoint); + blobs.start_gc(GcConfig { + period: GC_DURATION, + done_callback: None, + })?; + + let router = Router::builder(endpoint) + .accept(iroh_blobs::ALPN, blobs.clone()) + .spawn(); + let client = blobs.client().boxed(); + Ok(Self { + router, + blobs: BlobsWrapper::Mem { blobs, client }, + }) + } + + /// Returns the [`Endpoint`] for this node. + pub fn endpoint(&self) -> &Endpoint { + self.router.endpoint() + } + + /// Returns the blobs client, necessary to interact with the blobs API: + pub fn blobs_client(&self) -> &BlobsClient { + self.blobs.client() + } +} diff --git a/ipc-storage/sol-facade/crates/facade/Cargo.lock b/ipc-storage/sol-facade/crates/facade/Cargo.lock new file mode 100644 index 0000000000..bb197ccf1f --- /dev/null +++ b/ipc-storage/sol-facade/crates/facade/Cargo.lock @@ -0,0 +1,2089 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. +version = 4 + +[[package]] +name = "aho-corasick" +version = "1.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e60d3430d3a69478ad0993f19238d2df97c507009a52b3c10addcd7f6bcb916" +dependencies = [ + "memchr", +] + +[[package]] +name = "alloy-json-abi" +version = "0.8.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "24acd2f5ba97c7a320e67217274bc81fe3c3174b8e6144ec875d9d54e760e278" +dependencies = [ + "alloy-primitives", + "alloy-sol-type-parser", + "serde", + "serde_json", +] + +[[package]] +name = "alloy-primitives" +version = "0.8.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec878088ec6283ce1e90d280316aadd3d6ce3de06ff63d68953c855e7e447e92" +dependencies = [ + "alloy-rlp", + "bytes", + "cfg-if", + "const-hex", + "derive_more", + "foldhash", + "hashbrown", + "indexmap", + "itoa", + "k256", + "keccak-asm", + "paste", + "proptest", + "rand", + "ruint", + "rustc-hash", + "serde", + "sha3", + "tiny-keccak", +] + +[[package]] +name = "alloy-rlp" +version = "0.3.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3d6c1d995bff8d011f7cd6c81820d51825e6e06d6db73914c1630ecf544d83d6" +dependencies = [ + "arrayvec", + "bytes", +] + +[[package]] +name = "alloy-sol-macro" +version = "0.8.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8d039d267aa5cbb7732fa6ce1fd9b5e9e29368f580f80ba9d7a8450c794de4b2" +dependencies = [ + "alloy-sol-macro-expander", + "alloy-sol-macro-input", + "proc-macro-error2", + "proc-macro2", + "quote", + "syn 2.0.96", +] + +[[package]] +name = "alloy-sol-macro-expander" +version = "0.8.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "620ae5eee30ee7216a38027dec34e0585c55099f827f92f50d11e3d2d3a4a954" +dependencies = [ + "alloy-json-abi", + "alloy-sol-macro-input", + "const-hex", + "heck", + "indexmap", + "proc-macro-error2", + "proc-macro2", + "quote", + "syn 2.0.96", + "syn-solidity", + "tiny-keccak", +] + +[[package]] +name = "alloy-sol-macro-input" +version = "0.8.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ad9f7d057e00f8c5994e4ff4492b76532c51ead39353aa2ed63f8c50c0f4d52e" +dependencies = [ + "alloy-json-abi", + "const-hex", + "dunce", + "heck", + "proc-macro2", + "quote", + "serde_json", + "syn 2.0.96", + "syn-solidity", +] + +[[package]] +name = 
"alloy-sol-type-parser" +version = "0.8.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "74e60b084fe1aef8acecda2743ff2d93c18ff3eb67a2d3b12f62582a1e66ef5e" +dependencies = [ + "serde", + "winnow", +] + +[[package]] +name = "alloy-sol-types" +version = "0.8.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c1382302752cd751efd275f4d6ef65877ddf61e0e6f5ac84ef4302b79a33a31a" +dependencies = [ + "alloy-json-abi", + "alloy-primitives", + "alloy-sol-macro", + "const-hex", + "serde", +] + +[[package]] +name = "anyhow" +version = "1.0.95" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "34ac096ce696dc2fcabef30516bb13c0a68a11d30131d3df6f04711467681b04" + +[[package]] +name = "ark-ff" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6b3235cc41ee7a12aaaf2c575a2ad7b46713a8a50bda2fc3b003a04845c05dd6" +dependencies = [ + "ark-ff-asm 0.3.0", + "ark-ff-macros 0.3.0", + "ark-serialize 0.3.0", + "ark-std 0.3.0", + "derivative", + "num-bigint", + "num-traits", + "paste", + "rustc_version 0.3.3", + "zeroize", +] + +[[package]] +name = "ark-ff" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec847af850f44ad29048935519032c33da8aa03340876d351dfab5660d2966ba" +dependencies = [ + "ark-ff-asm 0.4.2", + "ark-ff-macros 0.4.2", + "ark-serialize 0.4.2", + "ark-std 0.4.0", + "derivative", + "digest 0.10.7", + "itertools", + "num-bigint", + "num-traits", + "paste", + "rustc_version 0.4.1", + "zeroize", +] + +[[package]] +name = "ark-ff-asm" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "db02d390bf6643fb404d3d22d31aee1c4bc4459600aef9113833d17e786c6e44" +dependencies = [ + "quote", + "syn 1.0.109", +] + +[[package]] +name = "ark-ff-asm" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3ed4aa4fe255d0bc6d79373f7e31d2ea147bcf486cba1be5ba7ea85abdb92348" +dependencies = [ + "quote", + "syn 1.0.109", +] + +[[package]] +name = "ark-ff-macros" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "db2fd794a08ccb318058009eefdf15bcaaaaf6f8161eb3345f907222bac38b20" +dependencies = [ + "num-bigint", + "num-traits", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "ark-ff-macros" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7abe79b0e4288889c4574159ab790824d0033b9fdcb2a112a3182fac2e514565" +dependencies = [ + "num-bigint", + "num-traits", + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "ark-serialize" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d6c2b318ee6e10f8c2853e73a83adc0ccb88995aa978d8a3408d492ab2ee671" +dependencies = [ + "ark-std 0.3.0", + "digest 0.9.0", +] + +[[package]] +name = "ark-serialize" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "adb7b85a02b83d2f22f89bd5cac66c9c89474240cb6207cb1efc16d098e822a5" +dependencies = [ + "ark-std 0.4.0", + "digest 0.10.7", + "num-bigint", +] + +[[package]] +name = "ark-std" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1df2c09229cbc5a028b1d70e00fdb2acee28b1055dfb5ca73eea49c5a25c4e7c" +dependencies = [ + "num-traits", + "rand", +] + +[[package]] +name = "ark-std" +version = "0.4.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "94893f1e0c6eeab764ade8dc4c0db24caf4fe7cbbaafc0eba0a9030f447b5185" +dependencies = [ + "num-traits", + "rand", +] + +[[package]] +name = "arrayref" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "76a2e8124351fda1ef8aaaa3bbd7ebbcb486bbcd4225aca0aa0d84bb2db8fecb" + +[[package]] +name = "arrayvec" +version = "0.7.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7c02d123df017efcdfbd739ef81735b36c5ba83ec3c59c80a9d7ecc718f92e50" + +[[package]] +name = "auto_impl" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e12882f59de5360c748c4cbf569a042d5fb0eb515f7bea9c1f470b47f6ffbd73" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.96", +] + +[[package]] +name = "autocfg" +version = "1.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ace50bade8e6234aa140d9a2f552bbee1db4d353f69b8217bc503490fc1a9f26" + +[[package]] +name = "base-x" +version = "0.2.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4cbbc9d0964165b47557570cce6c952866c2678457aca742aafc9fb771d30270" + +[[package]] +name = "base16ct" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4c7f02d4ea65f2c1853089ffd8d2787bdbc63de2f0d29dedbcf8ccdfa0ccd4cf" + +[[package]] +name = "base64ct" +version = "1.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8c3c1a368f70d6cf7302d78f8f7093da241fb8e8807c05cc9e51a125895a6d5b" + +[[package]] +name = "bit-set" +version = "0.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0700ddab506f33b20a03b13996eccd309a48e5ff77d0d95926aa0210fb4e95f1" +dependencies = [ + "bit-vec", +] + +[[package]] +name = "bit-vec" +version = "0.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "349f9b6a179ed607305526ca489b34ad0a41aed5f7980fa90eb03160b69598fb" + +[[package]] +name = "bitflags" +version = "2.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f68f53c83ab957f72c32642f3868eec03eb974d1fb82e453128456482613d36" +dependencies = [ + "serde", +] + +[[package]] +name = "bitvec" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1bc2832c24239b0141d5674bb9174f9d68a8b5b3f2753311927c172ca46f7e9c" +dependencies = [ + "funty", + "radium", + "tap", + "wyz", +] + +[[package]] +name = "blake2b_simd" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "06e903a20b159e944f91ec8499fe1e55651480c541ea0a584f5d967c49ad9d99" +dependencies = [ + "arrayref", + "arrayvec", + "constant_time_eq", +] + +[[package]] +name = "block-buffer" +version = "0.10.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3078c7629b62d3f0439517fa394996acacc5cbc91c5a20d8c658e77abd503a71" +dependencies = [ + "generic-array", +] + +[[package]] +name = "byte-slice-cast" +version = "1.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c3ac9f8b63eca6fd385229b3675f6cc0dc5c8a5c8a54a59d4f52ffd670d87b0c" + +[[package]] +name = "byteorder" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" + +[[package]] +name = "bytes" +version = "1.9.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "325918d6fe32f23b19878fe4b34794ae41fc19ddbe53b10571a4874d44ffd39b" +dependencies = [ + "serde", +] + +[[package]] +name = "cbor4ii" +version = "0.2.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b544cf8c89359205f4f990d0e6f3828db42df85b5dac95d09157a250eb0749c4" +dependencies = [ + "serde", +] + +[[package]] +name = "cc" +version = "1.2.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "13208fcbb66eaeffe09b99fffbe1af420f00a7b35aa99ad683dfc1aa76145229" +dependencies = [ + "shlex", +] + +[[package]] +name = "cfg-if" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" + +[[package]] +name = "cid" +version = "0.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fd94671561e36e4e7de75f753f577edafb0e7c05d6e4547229fdf7938fbcd2c3" +dependencies = [ + "core2", + "multibase", + "multihash", + "serde", + "serde_bytes", + "unsigned-varint", +] + +[[package]] +name = "const-hex" +version = "1.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4b0485bab839b018a8f1723fc5391819fea5f8f0f32288ef8a735fd096b6160c" +dependencies = [ + "cfg-if", + "cpufeatures", + "hex", + "proptest", + "serde", +] + +[[package]] +name = "const-oid" +version = "0.9.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c2459377285ad874054d797f3ccebf984978aa39129f6eafde5cdc8315b612f8" + +[[package]] +name = "const_format" +version = "0.2.34" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "126f97965c8ad46d6d9163268ff28432e8f6a1196a55578867832e3049df63dd" +dependencies = [ + "const_format_proc_macros", +] + +[[package]] +name = "const_format_proc_macros" +version = "0.2.34" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d57c2eccfb16dbac1f4e61e206105db5820c9d26c3c472bc17c774259ef7744" +dependencies = [ + "proc-macro2", + "quote", + "unicode-xid", +] + +[[package]] +name = "constant_time_eq" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7c74b8349d32d297c9134b8c88677813a227df8f779daa29bfc29c183fe3dca6" + +[[package]] +name = "core2" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b49ba7ef1ad6107f8824dbe97de947cbaac53c44e7f9756a1fba0d37c1eec505" +dependencies = [ + "memchr", +] + +[[package]] +name = "cpufeatures" +version = "0.2.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "59ed5838eebb26a2bb2e58f6d5b5316989ae9d08bab10e0e6d103e656d1b0280" +dependencies = [ + "libc", +] + +[[package]] +name = "crunchy" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "43da5946c66ffcc7745f48db692ffbb10a83bfe0afd96235c5c2a4fb23994929" + +[[package]] +name = "crypto-bigint" +version = "0.5.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0dc92fb57ca44df6db8059111ab3af99a63d5d0f8375d9972e319a379c6bab76" +dependencies = [ + "generic-array", + "rand_core", + "subtle", + "zeroize", +] + +[[package]] +name = "crypto-common" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3" +dependencies = [ + "generic-array", + "typenum", +] + 
+[[package]] +name = "data-encoding" +version = "2.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0e60eed09d8c01d3cee5b7d30acb059b76614c918fa0f992e0dd6eeb10daad6f" + +[[package]] +name = "data-encoding-macro" +version = "0.1.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b16d9d0d88a5273d830dac8b78ceb217ffc9b1d5404e5597a3542515329405b" +dependencies = [ + "data-encoding", + "data-encoding-macro-internal", +] + +[[package]] +name = "data-encoding-macro-internal" +version = "0.1.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1145d32e826a7748b69ee8fc62d3e6355ff7f1051df53141e7048162fc90481b" +dependencies = [ + "data-encoding", + "syn 2.0.96", +] + +[[package]] +name = "der" +version = "0.7.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f55bf8e7b65898637379c1b74eb1551107c8294ed26d855ceb9fd1a09cfc9bc0" +dependencies = [ + "const-oid", + "zeroize", +] + +[[package]] +name = "derivative" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fcc3dd5e9e9c0b295d6e1e4d811fb6f157d5ffd784b8d202fc62eac8035a770b" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "derive_more" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4a9b99b9cbbe49445b21764dc0625032a89b145a2642e67603e1c936f5458d05" +dependencies = [ + "derive_more-impl", +] + +[[package]] +name = "derive_more-impl" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cb7330aeadfbe296029522e6c40f315320aba36fc43a5b3632f3795348f3bd22" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.96", + "unicode-xid", +] + +[[package]] +name = "digest" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3dd60d1080a57a05ab032377049e0591415d2b31afd7028356dbf3cc6dcb066" +dependencies = [ + "generic-array", +] + +[[package]] +name = "digest" +version = "0.10.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292" +dependencies = [ + "block-buffer", + "const-oid", + "crypto-common", + "subtle", +] + +[[package]] +name = "dunce" +version = "1.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "92773504d58c093f6de2459af4af33faa518c13451eb8f2b5698ed3d36e7c813" + +[[package]] +name = "ecdsa" +version = "0.16.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ee27f32b5c5292967d2d4a9d7f1e0b0aed2c15daded5a60300e4abb9d8020bca" +dependencies = [ + "der", + "digest 0.10.7", + "elliptic-curve", + "rfc6979", + "signature", + "spki", +] + +[[package]] +name = "either" +version = "1.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "60b1af1c220855b6ceac025d3f6ecdd2b7c4894bfe9cd9bda4fbb4bc7c0d4cf0" + +[[package]] +name = "elliptic-curve" +version = "0.13.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b5e6043086bf7973472e0c7dff2142ea0b680d30e18d9cc40f267efbf222bd47" +dependencies = [ + "base16ct", + "crypto-bigint", + "digest 0.10.7", + "ff", + "generic-array", + "group", + "pkcs8", + "rand_core", + "sec1", + "subtle", + "zeroize", +] + +[[package]] +name = "equivalent" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" + +[[package]] +name = "errno" +version = "0.3.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "33d852cb9b869c2a9b3df2f71a3074817f01e1844f839a144f5fcef059a4eb5d" +dependencies = [ + "libc", + "windows-sys", +] + +[[package]] +name = "eyre" +version = "0.6.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7cd915d99f24784cdc19fd37ef22b97e3ff0ae756c7e492e9fbfe897d61e2aec" +dependencies = [ + "indenter", + "once_cell", +] + +[[package]] +name = "fastrand" +version = "2.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "37909eebbb50d72f9059c3b6d82c0463f2ff062c9e95845c43a6c9c0355411be" + +[[package]] +name = "fastrlp" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "139834ddba373bbdd213dffe02c8d110508dcf1726c2be27e8d1f7d7e1856418" +dependencies = [ + "arrayvec", + "auto_impl", + "bytes", +] + +[[package]] +name = "fastrlp" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ce8dba4714ef14b8274c371879b175aa55b16b30f269663f19d576f380018dc4" +dependencies = [ + "arrayvec", + "auto_impl", + "bytes", +] + +[[package]] +name = "ff" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ded41244b729663b1e574f1b4fb731469f69f79c17667b5d776b16cda0479449" +dependencies = [ + "rand_core", + "subtle", +] + +[[package]] +name = "fixed-hash" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "835c052cb0c08c1acf6ffd71c022172e18723949c8282f2b9f27efbc51e64534" +dependencies = [ + "byteorder", + "rand", + "rustc-hex", + "static_assertions", +] + +[[package]] +name = "fnv" +version = "1.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" + +[[package]] +name = "foldhash" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a0d2fde1f7b3d48b8395d5f2de76c18a528bd6a9cdde438df747bfcba3e05d6f" + +[[package]] +name = "funty" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e6d5a32815ae3f33302d95fdcb2ce17862f8c65363dcfd29360480ba1001fc9c" + +[[package]] +name = "fvm_ipld_blockstore" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d064b957420f5ecc137a153baaa6c32e2eb19b674135317200b6f2537eabdbfd" +dependencies = [ + "anyhow", + "cid", + "multihash", +] + +[[package]] +name = "fvm_ipld_encoding" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "90608092e31d9a06236268c58f7c36668ab4b2a48afafe3a97e08f094ad7ae50" +dependencies = [ + "anyhow", + "cid", + "fvm_ipld_blockstore", + "multihash", + "serde", + "serde_ipld_dagcbor", + "serde_repr", + "serde_tuple", + "thiserror 1.0.69", +] + +[[package]] +name = "fvm_shared" +version = "4.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9d3355d3bd2eb159a734a06d67dbb21b067a99540f5aefaf7d0d26503ccc73e3" +dependencies = [ + "anyhow", + "bitflags", + "blake2b_simd", + "cid", + "data-encoding", + "data-encoding-macro", + "fvm_ipld_encoding", + "lazy_static", + "multihash", + "num-bigint", + "num-derive", + "num-integer", + "num-traits", + "serde", + "serde_tuple", + "thiserror 1.0.69", + "unsigned-varint", +] + +[[package]] 
+name = "generic-array" +version = "0.14.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a" +dependencies = [ + "typenum", + "version_check", + "zeroize", +] + +[[package]] +name = "getrandom" +version = "0.2.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c4567c8db10ae91089c99af84c68c38da3ec2f087c3f82960bcdbf3656b6f4d7" +dependencies = [ + "cfg-if", + "libc", + "wasi 0.11.0+wasi-snapshot-preview1", +] + +[[package]] +name = "getrandom" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "43a49c392881ce6d5c3b8cb70f98717b7c07aabbdff06687b9030dbfbe2725f8" +dependencies = [ + "cfg-if", + "libc", + "wasi 0.13.3+wasi-0.2.2", + "windows-targets", +] + +[[package]] +name = "group" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0f9ef7462f7c099f518d754361858f86d8a07af53ba9af0fe635bbccb151a63" +dependencies = [ + "ff", + "rand_core", + "subtle", +] + +[[package]] +name = "hashbrown" +version = "0.15.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bf151400ff0baff5465007dd2f3e717f3fe502074ca563069ce3a6629d07b289" +dependencies = [ + "foldhash", + "serde", +] + +[[package]] +name = "heck" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea" + +[[package]] +name = "hex" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" +dependencies = [ + "serde", +] + +[[package]] +name = "hmac" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c49c37c09c17a53d937dfbb742eb3a961d65a994e6bcdcf37e7399d0cc8ab5e" +dependencies = [ + "digest 0.10.7", +] + +[[package]] +name = "impl-codec" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba6a270039626615617f3f36d15fc827041df3b78c439da2cadfa47455a77f2f" +dependencies = [ + "parity-scale-codec", +] + +[[package]] +name = "impl-trait-for-tuples" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a0eb5a3343abf848c0984fe4604b2b105da9539376e24fc0a3b0007411ae4fd9" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.96", +] + +[[package]] +name = "indenter" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ce23b50ad8242c51a442f3ff322d56b02f08852c77e4c0b4d3fd684abc89c683" + +[[package]] +name = "indexmap" +version = "2.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8c9c992b02b5b4c94ea26e32fe5bccb7aa7d9f390ab5c1221ff895bc7ea8b652" +dependencies = [ + "equivalent", + "hashbrown", + "serde", +] + +[[package]] +name = "itertools" +version = "0.10.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b0fd2260e829bddf4cb6ea802289de2f86d6a7a690192fbe91b3f46e0f2c8473" +dependencies = [ + "either", +] + +[[package]] +name = "itoa" +version = "1.0.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d75a2a4b1b190afb6f5425f10f6a8f959d2ea0b9c2b1d79553551850539e4674" + +[[package]] +name = "k256" +version = "0.13.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"f6e3919bbaa2945715f0bb6d3934a173d1e9a59ac23767fbaaef277265a7411b" +dependencies = [ + "cfg-if", + "ecdsa", + "elliptic-curve", + "once_cell", + "sha2", +] + +[[package]] +name = "keccak" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ecc2af9a1119c51f12a14607e783cb977bde58bc069ff0c3da1095e635d70654" +dependencies = [ + "cpufeatures", +] + +[[package]] +name = "keccak-asm" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "505d1856a39b200489082f90d897c3f07c455563880bc5952e38eabf731c83b6" +dependencies = [ + "digest 0.10.7", + "sha3-asm", +] + +[[package]] +name = "lazy_static" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" + +[[package]] +name = "libc" +version = "0.2.169" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b5aba8db14291edd000dfcc4d620c7ebfb122c613afb886ca8803fa4e128a20a" + +[[package]] +name = "libm" +version = "0.2.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8355be11b20d696c8f18f6cc018c4e372165b1fa8126cef092399c9951984ffa" + +[[package]] +name = "linux-raw-sys" +version = "0.4.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d26c52dbd32dccf2d10cac7725f8eae5296885fb5703b261f7d0a0739ec807ab" + +[[package]] +name = "memchr" +version = "2.7.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3" + +[[package]] +name = "multibase" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b3539ec3c1f04ac9748a260728e855f261b4977f5c3406612c884564f329404" +dependencies = [ + "base-x", + "data-encoding", + "data-encoding-macro", +] + +[[package]] +name = "multihash" +version = "0.18.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cfd8a792c1694c6da4f68db0a9d707c72bd260994da179e6030a5dcee00bb815" +dependencies = [ + "blake2b_simd", + "core2", + "multihash-derive", + "serde", + "serde-big-array", + "unsigned-varint", +] + +[[package]] +name = "multihash-derive" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d6d4752e6230d8ef7adf7bd5d8c4b1f6561c1014c5ba9a37445ccefe18aa1db" +dependencies = [ + "proc-macro-crate 1.1.3", + "proc-macro-error", + "proc-macro2", + "quote", + "syn 1.0.109", + "synstructure", +] + +[[package]] +name = "num-bigint" +version = "0.4.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a5e44f723f1133c9deac646763579fdb3ac745e418f2a7af9cd0c431da1f20b9" +dependencies = [ + "num-integer", + "num-traits", +] + +[[package]] +name = "num-derive" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ed3955f1a9c7c0c15e092f9c887db08b1fc683305fdf6eb6684f22555355e202" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.96", +] + +[[package]] +name = "num-integer" +version = "0.1.46" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7969661fd2958a5cb096e56c8e1ad0444ac2bbcd0061bd28660485a44879858f" +dependencies = [ + "num-traits", +] + +[[package]] +name = "num-traits" +version = "0.2.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841" +dependencies = [ 
+ "autocfg", + "libm", +] + +[[package]] +name = "once_cell" +version = "1.20.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1261fe7e33c73b354eab43b1273a57c8f967d0391e80353e51f764ac02cf6775" + +[[package]] +name = "parity-scale-codec" +version = "3.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b91c2d9a6a6004e205b7e881856fb1a0f5022d382acc2c01b52185f7b6f65997" +dependencies = [ + "arrayvec", + "bitvec", + "byte-slice-cast", + "const_format", + "impl-trait-for-tuples", + "parity-scale-codec-derive", + "rustversion", + "serde", +] + +[[package]] +name = "parity-scale-codec-derive" +version = "3.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "77555fd9d578b6470470463fded832619a5fec5ad6cbc551fe4d7507ce50cd3a" +dependencies = [ + "proc-macro-crate 3.2.0", + "proc-macro2", + "quote", + "syn 2.0.96", +] + +[[package]] +name = "paste" +version = "1.0.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "57c0d7b74b563b49d38dae00a0c37d4d6de9b432382b2892f0574ddcae73fd0a" + +[[package]] +name = "pest" +version = "2.7.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b7cafe60d6cf8e62e1b9b2ea516a089c008945bb5a275416789e7db0bc199dc" +dependencies = [ + "memchr", + "thiserror 2.0.11", + "ucd-trie", +] + +[[package]] +name = "pkcs8" +version = "0.10.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f950b2377845cebe5cf8b5165cb3cc1a5e0fa5cfa3e1f7f55707d8fd82e0a7b7" +dependencies = [ + "der", + "spki", +] + +[[package]] +name = "ppv-lite86" +version = "0.2.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "77957b295656769bb8ad2b6a6b09d897d94f05c41b069aede1fcdaa675eaea04" +dependencies = [ + "zerocopy", +] + +[[package]] +name = "prettyplease" +version = "0.2.29" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6924ced06e1f7dfe3fa48d57b9f74f55d8915f5036121bef647ef4b204895fac" +dependencies = [ + "proc-macro2", + "syn 2.0.96", +] + +[[package]] +name = "primitive-types" +version = "0.12.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b34d9fd68ae0b74a41b21c03c2f62847aa0ffea044eee893b4c140b37e244e2" +dependencies = [ + "fixed-hash", + "impl-codec", + "uint", +] + +[[package]] +name = "proc-macro-crate" +version = "1.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e17d47ce914bf4de440332250b0edd23ce48c005f59fab39d3335866b114f11a" +dependencies = [ + "thiserror 1.0.69", + "toml", +] + +[[package]] +name = "proc-macro-crate" +version = "3.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ecf48c7ca261d60b74ab1a7b20da18bede46776b2e55535cb958eb595c5fa7b" +dependencies = [ + "toml_edit", +] + +[[package]] +name = "proc-macro-error" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c" +dependencies = [ + "proc-macro-error-attr", + "proc-macro2", + "quote", + "syn 1.0.109", + "version_check", +] + +[[package]] +name = "proc-macro-error-attr" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869" +dependencies = [ + "proc-macro2", + "quote", + "version_check", +] + +[[package]] +name = "proc-macro-error-attr2" +version = "2.0.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "96de42df36bb9bba5542fe9f1a054b8cc87e172759a1868aa05c1f3acc89dfc5" +dependencies = [ + "proc-macro2", + "quote", +] + +[[package]] +name = "proc-macro-error2" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "11ec05c52be0a07b08061f7dd003e7d7092e0472bc731b4af7bb1ef876109802" +dependencies = [ + "proc-macro-error-attr2", + "proc-macro2", + "quote", + "syn 2.0.96", +] + +[[package]] +name = "proc-macro2" +version = "1.0.93" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "60946a68e5f9d28b0dc1c21bb8a97ee7d018a8b322fa57838ba31cc878e22d99" +dependencies = [ + "unicode-ident", +] + +[[package]] +name = "proptest" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b4c2511913b88df1637da85cc8d96ec8e43a3f8bb8ccb71ee1ac240d6f3df58d" +dependencies = [ + "bit-set", + "bit-vec", + "bitflags", + "lazy_static", + "num-traits", + "rand", + "rand_chacha", + "rand_xorshift", + "regex-syntax", + "rusty-fork", + "tempfile", + "unarray", +] + +[[package]] +name = "quick-error" +version = "1.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" + +[[package]] +name = "quote" +version = "1.0.38" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0e4dccaaaf89514f546c693ddc140f729f958c247918a13380cccc6078391acc" +dependencies = [ + "proc-macro2", +] + +[[package]] +name = "radium" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc33ff2d4973d518d823d61aa239014831e521c75da58e3df4840d3f47749d09" + +[[package]] +name = "rand" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" +dependencies = [ + "libc", + "rand_chacha", + "rand_core", + "serde", +] + +[[package]] +name = "rand_chacha" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" +dependencies = [ + "ppv-lite86", + "rand_core", +] + +[[package]] +name = "rand_core" +version = "0.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" +dependencies = [ + "getrandom 0.2.15", +] + +[[package]] +name = "rand_xorshift" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d25bf25ec5ae4a3f1b92f929810509a2f53d7dca2f50b794ff57e3face536c8f" +dependencies = [ + "rand_core", +] + +[[package]] +name = "recall_sol_facade" +version = "0.1.2" +dependencies = [ + "alloy-primitives", + "alloy-sol-macro-expander", + "alloy-sol-macro-input", + "alloy-sol-types", + "anyhow", + "dunce", + "eyre", + "fvm_ipld_encoding", + "fvm_shared", + "prettyplease", + "proc-macro2", + "quote", + "regex", + "serde", + "serde_json", + "syn 2.0.96", + "thiserror 2.0.11", + "walkdir", +] + +[[package]] +name = "regex" +version = "1.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b544ef1b4eac5dc2db33ea63606ae9ffcfac26c1416a2806ae0bf5f56b201191" +dependencies = [ + "aho-corasick", + "memchr", + "regex-automata", + "regex-syntax", +] + +[[package]] +name = "regex-automata" +version = "0.4.9" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "809e8dc61f6de73b46c85f4c96486310fe304c434cfa43669d7b40f711150908" +dependencies = [ + "aho-corasick", + "memchr", + "regex-syntax", +] + +[[package]] +name = "regex-syntax" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c" + +[[package]] +name = "rfc6979" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8dd2a808d456c4a54e300a23e9f5a67e122c3024119acbfd73e3bf664491cb2" +dependencies = [ + "hmac", + "subtle", +] + +[[package]] +name = "rlp" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bb919243f34364b6bd2fc10ef797edbfa75f33c252e7998527479c6d6b47e1ec" +dependencies = [ + "bytes", + "rustc-hex", +] + +[[package]] +name = "ruint" +version = "1.12.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f5ef8fb1dd8de3870cb8400d51b4c2023854bbafd5431a3ac7e7317243e22d2f" +dependencies = [ + "alloy-rlp", + "ark-ff 0.3.0", + "ark-ff 0.4.2", + "bytes", + "fastrlp 0.3.1", + "fastrlp 0.4.0", + "num-bigint", + "num-integer", + "num-traits", + "parity-scale-codec", + "primitive-types", + "proptest", + "rand", + "rlp", + "ruint-macro", + "serde", + "valuable", + "zeroize", +] + +[[package]] +name = "ruint-macro" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "48fd7bd8a6377e15ad9d42a8ec25371b94ddc67abe7c8b9127bec79bebaaae18" + +[[package]] +name = "rustc-hash" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c7fb8039b3032c191086b10f11f319a6e99e1e82889c5cc6046f515c9db1d497" + +[[package]] +name = "rustc-hex" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3e75f6a532d0fd9f7f13144f392b6ad56a32696bfcd9c78f797f16bbb6f072d6" + +[[package]] +name = "rustc_version" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0dfe2087c51c460008730de8b57e6a320782fbfb312e1f4d520e6c6fae155ee" +dependencies = [ + "semver 0.11.0", +] + +[[package]] +name = "rustc_version" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cfcb3a22ef46e85b45de6ee7e79d063319ebb6594faafcf1c225ea92ab6e9b92" +dependencies = [ + "semver 1.0.25", +] + +[[package]] +name = "rustix" +version = "0.38.44" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fdb5bc1ae2baa591800df16c9ca78619bf65c0488b41b96ccec5d11220d8c154" +dependencies = [ + "bitflags", + "errno", + "libc", + "linux-raw-sys", + "windows-sys", +] + +[[package]] +name = "rustversion" +version = "1.0.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f7c45b9784283f1b2e7fb61b42047c2fd678ef0960d4f6f1eba131594cc369d4" + +[[package]] +name = "rusty-fork" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cb3dcc6e454c328bb824492db107ab7c0ae8fcffe4ad210136ef014458c1bc4f" +dependencies = [ + "fnv", + "quick-error", + "tempfile", + "wait-timeout", +] + +[[package]] +name = "ryu" +version = "1.0.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ea1a2d0a644769cc99faa24c3ad26b379b786fe7c36fd3c546254801650e6dd" + +[[package]] +name = "same-file" +version = "1.0.6" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502" +dependencies = [ + "winapi-util", +] + +[[package]] +name = "scopeguard" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" + +[[package]] +name = "sec1" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3e97a565f76233a6003f9f5c54be1d9c5bdfa3eccfb189469f11ec4901c47dc" +dependencies = [ + "base16ct", + "der", + "generic-array", + "pkcs8", + "subtle", + "zeroize", +] + +[[package]] +name = "semver" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f301af10236f6df4160f7c3f04eec6dbc70ace82d23326abad5edee88801c6b6" +dependencies = [ + "semver-parser", +] + +[[package]] +name = "semver" +version = "1.0.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f79dfe2d285b0488816f30e700a7438c5a73d816b5b7d3ac72fbc48b0d185e03" + +[[package]] +name = "semver-parser" +version = "0.10.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9900206b54a3527fdc7b8a938bffd94a568bac4f4aa8113b209df75a09c0dec2" +dependencies = [ + "pest", +] + +[[package]] +name = "serde" +version = "1.0.217" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "02fc4265df13d6fa1d00ecff087228cc0a2b5f3c0e87e258d8b94a156e984c70" +dependencies = [ + "serde_derive", +] + +[[package]] +name = "serde-big-array" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cd31f59f6fe2b0c055371bb2f16d7f0aa7d8881676c04a55b1596d1a17cd10a4" +dependencies = [ + "serde", +] + +[[package]] +name = "serde_bytes" +version = "0.11.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "387cc504cb06bb40a96c8e04e951fe01854cf6bc921053c954e4a606d9675c6a" +dependencies = [ + "serde", +] + +[[package]] +name = "serde_derive" +version = "1.0.217" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a9bf7cf98d04a2b28aead066b7496853d4779c9cc183c440dbac457641e19a0" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.96", +] + +[[package]] +name = "serde_ipld_dagcbor" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e880e0b1f9c7a8db874642c1217f7e19b29e325f24ab9f0fcb11818adec7f01" +dependencies = [ + "cbor4ii", + "cid", + "scopeguard", + "serde", +] + +[[package]] +name = "serde_json" +version = "1.0.138" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d434192e7da787e94a6ea7e9670b26a036d0ca41e0b7efb2676dd32bae872949" +dependencies = [ + "itoa", + "memchr", + "ryu", + "serde", +] + +[[package]] +name = "serde_repr" +version = "0.1.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c64451ba24fc7a6a2d60fc75dd9c83c90903b19028d4eff35e88fc1e86564e9" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.96", +] + +[[package]] +name = "serde_tuple" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f4f025b91216f15a2a32aa39669329a475733590a015835d1783549a56d09427" +dependencies = [ + "serde", + "serde_tuple_macros", +] + +[[package]] +name = "serde_tuple_macros" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"4076151d1a2b688e25aaf236997933c66e18b870d0369f8b248b8ab2be630d7e" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "sha2" +version = "0.10.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "793db75ad2bcafc3ffa7c68b215fee268f537982cd901d132f89c6343f3a3dc8" +dependencies = [ + "cfg-if", + "cpufeatures", + "digest 0.10.7", +] + +[[package]] +name = "sha3" +version = "0.10.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75872d278a8f37ef87fa0ddbda7802605cb18344497949862c0d4dcb291eba60" +dependencies = [ + "digest 0.10.7", + "keccak", +] + +[[package]] +name = "sha3-asm" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c28efc5e327c837aa837c59eae585fc250715ef939ac32881bcc11677cd02d46" +dependencies = [ + "cc", + "cfg-if", +] + +[[package]] +name = "shlex" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" + +[[package]] +name = "signature" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "77549399552de45a898a580c1b41d445bf730df867cc44e6c0233bbc4b8329de" +dependencies = [ + "digest 0.10.7", + "rand_core", +] + +[[package]] +name = "spki" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d91ed6c858b01f942cd56b37a94b3e0a1798290327d1236e4d9cf4eaca44d29d" +dependencies = [ + "base64ct", + "der", +] + +[[package]] +name = "static_assertions" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f" + +[[package]] +name = "subtle" +version = "2.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292" + +[[package]] +name = "syn" +version = "1.0.109" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] +name = "syn" +version = "2.0.96" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d5d0adab1ae378d7f53bdebc67a39f1f151407ef230f0ce2883572f5d8985c80" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] +name = "syn-solidity" +version = "0.8.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b84e4d83a0a6704561302b917a932484e1cae2d8c6354c64be8b7bac1c1fe057" +dependencies = [ + "paste", + "proc-macro2", + "quote", + "syn 2.0.96", +] + +[[package]] +name = "synstructure" +version = "0.12.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f36bdaa60a83aca3921b5259d5400cbf5e90fc51931376a9bd4a0eb79aa7210f" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", + "unicode-xid", +] + +[[package]] +name = "tap" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" + +[[package]] +name = "tempfile" +version = "3.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "38c246215d7d24f48ae091a2902398798e05d978b24315d6efbc00ede9a8bb91" +dependencies = [ + "cfg-if", + "fastrand", + "getrandom 0.3.1", + "once_cell", + "rustix", + 
"windows-sys", +] + +[[package]] +name = "thiserror" +version = "1.0.69" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6aaf5339b578ea85b50e080feb250a3e8ae8cfcdff9a461c9ec2904bc923f52" +dependencies = [ + "thiserror-impl 1.0.69", +] + +[[package]] +name = "thiserror" +version = "2.0.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d452f284b73e6d76dd36758a0c8684b1d5be31f92b89d07fd5822175732206fc" +dependencies = [ + "thiserror-impl 2.0.11", +] + +[[package]] +name = "thiserror-impl" +version = "1.0.69" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.96", +] + +[[package]] +name = "thiserror-impl" +version = "2.0.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "26afc1baea8a989337eeb52b6e72a039780ce45c3edfcc9c5b9d112feeb173c2" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.96", +] + +[[package]] +name = "tiny-keccak" +version = "2.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2c9d3793400a45f954c52e73d068316d76b6f4e36977e3fcebb13a2721e80237" +dependencies = [ + "crunchy", +] + +[[package]] +name = "toml" +version = "0.5.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f4f7f0dd8d50a853a531c426359045b1998f04219d88799810762cd4ad314234" +dependencies = [ + "serde", +] + +[[package]] +name = "toml_datetime" +version = "0.6.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0dd7358ecb8fc2f8d014bf86f6f638ce72ba252a2c3a2572f2a795f1d23efb41" + +[[package]] +name = "toml_edit" +version = "0.22.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4ae48d6208a266e853d946088ed816055e556cc6028c5e8e2b84d9fa5dd7c7f5" +dependencies = [ + "indexmap", + "toml_datetime", + "winnow", +] + +[[package]] +name = "typenum" +version = "1.17.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42ff0bf0c66b8238c6f3b578df37d0b7848e55df8577b3f74f92a69acceeb825" + +[[package]] +name = "ucd-trie" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2896d95c02a80c6d6a5d6e953d479f5ddf2dfdb6a244441010e373ac0fb88971" + +[[package]] +name = "uint" +version = "0.9.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "76f64bba2c53b04fcab63c01a7d7427eadc821e3bc48c34dc9ba29c501164b52" +dependencies = [ + "byteorder", + "crunchy", + "hex", + "static_assertions", +] + +[[package]] +name = "unarray" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eaea85b334db583fe3274d12b4cd1880032beab409c0d774be044d4480ab9a94" + +[[package]] +name = "unicode-ident" +version = "1.0.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a210d160f08b701c8721ba1c726c11662f877ea6b7094007e1ca9a1041945034" + +[[package]] +name = "unicode-xid" +version = "0.2.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ebc1c04c71510c7f702b52b7c350734c9ff1295c464a03335b00bb84fc54f853" + +[[package]] +name = "unsigned-varint" +version = "0.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6889a77d49f1f013504cec6bf97a2c730394adedaeb1deb5ea08949a50541105" + +[[package]] +name = "valuable" +version = "0.1.1" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba73ea9cf16a25df0c8caa16c51acb937d5712a8429db78a3ee29d5dcacd3a65" + +[[package]] +name = "version_check" +version = "0.9.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b928f33d975fc6ad9f86c8f283853ad26bdd5b10b7f1542aa2fa15e2289105a" + +[[package]] +name = "wait-timeout" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9f200f5b12eb75f8c1ed65abd4b2db8a6e1b138a20de009dacee265a2498f3f6" +dependencies = [ + "libc", +] + +[[package]] +name = "walkdir" +version = "2.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "29790946404f91d9c5d06f9874efddea1dc06c5efe94541a7d6863108e3a5e4b" +dependencies = [ + "same-file", + "winapi-util", +] + +[[package]] +name = "wasi" +version = "0.11.0+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" + +[[package]] +name = "wasi" +version = "0.13.3+wasi-0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "26816d2e1a4a36a2940b96c5296ce403917633dff8f3440e9b236ed6f6bacad2" +dependencies = [ + "wit-bindgen-rt", +] + +[[package]] +name = "winapi-util" +version = "0.1.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cf221c93e13a30d793f7645a0e7762c55d169dbb0a49671918a2319d289b10bb" +dependencies = [ + "windows-sys", +] + +[[package]] +name = "windows-sys" +version = "0.59.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b" +dependencies = [ + "windows-targets", +] + +[[package]] +name = "windows-targets" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973" +dependencies = [ + "windows_aarch64_gnullvm", + "windows_aarch64_msvc", + "windows_i686_gnu", + "windows_i686_gnullvm", + "windows_i686_msvc", + "windows_x86_64_gnu", + "windows_x86_64_gnullvm", + "windows_x86_64_msvc", +] + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" + +[[package]] +name = "windows_i686_gnu" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" + +[[package]] +name = "windows_i686_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" + +[[package]] +name = "windows_i686_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.52.6" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" + +[[package]] +name = "winnow" +version = "0.6.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ad699df48212c6cc6eb4435f35500ac6fd3b9913324f938aea302022ce19d310" +dependencies = [ + "memchr", +] + +[[package]] +name = "wit-bindgen-rt" +version = "0.33.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3268f3d866458b787f390cf61f4bbb563b922d091359f9608842999eaee3943c" +dependencies = [ + "bitflags", +] + +[[package]] +name = "wyz" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05f360fc0b24296329c78fda852a1e9ae82de9cf7b27dae4b7f62f118f77b9ed" +dependencies = [ + "tap", +] + +[[package]] +name = "zerocopy" +version = "0.7.35" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1b9b4fd18abc82b8136838da5d50bae7bdea537c574d8dc1a34ed098d6c166f0" +dependencies = [ + "byteorder", + "zerocopy-derive", +] + +[[package]] +name = "zerocopy-derive" +version = "0.7.35" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.96", +] + +[[package]] +name = "zeroize" +version = "1.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ced3678a2879b30306d323f4542626697a464a97c0a07c9aebf7ebca65cd4dde" +dependencies = [ + "zeroize_derive", +] + +[[package]] +name = "zeroize_derive" +version = "1.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.96", +] diff --git a/ipc-storage/sol-facade/crates/facade/Cargo.toml b/ipc-storage/sol-facade/crates/facade/Cargo.toml new file mode 100644 index 0000000000..0deca09c1c --- /dev/null +++ b/ipc-storage/sol-facade/crates/facade/Cargo.toml @@ -0,0 +1,41 @@ +[package] +name = "ipc_storage_sol_facade" +description = "Rust bindings for the Solidity Facades" +edition = "2021" +license = "MIT OR Apache-2.0" +version = "0.1.2" + +[dependencies] +anyhow = "1.0.95" +alloy-primitives = { version = "~0.8.19", features = ["std"] } +alloy-sol-types = { version = "~0.8.19", features = ["std"] } +# Upgraded to FVM 4.7 for IPC main branch compatibility +fvm_ipld_encoding = { workspace = true } +fvm_shared = { workspace = true } + +[build-dependencies] +alloy-primitives = { version = "0.8.19" } +alloy-sol-macro-expander = { version = "0.8.19", features = ["json"] } +alloy-sol-macro-input = { version = "0.8.19", features = ["json"] } +alloy-sol-types = { version = "0.8.19", features = ["json"] } +dunce = "1.0.5" +eyre = "0.6.12" +prettyplease = "0.2.29" +proc-macro2 = "1.0.93" +quote = "1.0.38" +regex = "1.11.1" +syn = "2.0.96" +serde = "1.0.217" +serde_json = "1.0.138" +thiserror = "2.0.11" +walkdir = "2.5.0" + +[features] +blob-reader = [] +blobs = [] +bucket = [] +config = [] +credit = [] +gas = [] +machine = [] +timehub = [] diff --git a/ipc-storage/sol-facade/crates/facade/build.rs b/ipc-storage/sol-facade/crates/facade/build.rs new file mode 100644 index 0000000000..d2bbf4b5b4 
--- /dev/null
+++ b/ipc-storage/sol-facade/crates/facade/build.rs
@@ -0,0 +1,171 @@
+// Copyright 2022-2024 Protocol Labs
+// SPDX-License-Identifier: Apache-2.0, MIT
+//! Adapted from https://github.com/foundry-rs/foundry/blob/60f0b692acae47a4933bb4a0bc4a29cab8831ba1/crates/forge/bin/cmd/bind.rs
+//!
+//! This build script generates Rust bindings for Solidity contracts using Forge.
+//!
+//! Ideally, this script would programmatically execute `forge install` and `forge build`
+//! to avoid committing generated artifacts (the bindings) to version control.
+//! This is the standard practice for build outputs.
+//!
+//! Currently, downstream crates can use the pre-generated bindings directly.
+//! However, this requires developers to manually run `make rust-bindings` (which performs the
+//! Forge build and bind) whenever the Solidity facades change and then commit the resulting
+//! changes to version control.
+//!
+//! While convenient for downstream users, this approach is suboptimal.
+//! A future improvement would be to implement programmatic `forge install` and `forge build`
+//! within this script, eliminating the manual steps and the need to commit build
+//! artifacts.
+//! This would ensure that downstream crates always use up-to-date bindings without relying on
+//! potentially outdated committed versions and would streamline the development workflow.
+//!
+//! SPDX-License-Identifier: Apache-2.0, MIT
+
+use std::path::{Path, PathBuf};
+
+use alloy_primitives::map::HashSet;
+use eyre::Result;
+use forge::{fs::json_files, MultiSolMacroGen, SolMacroGen};
+use regex::Regex;
+
+mod forge;
+
+const FACADES: &[&str] = &[
+    "BlobReader",
+    "Blobs",
+    "Bucket",
+    "Config",
+    "Credit",
+    "Gas",
+    "Machine",
+    "Timehub",
+];
+
+fn main() {
+    if std::env::var("BUILD_BINDINGS").unwrap_or("0".to_string()) == "0" {
+        return;
+    }
+
+    let cargo_dir = env!("CARGO_MANIFEST_DIR");
+    let artifacts_dir = PathBuf::from(format!("{}/../../out", cargo_dir));
+
+    for facade in FACADES {
+        let out_dir = PathBuf::from(format!(
+            "{}/src/{}_facade",
+            cargo_dir,
+            facade.to_lowercase()
+        ));
+        let select = Regex::new(format!("I{}Facade", facade).as_str()).unwrap();
+        let binder = ForgeBinder {
+            artifacts: artifacts_dir.clone(),
+            out: out_dir,
+            select: vec![select],
+        };
+        binder
+            .run()
+            .unwrap_or_else(|_| panic!("failed to generate {} bindings", facade));
+    }
+}
+
+#[derive(Clone, Debug)]
+pub struct ForgeBinder {
+    pub artifacts: PathBuf,
+    pub out: PathBuf,
+    pub select: Vec<Regex>,
+}
+
+impl ForgeBinder {
+    pub fn run(self) -> Result<()> {
+        self.generate_bindings(&self.artifacts, &self.out)?;
+        Ok(())
+    }
+
+    fn get_filter(&self) -> Result<Filter> {
+        Ok(Filter::Select(self.select.clone()))
+    }
+
+    /// Returns an iterator over the JSON files and the contract name in the `artifacts` directory.
+    fn get_json_files(&self, artifacts: &Path) -> Result<impl Iterator<Item = (String, PathBuf)>> {
+        let filter = self.get_filter()?;
+        Ok(json_files(artifacts)
+            .filter_map(|path| {
+                // Ignore the build info JSON.
+                if path.to_str()?.contains("build-info") {
+                    return None;
+                }
+
+                // We don't want `.metadata.json` files.
+                let stem = path.file_stem()?.to_str()?;
+                if stem.ends_with(".metadata") {
+                    return None;
+                }
+
+                let name = stem.split('.').next().unwrap();
+
+                // Best effort identifier cleanup.
+                let name = name.replace(char::is_whitespace, "").replace('-', "_");
+
+                Some((name, path))
+            })
+            .filter(move |(name, _path)| filter.is_match(name)))
+    }
+
+    fn get_solmacrogen(&self, artifacts: &Path) -> Result<MultiSolMacroGen> {
+        let mut dup = HashSet::<String>::default();
+        let instances = self
+            .get_json_files(artifacts)?
+            .filter_map(|(name, path)| {
+                if dup.insert(name.clone()) {
+                    Some(SolMacroGen::new(path, name))
+                } else {
+                    None
+                }
+            })
+            .collect::<Vec<_>>();
+
+        let multi = MultiSolMacroGen::new(instances);
+        eyre::ensure!(!multi.instances.is_empty(), "No contract artifacts found");
+        Ok(multi)
+    }
+
+    /// Generate the bindings
+    fn generate_bindings(&self, artifacts: &Path, bindings_root: &Path) -> Result<()> {
+        let mut solmacrogen = self.get_solmacrogen(artifacts)?;
+        solmacrogen.write_to_module(bindings_root, false)
+    }
+}
+
+pub enum Filter {
+    All,
+    Select(Vec<Regex>),
+    Skip(Vec<Regex>),
+}
+
+impl Filter {
+    pub fn is_match(&self, name: &str) -> bool {
+        match self {
+            Self::All => true,
+            Self::Select(regexes) => regexes.iter().any(|regex| regex.is_match(name)),
+            Self::Skip(regexes) => !regexes.iter().any(|regex| regex.is_match(name)),
+        }
+    }
+
+    pub fn skip_default() -> Self {
+        let skip = [
+            ".*Test.*",
+            ".*Script",
+            "console[2]?",
+            "CommonBase",
+            "Components",
+            "[Ss]td(Chains|Math|Error|Json|Utils|Cheats|Style|Invariant|Assertions|Toml|Storage(Safe)?)",
+            "[Vv]m.*",
+            "IMulticall3",
+        ]
+        .iter()
+        .map(|pattern| Regex::new(pattern).unwrap())
+        .collect::<Vec<_>>();
+
+        Self::Skip(skip)
+    }
+}
diff --git a/ipc-storage/sol-facade/crates/facade/forge/forge_sol_macro_gen/mod.rs b/ipc-storage/sol-facade/crates/facade/forge/forge_sol_macro_gen/mod.rs
new file mode 100644
index 0000000000..39defff2e3
--- /dev/null
+++ b/ipc-storage/sol-facade/crates/facade/forge/forge_sol_macro_gen/mod.rs
@@ -0,0 +1,4 @@
+// Copyright 2022-2024 Protocol Labs
+// SPDX-License-Identifier: Apache-2.0, MIT
+mod sol_macro_gen;
+pub use sol_macro_gen::*;
diff --git a/ipc-storage/sol-facade/crates/facade/forge/forge_sol_macro_gen/sol_macro_gen.rs b/ipc-storage/sol-facade/crates/facade/forge/forge_sol_macro_gen/sol_macro_gen.rs
new file mode 100644
index 0000000000..53813dff04
--- /dev/null
+++ b/ipc-storage/sol-facade/crates/facade/forge/forge_sol_macro_gen/sol_macro_gen.rs
@@ -0,0 +1,156 @@
+// Copyright 2022-2024 Protocol Labs
+// SPDX-License-Identifier: Apache-2.0, MIT
+//! Partially copied from https://github.com/foundry-rs/foundry/blob/60f0b692acae47a4933bb4a0bc4a29cab8831ba1/crates/sol-macro-gen/src/sol_macro_gen.rs
+//!
+//! SolMacroGen and MultiSolMacroGen
+//!
+//! This type encapsulates the logic for expansion of a Rust TokenStream from Solidity tokens. It
+//! uses the `expand` method from `alloy_sol_macro_expander` underneath.
+//!
+//! It holds info such as `path` to the ABI file, `name` of the file and the rust binding being
+//! generated, and lastly the `expansion` itself, i.e. the Rust binding for the provided ABI.
+//!
+//! It contains methods to read the json abi, generate rust bindings from the abi and ultimately
+//! write the bindings to a crate or modules.
+//!
+//! SPDX-License-Identifier: Apache-2.0, MIT
+
+use alloy_sol_macro_expander::expand::expand;
+use alloy_sol_macro_input::{SolInput, SolInputKind};
+use eyre::{Context, Result};
+use proc_macro2::{Span, TokenStream};
+use std::{
+    fmt::Write,
+    path::{Path, PathBuf},
+};
+
+use crate::forge::fs;
+
+pub struct SolMacroGen {
+    pub path: PathBuf,
+    pub name: String,
+    pub expansion: Option<TokenStream>,
+}
+
+impl SolMacroGen {
+    pub fn new(path: PathBuf, name: String) -> Self {
+        Self {
+            path,
+            name,
+            expansion: None,
+        }
+    }
+
+    pub fn get_sol_input(&self) -> Result<SolInput> {
+        let path = self.path.to_string_lossy().into_owned();
+        let name = proc_macro2::Ident::new(&self.name, Span::call_site());
+        let tokens = quote::quote! {
+            #name,
+            #path
+        };
+
+        let sol_input: SolInput = syn::parse2(tokens).wrap_err("failed to parse input")?;
+
+        Ok(sol_input)
+    }
+}
+
+pub struct MultiSolMacroGen {
+    pub instances: Vec<SolMacroGen>,
+}
+
+impl MultiSolMacroGen {
+    pub fn new(instances: Vec<SolMacroGen>) -> Self {
+        Self { instances }
+    }
+
+    pub fn generate_bindings(&mut self) -> Result<()> {
+        for instance in &mut self.instances {
+            Self::generate_binding(instance).wrap_err_with(|| {
+                format!(
+                    "failed to generate bindings for {}:{}",
+                    instance.path.display(),
+                    instance.name
+                )
+            })?;
+        }
+
+        Ok(())
+    }
+
+    fn generate_binding(instance: &mut SolMacroGen) -> Result<()> {
+        let input = instance.get_sol_input()?.normalize_json()?;
+
+        let SolInput {
+            attrs: _,
+            path: _,
+            kind,
+        } = input;
+
+        let tokens = match kind {
+            SolInputKind::Sol(mut file) => {
+                let sol_attr: syn::Attribute = syn::parse_quote! {
+                    #[sol()]
+                };
+                file.attrs.push(sol_attr);
+                expand(file).wrap_err("failed to expand")?
+            }
+            _ => unreachable!(),
+        };
+
+        instance.expansion = Some(tokens);
+        Ok(())
+    }
+
+    pub fn write_to_module(&mut self, bindings_path: &Path, single_file: bool) -> Result<()> {
+        self.generate_bindings()?;
+
+        let _ = fs::create_dir_all(bindings_path);
+
+        let mut mod_contents = r#"#![allow(unused_imports, clippy::all, rustdoc::all)]
+//! This module contains the sol! generated bindings for solidity contracts.
+//! This is autogenerated code.
+//! Do not manually edit these files.
+//! These files may be overwritten by the codegen system at any time.
+ "# + .to_string(); + + for instance in &self.instances { + let name = instance.name.to_lowercase(); + if !single_file { + // Module + write_mod_name(&mut mod_contents, &name)?; + let mut contents = String::new(); + + write!(contents, "{}", instance.expansion.as_ref().unwrap())?; + let file = syn::parse_file(&contents)?; + + let contents = prettyplease::unparse(&file); + fs::write(bindings_path.join(format!("{name}.rs")), contents) + .wrap_err("Failed to write file")?; + } else { + // Single File + let mut contents = String::new(); + write!(contents, "{}\n\n", instance.expansion.as_ref().unwrap())?; + write!(mod_contents, "{contents}")?; + } + } + + let mod_path = bindings_path.join("mod.rs"); + let mod_file = syn::parse_file(&mod_contents)?; + let mod_contents = prettyplease::unparse(&mod_file); + + fs::write(mod_path, mod_contents).wrap_err("Failed to write mod.rs")?; + + Ok(()) + } +} + +fn write_mod_name(contents: &mut String, name: &str) -> Result<()> { + if syn::parse_str::(&format!("pub mod {name};")).is_ok() { + write!(contents, "pub mod {name};")?; + } else { + write!(contents, "pub mod r#{name};")?; + } + Ok(()) +} diff --git a/ipc-storage/sol-facade/crates/facade/forge/foundry_common/errors/fs.rs b/ipc-storage/sol-facade/crates/facade/forge/foundry_common/errors/fs.rs new file mode 100644 index 0000000000..cb434f2b6a --- /dev/null +++ b/ipc-storage/sol-facade/crates/facade/forge/foundry_common/errors/fs.rs @@ -0,0 +1,176 @@ +// Copyright 2022-2024 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT +//! Copied from https://github.com/foundry-rs/foundry/blob/60f0b692acae47a4933bb4a0bc4a29cab8831ba1/crates/common/src/errors/fs.rs +//! +//! SPDX-License-Identifier: Apache-2.0, MIT + +use std::{ + io, + path::{Path, PathBuf}, +}; + +#[allow(unused_imports)] +use std::fs::{self, File}; + +/// Various error variants for `fs` operations that serve as an addition to the io::Error which +/// does not provide any information about the path. +#[derive(Debug, thiserror::Error)] +#[allow(missing_docs)] +pub enum FsPathError { + /// Provides additional path context for [`fs::write`]. + #[error("failed to write to {path:?}: {source}")] + Write { source: io::Error, path: PathBuf }, + /// Provides additional path context for [`fs::read`]. + #[error("failed to read from {path:?}: {source}")] + Read { source: io::Error, path: PathBuf }, + /// Provides additional path context for [`fs::copy`]. + #[error("failed to copy from {from:?} to {to:?}: {source}")] + Copy { + source: io::Error, + from: PathBuf, + to: PathBuf, + }, + /// Provides additional path context for [`fs::read_link`]. + #[error("failed to read from {path:?}: {source}")] + ReadLink { source: io::Error, path: PathBuf }, + /// Provides additional path context for [`File::create`]. + #[error("failed to create file {path:?}: {source}")] + CreateFile { source: io::Error, path: PathBuf }, + /// Provides additional path context for [`fs::remove_file`]. + #[error("failed to remove file {path:?}: {source}")] + RemoveFile { source: io::Error, path: PathBuf }, + /// Provides additional path context for [`fs::create_dir`]. + #[error("failed to create dir {path:?}: {source}")] + CreateDir { source: io::Error, path: PathBuf }, + /// Provides additional path context for [`fs::remove_dir`]. + #[error("failed to remove dir {path:?}: {source}")] + RemoveDir { source: io::Error, path: PathBuf }, + /// Provides additional path context for [`File::open`]. 
+ #[error("failed to open file {path:?}: {source}")] + Open { source: io::Error, path: PathBuf }, + /// Provides additional path context for the file whose contents should be parsed as JSON. + #[error("failed to parse json file: {path:?}: {source}")] + ReadJson { + source: serde_json::Error, + path: PathBuf, + }, + /// Provides additional path context for the new JSON file. + #[error("failed to write to json file: {path:?}: {source}")] + WriteJson { + source: serde_json::Error, + path: PathBuf, + }, +} + +impl FsPathError { + /// Returns the complementary error variant for [`fs::write`]. + pub fn write(source: io::Error, path: impl Into) -> Self { + Self::Write { + source, + path: path.into(), + } + } + + /// Returns the complementary error variant for [`fs::read`]. + pub fn read(source: io::Error, path: impl Into) -> Self { + Self::Read { + source, + path: path.into(), + } + } + + /// Returns the complementary error variant for [`fs::copy`]. + pub fn copy(source: io::Error, from: impl Into, to: impl Into) -> Self { + Self::Copy { + source, + from: from.into(), + to: to.into(), + } + } + + /// Returns the complementary error variant for [`fs::read_link`]. + pub fn read_link(source: io::Error, path: impl Into) -> Self { + Self::ReadLink { + source, + path: path.into(), + } + } + + /// Returns the complementary error variant for [`File::create`]. + pub fn create_file(source: io::Error, path: impl Into) -> Self { + Self::CreateFile { + source, + path: path.into(), + } + } + + /// Returns the complementary error variant for [`fs::remove_file`]. + pub fn remove_file(source: io::Error, path: impl Into) -> Self { + Self::RemoveFile { + source, + path: path.into(), + } + } + + /// Returns the complementary error variant for [`fs::create_dir`]. + pub fn create_dir(source: io::Error, path: impl Into) -> Self { + Self::CreateDir { + source, + path: path.into(), + } + } + + /// Returns the complementary error variant for [`fs::remove_dir`]. + pub fn remove_dir(source: io::Error, path: impl Into) -> Self { + Self::RemoveDir { + source, + path: path.into(), + } + } + + /// Returns the complementary error variant for [`File::open`]. + pub fn open(source: io::Error, path: impl Into) -> Self { + Self::Open { + source, + path: path.into(), + } + } +} + +impl AsRef for FsPathError { + fn as_ref(&self) -> &Path { + match self { + Self::Write { path, .. } + | Self::Read { path, .. } + | Self::ReadLink { path, .. } + | Self::Copy { from: path, .. } + | Self::CreateDir { path, .. } + | Self::RemoveDir { path, .. } + | Self::CreateFile { path, .. } + | Self::RemoveFile { path, .. } + | Self::Open { path, .. } + | Self::ReadJson { path, .. } + | Self::WriteJson { path, .. } => path, + } + } +} + +impl From for io::Error { + fn from(value: FsPathError) -> Self { + match value { + FsPathError::Write { source, .. } + | FsPathError::Read { source, .. } + | FsPathError::ReadLink { source, .. } + | FsPathError::Copy { source, .. } + | FsPathError::CreateDir { source, .. } + | FsPathError::RemoveDir { source, .. } + | FsPathError::CreateFile { source, .. } + | FsPathError::RemoveFile { source, .. } + | FsPathError::Open { source, .. } => source, + + FsPathError::ReadJson { source, .. } | FsPathError::WriteJson { source, .. 
+                source.into()
+            }
+        }
+    }
+}
diff --git a/ipc-storage/sol-facade/crates/facade/forge/foundry_common/errors/mod.rs b/ipc-storage/sol-facade/crates/facade/forge/foundry_common/errors/mod.rs
new file mode 100644
index 0000000000..aa3fef5027
--- /dev/null
+++ b/ipc-storage/sol-facade/crates/facade/forge/foundry_common/errors/mod.rs
@@ -0,0 +1,4 @@
+// Copyright 2022-2024 Protocol Labs
+// SPDX-License-Identifier: Apache-2.0, MIT
+mod fs;
+pub use fs::FsPathError;
diff --git a/ipc-storage/sol-facade/crates/facade/forge/foundry_common/fs.rs b/ipc-storage/sol-facade/crates/facade/forge/foundry_common/fs.rs
new file mode 100644
index 0000000000..34b87edc9d
--- /dev/null
+++ b/ipc-storage/sol-facade/crates/facade/forge/foundry_common/fs.rs
@@ -0,0 +1,192 @@
+// Copyright 2022-2024 Protocol Labs
+// SPDX-License-Identifier: Apache-2.0, MIT
+//! Copied from https://github.com/foundry-rs/foundry/blob/60f0b692acae47a4933bb4a0bc4a29cab8831ba1/crates/common/src/fs.rs
+//!
+//! Contains various `std::fs` wrapper functions that also contain the target path in their errors.
+//!
+//! SPDX-License-Identifier: Apache-2.0, MIT
+
+use crate::forge::errors::FsPathError;
+use serde::{de::DeserializeOwned, Serialize};
+use std::{
+    fs::{self, File},
+    io::{BufWriter, Write},
+    path::{Component, Path, PathBuf},
+};
+
+/// The [`fs`](self) result type.
+pub type Result<T> = std::result::Result<T, FsPathError>;
+
+/// Wrapper for [`File::create`].
+pub fn create_file(path: impl AsRef<Path>) -> Result<File> {
+    let path = path.as_ref();
+    File::create(path).map_err(|err| FsPathError::create_file(err, path))
+}
+
+/// Wrapper for [`std::fs::remove_file`].
+pub fn remove_file(path: impl AsRef<Path>) -> Result<()> {
+    let path = path.as_ref();
+    fs::remove_file(path).map_err(|err| FsPathError::remove_file(err, path))
+}
+
+/// Wrapper for [`std::fs::read`].
+pub fn read(path: impl AsRef<Path>) -> Result<Vec<u8>> {
+    let path = path.as_ref();
+    fs::read(path).map_err(|err| FsPathError::read(err, path))
+}
+
+/// Wrapper for [`std::fs::read_link`].
+pub fn read_link(path: impl AsRef<Path>) -> Result<PathBuf> {
+    let path = path.as_ref();
+    fs::read_link(path).map_err(|err| FsPathError::read_link(err, path))
+}
+
+/// Wrapper for [`std::fs::read_to_string`].
+pub fn read_to_string(path: impl AsRef<Path>) -> Result<String> {
+    let path = path.as_ref();
+    fs::read_to_string(path).map_err(|err| FsPathError::read(err, path))
+}
+
+/// Reads the JSON file and deserializes it into the provided type.
+pub fn read_json_file<T: DeserializeOwned>(path: &Path) -> Result<T> {
+    // read the file into a byte array first
+    // https://github.com/serde-rs/json/issues/160
+    let s = read_to_string(path)?;
+    serde_json::from_str(&s).map_err(|source| FsPathError::ReadJson {
+        source,
+        path: path.into(),
+    })
+}
+
+/// Writes the object as a JSON object.
+pub fn write_json_file<T: Serialize>(path: &Path, obj: &T) -> Result<()> {
+    let file = create_file(path)?;
+    let mut writer = BufWriter::new(file);
+    serde_json::to_writer(&mut writer, obj).map_err(|source| FsPathError::WriteJson {
+        source,
+        path: path.into(),
+    })?;
+    writer.flush().map_err(|e| FsPathError::write(e, path))
+}
+
+/// Writes the object as a pretty JSON object.
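+///
+/// Illustrative usage only (the `Meta` type and the output path below are hypothetical and not
+/// part of this crate):
+///
+/// ```ignore
+/// #[derive(serde::Serialize)]
+/// struct Meta { name: String, version: u32 }
+///
+/// let meta = Meta { name: "facade".to_string(), version: 1 };
+/// // Pretty-printed JSON is written through a buffered writer and flushed before returning.
+/// write_pretty_json_file(Path::new("out/meta.json"), &meta)?;
+/// ```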
+pub fn write_pretty_json_file<T: Serialize>(path: &Path, obj: &T) -> Result<()> {
+    let file = create_file(path)?;
+    let mut writer = BufWriter::new(file);
+    serde_json::to_writer_pretty(&mut writer, obj).map_err(|source| FsPathError::WriteJson {
+        source,
+        path: path.into(),
+    })?;
+    writer.flush().map_err(|e| FsPathError::write(e, path))
+}
+
+/// Wrapper for `std::fs::write`
+pub fn write(path: impl AsRef<Path>, contents: impl AsRef<[u8]>) -> Result<()> {
+    let path = path.as_ref();
+    fs::write(path, contents).map_err(|err| FsPathError::write(err, path))
+}
+
+/// Wrapper for `std::fs::copy`
+pub fn copy(from: impl AsRef<Path>, to: impl AsRef<Path>) -> Result<u64> {
+    let from = from.as_ref();
+    let to = to.as_ref();
+    fs::copy(from, to).map_err(|err| FsPathError::copy(err, from, to))
+}
+
+/// Wrapper for `std::fs::create_dir`
+pub fn create_dir(path: impl AsRef<Path>) -> Result<()> {
+    let path = path.as_ref();
+    fs::create_dir(path).map_err(|err| FsPathError::create_dir(err, path))
+}
+
+/// Wrapper for `std::fs::create_dir_all`
+pub fn create_dir_all(path: impl AsRef<Path>) -> Result<()> {
+    let path = path.as_ref();
+    fs::create_dir_all(path).map_err(|err| FsPathError::create_dir(err, path))
+}
+
+/// Wrapper for `std::fs::remove_dir`
+pub fn remove_dir(path: impl AsRef<Path>) -> Result<()> {
+    let path = path.as_ref();
+    fs::remove_dir(path).map_err(|err| FsPathError::remove_dir(err, path))
+}
+
+/// Wrapper for `std::fs::remove_dir_all`
+pub fn remove_dir_all(path: impl AsRef<Path>) -> Result<()> {
+    let path = path.as_ref();
+    fs::remove_dir_all(path).map_err(|err| FsPathError::remove_dir(err, path))
+}
+
+/// Wrapper for `std::fs::File::open`
+pub fn open(path: impl AsRef<Path>) -> Result<File> {
+    let path = path.as_ref();
+    fs::File::open(path).map_err(|err| FsPathError::open(err, path))
+}
+
+/// Normalize a path, removing things like `.` and `..`.
+///
+/// NOTE: This does not resolve symlinks and does not touch the filesystem at all (unlike
+/// [`std::fs::canonicalize`])
+///
+/// ref:
+pub fn normalize_path(path: &Path) -> PathBuf {
+    let mut components = path.components().peekable();
+    let mut ret = if let Some(c @ Component::Prefix(..)) = components.peek().cloned() {
+        components.next();
+        PathBuf::from(c.as_os_str())
+    } else {
+        PathBuf::new()
+    };
+
+    for component in components {
+        match component {
+            Component::Prefix(..) => unreachable!(),
+            Component::RootDir => {
+                ret.push(component.as_os_str());
+            }
+            Component::CurDir => {}
+            Component::ParentDir => {
+                ret.pop();
+            }
+            Component::Normal(c) => {
+                ret.push(c);
+            }
+        }
+    }
+    ret
+}
+
+/// Returns an iterator over all files with the given extension under the `root` dir.
+pub fn files_with_ext<'a>(root: &Path, ext: &'a str) -> impl Iterator<Item = PathBuf> + 'a {
+    walkdir::WalkDir::new(root)
+        .sort_by_file_name()
+        .into_iter()
+        .filter_map(walkdir::Result::ok)
+        .filter(|e| e.file_type().is_file() && e.path().extension() == Some(ext.as_ref()))
+        .map(walkdir::DirEntry::into_path)
+}
+
+/// Returns an iterator over all JSON files under the `root` dir.
+pub fn json_files(root: &Path) -> impl Iterator<Item = PathBuf> {
+    files_with_ext(root, "json")
+}
+
+/// Canonicalize a path, returning an error if the path does not exist.
+///
+/// Mainly useful to apply canonicalization to paths obtained from project files but still error
+/// properly instead of flattening the errors.
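+///
+/// Illustrative usage only (the relative path below is hypothetical):
+///
+/// ```ignore
+/// let abs = canonicalize_path("./out")?;
+/// assert!(abs.is_absolute());
+/// ```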
+pub fn canonicalize_path(path: impl AsRef<Path>) -> std::io::Result<PathBuf> {
+    dunce::canonicalize(path)
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn test_normalize_path() {
+        let p = Path::new("/a/../file.txt");
+        let normalized = normalize_path(p);
+        assert_eq!(normalized, PathBuf::from("/file.txt"));
+    }
+}
diff --git a/ipc-storage/sol-facade/crates/facade/forge/foundry_common/mod.rs b/ipc-storage/sol-facade/crates/facade/forge/foundry_common/mod.rs
new file mode 100644
index 0000000000..08aea1435c
--- /dev/null
+++ b/ipc-storage/sol-facade/crates/facade/forge/foundry_common/mod.rs
@@ -0,0 +1,4 @@
+// Copyright 2022-2024 Protocol Labs
+// SPDX-License-Identifier: Apache-2.0, MIT
+pub mod errors;
+pub mod fs;
diff --git a/ipc-storage/sol-facade/crates/facade/forge/mod.rs b/ipc-storage/sol-facade/crates/facade/forge/mod.rs
new file mode 100644
index 0000000000..cff335bc05
--- /dev/null
+++ b/ipc-storage/sol-facade/crates/facade/forge/mod.rs
@@ -0,0 +1,9 @@
+// Copyright 2022-2024 Protocol Labs
+// SPDX-License-Identifier: Apache-2.0, MIT
+#![allow(dead_code)]
+
+mod forge_sol_macro_gen;
+mod foundry_common;
+
+pub use forge_sol_macro_gen::*;
+pub use foundry_common::*;
diff --git a/ipc-storage/sol-facade/crates/facade/src/blobreader_facade/iblobreaderfacade.rs b/ipc-storage/sol-facade/crates/facade/src/blobreader_facade/iblobreaderfacade.rs
new file mode 100644
index 0000000000..cbe34681bc
--- /dev/null
+++ b/ipc-storage/sol-facade/crates/facade/src/blobreader_facade/iblobreaderfacade.rs
@@ -0,0 +1,556 @@
+// Copyright 2022-2024 Protocol Labs
+// SPDX-License-Identifier: Apache-2.0, MIT
+/**
+
+Generated by the following Solidity interface...
+```solidity
+interface IBlobReaderFacade {
+    event ReadRequestClosed(bytes32 id);
+    event ReadRequestOpened(bytes32 id, bytes32 blobHash, uint256 readOffset, uint256 readLength, address callbackAddress, uint256 callbackMethod);
+    event ReadRequestPending(bytes32 id);
+}
+```
+
+...which was generated by the following JSON ABI:
+```json
+[
+  {
+    "type": "event",
+    "name": "ReadRequestClosed",
+    "inputs": [
+      {
+        "name": "id",
+        "type": "bytes32",
+        "indexed": false,
+        "internalType": "bytes32"
+      }
+    ],
+    "anonymous": false
+  },
+  {
+    "type": "event",
+    "name": "ReadRequestOpened",
+    "inputs": [
+      {
+        "name": "id",
+        "type": "bytes32",
+        "indexed": false,
+        "internalType": "bytes32"
+      },
+      {
+        "name": "blobHash",
+        "type": "bytes32",
+        "indexed": false,
+        "internalType": "bytes32"
+      },
+      {
+        "name": "readOffset",
+        "type": "uint256",
+        "indexed": false,
+        "internalType": "uint256"
+      },
+      {
+        "name": "readLength",
+        "type": "uint256",
+        "indexed": false,
+        "internalType": "uint256"
+      },
+      {
+        "name": "callbackAddress",
+        "type": "address",
+        "indexed": false,
+        "internalType": "address"
+      },
+      {
+        "name": "callbackMethod",
+        "type": "uint256",
+        "indexed": false,
+        "internalType": "uint256"
+      }
+    ],
+    "anonymous": false
+  },
+  {
+    "type": "event",
+    "name": "ReadRequestPending",
+    "inputs": [
+      {
+        "name": "id",
+        "type": "bytes32",
+        "indexed": false,
+        "internalType": "bytes32"
+      }
+    ],
+    "anonymous": false
+  }
+]
+```*/
+#[allow(
+    non_camel_case_types,
+    non_snake_case,
+    clippy::pub_underscore_fields,
+    clippy::style,
+    clippy::empty_structs_with_brackets
+)]
+pub mod IBlobReaderFacade {
+    use super::*;
+    use ::alloy_sol_types;
+    /// The creation / init bytecode of the contract.
+ /// + /// ```text + ///0x + /// ``` + #[rustfmt::skip] + #[allow(clippy::all)] + pub static BYTECODE: alloy_sol_types::private::Bytes = alloy_sol_types::private::Bytes::from_static( + b"", + ); + /// The runtime bytecode of the contract, as deployed on the network. + /// + /// ```text + ///0x + /// ``` + #[rustfmt::skip] + #[allow(clippy::all)] + pub static DEPLOYED_BYTECODE: alloy_sol_types::private::Bytes = alloy_sol_types::private::Bytes::from_static( + b"", + ); + /**Event with signature `ReadRequestClosed(bytes32)` and selector `0x9a8c63a9b921adb4983af5ca5dd1649500a411a34894cb1c0f9fab740b6f75ed`. + ```solidity + event ReadRequestClosed(bytes32 id); + ```*/ + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + #[derive(Clone)] + pub struct ReadRequestClosed { + #[allow(missing_docs)] + pub id: ::alloy_sol_types::private::FixedBytes<32>, + } + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + const _: () = { + use ::alloy_sol_types; + #[automatically_derived] + impl alloy_sol_types::SolEvent for ReadRequestClosed { + type DataTuple<'a> = (::alloy_sol_types::sol_data::FixedBytes<32>,); + type DataToken<'a> = as alloy_sol_types::SolType>::Token<'a>; + type TopicList = (alloy_sol_types::sol_data::FixedBytes<32>,); + const SIGNATURE: &'static str = "ReadRequestClosed(bytes32)"; + const SIGNATURE_HASH: alloy_sol_types::private::B256 = + alloy_sol_types::private::B256::new([ + 154u8, 140u8, 99u8, 169u8, 185u8, 33u8, 173u8, 180u8, 152u8, 58u8, 245u8, + 202u8, 93u8, 209u8, 100u8, 149u8, 0u8, 164u8, 17u8, 163u8, 72u8, 148u8, 203u8, + 28u8, 15u8, 159u8, 171u8, 116u8, 11u8, 111u8, 117u8, 237u8, + ]); + const ANONYMOUS: bool = false; + #[allow(unused_variables)] + #[inline] + fn new( + topics: ::RustType, + data: as alloy_sol_types::SolType>::RustType, + ) -> Self { + Self { id: data.0 } + } + #[inline] + fn check_signature( + topics: &::RustType, + ) -> alloy_sol_types::Result<()> { + if topics.0 != Self::SIGNATURE_HASH { + return Err(alloy_sol_types::Error::invalid_event_signature_hash( + Self::SIGNATURE, + topics.0, + Self::SIGNATURE_HASH, + )); + } + Ok(()) + } + #[inline] + fn tokenize_body(&self) -> Self::DataToken<'_> { + ( + <::alloy_sol_types::sol_data::FixedBytes< + 32, + > as alloy_sol_types::SolType>::tokenize(&self.id), + ) + } + #[inline] + fn topics(&self) -> ::RustType { + (Self::SIGNATURE_HASH.into(),) + } + #[inline] + fn encode_topics_raw( + &self, + out: &mut [alloy_sol_types::abi::token::WordToken], + ) -> alloy_sol_types::Result<()> { + if out.len() < ::COUNT { + return Err(alloy_sol_types::Error::Overrun); + } + out[0usize] = alloy_sol_types::abi::token::WordToken(Self::SIGNATURE_HASH); + Ok(()) + } + } + #[automatically_derived] + impl alloy_sol_types::private::IntoLogData for ReadRequestClosed { + fn to_log_data(&self) -> alloy_sol_types::private::LogData { + From::from(self) + } + fn into_log_data(self) -> alloy_sol_types::private::LogData { + From::from(&self) + } + } + #[automatically_derived] + impl From<&ReadRequestClosed> for alloy_sol_types::private::LogData { + #[inline] + fn from(this: &ReadRequestClosed) -> alloy_sol_types::private::LogData { + alloy_sol_types::SolEvent::encode_log_data(this) + } + } + }; + /**Event with signature `ReadRequestOpened(bytes32,bytes32,uint256,uint256,address,uint256)` and selector `0xd540be3f3450d40e6b169d0adac00a1e18cba05ee46950b4de6383b76c780f59`. 
+ ```solidity + event ReadRequestOpened(bytes32 id, bytes32 blobHash, uint256 readOffset, uint256 readLength, address callbackAddress, uint256 callbackMethod); + ```*/ + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + #[derive(Clone)] + pub struct ReadRequestOpened { + #[allow(missing_docs)] + pub id: ::alloy_sol_types::private::FixedBytes<32>, + #[allow(missing_docs)] + pub blobHash: ::alloy_sol_types::private::FixedBytes<32>, + #[allow(missing_docs)] + pub readOffset: ::alloy_sol_types::private::primitives::aliases::U256, + #[allow(missing_docs)] + pub readLength: ::alloy_sol_types::private::primitives::aliases::U256, + #[allow(missing_docs)] + pub callbackAddress: ::alloy_sol_types::private::Address, + #[allow(missing_docs)] + pub callbackMethod: ::alloy_sol_types::private::primitives::aliases::U256, + } + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + const _: () = { + use ::alloy_sol_types; + #[automatically_derived] + impl alloy_sol_types::SolEvent for ReadRequestOpened { + type DataTuple<'a> = ( + ::alloy_sol_types::sol_data::FixedBytes<32>, + ::alloy_sol_types::sol_data::FixedBytes<32>, + ::alloy_sol_types::sol_data::Uint<256>, + ::alloy_sol_types::sol_data::Uint<256>, + ::alloy_sol_types::sol_data::Address, + ::alloy_sol_types::sol_data::Uint<256>, + ); + type DataToken<'a> = as alloy_sol_types::SolType>::Token<'a>; + type TopicList = (alloy_sol_types::sol_data::FixedBytes<32>,); + const SIGNATURE: &'static str = + "ReadRequestOpened(bytes32,bytes32,uint256,uint256,address,uint256)"; + const SIGNATURE_HASH: alloy_sol_types::private::B256 = + alloy_sol_types::private::B256::new([ + 213u8, 64u8, 190u8, 63u8, 52u8, 80u8, 212u8, 14u8, 107u8, 22u8, 157u8, 10u8, + 218u8, 192u8, 10u8, 30u8, 24u8, 203u8, 160u8, 94u8, 228u8, 105u8, 80u8, 180u8, + 222u8, 99u8, 131u8, 183u8, 108u8, 120u8, 15u8, 89u8, + ]); + const ANONYMOUS: bool = false; + #[allow(unused_variables)] + #[inline] + fn new( + topics: ::RustType, + data: as alloy_sol_types::SolType>::RustType, + ) -> Self { + Self { + id: data.0, + blobHash: data.1, + readOffset: data.2, + readLength: data.3, + callbackAddress: data.4, + callbackMethod: data.5, + } + } + #[inline] + fn check_signature( + topics: &::RustType, + ) -> alloy_sol_types::Result<()> { + if topics.0 != Self::SIGNATURE_HASH { + return Err(alloy_sol_types::Error::invalid_event_signature_hash( + Self::SIGNATURE, + topics.0, + Self::SIGNATURE_HASH, + )); + } + Ok(()) + } + #[inline] + fn tokenize_body(&self) -> Self::DataToken<'_> { + ( + <::alloy_sol_types::sol_data::FixedBytes< + 32, + > as alloy_sol_types::SolType>::tokenize(&self.id), + <::alloy_sol_types::sol_data::FixedBytes< + 32, + > as alloy_sol_types::SolType>::tokenize(&self.blobHash), + <::alloy_sol_types::sol_data::Uint< + 256, + > as alloy_sol_types::SolType>::tokenize(&self.readOffset), + <::alloy_sol_types::sol_data::Uint< + 256, + > as alloy_sol_types::SolType>::tokenize(&self.readLength), + <::alloy_sol_types::sol_data::Address as alloy_sol_types::SolType>::tokenize( + &self.callbackAddress, + ), + <::alloy_sol_types::sol_data::Uint< + 256, + > as alloy_sol_types::SolType>::tokenize(&self.callbackMethod), + ) + } + #[inline] + fn topics(&self) -> ::RustType { + (Self::SIGNATURE_HASH.into(),) + } + #[inline] + fn encode_topics_raw( + &self, + out: &mut [alloy_sol_types::abi::token::WordToken], + ) -> alloy_sol_types::Result<()> { + if out.len() < ::COUNT { + return 
Err(alloy_sol_types::Error::Overrun); + } + out[0usize] = alloy_sol_types::abi::token::WordToken(Self::SIGNATURE_HASH); + Ok(()) + } + } + #[automatically_derived] + impl alloy_sol_types::private::IntoLogData for ReadRequestOpened { + fn to_log_data(&self) -> alloy_sol_types::private::LogData { + From::from(self) + } + fn into_log_data(self) -> alloy_sol_types::private::LogData { + From::from(&self) + } + } + #[automatically_derived] + impl From<&ReadRequestOpened> for alloy_sol_types::private::LogData { + #[inline] + fn from(this: &ReadRequestOpened) -> alloy_sol_types::private::LogData { + alloy_sol_types::SolEvent::encode_log_data(this) + } + } + }; + /**Event with signature `ReadRequestPending(bytes32)` and selector `0x6b9c9f2ecba3015efc370b4e57621c55d8c1f17805015860f0b337a0288512e4`. + ```solidity + event ReadRequestPending(bytes32 id); + ```*/ + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + #[derive(Clone)] + pub struct ReadRequestPending { + #[allow(missing_docs)] + pub id: ::alloy_sol_types::private::FixedBytes<32>, + } + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + const _: () = { + use ::alloy_sol_types; + #[automatically_derived] + impl alloy_sol_types::SolEvent for ReadRequestPending { + type DataTuple<'a> = (::alloy_sol_types::sol_data::FixedBytes<32>,); + type DataToken<'a> = as alloy_sol_types::SolType>::Token<'a>; + type TopicList = (alloy_sol_types::sol_data::FixedBytes<32>,); + const SIGNATURE: &'static str = "ReadRequestPending(bytes32)"; + const SIGNATURE_HASH: alloy_sol_types::private::B256 = + alloy_sol_types::private::B256::new([ + 107u8, 156u8, 159u8, 46u8, 203u8, 163u8, 1u8, 94u8, 252u8, 55u8, 11u8, 78u8, + 87u8, 98u8, 28u8, 85u8, 216u8, 193u8, 241u8, 120u8, 5u8, 1u8, 88u8, 96u8, + 240u8, 179u8, 55u8, 160u8, 40u8, 133u8, 18u8, 228u8, + ]); + const ANONYMOUS: bool = false; + #[allow(unused_variables)] + #[inline] + fn new( + topics: ::RustType, + data: as alloy_sol_types::SolType>::RustType, + ) -> Self { + Self { id: data.0 } + } + #[inline] + fn check_signature( + topics: &::RustType, + ) -> alloy_sol_types::Result<()> { + if topics.0 != Self::SIGNATURE_HASH { + return Err(alloy_sol_types::Error::invalid_event_signature_hash( + Self::SIGNATURE, + topics.0, + Self::SIGNATURE_HASH, + )); + } + Ok(()) + } + #[inline] + fn tokenize_body(&self) -> Self::DataToken<'_> { + ( + <::alloy_sol_types::sol_data::FixedBytes< + 32, + > as alloy_sol_types::SolType>::tokenize(&self.id), + ) + } + #[inline] + fn topics(&self) -> ::RustType { + (Self::SIGNATURE_HASH.into(),) + } + #[inline] + fn encode_topics_raw( + &self, + out: &mut [alloy_sol_types::abi::token::WordToken], + ) -> alloy_sol_types::Result<()> { + if out.len() < ::COUNT { + return Err(alloy_sol_types::Error::Overrun); + } + out[0usize] = alloy_sol_types::abi::token::WordToken(Self::SIGNATURE_HASH); + Ok(()) + } + } + #[automatically_derived] + impl alloy_sol_types::private::IntoLogData for ReadRequestPending { + fn to_log_data(&self) -> alloy_sol_types::private::LogData { + From::from(self) + } + fn into_log_data(self) -> alloy_sol_types::private::LogData { + From::from(&self) + } + } + #[automatically_derived] + impl From<&ReadRequestPending> for alloy_sol_types::private::LogData { + #[inline] + fn from(this: &ReadRequestPending) -> alloy_sol_types::private::LogData { + alloy_sol_types::SolEvent::encode_log_data(this) + } + } + }; + ///Container for all the [`IBlobReaderFacade`](self) events. 
+ pub enum IBlobReaderFacadeEvents { + #[allow(missing_docs)] + ReadRequestClosed(ReadRequestClosed), + #[allow(missing_docs)] + ReadRequestOpened(ReadRequestOpened), + #[allow(missing_docs)] + ReadRequestPending(ReadRequestPending), + } + #[automatically_derived] + impl IBlobReaderFacadeEvents { + /// All the selectors of this enum. + /// + /// Note that the selectors might not be in the same order as the variants. + /// No guarantees are made about the order of the selectors. + /// + /// Prefer using `SolInterface` methods instead. + pub const SELECTORS: &'static [[u8; 32usize]] = &[ + [ + 107u8, 156u8, 159u8, 46u8, 203u8, 163u8, 1u8, 94u8, 252u8, 55u8, 11u8, 78u8, 87u8, + 98u8, 28u8, 85u8, 216u8, 193u8, 241u8, 120u8, 5u8, 1u8, 88u8, 96u8, 240u8, 179u8, + 55u8, 160u8, 40u8, 133u8, 18u8, 228u8, + ], + [ + 154u8, 140u8, 99u8, 169u8, 185u8, 33u8, 173u8, 180u8, 152u8, 58u8, 245u8, 202u8, + 93u8, 209u8, 100u8, 149u8, 0u8, 164u8, 17u8, 163u8, 72u8, 148u8, 203u8, 28u8, 15u8, + 159u8, 171u8, 116u8, 11u8, 111u8, 117u8, 237u8, + ], + [ + 213u8, 64u8, 190u8, 63u8, 52u8, 80u8, 212u8, 14u8, 107u8, 22u8, 157u8, 10u8, 218u8, + 192u8, 10u8, 30u8, 24u8, 203u8, 160u8, 94u8, 228u8, 105u8, 80u8, 180u8, 222u8, + 99u8, 131u8, 183u8, 108u8, 120u8, 15u8, 89u8, + ], + ]; + } + #[automatically_derived] + impl alloy_sol_types::SolEventInterface for IBlobReaderFacadeEvents { + const NAME: &'static str = "IBlobReaderFacadeEvents"; + const COUNT: usize = 3usize; + fn decode_raw_log( + topics: &[alloy_sol_types::Word], + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + match topics.first().copied() { + Some(::SIGNATURE_HASH) => { + ::decode_raw_log( + topics, data, validate, + ) + .map(Self::ReadRequestClosed) + } + Some(::SIGNATURE_HASH) => { + ::decode_raw_log( + topics, data, validate, + ) + .map(Self::ReadRequestOpened) + } + Some(::SIGNATURE_HASH) => { + ::decode_raw_log( + topics, data, validate, + ) + .map(Self::ReadRequestPending) + } + _ => alloy_sol_types::private::Err(alloy_sol_types::Error::InvalidLog { + name: ::NAME, + log: alloy_sol_types::private::Box::new( + alloy_sol_types::private::LogData::new_unchecked( + topics.to_vec(), + data.to_vec().into(), + ), + ), + }), + } + } + } + #[automatically_derived] + impl alloy_sol_types::private::IntoLogData for IBlobReaderFacadeEvents { + fn to_log_data(&self) -> alloy_sol_types::private::LogData { + match self { + Self::ReadRequestClosed(inner) => { + alloy_sol_types::private::IntoLogData::to_log_data(inner) + } + Self::ReadRequestOpened(inner) => { + alloy_sol_types::private::IntoLogData::to_log_data(inner) + } + Self::ReadRequestPending(inner) => { + alloy_sol_types::private::IntoLogData::to_log_data(inner) + } + } + } + fn into_log_data(self) -> alloy_sol_types::private::LogData { + match self { + Self::ReadRequestClosed(inner) => { + alloy_sol_types::private::IntoLogData::into_log_data(inner) + } + Self::ReadRequestOpened(inner) => { + alloy_sol_types::private::IntoLogData::into_log_data(inner) + } + Self::ReadRequestPending(inner) => { + alloy_sol_types::private::IntoLogData::into_log_data(inner) + } + } + } + } +} diff --git a/ipc-storage/sol-facade/crates/facade/src/blobreader_facade/mod.rs b/ipc-storage/sol-facade/crates/facade/src/blobreader_facade/mod.rs new file mode 100644 index 0000000000..fd93434a4f --- /dev/null +++ b/ipc-storage/sol-facade/crates/facade/src/blobreader_facade/mod.rs @@ -0,0 +1,8 @@ +// Copyright 2022-2024 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT +#![allow(unused_imports, clippy::all, 
rustdoc::all)] +//! This module contains the sol! generated bindings for solidity contracts. +//! This is autogenerated code. +//! Do not manually edit these files. +//! These files may be overwritten by the codegen system at any time. +pub mod r#iblobreaderfacade; diff --git a/ipc-storage/sol-facade/crates/facade/src/blobs_facade/iblobsfacade.rs b/ipc-storage/sol-facade/crates/facade/src/blobs_facade/iblobsfacade.rs new file mode 100644 index 0000000000..259e3e5029 --- /dev/null +++ b/ipc-storage/sol-facade/crates/facade/src/blobs_facade/iblobsfacade.rs @@ -0,0 +1,3417 @@ +// Copyright 2022-2024 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT +/** + +Generated by the following Solidity interface... +```solidity +interface IBlobsFacade { + type BlobStatus is uint8; + struct Blob { + uint64 size; + bytes32 metadataHash; + Subscription[] subscriptions; + BlobStatus status; + } + struct SubnetStats { + uint256 balance; + uint64 capacityFree; + uint64 capacityUsed; + uint256 creditSold; + uint256 creditCommitted; + uint256 creditDebited; + uint256 tokenCreditRate; + uint64 numAccounts; + uint64 numBlobs; + uint64 numAdded; + uint64 bytesAdded; + uint64 numResolving; + uint64 bytesResolving; + } + struct Subscription { + string subscriptionId; + uint64 expiry; + } + struct TrimBlobExpiries { + uint32 processed; + bytes32 nextKey; + } + + event BlobAdded(address indexed subscriber, bytes32 hash, uint256 size, uint256 expiry, uint256 bytesUsed); + event BlobDeleted(address indexed subscriber, bytes32 hash, uint256 size, uint256 bytesReleased); + event BlobFinalized(address indexed subscriber, bytes32 hash, bool resolved); + event BlobPending(address indexed subscriber, bytes32 hash, bytes32 sourceId); + + function addBlob(address sponsor, bytes32 source, bytes32 blobHash, bytes32 metadataHash, string memory subscriptionId, uint64 size, uint64 ttl) external; + function deleteBlob(address subscriber, bytes32 blobHash, string memory subscriptionId) external; + function getBlob(bytes32 blobHash) external view returns (Blob memory blob); + function getStats() external view returns (SubnetStats memory stats); + function overwriteBlob(bytes32 oldHash, address sponsor, bytes32 source, bytes32 blobHash, bytes32 metadataHash, string memory subscriptionId, uint64 size, uint64 ttl) external; + function trimBlobExpiries(address subscriber, bytes32 startingHash, uint32 limit) external returns (TrimBlobExpiries memory); +} +``` + +...which was generated by the following JSON ABI: +```json +[ + { + "type": "function", + "name": "addBlob", + "inputs": [ + { + "name": "sponsor", + "type": "address", + "internalType": "address" + }, + { + "name": "source", + "type": "bytes32", + "internalType": "bytes32" + }, + { + "name": "blobHash", + "type": "bytes32", + "internalType": "bytes32" + }, + { + "name": "metadataHash", + "type": "bytes32", + "internalType": "bytes32" + }, + { + "name": "subscriptionId", + "type": "string", + "internalType": "string" + }, + { + "name": "size", + "type": "uint64", + "internalType": "uint64" + }, + { + "name": "ttl", + "type": "uint64", + "internalType": "uint64" + } + ], + "outputs": [], + "stateMutability": "nonpayable" + }, + { + "type": "function", + "name": "deleteBlob", + "inputs": [ + { + "name": "subscriber", + "type": "address", + "internalType": "address" + }, + { + "name": "blobHash", + "type": "bytes32", + "internalType": "bytes32" + }, + { + "name": "subscriptionId", + "type": "string", + "internalType": "string" + } + ], + "outputs": [], + "stateMutability": 
"nonpayable" + }, + { + "type": "function", + "name": "getBlob", + "inputs": [ + { + "name": "blobHash", + "type": "bytes32", + "internalType": "bytes32" + } + ], + "outputs": [ + { + "name": "blob", + "type": "tuple", + "internalType": "struct Blob", + "components": [ + { + "name": "size", + "type": "uint64", + "internalType": "uint64" + }, + { + "name": "metadataHash", + "type": "bytes32", + "internalType": "bytes32" + }, + { + "name": "subscriptions", + "type": "tuple[]", + "internalType": "struct Subscription[]", + "components": [ + { + "name": "subscriptionId", + "type": "string", + "internalType": "string" + }, + { + "name": "expiry", + "type": "uint64", + "internalType": "uint64" + } + ] + }, + { + "name": "status", + "type": "uint8", + "internalType": "enum BlobStatus" + } + ] + } + ], + "stateMutability": "view" + }, + { + "type": "function", + "name": "getStats", + "inputs": [], + "outputs": [ + { + "name": "stats", + "type": "tuple", + "internalType": "struct SubnetStats", + "components": [ + { + "name": "balance", + "type": "uint256", + "internalType": "uint256" + }, + { + "name": "capacityFree", + "type": "uint64", + "internalType": "uint64" + }, + { + "name": "capacityUsed", + "type": "uint64", + "internalType": "uint64" + }, + { + "name": "creditSold", + "type": "uint256", + "internalType": "uint256" + }, + { + "name": "creditCommitted", + "type": "uint256", + "internalType": "uint256" + }, + { + "name": "creditDebited", + "type": "uint256", + "internalType": "uint256" + }, + { + "name": "tokenCreditRate", + "type": "uint256", + "internalType": "uint256" + }, + { + "name": "numAccounts", + "type": "uint64", + "internalType": "uint64" + }, + { + "name": "numBlobs", + "type": "uint64", + "internalType": "uint64" + }, + { + "name": "numAdded", + "type": "uint64", + "internalType": "uint64" + }, + { + "name": "bytesAdded", + "type": "uint64", + "internalType": "uint64" + }, + { + "name": "numResolving", + "type": "uint64", + "internalType": "uint64" + }, + { + "name": "bytesResolving", + "type": "uint64", + "internalType": "uint64" + } + ] + } + ], + "stateMutability": "view" + }, + { + "type": "function", + "name": "overwriteBlob", + "inputs": [ + { + "name": "oldHash", + "type": "bytes32", + "internalType": "bytes32" + }, + { + "name": "sponsor", + "type": "address", + "internalType": "address" + }, + { + "name": "source", + "type": "bytes32", + "internalType": "bytes32" + }, + { + "name": "blobHash", + "type": "bytes32", + "internalType": "bytes32" + }, + { + "name": "metadataHash", + "type": "bytes32", + "internalType": "bytes32" + }, + { + "name": "subscriptionId", + "type": "string", + "internalType": "string" + }, + { + "name": "size", + "type": "uint64", + "internalType": "uint64" + }, + { + "name": "ttl", + "type": "uint64", + "internalType": "uint64" + } + ], + "outputs": [], + "stateMutability": "nonpayable" + }, + { + "type": "function", + "name": "trimBlobExpiries", + "inputs": [ + { + "name": "subscriber", + "type": "address", + "internalType": "address" + }, + { + "name": "startingHash", + "type": "bytes32", + "internalType": "bytes32" + }, + { + "name": "limit", + "type": "uint32", + "internalType": "uint32" + } + ], + "outputs": [ + { + "name": "", + "type": "tuple", + "internalType": "struct TrimBlobExpiries", + "components": [ + { + "name": "processed", + "type": "uint32", + "internalType": "uint32" + }, + { + "name": "nextKey", + "type": "bytes32", + "internalType": "bytes32" + } + ] + } + ], + "stateMutability": "nonpayable" + }, + { + "type": "event", + 
"name": "BlobAdded", + "inputs": [ + { + "name": "subscriber", + "type": "address", + "indexed": true, + "internalType": "address" + }, + { + "name": "hash", + "type": "bytes32", + "indexed": false, + "internalType": "bytes32" + }, + { + "name": "size", + "type": "uint256", + "indexed": false, + "internalType": "uint256" + }, + { + "name": "expiry", + "type": "uint256", + "indexed": false, + "internalType": "uint256" + }, + { + "name": "bytesUsed", + "type": "uint256", + "indexed": false, + "internalType": "uint256" + } + ], + "anonymous": false + }, + { + "type": "event", + "name": "BlobDeleted", + "inputs": [ + { + "name": "subscriber", + "type": "address", + "indexed": true, + "internalType": "address" + }, + { + "name": "hash", + "type": "bytes32", + "indexed": false, + "internalType": "bytes32" + }, + { + "name": "size", + "type": "uint256", + "indexed": false, + "internalType": "uint256" + }, + { + "name": "bytesReleased", + "type": "uint256", + "indexed": false, + "internalType": "uint256" + } + ], + "anonymous": false + }, + { + "type": "event", + "name": "BlobFinalized", + "inputs": [ + { + "name": "subscriber", + "type": "address", + "indexed": true, + "internalType": "address" + }, + { + "name": "hash", + "type": "bytes32", + "indexed": false, + "internalType": "bytes32" + }, + { + "name": "resolved", + "type": "bool", + "indexed": false, + "internalType": "bool" + } + ], + "anonymous": false + }, + { + "type": "event", + "name": "BlobPending", + "inputs": [ + { + "name": "subscriber", + "type": "address", + "indexed": true, + "internalType": "address" + }, + { + "name": "hash", + "type": "bytes32", + "indexed": false, + "internalType": "bytes32" + }, + { + "name": "sourceId", + "type": "bytes32", + "indexed": false, + "internalType": "bytes32" + } + ], + "anonymous": false + } +] +```*/ +#[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style, + clippy::empty_structs_with_brackets +)] +pub mod IBlobsFacade { + use super::*; + use ::alloy_sol_types; + /// The creation / init bytecode of the contract. + /// + /// ```text + ///0x + /// ``` + #[rustfmt::skip] + #[allow(clippy::all)] + pub static BYTECODE: alloy_sol_types::private::Bytes = alloy_sol_types::private::Bytes::from_static( + b"", + ); + /// The runtime bytecode of the contract, as deployed on the network. 
+ /// + /// ```text + ///0x + /// ``` + #[rustfmt::skip] + #[allow(clippy::all)] + pub static DEPLOYED_BYTECODE: alloy_sol_types::private::Bytes = alloy_sol_types::private::Bytes::from_static( + b"", + ); + #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct BlobStatus(u8); + const _: () = { + use ::alloy_sol_types; + #[automatically_derived] + impl alloy_sol_types::private::SolTypeValue for u8 { + #[inline] + fn stv_to_tokens( + &self, + ) -> <::alloy_sol_types::sol_data::Uint<8> as alloy_sol_types::SolType>::Token<'_> + { + alloy_sol_types::private::SolTypeValue::< + ::alloy_sol_types::sol_data::Uint<8>, + >::stv_to_tokens(self) + } + #[inline] + fn stv_eip712_data_word(&self) -> alloy_sol_types::Word { + <::alloy_sol_types::sol_data::Uint<8> as alloy_sol_types::SolType>::tokenize(self).0 + } + #[inline] + fn stv_abi_encode_packed_to(&self, out: &mut alloy_sol_types::private::Vec) { + <::alloy_sol_types::sol_data::Uint< + 8, + > as alloy_sol_types::SolType>::abi_encode_packed_to(self, out) + } + #[inline] + fn stv_abi_packed_encoded_size(&self) -> usize { + <::alloy_sol_types::sol_data::Uint<8> as alloy_sol_types::SolType>::abi_encoded_size( + self, + ) + } + } + #[automatically_derived] + impl BlobStatus { + /// The Solidity type name. + pub const NAME: &'static str = stringify!(@ name); + /// Convert from the underlying value type. + #[inline] + pub const fn from(value: u8) -> Self { + Self(value) + } + /// Return the underlying value. + #[inline] + pub const fn into(self) -> u8 { + self.0 + } + /// Return the single encoding of this value, delegating to the + /// underlying type. + #[inline] + pub fn abi_encode(&self) -> alloy_sol_types::private::Vec { + ::abi_encode(&self.0) + } + /// Return the packed encoding of this value, delegating to the + /// underlying type. 
+ #[inline] + pub fn abi_encode_packed(&self) -> alloy_sol_types::private::Vec { + ::abi_encode_packed(&self.0) + } + } + #[automatically_derived] + impl alloy_sol_types::SolType for BlobStatus { + type RustType = u8; + type Token<'a> = + <::alloy_sol_types::sol_data::Uint<8> as alloy_sol_types::SolType>::Token<'a>; + const SOL_NAME: &'static str = Self::NAME; + const ENCODED_SIZE: Option = + <::alloy_sol_types::sol_data::Uint<8> as alloy_sol_types::SolType>::ENCODED_SIZE; + const PACKED_ENCODED_SIZE: Option = <::alloy_sol_types::sol_data::Uint< + 8, + > as alloy_sol_types::SolType>::PACKED_ENCODED_SIZE; + #[inline] + fn valid_token(token: &Self::Token<'_>) -> bool { + Self::type_check(token).is_ok() + } + #[inline] + fn type_check(token: &Self::Token<'_>) -> alloy_sol_types::Result<()> { + <::alloy_sol_types::sol_data::Uint<8> as alloy_sol_types::SolType>::type_check( + token, + ) + } + #[inline] + fn detokenize(token: Self::Token<'_>) -> Self::RustType { + <::alloy_sol_types::sol_data::Uint<8> as alloy_sol_types::SolType>::detokenize( + token, + ) + } + } + #[automatically_derived] + impl alloy_sol_types::EventTopic for BlobStatus { + #[inline] + fn topic_preimage_length(rust: &Self::RustType) -> usize { + <::alloy_sol_types::sol_data::Uint< + 8, + > as alloy_sol_types::EventTopic>::topic_preimage_length(rust) + } + #[inline] + fn encode_topic_preimage( + rust: &Self::RustType, + out: &mut alloy_sol_types::private::Vec, + ) { + <::alloy_sol_types::sol_data::Uint< + 8, + > as alloy_sol_types::EventTopic>::encode_topic_preimage(rust, out) + } + #[inline] + fn encode_topic(rust: &Self::RustType) -> alloy_sol_types::abi::token::WordToken { + <::alloy_sol_types::sol_data::Uint<8> as alloy_sol_types::EventTopic>::encode_topic( + rust, + ) + } + } + }; + /**```solidity + struct Blob { uint64 size; bytes32 metadataHash; Subscription[] subscriptions; BlobStatus status; } + ```*/ + #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct Blob { + #[allow(missing_docs)] + pub size: u64, + #[allow(missing_docs)] + pub metadataHash: ::alloy_sol_types::private::FixedBytes<32>, + #[allow(missing_docs)] + pub subscriptions: + ::alloy_sol_types::private::Vec<::RustType>, + #[allow(missing_docs)] + pub status: ::RustType, + } + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + const _: () = { + use ::alloy_sol_types; + #[doc(hidden)] + type UnderlyingSolTuple<'a> = ( + ::alloy_sol_types::sol_data::Uint<64>, + ::alloy_sol_types::sol_data::FixedBytes<32>, + ::alloy_sol_types::sol_data::Array, + BlobStatus, + ); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = ( + u64, + ::alloy_sol_types::private::FixedBytes<32>, + ::alloy_sol_types::private::Vec<::RustType>, + ::RustType, + ); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: Blob) -> Self { + ( + value.size, + value.metadataHash, + value.subscriptions, + value.status, + ) + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for Blob { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self { + size: tuple.0, + metadataHash: tuple.1, + subscriptions: tuple.2, + status: tuple.3, + } + } + } + #[automatically_derived] + impl 
alloy_sol_types::SolValue for Blob { + type SolType = Self; + } + #[automatically_derived] + impl alloy_sol_types::private::SolTypeValue for Blob { + #[inline] + fn stv_to_tokens(&self) -> ::Token<'_> { + ( + <::alloy_sol_types::sol_data::Uint< + 64, + > as alloy_sol_types::SolType>::tokenize(&self.size), + <::alloy_sol_types::sol_data::FixedBytes< + 32, + > as alloy_sol_types::SolType>::tokenize(&self.metadataHash), + <::alloy_sol_types::sol_data::Array< + Subscription, + > as alloy_sol_types::SolType>::tokenize(&self.subscriptions), + ::tokenize(&self.status), + ) + } + #[inline] + fn stv_abi_encoded_size(&self) -> usize { + if let Some(size) = ::ENCODED_SIZE { + return size; + } + let tuple = + as ::core::convert::From>::from(self.clone()); + as alloy_sol_types::SolType>::abi_encoded_size(&tuple) + } + #[inline] + fn stv_eip712_data_word(&self) -> alloy_sol_types::Word { + ::eip712_hash_struct(self) + } + #[inline] + fn stv_abi_encode_packed_to(&self, out: &mut alloy_sol_types::private::Vec) { + let tuple = + as ::core::convert::From>::from(self.clone()); + as alloy_sol_types::SolType>::abi_encode_packed_to( + &tuple, out, + ) + } + #[inline] + fn stv_abi_packed_encoded_size(&self) -> usize { + if let Some(size) = ::PACKED_ENCODED_SIZE { + return size; + } + let tuple = + as ::core::convert::From>::from(self.clone()); + as alloy_sol_types::SolType>::abi_packed_encoded_size( + &tuple, + ) + } + } + #[automatically_derived] + impl alloy_sol_types::SolType for Blob { + type RustType = Self; + type Token<'a> = as alloy_sol_types::SolType>::Token<'a>; + const SOL_NAME: &'static str = ::NAME; + const ENCODED_SIZE: Option = + as alloy_sol_types::SolType>::ENCODED_SIZE; + const PACKED_ENCODED_SIZE: Option = + as alloy_sol_types::SolType>::PACKED_ENCODED_SIZE; + #[inline] + fn valid_token(token: &Self::Token<'_>) -> bool { + as alloy_sol_types::SolType>::valid_token(token) + } + #[inline] + fn detokenize(token: Self::Token<'_>) -> Self::RustType { + let tuple = as alloy_sol_types::SolType>::detokenize(token); + >>::from(tuple) + } + } + #[automatically_derived] + impl alloy_sol_types::SolStruct for Blob { + const NAME: &'static str = "Blob"; + #[inline] + fn eip712_root_type() -> alloy_sol_types::private::Cow<'static, str> { + alloy_sol_types::private::Cow::Borrowed( + "Blob(uint64 size,bytes32 metadataHash,Subscription[] subscriptions,uint8 status)", + ) + } + #[inline] + fn eip712_components( + ) -> alloy_sol_types::private::Vec> + { + let mut components = alloy_sol_types::private::Vec::with_capacity(1); + components.push(::eip712_root_type()); + components + .extend(::eip712_components()); + components + } + #[inline] + fn eip712_encode_data(&self) -> alloy_sol_types::private::Vec { + [ + <::alloy_sol_types::sol_data::Uint< + 64, + > as alloy_sol_types::SolType>::eip712_data_word(&self.size) + .0, + <::alloy_sol_types::sol_data::FixedBytes< + 32, + > as alloy_sol_types::SolType>::eip712_data_word(&self.metadataHash) + .0, + <::alloy_sol_types::sol_data::Array< + Subscription, + > as alloy_sol_types::SolType>::eip712_data_word(&self.subscriptions) + .0, + ::eip712_data_word( + &self.status, + ) + .0, + ] + .concat() + } + } + #[automatically_derived] + impl alloy_sol_types::EventTopic for Blob { + #[inline] + fn topic_preimage_length(rust: &Self::RustType) -> usize { + 0usize + + <::alloy_sol_types::sol_data::Uint< + 64, + > as alloy_sol_types::EventTopic>::topic_preimage_length(&rust.size) + + <::alloy_sol_types::sol_data::FixedBytes< + 32, + > as 
alloy_sol_types::EventTopic>::topic_preimage_length( + &rust.metadataHash, + ) + + <::alloy_sol_types::sol_data::Array< + Subscription, + > as alloy_sol_types::EventTopic>::topic_preimage_length( + &rust.subscriptions, + ) + + ::topic_preimage_length( + &rust.status, + ) + } + #[inline] + fn encode_topic_preimage( + rust: &Self::RustType, + out: &mut alloy_sol_types::private::Vec, + ) { + out.reserve(::topic_preimage_length(rust)); + <::alloy_sol_types::sol_data::Uint< + 64, + > as alloy_sol_types::EventTopic>::encode_topic_preimage( + &rust.size, + out, + ); + <::alloy_sol_types::sol_data::FixedBytes< + 32, + > as alloy_sol_types::EventTopic>::encode_topic_preimage( + &rust.metadataHash, + out, + ); + <::alloy_sol_types::sol_data::Array< + Subscription, + > as alloy_sol_types::EventTopic>::encode_topic_preimage( + &rust.subscriptions, + out, + ); + ::encode_topic_preimage( + &rust.status, + out, + ); + } + #[inline] + fn encode_topic(rust: &Self::RustType) -> alloy_sol_types::abi::token::WordToken { + let mut out = alloy_sol_types::private::Vec::new(); + ::encode_topic_preimage(rust, &mut out); + alloy_sol_types::abi::token::WordToken(alloy_sol_types::private::keccak256(out)) + } + } + }; + /**```solidity + struct SubnetStats { uint256 balance; uint64 capacityFree; uint64 capacityUsed; uint256 creditSold; uint256 creditCommitted; uint256 creditDebited; uint256 tokenCreditRate; uint64 numAccounts; uint64 numBlobs; uint64 numAdded; uint64 bytesAdded; uint64 numResolving; uint64 bytesResolving; } + ```*/ + #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct SubnetStats { + #[allow(missing_docs)] + pub balance: ::alloy_sol_types::private::primitives::aliases::U256, + #[allow(missing_docs)] + pub capacityFree: u64, + #[allow(missing_docs)] + pub capacityUsed: u64, + #[allow(missing_docs)] + pub creditSold: ::alloy_sol_types::private::primitives::aliases::U256, + #[allow(missing_docs)] + pub creditCommitted: ::alloy_sol_types::private::primitives::aliases::U256, + #[allow(missing_docs)] + pub creditDebited: ::alloy_sol_types::private::primitives::aliases::U256, + #[allow(missing_docs)] + pub tokenCreditRate: ::alloy_sol_types::private::primitives::aliases::U256, + #[allow(missing_docs)] + pub numAccounts: u64, + #[allow(missing_docs)] + pub numBlobs: u64, + #[allow(missing_docs)] + pub numAdded: u64, + #[allow(missing_docs)] + pub bytesAdded: u64, + #[allow(missing_docs)] + pub numResolving: u64, + #[allow(missing_docs)] + pub bytesResolving: u64, + } + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + const _: () = { + use ::alloy_sol_types; + #[doc(hidden)] + type UnderlyingSolTuple<'a> = ( + ::alloy_sol_types::sol_data::Uint<256>, + ::alloy_sol_types::sol_data::Uint<64>, + ::alloy_sol_types::sol_data::Uint<64>, + ::alloy_sol_types::sol_data::Uint<256>, + ::alloy_sol_types::sol_data::Uint<256>, + ::alloy_sol_types::sol_data::Uint<256>, + ::alloy_sol_types::sol_data::Uint<256>, + ::alloy_sol_types::sol_data::Uint<64>, + ::alloy_sol_types::sol_data::Uint<64>, + ::alloy_sol_types::sol_data::Uint<64>, + ::alloy_sol_types::sol_data::Uint<64>, + ::alloy_sol_types::sol_data::Uint<64>, + ::alloy_sol_types::sol_data::Uint<64>, + ); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = ( + ::alloy_sol_types::private::primitives::aliases::U256, + u64, + u64, + ::alloy_sol_types::private::primitives::aliases::U256, + ::alloy_sol_types::private::primitives::aliases::U256, + 
::alloy_sol_types::private::primitives::aliases::U256, + ::alloy_sol_types::private::primitives::aliases::U256, + u64, + u64, + u64, + u64, + u64, + u64, + ); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: SubnetStats) -> Self { + ( + value.balance, + value.capacityFree, + value.capacityUsed, + value.creditSold, + value.creditCommitted, + value.creditDebited, + value.tokenCreditRate, + value.numAccounts, + value.numBlobs, + value.numAdded, + value.bytesAdded, + value.numResolving, + value.bytesResolving, + ) + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for SubnetStats { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self { + balance: tuple.0, + capacityFree: tuple.1, + capacityUsed: tuple.2, + creditSold: tuple.3, + creditCommitted: tuple.4, + creditDebited: tuple.5, + tokenCreditRate: tuple.6, + numAccounts: tuple.7, + numBlobs: tuple.8, + numAdded: tuple.9, + bytesAdded: tuple.10, + numResolving: tuple.11, + bytesResolving: tuple.12, + } + } + } + #[automatically_derived] + impl alloy_sol_types::SolValue for SubnetStats { + type SolType = Self; + } + #[automatically_derived] + impl alloy_sol_types::private::SolTypeValue for SubnetStats { + #[inline] + fn stv_to_tokens(&self) -> ::Token<'_> { + ( + <::alloy_sol_types::sol_data::Uint<256> as alloy_sol_types::SolType>::tokenize( + &self.balance, + ), + <::alloy_sol_types::sol_data::Uint<64> as alloy_sol_types::SolType>::tokenize( + &self.capacityFree, + ), + <::alloy_sol_types::sol_data::Uint<64> as alloy_sol_types::SolType>::tokenize( + &self.capacityUsed, + ), + <::alloy_sol_types::sol_data::Uint<256> as alloy_sol_types::SolType>::tokenize( + &self.creditSold, + ), + <::alloy_sol_types::sol_data::Uint<256> as alloy_sol_types::SolType>::tokenize( + &self.creditCommitted, + ), + <::alloy_sol_types::sol_data::Uint<256> as alloy_sol_types::SolType>::tokenize( + &self.creditDebited, + ), + <::alloy_sol_types::sol_data::Uint<256> as alloy_sol_types::SolType>::tokenize( + &self.tokenCreditRate, + ), + <::alloy_sol_types::sol_data::Uint<64> as alloy_sol_types::SolType>::tokenize( + &self.numAccounts, + ), + <::alloy_sol_types::sol_data::Uint<64> as alloy_sol_types::SolType>::tokenize( + &self.numBlobs, + ), + <::alloy_sol_types::sol_data::Uint<64> as alloy_sol_types::SolType>::tokenize( + &self.numAdded, + ), + <::alloy_sol_types::sol_data::Uint<64> as alloy_sol_types::SolType>::tokenize( + &self.bytesAdded, + ), + <::alloy_sol_types::sol_data::Uint<64> as alloy_sol_types::SolType>::tokenize( + &self.numResolving, + ), + <::alloy_sol_types::sol_data::Uint<64> as alloy_sol_types::SolType>::tokenize( + &self.bytesResolving, + ), + ) + } + #[inline] + fn stv_abi_encoded_size(&self) -> usize { + if let Some(size) = ::ENCODED_SIZE { + return size; + } + let tuple = + as ::core::convert::From>::from(self.clone()); + as alloy_sol_types::SolType>::abi_encoded_size(&tuple) + } + #[inline] + fn stv_eip712_data_word(&self) -> alloy_sol_types::Word { + ::eip712_hash_struct(self) + } + #[inline] + fn stv_abi_encode_packed_to(&self, out: &mut alloy_sol_types::private::Vec) { + let tuple = + as ::core::convert::From>::from(self.clone()); + as alloy_sol_types::SolType>::abi_encode_packed_to( + &tuple, out, + ) + } + #[inline] + fn 
stv_abi_packed_encoded_size(&self) -> usize { + if let Some(size) = ::PACKED_ENCODED_SIZE { + return size; + } + let tuple = + as ::core::convert::From>::from(self.clone()); + as alloy_sol_types::SolType>::abi_packed_encoded_size( + &tuple, + ) + } + } + #[automatically_derived] + impl alloy_sol_types::SolType for SubnetStats { + type RustType = Self; + type Token<'a> = as alloy_sol_types::SolType>::Token<'a>; + const SOL_NAME: &'static str = ::NAME; + const ENCODED_SIZE: Option = + as alloy_sol_types::SolType>::ENCODED_SIZE; + const PACKED_ENCODED_SIZE: Option = + as alloy_sol_types::SolType>::PACKED_ENCODED_SIZE; + #[inline] + fn valid_token(token: &Self::Token<'_>) -> bool { + as alloy_sol_types::SolType>::valid_token(token) + } + #[inline] + fn detokenize(token: Self::Token<'_>) -> Self::RustType { + let tuple = as alloy_sol_types::SolType>::detokenize(token); + >>::from(tuple) + } + } + #[automatically_derived] + impl alloy_sol_types::SolStruct for SubnetStats { + const NAME: &'static str = "SubnetStats"; + #[inline] + fn eip712_root_type() -> alloy_sol_types::private::Cow<'static, str> { + alloy_sol_types::private::Cow::Borrowed( + "SubnetStats(uint256 balance,uint64 capacityFree,uint64 capacityUsed,uint256 creditSold,uint256 creditCommitted,uint256 creditDebited,uint256 tokenCreditRate,uint64 numAccounts,uint64 numBlobs,uint64 numAdded,uint64 bytesAdded,uint64 numResolving,uint64 bytesResolving)", + ) + } + #[inline] + fn eip712_components( + ) -> alloy_sol_types::private::Vec> + { + alloy_sol_types::private::Vec::new() + } + #[inline] + fn eip712_encode_type() -> alloy_sol_types::private::Cow<'static, str> { + ::eip712_root_type() + } + #[inline] + fn eip712_encode_data(&self) -> alloy_sol_types::private::Vec { + [ + <::alloy_sol_types::sol_data::Uint< + 256, + > as alloy_sol_types::SolType>::eip712_data_word(&self.balance) + .0, + <::alloy_sol_types::sol_data::Uint< + 64, + > as alloy_sol_types::SolType>::eip712_data_word(&self.capacityFree) + .0, + <::alloy_sol_types::sol_data::Uint< + 64, + > as alloy_sol_types::SolType>::eip712_data_word(&self.capacityUsed) + .0, + <::alloy_sol_types::sol_data::Uint< + 256, + > as alloy_sol_types::SolType>::eip712_data_word(&self.creditSold) + .0, + <::alloy_sol_types::sol_data::Uint< + 256, + > as alloy_sol_types::SolType>::eip712_data_word( + &self.creditCommitted, + ) + .0, + <::alloy_sol_types::sol_data::Uint< + 256, + > as alloy_sol_types::SolType>::eip712_data_word(&self.creditDebited) + .0, + <::alloy_sol_types::sol_data::Uint< + 256, + > as alloy_sol_types::SolType>::eip712_data_word( + &self.tokenCreditRate, + ) + .0, + <::alloy_sol_types::sol_data::Uint< + 64, + > as alloy_sol_types::SolType>::eip712_data_word(&self.numAccounts) + .0, + <::alloy_sol_types::sol_data::Uint< + 64, + > as alloy_sol_types::SolType>::eip712_data_word(&self.numBlobs) + .0, + <::alloy_sol_types::sol_data::Uint< + 64, + > as alloy_sol_types::SolType>::eip712_data_word(&self.numAdded) + .0, + <::alloy_sol_types::sol_data::Uint< + 64, + > as alloy_sol_types::SolType>::eip712_data_word(&self.bytesAdded) + .0, + <::alloy_sol_types::sol_data::Uint< + 64, + > as alloy_sol_types::SolType>::eip712_data_word(&self.numResolving) + .0, + <::alloy_sol_types::sol_data::Uint< + 64, + > as alloy_sol_types::SolType>::eip712_data_word( + &self.bytesResolving, + ) + .0, + ] + .concat() + } + } + #[automatically_derived] + impl alloy_sol_types::EventTopic for SubnetStats { + #[inline] + fn topic_preimage_length(rust: &Self::RustType) -> usize { + 0usize + + 
<::alloy_sol_types::sol_data::Uint< + 256, + > as alloy_sol_types::EventTopic>::topic_preimage_length( + &rust.balance, + ) + + <::alloy_sol_types::sol_data::Uint< + 64, + > as alloy_sol_types::EventTopic>::topic_preimage_length( + &rust.capacityFree, + ) + + <::alloy_sol_types::sol_data::Uint< + 64, + > as alloy_sol_types::EventTopic>::topic_preimage_length( + &rust.capacityUsed, + ) + + <::alloy_sol_types::sol_data::Uint< + 256, + > as alloy_sol_types::EventTopic>::topic_preimage_length( + &rust.creditSold, + ) + + <::alloy_sol_types::sol_data::Uint< + 256, + > as alloy_sol_types::EventTopic>::topic_preimage_length( + &rust.creditCommitted, + ) + + <::alloy_sol_types::sol_data::Uint< + 256, + > as alloy_sol_types::EventTopic>::topic_preimage_length( + &rust.creditDebited, + ) + + <::alloy_sol_types::sol_data::Uint< + 256, + > as alloy_sol_types::EventTopic>::topic_preimage_length( + &rust.tokenCreditRate, + ) + + <::alloy_sol_types::sol_data::Uint< + 64, + > as alloy_sol_types::EventTopic>::topic_preimage_length( + &rust.numAccounts, + ) + + <::alloy_sol_types::sol_data::Uint< + 64, + > as alloy_sol_types::EventTopic>::topic_preimage_length( + &rust.numBlobs, + ) + + <::alloy_sol_types::sol_data::Uint< + 64, + > as alloy_sol_types::EventTopic>::topic_preimage_length( + &rust.numAdded, + ) + + <::alloy_sol_types::sol_data::Uint< + 64, + > as alloy_sol_types::EventTopic>::topic_preimage_length( + &rust.bytesAdded, + ) + + <::alloy_sol_types::sol_data::Uint< + 64, + > as alloy_sol_types::EventTopic>::topic_preimage_length( + &rust.numResolving, + ) + + <::alloy_sol_types::sol_data::Uint< + 64, + > as alloy_sol_types::EventTopic>::topic_preimage_length( + &rust.bytesResolving, + ) + } + #[inline] + fn encode_topic_preimage( + rust: &Self::RustType, + out: &mut alloy_sol_types::private::Vec, + ) { + out.reserve(::topic_preimage_length(rust)); + <::alloy_sol_types::sol_data::Uint< + 256, + > as alloy_sol_types::EventTopic>::encode_topic_preimage( + &rust.balance, + out, + ); + <::alloy_sol_types::sol_data::Uint< + 64, + > as alloy_sol_types::EventTopic>::encode_topic_preimage( + &rust.capacityFree, + out, + ); + <::alloy_sol_types::sol_data::Uint< + 64, + > as alloy_sol_types::EventTopic>::encode_topic_preimage( + &rust.capacityUsed, + out, + ); + <::alloy_sol_types::sol_data::Uint< + 256, + > as alloy_sol_types::EventTopic>::encode_topic_preimage( + &rust.creditSold, + out, + ); + <::alloy_sol_types::sol_data::Uint< + 256, + > as alloy_sol_types::EventTopic>::encode_topic_preimage( + &rust.creditCommitted, + out, + ); + <::alloy_sol_types::sol_data::Uint< + 256, + > as alloy_sol_types::EventTopic>::encode_topic_preimage( + &rust.creditDebited, + out, + ); + <::alloy_sol_types::sol_data::Uint< + 256, + > as alloy_sol_types::EventTopic>::encode_topic_preimage( + &rust.tokenCreditRate, + out, + ); + <::alloy_sol_types::sol_data::Uint< + 64, + > as alloy_sol_types::EventTopic>::encode_topic_preimage( + &rust.numAccounts, + out, + ); + <::alloy_sol_types::sol_data::Uint< + 64, + > as alloy_sol_types::EventTopic>::encode_topic_preimage( + &rust.numBlobs, + out, + ); + <::alloy_sol_types::sol_data::Uint< + 64, + > as alloy_sol_types::EventTopic>::encode_topic_preimage( + &rust.numAdded, + out, + ); + <::alloy_sol_types::sol_data::Uint< + 64, + > as alloy_sol_types::EventTopic>::encode_topic_preimage( + &rust.bytesAdded, + out, + ); + <::alloy_sol_types::sol_data::Uint< + 64, + > as alloy_sol_types::EventTopic>::encode_topic_preimage( + &rust.numResolving, + out, + ); + 
<::alloy_sol_types::sol_data::Uint< + 64, + > as alloy_sol_types::EventTopic>::encode_topic_preimage( + &rust.bytesResolving, + out, + ); + } + #[inline] + fn encode_topic(rust: &Self::RustType) -> alloy_sol_types::abi::token::WordToken { + let mut out = alloy_sol_types::private::Vec::new(); + ::encode_topic_preimage(rust, &mut out); + alloy_sol_types::abi::token::WordToken(alloy_sol_types::private::keccak256(out)) + } + } + }; + /**```solidity + struct Subscription { string subscriptionId; uint64 expiry; } + ```*/ + #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct Subscription { + #[allow(missing_docs)] + pub subscriptionId: ::alloy_sol_types::private::String, + #[allow(missing_docs)] + pub expiry: u64, + } + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + const _: () = { + use ::alloy_sol_types; + #[doc(hidden)] + type UnderlyingSolTuple<'a> = ( + ::alloy_sol_types::sol_data::String, + ::alloy_sol_types::sol_data::Uint<64>, + ); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = (::alloy_sol_types::private::String, u64); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: Subscription) -> Self { + (value.subscriptionId, value.expiry) + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for Subscription { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self { + subscriptionId: tuple.0, + expiry: tuple.1, + } + } + } + #[automatically_derived] + impl alloy_sol_types::SolValue for Subscription { + type SolType = Self; + } + #[automatically_derived] + impl alloy_sol_types::private::SolTypeValue for Subscription { + #[inline] + fn stv_to_tokens(&self) -> ::Token<'_> { + ( + <::alloy_sol_types::sol_data::String as alloy_sol_types::SolType>::tokenize( + &self.subscriptionId, + ), + <::alloy_sol_types::sol_data::Uint<64> as alloy_sol_types::SolType>::tokenize( + &self.expiry, + ), + ) + } + #[inline] + fn stv_abi_encoded_size(&self) -> usize { + if let Some(size) = ::ENCODED_SIZE { + return size; + } + let tuple = + as ::core::convert::From>::from(self.clone()); + as alloy_sol_types::SolType>::abi_encoded_size(&tuple) + } + #[inline] + fn stv_eip712_data_word(&self) -> alloy_sol_types::Word { + ::eip712_hash_struct(self) + } + #[inline] + fn stv_abi_encode_packed_to(&self, out: &mut alloy_sol_types::private::Vec) { + let tuple = + as ::core::convert::From>::from(self.clone()); + as alloy_sol_types::SolType>::abi_encode_packed_to( + &tuple, out, + ) + } + #[inline] + fn stv_abi_packed_encoded_size(&self) -> usize { + if let Some(size) = ::PACKED_ENCODED_SIZE { + return size; + } + let tuple = + as ::core::convert::From>::from(self.clone()); + as alloy_sol_types::SolType>::abi_packed_encoded_size( + &tuple, + ) + } + } + #[automatically_derived] + impl alloy_sol_types::SolType for Subscription { + type RustType = Self; + type Token<'a> = as alloy_sol_types::SolType>::Token<'a>; + const SOL_NAME: &'static str = ::NAME; + const ENCODED_SIZE: Option = + as alloy_sol_types::SolType>::ENCODED_SIZE; + const PACKED_ENCODED_SIZE: Option = + as alloy_sol_types::SolType>::PACKED_ENCODED_SIZE; + #[inline] + fn valid_token(token: &Self::Token<'_>) -> bool { + as 
alloy_sol_types::SolType>::valid_token(token) + } + #[inline] + fn detokenize(token: Self::Token<'_>) -> Self::RustType { + let tuple = as alloy_sol_types::SolType>::detokenize(token); + >>::from(tuple) + } + } + #[automatically_derived] + impl alloy_sol_types::SolStruct for Subscription { + const NAME: &'static str = "Subscription"; + #[inline] + fn eip712_root_type() -> alloy_sol_types::private::Cow<'static, str> { + alloy_sol_types::private::Cow::Borrowed( + "Subscription(string subscriptionId,uint64 expiry)", + ) + } + #[inline] + fn eip712_components( + ) -> alloy_sol_types::private::Vec> + { + alloy_sol_types::private::Vec::new() + } + #[inline] + fn eip712_encode_type() -> alloy_sol_types::private::Cow<'static, str> { + ::eip712_root_type() + } + #[inline] + fn eip712_encode_data(&self) -> alloy_sol_types::private::Vec { + [ + <::alloy_sol_types::sol_data::String as alloy_sol_types::SolType>::eip712_data_word( + &self.subscriptionId, + ) + .0, + <::alloy_sol_types::sol_data::Uint< + 64, + > as alloy_sol_types::SolType>::eip712_data_word(&self.expiry) + .0, + ] + .concat() + } + } + #[automatically_derived] + impl alloy_sol_types::EventTopic for Subscription { + #[inline] + fn topic_preimage_length(rust: &Self::RustType) -> usize { + 0usize + + <::alloy_sol_types::sol_data::String as alloy_sol_types::EventTopic>::topic_preimage_length( + &rust.subscriptionId, + ) + + <::alloy_sol_types::sol_data::Uint< + 64, + > as alloy_sol_types::EventTopic>::topic_preimage_length( + &rust.expiry, + ) + } + #[inline] + fn encode_topic_preimage( + rust: &Self::RustType, + out: &mut alloy_sol_types::private::Vec, + ) { + out.reserve(::topic_preimage_length(rust)); + <::alloy_sol_types::sol_data::String as alloy_sol_types::EventTopic>::encode_topic_preimage( + &rust.subscriptionId, + out, + ); + <::alloy_sol_types::sol_data::Uint< + 64, + > as alloy_sol_types::EventTopic>::encode_topic_preimage( + &rust.expiry, + out, + ); + } + #[inline] + fn encode_topic(rust: &Self::RustType) -> alloy_sol_types::abi::token::WordToken { + let mut out = alloy_sol_types::private::Vec::new(); + ::encode_topic_preimage(rust, &mut out); + alloy_sol_types::abi::token::WordToken(alloy_sol_types::private::keccak256(out)) + } + } + }; + /**```solidity + struct TrimBlobExpiries { uint32 processed; bytes32 nextKey; } + ```*/ + #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct TrimBlobExpiries { + #[allow(missing_docs)] + pub processed: u32, + #[allow(missing_docs)] + pub nextKey: ::alloy_sol_types::private::FixedBytes<32>, + } + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + const _: () = { + use ::alloy_sol_types; + #[doc(hidden)] + type UnderlyingSolTuple<'a> = ( + ::alloy_sol_types::sol_data::Uint<32>, + ::alloy_sol_types::sol_data::FixedBytes<32>, + ); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = (u32, ::alloy_sol_types::private::FixedBytes<32>); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: TrimBlobExpiries) -> Self { + (value.processed, value.nextKey) + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for TrimBlobExpiries { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self 
{ + processed: tuple.0, + nextKey: tuple.1, + } + } + } + #[automatically_derived] + impl alloy_sol_types::SolValue for TrimBlobExpiries { + type SolType = Self; + } + #[automatically_derived] + impl alloy_sol_types::private::SolTypeValue for TrimBlobExpiries { + #[inline] + fn stv_to_tokens(&self) -> ::Token<'_> { + ( + <::alloy_sol_types::sol_data::Uint< + 32, + > as alloy_sol_types::SolType>::tokenize(&self.processed), + <::alloy_sol_types::sol_data::FixedBytes< + 32, + > as alloy_sol_types::SolType>::tokenize(&self.nextKey), + ) + } + #[inline] + fn stv_abi_encoded_size(&self) -> usize { + if let Some(size) = ::ENCODED_SIZE { + return size; + } + let tuple = + as ::core::convert::From>::from(self.clone()); + as alloy_sol_types::SolType>::abi_encoded_size(&tuple) + } + #[inline] + fn stv_eip712_data_word(&self) -> alloy_sol_types::Word { + ::eip712_hash_struct(self) + } + #[inline] + fn stv_abi_encode_packed_to(&self, out: &mut alloy_sol_types::private::Vec) { + let tuple = + as ::core::convert::From>::from(self.clone()); + as alloy_sol_types::SolType>::abi_encode_packed_to( + &tuple, out, + ) + } + #[inline] + fn stv_abi_packed_encoded_size(&self) -> usize { + if let Some(size) = ::PACKED_ENCODED_SIZE { + return size; + } + let tuple = + as ::core::convert::From>::from(self.clone()); + as alloy_sol_types::SolType>::abi_packed_encoded_size( + &tuple, + ) + } + } + #[automatically_derived] + impl alloy_sol_types::SolType for TrimBlobExpiries { + type RustType = Self; + type Token<'a> = as alloy_sol_types::SolType>::Token<'a>; + const SOL_NAME: &'static str = ::NAME; + const ENCODED_SIZE: Option = + as alloy_sol_types::SolType>::ENCODED_SIZE; + const PACKED_ENCODED_SIZE: Option = + as alloy_sol_types::SolType>::PACKED_ENCODED_SIZE; + #[inline] + fn valid_token(token: &Self::Token<'_>) -> bool { + as alloy_sol_types::SolType>::valid_token(token) + } + #[inline] + fn detokenize(token: Self::Token<'_>) -> Self::RustType { + let tuple = as alloy_sol_types::SolType>::detokenize(token); + >>::from(tuple) + } + } + #[automatically_derived] + impl alloy_sol_types::SolStruct for TrimBlobExpiries { + const NAME: &'static str = "TrimBlobExpiries"; + #[inline] + fn eip712_root_type() -> alloy_sol_types::private::Cow<'static, str> { + alloy_sol_types::private::Cow::Borrowed( + "TrimBlobExpiries(uint32 processed,bytes32 nextKey)", + ) + } + #[inline] + fn eip712_components( + ) -> alloy_sol_types::private::Vec> + { + alloy_sol_types::private::Vec::new() + } + #[inline] + fn eip712_encode_type() -> alloy_sol_types::private::Cow<'static, str> { + ::eip712_root_type() + } + #[inline] + fn eip712_encode_data(&self) -> alloy_sol_types::private::Vec { + [ + <::alloy_sol_types::sol_data::Uint< + 32, + > as alloy_sol_types::SolType>::eip712_data_word(&self.processed) + .0, + <::alloy_sol_types::sol_data::FixedBytes< + 32, + > as alloy_sol_types::SolType>::eip712_data_word(&self.nextKey) + .0, + ] + .concat() + } + } + #[automatically_derived] + impl alloy_sol_types::EventTopic for TrimBlobExpiries { + #[inline] + fn topic_preimage_length(rust: &Self::RustType) -> usize { + 0usize + + <::alloy_sol_types::sol_data::Uint< + 32, + > as alloy_sol_types::EventTopic>::topic_preimage_length( + &rust.processed, + ) + + <::alloy_sol_types::sol_data::FixedBytes< + 32, + > as alloy_sol_types::EventTopic>::topic_preimage_length( + &rust.nextKey, + ) + } + #[inline] + fn encode_topic_preimage( + rust: &Self::RustType, + out: &mut alloy_sol_types::private::Vec, + ) { + out.reserve(::topic_preimage_length(rust)); + 
<::alloy_sol_types::sol_data::Uint< + 32, + > as alloy_sol_types::EventTopic>::encode_topic_preimage( + &rust.processed, + out, + ); + <::alloy_sol_types::sol_data::FixedBytes< + 32, + > as alloy_sol_types::EventTopic>::encode_topic_preimage( + &rust.nextKey, + out, + ); + } + #[inline] + fn encode_topic(rust: &Self::RustType) -> alloy_sol_types::abi::token::WordToken { + let mut out = alloy_sol_types::private::Vec::new(); + ::encode_topic_preimage(rust, &mut out); + alloy_sol_types::abi::token::WordToken(alloy_sol_types::private::keccak256(out)) + } + } + }; + /**Event with signature `BlobAdded(address,bytes32,uint256,uint256,uint256)` and selector `0xd42c7814518f1b7f5919557d327e88cddb7b02fc91085b402e94083243a06a8d`. + ```solidity + event BlobAdded(address indexed subscriber, bytes32 hash, uint256 size, uint256 expiry, uint256 bytesUsed); + ```*/ + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + #[derive(Clone)] + pub struct BlobAdded { + #[allow(missing_docs)] + pub subscriber: ::alloy_sol_types::private::Address, + #[allow(missing_docs)] + pub hash: ::alloy_sol_types::private::FixedBytes<32>, + #[allow(missing_docs)] + pub size: ::alloy_sol_types::private::primitives::aliases::U256, + #[allow(missing_docs)] + pub expiry: ::alloy_sol_types::private::primitives::aliases::U256, + #[allow(missing_docs)] + pub bytesUsed: ::alloy_sol_types::private::primitives::aliases::U256, + } + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + const _: () = { + use ::alloy_sol_types; + #[automatically_derived] + impl alloy_sol_types::SolEvent for BlobAdded { + type DataTuple<'a> = ( + ::alloy_sol_types::sol_data::FixedBytes<32>, + ::alloy_sol_types::sol_data::Uint<256>, + ::alloy_sol_types::sol_data::Uint<256>, + ::alloy_sol_types::sol_data::Uint<256>, + ); + type DataToken<'a> = as alloy_sol_types::SolType>::Token<'a>; + type TopicList = ( + alloy_sol_types::sol_data::FixedBytes<32>, + ::alloy_sol_types::sol_data::Address, + ); + const SIGNATURE: &'static str = "BlobAdded(address,bytes32,uint256,uint256,uint256)"; + const SIGNATURE_HASH: alloy_sol_types::private::B256 = + alloy_sol_types::private::B256::new([ + 212u8, 44u8, 120u8, 20u8, 81u8, 143u8, 27u8, 127u8, 89u8, 25u8, 85u8, 125u8, + 50u8, 126u8, 136u8, 205u8, 219u8, 123u8, 2u8, 252u8, 145u8, 8u8, 91u8, 64u8, + 46u8, 148u8, 8u8, 50u8, 67u8, 160u8, 106u8, 141u8, + ]); + const ANONYMOUS: bool = false; + #[allow(unused_variables)] + #[inline] + fn new( + topics: ::RustType, + data: as alloy_sol_types::SolType>::RustType, + ) -> Self { + Self { + subscriber: topics.1, + hash: data.0, + size: data.1, + expiry: data.2, + bytesUsed: data.3, + } + } + #[inline] + fn check_signature( + topics: &::RustType, + ) -> alloy_sol_types::Result<()> { + if topics.0 != Self::SIGNATURE_HASH { + return Err(alloy_sol_types::Error::invalid_event_signature_hash( + Self::SIGNATURE, + topics.0, + Self::SIGNATURE_HASH, + )); + } + Ok(()) + } + #[inline] + fn tokenize_body(&self) -> Self::DataToken<'_> { + ( + <::alloy_sol_types::sol_data::FixedBytes< + 32, + > as alloy_sol_types::SolType>::tokenize(&self.hash), + <::alloy_sol_types::sol_data::Uint< + 256, + > as alloy_sol_types::SolType>::tokenize(&self.size), + <::alloy_sol_types::sol_data::Uint< + 256, + > as alloy_sol_types::SolType>::tokenize(&self.expiry), + <::alloy_sol_types::sol_data::Uint< + 256, + > as alloy_sol_types::SolType>::tokenize(&self.bytesUsed), + ) + } + #[inline] + fn topics(&self) -> 
::RustType { + (Self::SIGNATURE_HASH.into(), self.subscriber.clone()) + } + #[inline] + fn encode_topics_raw( + &self, + out: &mut [alloy_sol_types::abi::token::WordToken], + ) -> alloy_sol_types::Result<()> { + if out.len() < ::COUNT { + return Err(alloy_sol_types::Error::Overrun); + } + out[0usize] = alloy_sol_types::abi::token::WordToken(Self::SIGNATURE_HASH); + out[1usize] = <::alloy_sol_types::sol_data::Address as alloy_sol_types::EventTopic>::encode_topic( + &self.subscriber, + ); + Ok(()) + } + } + #[automatically_derived] + impl alloy_sol_types::private::IntoLogData for BlobAdded { + fn to_log_data(&self) -> alloy_sol_types::private::LogData { + From::from(self) + } + fn into_log_data(self) -> alloy_sol_types::private::LogData { + From::from(&self) + } + } + #[automatically_derived] + impl From<&BlobAdded> for alloy_sol_types::private::LogData { + #[inline] + fn from(this: &BlobAdded) -> alloy_sol_types::private::LogData { + alloy_sol_types::SolEvent::encode_log_data(this) + } + } + }; + /**Event with signature `BlobDeleted(address,bytes32,uint256,uint256)` and selector `0x2e6567b73082b547dc70b1e1697dc20d2c21c44915c3af4efd6ce7cc9905a1ce`. + ```solidity + event BlobDeleted(address indexed subscriber, bytes32 hash, uint256 size, uint256 bytesReleased); + ```*/ + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + #[derive(Clone)] + pub struct BlobDeleted { + #[allow(missing_docs)] + pub subscriber: ::alloy_sol_types::private::Address, + #[allow(missing_docs)] + pub hash: ::alloy_sol_types::private::FixedBytes<32>, + #[allow(missing_docs)] + pub size: ::alloy_sol_types::private::primitives::aliases::U256, + #[allow(missing_docs)] + pub bytesReleased: ::alloy_sol_types::private::primitives::aliases::U256, + } + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + const _: () = { + use ::alloy_sol_types; + #[automatically_derived] + impl alloy_sol_types::SolEvent for BlobDeleted { + type DataTuple<'a> = ( + ::alloy_sol_types::sol_data::FixedBytes<32>, + ::alloy_sol_types::sol_data::Uint<256>, + ::alloy_sol_types::sol_data::Uint<256>, + ); + type DataToken<'a> = as alloy_sol_types::SolType>::Token<'a>; + type TopicList = ( + alloy_sol_types::sol_data::FixedBytes<32>, + ::alloy_sol_types::sol_data::Address, + ); + const SIGNATURE: &'static str = "BlobDeleted(address,bytes32,uint256,uint256)"; + const SIGNATURE_HASH: alloy_sol_types::private::B256 = + alloy_sol_types::private::B256::new([ + 46u8, 101u8, 103u8, 183u8, 48u8, 130u8, 181u8, 71u8, 220u8, 112u8, 177u8, + 225u8, 105u8, 125u8, 194u8, 13u8, 44u8, 33u8, 196u8, 73u8, 21u8, 195u8, 175u8, + 78u8, 253u8, 108u8, 231u8, 204u8, 153u8, 5u8, 161u8, 206u8, + ]); + const ANONYMOUS: bool = false; + #[allow(unused_variables)] + #[inline] + fn new( + topics: ::RustType, + data: as alloy_sol_types::SolType>::RustType, + ) -> Self { + Self { + subscriber: topics.1, + hash: data.0, + size: data.1, + bytesReleased: data.2, + } + } + #[inline] + fn check_signature( + topics: &::RustType, + ) -> alloy_sol_types::Result<()> { + if topics.0 != Self::SIGNATURE_HASH { + return Err(alloy_sol_types::Error::invalid_event_signature_hash( + Self::SIGNATURE, + topics.0, + Self::SIGNATURE_HASH, + )); + } + Ok(()) + } + #[inline] + fn tokenize_body(&self) -> Self::DataToken<'_> { + ( + <::alloy_sol_types::sol_data::FixedBytes< + 32, + > as alloy_sol_types::SolType>::tokenize(&self.hash), + <::alloy_sol_types::sol_data::Uint< + 256, + > as 
alloy_sol_types::SolType>::tokenize(&self.size), + <::alloy_sol_types::sol_data::Uint< + 256, + > as alloy_sol_types::SolType>::tokenize(&self.bytesReleased), + ) + } + #[inline] + fn topics(&self) -> ::RustType { + (Self::SIGNATURE_HASH.into(), self.subscriber.clone()) + } + #[inline] + fn encode_topics_raw( + &self, + out: &mut [alloy_sol_types::abi::token::WordToken], + ) -> alloy_sol_types::Result<()> { + if out.len() < ::COUNT { + return Err(alloy_sol_types::Error::Overrun); + } + out[0usize] = alloy_sol_types::abi::token::WordToken(Self::SIGNATURE_HASH); + out[1usize] = <::alloy_sol_types::sol_data::Address as alloy_sol_types::EventTopic>::encode_topic( + &self.subscriber, + ); + Ok(()) + } + } + #[automatically_derived] + impl alloy_sol_types::private::IntoLogData for BlobDeleted { + fn to_log_data(&self) -> alloy_sol_types::private::LogData { + From::from(self) + } + fn into_log_data(self) -> alloy_sol_types::private::LogData { + From::from(&self) + } + } + #[automatically_derived] + impl From<&BlobDeleted> for alloy_sol_types::private::LogData { + #[inline] + fn from(this: &BlobDeleted) -> alloy_sol_types::private::LogData { + alloy_sol_types::SolEvent::encode_log_data(this) + } + } + }; + /**Event with signature `BlobFinalized(address,bytes32,bool)` and selector `0x74accb1da870635a4e757ed45bf2f8016f9b08bfb46a9f6183bb74b2a362c280`. + ```solidity + event BlobFinalized(address indexed subscriber, bytes32 hash, bool resolved); + ```*/ + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + #[derive(Clone)] + pub struct BlobFinalized { + #[allow(missing_docs)] + pub subscriber: ::alloy_sol_types::private::Address, + #[allow(missing_docs)] + pub hash: ::alloy_sol_types::private::FixedBytes<32>, + #[allow(missing_docs)] + pub resolved: bool, + } + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + const _: () = { + use ::alloy_sol_types; + #[automatically_derived] + impl alloy_sol_types::SolEvent for BlobFinalized { + type DataTuple<'a> = ( + ::alloy_sol_types::sol_data::FixedBytes<32>, + ::alloy_sol_types::sol_data::Bool, + ); + type DataToken<'a> = as alloy_sol_types::SolType>::Token<'a>; + type TopicList = ( + alloy_sol_types::sol_data::FixedBytes<32>, + ::alloy_sol_types::sol_data::Address, + ); + const SIGNATURE: &'static str = "BlobFinalized(address,bytes32,bool)"; + const SIGNATURE_HASH: alloy_sol_types::private::B256 = + alloy_sol_types::private::B256::new([ + 116u8, 172u8, 203u8, 29u8, 168u8, 112u8, 99u8, 90u8, 78u8, 117u8, 126u8, 212u8, + 91u8, 242u8, 248u8, 1u8, 111u8, 155u8, 8u8, 191u8, 180u8, 106u8, 159u8, 97u8, + 131u8, 187u8, 116u8, 178u8, 163u8, 98u8, 194u8, 128u8, + ]); + const ANONYMOUS: bool = false; + #[allow(unused_variables)] + #[inline] + fn new( + topics: ::RustType, + data: as alloy_sol_types::SolType>::RustType, + ) -> Self { + Self { + subscriber: topics.1, + hash: data.0, + resolved: data.1, + } + } + #[inline] + fn check_signature( + topics: &::RustType, + ) -> alloy_sol_types::Result<()> { + if topics.0 != Self::SIGNATURE_HASH { + return Err(alloy_sol_types::Error::invalid_event_signature_hash( + Self::SIGNATURE, + topics.0, + Self::SIGNATURE_HASH, + )); + } + Ok(()) + } + #[inline] + fn tokenize_body(&self) -> Self::DataToken<'_> { + ( + <::alloy_sol_types::sol_data::FixedBytes< + 32, + > as alloy_sol_types::SolType>::tokenize(&self.hash), + <::alloy_sol_types::sol_data::Bool as alloy_sol_types::SolType>::tokenize( + &self.resolved, + ), + ) + } + 
#[inline] + fn topics(&self) -> ::RustType { + (Self::SIGNATURE_HASH.into(), self.subscriber.clone()) + } + #[inline] + fn encode_topics_raw( + &self, + out: &mut [alloy_sol_types::abi::token::WordToken], + ) -> alloy_sol_types::Result<()> { + if out.len() < ::COUNT { + return Err(alloy_sol_types::Error::Overrun); + } + out[0usize] = alloy_sol_types::abi::token::WordToken(Self::SIGNATURE_HASH); + out[1usize] = <::alloy_sol_types::sol_data::Address as alloy_sol_types::EventTopic>::encode_topic( + &self.subscriber, + ); + Ok(()) + } + } + #[automatically_derived] + impl alloy_sol_types::private::IntoLogData for BlobFinalized { + fn to_log_data(&self) -> alloy_sol_types::private::LogData { + From::from(self) + } + fn into_log_data(self) -> alloy_sol_types::private::LogData { + From::from(&self) + } + } + #[automatically_derived] + impl From<&BlobFinalized> for alloy_sol_types::private::LogData { + #[inline] + fn from(this: &BlobFinalized) -> alloy_sol_types::private::LogData { + alloy_sol_types::SolEvent::encode_log_data(this) + } + } + }; + /**Event with signature `BlobPending(address,bytes32,bytes32)` and selector `0x57e4769774fa6b36c8faf32c5b177a5c15d70775d3729a530b8ec17009f31122`. + ```solidity + event BlobPending(address indexed subscriber, bytes32 hash, bytes32 sourceId); + ```*/ + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + #[derive(Clone)] + pub struct BlobPending { + #[allow(missing_docs)] + pub subscriber: ::alloy_sol_types::private::Address, + #[allow(missing_docs)] + pub hash: ::alloy_sol_types::private::FixedBytes<32>, + #[allow(missing_docs)] + pub sourceId: ::alloy_sol_types::private::FixedBytes<32>, + } + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + const _: () = { + use ::alloy_sol_types; + #[automatically_derived] + impl alloy_sol_types::SolEvent for BlobPending { + type DataTuple<'a> = ( + ::alloy_sol_types::sol_data::FixedBytes<32>, + ::alloy_sol_types::sol_data::FixedBytes<32>, + ); + type DataToken<'a> = as alloy_sol_types::SolType>::Token<'a>; + type TopicList = ( + alloy_sol_types::sol_data::FixedBytes<32>, + ::alloy_sol_types::sol_data::Address, + ); + const SIGNATURE: &'static str = "BlobPending(address,bytes32,bytes32)"; + const SIGNATURE_HASH: alloy_sol_types::private::B256 = + alloy_sol_types::private::B256::new([ + 87u8, 228u8, 118u8, 151u8, 116u8, 250u8, 107u8, 54u8, 200u8, 250u8, 243u8, + 44u8, 91u8, 23u8, 122u8, 92u8, 21u8, 215u8, 7u8, 117u8, 211u8, 114u8, 154u8, + 83u8, 11u8, 142u8, 193u8, 112u8, 9u8, 243u8, 17u8, 34u8, + ]); + const ANONYMOUS: bool = false; + #[allow(unused_variables)] + #[inline] + fn new( + topics: ::RustType, + data: as alloy_sol_types::SolType>::RustType, + ) -> Self { + Self { + subscriber: topics.1, + hash: data.0, + sourceId: data.1, + } + } + #[inline] + fn check_signature( + topics: &::RustType, + ) -> alloy_sol_types::Result<()> { + if topics.0 != Self::SIGNATURE_HASH { + return Err(alloy_sol_types::Error::invalid_event_signature_hash( + Self::SIGNATURE, + topics.0, + Self::SIGNATURE_HASH, + )); + } + Ok(()) + } + #[inline] + fn tokenize_body(&self) -> Self::DataToken<'_> { + ( + <::alloy_sol_types::sol_data::FixedBytes< + 32, + > as alloy_sol_types::SolType>::tokenize(&self.hash), + <::alloy_sol_types::sol_data::FixedBytes< + 32, + > as alloy_sol_types::SolType>::tokenize(&self.sourceId), + ) + } + #[inline] + fn topics(&self) -> ::RustType { + (Self::SIGNATURE_HASH.into(), self.subscriber.clone()) + } + 
#[inline] + fn encode_topics_raw( + &self, + out: &mut [alloy_sol_types::abi::token::WordToken], + ) -> alloy_sol_types::Result<()> { + if out.len() < ::COUNT { + return Err(alloy_sol_types::Error::Overrun); + } + out[0usize] = alloy_sol_types::abi::token::WordToken(Self::SIGNATURE_HASH); + out[1usize] = <::alloy_sol_types::sol_data::Address as alloy_sol_types::EventTopic>::encode_topic( + &self.subscriber, + ); + Ok(()) + } + } + #[automatically_derived] + impl alloy_sol_types::private::IntoLogData for BlobPending { + fn to_log_data(&self) -> alloy_sol_types::private::LogData { + From::from(self) + } + fn into_log_data(self) -> alloy_sol_types::private::LogData { + From::from(&self) + } + } + #[automatically_derived] + impl From<&BlobPending> for alloy_sol_types::private::LogData { + #[inline] + fn from(this: &BlobPending) -> alloy_sol_types::private::LogData { + alloy_sol_types::SolEvent::encode_log_data(this) + } + } + }; + /**Function with signature `addBlob(address,bytes32,bytes32,bytes32,string,uint64,uint64)` and selector `0x5b5cc14f`. + ```solidity + function addBlob(address sponsor, bytes32 source, bytes32 blobHash, bytes32 metadataHash, string memory subscriptionId, uint64 size, uint64 ttl) external; + ```*/ + #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct addBlobCall { + #[allow(missing_docs)] + pub sponsor: ::alloy_sol_types::private::Address, + #[allow(missing_docs)] + pub source: ::alloy_sol_types::private::FixedBytes<32>, + #[allow(missing_docs)] + pub blobHash: ::alloy_sol_types::private::FixedBytes<32>, + #[allow(missing_docs)] + pub metadataHash: ::alloy_sol_types::private::FixedBytes<32>, + #[allow(missing_docs)] + pub subscriptionId: ::alloy_sol_types::private::String, + #[allow(missing_docs)] + pub size: u64, + #[allow(missing_docs)] + pub ttl: u64, + } + ///Container type for the return parameters of the [`addBlob(address,bytes32,bytes32,bytes32,string,uint64,uint64)`](addBlobCall) function. 
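As a usage sketch (not part of the generated file): the `addBlobCall` struct defined above can be turned into calldata through alloy's `SolCall` trait. The helper name, the placeholder values, and the assumption that the generated `IBlobsFacade` module is in scope are illustrative only; the real import path depends on this crate's layout.

```rust
use alloy_primitives::{Address, FixedBytes};
use alloy_sol_types::SolCall;

// Hypothetical helper: builds and ABI-encodes an addBlob call.
// Assumes the generated `IBlobsFacade` module is in scope.
fn encode_add_blob() -> Vec<u8> {
    let call = IBlobsFacade::addBlobCall {
        sponsor: Address::ZERO,            // placeholder values for illustration
        source: FixedBytes::<32>::ZERO,
        blobHash: FixedBytes::<32>::ZERO,
        metadataHash: FixedBytes::<32>::ZERO,
        subscriptionId: "my-subscription".to_string(),
        size: 1024,
        ttl: 3600,
    };
    // `abi_encode` prepends the 4-byte selector (0x5b5cc14f) to the ABI-encoded arguments.
    let calldata = call.abi_encode();
    debug_assert_eq!(&calldata[..4], &IBlobsFacade::addBlobCall::SELECTOR[..]);
    calldata
}
```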
+ #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct addBlobReturn {} + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + const _: () = { + use ::alloy_sol_types; + { + #[doc(hidden)] + type UnderlyingSolTuple<'a> = ( + ::alloy_sol_types::sol_data::Address, + ::alloy_sol_types::sol_data::FixedBytes<32>, + ::alloy_sol_types::sol_data::FixedBytes<32>, + ::alloy_sol_types::sol_data::FixedBytes<32>, + ::alloy_sol_types::sol_data::String, + ::alloy_sol_types::sol_data::Uint<64>, + ::alloy_sol_types::sol_data::Uint<64>, + ); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = ( + ::alloy_sol_types::private::Address, + ::alloy_sol_types::private::FixedBytes<32>, + ::alloy_sol_types::private::FixedBytes<32>, + ::alloy_sol_types::private::FixedBytes<32>, + ::alloy_sol_types::private::String, + u64, + u64, + ); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: addBlobCall) -> Self { + ( + value.sponsor, + value.source, + value.blobHash, + value.metadataHash, + value.subscriptionId, + value.size, + value.ttl, + ) + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for addBlobCall { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self { + sponsor: tuple.0, + source: tuple.1, + blobHash: tuple.2, + metadataHash: tuple.3, + subscriptionId: tuple.4, + size: tuple.5, + ttl: tuple.6, + } + } + } + } + { + #[doc(hidden)] + type UnderlyingSolTuple<'a> = (); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = (); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: addBlobReturn) -> Self { + () + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for addBlobReturn { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self {} + } + } + } + #[automatically_derived] + impl alloy_sol_types::SolCall for addBlobCall { + type Parameters<'a> = ( + ::alloy_sol_types::sol_data::Address, + ::alloy_sol_types::sol_data::FixedBytes<32>, + ::alloy_sol_types::sol_data::FixedBytes<32>, + ::alloy_sol_types::sol_data::FixedBytes<32>, + ::alloy_sol_types::sol_data::String, + ::alloy_sol_types::sol_data::Uint<64>, + ::alloy_sol_types::sol_data::Uint<64>, + ); + type Token<'a> = as alloy_sol_types::SolType>::Token<'a>; + type Return = addBlobReturn; + type ReturnTuple<'a> = (); + type ReturnToken<'a> = as alloy_sol_types::SolType>::Token<'a>; + const SIGNATURE: &'static str = + "addBlob(address,bytes32,bytes32,bytes32,string,uint64,uint64)"; + const SELECTOR: [u8; 4] = [91u8, 92u8, 193u8, 79u8]; + #[inline] + fn new<'a>( + tuple: as alloy_sol_types::SolType>::RustType, + ) -> Self { + tuple.into() + } + #[inline] + fn tokenize(&self) -> Self::Token<'_> { + ( + <::alloy_sol_types::sol_data::Address as alloy_sol_types::SolType>::tokenize( + &self.sponsor, + ), + <::alloy_sol_types::sol_data::FixedBytes< + 32, + > as alloy_sol_types::SolType>::tokenize(&self.source), + 
<::alloy_sol_types::sol_data::FixedBytes< + 32, + > as alloy_sol_types::SolType>::tokenize(&self.blobHash), + <::alloy_sol_types::sol_data::FixedBytes< + 32, + > as alloy_sol_types::SolType>::tokenize(&self.metadataHash), + <::alloy_sol_types::sol_data::String as alloy_sol_types::SolType>::tokenize( + &self.subscriptionId, + ), + <::alloy_sol_types::sol_data::Uint< + 64, + > as alloy_sol_types::SolType>::tokenize(&self.size), + <::alloy_sol_types::sol_data::Uint< + 64, + > as alloy_sol_types::SolType>::tokenize(&self.ttl), + ) + } + #[inline] + fn abi_decode_returns( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + as alloy_sol_types::SolType>::abi_decode_sequence( + data, validate, + ) + .map(Into::into) + } + } + }; + /**Function with signature `deleteBlob(address,bytes32,string)` and selector `0xbea9016a`. + ```solidity + function deleteBlob(address subscriber, bytes32 blobHash, string memory subscriptionId) external; + ```*/ + #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct deleteBlobCall { + #[allow(missing_docs)] + pub subscriber: ::alloy_sol_types::private::Address, + #[allow(missing_docs)] + pub blobHash: ::alloy_sol_types::private::FixedBytes<32>, + #[allow(missing_docs)] + pub subscriptionId: ::alloy_sol_types::private::String, + } + ///Container type for the return parameters of the [`deleteBlob(address,bytes32,string)`](deleteBlobCall) function. + #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct deleteBlobReturn {} + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + const _: () = { + use ::alloy_sol_types; + { + #[doc(hidden)] + type UnderlyingSolTuple<'a> = ( + ::alloy_sol_types::sol_data::Address, + ::alloy_sol_types::sol_data::FixedBytes<32>, + ::alloy_sol_types::sol_data::String, + ); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = ( + ::alloy_sol_types::private::Address, + ::alloy_sol_types::private::FixedBytes<32>, + ::alloy_sol_types::private::String, + ); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: deleteBlobCall) -> Self { + (value.subscriber, value.blobHash, value.subscriptionId) + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for deleteBlobCall { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self { + subscriber: tuple.0, + blobHash: tuple.1, + subscriptionId: tuple.2, + } + } + } + } + { + #[doc(hidden)] + type UnderlyingSolTuple<'a> = (); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = (); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: deleteBlobReturn) -> Self { + () + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for deleteBlobReturn { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self {} + } + } + } + #[automatically_derived] + impl alloy_sol_types::SolCall for deleteBlobCall { + type Parameters<'a> = ( + 
::alloy_sol_types::sol_data::Address, + ::alloy_sol_types::sol_data::FixedBytes<32>, + ::alloy_sol_types::sol_data::String, + ); + type Token<'a> = as alloy_sol_types::SolType>::Token<'a>; + type Return = deleteBlobReturn; + type ReturnTuple<'a> = (); + type ReturnToken<'a> = as alloy_sol_types::SolType>::Token<'a>; + const SIGNATURE: &'static str = "deleteBlob(address,bytes32,string)"; + const SELECTOR: [u8; 4] = [190u8, 169u8, 1u8, 106u8]; + #[inline] + fn new<'a>( + tuple: as alloy_sol_types::SolType>::RustType, + ) -> Self { + tuple.into() + } + #[inline] + fn tokenize(&self) -> Self::Token<'_> { + ( + <::alloy_sol_types::sol_data::Address as alloy_sol_types::SolType>::tokenize( + &self.subscriber, + ), + <::alloy_sol_types::sol_data::FixedBytes< + 32, + > as alloy_sol_types::SolType>::tokenize(&self.blobHash), + <::alloy_sol_types::sol_data::String as alloy_sol_types::SolType>::tokenize( + &self.subscriptionId, + ), + ) + } + #[inline] + fn abi_decode_returns( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + as alloy_sol_types::SolType>::abi_decode_sequence( + data, validate, + ) + .map(Into::into) + } + } + }; + /**Function with signature `getBlob(bytes32)` and selector `0x8a4d1ad4`. + ```solidity + function getBlob(bytes32 blobHash) external view returns (Blob memory blob); + ```*/ + #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct getBlobCall { + #[allow(missing_docs)] + pub blobHash: ::alloy_sol_types::private::FixedBytes<32>, + } + ///Container type for the return parameters of the [`getBlob(bytes32)`](getBlobCall) function. + #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct getBlobReturn { + #[allow(missing_docs)] + pub blob: ::RustType, + } + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + const _: () = { + use ::alloy_sol_types; + { + #[doc(hidden)] + type UnderlyingSolTuple<'a> = (::alloy_sol_types::sol_data::FixedBytes<32>,); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = (::alloy_sol_types::private::FixedBytes<32>,); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: getBlobCall) -> Self { + (value.blobHash,) + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for getBlobCall { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self { blobHash: tuple.0 } + } + } + } + { + #[doc(hidden)] + type UnderlyingSolTuple<'a> = (Blob,); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = (::RustType,); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: getBlobReturn) -> Self { + (value.blob,) + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for getBlobReturn { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self { blob: tuple.0 } + } + } + } + #[automatically_derived] + impl alloy_sol_types::SolCall for getBlobCall { + type Parameters<'a> = 
(::alloy_sol_types::sol_data::FixedBytes<32>,); + type Token<'a> = as alloy_sol_types::SolType>::Token<'a>; + type Return = getBlobReturn; + type ReturnTuple<'a> = (Blob,); + type ReturnToken<'a> = as alloy_sol_types::SolType>::Token<'a>; + const SIGNATURE: &'static str = "getBlob(bytes32)"; + const SELECTOR: [u8; 4] = [138u8, 77u8, 26u8, 212u8]; + #[inline] + fn new<'a>( + tuple: as alloy_sol_types::SolType>::RustType, + ) -> Self { + tuple.into() + } + #[inline] + fn tokenize(&self) -> Self::Token<'_> { + ( + <::alloy_sol_types::sol_data::FixedBytes< + 32, + > as alloy_sol_types::SolType>::tokenize(&self.blobHash), + ) + } + #[inline] + fn abi_decode_returns( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + as alloy_sol_types::SolType>::abi_decode_sequence( + data, validate, + ) + .map(Into::into) + } + } + }; + /**Function with signature `getStats()` and selector `0xc59d4847`. + ```solidity + function getStats() external view returns (SubnetStats memory stats); + ```*/ + #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct getStatsCall {} + ///Container type for the return parameters of the [`getStats()`](getStatsCall) function. + #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct getStatsReturn { + #[allow(missing_docs)] + pub stats: ::RustType, + } + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + const _: () = { + use ::alloy_sol_types; + { + #[doc(hidden)] + type UnderlyingSolTuple<'a> = (); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = (); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: getStatsCall) -> Self { + () + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for getStatsCall { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self {} + } + } + } + { + #[doc(hidden)] + type UnderlyingSolTuple<'a> = (SubnetStats,); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = (::RustType,); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: getStatsReturn) -> Self { + (value.stats,) + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for getStatsReturn { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self { stats: tuple.0 } + } + } + } + #[automatically_derived] + impl alloy_sol_types::SolCall for getStatsCall { + type Parameters<'a> = (); + type Token<'a> = as alloy_sol_types::SolType>::Token<'a>; + type Return = getStatsReturn; + type ReturnTuple<'a> = (SubnetStats,); + type ReturnToken<'a> = as alloy_sol_types::SolType>::Token<'a>; + const SIGNATURE: &'static str = "getStats()"; + const SELECTOR: [u8; 4] = [197u8, 157u8, 72u8, 71u8]; + #[inline] + fn new<'a>( + tuple: as alloy_sol_types::SolType>::RustType, + ) -> Self { + tuple.into() + } + #[inline] + fn tokenize(&self) -> Self::Token<'_> { + () + } + #[inline] + fn abi_decode_returns( + data: &[u8], + 
validate: bool, + ) -> alloy_sol_types::Result { + as alloy_sol_types::SolType>::abi_decode_sequence( + data, validate, + ) + .map(Into::into) + } + } + }; + /**Function with signature `overwriteBlob(bytes32,address,bytes32,bytes32,bytes32,string,uint64,uint64)` and selector `0x434fc5a4`. + ```solidity + function overwriteBlob(bytes32 oldHash, address sponsor, bytes32 source, bytes32 blobHash, bytes32 metadataHash, string memory subscriptionId, uint64 size, uint64 ttl) external; + ```*/ + #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct overwriteBlobCall { + #[allow(missing_docs)] + pub oldHash: ::alloy_sol_types::private::FixedBytes<32>, + #[allow(missing_docs)] + pub sponsor: ::alloy_sol_types::private::Address, + #[allow(missing_docs)] + pub source: ::alloy_sol_types::private::FixedBytes<32>, + #[allow(missing_docs)] + pub blobHash: ::alloy_sol_types::private::FixedBytes<32>, + #[allow(missing_docs)] + pub metadataHash: ::alloy_sol_types::private::FixedBytes<32>, + #[allow(missing_docs)] + pub subscriptionId: ::alloy_sol_types::private::String, + #[allow(missing_docs)] + pub size: u64, + #[allow(missing_docs)] + pub ttl: u64, + } + ///Container type for the return parameters of the [`overwriteBlob(bytes32,address,bytes32,bytes32,bytes32,string,uint64,uint64)`](overwriteBlobCall) function. + #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct overwriteBlobReturn {} + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + const _: () = { + use ::alloy_sol_types; + { + #[doc(hidden)] + type UnderlyingSolTuple<'a> = ( + ::alloy_sol_types::sol_data::FixedBytes<32>, + ::alloy_sol_types::sol_data::Address, + ::alloy_sol_types::sol_data::FixedBytes<32>, + ::alloy_sol_types::sol_data::FixedBytes<32>, + ::alloy_sol_types::sol_data::FixedBytes<32>, + ::alloy_sol_types::sol_data::String, + ::alloy_sol_types::sol_data::Uint<64>, + ::alloy_sol_types::sol_data::Uint<64>, + ); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = ( + ::alloy_sol_types::private::FixedBytes<32>, + ::alloy_sol_types::private::Address, + ::alloy_sol_types::private::FixedBytes<32>, + ::alloy_sol_types::private::FixedBytes<32>, + ::alloy_sol_types::private::FixedBytes<32>, + ::alloy_sol_types::private::String, + u64, + u64, + ); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: overwriteBlobCall) -> Self { + ( + value.oldHash, + value.sponsor, + value.source, + value.blobHash, + value.metadataHash, + value.subscriptionId, + value.size, + value.ttl, + ) + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for overwriteBlobCall { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self { + oldHash: tuple.0, + sponsor: tuple.1, + source: tuple.2, + blobHash: tuple.3, + metadataHash: tuple.4, + subscriptionId: tuple.5, + size: tuple.6, + ttl: tuple.7, + } + } + } + } + { + #[doc(hidden)] + type UnderlyingSolTuple<'a> = (); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = (); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + 
alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: overwriteBlobReturn) -> Self { + () + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for overwriteBlobReturn { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self {} + } + } + } + #[automatically_derived] + impl alloy_sol_types::SolCall for overwriteBlobCall { + type Parameters<'a> = ( + ::alloy_sol_types::sol_data::FixedBytes<32>, + ::alloy_sol_types::sol_data::Address, + ::alloy_sol_types::sol_data::FixedBytes<32>, + ::alloy_sol_types::sol_data::FixedBytes<32>, + ::alloy_sol_types::sol_data::FixedBytes<32>, + ::alloy_sol_types::sol_data::String, + ::alloy_sol_types::sol_data::Uint<64>, + ::alloy_sol_types::sol_data::Uint<64>, + ); + type Token<'a> = as alloy_sol_types::SolType>::Token<'a>; + type Return = overwriteBlobReturn; + type ReturnTuple<'a> = (); + type ReturnToken<'a> = as alloy_sol_types::SolType>::Token<'a>; + const SIGNATURE: &'static str = + "overwriteBlob(bytes32,address,bytes32,bytes32,bytes32,string,uint64,uint64)"; + const SELECTOR: [u8; 4] = [67u8, 79u8, 197u8, 164u8]; + #[inline] + fn new<'a>( + tuple: as alloy_sol_types::SolType>::RustType, + ) -> Self { + tuple.into() + } + #[inline] + fn tokenize(&self) -> Self::Token<'_> { + ( + <::alloy_sol_types::sol_data::FixedBytes< + 32, + > as alloy_sol_types::SolType>::tokenize(&self.oldHash), + <::alloy_sol_types::sol_data::Address as alloy_sol_types::SolType>::tokenize( + &self.sponsor, + ), + <::alloy_sol_types::sol_data::FixedBytes< + 32, + > as alloy_sol_types::SolType>::tokenize(&self.source), + <::alloy_sol_types::sol_data::FixedBytes< + 32, + > as alloy_sol_types::SolType>::tokenize(&self.blobHash), + <::alloy_sol_types::sol_data::FixedBytes< + 32, + > as alloy_sol_types::SolType>::tokenize(&self.metadataHash), + <::alloy_sol_types::sol_data::String as alloy_sol_types::SolType>::tokenize( + &self.subscriptionId, + ), + <::alloy_sol_types::sol_data::Uint< + 64, + > as alloy_sol_types::SolType>::tokenize(&self.size), + <::alloy_sol_types::sol_data::Uint< + 64, + > as alloy_sol_types::SolType>::tokenize(&self.ttl), + ) + } + #[inline] + fn abi_decode_returns( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + as alloy_sol_types::SolType>::abi_decode_sequence( + data, validate, + ) + .map(Into::into) + } + } + }; + /**Function with signature `trimBlobExpiries(address,bytes32,uint32)` and selector `0x78f8af85`. + ```solidity + function trimBlobExpiries(address subscriber, bytes32 startingHash, uint32 limit) external returns (TrimBlobExpiries memory); + ```*/ + #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct trimBlobExpiriesCall { + #[allow(missing_docs)] + pub subscriber: ::alloy_sol_types::private::Address, + #[allow(missing_docs)] + pub startingHash: ::alloy_sol_types::private::FixedBytes<32>, + #[allow(missing_docs)] + pub limit: u32, + } + ///Container type for the return parameters of the [`trimBlobExpiries(address,bytes32,uint32)`](trimBlobExpiriesCall) function. 
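A brief illustrative sketch (assumed usage, not generated code): raw return data from a `trimBlobExpiries` call, e.g. obtained via `eth_call`, can be decoded into the return container defined just below using `abi_decode_returns`. The helper name is hypothetical.

```rust
use alloy_sol_types::SolCall;

// `raw` is assumed to be the ABI-encoded return data of a trimBlobExpiries call.
fn decode_trim_result(raw: &[u8]) -> alloy_sol_types::Result<(u32, [u8; 32])> {
    let ret = IBlobsFacade::trimBlobExpiriesCall::abi_decode_returns(raw, true)?;
    // `_0` is the TrimBlobExpiries struct: how many expiries were processed and
    // the key to resume from on the next call.
    Ok((ret._0.processed, ret._0.nextKey.0))
}
```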
+ #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct trimBlobExpiriesReturn { + #[allow(missing_docs)] + pub _0: ::RustType, + } + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + const _: () = { + use ::alloy_sol_types; + { + #[doc(hidden)] + type UnderlyingSolTuple<'a> = ( + ::alloy_sol_types::sol_data::Address, + ::alloy_sol_types::sol_data::FixedBytes<32>, + ::alloy_sol_types::sol_data::Uint<32>, + ); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = ( + ::alloy_sol_types::private::Address, + ::alloy_sol_types::private::FixedBytes<32>, + u32, + ); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: trimBlobExpiriesCall) -> Self { + (value.subscriber, value.startingHash, value.limit) + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for trimBlobExpiriesCall { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self { + subscriber: tuple.0, + startingHash: tuple.1, + limit: tuple.2, + } + } + } + } + { + #[doc(hidden)] + type UnderlyingSolTuple<'a> = (TrimBlobExpiries,); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = + (::RustType,); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: trimBlobExpiriesReturn) -> Self { + (value._0,) + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for trimBlobExpiriesReturn { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self { _0: tuple.0 } + } + } + } + #[automatically_derived] + impl alloy_sol_types::SolCall for trimBlobExpiriesCall { + type Parameters<'a> = ( + ::alloy_sol_types::sol_data::Address, + ::alloy_sol_types::sol_data::FixedBytes<32>, + ::alloy_sol_types::sol_data::Uint<32>, + ); + type Token<'a> = as alloy_sol_types::SolType>::Token<'a>; + type Return = trimBlobExpiriesReturn; + type ReturnTuple<'a> = (TrimBlobExpiries,); + type ReturnToken<'a> = as alloy_sol_types::SolType>::Token<'a>; + const SIGNATURE: &'static str = "trimBlobExpiries(address,bytes32,uint32)"; + const SELECTOR: [u8; 4] = [120u8, 248u8, 175u8, 133u8]; + #[inline] + fn new<'a>( + tuple: as alloy_sol_types::SolType>::RustType, + ) -> Self { + tuple.into() + } + #[inline] + fn tokenize(&self) -> Self::Token<'_> { + ( + <::alloy_sol_types::sol_data::Address as alloy_sol_types::SolType>::tokenize( + &self.subscriber, + ), + <::alloy_sol_types::sol_data::FixedBytes< + 32, + > as alloy_sol_types::SolType>::tokenize(&self.startingHash), + <::alloy_sol_types::sol_data::Uint< + 32, + > as alloy_sol_types::SolType>::tokenize(&self.limit), + ) + } + #[inline] + fn abi_decode_returns( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + as alloy_sol_types::SolType>::abi_decode_sequence( + data, validate, + ) + .map(Into::into) + } + } + }; + ///Container for all the [`IBlobsFacade`](self) function calls. 
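Sketch of selector-based dispatch using the calls enum defined below (an assumption about how a caller might use it, not part of the generated bindings; the `route` helper is hypothetical):

```rust
use alloy_sol_types::SolInterface;

// Routes raw calldata to the matching generated call struct.
// Assumes the generated `IBlobsFacade` module is in scope.
fn route(calldata: &[u8]) -> alloy_sol_types::Result<()> {
    let (selector, data) = calldata
        .split_first_chunk::<4>()
        .expect("calldata shorter than a selector");
    match IBlobsFacade::IBlobsFacadeCalls::abi_decode_raw(*selector, data, true)? {
        IBlobsFacade::IBlobsFacadeCalls::addBlob(call) => {
            // e.g. persist call.blobHash / call.size / call.ttl
            let _ = call;
        }
        IBlobsFacade::IBlobsFacadeCalls::getStats(_) => { /* read-only query */ }
        other => {
            // deleteBlob, getBlob, overwriteBlob, trimBlobExpiries handled similarly
            let _ = other;
        }
    }
    Ok(())
}
```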
+ pub enum IBlobsFacadeCalls { + #[allow(missing_docs)] + addBlob(addBlobCall), + #[allow(missing_docs)] + deleteBlob(deleteBlobCall), + #[allow(missing_docs)] + getBlob(getBlobCall), + #[allow(missing_docs)] + getStats(getStatsCall), + #[allow(missing_docs)] + overwriteBlob(overwriteBlobCall), + #[allow(missing_docs)] + trimBlobExpiries(trimBlobExpiriesCall), + } + #[automatically_derived] + impl IBlobsFacadeCalls { + /// All the selectors of this enum. + /// + /// Note that the selectors might not be in the same order as the variants. + /// No guarantees are made about the order of the selectors. + /// + /// Prefer using `SolInterface` methods instead. + pub const SELECTORS: &'static [[u8; 4usize]] = &[ + [67u8, 79u8, 197u8, 164u8], + [91u8, 92u8, 193u8, 79u8], + [120u8, 248u8, 175u8, 133u8], + [138u8, 77u8, 26u8, 212u8], + [190u8, 169u8, 1u8, 106u8], + [197u8, 157u8, 72u8, 71u8], + ]; + } + #[automatically_derived] + impl alloy_sol_types::SolInterface for IBlobsFacadeCalls { + const NAME: &'static str = "IBlobsFacadeCalls"; + const MIN_DATA_LENGTH: usize = 0usize; + const COUNT: usize = 6usize; + #[inline] + fn selector(&self) -> [u8; 4] { + match self { + Self::addBlob(_) => ::SELECTOR, + Self::deleteBlob(_) => ::SELECTOR, + Self::getBlob(_) => ::SELECTOR, + Self::getStats(_) => ::SELECTOR, + Self::overwriteBlob(_) => ::SELECTOR, + Self::trimBlobExpiries(_) => { + ::SELECTOR + } + } + } + #[inline] + fn selector_at(i: usize) -> ::core::option::Option<[u8; 4]> { + Self::SELECTORS.get(i).copied() + } + #[inline] + fn valid_selector(selector: [u8; 4]) -> bool { + Self::SELECTORS.binary_search(&selector).is_ok() + } + #[inline] + #[allow(non_snake_case)] + fn abi_decode_raw( + selector: [u8; 4], + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + static DECODE_SHIMS: &[fn( + &[u8], + bool, + ) + -> alloy_sol_types::Result] = &[ + { + fn overwriteBlob( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + ::abi_decode_raw( + data, validate, + ) + .map(IBlobsFacadeCalls::overwriteBlob) + } + overwriteBlob + }, + { + fn addBlob( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + ::abi_decode_raw(data, validate) + .map(IBlobsFacadeCalls::addBlob) + } + addBlob + }, + { + fn trimBlobExpiries( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + ::abi_decode_raw( + data, validate, + ) + .map(IBlobsFacadeCalls::trimBlobExpiries) + } + trimBlobExpiries + }, + { + fn getBlob( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + ::abi_decode_raw(data, validate) + .map(IBlobsFacadeCalls::getBlob) + } + getBlob + }, + { + fn deleteBlob( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + ::abi_decode_raw(data, validate) + .map(IBlobsFacadeCalls::deleteBlob) + } + deleteBlob + }, + { + fn getStats( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + ::abi_decode_raw(data, validate) + .map(IBlobsFacadeCalls::getStats) + } + getStats + }, + ]; + let Ok(idx) = Self::SELECTORS.binary_search(&selector) else { + return Err(alloy_sol_types::Error::unknown_selector( + ::NAME, + selector, + )); + }; + DECODE_SHIMS[idx](data, validate) + } + #[inline] + fn abi_encoded_size(&self) -> usize { + match self { + Self::addBlob(inner) => { + ::abi_encoded_size(inner) + } + Self::deleteBlob(inner) => { + ::abi_encoded_size(inner) + } + Self::getBlob(inner) => { + ::abi_encoded_size(inner) + } + Self::getStats(inner) => { + ::abi_encoded_size(inner) + } + Self::overwriteBlob(inner) => { + ::abi_encoded_size(inner) 
+ } + Self::trimBlobExpiries(inner) => { + ::abi_encoded_size(inner) + } + } + } + #[inline] + fn abi_encode_raw(&self, out: &mut alloy_sol_types::private::Vec) { + match self { + Self::addBlob(inner) => { + ::abi_encode_raw(inner, out) + } + Self::deleteBlob(inner) => { + ::abi_encode_raw(inner, out) + } + Self::getBlob(inner) => { + ::abi_encode_raw(inner, out) + } + Self::getStats(inner) => { + ::abi_encode_raw(inner, out) + } + Self::overwriteBlob(inner) => { + ::abi_encode_raw(inner, out) + } + Self::trimBlobExpiries(inner) => { + ::abi_encode_raw(inner, out) + } + } + } + } + ///Container for all the [`IBlobsFacade`](self) events. + pub enum IBlobsFacadeEvents { + #[allow(missing_docs)] + BlobAdded(BlobAdded), + #[allow(missing_docs)] + BlobDeleted(BlobDeleted), + #[allow(missing_docs)] + BlobFinalized(BlobFinalized), + #[allow(missing_docs)] + BlobPending(BlobPending), + } + #[automatically_derived] + impl IBlobsFacadeEvents { + /// All the selectors of this enum. + /// + /// Note that the selectors might not be in the same order as the variants. + /// No guarantees are made about the order of the selectors. + /// + /// Prefer using `SolInterface` methods instead. + pub const SELECTORS: &'static [[u8; 32usize]] = &[ + [ + 46u8, 101u8, 103u8, 183u8, 48u8, 130u8, 181u8, 71u8, 220u8, 112u8, 177u8, 225u8, + 105u8, 125u8, 194u8, 13u8, 44u8, 33u8, 196u8, 73u8, 21u8, 195u8, 175u8, 78u8, + 253u8, 108u8, 231u8, 204u8, 153u8, 5u8, 161u8, 206u8, + ], + [ + 87u8, 228u8, 118u8, 151u8, 116u8, 250u8, 107u8, 54u8, 200u8, 250u8, 243u8, 44u8, + 91u8, 23u8, 122u8, 92u8, 21u8, 215u8, 7u8, 117u8, 211u8, 114u8, 154u8, 83u8, 11u8, + 142u8, 193u8, 112u8, 9u8, 243u8, 17u8, 34u8, + ], + [ + 116u8, 172u8, 203u8, 29u8, 168u8, 112u8, 99u8, 90u8, 78u8, 117u8, 126u8, 212u8, + 91u8, 242u8, 248u8, 1u8, 111u8, 155u8, 8u8, 191u8, 180u8, 106u8, 159u8, 97u8, + 131u8, 187u8, 116u8, 178u8, 163u8, 98u8, 194u8, 128u8, + ], + [ + 212u8, 44u8, 120u8, 20u8, 81u8, 143u8, 27u8, 127u8, 89u8, 25u8, 85u8, 125u8, 50u8, + 126u8, 136u8, 205u8, 219u8, 123u8, 2u8, 252u8, 145u8, 8u8, 91u8, 64u8, 46u8, 148u8, + 8u8, 50u8, 67u8, 160u8, 106u8, 141u8, + ], + ]; + } + #[automatically_derived] + impl alloy_sol_types::SolEventInterface for IBlobsFacadeEvents { + const NAME: &'static str = "IBlobsFacadeEvents"; + const COUNT: usize = 4usize; + fn decode_raw_log( + topics: &[alloy_sol_types::Word], + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + match topics.first().copied() { + Some(::SIGNATURE_HASH) => { + ::decode_raw_log(topics, data, validate) + .map(Self::BlobAdded) + } + Some(::SIGNATURE_HASH) => { + ::decode_raw_log( + topics, data, validate, + ) + .map(Self::BlobDeleted) + } + Some(::SIGNATURE_HASH) => { + ::decode_raw_log( + topics, data, validate, + ) + .map(Self::BlobFinalized) + } + Some(::SIGNATURE_HASH) => { + ::decode_raw_log( + topics, data, validate, + ) + .map(Self::BlobPending) + } + _ => alloy_sol_types::private::Err(alloy_sol_types::Error::InvalidLog { + name: ::NAME, + log: alloy_sol_types::private::Box::new( + alloy_sol_types::private::LogData::new_unchecked( + topics.to_vec(), + data.to_vec().into(), + ), + ), + }), + } + } + } + #[automatically_derived] + impl alloy_sol_types::private::IntoLogData for IBlobsFacadeEvents { + fn to_log_data(&self) -> alloy_sol_types::private::LogData { + match self { + Self::BlobAdded(inner) => alloy_sol_types::private::IntoLogData::to_log_data(inner), + Self::BlobDeleted(inner) => { + alloy_sol_types::private::IntoLogData::to_log_data(inner) + } + 
Self::BlobFinalized(inner) => { + alloy_sol_types::private::IntoLogData::to_log_data(inner) + } + Self::BlobPending(inner) => { + alloy_sol_types::private::IntoLogData::to_log_data(inner) + } + } + } + fn into_log_data(self) -> alloy_sol_types::private::LogData { + match self { + Self::BlobAdded(inner) => { + alloy_sol_types::private::IntoLogData::into_log_data(inner) + } + Self::BlobDeleted(inner) => { + alloy_sol_types::private::IntoLogData::into_log_data(inner) + } + Self::BlobFinalized(inner) => { + alloy_sol_types::private::IntoLogData::into_log_data(inner) + } + Self::BlobPending(inner) => { + alloy_sol_types::private::IntoLogData::into_log_data(inner) + } + } + } + } +} diff --git a/ipc-storage/sol-facade/crates/facade/src/blobs_facade/mod.rs b/ipc-storage/sol-facade/crates/facade/src/blobs_facade/mod.rs new file mode 100644 index 0000000000..9cff741d07 --- /dev/null +++ b/ipc-storage/sol-facade/crates/facade/src/blobs_facade/mod.rs @@ -0,0 +1,8 @@ +// Copyright 2022-2024 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT +#![allow(unused_imports, clippy::all, rustdoc::all)] +//! This module contains the sol! generated bindings for solidity contracts. +//! This is autogenerated code. +//! Do not manually edit these files. +//! These files may be overwritten by the codegen system at any time. +pub mod r#iblobsfacade; diff --git a/ipc-storage/sol-facade/crates/facade/src/bucket_facade/ibucketfacade.rs b/ipc-storage/sol-facade/crates/facade/src/bucket_facade/ibucketfacade.rs new file mode 100644 index 0000000000..1105c7f0ee --- /dev/null +++ b/ipc-storage/sol-facade/crates/facade/src/bucket_facade/ibucketfacade.rs @@ -0,0 +1,4018 @@ +// Copyright 2022-2024 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT +/** + +Generated by the following Solidity interface... 
+```solidity +interface IBucketFacade { + struct KeyValue { + string key; + string value; + } + struct Object { + string key; + ObjectState state; + } + struct ObjectState { + bytes32 blobHash; + uint64 size; + uint64 expiry; + KeyValue[] metadata; + } + struct ObjectValue { + bytes32 blobHash; + bytes32 recoveryHash; + uint64 size; + uint64 expiry; + KeyValue[] metadata; + } + struct Query { + Object[] objects; + string[] commonPrefixes; + string nextKey; + } + + event ObjectAdded(bytes key, bytes32 blobHash, bytes metadata); + event ObjectDeleted(bytes key, bytes32 blobHash); + event ObjectMetadataUpdated(bytes key, bytes metadata); + + function addObject(bytes32 source, string memory key, bytes32 hash, bytes32 recoveryHash, uint64 size) external; + function addObject(bytes32 source, string memory key, bytes32 hash, bytes32 recoveryHash, uint64 size, uint64 ttl, KeyValue[] memory metadata, bool overwrite) external; + function deleteObject(string memory key) external; + function getObject(string memory key) external view returns (ObjectValue memory); + function queryObjects(string memory prefix, string memory delimiter, string memory startKey, uint64 limit) external view returns (Query memory); + function queryObjects(string memory prefix, string memory delimiter, string memory startKey) external view returns (Query memory); + function queryObjects(string memory prefix) external view returns (Query memory); + function queryObjects() external view returns (Query memory); + function queryObjects(string memory prefix, string memory delimiter) external view returns (Query memory); + function updateObjectMetadata(string memory key, KeyValue[] memory metadata) external; +} +``` + +...which was generated by the following JSON ABI: +```json +[ + { + "type": "function", + "name": "addObject", + "inputs": [ + { + "name": "source", + "type": "bytes32", + "internalType": "bytes32" + }, + { + "name": "key", + "type": "string", + "internalType": "string" + }, + { + "name": "hash", + "type": "bytes32", + "internalType": "bytes32" + }, + { + "name": "recoveryHash", + "type": "bytes32", + "internalType": "bytes32" + }, + { + "name": "size", + "type": "uint64", + "internalType": "uint64" + } + ], + "outputs": [], + "stateMutability": "nonpayable" + }, + { + "type": "function", + "name": "addObject", + "inputs": [ + { + "name": "source", + "type": "bytes32", + "internalType": "bytes32" + }, + { + "name": "key", + "type": "string", + "internalType": "string" + }, + { + "name": "hash", + "type": "bytes32", + "internalType": "bytes32" + }, + { + "name": "recoveryHash", + "type": "bytes32", + "internalType": "bytes32" + }, + { + "name": "size", + "type": "uint64", + "internalType": "uint64" + }, + { + "name": "ttl", + "type": "uint64", + "internalType": "uint64" + }, + { + "name": "metadata", + "type": "tuple[]", + "internalType": "struct KeyValue[]", + "components": [ + { + "name": "key", + "type": "string", + "internalType": "string" + }, + { + "name": "value", + "type": "string", + "internalType": "string" + } + ] + }, + { + "name": "overwrite", + "type": "bool", + "internalType": "bool" + } + ], + "outputs": [], + "stateMutability": "nonpayable" + }, + { + "type": "function", + "name": "deleteObject", + "inputs": [ + { + "name": "key", + "type": "string", + "internalType": "string" + } + ], + "outputs": [], + "stateMutability": "nonpayable" + }, + { + "type": "function", + "name": "getObject", + "inputs": [ + { + "name": "key", + "type": "string", + "internalType": "string" + } + ], + "outputs": [ + { + 
"name": "", + "type": "tuple", + "internalType": "struct ObjectValue", + "components": [ + { + "name": "blobHash", + "type": "bytes32", + "internalType": "bytes32" + }, + { + "name": "recoveryHash", + "type": "bytes32", + "internalType": "bytes32" + }, + { + "name": "size", + "type": "uint64", + "internalType": "uint64" + }, + { + "name": "expiry", + "type": "uint64", + "internalType": "uint64" + }, + { + "name": "metadata", + "type": "tuple[]", + "internalType": "struct KeyValue[]", + "components": [ + { + "name": "key", + "type": "string", + "internalType": "string" + }, + { + "name": "value", + "type": "string", + "internalType": "string" + } + ] + } + ] + } + ], + "stateMutability": "view" + }, + { + "type": "function", + "name": "queryObjects", + "inputs": [ + { + "name": "prefix", + "type": "string", + "internalType": "string" + }, + { + "name": "delimiter", + "type": "string", + "internalType": "string" + }, + { + "name": "startKey", + "type": "string", + "internalType": "string" + }, + { + "name": "limit", + "type": "uint64", + "internalType": "uint64" + } + ], + "outputs": [ + { + "name": "", + "type": "tuple", + "internalType": "struct Query", + "components": [ + { + "name": "objects", + "type": "tuple[]", + "internalType": "struct Object[]", + "components": [ + { + "name": "key", + "type": "string", + "internalType": "string" + }, + { + "name": "state", + "type": "tuple", + "internalType": "struct ObjectState", + "components": [ + { + "name": "blobHash", + "type": "bytes32", + "internalType": "bytes32" + }, + { + "name": "size", + "type": "uint64", + "internalType": "uint64" + }, + { + "name": "expiry", + "type": "uint64", + "internalType": "uint64" + }, + { + "name": "metadata", + "type": "tuple[]", + "internalType": "struct KeyValue[]", + "components": [ + { + "name": "key", + "type": "string", + "internalType": "string" + }, + { + "name": "value", + "type": "string", + "internalType": "string" + } + ] + } + ] + } + ] + }, + { + "name": "commonPrefixes", + "type": "string[]", + "internalType": "string[]" + }, + { + "name": "nextKey", + "type": "string", + "internalType": "string" + } + ] + } + ], + "stateMutability": "view" + }, + { + "type": "function", + "name": "queryObjects", + "inputs": [ + { + "name": "prefix", + "type": "string", + "internalType": "string" + }, + { + "name": "delimiter", + "type": "string", + "internalType": "string" + }, + { + "name": "startKey", + "type": "string", + "internalType": "string" + } + ], + "outputs": [ + { + "name": "", + "type": "tuple", + "internalType": "struct Query", + "components": [ + { + "name": "objects", + "type": "tuple[]", + "internalType": "struct Object[]", + "components": [ + { + "name": "key", + "type": "string", + "internalType": "string" + }, + { + "name": "state", + "type": "tuple", + "internalType": "struct ObjectState", + "components": [ + { + "name": "blobHash", + "type": "bytes32", + "internalType": "bytes32" + }, + { + "name": "size", + "type": "uint64", + "internalType": "uint64" + }, + { + "name": "expiry", + "type": "uint64", + "internalType": "uint64" + }, + { + "name": "metadata", + "type": "tuple[]", + "internalType": "struct KeyValue[]", + "components": [ + { + "name": "key", + "type": "string", + "internalType": "string" + }, + { + "name": "value", + "type": "string", + "internalType": "string" + } + ] + } + ] + } + ] + }, + { + "name": "commonPrefixes", + "type": "string[]", + "internalType": "string[]" + }, + { + "name": "nextKey", + "type": "string", + "internalType": "string" + } + ] + } + ], + 
"stateMutability": "view" + }, + { + "type": "function", + "name": "queryObjects", + "inputs": [ + { + "name": "prefix", + "type": "string", + "internalType": "string" + } + ], + "outputs": [ + { + "name": "", + "type": "tuple", + "internalType": "struct Query", + "components": [ + { + "name": "objects", + "type": "tuple[]", + "internalType": "struct Object[]", + "components": [ + { + "name": "key", + "type": "string", + "internalType": "string" + }, + { + "name": "state", + "type": "tuple", + "internalType": "struct ObjectState", + "components": [ + { + "name": "blobHash", + "type": "bytes32", + "internalType": "bytes32" + }, + { + "name": "size", + "type": "uint64", + "internalType": "uint64" + }, + { + "name": "expiry", + "type": "uint64", + "internalType": "uint64" + }, + { + "name": "metadata", + "type": "tuple[]", + "internalType": "struct KeyValue[]", + "components": [ + { + "name": "key", + "type": "string", + "internalType": "string" + }, + { + "name": "value", + "type": "string", + "internalType": "string" + } + ] + } + ] + } + ] + }, + { + "name": "commonPrefixes", + "type": "string[]", + "internalType": "string[]" + }, + { + "name": "nextKey", + "type": "string", + "internalType": "string" + } + ] + } + ], + "stateMutability": "view" + }, + { + "type": "function", + "name": "queryObjects", + "inputs": [], + "outputs": [ + { + "name": "", + "type": "tuple", + "internalType": "struct Query", + "components": [ + { + "name": "objects", + "type": "tuple[]", + "internalType": "struct Object[]", + "components": [ + { + "name": "key", + "type": "string", + "internalType": "string" + }, + { + "name": "state", + "type": "tuple", + "internalType": "struct ObjectState", + "components": [ + { + "name": "blobHash", + "type": "bytes32", + "internalType": "bytes32" + }, + { + "name": "size", + "type": "uint64", + "internalType": "uint64" + }, + { + "name": "expiry", + "type": "uint64", + "internalType": "uint64" + }, + { + "name": "metadata", + "type": "tuple[]", + "internalType": "struct KeyValue[]", + "components": [ + { + "name": "key", + "type": "string", + "internalType": "string" + }, + { + "name": "value", + "type": "string", + "internalType": "string" + } + ] + } + ] + } + ] + }, + { + "name": "commonPrefixes", + "type": "string[]", + "internalType": "string[]" + }, + { + "name": "nextKey", + "type": "string", + "internalType": "string" + } + ] + } + ], + "stateMutability": "view" + }, + { + "type": "function", + "name": "queryObjects", + "inputs": [ + { + "name": "prefix", + "type": "string", + "internalType": "string" + }, + { + "name": "delimiter", + "type": "string", + "internalType": "string" + } + ], + "outputs": [ + { + "name": "", + "type": "tuple", + "internalType": "struct Query", + "components": [ + { + "name": "objects", + "type": "tuple[]", + "internalType": "struct Object[]", + "components": [ + { + "name": "key", + "type": "string", + "internalType": "string" + }, + { + "name": "state", + "type": "tuple", + "internalType": "struct ObjectState", + "components": [ + { + "name": "blobHash", + "type": "bytes32", + "internalType": "bytes32" + }, + { + "name": "size", + "type": "uint64", + "internalType": "uint64" + }, + { + "name": "expiry", + "type": "uint64", + "internalType": "uint64" + }, + { + "name": "metadata", + "type": "tuple[]", + "internalType": "struct KeyValue[]", + "components": [ + { + "name": "key", + "type": "string", + "internalType": "string" + }, + { + "name": "value", + "type": "string", + "internalType": "string" + } + ] + } + ] + } + ] + }, + { + 
"name": "commonPrefixes", + "type": "string[]", + "internalType": "string[]" + }, + { + "name": "nextKey", + "type": "string", + "internalType": "string" + } + ] + } + ], + "stateMutability": "view" + }, + { + "type": "function", + "name": "updateObjectMetadata", + "inputs": [ + { + "name": "key", + "type": "string", + "internalType": "string" + }, + { + "name": "metadata", + "type": "tuple[]", + "internalType": "struct KeyValue[]", + "components": [ + { + "name": "key", + "type": "string", + "internalType": "string" + }, + { + "name": "value", + "type": "string", + "internalType": "string" + } + ] + } + ], + "outputs": [], + "stateMutability": "nonpayable" + }, + { + "type": "event", + "name": "ObjectAdded", + "inputs": [ + { + "name": "key", + "type": "bytes", + "indexed": false, + "internalType": "bytes" + }, + { + "name": "blobHash", + "type": "bytes32", + "indexed": false, + "internalType": "bytes32" + }, + { + "name": "metadata", + "type": "bytes", + "indexed": false, + "internalType": "bytes" + } + ], + "anonymous": false + }, + { + "type": "event", + "name": "ObjectDeleted", + "inputs": [ + { + "name": "key", + "type": "bytes", + "indexed": false, + "internalType": "bytes" + }, + { + "name": "blobHash", + "type": "bytes32", + "indexed": false, + "internalType": "bytes32" + } + ], + "anonymous": false + }, + { + "type": "event", + "name": "ObjectMetadataUpdated", + "inputs": [ + { + "name": "key", + "type": "bytes", + "indexed": false, + "internalType": "bytes" + }, + { + "name": "metadata", + "type": "bytes", + "indexed": false, + "internalType": "bytes" + } + ], + "anonymous": false + } +] +```*/ +#[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style, + clippy::empty_structs_with_brackets +)] +pub mod IBucketFacade { + use super::*; + use ::alloy_sol_types; + /// The creation / init bytecode of the contract. + /// + /// ```text + ///0x + /// ``` + #[rustfmt::skip] + #[allow(clippy::all)] + pub static BYTECODE: alloy_sol_types::private::Bytes = alloy_sol_types::private::Bytes::from_static( + b"", + ); + /// The runtime bytecode of the contract, as deployed on the network. 
+ /// + /// ```text + ///0x + /// ``` + #[rustfmt::skip] + #[allow(clippy::all)] + pub static DEPLOYED_BYTECODE: alloy_sol_types::private::Bytes = alloy_sol_types::private::Bytes::from_static( + b"", + ); + /**```solidity + struct KeyValue { string key; string value; } + ```*/ + #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct KeyValue { + #[allow(missing_docs)] + pub key: ::alloy_sol_types::private::String, + #[allow(missing_docs)] + pub value: ::alloy_sol_types::private::String, + } + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + const _: () = { + use ::alloy_sol_types; + #[doc(hidden)] + type UnderlyingSolTuple<'a> = ( + ::alloy_sol_types::sol_data::String, + ::alloy_sol_types::sol_data::String, + ); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = ( + ::alloy_sol_types::private::String, + ::alloy_sol_types::private::String, + ); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: KeyValue) -> Self { + (value.key, value.value) + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for KeyValue { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self { + key: tuple.0, + value: tuple.1, + } + } + } + #[automatically_derived] + impl alloy_sol_types::SolValue for KeyValue { + type SolType = Self; + } + #[automatically_derived] + impl alloy_sol_types::private::SolTypeValue for KeyValue { + #[inline] + fn stv_to_tokens(&self) -> ::Token<'_> { + ( + <::alloy_sol_types::sol_data::String as alloy_sol_types::SolType>::tokenize( + &self.key, + ), + <::alloy_sol_types::sol_data::String as alloy_sol_types::SolType>::tokenize( + &self.value, + ), + ) + } + #[inline] + fn stv_abi_encoded_size(&self) -> usize { + if let Some(size) = ::ENCODED_SIZE { + return size; + } + let tuple = + as ::core::convert::From>::from(self.clone()); + as alloy_sol_types::SolType>::abi_encoded_size(&tuple) + } + #[inline] + fn stv_eip712_data_word(&self) -> alloy_sol_types::Word { + ::eip712_hash_struct(self) + } + #[inline] + fn stv_abi_encode_packed_to(&self, out: &mut alloy_sol_types::private::Vec) { + let tuple = + as ::core::convert::From>::from(self.clone()); + as alloy_sol_types::SolType>::abi_encode_packed_to( + &tuple, out, + ) + } + #[inline] + fn stv_abi_packed_encoded_size(&self) -> usize { + if let Some(size) = ::PACKED_ENCODED_SIZE { + return size; + } + let tuple = + as ::core::convert::From>::from(self.clone()); + as alloy_sol_types::SolType>::abi_packed_encoded_size( + &tuple, + ) + } + } + #[automatically_derived] + impl alloy_sol_types::SolType for KeyValue { + type RustType = Self; + type Token<'a> = as alloy_sol_types::SolType>::Token<'a>; + const SOL_NAME: &'static str = ::NAME; + const ENCODED_SIZE: Option = + as alloy_sol_types::SolType>::ENCODED_SIZE; + const PACKED_ENCODED_SIZE: Option = + as alloy_sol_types::SolType>::PACKED_ENCODED_SIZE; + #[inline] + fn valid_token(token: &Self::Token<'_>) -> bool { + as alloy_sol_types::SolType>::valid_token(token) + } + #[inline] + fn detokenize(token: Self::Token<'_>) -> Self::RustType { + let tuple = as alloy_sol_types::SolType>::detokenize(token); + >>::from(tuple) + } + } + #[automatically_derived] + impl 
alloy_sol_types::SolStruct for KeyValue { + const NAME: &'static str = "KeyValue"; + #[inline] + fn eip712_root_type() -> alloy_sol_types::private::Cow<'static, str> { + alloy_sol_types::private::Cow::Borrowed("KeyValue(string key,string value)") + } + #[inline] + fn eip712_components( + ) -> alloy_sol_types::private::Vec> + { + alloy_sol_types::private::Vec::new() + } + #[inline] + fn eip712_encode_type() -> alloy_sol_types::private::Cow<'static, str> { + ::eip712_root_type() + } + #[inline] + fn eip712_encode_data(&self) -> alloy_sol_types::private::Vec { + [ + <::alloy_sol_types::sol_data::String as alloy_sol_types::SolType>::eip712_data_word( + &self.key, + ) + .0, + <::alloy_sol_types::sol_data::String as alloy_sol_types::SolType>::eip712_data_word( + &self.value, + ) + .0, + ] + .concat() + } + } + #[automatically_derived] + impl alloy_sol_types::EventTopic for KeyValue { + #[inline] + fn topic_preimage_length(rust: &Self::RustType) -> usize { + 0usize + + <::alloy_sol_types::sol_data::String as alloy_sol_types::EventTopic>::topic_preimage_length( + &rust.key, + ) + + <::alloy_sol_types::sol_data::String as alloy_sol_types::EventTopic>::topic_preimage_length( + &rust.value, + ) + } + #[inline] + fn encode_topic_preimage( + rust: &Self::RustType, + out: &mut alloy_sol_types::private::Vec, + ) { + out.reserve(::topic_preimage_length(rust)); + <::alloy_sol_types::sol_data::String as alloy_sol_types::EventTopic>::encode_topic_preimage( + &rust.key, + out, + ); + <::alloy_sol_types::sol_data::String as alloy_sol_types::EventTopic>::encode_topic_preimage( + &rust.value, + out, + ); + } + #[inline] + fn encode_topic(rust: &Self::RustType) -> alloy_sol_types::abi::token::WordToken { + let mut out = alloy_sol_types::private::Vec::new(); + ::encode_topic_preimage(rust, &mut out); + alloy_sol_types::abi::token::WordToken(alloy_sol_types::private::keccak256(out)) + } + } + }; + /**```solidity + struct Object { string key; ObjectState state; } + ```*/ + #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct Object { + #[allow(missing_docs)] + pub key: ::alloy_sol_types::private::String, + #[allow(missing_docs)] + pub state: ::RustType, + } + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + const _: () = { + use ::alloy_sol_types; + #[doc(hidden)] + type UnderlyingSolTuple<'a> = (::alloy_sol_types::sol_data::String, ObjectState); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = ( + ::alloy_sol_types::private::String, + ::RustType, + ); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: Object) -> Self { + (value.key, value.state) + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for Object { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self { + key: tuple.0, + state: tuple.1, + } + } + } + #[automatically_derived] + impl alloy_sol_types::SolValue for Object { + type SolType = Self; + } + #[automatically_derived] + impl alloy_sol_types::private::SolTypeValue for Object { + #[inline] + fn stv_to_tokens(&self) -> ::Token<'_> { + ( + <::alloy_sol_types::sol_data::String as alloy_sol_types::SolType>::tokenize( + &self.key, + ), + ::tokenize(&self.state), + ) + } + 
#[inline] + fn stv_abi_encoded_size(&self) -> usize { + if let Some(size) = ::ENCODED_SIZE { + return size; + } + let tuple = + as ::core::convert::From>::from(self.clone()); + as alloy_sol_types::SolType>::abi_encoded_size(&tuple) + } + #[inline] + fn stv_eip712_data_word(&self) -> alloy_sol_types::Word { + ::eip712_hash_struct(self) + } + #[inline] + fn stv_abi_encode_packed_to(&self, out: &mut alloy_sol_types::private::Vec) { + let tuple = + as ::core::convert::From>::from(self.clone()); + as alloy_sol_types::SolType>::abi_encode_packed_to( + &tuple, out, + ) + } + #[inline] + fn stv_abi_packed_encoded_size(&self) -> usize { + if let Some(size) = ::PACKED_ENCODED_SIZE { + return size; + } + let tuple = + as ::core::convert::From>::from(self.clone()); + as alloy_sol_types::SolType>::abi_packed_encoded_size( + &tuple, + ) + } + } + #[automatically_derived] + impl alloy_sol_types::SolType for Object { + type RustType = Self; + type Token<'a> = as alloy_sol_types::SolType>::Token<'a>; + const SOL_NAME: &'static str = ::NAME; + const ENCODED_SIZE: Option = + as alloy_sol_types::SolType>::ENCODED_SIZE; + const PACKED_ENCODED_SIZE: Option = + as alloy_sol_types::SolType>::PACKED_ENCODED_SIZE; + #[inline] + fn valid_token(token: &Self::Token<'_>) -> bool { + as alloy_sol_types::SolType>::valid_token(token) + } + #[inline] + fn detokenize(token: Self::Token<'_>) -> Self::RustType { + let tuple = as alloy_sol_types::SolType>::detokenize(token); + >>::from(tuple) + } + } + #[automatically_derived] + impl alloy_sol_types::SolStruct for Object { + const NAME: &'static str = "Object"; + #[inline] + fn eip712_root_type() -> alloy_sol_types::private::Cow<'static, str> { + alloy_sol_types::private::Cow::Borrowed("Object(string key,ObjectState state)") + } + #[inline] + fn eip712_components( + ) -> alloy_sol_types::private::Vec> + { + let mut components = alloy_sol_types::private::Vec::with_capacity(1); + components.push(::eip712_root_type()); + components.extend(::eip712_components()); + components + } + #[inline] + fn eip712_encode_data(&self) -> alloy_sol_types::private::Vec { + [ + <::alloy_sol_types::sol_data::String as alloy_sol_types::SolType>::eip712_data_word( + &self.key, + ) + .0, + ::eip712_data_word( + &self.state, + ) + .0, + ] + .concat() + } + } + #[automatically_derived] + impl alloy_sol_types::EventTopic for Object { + #[inline] + fn topic_preimage_length(rust: &Self::RustType) -> usize { + 0usize + + <::alloy_sol_types::sol_data::String as alloy_sol_types::EventTopic>::topic_preimage_length( + &rust.key, + ) + + ::topic_preimage_length( + &rust.state, + ) + } + #[inline] + fn encode_topic_preimage( + rust: &Self::RustType, + out: &mut alloy_sol_types::private::Vec, + ) { + out.reserve(::topic_preimage_length(rust)); + <::alloy_sol_types::sol_data::String as alloy_sol_types::EventTopic>::encode_topic_preimage( + &rust.key, + out, + ); + ::encode_topic_preimage( + &rust.state, + out, + ); + } + #[inline] + fn encode_topic(rust: &Self::RustType) -> alloy_sol_types::abi::token::WordToken { + let mut out = alloy_sol_types::private::Vec::new(); + ::encode_topic_preimage(rust, &mut out); + alloy_sol_types::abi::token::WordToken(alloy_sol_types::private::keccak256(out)) + } + } + }; + /**```solidity + struct ObjectState { bytes32 blobHash; uint64 size; uint64 expiry; KeyValue[] metadata; } + ```*/ + #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct ObjectState { + #[allow(missing_docs)] + pub blobHash: 
::alloy_sol_types::private::FixedBytes<32>, + #[allow(missing_docs)] + pub size: u64, + #[allow(missing_docs)] + pub expiry: u64, + #[allow(missing_docs)] + pub metadata: + ::alloy_sol_types::private::Vec<::RustType>, + } + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + const _: () = { + use ::alloy_sol_types; + #[doc(hidden)] + type UnderlyingSolTuple<'a> = ( + ::alloy_sol_types::sol_data::FixedBytes<32>, + ::alloy_sol_types::sol_data::Uint<64>, + ::alloy_sol_types::sol_data::Uint<64>, + ::alloy_sol_types::sol_data::Array, + ); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = ( + ::alloy_sol_types::private::FixedBytes<32>, + u64, + u64, + ::alloy_sol_types::private::Vec<::RustType>, + ); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: ObjectState) -> Self { + (value.blobHash, value.size, value.expiry, value.metadata) + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for ObjectState { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self { + blobHash: tuple.0, + size: tuple.1, + expiry: tuple.2, + metadata: tuple.3, + } + } + } + #[automatically_derived] + impl alloy_sol_types::SolValue for ObjectState { + type SolType = Self; + } + #[automatically_derived] + impl alloy_sol_types::private::SolTypeValue for ObjectState { + #[inline] + fn stv_to_tokens(&self) -> ::Token<'_> { + ( + <::alloy_sol_types::sol_data::FixedBytes< + 32, + > as alloy_sol_types::SolType>::tokenize(&self.blobHash), + <::alloy_sol_types::sol_data::Uint< + 64, + > as alloy_sol_types::SolType>::tokenize(&self.size), + <::alloy_sol_types::sol_data::Uint< + 64, + > as alloy_sol_types::SolType>::tokenize(&self.expiry), + <::alloy_sol_types::sol_data::Array< + KeyValue, + > as alloy_sol_types::SolType>::tokenize(&self.metadata), + ) + } + #[inline] + fn stv_abi_encoded_size(&self) -> usize { + if let Some(size) = ::ENCODED_SIZE { + return size; + } + let tuple = + as ::core::convert::From>::from(self.clone()); + as alloy_sol_types::SolType>::abi_encoded_size(&tuple) + } + #[inline] + fn stv_eip712_data_word(&self) -> alloy_sol_types::Word { + ::eip712_hash_struct(self) + } + #[inline] + fn stv_abi_encode_packed_to(&self, out: &mut alloy_sol_types::private::Vec) { + let tuple = + as ::core::convert::From>::from(self.clone()); + as alloy_sol_types::SolType>::abi_encode_packed_to( + &tuple, out, + ) + } + #[inline] + fn stv_abi_packed_encoded_size(&self) -> usize { + if let Some(size) = ::PACKED_ENCODED_SIZE { + return size; + } + let tuple = + as ::core::convert::From>::from(self.clone()); + as alloy_sol_types::SolType>::abi_packed_encoded_size( + &tuple, + ) + } + } + #[automatically_derived] + impl alloy_sol_types::SolType for ObjectState { + type RustType = Self; + type Token<'a> = as alloy_sol_types::SolType>::Token<'a>; + const SOL_NAME: &'static str = ::NAME; + const ENCODED_SIZE: Option = + as alloy_sol_types::SolType>::ENCODED_SIZE; + const PACKED_ENCODED_SIZE: Option = + as alloy_sol_types::SolType>::PACKED_ENCODED_SIZE; + #[inline] + fn valid_token(token: &Self::Token<'_>) -> bool { + as alloy_sol_types::SolType>::valid_token(token) + } + #[inline] + fn detokenize(token: Self::Token<'_>) -> Self::RustType { + let tuple = as 
alloy_sol_types::SolType>::detokenize(token); + >>::from(tuple) + } + } + #[automatically_derived] + impl alloy_sol_types::SolStruct for ObjectState { + const NAME: &'static str = "ObjectState"; + #[inline] + fn eip712_root_type() -> alloy_sol_types::private::Cow<'static, str> { + alloy_sol_types::private::Cow::Borrowed( + "ObjectState(bytes32 blobHash,uint64 size,uint64 expiry,KeyValue[] metadata)", + ) + } + #[inline] + fn eip712_components( + ) -> alloy_sol_types::private::Vec> + { + let mut components = alloy_sol_types::private::Vec::with_capacity(1); + components.push(::eip712_root_type()); + components.extend(::eip712_components()); + components + } + #[inline] + fn eip712_encode_data(&self) -> alloy_sol_types::private::Vec { + [ + <::alloy_sol_types::sol_data::FixedBytes< + 32, + > as alloy_sol_types::SolType>::eip712_data_word(&self.blobHash) + .0, + <::alloy_sol_types::sol_data::Uint< + 64, + > as alloy_sol_types::SolType>::eip712_data_word(&self.size) + .0, + <::alloy_sol_types::sol_data::Uint< + 64, + > as alloy_sol_types::SolType>::eip712_data_word(&self.expiry) + .0, + <::alloy_sol_types::sol_data::Array< + KeyValue, + > as alloy_sol_types::SolType>::eip712_data_word(&self.metadata) + .0, + ] + .concat() + } + } + #[automatically_derived] + impl alloy_sol_types::EventTopic for ObjectState { + #[inline] + fn topic_preimage_length(rust: &Self::RustType) -> usize { + 0usize + + <::alloy_sol_types::sol_data::FixedBytes< + 32, + > as alloy_sol_types::EventTopic>::topic_preimage_length( + &rust.blobHash, + ) + + <::alloy_sol_types::sol_data::Uint< + 64, + > as alloy_sol_types::EventTopic>::topic_preimage_length(&rust.size) + + <::alloy_sol_types::sol_data::Uint< + 64, + > as alloy_sol_types::EventTopic>::topic_preimage_length( + &rust.expiry, + ) + + <::alloy_sol_types::sol_data::Array< + KeyValue, + > as alloy_sol_types::EventTopic>::topic_preimage_length( + &rust.metadata, + ) + } + #[inline] + fn encode_topic_preimage( + rust: &Self::RustType, + out: &mut alloy_sol_types::private::Vec, + ) { + out.reserve(::topic_preimage_length(rust)); + <::alloy_sol_types::sol_data::FixedBytes< + 32, + > as alloy_sol_types::EventTopic>::encode_topic_preimage( + &rust.blobHash, + out, + ); + <::alloy_sol_types::sol_data::Uint< + 64, + > as alloy_sol_types::EventTopic>::encode_topic_preimage( + &rust.size, + out, + ); + <::alloy_sol_types::sol_data::Uint< + 64, + > as alloy_sol_types::EventTopic>::encode_topic_preimage( + &rust.expiry, + out, + ); + <::alloy_sol_types::sol_data::Array< + KeyValue, + > as alloy_sol_types::EventTopic>::encode_topic_preimage( + &rust.metadata, + out, + ); + } + #[inline] + fn encode_topic(rust: &Self::RustType) -> alloy_sol_types::abi::token::WordToken { + let mut out = alloy_sol_types::private::Vec::new(); + ::encode_topic_preimage(rust, &mut out); + alloy_sol_types::abi::token::WordToken(alloy_sol_types::private::keccak256(out)) + } + } + }; + /**```solidity + struct ObjectValue { bytes32 blobHash; bytes32 recoveryHash; uint64 size; uint64 expiry; KeyValue[] metadata; } + ```*/ + #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct ObjectValue { + #[allow(missing_docs)] + pub blobHash: ::alloy_sol_types::private::FixedBytes<32>, + #[allow(missing_docs)] + pub recoveryHash: ::alloy_sol_types::private::FixedBytes<32>, + #[allow(missing_docs)] + pub size: u64, + #[allow(missing_docs)] + pub expiry: u64, + #[allow(missing_docs)] + pub metadata: + ::alloy_sol_types::private::Vec<::RustType>, + } + #[allow( + 
non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + const _: () = { + use ::alloy_sol_types; + #[doc(hidden)] + type UnderlyingSolTuple<'a> = ( + ::alloy_sol_types::sol_data::FixedBytes<32>, + ::alloy_sol_types::sol_data::FixedBytes<32>, + ::alloy_sol_types::sol_data::Uint<64>, + ::alloy_sol_types::sol_data::Uint<64>, + ::alloy_sol_types::sol_data::Array, + ); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = ( + ::alloy_sol_types::private::FixedBytes<32>, + ::alloy_sol_types::private::FixedBytes<32>, + u64, + u64, + ::alloy_sol_types::private::Vec<::RustType>, + ); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: ObjectValue) -> Self { + ( + value.blobHash, + value.recoveryHash, + value.size, + value.expiry, + value.metadata, + ) + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for ObjectValue { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self { + blobHash: tuple.0, + recoveryHash: tuple.1, + size: tuple.2, + expiry: tuple.3, + metadata: tuple.4, + } + } + } + #[automatically_derived] + impl alloy_sol_types::SolValue for ObjectValue { + type SolType = Self; + } + #[automatically_derived] + impl alloy_sol_types::private::SolTypeValue for ObjectValue { + #[inline] + fn stv_to_tokens(&self) -> ::Token<'_> { + ( + <::alloy_sol_types::sol_data::FixedBytes< + 32, + > as alloy_sol_types::SolType>::tokenize(&self.blobHash), + <::alloy_sol_types::sol_data::FixedBytes< + 32, + > as alloy_sol_types::SolType>::tokenize(&self.recoveryHash), + <::alloy_sol_types::sol_data::Uint< + 64, + > as alloy_sol_types::SolType>::tokenize(&self.size), + <::alloy_sol_types::sol_data::Uint< + 64, + > as alloy_sol_types::SolType>::tokenize(&self.expiry), + <::alloy_sol_types::sol_data::Array< + KeyValue, + > as alloy_sol_types::SolType>::tokenize(&self.metadata), + ) + } + #[inline] + fn stv_abi_encoded_size(&self) -> usize { + if let Some(size) = ::ENCODED_SIZE { + return size; + } + let tuple = + as ::core::convert::From>::from(self.clone()); + as alloy_sol_types::SolType>::abi_encoded_size(&tuple) + } + #[inline] + fn stv_eip712_data_word(&self) -> alloy_sol_types::Word { + ::eip712_hash_struct(self) + } + #[inline] + fn stv_abi_encode_packed_to(&self, out: &mut alloy_sol_types::private::Vec) { + let tuple = + as ::core::convert::From>::from(self.clone()); + as alloy_sol_types::SolType>::abi_encode_packed_to( + &tuple, out, + ) + } + #[inline] + fn stv_abi_packed_encoded_size(&self) -> usize { + if let Some(size) = ::PACKED_ENCODED_SIZE { + return size; + } + let tuple = + as ::core::convert::From>::from(self.clone()); + as alloy_sol_types::SolType>::abi_packed_encoded_size( + &tuple, + ) + } + } + #[automatically_derived] + impl alloy_sol_types::SolType for ObjectValue { + type RustType = Self; + type Token<'a> = as alloy_sol_types::SolType>::Token<'a>; + const SOL_NAME: &'static str = ::NAME; + const ENCODED_SIZE: Option = + as alloy_sol_types::SolType>::ENCODED_SIZE; + const PACKED_ENCODED_SIZE: Option = + as alloy_sol_types::SolType>::PACKED_ENCODED_SIZE; + #[inline] + fn valid_token(token: &Self::Token<'_>) -> bool { + as alloy_sol_types::SolType>::valid_token(token) + } + #[inline] + fn detokenize(token: Self::Token<'_>) -> Self::RustType 
{ + let tuple = as alloy_sol_types::SolType>::detokenize(token); + >>::from(tuple) + } + } + #[automatically_derived] + impl alloy_sol_types::SolStruct for ObjectValue { + const NAME: &'static str = "ObjectValue"; + #[inline] + fn eip712_root_type() -> alloy_sol_types::private::Cow<'static, str> { + alloy_sol_types::private::Cow::Borrowed( + "ObjectValue(bytes32 blobHash,bytes32 recoveryHash,uint64 size,uint64 expiry,KeyValue[] metadata)", + ) + } + #[inline] + fn eip712_components( + ) -> alloy_sol_types::private::Vec> + { + let mut components = alloy_sol_types::private::Vec::with_capacity(1); + components.push(::eip712_root_type()); + components.extend(::eip712_components()); + components + } + #[inline] + fn eip712_encode_data(&self) -> alloy_sol_types::private::Vec { + [ + <::alloy_sol_types::sol_data::FixedBytes< + 32, + > as alloy_sol_types::SolType>::eip712_data_word(&self.blobHash) + .0, + <::alloy_sol_types::sol_data::FixedBytes< + 32, + > as alloy_sol_types::SolType>::eip712_data_word(&self.recoveryHash) + .0, + <::alloy_sol_types::sol_data::Uint< + 64, + > as alloy_sol_types::SolType>::eip712_data_word(&self.size) + .0, + <::alloy_sol_types::sol_data::Uint< + 64, + > as alloy_sol_types::SolType>::eip712_data_word(&self.expiry) + .0, + <::alloy_sol_types::sol_data::Array< + KeyValue, + > as alloy_sol_types::SolType>::eip712_data_word(&self.metadata) + .0, + ] + .concat() + } + } + #[automatically_derived] + impl alloy_sol_types::EventTopic for ObjectValue { + #[inline] + fn topic_preimage_length(rust: &Self::RustType) -> usize { + 0usize + + <::alloy_sol_types::sol_data::FixedBytes< + 32, + > as alloy_sol_types::EventTopic>::topic_preimage_length( + &rust.blobHash, + ) + + <::alloy_sol_types::sol_data::FixedBytes< + 32, + > as alloy_sol_types::EventTopic>::topic_preimage_length( + &rust.recoveryHash, + ) + + <::alloy_sol_types::sol_data::Uint< + 64, + > as alloy_sol_types::EventTopic>::topic_preimage_length(&rust.size) + + <::alloy_sol_types::sol_data::Uint< + 64, + > as alloy_sol_types::EventTopic>::topic_preimage_length( + &rust.expiry, + ) + + <::alloy_sol_types::sol_data::Array< + KeyValue, + > as alloy_sol_types::EventTopic>::topic_preimage_length( + &rust.metadata, + ) + } + #[inline] + fn encode_topic_preimage( + rust: &Self::RustType, + out: &mut alloy_sol_types::private::Vec, + ) { + out.reserve(::topic_preimage_length(rust)); + <::alloy_sol_types::sol_data::FixedBytes< + 32, + > as alloy_sol_types::EventTopic>::encode_topic_preimage( + &rust.blobHash, + out, + ); + <::alloy_sol_types::sol_data::FixedBytes< + 32, + > as alloy_sol_types::EventTopic>::encode_topic_preimage( + &rust.recoveryHash, + out, + ); + <::alloy_sol_types::sol_data::Uint< + 64, + > as alloy_sol_types::EventTopic>::encode_topic_preimage( + &rust.size, + out, + ); + <::alloy_sol_types::sol_data::Uint< + 64, + > as alloy_sol_types::EventTopic>::encode_topic_preimage( + &rust.expiry, + out, + ); + <::alloy_sol_types::sol_data::Array< + KeyValue, + > as alloy_sol_types::EventTopic>::encode_topic_preimage( + &rust.metadata, + out, + ); + } + #[inline] + fn encode_topic(rust: &Self::RustType) -> alloy_sol_types::abi::token::WordToken { + let mut out = alloy_sol_types::private::Vec::new(); + ::encode_topic_preimage(rust, &mut out); + alloy_sol_types::abi::token::WordToken(alloy_sol_types::private::keccak256(out)) + } + } + }; + /**```solidity + struct Query { Object[] objects; string[] commonPrefixes; string nextKey; } + ```*/ + #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + 
#[derive(Clone)] + pub struct Query { + #[allow(missing_docs)] + pub objects: + ::alloy_sol_types::private::Vec<::RustType>, + #[allow(missing_docs)] + pub commonPrefixes: ::alloy_sol_types::private::Vec<::alloy_sol_types::private::String>, + #[allow(missing_docs)] + pub nextKey: ::alloy_sol_types::private::String, + } + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + const _: () = { + use ::alloy_sol_types; + #[doc(hidden)] + type UnderlyingSolTuple<'a> = ( + ::alloy_sol_types::sol_data::Array, + ::alloy_sol_types::sol_data::Array<::alloy_sol_types::sol_data::String>, + ::alloy_sol_types::sol_data::String, + ); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = ( + ::alloy_sol_types::private::Vec<::RustType>, + ::alloy_sol_types::private::Vec<::alloy_sol_types::private::String>, + ::alloy_sol_types::private::String, + ); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: Query) -> Self { + (value.objects, value.commonPrefixes, value.nextKey) + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for Query { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self { + objects: tuple.0, + commonPrefixes: tuple.1, + nextKey: tuple.2, + } + } + } + #[automatically_derived] + impl alloy_sol_types::SolValue for Query { + type SolType = Self; + } + #[automatically_derived] + impl alloy_sol_types::private::SolTypeValue for Query { + #[inline] + fn stv_to_tokens(&self) -> ::Token<'_> { + ( + <::alloy_sol_types::sol_data::Array< + Object, + > as alloy_sol_types::SolType>::tokenize(&self.objects), + <::alloy_sol_types::sol_data::Array< + ::alloy_sol_types::sol_data::String, + > as alloy_sol_types::SolType>::tokenize(&self.commonPrefixes), + <::alloy_sol_types::sol_data::String as alloy_sol_types::SolType>::tokenize( + &self.nextKey, + ), + ) + } + #[inline] + fn stv_abi_encoded_size(&self) -> usize { + if let Some(size) = ::ENCODED_SIZE { + return size; + } + let tuple = + as ::core::convert::From>::from(self.clone()); + as alloy_sol_types::SolType>::abi_encoded_size(&tuple) + } + #[inline] + fn stv_eip712_data_word(&self) -> alloy_sol_types::Word { + ::eip712_hash_struct(self) + } + #[inline] + fn stv_abi_encode_packed_to(&self, out: &mut alloy_sol_types::private::Vec) { + let tuple = + as ::core::convert::From>::from(self.clone()); + as alloy_sol_types::SolType>::abi_encode_packed_to( + &tuple, out, + ) + } + #[inline] + fn stv_abi_packed_encoded_size(&self) -> usize { + if let Some(size) = ::PACKED_ENCODED_SIZE { + return size; + } + let tuple = + as ::core::convert::From>::from(self.clone()); + as alloy_sol_types::SolType>::abi_packed_encoded_size( + &tuple, + ) + } + } + #[automatically_derived] + impl alloy_sol_types::SolType for Query { + type RustType = Self; + type Token<'a> = as alloy_sol_types::SolType>::Token<'a>; + const SOL_NAME: &'static str = ::NAME; + const ENCODED_SIZE: Option = + as alloy_sol_types::SolType>::ENCODED_SIZE; + const PACKED_ENCODED_SIZE: Option = + as alloy_sol_types::SolType>::PACKED_ENCODED_SIZE; + #[inline] + fn valid_token(token: &Self::Token<'_>) -> bool { + as alloy_sol_types::SolType>::valid_token(token) + } + #[inline] + fn detokenize(token: Self::Token<'_>) -> Self::RustType { + let tuple = 
as alloy_sol_types::SolType>::detokenize(token); + >>::from(tuple) + } + } + #[automatically_derived] + impl alloy_sol_types::SolStruct for Query { + const NAME: &'static str = "Query"; + #[inline] + fn eip712_root_type() -> alloy_sol_types::private::Cow<'static, str> { + alloy_sol_types::private::Cow::Borrowed( + "Query(Object[] objects,string[] commonPrefixes,string nextKey)", + ) + } + #[inline] + fn eip712_components( + ) -> alloy_sol_types::private::Vec> + { + let mut components = alloy_sol_types::private::Vec::with_capacity(1); + components.push(::eip712_root_type()); + components.extend(::eip712_components()); + components + } + #[inline] + fn eip712_encode_data(&self) -> alloy_sol_types::private::Vec { + [ + <::alloy_sol_types::sol_data::Array< + Object, + > as alloy_sol_types::SolType>::eip712_data_word(&self.objects) + .0, + <::alloy_sol_types::sol_data::Array< + ::alloy_sol_types::sol_data::String, + > as alloy_sol_types::SolType>::eip712_data_word( + &self.commonPrefixes, + ) + .0, + <::alloy_sol_types::sol_data::String as alloy_sol_types::SolType>::eip712_data_word( + &self.nextKey, + ) + .0, + ] + .concat() + } + } + #[automatically_derived] + impl alloy_sol_types::EventTopic for Query { + #[inline] + fn topic_preimage_length(rust: &Self::RustType) -> usize { + 0usize + + <::alloy_sol_types::sol_data::Array< + Object, + > as alloy_sol_types::EventTopic>::topic_preimage_length( + &rust.objects, + ) + + <::alloy_sol_types::sol_data::Array< + ::alloy_sol_types::sol_data::String, + > as alloy_sol_types::EventTopic>::topic_preimage_length( + &rust.commonPrefixes, + ) + + <::alloy_sol_types::sol_data::String as alloy_sol_types::EventTopic>::topic_preimage_length( + &rust.nextKey, + ) + } + #[inline] + fn encode_topic_preimage( + rust: &Self::RustType, + out: &mut alloy_sol_types::private::Vec, + ) { + out.reserve(::topic_preimage_length(rust)); + <::alloy_sol_types::sol_data::Array< + Object, + > as alloy_sol_types::EventTopic>::encode_topic_preimage( + &rust.objects, + out, + ); + <::alloy_sol_types::sol_data::Array< + ::alloy_sol_types::sol_data::String, + > as alloy_sol_types::EventTopic>::encode_topic_preimage( + &rust.commonPrefixes, + out, + ); + <::alloy_sol_types::sol_data::String as alloy_sol_types::EventTopic>::encode_topic_preimage( + &rust.nextKey, + out, + ); + } + #[inline] + fn encode_topic(rust: &Self::RustType) -> alloy_sol_types::abi::token::WordToken { + let mut out = alloy_sol_types::private::Vec::new(); + ::encode_topic_preimage(rust, &mut out); + alloy_sol_types::abi::token::WordToken(alloy_sol_types::private::keccak256(out)) + } + } + }; + /**Event with signature `ObjectAdded(bytes,bytes32,bytes)` and selector `0x3cf4a57a6c61242c0926d9fc09a382dba36a6e92628c777f1244c459b809793c`. 
+ ```solidity + event ObjectAdded(bytes key, bytes32 blobHash, bytes metadata); + ```*/ + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + #[derive(Clone)] + pub struct ObjectAdded { + #[allow(missing_docs)] + pub key: ::alloy_sol_types::private::Bytes, + #[allow(missing_docs)] + pub blobHash: ::alloy_sol_types::private::FixedBytes<32>, + #[allow(missing_docs)] + pub metadata: ::alloy_sol_types::private::Bytes, + } + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + const _: () = { + use ::alloy_sol_types; + #[automatically_derived] + impl alloy_sol_types::SolEvent for ObjectAdded { + type DataTuple<'a> = ( + ::alloy_sol_types::sol_data::Bytes, + ::alloy_sol_types::sol_data::FixedBytes<32>, + ::alloy_sol_types::sol_data::Bytes, + ); + type DataToken<'a> = as alloy_sol_types::SolType>::Token<'a>; + type TopicList = (alloy_sol_types::sol_data::FixedBytes<32>,); + const SIGNATURE: &'static str = "ObjectAdded(bytes,bytes32,bytes)"; + const SIGNATURE_HASH: alloy_sol_types::private::B256 = + alloy_sol_types::private::B256::new([ + 60u8, 244u8, 165u8, 122u8, 108u8, 97u8, 36u8, 44u8, 9u8, 38u8, 217u8, 252u8, + 9u8, 163u8, 130u8, 219u8, 163u8, 106u8, 110u8, 146u8, 98u8, 140u8, 119u8, + 127u8, 18u8, 68u8, 196u8, 89u8, 184u8, 9u8, 121u8, 60u8, + ]); + const ANONYMOUS: bool = false; + #[allow(unused_variables)] + #[inline] + fn new( + topics: ::RustType, + data: as alloy_sol_types::SolType>::RustType, + ) -> Self { + Self { + key: data.0, + blobHash: data.1, + metadata: data.2, + } + } + #[inline] + fn check_signature( + topics: &::RustType, + ) -> alloy_sol_types::Result<()> { + if topics.0 != Self::SIGNATURE_HASH { + return Err(alloy_sol_types::Error::invalid_event_signature_hash( + Self::SIGNATURE, + topics.0, + Self::SIGNATURE_HASH, + )); + } + Ok(()) + } + #[inline] + fn tokenize_body(&self) -> Self::DataToken<'_> { + ( + <::alloy_sol_types::sol_data::Bytes as alloy_sol_types::SolType>::tokenize( + &self.key, + ), + <::alloy_sol_types::sol_data::FixedBytes< + 32, + > as alloy_sol_types::SolType>::tokenize(&self.blobHash), + <::alloy_sol_types::sol_data::Bytes as alloy_sol_types::SolType>::tokenize( + &self.metadata, + ), + ) + } + #[inline] + fn topics(&self) -> ::RustType { + (Self::SIGNATURE_HASH.into(),) + } + #[inline] + fn encode_topics_raw( + &self, + out: &mut [alloy_sol_types::abi::token::WordToken], + ) -> alloy_sol_types::Result<()> { + if out.len() < ::COUNT { + return Err(alloy_sol_types::Error::Overrun); + } + out[0usize] = alloy_sol_types::abi::token::WordToken(Self::SIGNATURE_HASH); + Ok(()) + } + } + #[automatically_derived] + impl alloy_sol_types::private::IntoLogData for ObjectAdded { + fn to_log_data(&self) -> alloy_sol_types::private::LogData { + From::from(self) + } + fn into_log_data(self) -> alloy_sol_types::private::LogData { + From::from(&self) + } + } + #[automatically_derived] + impl From<&ObjectAdded> for alloy_sol_types::private::LogData { + #[inline] + fn from(this: &ObjectAdded) -> alloy_sol_types::private::LogData { + alloy_sol_types::SolEvent::encode_log_data(this) + } + } + }; + /**Event with signature `ObjectDeleted(bytes,bytes32)` and selector `0x712864228f369cc20045ca173aab7455af58fa9f6dba07491092c93d2cf7fb06`. 
+ ```solidity + event ObjectDeleted(bytes key, bytes32 blobHash); + ```*/ + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + #[derive(Clone)] + pub struct ObjectDeleted { + #[allow(missing_docs)] + pub key: ::alloy_sol_types::private::Bytes, + #[allow(missing_docs)] + pub blobHash: ::alloy_sol_types::private::FixedBytes<32>, + } + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + const _: () = { + use ::alloy_sol_types; + #[automatically_derived] + impl alloy_sol_types::SolEvent for ObjectDeleted { + type DataTuple<'a> = ( + ::alloy_sol_types::sol_data::Bytes, + ::alloy_sol_types::sol_data::FixedBytes<32>, + ); + type DataToken<'a> = as alloy_sol_types::SolType>::Token<'a>; + type TopicList = (alloy_sol_types::sol_data::FixedBytes<32>,); + const SIGNATURE: &'static str = "ObjectDeleted(bytes,bytes32)"; + const SIGNATURE_HASH: alloy_sol_types::private::B256 = + alloy_sol_types::private::B256::new([ + 113u8, 40u8, 100u8, 34u8, 143u8, 54u8, 156u8, 194u8, 0u8, 69u8, 202u8, 23u8, + 58u8, 171u8, 116u8, 85u8, 175u8, 88u8, 250u8, 159u8, 109u8, 186u8, 7u8, 73u8, + 16u8, 146u8, 201u8, 61u8, 44u8, 247u8, 251u8, 6u8, + ]); + const ANONYMOUS: bool = false; + #[allow(unused_variables)] + #[inline] + fn new( + topics: ::RustType, + data: as alloy_sol_types::SolType>::RustType, + ) -> Self { + Self { + key: data.0, + blobHash: data.1, + } + } + #[inline] + fn check_signature( + topics: &::RustType, + ) -> alloy_sol_types::Result<()> { + if topics.0 != Self::SIGNATURE_HASH { + return Err(alloy_sol_types::Error::invalid_event_signature_hash( + Self::SIGNATURE, + topics.0, + Self::SIGNATURE_HASH, + )); + } + Ok(()) + } + #[inline] + fn tokenize_body(&self) -> Self::DataToken<'_> { + ( + <::alloy_sol_types::sol_data::Bytes as alloy_sol_types::SolType>::tokenize( + &self.key, + ), + <::alloy_sol_types::sol_data::FixedBytes< + 32, + > as alloy_sol_types::SolType>::tokenize(&self.blobHash), + ) + } + #[inline] + fn topics(&self) -> ::RustType { + (Self::SIGNATURE_HASH.into(),) + } + #[inline] + fn encode_topics_raw( + &self, + out: &mut [alloy_sol_types::abi::token::WordToken], + ) -> alloy_sol_types::Result<()> { + if out.len() < ::COUNT { + return Err(alloy_sol_types::Error::Overrun); + } + out[0usize] = alloy_sol_types::abi::token::WordToken(Self::SIGNATURE_HASH); + Ok(()) + } + } + #[automatically_derived] + impl alloy_sol_types::private::IntoLogData for ObjectDeleted { + fn to_log_data(&self) -> alloy_sol_types::private::LogData { + From::from(self) + } + fn into_log_data(self) -> alloy_sol_types::private::LogData { + From::from(&self) + } + } + #[automatically_derived] + impl From<&ObjectDeleted> for alloy_sol_types::private::LogData { + #[inline] + fn from(this: &ObjectDeleted) -> alloy_sol_types::private::LogData { + alloy_sol_types::SolEvent::encode_log_data(this) + } + } + }; + /**Event with signature `ObjectMetadataUpdated(bytes,bytes)` and selector `0xa53f68921d8ba6356e423077a756ff2a282ae6de5d4ecc617da09b01ead5d640`. 
+ ```solidity + event ObjectMetadataUpdated(bytes key, bytes metadata); + ```*/ + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + #[derive(Clone)] + pub struct ObjectMetadataUpdated { + #[allow(missing_docs)] + pub key: ::alloy_sol_types::private::Bytes, + #[allow(missing_docs)] + pub metadata: ::alloy_sol_types::private::Bytes, + } + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + const _: () = { + use ::alloy_sol_types; + #[automatically_derived] + impl alloy_sol_types::SolEvent for ObjectMetadataUpdated { + type DataTuple<'a> = ( + ::alloy_sol_types::sol_data::Bytes, + ::alloy_sol_types::sol_data::Bytes, + ); + type DataToken<'a> = as alloy_sol_types::SolType>::Token<'a>; + type TopicList = (alloy_sol_types::sol_data::FixedBytes<32>,); + const SIGNATURE: &'static str = "ObjectMetadataUpdated(bytes,bytes)"; + const SIGNATURE_HASH: alloy_sol_types::private::B256 = + alloy_sol_types::private::B256::new([ + 165u8, 63u8, 104u8, 146u8, 29u8, 139u8, 166u8, 53u8, 110u8, 66u8, 48u8, 119u8, + 167u8, 86u8, 255u8, 42u8, 40u8, 42u8, 230u8, 222u8, 93u8, 78u8, 204u8, 97u8, + 125u8, 160u8, 155u8, 1u8, 234u8, 213u8, 214u8, 64u8, + ]); + const ANONYMOUS: bool = false; + #[allow(unused_variables)] + #[inline] + fn new( + topics: ::RustType, + data: as alloy_sol_types::SolType>::RustType, + ) -> Self { + Self { + key: data.0, + metadata: data.1, + } + } + #[inline] + fn check_signature( + topics: &::RustType, + ) -> alloy_sol_types::Result<()> { + if topics.0 != Self::SIGNATURE_HASH { + return Err(alloy_sol_types::Error::invalid_event_signature_hash( + Self::SIGNATURE, + topics.0, + Self::SIGNATURE_HASH, + )); + } + Ok(()) + } + #[inline] + fn tokenize_body(&self) -> Self::DataToken<'_> { + ( + <::alloy_sol_types::sol_data::Bytes as alloy_sol_types::SolType>::tokenize( + &self.key, + ), + <::alloy_sol_types::sol_data::Bytes as alloy_sol_types::SolType>::tokenize( + &self.metadata, + ), + ) + } + #[inline] + fn topics(&self) -> ::RustType { + (Self::SIGNATURE_HASH.into(),) + } + #[inline] + fn encode_topics_raw( + &self, + out: &mut [alloy_sol_types::abi::token::WordToken], + ) -> alloy_sol_types::Result<()> { + if out.len() < ::COUNT { + return Err(alloy_sol_types::Error::Overrun); + } + out[0usize] = alloy_sol_types::abi::token::WordToken(Self::SIGNATURE_HASH); + Ok(()) + } + } + #[automatically_derived] + impl alloy_sol_types::private::IntoLogData for ObjectMetadataUpdated { + fn to_log_data(&self) -> alloy_sol_types::private::LogData { + From::from(self) + } + fn into_log_data(self) -> alloy_sol_types::private::LogData { + From::from(&self) + } + } + #[automatically_derived] + impl From<&ObjectMetadataUpdated> for alloy_sol_types::private::LogData { + #[inline] + fn from(this: &ObjectMetadataUpdated) -> alloy_sol_types::private::LogData { + alloy_sol_types::SolEvent::encode_log_data(this) + } + } + }; + /**Function with signature `addObject(bytes32,string,bytes32,bytes32,uint64)` and selector `0x2d6f2550`. 
+ ```solidity + function addObject(bytes32 source, string memory key, bytes32 hash, bytes32 recoveryHash, uint64 size) external; + ```*/ + #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct addObject_0Call { + #[allow(missing_docs)] + pub source: ::alloy_sol_types::private::FixedBytes<32>, + #[allow(missing_docs)] + pub key: ::alloy_sol_types::private::String, + #[allow(missing_docs)] + pub hash: ::alloy_sol_types::private::FixedBytes<32>, + #[allow(missing_docs)] + pub recoveryHash: ::alloy_sol_types::private::FixedBytes<32>, + #[allow(missing_docs)] + pub size: u64, + } + ///Container type for the return parameters of the [`addObject(bytes32,string,bytes32,bytes32,uint64)`](addObject_0Call) function. + #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct addObject_0Return {} + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + const _: () = { + use ::alloy_sol_types; + { + #[doc(hidden)] + type UnderlyingSolTuple<'a> = ( + ::alloy_sol_types::sol_data::FixedBytes<32>, + ::alloy_sol_types::sol_data::String, + ::alloy_sol_types::sol_data::FixedBytes<32>, + ::alloy_sol_types::sol_data::FixedBytes<32>, + ::alloy_sol_types::sol_data::Uint<64>, + ); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = ( + ::alloy_sol_types::private::FixedBytes<32>, + ::alloy_sol_types::private::String, + ::alloy_sol_types::private::FixedBytes<32>, + ::alloy_sol_types::private::FixedBytes<32>, + u64, + ); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: addObject_0Call) -> Self { + ( + value.source, + value.key, + value.hash, + value.recoveryHash, + value.size, + ) + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for addObject_0Call { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self { + source: tuple.0, + key: tuple.1, + hash: tuple.2, + recoveryHash: tuple.3, + size: tuple.4, + } + } + } + } + { + #[doc(hidden)] + type UnderlyingSolTuple<'a> = (); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = (); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: addObject_0Return) -> Self { + () + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for addObject_0Return { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self {} + } + } + } + #[automatically_derived] + impl alloy_sol_types::SolCall for addObject_0Call { + type Parameters<'a> = ( + ::alloy_sol_types::sol_data::FixedBytes<32>, + ::alloy_sol_types::sol_data::String, + ::alloy_sol_types::sol_data::FixedBytes<32>, + ::alloy_sol_types::sol_data::FixedBytes<32>, + ::alloy_sol_types::sol_data::Uint<64>, + ); + type Token<'a> = as alloy_sol_types::SolType>::Token<'a>; + type Return = addObject_0Return; + type ReturnTuple<'a> = (); + type ReturnToken<'a> = as alloy_sol_types::SolType>::Token<'a>; + const SIGNATURE: &'static str = 
"addObject(bytes32,string,bytes32,bytes32,uint64)"; + const SELECTOR: [u8; 4] = [45u8, 111u8, 37u8, 80u8]; + #[inline] + fn new<'a>( + tuple: as alloy_sol_types::SolType>::RustType, + ) -> Self { + tuple.into() + } + #[inline] + fn tokenize(&self) -> Self::Token<'_> { + ( + <::alloy_sol_types::sol_data::FixedBytes< + 32, + > as alloy_sol_types::SolType>::tokenize(&self.source), + <::alloy_sol_types::sol_data::String as alloy_sol_types::SolType>::tokenize( + &self.key, + ), + <::alloy_sol_types::sol_data::FixedBytes< + 32, + > as alloy_sol_types::SolType>::tokenize(&self.hash), + <::alloy_sol_types::sol_data::FixedBytes< + 32, + > as alloy_sol_types::SolType>::tokenize(&self.recoveryHash), + <::alloy_sol_types::sol_data::Uint< + 64, + > as alloy_sol_types::SolType>::tokenize(&self.size), + ) + } + #[inline] + fn abi_decode_returns( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + as alloy_sol_types::SolType>::abi_decode_sequence( + data, validate, + ) + .map(Into::into) + } + } + }; + /**Function with signature `addObject(bytes32,string,bytes32,bytes32,uint64,uint64,(string,string)[],bool)` and selector `0x774343fe`. + ```solidity + function addObject(bytes32 source, string memory key, bytes32 hash, bytes32 recoveryHash, uint64 size, uint64 ttl, KeyValue[] memory metadata, bool overwrite) external; + ```*/ + #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct addObject_1Call { + #[allow(missing_docs)] + pub source: ::alloy_sol_types::private::FixedBytes<32>, + #[allow(missing_docs)] + pub key: ::alloy_sol_types::private::String, + #[allow(missing_docs)] + pub hash: ::alloy_sol_types::private::FixedBytes<32>, + #[allow(missing_docs)] + pub recoveryHash: ::alloy_sol_types::private::FixedBytes<32>, + #[allow(missing_docs)] + pub size: u64, + #[allow(missing_docs)] + pub ttl: u64, + #[allow(missing_docs)] + pub metadata: + ::alloy_sol_types::private::Vec<::RustType>, + #[allow(missing_docs)] + pub overwrite: bool, + } + ///Container type for the return parameters of the [`addObject(bytes32,string,bytes32,bytes32,uint64,uint64,(string,string)[],bool)`](addObject_1Call) function. 
+ #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct addObject_1Return {} + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + const _: () = { + use ::alloy_sol_types; + { + #[doc(hidden)] + type UnderlyingSolTuple<'a> = ( + ::alloy_sol_types::sol_data::FixedBytes<32>, + ::alloy_sol_types::sol_data::String, + ::alloy_sol_types::sol_data::FixedBytes<32>, + ::alloy_sol_types::sol_data::FixedBytes<32>, + ::alloy_sol_types::sol_data::Uint<64>, + ::alloy_sol_types::sol_data::Uint<64>, + ::alloy_sol_types::sol_data::Array, + ::alloy_sol_types::sol_data::Bool, + ); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = ( + ::alloy_sol_types::private::FixedBytes<32>, + ::alloy_sol_types::private::String, + ::alloy_sol_types::private::FixedBytes<32>, + ::alloy_sol_types::private::FixedBytes<32>, + u64, + u64, + ::alloy_sol_types::private::Vec<::RustType>, + bool, + ); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: addObject_1Call) -> Self { + ( + value.source, + value.key, + value.hash, + value.recoveryHash, + value.size, + value.ttl, + value.metadata, + value.overwrite, + ) + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for addObject_1Call { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self { + source: tuple.0, + key: tuple.1, + hash: tuple.2, + recoveryHash: tuple.3, + size: tuple.4, + ttl: tuple.5, + metadata: tuple.6, + overwrite: tuple.7, + } + } + } + } + { + #[doc(hidden)] + type UnderlyingSolTuple<'a> = (); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = (); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: addObject_1Return) -> Self { + () + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for addObject_1Return { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self {} + } + } + } + #[automatically_derived] + impl alloy_sol_types::SolCall for addObject_1Call { + type Parameters<'a> = ( + ::alloy_sol_types::sol_data::FixedBytes<32>, + ::alloy_sol_types::sol_data::String, + ::alloy_sol_types::sol_data::FixedBytes<32>, + ::alloy_sol_types::sol_data::FixedBytes<32>, + ::alloy_sol_types::sol_data::Uint<64>, + ::alloy_sol_types::sol_data::Uint<64>, + ::alloy_sol_types::sol_data::Array, + ::alloy_sol_types::sol_data::Bool, + ); + type Token<'a> = as alloy_sol_types::SolType>::Token<'a>; + type Return = addObject_1Return; + type ReturnTuple<'a> = (); + type ReturnToken<'a> = as alloy_sol_types::SolType>::Token<'a>; + const SIGNATURE: &'static str = + "addObject(bytes32,string,bytes32,bytes32,uint64,uint64,(string,string)[],bool)"; + const SELECTOR: [u8; 4] = [119u8, 67u8, 67u8, 254u8]; + #[inline] + fn new<'a>( + tuple: as alloy_sol_types::SolType>::RustType, + ) -> Self { + tuple.into() + } + #[inline] + fn tokenize(&self) -> Self::Token<'_> { + ( + <::alloy_sol_types::sol_data::FixedBytes< + 32, + > as 
alloy_sol_types::SolType>::tokenize(&self.source), + <::alloy_sol_types::sol_data::String as alloy_sol_types::SolType>::tokenize( + &self.key, + ), + <::alloy_sol_types::sol_data::FixedBytes< + 32, + > as alloy_sol_types::SolType>::tokenize(&self.hash), + <::alloy_sol_types::sol_data::FixedBytes< + 32, + > as alloy_sol_types::SolType>::tokenize(&self.recoveryHash), + <::alloy_sol_types::sol_data::Uint< + 64, + > as alloy_sol_types::SolType>::tokenize(&self.size), + <::alloy_sol_types::sol_data::Uint< + 64, + > as alloy_sol_types::SolType>::tokenize(&self.ttl), + <::alloy_sol_types::sol_data::Array< + KeyValue, + > as alloy_sol_types::SolType>::tokenize(&self.metadata), + <::alloy_sol_types::sol_data::Bool as alloy_sol_types::SolType>::tokenize( + &self.overwrite, + ), + ) + } + #[inline] + fn abi_decode_returns( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + as alloy_sol_types::SolType>::abi_decode_sequence( + data, validate, + ) + .map(Into::into) + } + } + }; + /**Function with signature `deleteObject(string)` and selector `0x2d7cb600`. + ```solidity + function deleteObject(string memory key) external; + ```*/ + #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct deleteObjectCall { + #[allow(missing_docs)] + pub key: ::alloy_sol_types::private::String, + } + ///Container type for the return parameters of the [`deleteObject(string)`](deleteObjectCall) function. + #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct deleteObjectReturn {} + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + const _: () = { + use ::alloy_sol_types; + { + #[doc(hidden)] + type UnderlyingSolTuple<'a> = (::alloy_sol_types::sol_data::String,); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = (::alloy_sol_types::private::String,); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: deleteObjectCall) -> Self { + (value.key,) + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for deleteObjectCall { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self { key: tuple.0 } + } + } + } + { + #[doc(hidden)] + type UnderlyingSolTuple<'a> = (); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = (); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: deleteObjectReturn) -> Self { + () + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for deleteObjectReturn { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self {} + } + } + } + #[automatically_derived] + impl alloy_sol_types::SolCall for deleteObjectCall { + type Parameters<'a> = (::alloy_sol_types::sol_data::String,); + type Token<'a> = as alloy_sol_types::SolType>::Token<'a>; + type Return = deleteObjectReturn; + type ReturnTuple<'a> = (); + type ReturnToken<'a> = as alloy_sol_types::SolType>::Token<'a>; + const SIGNATURE: &'static str = 
"deleteObject(string)"; + const SELECTOR: [u8; 4] = [45u8, 124u8, 182u8, 0u8]; + #[inline] + fn new<'a>( + tuple: as alloy_sol_types::SolType>::RustType, + ) -> Self { + tuple.into() + } + #[inline] + fn tokenize(&self) -> Self::Token<'_> { + ( + <::alloy_sol_types::sol_data::String as alloy_sol_types::SolType>::tokenize( + &self.key, + ), + ) + } + #[inline] + fn abi_decode_returns( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + as alloy_sol_types::SolType>::abi_decode_sequence( + data, validate, + ) + .map(Into::into) + } + } + }; + /**Function with signature `getObject(string)` and selector `0x0153ea91`. + ```solidity + function getObject(string memory key) external view returns (ObjectValue memory); + ```*/ + #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct getObjectCall { + #[allow(missing_docs)] + pub key: ::alloy_sol_types::private::String, + } + ///Container type for the return parameters of the [`getObject(string)`](getObjectCall) function. + #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct getObjectReturn { + #[allow(missing_docs)] + pub _0: ::RustType, + } + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + const _: () = { + use ::alloy_sol_types; + { + #[doc(hidden)] + type UnderlyingSolTuple<'a> = (::alloy_sol_types::sol_data::String,); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = (::alloy_sol_types::private::String,); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: getObjectCall) -> Self { + (value.key,) + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for getObjectCall { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self { key: tuple.0 } + } + } + } + { + #[doc(hidden)] + type UnderlyingSolTuple<'a> = (ObjectValue,); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = (::RustType,); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: getObjectReturn) -> Self { + (value._0,) + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for getObjectReturn { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self { _0: tuple.0 } + } + } + } + #[automatically_derived] + impl alloy_sol_types::SolCall for getObjectCall { + type Parameters<'a> = (::alloy_sol_types::sol_data::String,); + type Token<'a> = as alloy_sol_types::SolType>::Token<'a>; + type Return = getObjectReturn; + type ReturnTuple<'a> = (ObjectValue,); + type ReturnToken<'a> = as alloy_sol_types::SolType>::Token<'a>; + const SIGNATURE: &'static str = "getObject(string)"; + const SELECTOR: [u8; 4] = [1u8, 83u8, 234u8, 145u8]; + #[inline] + fn new<'a>( + tuple: as alloy_sol_types::SolType>::RustType, + ) -> Self { + tuple.into() + } + #[inline] + fn tokenize(&self) -> Self::Token<'_> { + ( + <::alloy_sol_types::sol_data::String as alloy_sol_types::SolType>::tokenize( + &self.key, + ), + ) + } 
+ #[inline] + fn abi_decode_returns( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + as alloy_sol_types::SolType>::abi_decode_sequence( + data, validate, + ) + .map(Into::into) + } + } + }; + /**Function with signature `queryObjects(string,string,string,uint64)` and selector `0x17d352c0`. + ```solidity + function queryObjects(string memory prefix, string memory delimiter, string memory startKey, uint64 limit) external view returns (Query memory); + ```*/ + #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct queryObjects_0Call { + #[allow(missing_docs)] + pub prefix: ::alloy_sol_types::private::String, + #[allow(missing_docs)] + pub delimiter: ::alloy_sol_types::private::String, + #[allow(missing_docs)] + pub startKey: ::alloy_sol_types::private::String, + #[allow(missing_docs)] + pub limit: u64, + } + ///Container type for the return parameters of the [`queryObjects(string,string,string,uint64)`](queryObjects_0Call) function. + #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct queryObjects_0Return { + #[allow(missing_docs)] + pub _0: ::RustType, + } + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + const _: () = { + use ::alloy_sol_types; + { + #[doc(hidden)] + type UnderlyingSolTuple<'a> = ( + ::alloy_sol_types::sol_data::String, + ::alloy_sol_types::sol_data::String, + ::alloy_sol_types::sol_data::String, + ::alloy_sol_types::sol_data::Uint<64>, + ); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = ( + ::alloy_sol_types::private::String, + ::alloy_sol_types::private::String, + ::alloy_sol_types::private::String, + u64, + ); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: queryObjects_0Call) -> Self { + (value.prefix, value.delimiter, value.startKey, value.limit) + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for queryObjects_0Call { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self { + prefix: tuple.0, + delimiter: tuple.1, + startKey: tuple.2, + limit: tuple.3, + } + } + } + } + { + #[doc(hidden)] + type UnderlyingSolTuple<'a> = (Query,); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = (::RustType,); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: queryObjects_0Return) -> Self { + (value._0,) + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for queryObjects_0Return { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self { _0: tuple.0 } + } + } + } + #[automatically_derived] + impl alloy_sol_types::SolCall for queryObjects_0Call { + type Parameters<'a> = ( + ::alloy_sol_types::sol_data::String, + ::alloy_sol_types::sol_data::String, + ::alloy_sol_types::sol_data::String, + ::alloy_sol_types::sol_data::Uint<64>, + ); + type Token<'a> = as alloy_sol_types::SolType>::Token<'a>; + type Return = queryObjects_0Return; + type ReturnTuple<'a> = 
(Query,); + type ReturnToken<'a> = as alloy_sol_types::SolType>::Token<'a>; + const SIGNATURE: &'static str = "queryObjects(string,string,string,uint64)"; + const SELECTOR: [u8; 4] = [23u8, 211u8, 82u8, 192u8]; + #[inline] + fn new<'a>( + tuple: as alloy_sol_types::SolType>::RustType, + ) -> Self { + tuple.into() + } + #[inline] + fn tokenize(&self) -> Self::Token<'_> { + ( + <::alloy_sol_types::sol_data::String as alloy_sol_types::SolType>::tokenize( + &self.prefix, + ), + <::alloy_sol_types::sol_data::String as alloy_sol_types::SolType>::tokenize( + &self.delimiter, + ), + <::alloy_sol_types::sol_data::String as alloy_sol_types::SolType>::tokenize( + &self.startKey, + ), + <::alloy_sol_types::sol_data::Uint<64> as alloy_sol_types::SolType>::tokenize( + &self.limit, + ), + ) + } + #[inline] + fn abi_decode_returns( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + as alloy_sol_types::SolType>::abi_decode_sequence( + data, validate, + ) + .map(Into::into) + } + } + }; + /**Function with signature `queryObjects(string,string,string)` and selector `0x4c53eab5`. + ```solidity + function queryObjects(string memory prefix, string memory delimiter, string memory startKey) external view returns (Query memory); + ```*/ + #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct queryObjects_1Call { + #[allow(missing_docs)] + pub prefix: ::alloy_sol_types::private::String, + #[allow(missing_docs)] + pub delimiter: ::alloy_sol_types::private::String, + #[allow(missing_docs)] + pub startKey: ::alloy_sol_types::private::String, + } + ///Container type for the return parameters of the [`queryObjects(string,string,string)`](queryObjects_1Call) function. + #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct queryObjects_1Return { + #[allow(missing_docs)] + pub _0: ::RustType, + } + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + const _: () = { + use ::alloy_sol_types; + { + #[doc(hidden)] + type UnderlyingSolTuple<'a> = ( + ::alloy_sol_types::sol_data::String, + ::alloy_sol_types::sol_data::String, + ::alloy_sol_types::sol_data::String, + ); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = ( + ::alloy_sol_types::private::String, + ::alloy_sol_types::private::String, + ::alloy_sol_types::private::String, + ); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: queryObjects_1Call) -> Self { + (value.prefix, value.delimiter, value.startKey) + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for queryObjects_1Call { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self { + prefix: tuple.0, + delimiter: tuple.1, + startKey: tuple.2, + } + } + } + } + { + #[doc(hidden)] + type UnderlyingSolTuple<'a> = (Query,); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = (::RustType,); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: 
queryObjects_1Return) -> Self { + (value._0,) + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for queryObjects_1Return { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self { _0: tuple.0 } + } + } + } + #[automatically_derived] + impl alloy_sol_types::SolCall for queryObjects_1Call { + type Parameters<'a> = ( + ::alloy_sol_types::sol_data::String, + ::alloy_sol_types::sol_data::String, + ::alloy_sol_types::sol_data::String, + ); + type Token<'a> = as alloy_sol_types::SolType>::Token<'a>; + type Return = queryObjects_1Return; + type ReturnTuple<'a> = (Query,); + type ReturnToken<'a> = as alloy_sol_types::SolType>::Token<'a>; + const SIGNATURE: &'static str = "queryObjects(string,string,string)"; + const SELECTOR: [u8; 4] = [76u8, 83u8, 234u8, 181u8]; + #[inline] + fn new<'a>( + tuple: as alloy_sol_types::SolType>::RustType, + ) -> Self { + tuple.into() + } + #[inline] + fn tokenize(&self) -> Self::Token<'_> { + ( + <::alloy_sol_types::sol_data::String as alloy_sol_types::SolType>::tokenize( + &self.prefix, + ), + <::alloy_sol_types::sol_data::String as alloy_sol_types::SolType>::tokenize( + &self.delimiter, + ), + <::alloy_sol_types::sol_data::String as alloy_sol_types::SolType>::tokenize( + &self.startKey, + ), + ) + } + #[inline] + fn abi_decode_returns( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + as alloy_sol_types::SolType>::abi_decode_sequence( + data, validate, + ) + .map(Into::into) + } + } + }; + /**Function with signature `queryObjects(string)` and selector `0x6294e9a3`. + ```solidity + function queryObjects(string memory prefix) external view returns (Query memory); + ```*/ + #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct queryObjects_2Call { + #[allow(missing_docs)] + pub prefix: ::alloy_sol_types::private::String, + } + ///Container type for the return parameters of the [`queryObjects(string)`](queryObjects_2Call) function. 
+ #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct queryObjects_2Return { + #[allow(missing_docs)] + pub _0: ::RustType, + } + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + const _: () = { + use ::alloy_sol_types; + { + #[doc(hidden)] + type UnderlyingSolTuple<'a> = (::alloy_sol_types::sol_data::String,); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = (::alloy_sol_types::private::String,); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: queryObjects_2Call) -> Self { + (value.prefix,) + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for queryObjects_2Call { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self { prefix: tuple.0 } + } + } + } + { + #[doc(hidden)] + type UnderlyingSolTuple<'a> = (Query,); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = (::RustType,); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: queryObjects_2Return) -> Self { + (value._0,) + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for queryObjects_2Return { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self { _0: tuple.0 } + } + } + } + #[automatically_derived] + impl alloy_sol_types::SolCall for queryObjects_2Call { + type Parameters<'a> = (::alloy_sol_types::sol_data::String,); + type Token<'a> = as alloy_sol_types::SolType>::Token<'a>; + type Return = queryObjects_2Return; + type ReturnTuple<'a> = (Query,); + type ReturnToken<'a> = as alloy_sol_types::SolType>::Token<'a>; + const SIGNATURE: &'static str = "queryObjects(string)"; + const SELECTOR: [u8; 4] = [98u8, 148u8, 233u8, 163u8]; + #[inline] + fn new<'a>( + tuple: as alloy_sol_types::SolType>::RustType, + ) -> Self { + tuple.into() + } + #[inline] + fn tokenize(&self) -> Self::Token<'_> { + ( + <::alloy_sol_types::sol_data::String as alloy_sol_types::SolType>::tokenize( + &self.prefix, + ), + ) + } + #[inline] + fn abi_decode_returns( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + as alloy_sol_types::SolType>::abi_decode_sequence( + data, validate, + ) + .map(Into::into) + } + } + }; + /**Function with signature `queryObjects()` and selector `0xa443a83f`. + ```solidity + function queryObjects() external view returns (Query memory); + ```*/ + #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct queryObjects_3Call {} + ///Container type for the return parameters of the [`queryObjects()`](queryObjects_3Call) function. 
+ #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct queryObjects_3Return { + #[allow(missing_docs)] + pub _0: ::RustType, + } + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + const _: () = { + use ::alloy_sol_types; + { + #[doc(hidden)] + type UnderlyingSolTuple<'a> = (); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = (); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: queryObjects_3Call) -> Self { + () + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for queryObjects_3Call { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self {} + } + } + } + { + #[doc(hidden)] + type UnderlyingSolTuple<'a> = (Query,); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = (::RustType,); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: queryObjects_3Return) -> Self { + (value._0,) + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for queryObjects_3Return { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self { _0: tuple.0 } + } + } + } + #[automatically_derived] + impl alloy_sol_types::SolCall for queryObjects_3Call { + type Parameters<'a> = (); + type Token<'a> = as alloy_sol_types::SolType>::Token<'a>; + type Return = queryObjects_3Return; + type ReturnTuple<'a> = (Query,); + type ReturnToken<'a> = as alloy_sol_types::SolType>::Token<'a>; + const SIGNATURE: &'static str = "queryObjects()"; + const SELECTOR: [u8; 4] = [164u8, 67u8, 168u8, 63u8]; + #[inline] + fn new<'a>( + tuple: as alloy_sol_types::SolType>::RustType, + ) -> Self { + tuple.into() + } + #[inline] + fn tokenize(&self) -> Self::Token<'_> { + () + } + #[inline] + fn abi_decode_returns( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + as alloy_sol_types::SolType>::abi_decode_sequence( + data, validate, + ) + .map(Into::into) + } + } + }; + /**Function with signature `queryObjects(string,string)` and selector `0xc9aeef81`. + ```solidity + function queryObjects(string memory prefix, string memory delimiter) external view returns (Query memory); + ```*/ + #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct queryObjects_4Call { + #[allow(missing_docs)] + pub prefix: ::alloy_sol_types::private::String, + #[allow(missing_docs)] + pub delimiter: ::alloy_sol_types::private::String, + } + ///Container type for the return parameters of the [`queryObjects(string,string)`](queryObjects_4Call) function. 
+ #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct queryObjects_4Return { + #[allow(missing_docs)] + pub _0: ::RustType, + } + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + const _: () = { + use ::alloy_sol_types; + { + #[doc(hidden)] + type UnderlyingSolTuple<'a> = ( + ::alloy_sol_types::sol_data::String, + ::alloy_sol_types::sol_data::String, + ); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = ( + ::alloy_sol_types::private::String, + ::alloy_sol_types::private::String, + ); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: queryObjects_4Call) -> Self { + (value.prefix, value.delimiter) + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for queryObjects_4Call { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self { + prefix: tuple.0, + delimiter: tuple.1, + } + } + } + } + { + #[doc(hidden)] + type UnderlyingSolTuple<'a> = (Query,); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = (::RustType,); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: queryObjects_4Return) -> Self { + (value._0,) + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for queryObjects_4Return { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self { _0: tuple.0 } + } + } + } + #[automatically_derived] + impl alloy_sol_types::SolCall for queryObjects_4Call { + type Parameters<'a> = ( + ::alloy_sol_types::sol_data::String, + ::alloy_sol_types::sol_data::String, + ); + type Token<'a> = as alloy_sol_types::SolType>::Token<'a>; + type Return = queryObjects_4Return; + type ReturnTuple<'a> = (Query,); + type ReturnToken<'a> = as alloy_sol_types::SolType>::Token<'a>; + const SIGNATURE: &'static str = "queryObjects(string,string)"; + const SELECTOR: [u8; 4] = [201u8, 174u8, 239u8, 129u8]; + #[inline] + fn new<'a>( + tuple: as alloy_sol_types::SolType>::RustType, + ) -> Self { + tuple.into() + } + #[inline] + fn tokenize(&self) -> Self::Token<'_> { + ( + <::alloy_sol_types::sol_data::String as alloy_sol_types::SolType>::tokenize( + &self.prefix, + ), + <::alloy_sol_types::sol_data::String as alloy_sol_types::SolType>::tokenize( + &self.delimiter, + ), + ) + } + #[inline] + fn abi_decode_returns( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + as alloy_sol_types::SolType>::abi_decode_sequence( + data, validate, + ) + .map(Into::into) + } + } + }; + /**Function with signature `updateObjectMetadata(string,(string,string)[])` and selector `0x6f0a4ff4`. 
+ ```solidity + function updateObjectMetadata(string memory key, KeyValue[] memory metadata) external; + ```*/ + #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct updateObjectMetadataCall { + #[allow(missing_docs)] + pub key: ::alloy_sol_types::private::String, + #[allow(missing_docs)] + pub metadata: + ::alloy_sol_types::private::Vec<::RustType>, + } + ///Container type for the return parameters of the [`updateObjectMetadata(string,(string,string)[])`](updateObjectMetadataCall) function. + #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct updateObjectMetadataReturn {} + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + const _: () = { + use ::alloy_sol_types; + { + #[doc(hidden)] + type UnderlyingSolTuple<'a> = ( + ::alloy_sol_types::sol_data::String, + ::alloy_sol_types::sol_data::Array, + ); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = ( + ::alloy_sol_types::private::String, + ::alloy_sol_types::private::Vec<::RustType>, + ); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: updateObjectMetadataCall) -> Self { + (value.key, value.metadata) + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for updateObjectMetadataCall { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self { + key: tuple.0, + metadata: tuple.1, + } + } + } + } + { + #[doc(hidden)] + type UnderlyingSolTuple<'a> = (); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = (); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: updateObjectMetadataReturn) -> Self { + () + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for updateObjectMetadataReturn { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self {} + } + } + } + #[automatically_derived] + impl alloy_sol_types::SolCall for updateObjectMetadataCall { + type Parameters<'a> = ( + ::alloy_sol_types::sol_data::String, + ::alloy_sol_types::sol_data::Array, + ); + type Token<'a> = as alloy_sol_types::SolType>::Token<'a>; + type Return = updateObjectMetadataReturn; + type ReturnTuple<'a> = (); + type ReturnToken<'a> = as alloy_sol_types::SolType>::Token<'a>; + const SIGNATURE: &'static str = "updateObjectMetadata(string,(string,string)[])"; + const SELECTOR: [u8; 4] = [111u8, 10u8, 79u8, 244u8]; + #[inline] + fn new<'a>( + tuple: as alloy_sol_types::SolType>::RustType, + ) -> Self { + tuple.into() + } + #[inline] + fn tokenize(&self) -> Self::Token<'_> { + ( + <::alloy_sol_types::sol_data::String as alloy_sol_types::SolType>::tokenize( + &self.key, + ), + <::alloy_sol_types::sol_data::Array< + KeyValue, + > as alloy_sol_types::SolType>::tokenize(&self.metadata), + ) + } + #[inline] + fn abi_decode_returns( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + as alloy_sol_types::SolType>::abi_decode_sequence( + data, validate, + ) + .map(Into::into) + 
} + } + }; + ///Container for all the [`IBucketFacade`](self) function calls. + pub enum IBucketFacadeCalls { + #[allow(missing_docs)] + addObject_0(addObject_0Call), + #[allow(missing_docs)] + addObject_1(addObject_1Call), + #[allow(missing_docs)] + deleteObject(deleteObjectCall), + #[allow(missing_docs)] + getObject(getObjectCall), + #[allow(missing_docs)] + queryObjects_0(queryObjects_0Call), + #[allow(missing_docs)] + queryObjects_1(queryObjects_1Call), + #[allow(missing_docs)] + queryObjects_2(queryObjects_2Call), + #[allow(missing_docs)] + queryObjects_3(queryObjects_3Call), + #[allow(missing_docs)] + queryObjects_4(queryObjects_4Call), + #[allow(missing_docs)] + updateObjectMetadata(updateObjectMetadataCall), + } + #[automatically_derived] + impl IBucketFacadeCalls { + /// All the selectors of this enum. + /// + /// Note that the selectors might not be in the same order as the variants. + /// No guarantees are made about the order of the selectors. + /// + /// Prefer using `SolInterface` methods instead. + pub const SELECTORS: &'static [[u8; 4usize]] = &[ + [1u8, 83u8, 234u8, 145u8], + [23u8, 211u8, 82u8, 192u8], + [45u8, 111u8, 37u8, 80u8], + [45u8, 124u8, 182u8, 0u8], + [76u8, 83u8, 234u8, 181u8], + [98u8, 148u8, 233u8, 163u8], + [111u8, 10u8, 79u8, 244u8], + [119u8, 67u8, 67u8, 254u8], + [164u8, 67u8, 168u8, 63u8], + [201u8, 174u8, 239u8, 129u8], + ]; + } + #[automatically_derived] + impl alloy_sol_types::SolInterface for IBucketFacadeCalls { + const NAME: &'static str = "IBucketFacadeCalls"; + const MIN_DATA_LENGTH: usize = 0usize; + const COUNT: usize = 10usize; + #[inline] + fn selector(&self) -> [u8; 4] { + match self { + Self::addObject_0(_) => ::SELECTOR, + Self::addObject_1(_) => ::SELECTOR, + Self::deleteObject(_) => ::SELECTOR, + Self::getObject(_) => ::SELECTOR, + Self::queryObjects_0(_) => { + ::SELECTOR + } + Self::queryObjects_1(_) => { + ::SELECTOR + } + Self::queryObjects_2(_) => { + ::SELECTOR + } + Self::queryObjects_3(_) => { + ::SELECTOR + } + Self::queryObjects_4(_) => { + ::SELECTOR + } + Self::updateObjectMetadata(_) => { + ::SELECTOR + } + } + } + #[inline] + fn selector_at(i: usize) -> ::core::option::Option<[u8; 4]> { + Self::SELECTORS.get(i).copied() + } + #[inline] + fn valid_selector(selector: [u8; 4]) -> bool { + Self::SELECTORS.binary_search(&selector).is_ok() + } + #[inline] + #[allow(non_snake_case)] + fn abi_decode_raw( + selector: [u8; 4], + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + static DECODE_SHIMS: &[fn( + &[u8], + bool, + ) + -> alloy_sol_types::Result] = &[ + { + fn getObject( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + ::abi_decode_raw(data, validate) + .map(IBucketFacadeCalls::getObject) + } + getObject + }, + { + fn queryObjects_0( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + ::abi_decode_raw( + data, validate, + ) + .map(IBucketFacadeCalls::queryObjects_0) + } + queryObjects_0 + }, + { + fn addObject_0( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + ::abi_decode_raw( + data, validate, + ) + .map(IBucketFacadeCalls::addObject_0) + } + addObject_0 + }, + { + fn deleteObject( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + ::abi_decode_raw( + data, validate, + ) + .map(IBucketFacadeCalls::deleteObject) + } + deleteObject + }, + { + fn queryObjects_1( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + ::abi_decode_raw( + data, validate, + ) + .map(IBucketFacadeCalls::queryObjects_1) + } + queryObjects_1 + }, + { 
+ fn queryObjects_2( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + ::abi_decode_raw( + data, validate, + ) + .map(IBucketFacadeCalls::queryObjects_2) + } + queryObjects_2 + }, + { + fn updateObjectMetadata( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + ::abi_decode_raw( + data, validate, + ) + .map(IBucketFacadeCalls::updateObjectMetadata) + } + updateObjectMetadata + }, + { + fn addObject_1( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + ::abi_decode_raw( + data, validate, + ) + .map(IBucketFacadeCalls::addObject_1) + } + addObject_1 + }, + { + fn queryObjects_3( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + ::abi_decode_raw( + data, validate, + ) + .map(IBucketFacadeCalls::queryObjects_3) + } + queryObjects_3 + }, + { + fn queryObjects_4( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + ::abi_decode_raw( + data, validate, + ) + .map(IBucketFacadeCalls::queryObjects_4) + } + queryObjects_4 + }, + ]; + let Ok(idx) = Self::SELECTORS.binary_search(&selector) else { + return Err(alloy_sol_types::Error::unknown_selector( + ::NAME, + selector, + )); + }; + DECODE_SHIMS[idx](data, validate) + } + #[inline] + fn abi_encoded_size(&self) -> usize { + match self { + Self::addObject_0(inner) => { + ::abi_encoded_size(inner) + } + Self::addObject_1(inner) => { + ::abi_encoded_size(inner) + } + Self::deleteObject(inner) => { + ::abi_encoded_size(inner) + } + Self::getObject(inner) => { + ::abi_encoded_size(inner) + } + Self::queryObjects_0(inner) => { + ::abi_encoded_size(inner) + } + Self::queryObjects_1(inner) => { + ::abi_encoded_size(inner) + } + Self::queryObjects_2(inner) => { + ::abi_encoded_size(inner) + } + Self::queryObjects_3(inner) => { + ::abi_encoded_size(inner) + } + Self::queryObjects_4(inner) => { + ::abi_encoded_size(inner) + } + Self::updateObjectMetadata(inner) => { + ::abi_encoded_size(inner) + } + } + } + #[inline] + fn abi_encode_raw(&self, out: &mut alloy_sol_types::private::Vec) { + match self { + Self::addObject_0(inner) => { + ::abi_encode_raw(inner, out) + } + Self::addObject_1(inner) => { + ::abi_encode_raw(inner, out) + } + Self::deleteObject(inner) => { + ::abi_encode_raw(inner, out) + } + Self::getObject(inner) => { + ::abi_encode_raw(inner, out) + } + Self::queryObjects_0(inner) => { + ::abi_encode_raw(inner, out) + } + Self::queryObjects_1(inner) => { + ::abi_encode_raw(inner, out) + } + Self::queryObjects_2(inner) => { + ::abi_encode_raw(inner, out) + } + Self::queryObjects_3(inner) => { + ::abi_encode_raw(inner, out) + } + Self::queryObjects_4(inner) => { + ::abi_encode_raw(inner, out) + } + Self::updateObjectMetadata(inner) => { + ::abi_encode_raw( + inner, out, + ) + } + } + } + } + ///Container for all the [`IBucketFacade`](self) events. + pub enum IBucketFacadeEvents { + #[allow(missing_docs)] + ObjectAdded(ObjectAdded), + #[allow(missing_docs)] + ObjectDeleted(ObjectDeleted), + #[allow(missing_docs)] + ObjectMetadataUpdated(ObjectMetadataUpdated), + } + #[automatically_derived] + impl IBucketFacadeEvents { + /// All the selectors of this enum. + /// + /// Note that the selectors might not be in the same order as the variants. + /// No guarantees are made about the order of the selectors. + /// + /// Prefer using `SolInterface` methods instead. 
+ pub const SELECTORS: &'static [[u8; 32usize]] = &[ + [ + 60u8, 244u8, 165u8, 122u8, 108u8, 97u8, 36u8, 44u8, 9u8, 38u8, 217u8, 252u8, 9u8, + 163u8, 130u8, 219u8, 163u8, 106u8, 110u8, 146u8, 98u8, 140u8, 119u8, 127u8, 18u8, + 68u8, 196u8, 89u8, 184u8, 9u8, 121u8, 60u8, + ], + [ + 113u8, 40u8, 100u8, 34u8, 143u8, 54u8, 156u8, 194u8, 0u8, 69u8, 202u8, 23u8, 58u8, + 171u8, 116u8, 85u8, 175u8, 88u8, 250u8, 159u8, 109u8, 186u8, 7u8, 73u8, 16u8, + 146u8, 201u8, 61u8, 44u8, 247u8, 251u8, 6u8, + ], + [ + 165u8, 63u8, 104u8, 146u8, 29u8, 139u8, 166u8, 53u8, 110u8, 66u8, 48u8, 119u8, + 167u8, 86u8, 255u8, 42u8, 40u8, 42u8, 230u8, 222u8, 93u8, 78u8, 204u8, 97u8, 125u8, + 160u8, 155u8, 1u8, 234u8, 213u8, 214u8, 64u8, + ], + ]; + } + #[automatically_derived] + impl alloy_sol_types::SolEventInterface for IBucketFacadeEvents { + const NAME: &'static str = "IBucketFacadeEvents"; + const COUNT: usize = 3usize; + fn decode_raw_log( + topics: &[alloy_sol_types::Word], + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + match topics.first().copied() { + Some(::SIGNATURE_HASH) => { + ::decode_raw_log( + topics, data, validate, + ) + .map(Self::ObjectAdded) + } + Some(::SIGNATURE_HASH) => { + ::decode_raw_log( + topics, data, validate, + ) + .map(Self::ObjectDeleted) + } + Some(::SIGNATURE_HASH) => { + ::decode_raw_log( + topics, data, validate, + ) + .map(Self::ObjectMetadataUpdated) + } + _ => alloy_sol_types::private::Err(alloy_sol_types::Error::InvalidLog { + name: ::NAME, + log: alloy_sol_types::private::Box::new( + alloy_sol_types::private::LogData::new_unchecked( + topics.to_vec(), + data.to_vec().into(), + ), + ), + }), + } + } + } + #[automatically_derived] + impl alloy_sol_types::private::IntoLogData for IBucketFacadeEvents { + fn to_log_data(&self) -> alloy_sol_types::private::LogData { + match self { + Self::ObjectAdded(inner) => { + alloy_sol_types::private::IntoLogData::to_log_data(inner) + } + Self::ObjectDeleted(inner) => { + alloy_sol_types::private::IntoLogData::to_log_data(inner) + } + Self::ObjectMetadataUpdated(inner) => { + alloy_sol_types::private::IntoLogData::to_log_data(inner) + } + } + } + fn into_log_data(self) -> alloy_sol_types::private::LogData { + match self { + Self::ObjectAdded(inner) => { + alloy_sol_types::private::IntoLogData::into_log_data(inner) + } + Self::ObjectDeleted(inner) => { + alloy_sol_types::private::IntoLogData::into_log_data(inner) + } + Self::ObjectMetadataUpdated(inner) => { + alloy_sol_types::private::IntoLogData::into_log_data(inner) + } + } + } + } +} diff --git a/ipc-storage/sol-facade/crates/facade/src/bucket_facade/mod.rs b/ipc-storage/sol-facade/crates/facade/src/bucket_facade/mod.rs new file mode 100644 index 0000000000..ec8b915d5d --- /dev/null +++ b/ipc-storage/sol-facade/crates/facade/src/bucket_facade/mod.rs @@ -0,0 +1,8 @@ +// Copyright 2022-2024 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT +#![allow(unused_imports, clippy::all, rustdoc::all)] +//! This module contains the sol! generated bindings for solidity contracts. +//! This is autogenerated code. +//! Do not manually edit these files. +//! These files may be overwritten by the codegen system at any time. 
+pub mod r#ibucketfacade; diff --git a/ipc-storage/sol-facade/crates/facade/src/config_facade/iconfigfacade.rs b/ipc-storage/sol-facade/crates/facade/src/config_facade/iconfigfacade.rs new file mode 100644 index 0000000000..f59fcd5f54 --- /dev/null +++ b/ipc-storage/sol-facade/crates/facade/src/config_facade/iconfigfacade.rs @@ -0,0 +1,434 @@ +// Copyright 2022-2024 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT +/** + +Generated by the following Solidity interface... +```solidity +interface IConfigFacade { + event ConfigAdminSet(address admin); + event ConfigSet(uint256 blobCapacity, uint256 tokenCreditRate, uint256 blobCreditDebitInterval, uint256 blobMinTtl, uint256 blobDefaultTtl, uint256 blobDeleteBatchSize, uint256 accountDebitBatchSize); +} +``` + +...which was generated by the following JSON ABI: +```json +[ + { + "type": "event", + "name": "ConfigAdminSet", + "inputs": [ + { + "name": "admin", + "type": "address", + "indexed": false, + "internalType": "address" + } + ], + "anonymous": false + }, + { + "type": "event", + "name": "ConfigSet", + "inputs": [ + { + "name": "blobCapacity", + "type": "uint256", + "indexed": false, + "internalType": "uint256" + }, + { + "name": "tokenCreditRate", + "type": "uint256", + "indexed": false, + "internalType": "uint256" + }, + { + "name": "blobCreditDebitInterval", + "type": "uint256", + "indexed": false, + "internalType": "uint256" + }, + { + "name": "blobMinTtl", + "type": "uint256", + "indexed": false, + "internalType": "uint256" + }, + { + "name": "blobDefaultTtl", + "type": "uint256", + "indexed": false, + "internalType": "uint256" + }, + { + "name": "blobDeleteBatchSize", + "type": "uint256", + "indexed": false, + "internalType": "uint256" + }, + { + "name": "accountDebitBatchSize", + "type": "uint256", + "indexed": false, + "internalType": "uint256" + } + ], + "anonymous": false + } +] +```*/ +#[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style, + clippy::empty_structs_with_brackets +)] +pub mod IConfigFacade { + use super::*; + use ::alloy_sol_types; + /// The creation / init bytecode of the contract. + /// + /// ```text + ///0x + /// ``` + #[rustfmt::skip] + #[allow(clippy::all)] + pub static BYTECODE: alloy_sol_types::private::Bytes = alloy_sol_types::private::Bytes::from_static( + b"", + ); + /// The runtime bytecode of the contract, as deployed on the network. + /// + /// ```text + ///0x + /// ``` + #[rustfmt::skip] + #[allow(clippy::all)] + pub static DEPLOYED_BYTECODE: alloy_sol_types::private::Bytes = alloy_sol_types::private::Bytes::from_static( + b"", + ); + /**Event with signature `ConfigAdminSet(address)` and selector `0x17e2ccbcd78b64c943d403837b55290b3de8fd19c8df1c0ab9cf665b934292d4`. 
+ ```solidity + event ConfigAdminSet(address admin); + ```*/ + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + #[derive(Clone)] + pub struct ConfigAdminSet { + #[allow(missing_docs)] + pub admin: ::alloy_sol_types::private::Address, + } + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + const _: () = { + use ::alloy_sol_types; + #[automatically_derived] + impl alloy_sol_types::SolEvent for ConfigAdminSet { + type DataTuple<'a> = (::alloy_sol_types::sol_data::Address,); + type DataToken<'a> = as alloy_sol_types::SolType>::Token<'a>; + type TopicList = (alloy_sol_types::sol_data::FixedBytes<32>,); + const SIGNATURE: &'static str = "ConfigAdminSet(address)"; + const SIGNATURE_HASH: alloy_sol_types::private::B256 = + alloy_sol_types::private::B256::new([ + 23u8, 226u8, 204u8, 188u8, 215u8, 139u8, 100u8, 201u8, 67u8, 212u8, 3u8, 131u8, + 123u8, 85u8, 41u8, 11u8, 61u8, 232u8, 253u8, 25u8, 200u8, 223u8, 28u8, 10u8, + 185u8, 207u8, 102u8, 91u8, 147u8, 66u8, 146u8, 212u8, + ]); + const ANONYMOUS: bool = false; + #[allow(unused_variables)] + #[inline] + fn new( + topics: ::RustType, + data: as alloy_sol_types::SolType>::RustType, + ) -> Self { + Self { admin: data.0 } + } + #[inline] + fn check_signature( + topics: &::RustType, + ) -> alloy_sol_types::Result<()> { + if topics.0 != Self::SIGNATURE_HASH { + return Err(alloy_sol_types::Error::invalid_event_signature_hash( + Self::SIGNATURE, + topics.0, + Self::SIGNATURE_HASH, + )); + } + Ok(()) + } + #[inline] + fn tokenize_body(&self) -> Self::DataToken<'_> { + ( + <::alloy_sol_types::sol_data::Address as alloy_sol_types::SolType>::tokenize( + &self.admin, + ), + ) + } + #[inline] + fn topics(&self) -> ::RustType { + (Self::SIGNATURE_HASH.into(),) + } + #[inline] + fn encode_topics_raw( + &self, + out: &mut [alloy_sol_types::abi::token::WordToken], + ) -> alloy_sol_types::Result<()> { + if out.len() < ::COUNT { + return Err(alloy_sol_types::Error::Overrun); + } + out[0usize] = alloy_sol_types::abi::token::WordToken(Self::SIGNATURE_HASH); + Ok(()) + } + } + #[automatically_derived] + impl alloy_sol_types::private::IntoLogData for ConfigAdminSet { + fn to_log_data(&self) -> alloy_sol_types::private::LogData { + From::from(self) + } + fn into_log_data(self) -> alloy_sol_types::private::LogData { + From::from(&self) + } + } + #[automatically_derived] + impl From<&ConfigAdminSet> for alloy_sol_types::private::LogData { + #[inline] + fn from(this: &ConfigAdminSet) -> alloy_sol_types::private::LogData { + alloy_sol_types::SolEvent::encode_log_data(this) + } + } + }; + /**Event with signature `ConfigSet(uint256,uint256,uint256,uint256,uint256,uint256,uint256)` and selector `0x3e8ad89b763b9839647a482aef0ebd06350b9fe255fd58263b81888ff1717488`. 
+ ```solidity + event ConfigSet(uint256 blobCapacity, uint256 tokenCreditRate, uint256 blobCreditDebitInterval, uint256 blobMinTtl, uint256 blobDefaultTtl, uint256 blobDeleteBatchSize, uint256 accountDebitBatchSize); + ```*/ + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + #[derive(Clone)] + pub struct ConfigSet { + #[allow(missing_docs)] + pub blobCapacity: ::alloy_sol_types::private::primitives::aliases::U256, + #[allow(missing_docs)] + pub tokenCreditRate: ::alloy_sol_types::private::primitives::aliases::U256, + #[allow(missing_docs)] + pub blobCreditDebitInterval: ::alloy_sol_types::private::primitives::aliases::U256, + #[allow(missing_docs)] + pub blobMinTtl: ::alloy_sol_types::private::primitives::aliases::U256, + #[allow(missing_docs)] + pub blobDefaultTtl: ::alloy_sol_types::private::primitives::aliases::U256, + #[allow(missing_docs)] + pub blobDeleteBatchSize: ::alloy_sol_types::private::primitives::aliases::U256, + #[allow(missing_docs)] + pub accountDebitBatchSize: ::alloy_sol_types::private::primitives::aliases::U256, + } + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + const _: () = { + use ::alloy_sol_types; + #[automatically_derived] + impl alloy_sol_types::SolEvent for ConfigSet { + type DataTuple<'a> = ( + ::alloy_sol_types::sol_data::Uint<256>, + ::alloy_sol_types::sol_data::Uint<256>, + ::alloy_sol_types::sol_data::Uint<256>, + ::alloy_sol_types::sol_data::Uint<256>, + ::alloy_sol_types::sol_data::Uint<256>, + ::alloy_sol_types::sol_data::Uint<256>, + ::alloy_sol_types::sol_data::Uint<256>, + ); + type DataToken<'a> = as alloy_sol_types::SolType>::Token<'a>; + type TopicList = (alloy_sol_types::sol_data::FixedBytes<32>,); + const SIGNATURE: &'static str = + "ConfigSet(uint256,uint256,uint256,uint256,uint256,uint256,uint256)"; + const SIGNATURE_HASH: alloy_sol_types::private::B256 = + alloy_sol_types::private::B256::new([ + 62u8, 138u8, 216u8, 155u8, 118u8, 59u8, 152u8, 57u8, 100u8, 122u8, 72u8, 42u8, + 239u8, 14u8, 189u8, 6u8, 53u8, 11u8, 159u8, 226u8, 85u8, 253u8, 88u8, 38u8, + 59u8, 129u8, 136u8, 143u8, 241u8, 113u8, 116u8, 136u8, + ]); + const ANONYMOUS: bool = false; + #[allow(unused_variables)] + #[inline] + fn new( + topics: ::RustType, + data: as alloy_sol_types::SolType>::RustType, + ) -> Self { + Self { + blobCapacity: data.0, + tokenCreditRate: data.1, + blobCreditDebitInterval: data.2, + blobMinTtl: data.3, + blobDefaultTtl: data.4, + blobDeleteBatchSize: data.5, + accountDebitBatchSize: data.6, + } + } + #[inline] + fn check_signature( + topics: &::RustType, + ) -> alloy_sol_types::Result<()> { + if topics.0 != Self::SIGNATURE_HASH { + return Err(alloy_sol_types::Error::invalid_event_signature_hash( + Self::SIGNATURE, + topics.0, + Self::SIGNATURE_HASH, + )); + } + Ok(()) + } + #[inline] + fn tokenize_body(&self) -> Self::DataToken<'_> { + ( + <::alloy_sol_types::sol_data::Uint<256> as alloy_sol_types::SolType>::tokenize( + &self.blobCapacity, + ), + <::alloy_sol_types::sol_data::Uint<256> as alloy_sol_types::SolType>::tokenize( + &self.tokenCreditRate, + ), + <::alloy_sol_types::sol_data::Uint<256> as alloy_sol_types::SolType>::tokenize( + &self.blobCreditDebitInterval, + ), + <::alloy_sol_types::sol_data::Uint<256> as alloy_sol_types::SolType>::tokenize( + &self.blobMinTtl, + ), + <::alloy_sol_types::sol_data::Uint<256> as alloy_sol_types::SolType>::tokenize( + &self.blobDefaultTtl, + ), + <::alloy_sol_types::sol_data::Uint<256> as 
alloy_sol_types::SolType>::tokenize( + &self.blobDeleteBatchSize, + ), + <::alloy_sol_types::sol_data::Uint<256> as alloy_sol_types::SolType>::tokenize( + &self.accountDebitBatchSize, + ), + ) + } + #[inline] + fn topics(&self) -> ::RustType { + (Self::SIGNATURE_HASH.into(),) + } + #[inline] + fn encode_topics_raw( + &self, + out: &mut [alloy_sol_types::abi::token::WordToken], + ) -> alloy_sol_types::Result<()> { + if out.len() < ::COUNT { + return Err(alloy_sol_types::Error::Overrun); + } + out[0usize] = alloy_sol_types::abi::token::WordToken(Self::SIGNATURE_HASH); + Ok(()) + } + } + #[automatically_derived] + impl alloy_sol_types::private::IntoLogData for ConfigSet { + fn to_log_data(&self) -> alloy_sol_types::private::LogData { + From::from(self) + } + fn into_log_data(self) -> alloy_sol_types::private::LogData { + From::from(&self) + } + } + #[automatically_derived] + impl From<&ConfigSet> for alloy_sol_types::private::LogData { + #[inline] + fn from(this: &ConfigSet) -> alloy_sol_types::private::LogData { + alloy_sol_types::SolEvent::encode_log_data(this) + } + } + }; + ///Container for all the [`IConfigFacade`](self) events. + pub enum IConfigFacadeEvents { + #[allow(missing_docs)] + ConfigAdminSet(ConfigAdminSet), + #[allow(missing_docs)] + ConfigSet(ConfigSet), + } + #[automatically_derived] + impl IConfigFacadeEvents { + /// All the selectors of this enum. + /// + /// Note that the selectors might not be in the same order as the variants. + /// No guarantees are made about the order of the selectors. + /// + /// Prefer using `SolInterface` methods instead. + pub const SELECTORS: &'static [[u8; 32usize]] = &[ + [ + 23u8, 226u8, 204u8, 188u8, 215u8, 139u8, 100u8, 201u8, 67u8, 212u8, 3u8, 131u8, + 123u8, 85u8, 41u8, 11u8, 61u8, 232u8, 253u8, 25u8, 200u8, 223u8, 28u8, 10u8, 185u8, + 207u8, 102u8, 91u8, 147u8, 66u8, 146u8, 212u8, + ], + [ + 62u8, 138u8, 216u8, 155u8, 118u8, 59u8, 152u8, 57u8, 100u8, 122u8, 72u8, 42u8, + 239u8, 14u8, 189u8, 6u8, 53u8, 11u8, 159u8, 226u8, 85u8, 253u8, 88u8, 38u8, 59u8, + 129u8, 136u8, 143u8, 241u8, 113u8, 116u8, 136u8, + ], + ]; + } + #[automatically_derived] + impl alloy_sol_types::SolEventInterface for IConfigFacadeEvents { + const NAME: &'static str = "IConfigFacadeEvents"; + const COUNT: usize = 2usize; + fn decode_raw_log( + topics: &[alloy_sol_types::Word], + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + match topics.first().copied() { + Some(::SIGNATURE_HASH) => { + ::decode_raw_log( + topics, data, validate, + ) + .map(Self::ConfigAdminSet) + } + Some(::SIGNATURE_HASH) => { + ::decode_raw_log(topics, data, validate) + .map(Self::ConfigSet) + } + _ => alloy_sol_types::private::Err(alloy_sol_types::Error::InvalidLog { + name: ::NAME, + log: alloy_sol_types::private::Box::new( + alloy_sol_types::private::LogData::new_unchecked( + topics.to_vec(), + data.to_vec().into(), + ), + ), + }), + } + } + } + #[automatically_derived] + impl alloy_sol_types::private::IntoLogData for IConfigFacadeEvents { + fn to_log_data(&self) -> alloy_sol_types::private::LogData { + match self { + Self::ConfigAdminSet(inner) => { + alloy_sol_types::private::IntoLogData::to_log_data(inner) + } + Self::ConfigSet(inner) => alloy_sol_types::private::IntoLogData::to_log_data(inner), + } + } + fn into_log_data(self) -> alloy_sol_types::private::LogData { + match self { + Self::ConfigAdminSet(inner) => { + alloy_sol_types::private::IntoLogData::into_log_data(inner) + } + Self::ConfigSet(inner) => { + alloy_sol_types::private::IntoLogData::into_log_data(inner) + 
} + } + } + } +} diff --git a/ipc-storage/sol-facade/crates/facade/src/config_facade/mod.rs b/ipc-storage/sol-facade/crates/facade/src/config_facade/mod.rs new file mode 100644 index 0000000000..258d9b76ab --- /dev/null +++ b/ipc-storage/sol-facade/crates/facade/src/config_facade/mod.rs @@ -0,0 +1,8 @@ +// Copyright 2022-2024 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT +#![allow(unused_imports, clippy::all, rustdoc::all)] +//! This module contains the sol! generated bindings for solidity contracts. +//! This is autogenerated code. +//! Do not manually edit these files. +//! These files may be overwritten by the codegen system at any time. +pub mod r#iconfigfacade; diff --git a/ipc-storage/sol-facade/crates/facade/src/credit_facade/icreditfacade.rs b/ipc-storage/sol-facade/crates/facade/src/credit_facade/icreditfacade.rs new file mode 100644 index 0000000000..9e416fe29d --- /dev/null +++ b/ipc-storage/sol-facade/crates/facade/src/credit_facade/icreditfacade.rs @@ -0,0 +1,3763 @@ +// Copyright 2022-2024 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT +/** + +Generated by the following Solidity interface... +```solidity +interface ICreditFacade { + type TtlStatus is uint8; + struct Account { + uint64 capacityUsed; + uint256 creditFree; + uint256 creditCommitted; + address creditSponsor; + uint64 lastDebitEpoch; + Approval[] approvalsTo; + Approval[] approvalsFrom; + uint64 maxTtl; + uint256 gasAllowance; + } + struct Approval { + address addr; + CreditApproval approval; + } + struct CreditApproval { + uint256 creditLimit; + uint256 gasFeeLimit; + uint64 expiry; + uint256 creditUsed; + uint256 gasFeeUsed; + } + + event CreditApproved(address from, address to, uint256 creditLimit, uint256 gasFeeLimit, uint256 expiry); + event CreditDebited(uint256 amount, uint256 numAccounts, bool moreAccounts); + event CreditPurchased(address from, uint256 amount); + event CreditRevoked(address from, address to); + + function approveCredit(address to) external; + function approveCredit(address to, address[] memory caller, uint256 creditLimit, uint256 gasFeeLimit, uint64 ttl) external; + function approveCredit(address to, address[] memory caller) external; + function buyCredit() external payable; + function buyCredit(address recipient) external payable; + function getAccount(address addr) external view returns (Account memory account); + function getCreditApproval(address from, address to) external view returns (CreditApproval memory approval); + function revokeCredit(address to, address caller) external; + function revokeCredit(address to) external; + function setAccountSponsor(address sponsor) external; + function setAccountStatus(address subscriber, TtlStatus ttlStatus) external; +} +``` + +...which was generated by the following JSON ABI: +```json +[ + { + "type": "function", + "name": "approveCredit", + "inputs": [ + { + "name": "to", + "type": "address", + "internalType": "address" + } + ], + "outputs": [], + "stateMutability": "nonpayable" + }, + { + "type": "function", + "name": "approveCredit", + "inputs": [ + { + "name": "to", + "type": "address", + "internalType": "address" + }, + { + "name": "caller", + "type": "address[]", + "internalType": "address[]" + }, + { + "name": "creditLimit", + "type": "uint256", + "internalType": "uint256" + }, + { + "name": "gasFeeLimit", + "type": "uint256", + "internalType": "uint256" + }, + { + "name": "ttl", + "type": "uint64", + "internalType": "uint64" + } + ], + "outputs": [], + "stateMutability": "nonpayable" + }, + { + "type": 
"function", + "name": "approveCredit", + "inputs": [ + { + "name": "to", + "type": "address", + "internalType": "address" + }, + { + "name": "caller", + "type": "address[]", + "internalType": "address[]" + } + ], + "outputs": [], + "stateMutability": "nonpayable" + }, + { + "type": "function", + "name": "buyCredit", + "inputs": [], + "outputs": [], + "stateMutability": "payable" + }, + { + "type": "function", + "name": "buyCredit", + "inputs": [ + { + "name": "recipient", + "type": "address", + "internalType": "address" + } + ], + "outputs": [], + "stateMutability": "payable" + }, + { + "type": "function", + "name": "getAccount", + "inputs": [ + { + "name": "addr", + "type": "address", + "internalType": "address" + } + ], + "outputs": [ + { + "name": "account", + "type": "tuple", + "internalType": "struct Account", + "components": [ + { + "name": "capacityUsed", + "type": "uint64", + "internalType": "uint64" + }, + { + "name": "creditFree", + "type": "uint256", + "internalType": "uint256" + }, + { + "name": "creditCommitted", + "type": "uint256", + "internalType": "uint256" + }, + { + "name": "creditSponsor", + "type": "address", + "internalType": "address" + }, + { + "name": "lastDebitEpoch", + "type": "uint64", + "internalType": "uint64" + }, + { + "name": "approvalsTo", + "type": "tuple[]", + "internalType": "struct Approval[]", + "components": [ + { + "name": "addr", + "type": "address", + "internalType": "address" + }, + { + "name": "approval", + "type": "tuple", + "internalType": "struct CreditApproval", + "components": [ + { + "name": "creditLimit", + "type": "uint256", + "internalType": "uint256" + }, + { + "name": "gasFeeLimit", + "type": "uint256", + "internalType": "uint256" + }, + { + "name": "expiry", + "type": "uint64", + "internalType": "uint64" + }, + { + "name": "creditUsed", + "type": "uint256", + "internalType": "uint256" + }, + { + "name": "gasFeeUsed", + "type": "uint256", + "internalType": "uint256" + } + ] + } + ] + }, + { + "name": "approvalsFrom", + "type": "tuple[]", + "internalType": "struct Approval[]", + "components": [ + { + "name": "addr", + "type": "address", + "internalType": "address" + }, + { + "name": "approval", + "type": "tuple", + "internalType": "struct CreditApproval", + "components": [ + { + "name": "creditLimit", + "type": "uint256", + "internalType": "uint256" + }, + { + "name": "gasFeeLimit", + "type": "uint256", + "internalType": "uint256" + }, + { + "name": "expiry", + "type": "uint64", + "internalType": "uint64" + }, + { + "name": "creditUsed", + "type": "uint256", + "internalType": "uint256" + }, + { + "name": "gasFeeUsed", + "type": "uint256", + "internalType": "uint256" + } + ] + } + ] + }, + { + "name": "maxTtl", + "type": "uint64", + "internalType": "uint64" + }, + { + "name": "gasAllowance", + "type": "uint256", + "internalType": "uint256" + } + ] + } + ], + "stateMutability": "view" + }, + { + "type": "function", + "name": "getCreditApproval", + "inputs": [ + { + "name": "from", + "type": "address", + "internalType": "address" + }, + { + "name": "to", + "type": "address", + "internalType": "address" + } + ], + "outputs": [ + { + "name": "approval", + "type": "tuple", + "internalType": "struct CreditApproval", + "components": [ + { + "name": "creditLimit", + "type": "uint256", + "internalType": "uint256" + }, + { + "name": "gasFeeLimit", + "type": "uint256", + "internalType": "uint256" + }, + { + "name": "expiry", + "type": "uint64", + "internalType": "uint64" + }, + { + "name": "creditUsed", + "type": "uint256", + "internalType": 
"uint256" + }, + { + "name": "gasFeeUsed", + "type": "uint256", + "internalType": "uint256" + } + ] + } + ], + "stateMutability": "view" + }, + { + "type": "function", + "name": "revokeCredit", + "inputs": [ + { + "name": "to", + "type": "address", + "internalType": "address" + }, + { + "name": "caller", + "type": "address", + "internalType": "address" + } + ], + "outputs": [], + "stateMutability": "nonpayable" + }, + { + "type": "function", + "name": "revokeCredit", + "inputs": [ + { + "name": "to", + "type": "address", + "internalType": "address" + } + ], + "outputs": [], + "stateMutability": "nonpayable" + }, + { + "type": "function", + "name": "setAccountSponsor", + "inputs": [ + { + "name": "sponsor", + "type": "address", + "internalType": "address" + } + ], + "outputs": [], + "stateMutability": "nonpayable" + }, + { + "type": "function", + "name": "setAccountStatus", + "inputs": [ + { + "name": "subscriber", + "type": "address", + "internalType": "address" + }, + { + "name": "ttlStatus", + "type": "uint8", + "internalType": "enum TtlStatus" + } + ], + "outputs": [], + "stateMutability": "nonpayable" + }, + { + "type": "event", + "name": "CreditApproved", + "inputs": [ + { + "name": "from", + "type": "address", + "indexed": false, + "internalType": "address" + }, + { + "name": "to", + "type": "address", + "indexed": false, + "internalType": "address" + }, + { + "name": "creditLimit", + "type": "uint256", + "indexed": false, + "internalType": "uint256" + }, + { + "name": "gasFeeLimit", + "type": "uint256", + "indexed": false, + "internalType": "uint256" + }, + { + "name": "expiry", + "type": "uint256", + "indexed": false, + "internalType": "uint256" + } + ], + "anonymous": false + }, + { + "type": "event", + "name": "CreditDebited", + "inputs": [ + { + "name": "amount", + "type": "uint256", + "indexed": false, + "internalType": "uint256" + }, + { + "name": "numAccounts", + "type": "uint256", + "indexed": false, + "internalType": "uint256" + }, + { + "name": "moreAccounts", + "type": "bool", + "indexed": false, + "internalType": "bool" + } + ], + "anonymous": false + }, + { + "type": "event", + "name": "CreditPurchased", + "inputs": [ + { + "name": "from", + "type": "address", + "indexed": false, + "internalType": "address" + }, + { + "name": "amount", + "type": "uint256", + "indexed": false, + "internalType": "uint256" + } + ], + "anonymous": false + }, + { + "type": "event", + "name": "CreditRevoked", + "inputs": [ + { + "name": "from", + "type": "address", + "indexed": false, + "internalType": "address" + }, + { + "name": "to", + "type": "address", + "indexed": false, + "internalType": "address" + } + ], + "anonymous": false + } +] +```*/ +#[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style, + clippy::empty_structs_with_brackets +)] +pub mod ICreditFacade { + use super::*; + use ::alloy_sol_types; + /// The creation / init bytecode of the contract. + /// + /// ```text + ///0x + /// ``` + #[rustfmt::skip] + #[allow(clippy::all)] + pub static BYTECODE: alloy_sol_types::private::Bytes = alloy_sol_types::private::Bytes::from_static( + b"", + ); + /// The runtime bytecode of the contract, as deployed on the network. 
+ /// + /// ```text + ///0x + /// ``` + #[rustfmt::skip] + #[allow(clippy::all)] + pub static DEPLOYED_BYTECODE: alloy_sol_types::private::Bytes = alloy_sol_types::private::Bytes::from_static( + b"", + ); + #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct TtlStatus(u8); + const _: () = { + use ::alloy_sol_types; + #[automatically_derived] + impl alloy_sol_types::private::SolTypeValue for u8 { + #[inline] + fn stv_to_tokens( + &self, + ) -> <::alloy_sol_types::sol_data::Uint<8> as alloy_sol_types::SolType>::Token<'_> + { + alloy_sol_types::private::SolTypeValue::< + ::alloy_sol_types::sol_data::Uint<8>, + >::stv_to_tokens(self) + } + #[inline] + fn stv_eip712_data_word(&self) -> alloy_sol_types::Word { + <::alloy_sol_types::sol_data::Uint<8> as alloy_sol_types::SolType>::tokenize(self).0 + } + #[inline] + fn stv_abi_encode_packed_to(&self, out: &mut alloy_sol_types::private::Vec) { + <::alloy_sol_types::sol_data::Uint< + 8, + > as alloy_sol_types::SolType>::abi_encode_packed_to(self, out) + } + #[inline] + fn stv_abi_packed_encoded_size(&self) -> usize { + <::alloy_sol_types::sol_data::Uint<8> as alloy_sol_types::SolType>::abi_encoded_size( + self, + ) + } + } + #[automatically_derived] + impl TtlStatus { + /// The Solidity type name. + pub const NAME: &'static str = stringify!(@ name); + /// Convert from the underlying value type. + #[inline] + pub const fn from(value: u8) -> Self { + Self(value) + } + /// Return the underlying value. + #[inline] + pub const fn into(self) -> u8 { + self.0 + } + /// Return the single encoding of this value, delegating to the + /// underlying type. + #[inline] + pub fn abi_encode(&self) -> alloy_sol_types::private::Vec { + ::abi_encode(&self.0) + } + /// Return the packed encoding of this value, delegating to the + /// underlying type. 
+ #[inline] + pub fn abi_encode_packed(&self) -> alloy_sol_types::private::Vec { + ::abi_encode_packed(&self.0) + } + } + #[automatically_derived] + impl alloy_sol_types::SolType for TtlStatus { + type RustType = u8; + type Token<'a> = + <::alloy_sol_types::sol_data::Uint<8> as alloy_sol_types::SolType>::Token<'a>; + const SOL_NAME: &'static str = Self::NAME; + const ENCODED_SIZE: Option = + <::alloy_sol_types::sol_data::Uint<8> as alloy_sol_types::SolType>::ENCODED_SIZE; + const PACKED_ENCODED_SIZE: Option = <::alloy_sol_types::sol_data::Uint< + 8, + > as alloy_sol_types::SolType>::PACKED_ENCODED_SIZE; + #[inline] + fn valid_token(token: &Self::Token<'_>) -> bool { + Self::type_check(token).is_ok() + } + #[inline] + fn type_check(token: &Self::Token<'_>) -> alloy_sol_types::Result<()> { + <::alloy_sol_types::sol_data::Uint<8> as alloy_sol_types::SolType>::type_check( + token, + ) + } + #[inline] + fn detokenize(token: Self::Token<'_>) -> Self::RustType { + <::alloy_sol_types::sol_data::Uint<8> as alloy_sol_types::SolType>::detokenize( + token, + ) + } + } + #[automatically_derived] + impl alloy_sol_types::EventTopic for TtlStatus { + #[inline] + fn topic_preimage_length(rust: &Self::RustType) -> usize { + <::alloy_sol_types::sol_data::Uint< + 8, + > as alloy_sol_types::EventTopic>::topic_preimage_length(rust) + } + #[inline] + fn encode_topic_preimage( + rust: &Self::RustType, + out: &mut alloy_sol_types::private::Vec, + ) { + <::alloy_sol_types::sol_data::Uint< + 8, + > as alloy_sol_types::EventTopic>::encode_topic_preimage(rust, out) + } + #[inline] + fn encode_topic(rust: &Self::RustType) -> alloy_sol_types::abi::token::WordToken { + <::alloy_sol_types::sol_data::Uint<8> as alloy_sol_types::EventTopic>::encode_topic( + rust, + ) + } + } + }; + /**```solidity + struct Account { uint64 capacityUsed; uint256 creditFree; uint256 creditCommitted; address creditSponsor; uint64 lastDebitEpoch; Approval[] approvalsTo; Approval[] approvalsFrom; uint64 maxTtl; uint256 gasAllowance; } + ```*/ + #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct Account { + #[allow(missing_docs)] + pub capacityUsed: u64, + #[allow(missing_docs)] + pub creditFree: ::alloy_sol_types::private::primitives::aliases::U256, + #[allow(missing_docs)] + pub creditCommitted: ::alloy_sol_types::private::primitives::aliases::U256, + #[allow(missing_docs)] + pub creditSponsor: ::alloy_sol_types::private::Address, + #[allow(missing_docs)] + pub lastDebitEpoch: u64, + #[allow(missing_docs)] + pub approvalsTo: + ::alloy_sol_types::private::Vec<::RustType>, + #[allow(missing_docs)] + pub approvalsFrom: + ::alloy_sol_types::private::Vec<::RustType>, + #[allow(missing_docs)] + pub maxTtl: u64, + #[allow(missing_docs)] + pub gasAllowance: ::alloy_sol_types::private::primitives::aliases::U256, + } + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + const _: () = { + use ::alloy_sol_types; + #[doc(hidden)] + type UnderlyingSolTuple<'a> = ( + ::alloy_sol_types::sol_data::Uint<64>, + ::alloy_sol_types::sol_data::Uint<256>, + ::alloy_sol_types::sol_data::Uint<256>, + ::alloy_sol_types::sol_data::Address, + ::alloy_sol_types::sol_data::Uint<64>, + ::alloy_sol_types::sol_data::Array, + ::alloy_sol_types::sol_data::Array, + ::alloy_sol_types::sol_data::Uint<64>, + ::alloy_sol_types::sol_data::Uint<256>, + ); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = ( + u64, + ::alloy_sol_types::private::primitives::aliases::U256, + 
::alloy_sol_types::private::primitives::aliases::U256, + ::alloy_sol_types::private::Address, + u64, + ::alloy_sol_types::private::Vec<::RustType>, + ::alloy_sol_types::private::Vec<::RustType>, + u64, + ::alloy_sol_types::private::primitives::aliases::U256, + ); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: Account) -> Self { + ( + value.capacityUsed, + value.creditFree, + value.creditCommitted, + value.creditSponsor, + value.lastDebitEpoch, + value.approvalsTo, + value.approvalsFrom, + value.maxTtl, + value.gasAllowance, + ) + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for Account { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self { + capacityUsed: tuple.0, + creditFree: tuple.1, + creditCommitted: tuple.2, + creditSponsor: tuple.3, + lastDebitEpoch: tuple.4, + approvalsTo: tuple.5, + approvalsFrom: tuple.6, + maxTtl: tuple.7, + gasAllowance: tuple.8, + } + } + } + #[automatically_derived] + impl alloy_sol_types::SolValue for Account { + type SolType = Self; + } + #[automatically_derived] + impl alloy_sol_types::private::SolTypeValue for Account { + #[inline] + fn stv_to_tokens(&self) -> ::Token<'_> { + ( + <::alloy_sol_types::sol_data::Uint< + 64, + > as alloy_sol_types::SolType>::tokenize(&self.capacityUsed), + <::alloy_sol_types::sol_data::Uint< + 256, + > as alloy_sol_types::SolType>::tokenize(&self.creditFree), + <::alloy_sol_types::sol_data::Uint< + 256, + > as alloy_sol_types::SolType>::tokenize(&self.creditCommitted), + <::alloy_sol_types::sol_data::Address as alloy_sol_types::SolType>::tokenize( + &self.creditSponsor, + ), + <::alloy_sol_types::sol_data::Uint< + 64, + > as alloy_sol_types::SolType>::tokenize(&self.lastDebitEpoch), + <::alloy_sol_types::sol_data::Array< + Approval, + > as alloy_sol_types::SolType>::tokenize(&self.approvalsTo), + <::alloy_sol_types::sol_data::Array< + Approval, + > as alloy_sol_types::SolType>::tokenize(&self.approvalsFrom), + <::alloy_sol_types::sol_data::Uint< + 64, + > as alloy_sol_types::SolType>::tokenize(&self.maxTtl), + <::alloy_sol_types::sol_data::Uint< + 256, + > as alloy_sol_types::SolType>::tokenize(&self.gasAllowance), + ) + } + #[inline] + fn stv_abi_encoded_size(&self) -> usize { + if let Some(size) = ::ENCODED_SIZE { + return size; + } + let tuple = + as ::core::convert::From>::from(self.clone()); + as alloy_sol_types::SolType>::abi_encoded_size(&tuple) + } + #[inline] + fn stv_eip712_data_word(&self) -> alloy_sol_types::Word { + ::eip712_hash_struct(self) + } + #[inline] + fn stv_abi_encode_packed_to(&self, out: &mut alloy_sol_types::private::Vec) { + let tuple = + as ::core::convert::From>::from(self.clone()); + as alloy_sol_types::SolType>::abi_encode_packed_to( + &tuple, out, + ) + } + #[inline] + fn stv_abi_packed_encoded_size(&self) -> usize { + if let Some(size) = ::PACKED_ENCODED_SIZE { + return size; + } + let tuple = + as ::core::convert::From>::from(self.clone()); + as alloy_sol_types::SolType>::abi_packed_encoded_size( + &tuple, + ) + } + } + #[automatically_derived] + impl alloy_sol_types::SolType for Account { + type RustType = Self; + type Token<'a> = as alloy_sol_types::SolType>::Token<'a>; + const SOL_NAME: &'static str = ::NAME; + const ENCODED_SIZE: Option = + as 
alloy_sol_types::SolType>::ENCODED_SIZE; + const PACKED_ENCODED_SIZE: Option = + as alloy_sol_types::SolType>::PACKED_ENCODED_SIZE; + #[inline] + fn valid_token(token: &Self::Token<'_>) -> bool { + as alloy_sol_types::SolType>::valid_token(token) + } + #[inline] + fn detokenize(token: Self::Token<'_>) -> Self::RustType { + let tuple = as alloy_sol_types::SolType>::detokenize(token); + >>::from(tuple) + } + } + #[automatically_derived] + impl alloy_sol_types::SolStruct for Account { + const NAME: &'static str = "Account"; + #[inline] + fn eip712_root_type() -> alloy_sol_types::private::Cow<'static, str> { + alloy_sol_types::private::Cow::Borrowed( + "Account(uint64 capacityUsed,uint256 creditFree,uint256 creditCommitted,address creditSponsor,uint64 lastDebitEpoch,Approval[] approvalsTo,Approval[] approvalsFrom,uint64 maxTtl,uint256 gasAllowance)", + ) + } + #[inline] + fn eip712_components( + ) -> alloy_sol_types::private::Vec> + { + let mut components = alloy_sol_types::private::Vec::with_capacity(2); + components.push(::eip712_root_type()); + components.extend(::eip712_components()); + components.push(::eip712_root_type()); + components.extend(::eip712_components()); + components + } + #[inline] + fn eip712_encode_data(&self) -> alloy_sol_types::private::Vec { + [ + <::alloy_sol_types::sol_data::Uint< + 64, + > as alloy_sol_types::SolType>::eip712_data_word(&self.capacityUsed) + .0, + <::alloy_sol_types::sol_data::Uint< + 256, + > as alloy_sol_types::SolType>::eip712_data_word(&self.creditFree) + .0, + <::alloy_sol_types::sol_data::Uint< + 256, + > as alloy_sol_types::SolType>::eip712_data_word( + &self.creditCommitted, + ) + .0, + <::alloy_sol_types::sol_data::Address as alloy_sol_types::SolType>::eip712_data_word( + &self.creditSponsor, + ) + .0, + <::alloy_sol_types::sol_data::Uint< + 64, + > as alloy_sol_types::SolType>::eip712_data_word( + &self.lastDebitEpoch, + ) + .0, + <::alloy_sol_types::sol_data::Array< + Approval, + > as alloy_sol_types::SolType>::eip712_data_word(&self.approvalsTo) + .0, + <::alloy_sol_types::sol_data::Array< + Approval, + > as alloy_sol_types::SolType>::eip712_data_word(&self.approvalsFrom) + .0, + <::alloy_sol_types::sol_data::Uint< + 64, + > as alloy_sol_types::SolType>::eip712_data_word(&self.maxTtl) + .0, + <::alloy_sol_types::sol_data::Uint< + 256, + > as alloy_sol_types::SolType>::eip712_data_word(&self.gasAllowance) + .0, + ] + .concat() + } + } + #[automatically_derived] + impl alloy_sol_types::EventTopic for Account { + #[inline] + fn topic_preimage_length(rust: &Self::RustType) -> usize { + 0usize + + <::alloy_sol_types::sol_data::Uint< + 64, + > as alloy_sol_types::EventTopic>::topic_preimage_length( + &rust.capacityUsed, + ) + + <::alloy_sol_types::sol_data::Uint< + 256, + > as alloy_sol_types::EventTopic>::topic_preimage_length( + &rust.creditFree, + ) + + <::alloy_sol_types::sol_data::Uint< + 256, + > as alloy_sol_types::EventTopic>::topic_preimage_length( + &rust.creditCommitted, + ) + + <::alloy_sol_types::sol_data::Address as alloy_sol_types::EventTopic>::topic_preimage_length( + &rust.creditSponsor, + ) + + <::alloy_sol_types::sol_data::Uint< + 64, + > as alloy_sol_types::EventTopic>::topic_preimage_length( + &rust.lastDebitEpoch, + ) + + <::alloy_sol_types::sol_data::Array< + Approval, + > as alloy_sol_types::EventTopic>::topic_preimage_length( + &rust.approvalsTo, + ) + + <::alloy_sol_types::sol_data::Array< + Approval, + > as alloy_sol_types::EventTopic>::topic_preimage_length( + &rust.approvalsFrom, + ) + + 
<::alloy_sol_types::sol_data::Uint< + 64, + > as alloy_sol_types::EventTopic>::topic_preimage_length( + &rust.maxTtl, + ) + + <::alloy_sol_types::sol_data::Uint< + 256, + > as alloy_sol_types::EventTopic>::topic_preimage_length( + &rust.gasAllowance, + ) + } + #[inline] + fn encode_topic_preimage( + rust: &Self::RustType, + out: &mut alloy_sol_types::private::Vec, + ) { + out.reserve(::topic_preimage_length(rust)); + <::alloy_sol_types::sol_data::Uint< + 64, + > as alloy_sol_types::EventTopic>::encode_topic_preimage( + &rust.capacityUsed, + out, + ); + <::alloy_sol_types::sol_data::Uint< + 256, + > as alloy_sol_types::EventTopic>::encode_topic_preimage( + &rust.creditFree, + out, + ); + <::alloy_sol_types::sol_data::Uint< + 256, + > as alloy_sol_types::EventTopic>::encode_topic_preimage( + &rust.creditCommitted, + out, + ); + <::alloy_sol_types::sol_data::Address as alloy_sol_types::EventTopic>::encode_topic_preimage( + &rust.creditSponsor, + out, + ); + <::alloy_sol_types::sol_data::Uint< + 64, + > as alloy_sol_types::EventTopic>::encode_topic_preimage( + &rust.lastDebitEpoch, + out, + ); + <::alloy_sol_types::sol_data::Array< + Approval, + > as alloy_sol_types::EventTopic>::encode_topic_preimage( + &rust.approvalsTo, + out, + ); + <::alloy_sol_types::sol_data::Array< + Approval, + > as alloy_sol_types::EventTopic>::encode_topic_preimage( + &rust.approvalsFrom, + out, + ); + <::alloy_sol_types::sol_data::Uint< + 64, + > as alloy_sol_types::EventTopic>::encode_topic_preimage( + &rust.maxTtl, + out, + ); + <::alloy_sol_types::sol_data::Uint< + 256, + > as alloy_sol_types::EventTopic>::encode_topic_preimage( + &rust.gasAllowance, + out, + ); + } + #[inline] + fn encode_topic(rust: &Self::RustType) -> alloy_sol_types::abi::token::WordToken { + let mut out = alloy_sol_types::private::Vec::new(); + ::encode_topic_preimage(rust, &mut out); + alloy_sol_types::abi::token::WordToken(alloy_sol_types::private::keccak256(out)) + } + } + }; + /**```solidity + struct Approval { address addr; CreditApproval approval; } + ```*/ + #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct Approval { + #[allow(missing_docs)] + pub addr: ::alloy_sol_types::private::Address, + #[allow(missing_docs)] + pub approval: ::RustType, + } + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + const _: () = { + use ::alloy_sol_types; + #[doc(hidden)] + type UnderlyingSolTuple<'a> = (::alloy_sol_types::sol_data::Address, CreditApproval); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = ( + ::alloy_sol_types::private::Address, + ::RustType, + ); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: Approval) -> Self { + (value.addr, value.approval) + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for Approval { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self { + addr: tuple.0, + approval: tuple.1, + } + } + } + #[automatically_derived] + impl alloy_sol_types::SolValue for Approval { + type SolType = Self; + } + #[automatically_derived] + impl alloy_sol_types::private::SolTypeValue for Approval { + #[inline] + fn stv_to_tokens(&self) -> ::Token<'_> { + ( + <::alloy_sol_types::sol_data::Address 
as alloy_sol_types::SolType>::tokenize( + &self.addr, + ), + ::tokenize(&self.approval), + ) + } + #[inline] + fn stv_abi_encoded_size(&self) -> usize { + if let Some(size) = ::ENCODED_SIZE { + return size; + } + let tuple = + as ::core::convert::From>::from(self.clone()); + as alloy_sol_types::SolType>::abi_encoded_size(&tuple) + } + #[inline] + fn stv_eip712_data_word(&self) -> alloy_sol_types::Word { + ::eip712_hash_struct(self) + } + #[inline] + fn stv_abi_encode_packed_to(&self, out: &mut alloy_sol_types::private::Vec) { + let tuple = + as ::core::convert::From>::from(self.clone()); + as alloy_sol_types::SolType>::abi_encode_packed_to( + &tuple, out, + ) + } + #[inline] + fn stv_abi_packed_encoded_size(&self) -> usize { + if let Some(size) = ::PACKED_ENCODED_SIZE { + return size; + } + let tuple = + as ::core::convert::From>::from(self.clone()); + as alloy_sol_types::SolType>::abi_packed_encoded_size( + &tuple, + ) + } + } + #[automatically_derived] + impl alloy_sol_types::SolType for Approval { + type RustType = Self; + type Token<'a> = as alloy_sol_types::SolType>::Token<'a>; + const SOL_NAME: &'static str = ::NAME; + const ENCODED_SIZE: Option = + as alloy_sol_types::SolType>::ENCODED_SIZE; + const PACKED_ENCODED_SIZE: Option = + as alloy_sol_types::SolType>::PACKED_ENCODED_SIZE; + #[inline] + fn valid_token(token: &Self::Token<'_>) -> bool { + as alloy_sol_types::SolType>::valid_token(token) + } + #[inline] + fn detokenize(token: Self::Token<'_>) -> Self::RustType { + let tuple = as alloy_sol_types::SolType>::detokenize(token); + >>::from(tuple) + } + } + #[automatically_derived] + impl alloy_sol_types::SolStruct for Approval { + const NAME: &'static str = "Approval"; + #[inline] + fn eip712_root_type() -> alloy_sol_types::private::Cow<'static, str> { + alloy_sol_types::private::Cow::Borrowed( + "Approval(address addr,CreditApproval approval)", + ) + } + #[inline] + fn eip712_components( + ) -> alloy_sol_types::private::Vec> + { + let mut components = alloy_sol_types::private::Vec::with_capacity(1); + components.push(::eip712_root_type()); + components + .extend(::eip712_components()); + components + } + #[inline] + fn eip712_encode_data(&self) -> alloy_sol_types::private::Vec { + [ + <::alloy_sol_types::sol_data::Address as alloy_sol_types::SolType>::eip712_data_word( + &self.addr, + ) + .0, + ::eip712_data_word( + &self.approval, + ) + .0, + ] + .concat() + } + } + #[automatically_derived] + impl alloy_sol_types::EventTopic for Approval { + #[inline] + fn topic_preimage_length(rust: &Self::RustType) -> usize { + 0usize + + <::alloy_sol_types::sol_data::Address as alloy_sol_types::EventTopic>::topic_preimage_length( + &rust.addr, + ) + + ::topic_preimage_length( + &rust.approval, + ) + } + #[inline] + fn encode_topic_preimage( + rust: &Self::RustType, + out: &mut alloy_sol_types::private::Vec, + ) { + out.reserve(::topic_preimage_length(rust)); + <::alloy_sol_types::sol_data::Address as alloy_sol_types::EventTopic>::encode_topic_preimage( + &rust.addr, + out, + ); + ::encode_topic_preimage( + &rust.approval, + out, + ); + } + #[inline] + fn encode_topic(rust: &Self::RustType) -> alloy_sol_types::abi::token::WordToken { + let mut out = alloy_sol_types::private::Vec::new(); + ::encode_topic_preimage(rust, &mut out); + alloy_sol_types::abi::token::WordToken(alloy_sol_types::private::keccak256(out)) + } + } + }; + /**```solidity + struct CreditApproval { uint256 creditLimit; uint256 gasFeeLimit; uint64 expiry; uint256 creditUsed; uint256 gasFeeUsed; } + ```*/ + 
#[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct CreditApproval { + #[allow(missing_docs)] + pub creditLimit: ::alloy_sol_types::private::primitives::aliases::U256, + #[allow(missing_docs)] + pub gasFeeLimit: ::alloy_sol_types::private::primitives::aliases::U256, + #[allow(missing_docs)] + pub expiry: u64, + #[allow(missing_docs)] + pub creditUsed: ::alloy_sol_types::private::primitives::aliases::U256, + #[allow(missing_docs)] + pub gasFeeUsed: ::alloy_sol_types::private::primitives::aliases::U256, + } + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + const _: () = { + use ::alloy_sol_types; + #[doc(hidden)] + type UnderlyingSolTuple<'a> = ( + ::alloy_sol_types::sol_data::Uint<256>, + ::alloy_sol_types::sol_data::Uint<256>, + ::alloy_sol_types::sol_data::Uint<64>, + ::alloy_sol_types::sol_data::Uint<256>, + ::alloy_sol_types::sol_data::Uint<256>, + ); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = ( + ::alloy_sol_types::private::primitives::aliases::U256, + ::alloy_sol_types::private::primitives::aliases::U256, + u64, + ::alloy_sol_types::private::primitives::aliases::U256, + ::alloy_sol_types::private::primitives::aliases::U256, + ); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: CreditApproval) -> Self { + ( + value.creditLimit, + value.gasFeeLimit, + value.expiry, + value.creditUsed, + value.gasFeeUsed, + ) + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for CreditApproval { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self { + creditLimit: tuple.0, + gasFeeLimit: tuple.1, + expiry: tuple.2, + creditUsed: tuple.3, + gasFeeUsed: tuple.4, + } + } + } + #[automatically_derived] + impl alloy_sol_types::SolValue for CreditApproval { + type SolType = Self; + } + #[automatically_derived] + impl alloy_sol_types::private::SolTypeValue for CreditApproval { + #[inline] + fn stv_to_tokens(&self) -> ::Token<'_> { + ( + <::alloy_sol_types::sol_data::Uint<256> as alloy_sol_types::SolType>::tokenize( + &self.creditLimit, + ), + <::alloy_sol_types::sol_data::Uint<256> as alloy_sol_types::SolType>::tokenize( + &self.gasFeeLimit, + ), + <::alloy_sol_types::sol_data::Uint<64> as alloy_sol_types::SolType>::tokenize( + &self.expiry, + ), + <::alloy_sol_types::sol_data::Uint<256> as alloy_sol_types::SolType>::tokenize( + &self.creditUsed, + ), + <::alloy_sol_types::sol_data::Uint<256> as alloy_sol_types::SolType>::tokenize( + &self.gasFeeUsed, + ), + ) + } + #[inline] + fn stv_abi_encoded_size(&self) -> usize { + if let Some(size) = ::ENCODED_SIZE { + return size; + } + let tuple = + as ::core::convert::From>::from(self.clone()); + as alloy_sol_types::SolType>::abi_encoded_size(&tuple) + } + #[inline] + fn stv_eip712_data_word(&self) -> alloy_sol_types::Word { + ::eip712_hash_struct(self) + } + #[inline] + fn stv_abi_encode_packed_to(&self, out: &mut alloy_sol_types::private::Vec) { + let tuple = + as ::core::convert::From>::from(self.clone()); + as alloy_sol_types::SolType>::abi_encode_packed_to( + &tuple, out, + ) + } + #[inline] + fn stv_abi_packed_encoded_size(&self) -> usize { + if let Some(size) = ::PACKED_ENCODED_SIZE { + return size; + } + let tuple = + 
as ::core::convert::From>::from(self.clone()); + as alloy_sol_types::SolType>::abi_packed_encoded_size( + &tuple, + ) + } + } + #[automatically_derived] + impl alloy_sol_types::SolType for CreditApproval { + type RustType = Self; + type Token<'a> = as alloy_sol_types::SolType>::Token<'a>; + const SOL_NAME: &'static str = ::NAME; + const ENCODED_SIZE: Option = + as alloy_sol_types::SolType>::ENCODED_SIZE; + const PACKED_ENCODED_SIZE: Option = + as alloy_sol_types::SolType>::PACKED_ENCODED_SIZE; + #[inline] + fn valid_token(token: &Self::Token<'_>) -> bool { + as alloy_sol_types::SolType>::valid_token(token) + } + #[inline] + fn detokenize(token: Self::Token<'_>) -> Self::RustType { + let tuple = as alloy_sol_types::SolType>::detokenize(token); + >>::from(tuple) + } + } + #[automatically_derived] + impl alloy_sol_types::SolStruct for CreditApproval { + const NAME: &'static str = "CreditApproval"; + #[inline] + fn eip712_root_type() -> alloy_sol_types::private::Cow<'static, str> { + alloy_sol_types::private::Cow::Borrowed( + "CreditApproval(uint256 creditLimit,uint256 gasFeeLimit,uint64 expiry,uint256 creditUsed,uint256 gasFeeUsed)", + ) + } + #[inline] + fn eip712_components( + ) -> alloy_sol_types::private::Vec> + { + alloy_sol_types::private::Vec::new() + } + #[inline] + fn eip712_encode_type() -> alloy_sol_types::private::Cow<'static, str> { + ::eip712_root_type() + } + #[inline] + fn eip712_encode_data(&self) -> alloy_sol_types::private::Vec { + [ + <::alloy_sol_types::sol_data::Uint< + 256, + > as alloy_sol_types::SolType>::eip712_data_word(&self.creditLimit) + .0, + <::alloy_sol_types::sol_data::Uint< + 256, + > as alloy_sol_types::SolType>::eip712_data_word(&self.gasFeeLimit) + .0, + <::alloy_sol_types::sol_data::Uint< + 64, + > as alloy_sol_types::SolType>::eip712_data_word(&self.expiry) + .0, + <::alloy_sol_types::sol_data::Uint< + 256, + > as alloy_sol_types::SolType>::eip712_data_word(&self.creditUsed) + .0, + <::alloy_sol_types::sol_data::Uint< + 256, + > as alloy_sol_types::SolType>::eip712_data_word(&self.gasFeeUsed) + .0, + ] + .concat() + } + } + #[automatically_derived] + impl alloy_sol_types::EventTopic for CreditApproval { + #[inline] + fn topic_preimage_length(rust: &Self::RustType) -> usize { + 0usize + + <::alloy_sol_types::sol_data::Uint< + 256, + > as alloy_sol_types::EventTopic>::topic_preimage_length( + &rust.creditLimit, + ) + + <::alloy_sol_types::sol_data::Uint< + 256, + > as alloy_sol_types::EventTopic>::topic_preimage_length( + &rust.gasFeeLimit, + ) + + <::alloy_sol_types::sol_data::Uint< + 64, + > as alloy_sol_types::EventTopic>::topic_preimage_length( + &rust.expiry, + ) + + <::alloy_sol_types::sol_data::Uint< + 256, + > as alloy_sol_types::EventTopic>::topic_preimage_length( + &rust.creditUsed, + ) + + <::alloy_sol_types::sol_data::Uint< + 256, + > as alloy_sol_types::EventTopic>::topic_preimage_length( + &rust.gasFeeUsed, + ) + } + #[inline] + fn encode_topic_preimage( + rust: &Self::RustType, + out: &mut alloy_sol_types::private::Vec, + ) { + out.reserve(::topic_preimage_length(rust)); + <::alloy_sol_types::sol_data::Uint< + 256, + > as alloy_sol_types::EventTopic>::encode_topic_preimage( + &rust.creditLimit, + out, + ); + <::alloy_sol_types::sol_data::Uint< + 256, + > as alloy_sol_types::EventTopic>::encode_topic_preimage( + &rust.gasFeeLimit, + out, + ); + <::alloy_sol_types::sol_data::Uint< + 64, + > as alloy_sol_types::EventTopic>::encode_topic_preimage( + &rust.expiry, + out, + ); + <::alloy_sol_types::sol_data::Uint< + 256, + > as 
alloy_sol_types::EventTopic>::encode_topic_preimage( + &rust.creditUsed, + out, + ); + <::alloy_sol_types::sol_data::Uint< + 256, + > as alloy_sol_types::EventTopic>::encode_topic_preimage( + &rust.gasFeeUsed, + out, + ); + } + #[inline] + fn encode_topic(rust: &Self::RustType) -> alloy_sol_types::abi::token::WordToken { + let mut out = alloy_sol_types::private::Vec::new(); + ::encode_topic_preimage(rust, &mut out); + alloy_sol_types::abi::token::WordToken(alloy_sol_types::private::keccak256(out)) + } + } + }; + /**Event with signature `CreditApproved(address,address,uint256,uint256,uint256)` and selector `0xc69709e6f767dad7ccb19c605c3c602bf482ecb426059d7cdb5e5737d05b22f8`. + ```solidity + event CreditApproved(address from, address to, uint256 creditLimit, uint256 gasFeeLimit, uint256 expiry); + ```*/ + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + #[derive(Clone)] + pub struct CreditApproved { + #[allow(missing_docs)] + pub from: ::alloy_sol_types::private::Address, + #[allow(missing_docs)] + pub to: ::alloy_sol_types::private::Address, + #[allow(missing_docs)] + pub creditLimit: ::alloy_sol_types::private::primitives::aliases::U256, + #[allow(missing_docs)] + pub gasFeeLimit: ::alloy_sol_types::private::primitives::aliases::U256, + #[allow(missing_docs)] + pub expiry: ::alloy_sol_types::private::primitives::aliases::U256, + } + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + const _: () = { + use ::alloy_sol_types; + #[automatically_derived] + impl alloy_sol_types::SolEvent for CreditApproved { + type DataTuple<'a> = ( + ::alloy_sol_types::sol_data::Address, + ::alloy_sol_types::sol_data::Address, + ::alloy_sol_types::sol_data::Uint<256>, + ::alloy_sol_types::sol_data::Uint<256>, + ::alloy_sol_types::sol_data::Uint<256>, + ); + type DataToken<'a> = as alloy_sol_types::SolType>::Token<'a>; + type TopicList = (alloy_sol_types::sol_data::FixedBytes<32>,); + const SIGNATURE: &'static str = + "CreditApproved(address,address,uint256,uint256,uint256)"; + const SIGNATURE_HASH: alloy_sol_types::private::B256 = + alloy_sol_types::private::B256::new([ + 198u8, 151u8, 9u8, 230u8, 247u8, 103u8, 218u8, 215u8, 204u8, 177u8, 156u8, + 96u8, 92u8, 60u8, 96u8, 43u8, 244u8, 130u8, 236u8, 180u8, 38u8, 5u8, 157u8, + 124u8, 219u8, 94u8, 87u8, 55u8, 208u8, 91u8, 34u8, 248u8, + ]); + const ANONYMOUS: bool = false; + #[allow(unused_variables)] + #[inline] + fn new( + topics: ::RustType, + data: as alloy_sol_types::SolType>::RustType, + ) -> Self { + Self { + from: data.0, + to: data.1, + creditLimit: data.2, + gasFeeLimit: data.3, + expiry: data.4, + } + } + #[inline] + fn check_signature( + topics: &::RustType, + ) -> alloy_sol_types::Result<()> { + if topics.0 != Self::SIGNATURE_HASH { + return Err(alloy_sol_types::Error::invalid_event_signature_hash( + Self::SIGNATURE, + topics.0, + Self::SIGNATURE_HASH, + )); + } + Ok(()) + } + #[inline] + fn tokenize_body(&self) -> Self::DataToken<'_> { + ( + <::alloy_sol_types::sol_data::Address as alloy_sol_types::SolType>::tokenize( + &self.from, + ), + <::alloy_sol_types::sol_data::Address as alloy_sol_types::SolType>::tokenize( + &self.to, + ), + <::alloy_sol_types::sol_data::Uint<256> as alloy_sol_types::SolType>::tokenize( + &self.creditLimit, + ), + <::alloy_sol_types::sol_data::Uint<256> as alloy_sol_types::SolType>::tokenize( + &self.gasFeeLimit, + ), + <::alloy_sol_types::sol_data::Uint<256> as alloy_sol_types::SolType>::tokenize( + &self.expiry, 
+ ), + ) + } + #[inline] + fn topics(&self) -> ::RustType { + (Self::SIGNATURE_HASH.into(),) + } + #[inline] + fn encode_topics_raw( + &self, + out: &mut [alloy_sol_types::abi::token::WordToken], + ) -> alloy_sol_types::Result<()> { + if out.len() < ::COUNT { + return Err(alloy_sol_types::Error::Overrun); + } + out[0usize] = alloy_sol_types::abi::token::WordToken(Self::SIGNATURE_HASH); + Ok(()) + } + } + #[automatically_derived] + impl alloy_sol_types::private::IntoLogData for CreditApproved { + fn to_log_data(&self) -> alloy_sol_types::private::LogData { + From::from(self) + } + fn into_log_data(self) -> alloy_sol_types::private::LogData { + From::from(&self) + } + } + #[automatically_derived] + impl From<&CreditApproved> for alloy_sol_types::private::LogData { + #[inline] + fn from(this: &CreditApproved) -> alloy_sol_types::private::LogData { + alloy_sol_types::SolEvent::encode_log_data(this) + } + } + }; + /**Event with signature `CreditDebited(uint256,uint256,bool)` and selector `0x5cc1b5286143c9d1f8e1c090b5d7302388ab94fb45b1e18e63d8b08ef8c0f7c3`. + ```solidity + event CreditDebited(uint256 amount, uint256 numAccounts, bool moreAccounts); + ```*/ + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + #[derive(Clone)] + pub struct CreditDebited { + #[allow(missing_docs)] + pub amount: ::alloy_sol_types::private::primitives::aliases::U256, + #[allow(missing_docs)] + pub numAccounts: ::alloy_sol_types::private::primitives::aliases::U256, + #[allow(missing_docs)] + pub moreAccounts: bool, + } + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + const _: () = { + use ::alloy_sol_types; + #[automatically_derived] + impl alloy_sol_types::SolEvent for CreditDebited { + type DataTuple<'a> = ( + ::alloy_sol_types::sol_data::Uint<256>, + ::alloy_sol_types::sol_data::Uint<256>, + ::alloy_sol_types::sol_data::Bool, + ); + type DataToken<'a> = as alloy_sol_types::SolType>::Token<'a>; + type TopicList = (alloy_sol_types::sol_data::FixedBytes<32>,); + const SIGNATURE: &'static str = "CreditDebited(uint256,uint256,bool)"; + const SIGNATURE_HASH: alloy_sol_types::private::B256 = + alloy_sol_types::private::B256::new([ + 92u8, 193u8, 181u8, 40u8, 97u8, 67u8, 201u8, 209u8, 248u8, 225u8, 192u8, 144u8, + 181u8, 215u8, 48u8, 35u8, 136u8, 171u8, 148u8, 251u8, 69u8, 177u8, 225u8, + 142u8, 99u8, 216u8, 176u8, 142u8, 248u8, 192u8, 247u8, 195u8, + ]); + const ANONYMOUS: bool = false; + #[allow(unused_variables)] + #[inline] + fn new( + topics: ::RustType, + data: as alloy_sol_types::SolType>::RustType, + ) -> Self { + Self { + amount: data.0, + numAccounts: data.1, + moreAccounts: data.2, + } + } + #[inline] + fn check_signature( + topics: &::RustType, + ) -> alloy_sol_types::Result<()> { + if topics.0 != Self::SIGNATURE_HASH { + return Err(alloy_sol_types::Error::invalid_event_signature_hash( + Self::SIGNATURE, + topics.0, + Self::SIGNATURE_HASH, + )); + } + Ok(()) + } + #[inline] + fn tokenize_body(&self) -> Self::DataToken<'_> { + ( + <::alloy_sol_types::sol_data::Uint<256> as alloy_sol_types::SolType>::tokenize( + &self.amount, + ), + <::alloy_sol_types::sol_data::Uint<256> as alloy_sol_types::SolType>::tokenize( + &self.numAccounts, + ), + <::alloy_sol_types::sol_data::Bool as alloy_sol_types::SolType>::tokenize( + &self.moreAccounts, + ), + ) + } + #[inline] + fn topics(&self) -> ::RustType { + (Self::SIGNATURE_HASH.into(),) + } + #[inline] + fn encode_topics_raw( + &self, + out: &mut 
[alloy_sol_types::abi::token::WordToken], + ) -> alloy_sol_types::Result<()> { + if out.len() < ::COUNT { + return Err(alloy_sol_types::Error::Overrun); + } + out[0usize] = alloy_sol_types::abi::token::WordToken(Self::SIGNATURE_HASH); + Ok(()) + } + } + #[automatically_derived] + impl alloy_sol_types::private::IntoLogData for CreditDebited { + fn to_log_data(&self) -> alloy_sol_types::private::LogData { + From::from(self) + } + fn into_log_data(self) -> alloy_sol_types::private::LogData { + From::from(&self) + } + } + #[automatically_derived] + impl From<&CreditDebited> for alloy_sol_types::private::LogData { + #[inline] + fn from(this: &CreditDebited) -> alloy_sol_types::private::LogData { + alloy_sol_types::SolEvent::encode_log_data(this) + } + } + }; + /**Event with signature `CreditPurchased(address,uint256)` and selector `0xacf2bdc99696da35cbfe300e8b7d3d337ffc9918d8547c58ef8b58a20ec075df`. + ```solidity + event CreditPurchased(address from, uint256 amount); + ```*/ + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + #[derive(Clone)] + pub struct CreditPurchased { + #[allow(missing_docs)] + pub from: ::alloy_sol_types::private::Address, + #[allow(missing_docs)] + pub amount: ::alloy_sol_types::private::primitives::aliases::U256, + } + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + const _: () = { + use ::alloy_sol_types; + #[automatically_derived] + impl alloy_sol_types::SolEvent for CreditPurchased { + type DataTuple<'a> = ( + ::alloy_sol_types::sol_data::Address, + ::alloy_sol_types::sol_data::Uint<256>, + ); + type DataToken<'a> = as alloy_sol_types::SolType>::Token<'a>; + type TopicList = (alloy_sol_types::sol_data::FixedBytes<32>,); + const SIGNATURE: &'static str = "CreditPurchased(address,uint256)"; + const SIGNATURE_HASH: alloy_sol_types::private::B256 = + alloy_sol_types::private::B256::new([ + 172u8, 242u8, 189u8, 201u8, 150u8, 150u8, 218u8, 53u8, 203u8, 254u8, 48u8, + 14u8, 139u8, 125u8, 61u8, 51u8, 127u8, 252u8, 153u8, 24u8, 216u8, 84u8, 124u8, + 88u8, 239u8, 139u8, 88u8, 162u8, 14u8, 192u8, 117u8, 223u8, + ]); + const ANONYMOUS: bool = false; + #[allow(unused_variables)] + #[inline] + fn new( + topics: ::RustType, + data: as alloy_sol_types::SolType>::RustType, + ) -> Self { + Self { + from: data.0, + amount: data.1, + } + } + #[inline] + fn check_signature( + topics: &::RustType, + ) -> alloy_sol_types::Result<()> { + if topics.0 != Self::SIGNATURE_HASH { + return Err(alloy_sol_types::Error::invalid_event_signature_hash( + Self::SIGNATURE, + topics.0, + Self::SIGNATURE_HASH, + )); + } + Ok(()) + } + #[inline] + fn tokenize_body(&self) -> Self::DataToken<'_> { + ( + <::alloy_sol_types::sol_data::Address as alloy_sol_types::SolType>::tokenize( + &self.from, + ), + <::alloy_sol_types::sol_data::Uint<256> as alloy_sol_types::SolType>::tokenize( + &self.amount, + ), + ) + } + #[inline] + fn topics(&self) -> ::RustType { + (Self::SIGNATURE_HASH.into(),) + } + #[inline] + fn encode_topics_raw( + &self, + out: &mut [alloy_sol_types::abi::token::WordToken], + ) -> alloy_sol_types::Result<()> { + if out.len() < ::COUNT { + return Err(alloy_sol_types::Error::Overrun); + } + out[0usize] = alloy_sol_types::abi::token::WordToken(Self::SIGNATURE_HASH); + Ok(()) + } + } + #[automatically_derived] + impl alloy_sol_types::private::IntoLogData for CreditPurchased { + fn to_log_data(&self) -> alloy_sol_types::private::LogData { + From::from(self) + } + fn into_log_data(self) 
-> alloy_sol_types::private::LogData { + From::from(&self) + } + } + #[automatically_derived] + impl From<&CreditPurchased> for alloy_sol_types::private::LogData { + #[inline] + fn from(this: &CreditPurchased) -> alloy_sol_types::private::LogData { + alloy_sol_types::SolEvent::encode_log_data(this) + } + } + }; + /**Event with signature `CreditRevoked(address,address)` and selector `0xe63d1a905c0cbc7f25c8f71af5ecb744b771b20f954f39e1654d4d838f93b89e`. + ```solidity + event CreditRevoked(address from, address to); + ```*/ + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + #[derive(Clone)] + pub struct CreditRevoked { + #[allow(missing_docs)] + pub from: ::alloy_sol_types::private::Address, + #[allow(missing_docs)] + pub to: ::alloy_sol_types::private::Address, + } + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + const _: () = { + use ::alloy_sol_types; + #[automatically_derived] + impl alloy_sol_types::SolEvent for CreditRevoked { + type DataTuple<'a> = ( + ::alloy_sol_types::sol_data::Address, + ::alloy_sol_types::sol_data::Address, + ); + type DataToken<'a> = as alloy_sol_types::SolType>::Token<'a>; + type TopicList = (alloy_sol_types::sol_data::FixedBytes<32>,); + const SIGNATURE: &'static str = "CreditRevoked(address,address)"; + const SIGNATURE_HASH: alloy_sol_types::private::B256 = + alloy_sol_types::private::B256::new([ + 230u8, 61u8, 26u8, 144u8, 92u8, 12u8, 188u8, 127u8, 37u8, 200u8, 247u8, 26u8, + 245u8, 236u8, 183u8, 68u8, 183u8, 113u8, 178u8, 15u8, 149u8, 79u8, 57u8, 225u8, + 101u8, 77u8, 77u8, 131u8, 143u8, 147u8, 184u8, 158u8, + ]); + const ANONYMOUS: bool = false; + #[allow(unused_variables)] + #[inline] + fn new( + topics: ::RustType, + data: as alloy_sol_types::SolType>::RustType, + ) -> Self { + Self { + from: data.0, + to: data.1, + } + } + #[inline] + fn check_signature( + topics: &::RustType, + ) -> alloy_sol_types::Result<()> { + if topics.0 != Self::SIGNATURE_HASH { + return Err(alloy_sol_types::Error::invalid_event_signature_hash( + Self::SIGNATURE, + topics.0, + Self::SIGNATURE_HASH, + )); + } + Ok(()) + } + #[inline] + fn tokenize_body(&self) -> Self::DataToken<'_> { + ( + <::alloy_sol_types::sol_data::Address as alloy_sol_types::SolType>::tokenize( + &self.from, + ), + <::alloy_sol_types::sol_data::Address as alloy_sol_types::SolType>::tokenize( + &self.to, + ), + ) + } + #[inline] + fn topics(&self) -> ::RustType { + (Self::SIGNATURE_HASH.into(),) + } + #[inline] + fn encode_topics_raw( + &self, + out: &mut [alloy_sol_types::abi::token::WordToken], + ) -> alloy_sol_types::Result<()> { + if out.len() < ::COUNT { + return Err(alloy_sol_types::Error::Overrun); + } + out[0usize] = alloy_sol_types::abi::token::WordToken(Self::SIGNATURE_HASH); + Ok(()) + } + } + #[automatically_derived] + impl alloy_sol_types::private::IntoLogData for CreditRevoked { + fn to_log_data(&self) -> alloy_sol_types::private::LogData { + From::from(self) + } + fn into_log_data(self) -> alloy_sol_types::private::LogData { + From::from(&self) + } + } + #[automatically_derived] + impl From<&CreditRevoked> for alloy_sol_types::private::LogData { + #[inline] + fn from(this: &CreditRevoked) -> alloy_sol_types::private::LogData { + alloy_sol_types::SolEvent::encode_log_data(this) + } + } + }; + /**Function with signature `approveCredit(address)` and selector `0x01e98bfa`. 
+ ```solidity + function approveCredit(address to) external; + ```*/ + #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct approveCredit_0Call { + #[allow(missing_docs)] + pub to: ::alloy_sol_types::private::Address, + } + ///Container type for the return parameters of the [`approveCredit(address)`](approveCredit_0Call) function. + #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct approveCredit_0Return {} + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + const _: () = { + use ::alloy_sol_types; + { + #[doc(hidden)] + type UnderlyingSolTuple<'a> = (::alloy_sol_types::sol_data::Address,); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = (::alloy_sol_types::private::Address,); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: approveCredit_0Call) -> Self { + (value.to,) + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for approveCredit_0Call { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self { to: tuple.0 } + } + } + } + { + #[doc(hidden)] + type UnderlyingSolTuple<'a> = (); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = (); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: approveCredit_0Return) -> Self { + () + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for approveCredit_0Return { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self {} + } + } + } + #[automatically_derived] + impl alloy_sol_types::SolCall for approveCredit_0Call { + type Parameters<'a> = (::alloy_sol_types::sol_data::Address,); + type Token<'a> = as alloy_sol_types::SolType>::Token<'a>; + type Return = approveCredit_0Return; + type ReturnTuple<'a> = (); + type ReturnToken<'a> = as alloy_sol_types::SolType>::Token<'a>; + const SIGNATURE: &'static str = "approveCredit(address)"; + const SELECTOR: [u8; 4] = [1u8, 233u8, 139u8, 250u8]; + #[inline] + fn new<'a>( + tuple: as alloy_sol_types::SolType>::RustType, + ) -> Self { + tuple.into() + } + #[inline] + fn tokenize(&self) -> Self::Token<'_> { + ( + <::alloy_sol_types::sol_data::Address as alloy_sol_types::SolType>::tokenize( + &self.to, + ), + ) + } + #[inline] + fn abi_decode_returns( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + as alloy_sol_types::SolType>::abi_decode_sequence( + data, validate, + ) + .map(Into::into) + } + } + }; + /**Function with signature `approveCredit(address,address[],uint256,uint256,uint64)` and selector `0x112b6517`. 
+ ```solidity + function approveCredit(address to, address[] memory caller, uint256 creditLimit, uint256 gasFeeLimit, uint64 ttl) external; + ```*/ + #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct approveCredit_1Call { + #[allow(missing_docs)] + pub to: ::alloy_sol_types::private::Address, + #[allow(missing_docs)] + pub caller: ::alloy_sol_types::private::Vec<::alloy_sol_types::private::Address>, + #[allow(missing_docs)] + pub creditLimit: ::alloy_sol_types::private::primitives::aliases::U256, + #[allow(missing_docs)] + pub gasFeeLimit: ::alloy_sol_types::private::primitives::aliases::U256, + #[allow(missing_docs)] + pub ttl: u64, + } + ///Container type for the return parameters of the [`approveCredit(address,address[],uint256,uint256,uint64)`](approveCredit_1Call) function. + #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct approveCredit_1Return {} + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + const _: () = { + use ::alloy_sol_types; + { + #[doc(hidden)] + type UnderlyingSolTuple<'a> = ( + ::alloy_sol_types::sol_data::Address, + ::alloy_sol_types::sol_data::Array<::alloy_sol_types::sol_data::Address>, + ::alloy_sol_types::sol_data::Uint<256>, + ::alloy_sol_types::sol_data::Uint<256>, + ::alloy_sol_types::sol_data::Uint<64>, + ); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = ( + ::alloy_sol_types::private::Address, + ::alloy_sol_types::private::Vec<::alloy_sol_types::private::Address>, + ::alloy_sol_types::private::primitives::aliases::U256, + ::alloy_sol_types::private::primitives::aliases::U256, + u64, + ); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: approveCredit_1Call) -> Self { + ( + value.to, + value.caller, + value.creditLimit, + value.gasFeeLimit, + value.ttl, + ) + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for approveCredit_1Call { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self { + to: tuple.0, + caller: tuple.1, + creditLimit: tuple.2, + gasFeeLimit: tuple.3, + ttl: tuple.4, + } + } + } + } + { + #[doc(hidden)] + type UnderlyingSolTuple<'a> = (); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = (); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: approveCredit_1Return) -> Self { + () + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for approveCredit_1Return { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self {} + } + } + } + #[automatically_derived] + impl alloy_sol_types::SolCall for approveCredit_1Call { + type Parameters<'a> = ( + ::alloy_sol_types::sol_data::Address, + ::alloy_sol_types::sol_data::Array<::alloy_sol_types::sol_data::Address>, + ::alloy_sol_types::sol_data::Uint<256>, + ::alloy_sol_types::sol_data::Uint<256>, + ::alloy_sol_types::sol_data::Uint<64>, + ); + type Token<'a> = as 
alloy_sol_types::SolType>::Token<'a>; + type Return = approveCredit_1Return; + type ReturnTuple<'a> = (); + type ReturnToken<'a> = as alloy_sol_types::SolType>::Token<'a>; + const SIGNATURE: &'static str = + "approveCredit(address,address[],uint256,uint256,uint64)"; + const SELECTOR: [u8; 4] = [17u8, 43u8, 101u8, 23u8]; + #[inline] + fn new<'a>( + tuple: as alloy_sol_types::SolType>::RustType, + ) -> Self { + tuple.into() + } + #[inline] + fn tokenize(&self) -> Self::Token<'_> { + ( + <::alloy_sol_types::sol_data::Address as alloy_sol_types::SolType>::tokenize( + &self.to, + ), + <::alloy_sol_types::sol_data::Array< + ::alloy_sol_types::sol_data::Address, + > as alloy_sol_types::SolType>::tokenize(&self.caller), + <::alloy_sol_types::sol_data::Uint< + 256, + > as alloy_sol_types::SolType>::tokenize(&self.creditLimit), + <::alloy_sol_types::sol_data::Uint< + 256, + > as alloy_sol_types::SolType>::tokenize(&self.gasFeeLimit), + <::alloy_sol_types::sol_data::Uint< + 64, + > as alloy_sol_types::SolType>::tokenize(&self.ttl), + ) + } + #[inline] + fn abi_decode_returns( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + as alloy_sol_types::SolType>::abi_decode_sequence( + data, validate, + ) + .map(Into::into) + } + } + }; + /**Function with signature `approveCredit(address,address[])` and selector `0xa0aa2b65`. + ```solidity + function approveCredit(address to, address[] memory caller) external; + ```*/ + #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct approveCredit_2Call { + #[allow(missing_docs)] + pub to: ::alloy_sol_types::private::Address, + #[allow(missing_docs)] + pub caller: ::alloy_sol_types::private::Vec<::alloy_sol_types::private::Address>, + } + ///Container type for the return parameters of the [`approveCredit(address,address[])`](approveCredit_2Call) function. 
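+ ///
+ /// A minimal encoding sketch (illustrative only; it assumes the generated
+ /// bindings are in scope as `ICreditFacade` and that `alloy_primitives` is
+ /// available):
+ ///
+ /// ```ignore
+ /// use alloy_primitives::Address;
+ /// use alloy_sol_types::SolCall;
+ ///
+ /// // Encode approveCredit(address,address[]) with a one-entry caller allow-list.
+ /// // The first four bytes of the calldata are the selector 0xa0aa2b65.
+ /// let call = ICreditFacade::approveCredit_2Call {
+ ///     to: Address::ZERO,
+ ///     caller: vec![Address::ZERO],
+ /// };
+ /// let calldata = call.abi_encode();
+ /// assert_eq!(&calldata[..4], &ICreditFacade::approveCredit_2Call::SELECTOR[..]);
+ /// ```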
+ #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct approveCredit_2Return {} + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + const _: () = { + use ::alloy_sol_types; + { + #[doc(hidden)] + type UnderlyingSolTuple<'a> = ( + ::alloy_sol_types::sol_data::Address, + ::alloy_sol_types::sol_data::Array<::alloy_sol_types::sol_data::Address>, + ); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = ( + ::alloy_sol_types::private::Address, + ::alloy_sol_types::private::Vec<::alloy_sol_types::private::Address>, + ); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: approveCredit_2Call) -> Self { + (value.to, value.caller) + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for approveCredit_2Call { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self { + to: tuple.0, + caller: tuple.1, + } + } + } + } + { + #[doc(hidden)] + type UnderlyingSolTuple<'a> = (); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = (); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: approveCredit_2Return) -> Self { + () + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for approveCredit_2Return { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self {} + } + } + } + #[automatically_derived] + impl alloy_sol_types::SolCall for approveCredit_2Call { + type Parameters<'a> = ( + ::alloy_sol_types::sol_data::Address, + ::alloy_sol_types::sol_data::Array<::alloy_sol_types::sol_data::Address>, + ); + type Token<'a> = as alloy_sol_types::SolType>::Token<'a>; + type Return = approveCredit_2Return; + type ReturnTuple<'a> = (); + type ReturnToken<'a> = as alloy_sol_types::SolType>::Token<'a>; + const SIGNATURE: &'static str = "approveCredit(address,address[])"; + const SELECTOR: [u8; 4] = [160u8, 170u8, 43u8, 101u8]; + #[inline] + fn new<'a>( + tuple: as alloy_sol_types::SolType>::RustType, + ) -> Self { + tuple.into() + } + #[inline] + fn tokenize(&self) -> Self::Token<'_> { + ( + <::alloy_sol_types::sol_data::Address as alloy_sol_types::SolType>::tokenize( + &self.to, + ), + <::alloy_sol_types::sol_data::Array< + ::alloy_sol_types::sol_data::Address, + > as alloy_sol_types::SolType>::tokenize(&self.caller), + ) + } + #[inline] + fn abi_decode_returns( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + as alloy_sol_types::SolType>::abi_decode_sequence( + data, validate, + ) + .map(Into::into) + } + } + }; + /**Function with signature `buyCredit()` and selector `0x8e4e6f06`. + ```solidity + function buyCredit() external payable; + ```*/ + #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct buyCredit_0Call {} + ///Container type for the return parameters of the [`buyCredit()`](buyCredit_0Call) function. 
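+ ///
+ /// Because `buyCredit()` is payable and takes no arguments, its calldata is just
+ /// the four-byte selector 0x8e4e6f06; the purchase amount is presumably the native
+ /// token value attached to the transaction. A small sketch (illustrative only,
+ /// assuming the bindings are in scope as `ICreditFacade`):
+ ///
+ /// ```ignore
+ /// use alloy_sol_types::SolCall;
+ ///
+ /// let calldata = ICreditFacade::buyCredit_0Call {}.abi_encode();
+ /// assert_eq!(calldata, ICreditFacade::buyCredit_0Call::SELECTOR.to_vec());
+ /// ```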
+ #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct buyCredit_0Return {} + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + const _: () = { + use ::alloy_sol_types; + { + #[doc(hidden)] + type UnderlyingSolTuple<'a> = (); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = (); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: buyCredit_0Call) -> Self { + () + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for buyCredit_0Call { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self {} + } + } + } + { + #[doc(hidden)] + type UnderlyingSolTuple<'a> = (); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = (); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: buyCredit_0Return) -> Self { + () + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for buyCredit_0Return { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self {} + } + } + } + #[automatically_derived] + impl alloy_sol_types::SolCall for buyCredit_0Call { + type Parameters<'a> = (); + type Token<'a> = as alloy_sol_types::SolType>::Token<'a>; + type Return = buyCredit_0Return; + type ReturnTuple<'a> = (); + type ReturnToken<'a> = as alloy_sol_types::SolType>::Token<'a>; + const SIGNATURE: &'static str = "buyCredit()"; + const SELECTOR: [u8; 4] = [142u8, 78u8, 111u8, 6u8]; + #[inline] + fn new<'a>( + tuple: as alloy_sol_types::SolType>::RustType, + ) -> Self { + tuple.into() + } + #[inline] + fn tokenize(&self) -> Self::Token<'_> { + () + } + #[inline] + fn abi_decode_returns( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + as alloy_sol_types::SolType>::abi_decode_sequence( + data, validate, + ) + .map(Into::into) + } + } + }; + /**Function with signature `buyCredit(address)` and selector `0xa38eae9f`. + ```solidity + function buyCredit(address recipient) external payable; + ```*/ + #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct buyCredit_1Call { + #[allow(missing_docs)] + pub recipient: ::alloy_sol_types::private::Address, + } + ///Container type for the return parameters of the [`buyCredit(address)`](buyCredit_1Call) function. 
+ #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct buyCredit_1Return {} + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + const _: () = { + use ::alloy_sol_types; + { + #[doc(hidden)] + type UnderlyingSolTuple<'a> = (::alloy_sol_types::sol_data::Address,); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = (::alloy_sol_types::private::Address,); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: buyCredit_1Call) -> Self { + (value.recipient,) + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for buyCredit_1Call { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self { recipient: tuple.0 } + } + } + } + { + #[doc(hidden)] + type UnderlyingSolTuple<'a> = (); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = (); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: buyCredit_1Return) -> Self { + () + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for buyCredit_1Return { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self {} + } + } + } + #[automatically_derived] + impl alloy_sol_types::SolCall for buyCredit_1Call { + type Parameters<'a> = (::alloy_sol_types::sol_data::Address,); + type Token<'a> = as alloy_sol_types::SolType>::Token<'a>; + type Return = buyCredit_1Return; + type ReturnTuple<'a> = (); + type ReturnToken<'a> = as alloy_sol_types::SolType>::Token<'a>; + const SIGNATURE: &'static str = "buyCredit(address)"; + const SELECTOR: [u8; 4] = [163u8, 142u8, 174u8, 159u8]; + #[inline] + fn new<'a>( + tuple: as alloy_sol_types::SolType>::RustType, + ) -> Self { + tuple.into() + } + #[inline] + fn tokenize(&self) -> Self::Token<'_> { + ( + <::alloy_sol_types::sol_data::Address as alloy_sol_types::SolType>::tokenize( + &self.recipient, + ), + ) + } + #[inline] + fn abi_decode_returns( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + as alloy_sol_types::SolType>::abi_decode_sequence( + data, validate, + ) + .map(Into::into) + } + } + }; + /**Function with signature `getAccount(address)` and selector `0xfbcbc0f1`. + ```solidity + function getAccount(address addr) external view returns (Account memory account); + ```*/ + #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct getAccountCall { + #[allow(missing_docs)] + pub addr: ::alloy_sol_types::private::Address, + } + ///Container type for the return parameters of the [`getAccount(address)`](getAccountCall) function. 
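+ ///
+ /// A minimal decoding sketch (illustrative only; `parse_account` is a
+ /// hypothetical helper and `raw` stands in for the ABI-encoded bytes returned
+ /// by an `eth_call` to `getAccount(address)`):
+ ///
+ /// ```ignore
+ /// use alloy_sol_types::SolCall;
+ ///
+ /// fn parse_account(raw: &[u8]) -> alloy_sol_types::Result<ICreditFacade::Account> {
+ ///     // Decode the return data into the generated container, then pull out the struct.
+ ///     let decoded = ICreditFacade::getAccountCall::abi_decode_returns(raw, true)?;
+ ///     Ok(decoded.account)
+ /// }
+ /// ```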
+ #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct getAccountReturn { + #[allow(missing_docs)] + pub account: ::RustType, + } + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + const _: () = { + use ::alloy_sol_types; + { + #[doc(hidden)] + type UnderlyingSolTuple<'a> = (::alloy_sol_types::sol_data::Address,); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = (::alloy_sol_types::private::Address,); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: getAccountCall) -> Self { + (value.addr,) + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for getAccountCall { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self { addr: tuple.0 } + } + } + } + { + #[doc(hidden)] + type UnderlyingSolTuple<'a> = (Account,); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = (::RustType,); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: getAccountReturn) -> Self { + (value.account,) + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for getAccountReturn { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self { account: tuple.0 } + } + } + } + #[automatically_derived] + impl alloy_sol_types::SolCall for getAccountCall { + type Parameters<'a> = (::alloy_sol_types::sol_data::Address,); + type Token<'a> = as alloy_sol_types::SolType>::Token<'a>; + type Return = getAccountReturn; + type ReturnTuple<'a> = (Account,); + type ReturnToken<'a> = as alloy_sol_types::SolType>::Token<'a>; + const SIGNATURE: &'static str = "getAccount(address)"; + const SELECTOR: [u8; 4] = [251u8, 203u8, 192u8, 241u8]; + #[inline] + fn new<'a>( + tuple: as alloy_sol_types::SolType>::RustType, + ) -> Self { + tuple.into() + } + #[inline] + fn tokenize(&self) -> Self::Token<'_> { + ( + <::alloy_sol_types::sol_data::Address as alloy_sol_types::SolType>::tokenize( + &self.addr, + ), + ) + } + #[inline] + fn abi_decode_returns( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + as alloy_sol_types::SolType>::abi_decode_sequence( + data, validate, + ) + .map(Into::into) + } + } + }; + /**Function with signature `getCreditApproval(address,address)` and selector `0xcd9be80f`. + ```solidity + function getCreditApproval(address from, address to) external view returns (CreditApproval memory approval); + ```*/ + #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct getCreditApprovalCall { + #[allow(missing_docs)] + pub from: ::alloy_sol_types::private::Address, + #[allow(missing_docs)] + pub to: ::alloy_sol_types::private::Address, + } + ///Container type for the return parameters of the [`getCreditApproval(address,address)`](getCreditApprovalCall) function. 
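// Editor's sketch: encoding calldata for the view call getAccount(address)
// defined above. Assumes alloy_primitives::Address is in scope; the returned
// bytes are the 4-byte selector followed by the ABI-encoded arguments.
fn encode_get_account() -> Vec<u8> {
    use alloy_sol_types::SolCall;
    let call = getAccountCall { addr: alloy_primitives::Address::ZERO };
    let calldata = call.abi_encode();
    assert_eq!(calldata.len(), 4 + 32); // one static address argument pads to 32 bytes
    assert_eq!(&calldata[..4], &getAccountCall::SELECTOR[..]);
    calldata
}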
+ #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct getCreditApprovalReturn { + #[allow(missing_docs)] + pub approval: ::RustType, + } + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + const _: () = { + use ::alloy_sol_types; + { + #[doc(hidden)] + type UnderlyingSolTuple<'a> = ( + ::alloy_sol_types::sol_data::Address, + ::alloy_sol_types::sol_data::Address, + ); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = ( + ::alloy_sol_types::private::Address, + ::alloy_sol_types::private::Address, + ); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: getCreditApprovalCall) -> Self { + (value.from, value.to) + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for getCreditApprovalCall { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self { + from: tuple.0, + to: tuple.1, + } + } + } + } + { + #[doc(hidden)] + type UnderlyingSolTuple<'a> = (CreditApproval,); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = + (::RustType,); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: getCreditApprovalReturn) -> Self { + (value.approval,) + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for getCreditApprovalReturn { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self { approval: tuple.0 } + } + } + } + #[automatically_derived] + impl alloy_sol_types::SolCall for getCreditApprovalCall { + type Parameters<'a> = ( + ::alloy_sol_types::sol_data::Address, + ::alloy_sol_types::sol_data::Address, + ); + type Token<'a> = as alloy_sol_types::SolType>::Token<'a>; + type Return = getCreditApprovalReturn; + type ReturnTuple<'a> = (CreditApproval,); + type ReturnToken<'a> = as alloy_sol_types::SolType>::Token<'a>; + const SIGNATURE: &'static str = "getCreditApproval(address,address)"; + const SELECTOR: [u8; 4] = [205u8, 155u8, 232u8, 15u8]; + #[inline] + fn new<'a>( + tuple: as alloy_sol_types::SolType>::RustType, + ) -> Self { + tuple.into() + } + #[inline] + fn tokenize(&self) -> Self::Token<'_> { + ( + <::alloy_sol_types::sol_data::Address as alloy_sol_types::SolType>::tokenize( + &self.from, + ), + <::alloy_sol_types::sol_data::Address as alloy_sol_types::SolType>::tokenize( + &self.to, + ), + ) + } + #[inline] + fn abi_decode_returns( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + as alloy_sol_types::SolType>::abi_decode_sequence( + data, validate, + ) + .map(Into::into) + } + } + }; + /**Function with signature `revokeCredit(address,address)` and selector `0xa84a1535`. 
+ ```solidity + function revokeCredit(address to, address caller) external; + ```*/ + #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct revokeCredit_0Call { + #[allow(missing_docs)] + pub to: ::alloy_sol_types::private::Address, + #[allow(missing_docs)] + pub caller: ::alloy_sol_types::private::Address, + } + ///Container type for the return parameters of the [`revokeCredit(address,address)`](revokeCredit_0Call) function. + #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct revokeCredit_0Return {} + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + const _: () = { + use ::alloy_sol_types; + { + #[doc(hidden)] + type UnderlyingSolTuple<'a> = ( + ::alloy_sol_types::sol_data::Address, + ::alloy_sol_types::sol_data::Address, + ); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = ( + ::alloy_sol_types::private::Address, + ::alloy_sol_types::private::Address, + ); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: revokeCredit_0Call) -> Self { + (value.to, value.caller) + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for revokeCredit_0Call { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self { + to: tuple.0, + caller: tuple.1, + } + } + } + } + { + #[doc(hidden)] + type UnderlyingSolTuple<'a> = (); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = (); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: revokeCredit_0Return) -> Self { + () + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for revokeCredit_0Return { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self {} + } + } + } + #[automatically_derived] + impl alloy_sol_types::SolCall for revokeCredit_0Call { + type Parameters<'a> = ( + ::alloy_sol_types::sol_data::Address, + ::alloy_sol_types::sol_data::Address, + ); + type Token<'a> = as alloy_sol_types::SolType>::Token<'a>; + type Return = revokeCredit_0Return; + type ReturnTuple<'a> = (); + type ReturnToken<'a> = as alloy_sol_types::SolType>::Token<'a>; + const SIGNATURE: &'static str = "revokeCredit(address,address)"; + const SELECTOR: [u8; 4] = [168u8, 74u8, 21u8, 53u8]; + #[inline] + fn new<'a>( + tuple: as alloy_sol_types::SolType>::RustType, + ) -> Self { + tuple.into() + } + #[inline] + fn tokenize(&self) -> Self::Token<'_> { + ( + <::alloy_sol_types::sol_data::Address as alloy_sol_types::SolType>::tokenize( + &self.to, + ), + <::alloy_sol_types::sol_data::Address as alloy_sol_types::SolType>::tokenize( + &self.caller, + ), + ) + } + #[inline] + fn abi_decode_returns( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + as alloy_sol_types::SolType>::abi_decode_sequence( + data, validate, + ) + .map(Into::into) + } + } + }; + /**Function with signature `revokeCredit(address)` and selector `0xa8ef8caf`. 
+ ```solidity + function revokeCredit(address to) external; + ```*/ + #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct revokeCredit_1Call { + #[allow(missing_docs)] + pub to: ::alloy_sol_types::private::Address, + } + ///Container type for the return parameters of the [`revokeCredit(address)`](revokeCredit_1Call) function. + #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct revokeCredit_1Return {} + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + const _: () = { + use ::alloy_sol_types; + { + #[doc(hidden)] + type UnderlyingSolTuple<'a> = (::alloy_sol_types::sol_data::Address,); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = (::alloy_sol_types::private::Address,); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: revokeCredit_1Call) -> Self { + (value.to,) + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for revokeCredit_1Call { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self { to: tuple.0 } + } + } + } + { + #[doc(hidden)] + type UnderlyingSolTuple<'a> = (); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = (); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: revokeCredit_1Return) -> Self { + () + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for revokeCredit_1Return { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self {} + } + } + } + #[automatically_derived] + impl alloy_sol_types::SolCall for revokeCredit_1Call { + type Parameters<'a> = (::alloy_sol_types::sol_data::Address,); + type Token<'a> = as alloy_sol_types::SolType>::Token<'a>; + type Return = revokeCredit_1Return; + type ReturnTuple<'a> = (); + type ReturnToken<'a> = as alloy_sol_types::SolType>::Token<'a>; + const SIGNATURE: &'static str = "revokeCredit(address)"; + const SELECTOR: [u8; 4] = [168u8, 239u8, 140u8, 175u8]; + #[inline] + fn new<'a>( + tuple: as alloy_sol_types::SolType>::RustType, + ) -> Self { + tuple.into() + } + #[inline] + fn tokenize(&self) -> Self::Token<'_> { + ( + <::alloy_sol_types::sol_data::Address as alloy_sol_types::SolType>::tokenize( + &self.to, + ), + ) + } + #[inline] + fn abi_decode_returns( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + as alloy_sol_types::SolType>::abi_decode_sequence( + data, validate, + ) + .map(Into::into) + } + } + }; + /**Function with signature `setAccountSponsor(address)` and selector `0x8e0948b6`. + ```solidity + function setAccountSponsor(address sponsor) external; + ```*/ + #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct setAccountSponsorCall { + #[allow(missing_docs)] + pub sponsor: ::alloy_sol_types::private::Address, + } + ///Container type for the return parameters of the [`setAccountSponsor(address)`](setAccountSponsorCall) function. 
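// Editor's sketch: overloaded Solidity functions are disambiguated with
// numeric suffixes in the generated bindings, so the two revokeCredit
// overloads above map to distinct call structs. The SIGNATURE constants
// record which overload is which; the zero address is purely illustrative.
fn revoke_credit_overloads() {
    use alloy_sol_types::SolCall;
    assert_eq!(revokeCredit_0Call::SIGNATURE, "revokeCredit(address,address)");
    assert_eq!(revokeCredit_1Call::SIGNATURE, "revokeCredit(address)");
    let owner = alloy_primitives::Address::ZERO;
    let _with_caller = revokeCredit_0Call { to: owner, caller: owner }; // revokeCredit(address,address)
    let _plain = revokeCredit_1Call { to: owner };                      // revokeCredit(address)
}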
+ #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct setAccountSponsorReturn {} + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + const _: () = { + use ::alloy_sol_types; + { + #[doc(hidden)] + type UnderlyingSolTuple<'a> = (::alloy_sol_types::sol_data::Address,); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = (::alloy_sol_types::private::Address,); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: setAccountSponsorCall) -> Self { + (value.sponsor,) + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for setAccountSponsorCall { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self { sponsor: tuple.0 } + } + } + } + { + #[doc(hidden)] + type UnderlyingSolTuple<'a> = (); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = (); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: setAccountSponsorReturn) -> Self { + () + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for setAccountSponsorReturn { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self {} + } + } + } + #[automatically_derived] + impl alloy_sol_types::SolCall for setAccountSponsorCall { + type Parameters<'a> = (::alloy_sol_types::sol_data::Address,); + type Token<'a> = as alloy_sol_types::SolType>::Token<'a>; + type Return = setAccountSponsorReturn; + type ReturnTuple<'a> = (); + type ReturnToken<'a> = as alloy_sol_types::SolType>::Token<'a>; + const SIGNATURE: &'static str = "setAccountSponsor(address)"; + const SELECTOR: [u8; 4] = [142u8, 9u8, 72u8, 182u8]; + #[inline] + fn new<'a>( + tuple: as alloy_sol_types::SolType>::RustType, + ) -> Self { + tuple.into() + } + #[inline] + fn tokenize(&self) -> Self::Token<'_> { + ( + <::alloy_sol_types::sol_data::Address as alloy_sol_types::SolType>::tokenize( + &self.sponsor, + ), + ) + } + #[inline] + fn abi_decode_returns( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + as alloy_sol_types::SolType>::abi_decode_sequence( + data, validate, + ) + .map(Into::into) + } + } + }; + /**Function with signature `setAccountStatus(address,uint8)` and selector `0x0ad2b0a1`. + ```solidity + function setAccountStatus(address subscriber, TtlStatus ttlStatus) external; + ```*/ + #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct setAccountStatusCall { + #[allow(missing_docs)] + pub subscriber: ::alloy_sol_types::private::Address, + #[allow(missing_docs)] + pub ttlStatus: ::RustType, + } + ///Container type for the return parameters of the [`setAccountStatus(address,uint8)`](setAccountStatusCall) function. 
+ #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct setAccountStatusReturn {} + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + const _: () = { + use ::alloy_sol_types; + { + #[doc(hidden)] + type UnderlyingSolTuple<'a> = (::alloy_sol_types::sol_data::Address, TtlStatus); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = ( + ::alloy_sol_types::private::Address, + ::RustType, + ); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: setAccountStatusCall) -> Self { + (value.subscriber, value.ttlStatus) + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for setAccountStatusCall { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self { + subscriber: tuple.0, + ttlStatus: tuple.1, + } + } + } + } + { + #[doc(hidden)] + type UnderlyingSolTuple<'a> = (); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = (); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: setAccountStatusReturn) -> Self { + () + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for setAccountStatusReturn { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self {} + } + } + } + #[automatically_derived] + impl alloy_sol_types::SolCall for setAccountStatusCall { + type Parameters<'a> = (::alloy_sol_types::sol_data::Address, TtlStatus); + type Token<'a> = as alloy_sol_types::SolType>::Token<'a>; + type Return = setAccountStatusReturn; + type ReturnTuple<'a> = (); + type ReturnToken<'a> = as alloy_sol_types::SolType>::Token<'a>; + const SIGNATURE: &'static str = "setAccountStatus(address,uint8)"; + const SELECTOR: [u8; 4] = [10u8, 210u8, 176u8, 161u8]; + #[inline] + fn new<'a>( + tuple: as alloy_sol_types::SolType>::RustType, + ) -> Self { + tuple.into() + } + #[inline] + fn tokenize(&self) -> Self::Token<'_> { + ( + <::alloy_sol_types::sol_data::Address as alloy_sol_types::SolType>::tokenize( + &self.subscriber, + ), + ::tokenize(&self.ttlStatus), + ) + } + #[inline] + fn abi_decode_returns( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + as alloy_sol_types::SolType>::abi_decode_sequence( + data, validate, + ) + .map(Into::into) + } + } + }; + ///Container for all the [`ICreditFacade`](self) function calls. 
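// Editor's sketch: setAccountStatus(address,uint8) takes TtlStatus, a Solidity
// user-defined value type over uint8. Assuming TtlStatus follows the same
// wrapper pattern as `Kind` in the machine facade later in this diff, the
// Rust-side field is a plain u8; the status value below is purely illustrative.
fn encode_set_account_status() -> Vec<u8> {
    use alloy_sol_types::SolCall;
    let call = setAccountStatusCall {
        subscriber: alloy_primitives::Address::ZERO,
        ttlStatus: 1u8, // hypothetical status value
    };
    call.abi_encode() // 4-byte selector + two 32-byte words (address, uint8)
}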
+ pub enum ICreditFacadeCalls { + #[allow(missing_docs)] + approveCredit_0(approveCredit_0Call), + #[allow(missing_docs)] + approveCredit_1(approveCredit_1Call), + #[allow(missing_docs)] + approveCredit_2(approveCredit_2Call), + #[allow(missing_docs)] + buyCredit_0(buyCredit_0Call), + #[allow(missing_docs)] + buyCredit_1(buyCredit_1Call), + #[allow(missing_docs)] + getAccount(getAccountCall), + #[allow(missing_docs)] + getCreditApproval(getCreditApprovalCall), + #[allow(missing_docs)] + revokeCredit_0(revokeCredit_0Call), + #[allow(missing_docs)] + revokeCredit_1(revokeCredit_1Call), + #[allow(missing_docs)] + setAccountSponsor(setAccountSponsorCall), + #[allow(missing_docs)] + setAccountStatus(setAccountStatusCall), + } + #[automatically_derived] + impl ICreditFacadeCalls { + /// All the selectors of this enum. + /// + /// Note that the selectors might not be in the same order as the variants. + /// No guarantees are made about the order of the selectors. + /// + /// Prefer using `SolInterface` methods instead. + pub const SELECTORS: &'static [[u8; 4usize]] = &[ + [1u8, 233u8, 139u8, 250u8], + [10u8, 210u8, 176u8, 161u8], + [17u8, 43u8, 101u8, 23u8], + [142u8, 9u8, 72u8, 182u8], + [142u8, 78u8, 111u8, 6u8], + [160u8, 170u8, 43u8, 101u8], + [163u8, 142u8, 174u8, 159u8], + [168u8, 74u8, 21u8, 53u8], + [168u8, 239u8, 140u8, 175u8], + [205u8, 155u8, 232u8, 15u8], + [251u8, 203u8, 192u8, 241u8], + ]; + } + #[automatically_derived] + impl alloy_sol_types::SolInterface for ICreditFacadeCalls { + const NAME: &'static str = "ICreditFacadeCalls"; + const MIN_DATA_LENGTH: usize = 0usize; + const COUNT: usize = 11usize; + #[inline] + fn selector(&self) -> [u8; 4] { + match self { + Self::approveCredit_0(_) => { + ::SELECTOR + } + Self::approveCredit_1(_) => { + ::SELECTOR + } + Self::approveCredit_2(_) => { + ::SELECTOR + } + Self::buyCredit_0(_) => ::SELECTOR, + Self::buyCredit_1(_) => ::SELECTOR, + Self::getAccount(_) => ::SELECTOR, + Self::getCreditApproval(_) => { + ::SELECTOR + } + Self::revokeCredit_0(_) => { + ::SELECTOR + } + Self::revokeCredit_1(_) => { + ::SELECTOR + } + Self::setAccountSponsor(_) => { + ::SELECTOR + } + Self::setAccountStatus(_) => { + ::SELECTOR + } + } + } + #[inline] + fn selector_at(i: usize) -> ::core::option::Option<[u8; 4]> { + Self::SELECTORS.get(i).copied() + } + #[inline] + fn valid_selector(selector: [u8; 4]) -> bool { + Self::SELECTORS.binary_search(&selector).is_ok() + } + #[inline] + #[allow(non_snake_case)] + fn abi_decode_raw( + selector: [u8; 4], + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + static DECODE_SHIMS: &[fn( + &[u8], + bool, + ) + -> alloy_sol_types::Result] = &[ + { + fn approveCredit_0( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + ::abi_decode_raw( + data, validate, + ) + .map(ICreditFacadeCalls::approveCredit_0) + } + approveCredit_0 + }, + { + fn setAccountStatus( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + ::abi_decode_raw( + data, validate, + ) + .map(ICreditFacadeCalls::setAccountStatus) + } + setAccountStatus + }, + { + fn approveCredit_1( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + ::abi_decode_raw( + data, validate, + ) + .map(ICreditFacadeCalls::approveCredit_1) + } + approveCredit_1 + }, + { + fn setAccountSponsor( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + ::abi_decode_raw( + data, validate, + ) + .map(ICreditFacadeCalls::setAccountSponsor) + } + setAccountSponsor + }, + { + fn buyCredit_0( + data: &[u8], + validate: 
bool, + ) -> alloy_sol_types::Result { + ::abi_decode_raw( + data, validate, + ) + .map(ICreditFacadeCalls::buyCredit_0) + } + buyCredit_0 + }, + { + fn approveCredit_2( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + ::abi_decode_raw( + data, validate, + ) + .map(ICreditFacadeCalls::approveCredit_2) + } + approveCredit_2 + }, + { + fn buyCredit_1( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + ::abi_decode_raw( + data, validate, + ) + .map(ICreditFacadeCalls::buyCredit_1) + } + buyCredit_1 + }, + { + fn revokeCredit_0( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + ::abi_decode_raw( + data, validate, + ) + .map(ICreditFacadeCalls::revokeCredit_0) + } + revokeCredit_0 + }, + { + fn revokeCredit_1( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + ::abi_decode_raw( + data, validate, + ) + .map(ICreditFacadeCalls::revokeCredit_1) + } + revokeCredit_1 + }, + { + fn getCreditApproval( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + ::abi_decode_raw( + data, validate, + ) + .map(ICreditFacadeCalls::getCreditApproval) + } + getCreditApproval + }, + { + fn getAccount( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + ::abi_decode_raw(data, validate) + .map(ICreditFacadeCalls::getAccount) + } + getAccount + }, + ]; + let Ok(idx) = Self::SELECTORS.binary_search(&selector) else { + return Err(alloy_sol_types::Error::unknown_selector( + ::NAME, + selector, + )); + }; + DECODE_SHIMS[idx](data, validate) + } + #[inline] + fn abi_encoded_size(&self) -> usize { + match self { + Self::approveCredit_0(inner) => { + ::abi_encoded_size(inner) + } + Self::approveCredit_1(inner) => { + ::abi_encoded_size(inner) + } + Self::approveCredit_2(inner) => { + ::abi_encoded_size(inner) + } + Self::buyCredit_0(inner) => { + ::abi_encoded_size(inner) + } + Self::buyCredit_1(inner) => { + ::abi_encoded_size(inner) + } + Self::getAccount(inner) => { + ::abi_encoded_size(inner) + } + Self::getCreditApproval(inner) => { + ::abi_encoded_size(inner) + } + Self::revokeCredit_0(inner) => { + ::abi_encoded_size(inner) + } + Self::revokeCredit_1(inner) => { + ::abi_encoded_size(inner) + } + Self::setAccountSponsor(inner) => { + ::abi_encoded_size(inner) + } + Self::setAccountStatus(inner) => { + ::abi_encoded_size(inner) + } + } + } + #[inline] + fn abi_encode_raw(&self, out: &mut alloy_sol_types::private::Vec) { + match self { + Self::approveCredit_0(inner) => { + ::abi_encode_raw(inner, out) + } + Self::approveCredit_1(inner) => { + ::abi_encode_raw(inner, out) + } + Self::approveCredit_2(inner) => { + ::abi_encode_raw(inner, out) + } + Self::buyCredit_0(inner) => { + ::abi_encode_raw(inner, out) + } + Self::buyCredit_1(inner) => { + ::abi_encode_raw(inner, out) + } + Self::getAccount(inner) => { + ::abi_encode_raw(inner, out) + } + Self::getCreditApproval(inner) => { + ::abi_encode_raw(inner, out) + } + Self::revokeCredit_0(inner) => { + ::abi_encode_raw(inner, out) + } + Self::revokeCredit_1(inner) => { + ::abi_encode_raw(inner, out) + } + Self::setAccountSponsor(inner) => { + ::abi_encode_raw(inner, out) + } + Self::setAccountStatus(inner) => { + ::abi_encode_raw(inner, out) + } + } + } + } + ///Container for all the [`ICreditFacade`](self) events. 
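// Editor's sketch: the ICreditFacadeCalls enum above lets calldata for any of
// the eleven functions be decoded in one step. This assumes the alloy-sol-types
// 0.8 SolInterface API, whose provided `abi_decode` takes the same `validate`
// flag as `abi_decode_raw` above.
fn route_calldata(calldata: &[u8]) {
    use alloy_sol_types::SolInterface;
    match ICreditFacadeCalls::abi_decode(calldata, true) {
        Ok(ICreditFacadeCalls::buyCredit_1(call)) => {
            println!("buyCredit for {}", call.recipient);
        }
        Ok(other) => println!("another ICreditFacade call, selector {:?}", other.selector()),
        Err(err) => eprintln!("not an ICreditFacade call: {err}"),
    }
}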
+ pub enum ICreditFacadeEvents { + #[allow(missing_docs)] + CreditApproved(CreditApproved), + #[allow(missing_docs)] + CreditDebited(CreditDebited), + #[allow(missing_docs)] + CreditPurchased(CreditPurchased), + #[allow(missing_docs)] + CreditRevoked(CreditRevoked), + } + #[automatically_derived] + impl ICreditFacadeEvents { + /// All the selectors of this enum. + /// + /// Note that the selectors might not be in the same order as the variants. + /// No guarantees are made about the order of the selectors. + /// + /// Prefer using `SolInterface` methods instead. + pub const SELECTORS: &'static [[u8; 32usize]] = &[ + [ + 92u8, 193u8, 181u8, 40u8, 97u8, 67u8, 201u8, 209u8, 248u8, 225u8, 192u8, 144u8, + 181u8, 215u8, 48u8, 35u8, 136u8, 171u8, 148u8, 251u8, 69u8, 177u8, 225u8, 142u8, + 99u8, 216u8, 176u8, 142u8, 248u8, 192u8, 247u8, 195u8, + ], + [ + 172u8, 242u8, 189u8, 201u8, 150u8, 150u8, 218u8, 53u8, 203u8, 254u8, 48u8, 14u8, + 139u8, 125u8, 61u8, 51u8, 127u8, 252u8, 153u8, 24u8, 216u8, 84u8, 124u8, 88u8, + 239u8, 139u8, 88u8, 162u8, 14u8, 192u8, 117u8, 223u8, + ], + [ + 198u8, 151u8, 9u8, 230u8, 247u8, 103u8, 218u8, 215u8, 204u8, 177u8, 156u8, 96u8, + 92u8, 60u8, 96u8, 43u8, 244u8, 130u8, 236u8, 180u8, 38u8, 5u8, 157u8, 124u8, 219u8, + 94u8, 87u8, 55u8, 208u8, 91u8, 34u8, 248u8, + ], + [ + 230u8, 61u8, 26u8, 144u8, 92u8, 12u8, 188u8, 127u8, 37u8, 200u8, 247u8, 26u8, + 245u8, 236u8, 183u8, 68u8, 183u8, 113u8, 178u8, 15u8, 149u8, 79u8, 57u8, 225u8, + 101u8, 77u8, 77u8, 131u8, 143u8, 147u8, 184u8, 158u8, + ], + ]; + } + #[automatically_derived] + impl alloy_sol_types::SolEventInterface for ICreditFacadeEvents { + const NAME: &'static str = "ICreditFacadeEvents"; + const COUNT: usize = 4usize; + fn decode_raw_log( + topics: &[alloy_sol_types::Word], + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + match topics.first().copied() { + Some(::SIGNATURE_HASH) => { + ::decode_raw_log( + topics, data, validate, + ) + .map(Self::CreditApproved) + } + Some(::SIGNATURE_HASH) => { + ::decode_raw_log( + topics, data, validate, + ) + .map(Self::CreditDebited) + } + Some(::SIGNATURE_HASH) => { + ::decode_raw_log( + topics, data, validate, + ) + .map(Self::CreditPurchased) + } + Some(::SIGNATURE_HASH) => { + ::decode_raw_log( + topics, data, validate, + ) + .map(Self::CreditRevoked) + } + _ => alloy_sol_types::private::Err(alloy_sol_types::Error::InvalidLog { + name: ::NAME, + log: alloy_sol_types::private::Box::new( + alloy_sol_types::private::LogData::new_unchecked( + topics.to_vec(), + data.to_vec().into(), + ), + ), + }), + } + } + } + #[automatically_derived] + impl alloy_sol_types::private::IntoLogData for ICreditFacadeEvents { + fn to_log_data(&self) -> alloy_sol_types::private::LogData { + match self { + Self::CreditApproved(inner) => { + alloy_sol_types::private::IntoLogData::to_log_data(inner) + } + Self::CreditDebited(inner) => { + alloy_sol_types::private::IntoLogData::to_log_data(inner) + } + Self::CreditPurchased(inner) => { + alloy_sol_types::private::IntoLogData::to_log_data(inner) + } + Self::CreditRevoked(inner) => { + alloy_sol_types::private::IntoLogData::to_log_data(inner) + } + } + } + fn into_log_data(self) -> alloy_sol_types::private::LogData { + match self { + Self::CreditApproved(inner) => { + alloy_sol_types::private::IntoLogData::into_log_data(inner) + } + Self::CreditDebited(inner) => { + alloy_sol_types::private::IntoLogData::into_log_data(inner) + } + Self::CreditPurchased(inner) => { + alloy_sol_types::private::IntoLogData::into_log_data(inner) + } + 
Self::CreditRevoked(inner) => { + alloy_sol_types::private::IntoLogData::into_log_data(inner) + } + } + } + } +} diff --git a/ipc-storage/sol-facade/crates/facade/src/credit_facade/mod.rs b/ipc-storage/sol-facade/crates/facade/src/credit_facade/mod.rs new file mode 100644 index 0000000000..914ef02d3f --- /dev/null +++ b/ipc-storage/sol-facade/crates/facade/src/credit_facade/mod.rs @@ -0,0 +1,8 @@ +// Copyright 2022-2024 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT +#![allow(unused_imports, clippy::all, rustdoc::all)] +//! This module contains the sol! generated bindings for solidity contracts. +//! This is autogenerated code. +//! Do not manually edit these files. +//! These files may be overwritten by the codegen system at any time. +pub mod r#icreditfacade; diff --git a/ipc-storage/sol-facade/crates/facade/src/gas_facade/igasfacade.rs b/ipc-storage/sol-facade/crates/facade/src/gas_facade/igasfacade.rs new file mode 100644 index 0000000000..bc63f90d4f --- /dev/null +++ b/ipc-storage/sol-facade/crates/facade/src/gas_facade/igasfacade.rs @@ -0,0 +1,341 @@ +// Copyright 2022-2024 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT +/** + +Generated by the following Solidity interface... +```solidity +interface IGasFacade { + event GasSponsorSet(address sponsor); + event GasSponsorUnset(); +} +``` + +...which was generated by the following JSON ABI: +```json +[ + { + "type": "event", + "name": "GasSponsorSet", + "inputs": [ + { + "name": "sponsor", + "type": "address", + "indexed": false, + "internalType": "address" + } + ], + "anonymous": false + }, + { + "type": "event", + "name": "GasSponsorUnset", + "inputs": [], + "anonymous": false + } +] +```*/ +#[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style, + clippy::empty_structs_with_brackets +)] +pub mod IGasFacade { + use super::*; + use ::alloy_sol_types; + /// The creation / init bytecode of the contract. + /// + /// ```text + ///0x + /// ``` + #[rustfmt::skip] + #[allow(clippy::all)] + pub static BYTECODE: alloy_sol_types::private::Bytes = alloy_sol_types::private::Bytes::from_static( + b"", + ); + /// The runtime bytecode of the contract, as deployed on the network. + /// + /// ```text + ///0x + /// ``` + #[rustfmt::skip] + #[allow(clippy::all)] + pub static DEPLOYED_BYTECODE: alloy_sol_types::private::Bytes = alloy_sol_types::private::Bytes::from_static( + b"", + ); + /**Event with signature `GasSponsorSet(address)` and selector `0xe9c438da6edc711056efd08e60609c24627b30c4a355a568d36d3cc0add0bfe1`. 
+ ```solidity + event GasSponsorSet(address sponsor); + ```*/ + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + #[derive(Clone)] + pub struct GasSponsorSet { + #[allow(missing_docs)] + pub sponsor: ::alloy_sol_types::private::Address, + } + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + const _: () = { + use ::alloy_sol_types; + #[automatically_derived] + impl alloy_sol_types::SolEvent for GasSponsorSet { + type DataTuple<'a> = (::alloy_sol_types::sol_data::Address,); + type DataToken<'a> = as alloy_sol_types::SolType>::Token<'a>; + type TopicList = (alloy_sol_types::sol_data::FixedBytes<32>,); + const SIGNATURE: &'static str = "GasSponsorSet(address)"; + const SIGNATURE_HASH: alloy_sol_types::private::B256 = + alloy_sol_types::private::B256::new([ + 233u8, 196u8, 56u8, 218u8, 110u8, 220u8, 113u8, 16u8, 86u8, 239u8, 208u8, + 142u8, 96u8, 96u8, 156u8, 36u8, 98u8, 123u8, 48u8, 196u8, 163u8, 85u8, 165u8, + 104u8, 211u8, 109u8, 60u8, 192u8, 173u8, 208u8, 191u8, 225u8, + ]); + const ANONYMOUS: bool = false; + #[allow(unused_variables)] + #[inline] + fn new( + topics: ::RustType, + data: as alloy_sol_types::SolType>::RustType, + ) -> Self { + Self { sponsor: data.0 } + } + #[inline] + fn check_signature( + topics: &::RustType, + ) -> alloy_sol_types::Result<()> { + if topics.0 != Self::SIGNATURE_HASH { + return Err(alloy_sol_types::Error::invalid_event_signature_hash( + Self::SIGNATURE, + topics.0, + Self::SIGNATURE_HASH, + )); + } + Ok(()) + } + #[inline] + fn tokenize_body(&self) -> Self::DataToken<'_> { + ( + <::alloy_sol_types::sol_data::Address as alloy_sol_types::SolType>::tokenize( + &self.sponsor, + ), + ) + } + #[inline] + fn topics(&self) -> ::RustType { + (Self::SIGNATURE_HASH.into(),) + } + #[inline] + fn encode_topics_raw( + &self, + out: &mut [alloy_sol_types::abi::token::WordToken], + ) -> alloy_sol_types::Result<()> { + if out.len() < ::COUNT { + return Err(alloy_sol_types::Error::Overrun); + } + out[0usize] = alloy_sol_types::abi::token::WordToken(Self::SIGNATURE_HASH); + Ok(()) + } + } + #[automatically_derived] + impl alloy_sol_types::private::IntoLogData for GasSponsorSet { + fn to_log_data(&self) -> alloy_sol_types::private::LogData { + From::from(self) + } + fn into_log_data(self) -> alloy_sol_types::private::LogData { + From::from(&self) + } + } + #[automatically_derived] + impl From<&GasSponsorSet> for alloy_sol_types::private::LogData { + #[inline] + fn from(this: &GasSponsorSet) -> alloy_sol_types::private::LogData { + alloy_sol_types::SolEvent::encode_log_data(this) + } + } + }; + /**Event with signature `GasSponsorUnset()` and selector `0xd10f5c7821677a4b8658a83a5d5ac1c78324b2a44a9f634d5c53fbebc13674c4`. 
+ ```solidity + event GasSponsorUnset(); + ```*/ + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + #[derive(Clone)] + pub struct GasSponsorUnset {} + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + const _: () = { + use ::alloy_sol_types; + #[automatically_derived] + impl alloy_sol_types::SolEvent for GasSponsorUnset { + type DataTuple<'a> = (); + type DataToken<'a> = as alloy_sol_types::SolType>::Token<'a>; + type TopicList = (alloy_sol_types::sol_data::FixedBytes<32>,); + const SIGNATURE: &'static str = "GasSponsorUnset()"; + const SIGNATURE_HASH: alloy_sol_types::private::B256 = + alloy_sol_types::private::B256::new([ + 209u8, 15u8, 92u8, 120u8, 33u8, 103u8, 122u8, 75u8, 134u8, 88u8, 168u8, 58u8, + 93u8, 90u8, 193u8, 199u8, 131u8, 36u8, 178u8, 164u8, 74u8, 159u8, 99u8, 77u8, + 92u8, 83u8, 251u8, 235u8, 193u8, 54u8, 116u8, 196u8, + ]); + const ANONYMOUS: bool = false; + #[allow(unused_variables)] + #[inline] + fn new( + topics: ::RustType, + data: as alloy_sol_types::SolType>::RustType, + ) -> Self { + Self {} + } + #[inline] + fn check_signature( + topics: &::RustType, + ) -> alloy_sol_types::Result<()> { + if topics.0 != Self::SIGNATURE_HASH { + return Err(alloy_sol_types::Error::invalid_event_signature_hash( + Self::SIGNATURE, + topics.0, + Self::SIGNATURE_HASH, + )); + } + Ok(()) + } + #[inline] + fn tokenize_body(&self) -> Self::DataToken<'_> { + () + } + #[inline] + fn topics(&self) -> ::RustType { + (Self::SIGNATURE_HASH.into(),) + } + #[inline] + fn encode_topics_raw( + &self, + out: &mut [alloy_sol_types::abi::token::WordToken], + ) -> alloy_sol_types::Result<()> { + if out.len() < ::COUNT { + return Err(alloy_sol_types::Error::Overrun); + } + out[0usize] = alloy_sol_types::abi::token::WordToken(Self::SIGNATURE_HASH); + Ok(()) + } + } + #[automatically_derived] + impl alloy_sol_types::private::IntoLogData for GasSponsorUnset { + fn to_log_data(&self) -> alloy_sol_types::private::LogData { + From::from(self) + } + fn into_log_data(self) -> alloy_sol_types::private::LogData { + From::from(&self) + } + } + #[automatically_derived] + impl From<&GasSponsorUnset> for alloy_sol_types::private::LogData { + #[inline] + fn from(this: &GasSponsorUnset) -> alloy_sol_types::private::LogData { + alloy_sol_types::SolEvent::encode_log_data(this) + } + } + }; + ///Container for all the [`IGasFacade`](self) events. + pub enum IGasFacadeEvents { + #[allow(missing_docs)] + GasSponsorSet(GasSponsorSet), + #[allow(missing_docs)] + GasSponsorUnset(GasSponsorUnset), + } + #[automatically_derived] + impl IGasFacadeEvents { + /// All the selectors of this enum. + /// + /// Note that the selectors might not be in the same order as the variants. + /// No guarantees are made about the order of the selectors. + /// + /// Prefer using `SolInterface` methods instead. 
+ pub const SELECTORS: &'static [[u8; 32usize]] = &[ + [ + 209u8, 15u8, 92u8, 120u8, 33u8, 103u8, 122u8, 75u8, 134u8, 88u8, 168u8, 58u8, 93u8, + 90u8, 193u8, 199u8, 131u8, 36u8, 178u8, 164u8, 74u8, 159u8, 99u8, 77u8, 92u8, 83u8, + 251u8, 235u8, 193u8, 54u8, 116u8, 196u8, + ], + [ + 233u8, 196u8, 56u8, 218u8, 110u8, 220u8, 113u8, 16u8, 86u8, 239u8, 208u8, 142u8, + 96u8, 96u8, 156u8, 36u8, 98u8, 123u8, 48u8, 196u8, 163u8, 85u8, 165u8, 104u8, + 211u8, 109u8, 60u8, 192u8, 173u8, 208u8, 191u8, 225u8, + ], + ]; + } + #[automatically_derived] + impl alloy_sol_types::SolEventInterface for IGasFacadeEvents { + const NAME: &'static str = "IGasFacadeEvents"; + const COUNT: usize = 2usize; + fn decode_raw_log( + topics: &[alloy_sol_types::Word], + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + match topics.first().copied() { + Some(::SIGNATURE_HASH) => { + ::decode_raw_log( + topics, data, validate, + ) + .map(Self::GasSponsorSet) + } + Some(::SIGNATURE_HASH) => { + ::decode_raw_log( + topics, data, validate, + ) + .map(Self::GasSponsorUnset) + } + _ => alloy_sol_types::private::Err(alloy_sol_types::Error::InvalidLog { + name: ::NAME, + log: alloy_sol_types::private::Box::new( + alloy_sol_types::private::LogData::new_unchecked( + topics.to_vec(), + data.to_vec().into(), + ), + ), + }), + } + } + } + #[automatically_derived] + impl alloy_sol_types::private::IntoLogData for IGasFacadeEvents { + fn to_log_data(&self) -> alloy_sol_types::private::LogData { + match self { + Self::GasSponsorSet(inner) => { + alloy_sol_types::private::IntoLogData::to_log_data(inner) + } + Self::GasSponsorUnset(inner) => { + alloy_sol_types::private::IntoLogData::to_log_data(inner) + } + } + } + fn into_log_data(self) -> alloy_sol_types::private::LogData { + match self { + Self::GasSponsorSet(inner) => { + alloy_sol_types::private::IntoLogData::into_log_data(inner) + } + Self::GasSponsorUnset(inner) => { + alloy_sol_types::private::IntoLogData::into_log_data(inner) + } + } + } + } +} diff --git a/ipc-storage/sol-facade/crates/facade/src/gas_facade/mod.rs b/ipc-storage/sol-facade/crates/facade/src/gas_facade/mod.rs new file mode 100644 index 0000000000..a58eefe02a --- /dev/null +++ b/ipc-storage/sol-facade/crates/facade/src/gas_facade/mod.rs @@ -0,0 +1,8 @@ +// Copyright 2022-2024 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT +#![allow(unused_imports, clippy::all, rustdoc::all)] +//! This module contains the sol! generated bindings for solidity contracts. +//! This is autogenerated code. +//! Do not manually edit these files. +//! These files may be overwritten by the codegen system at any time. 
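// Editor's sketch, referring to the IGasFacadeEvents enum generated in
// igasfacade.rs above: decode_raw_log routes a log by its topic0 signature
// hash. Assumes the `gas` feature is enabled and this lives inside the crate;
// topics and data would normally come from a transaction receipt.
fn classify_gas_log(topics: &[alloy_sol_types::Word], data: &[u8]) {
    use alloy_sol_types::SolEventInterface;
    use crate::gas_facade::igasfacade::IGasFacade;
    match IGasFacade::IGasFacadeEvents::decode_raw_log(topics, data, true) {
        Ok(IGasFacade::IGasFacadeEvents::GasSponsorSet(ev)) => {
            println!("gas sponsor set to {}", ev.sponsor);
        }
        Ok(IGasFacade::IGasFacadeEvents::GasSponsorUnset(_)) => println!("gas sponsor unset"),
        Err(err) => eprintln!("not an IGasFacade event: {err}"),
    }
}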
+pub mod r#igasfacade; diff --git a/ipc-storage/sol-facade/crates/facade/src/lib.rs b/ipc-storage/sol-facade/crates/facade/src/lib.rs new file mode 100644 index 0000000000..7de19e7d6b --- /dev/null +++ b/ipc-storage/sol-facade/crates/facade/src/lib.rs @@ -0,0 +1,217 @@ +// Copyright 2022-2024 Protocol Labs +// Copyright 2025 Recall Contributors +// SPDX-License-Identifier: Apache-2.0, MIT + +#![allow(dead_code)] + +pub use alloy_primitives as primitives; + +pub mod types; + +#[cfg(feature = "blob-reader")] +mod blobreader_facade; +#[cfg(feature = "blob-reader")] +pub mod blob_reader { + pub type Events = + crate::blobreader_facade::iblobreaderfacade::IBlobReaderFacade::IBlobReaderFacadeEvents; + pub type ReadRequestClosed = + crate::blobreader_facade::iblobreaderfacade::IBlobReaderFacade::ReadRequestClosed; + pub type ReadRequestOpened = + crate::blobreader_facade::iblobreaderfacade::IBlobReaderFacade::ReadRequestOpened; + pub type ReadRequestPending = + crate::blobreader_facade::iblobreaderfacade::IBlobReaderFacade::ReadRequestPending; +} + +#[cfg(feature = "blobs")] +mod blobs_facade; +#[cfg(feature = "blobs")] +pub mod blobs { + pub type Events = crate::blobs_facade::iblobsfacade::IBlobsFacade::IBlobsFacadeEvents; + pub type BlobAdded = crate::blobs_facade::iblobsfacade::IBlobsFacade::BlobAdded; + pub type BlobDeleted = crate::blobs_facade::iblobsfacade::IBlobsFacade::BlobDeleted; + pub type BlobFinalized = crate::blobs_facade::iblobsfacade::IBlobsFacade::BlobFinalized; + pub type BlobPending = crate::blobs_facade::iblobsfacade::IBlobsFacade::BlobPending; + + pub type Calls = crate::blobs_facade::iblobsfacade::IBlobsFacade::IBlobsFacadeCalls; + #[allow(non_camel_case_types)] + pub type addBlobCall = crate::blobs_facade::iblobsfacade::IBlobsFacade::addBlobCall; + #[allow(non_camel_case_types)] + pub type deleteBlobCall = crate::blobs_facade::iblobsfacade::IBlobsFacade::deleteBlobCall; + #[allow(non_camel_case_types)] + pub type getBlobCall = crate::blobs_facade::iblobsfacade::IBlobsFacade::getBlobCall; + #[allow(non_camel_case_types)] + pub type getStatsCall = crate::blobs_facade::iblobsfacade::IBlobsFacade::getStatsCall; + #[allow(non_camel_case_types)] + pub type overwriteBlobCall = crate::blobs_facade::iblobsfacade::IBlobsFacade::overwriteBlobCall; + #[allow(non_camel_case_types)] + pub type trimBlobExpiriesCall = + crate::blobs_facade::iblobsfacade::IBlobsFacade::trimBlobExpiriesCall; + + pub type Subscription = crate::blobs_facade::iblobsfacade::IBlobsFacade::Subscription; + pub type Blob = crate::blobs_facade::iblobsfacade::IBlobsFacade::Blob; + pub type SubnetStats = crate::blobs_facade::iblobsfacade::IBlobsFacade::SubnetStats; + pub type TrimBlobExpiries = crate::blobs_facade::iblobsfacade::IBlobsFacade::TrimBlobExpiries; +} + +#[cfg(feature = "bucket")] +mod bucket_facade; +#[cfg(feature = "bucket")] +pub mod bucket { + pub type Events = crate::bucket_facade::ibucketfacade::IBucketFacade::IBucketFacadeEvents; + pub type ObjectAdded = crate::bucket_facade::ibucketfacade::IBucketFacade::ObjectAdded; + pub type ObjectDeleted = crate::bucket_facade::ibucketfacade::IBucketFacade::ObjectDeleted; + pub type ObjectMetadataUpdated = + crate::bucket_facade::ibucketfacade::IBucketFacade::ObjectMetadataUpdated; + + pub type Calls = crate::bucket_facade::ibucketfacade::IBucketFacade::IBucketFacadeCalls; + #[allow(non_camel_case_types)] + pub type addObject_0Call = crate::bucket_facade::ibucketfacade::IBucketFacade::addObject_0Call; + #[allow(non_camel_case_types)] + pub type addObject_1Call 
= crate::bucket_facade::ibucketfacade::IBucketFacade::addObject_1Call; + #[allow(non_camel_case_types)] + pub type deleteObjectCall = + crate::bucket_facade::ibucketfacade::IBucketFacade::deleteObjectCall; + #[allow(non_camel_case_types)] + pub type getObjectCall = crate::bucket_facade::ibucketfacade::IBucketFacade::getObjectCall; + #[allow(non_camel_case_types)] + pub type queryObjects_0Call = + crate::bucket_facade::ibucketfacade::IBucketFacade::queryObjects_0Call; + #[allow(non_camel_case_types)] + pub type queryObjects_1Call = + crate::bucket_facade::ibucketfacade::IBucketFacade::queryObjects_1Call; + #[allow(non_camel_case_types)] + pub type queryObjects_2Call = + crate::bucket_facade::ibucketfacade::IBucketFacade::queryObjects_2Call; + #[allow(non_camel_case_types)] + pub type queryObjects_3Call = + crate::bucket_facade::ibucketfacade::IBucketFacade::queryObjects_3Call; + #[allow(non_camel_case_types)] + pub type queryObjects_4Call = + crate::bucket_facade::ibucketfacade::IBucketFacade::queryObjects_4Call; + #[allow(non_camel_case_types)] + pub type updateObjectMetadataCall = + crate::bucket_facade::ibucketfacade::IBucketFacade::updateObjectMetadataCall; + + pub type ObjectValue = crate::bucket_facade::ibucketfacade::IBucketFacade::ObjectValue; + pub type KeyValue = crate::bucket_facade::ibucketfacade::IBucketFacade::KeyValue; + pub type Query = crate::bucket_facade::ibucketfacade::IBucketFacade::Query; + pub type Object = crate::bucket_facade::ibucketfacade::IBucketFacade::Object; + pub type ObjectState = crate::bucket_facade::ibucketfacade::IBucketFacade::ObjectState; +} + +#[cfg(feature = "config")] +mod config_facade; +#[cfg(feature = "config")] +pub mod config { + pub type Events = crate::config_facade::iconfigfacade::IConfigFacade::IConfigFacadeEvents; + pub type ConfigAdminSet = crate::config_facade::iconfigfacade::IConfigFacade::ConfigAdminSet; + pub type ConfigSet = crate::config_facade::iconfigfacade::IConfigFacade::ConfigSet; +} + +#[cfg(feature = "credit")] +mod credit_facade; +#[cfg(feature = "credit")] +pub mod credit { + pub type Events = crate::credit_facade::icreditfacade::ICreditFacade::ICreditFacadeEvents; + pub type CreditApproved = crate::credit_facade::icreditfacade::ICreditFacade::CreditApproved; + pub type CreditDebited = crate::credit_facade::icreditfacade::ICreditFacade::CreditDebited; + pub type CreditPurchased = crate::credit_facade::icreditfacade::ICreditFacade::CreditPurchased; + pub type CreditRevoked = crate::credit_facade::icreditfacade::ICreditFacade::CreditRevoked; + + pub type Calls = crate::credit_facade::icreditfacade::ICreditFacade::ICreditFacadeCalls; + #[allow(non_camel_case_types)] + pub type buyCredit_0Call = crate::credit_facade::icreditfacade::ICreditFacade::buyCredit_0Call; + #[allow(non_camel_case_types)] + pub type buyCredit_1Call = crate::credit_facade::icreditfacade::ICreditFacade::buyCredit_1Call; + #[allow(non_camel_case_types)] + pub type approveCredit_0Call = + crate::credit_facade::icreditfacade::ICreditFacade::approveCredit_0Call; + #[allow(non_camel_case_types)] + pub type approveCredit_1Call = + crate::credit_facade::icreditfacade::ICreditFacade::approveCredit_1Call; + #[allow(non_camel_case_types)] + pub type approveCredit_2Call = + crate::credit_facade::icreditfacade::ICreditFacade::approveCredit_2Call; + #[allow(non_camel_case_types)] + pub type revokeCredit_0Call = + crate::credit_facade::icreditfacade::ICreditFacade::revokeCredit_0Call; + #[allow(non_camel_case_types)] + pub type revokeCredit_1Call = + 
crate::credit_facade::icreditfacade::ICreditFacade::revokeCredit_1Call; + #[allow(non_camel_case_types)] + pub type setAccountSponsorCall = + crate::credit_facade::icreditfacade::ICreditFacade::setAccountSponsorCall; + #[allow(non_camel_case_types)] + pub type getAccountCall = crate::credit_facade::icreditfacade::ICreditFacade::getAccountCall; + #[allow(non_camel_case_types)] + pub type getCreditApprovalCall = + crate::credit_facade::icreditfacade::ICreditFacade::getCreditApprovalCall; + #[allow(non_camel_case_types)] + pub type setAccountStatusCall = + crate::credit_facade::icreditfacade::ICreditFacade::setAccountStatusCall; + + pub type Account = crate::credit_facade::icreditfacade::ICreditFacade::Account; + pub type Approval = crate::credit_facade::icreditfacade::ICreditFacade::Approval; + pub type CreditApproval = crate::credit_facade::icreditfacade::ICreditFacade::CreditApproval; + pub type TtlStatus = crate::credit_facade::icreditfacade::ICreditFacade::TtlStatus; +} + +#[cfg(feature = "gas")] +mod gas_facade; +#[cfg(feature = "gas")] +pub mod gas { + pub type Events = crate::gas_facade::igasfacade::IGasFacade::IGasFacadeEvents; + pub type GasSponsorSet = crate::gas_facade::igasfacade::IGasFacade::GasSponsorSet; + pub type GasSponsorUnset = crate::gas_facade::igasfacade::IGasFacade::GasSponsorUnset; +} + +#[cfg(feature = "machine")] +mod machine_facade; +#[cfg(feature = "machine")] +pub mod machine { + pub type Events = crate::machine_facade::imachinefacade::IMachineFacade::IMachineFacadeEvents; + pub type MachineCreated = crate::machine_facade::imachinefacade::IMachineFacade::MachineCreated; + pub type MachineInitialized = + crate::machine_facade::imachinefacade::IMachineFacade::MachineInitialized; + + pub type Calls = crate::machine_facade::imachinefacade::IMachineFacade::IMachineFacadeCalls; + #[allow(non_camel_case_types)] + pub type createBucket_0Call = + crate::machine_facade::imachinefacade::IMachineFacade::createBucket_0Call; + #[allow(non_camel_case_types)] + pub type createBucket_1Call = + crate::machine_facade::imachinefacade::IMachineFacade::createBucket_1Call; + #[allow(non_camel_case_types)] + pub type createBucket_2Call = + crate::machine_facade::imachinefacade::IMachineFacade::createBucket_2Call; + #[allow(non_camel_case_types)] + pub type listBuckets_0Call = + crate::machine_facade::imachinefacade::IMachineFacade::listBuckets_0Call; + #[allow(non_camel_case_types)] + pub type listBuckets_1Call = + crate::machine_facade::imachinefacade::IMachineFacade::listBuckets_1Call; + + pub type Machine = crate::machine_facade::imachinefacade::IMachineFacade::Machine; + pub type Kind = crate::machine_facade::imachinefacade::IMachineFacade::Kind; + pub type KeyValue = crate::machine_facade::imachinefacade::IMachineFacade::KeyValue; +} + +#[cfg(feature = "timehub")] +mod timehub_facade; +#[cfg(feature = "timehub")] +pub mod timehub { + pub type Events = crate::timehub_facade::itimehubfacade::ITimehubFacade::ITimehubFacadeEvents; + pub type EventPushed = crate::timehub_facade::itimehubfacade::ITimehubFacade::EventPushed; + + pub type Calls = crate::timehub_facade::itimehubfacade::ITimehubFacade::ITimehubFacadeCalls; + #[allow(non_camel_case_types)] + pub type pushCall = crate::timehub_facade::itimehubfacade::ITimehubFacade::pushCall; + #[allow(non_camel_case_types)] + pub type getLeafAtCall = crate::timehub_facade::itimehubfacade::ITimehubFacade::getLeafAtCall; + #[allow(non_camel_case_types)] + pub type getRootCall = 
crate::timehub_facade::itimehubfacade::ITimehubFacade::getRootCall; + #[allow(non_camel_case_types)] + pub type getPeaksCall = crate::timehub_facade::itimehubfacade::ITimehubFacade::getPeaksCall; + #[allow(non_camel_case_types)] + pub type getCountCall = crate::timehub_facade::itimehubfacade::ITimehubFacade::getCountCall; +} diff --git a/ipc-storage/sol-facade/crates/facade/src/machine_facade/imachinefacade.rs b/ipc-storage/sol-facade/crates/facade/src/machine_facade/imachinefacade.rs new file mode 100644 index 0000000000..5956ed1633 --- /dev/null +++ b/ipc-storage/sol-facade/crates/facade/src/machine_facade/imachinefacade.rs @@ -0,0 +1,1871 @@ +// Copyright 2022-2024 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT +/** + +Generated by the following Solidity interface... +```solidity +interface IMachineFacade { + type Kind is uint8; + struct KeyValue { + string key; + string value; + } + struct Machine { + Kind kind; + address addr; + KeyValue[] metadata; + } + + event MachineCreated(uint8 indexed kind, address indexed owner, bytes metadata); + event MachineInitialized(uint8 indexed kind, address machineAddress); + + function createBucket() external returns (address); + function createBucket(address owner, KeyValue[] memory metadata) external returns (address); + function createBucket(address owner) external returns (address); + function listBuckets() external view returns (Machine[] memory); + function listBuckets(address owner) external view returns (Machine[] memory); +} +``` + +...which was generated by the following JSON ABI: +```json +[ + { + "type": "function", + "name": "createBucket", + "inputs": [], + "outputs": [ + { + "name": "", + "type": "address", + "internalType": "address" + } + ], + "stateMutability": "nonpayable" + }, + { + "type": "function", + "name": "createBucket", + "inputs": [ + { + "name": "owner", + "type": "address", + "internalType": "address" + }, + { + "name": "metadata", + "type": "tuple[]", + "internalType": "struct KeyValue[]", + "components": [ + { + "name": "key", + "type": "string", + "internalType": "string" + }, + { + "name": "value", + "type": "string", + "internalType": "string" + } + ] + } + ], + "outputs": [ + { + "name": "", + "type": "address", + "internalType": "address" + } + ], + "stateMutability": "nonpayable" + }, + { + "type": "function", + "name": "createBucket", + "inputs": [ + { + "name": "owner", + "type": "address", + "internalType": "address" + } + ], + "outputs": [ + { + "name": "", + "type": "address", + "internalType": "address" + } + ], + "stateMutability": "nonpayable" + }, + { + "type": "function", + "name": "listBuckets", + "inputs": [], + "outputs": [ + { + "name": "", + "type": "tuple[]", + "internalType": "struct Machine[]", + "components": [ + { + "name": "kind", + "type": "uint8", + "internalType": "enum Kind" + }, + { + "name": "addr", + "type": "address", + "internalType": "address" + }, + { + "name": "metadata", + "type": "tuple[]", + "internalType": "struct KeyValue[]", + "components": [ + { + "name": "key", + "type": "string", + "internalType": "string" + }, + { + "name": "value", + "type": "string", + "internalType": "string" + } + ] + } + ] + } + ], + "stateMutability": "view" + }, + { + "type": "function", + "name": "listBuckets", + "inputs": [ + { + "name": "owner", + "type": "address", + "internalType": "address" + } + ], + "outputs": [ + { + "name": "", + "type": "tuple[]", + "internalType": "struct Machine[]", + "components": [ + { + "name": "kind", + "type": "uint8", + "internalType": "enum Kind" + 
}, + { + "name": "addr", + "type": "address", + "internalType": "address" + }, + { + "name": "metadata", + "type": "tuple[]", + "internalType": "struct KeyValue[]", + "components": [ + { + "name": "key", + "type": "string", + "internalType": "string" + }, + { + "name": "value", + "type": "string", + "internalType": "string" + } + ] + } + ] + } + ], + "stateMutability": "view" + }, + { + "type": "event", + "name": "MachineCreated", + "inputs": [ + { + "name": "kind", + "type": "uint8", + "indexed": true, + "internalType": "uint8" + }, + { + "name": "owner", + "type": "address", + "indexed": true, + "internalType": "address" + }, + { + "name": "metadata", + "type": "bytes", + "indexed": false, + "internalType": "bytes" + } + ], + "anonymous": false + }, + { + "type": "event", + "name": "MachineInitialized", + "inputs": [ + { + "name": "kind", + "type": "uint8", + "indexed": true, + "internalType": "uint8" + }, + { + "name": "machineAddress", + "type": "address", + "indexed": false, + "internalType": "address" + } + ], + "anonymous": false + } +] +```*/ +#[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style, + clippy::empty_structs_with_brackets +)] +pub mod IMachineFacade { + use super::*; + use ::alloy_sol_types; + /// The creation / init bytecode of the contract. + /// + /// ```text + ///0x + /// ``` + #[rustfmt::skip] + #[allow(clippy::all)] + pub static BYTECODE: alloy_sol_types::private::Bytes = alloy_sol_types::private::Bytes::from_static( + b"", + ); + /// The runtime bytecode of the contract, as deployed on the network. + /// + /// ```text + ///0x + /// ``` + #[rustfmt::skip] + #[allow(clippy::all)] + pub static DEPLOYED_BYTECODE: alloy_sol_types::private::Bytes = alloy_sol_types::private::Bytes::from_static( + b"", + ); + #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct Kind(u8); + const _: () = { + use ::alloy_sol_types; + #[automatically_derived] + impl alloy_sol_types::private::SolTypeValue for u8 { + #[inline] + fn stv_to_tokens( + &self, + ) -> <::alloy_sol_types::sol_data::Uint<8> as alloy_sol_types::SolType>::Token<'_> + { + alloy_sol_types::private::SolTypeValue::< + ::alloy_sol_types::sol_data::Uint<8>, + >::stv_to_tokens(self) + } + #[inline] + fn stv_eip712_data_word(&self) -> alloy_sol_types::Word { + <::alloy_sol_types::sol_data::Uint<8> as alloy_sol_types::SolType>::tokenize(self).0 + } + #[inline] + fn stv_abi_encode_packed_to(&self, out: &mut alloy_sol_types::private::Vec) { + <::alloy_sol_types::sol_data::Uint< + 8, + > as alloy_sol_types::SolType>::abi_encode_packed_to(self, out) + } + #[inline] + fn stv_abi_packed_encoded_size(&self) -> usize { + <::alloy_sol_types::sol_data::Uint<8> as alloy_sol_types::SolType>::abi_encoded_size( + self, + ) + } + } + #[automatically_derived] + impl Kind { + /// The Solidity type name. + pub const NAME: &'static str = stringify!(@ name); + /// Convert from the underlying value type. + #[inline] + pub const fn from(value: u8) -> Self { + Self(value) + } + /// Return the underlying value. + #[inline] + pub const fn into(self) -> u8 { + self.0 + } + /// Return the single encoding of this value, delegating to the + /// underlying type. + #[inline] + pub fn abi_encode(&self) -> alloy_sol_types::private::Vec { + ::abi_encode(&self.0) + } + /// Return the packed encoding of this value, delegating to the + /// underlying type. 
+ #[inline] + pub fn abi_encode_packed(&self) -> alloy_sol_types::private::Vec { + ::abi_encode_packed(&self.0) + } + } + #[automatically_derived] + impl alloy_sol_types::SolType for Kind { + type RustType = u8; + type Token<'a> = + <::alloy_sol_types::sol_data::Uint<8> as alloy_sol_types::SolType>::Token<'a>; + const SOL_NAME: &'static str = Self::NAME; + const ENCODED_SIZE: Option = + <::alloy_sol_types::sol_data::Uint<8> as alloy_sol_types::SolType>::ENCODED_SIZE; + const PACKED_ENCODED_SIZE: Option = <::alloy_sol_types::sol_data::Uint< + 8, + > as alloy_sol_types::SolType>::PACKED_ENCODED_SIZE; + #[inline] + fn valid_token(token: &Self::Token<'_>) -> bool { + Self::type_check(token).is_ok() + } + #[inline] + fn type_check(token: &Self::Token<'_>) -> alloy_sol_types::Result<()> { + <::alloy_sol_types::sol_data::Uint<8> as alloy_sol_types::SolType>::type_check( + token, + ) + } + #[inline] + fn detokenize(token: Self::Token<'_>) -> Self::RustType { + <::alloy_sol_types::sol_data::Uint<8> as alloy_sol_types::SolType>::detokenize( + token, + ) + } + } + #[automatically_derived] + impl alloy_sol_types::EventTopic for Kind { + #[inline] + fn topic_preimage_length(rust: &Self::RustType) -> usize { + <::alloy_sol_types::sol_data::Uint< + 8, + > as alloy_sol_types::EventTopic>::topic_preimage_length(rust) + } + #[inline] + fn encode_topic_preimage( + rust: &Self::RustType, + out: &mut alloy_sol_types::private::Vec, + ) { + <::alloy_sol_types::sol_data::Uint< + 8, + > as alloy_sol_types::EventTopic>::encode_topic_preimage(rust, out) + } + #[inline] + fn encode_topic(rust: &Self::RustType) -> alloy_sol_types::abi::token::WordToken { + <::alloy_sol_types::sol_data::Uint<8> as alloy_sol_types::EventTopic>::encode_topic( + rust, + ) + } + } + }; + /**```solidity + struct KeyValue { string key; string value; } + ```*/ + #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct KeyValue { + #[allow(missing_docs)] + pub key: ::alloy_sol_types::private::String, + #[allow(missing_docs)] + pub value: ::alloy_sol_types::private::String, + } + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + const _: () = { + use ::alloy_sol_types; + #[doc(hidden)] + type UnderlyingSolTuple<'a> = ( + ::alloy_sol_types::sol_data::String, + ::alloy_sol_types::sol_data::String, + ); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = ( + ::alloy_sol_types::private::String, + ::alloy_sol_types::private::String, + ); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: KeyValue) -> Self { + (value.key, value.value) + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for KeyValue { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self { + key: tuple.0, + value: tuple.1, + } + } + } + #[automatically_derived] + impl alloy_sol_types::SolValue for KeyValue { + type SolType = Self; + } + #[automatically_derived] + impl alloy_sol_types::private::SolTypeValue for KeyValue { + #[inline] + fn stv_to_tokens(&self) -> ::Token<'_> { + ( + <::alloy_sol_types::sol_data::String as alloy_sol_types::SolType>::tokenize( + &self.key, + ), + <::alloy_sol_types::sol_data::String as alloy_sol_types::SolType>::tokenize( + 
&self.value, + ), + ) + } + #[inline] + fn stv_abi_encoded_size(&self) -> usize { + if let Some(size) = ::ENCODED_SIZE { + return size; + } + let tuple = + as ::core::convert::From>::from(self.clone()); + as alloy_sol_types::SolType>::abi_encoded_size(&tuple) + } + #[inline] + fn stv_eip712_data_word(&self) -> alloy_sol_types::Word { + ::eip712_hash_struct(self) + } + #[inline] + fn stv_abi_encode_packed_to(&self, out: &mut alloy_sol_types::private::Vec) { + let tuple = + as ::core::convert::From>::from(self.clone()); + as alloy_sol_types::SolType>::abi_encode_packed_to( + &tuple, out, + ) + } + #[inline] + fn stv_abi_packed_encoded_size(&self) -> usize { + if let Some(size) = ::PACKED_ENCODED_SIZE { + return size; + } + let tuple = + as ::core::convert::From>::from(self.clone()); + as alloy_sol_types::SolType>::abi_packed_encoded_size( + &tuple, + ) + } + } + #[automatically_derived] + impl alloy_sol_types::SolType for KeyValue { + type RustType = Self; + type Token<'a> = as alloy_sol_types::SolType>::Token<'a>; + const SOL_NAME: &'static str = ::NAME; + const ENCODED_SIZE: Option = + as alloy_sol_types::SolType>::ENCODED_SIZE; + const PACKED_ENCODED_SIZE: Option = + as alloy_sol_types::SolType>::PACKED_ENCODED_SIZE; + #[inline] + fn valid_token(token: &Self::Token<'_>) -> bool { + as alloy_sol_types::SolType>::valid_token(token) + } + #[inline] + fn detokenize(token: Self::Token<'_>) -> Self::RustType { + let tuple = as alloy_sol_types::SolType>::detokenize(token); + >>::from(tuple) + } + } + #[automatically_derived] + impl alloy_sol_types::SolStruct for KeyValue { + const NAME: &'static str = "KeyValue"; + #[inline] + fn eip712_root_type() -> alloy_sol_types::private::Cow<'static, str> { + alloy_sol_types::private::Cow::Borrowed("KeyValue(string key,string value)") + } + #[inline] + fn eip712_components( + ) -> alloy_sol_types::private::Vec> + { + alloy_sol_types::private::Vec::new() + } + #[inline] + fn eip712_encode_type() -> alloy_sol_types::private::Cow<'static, str> { + ::eip712_root_type() + } + #[inline] + fn eip712_encode_data(&self) -> alloy_sol_types::private::Vec { + [ + <::alloy_sol_types::sol_data::String as alloy_sol_types::SolType>::eip712_data_word( + &self.key, + ) + .0, + <::alloy_sol_types::sol_data::String as alloy_sol_types::SolType>::eip712_data_word( + &self.value, + ) + .0, + ] + .concat() + } + } + #[automatically_derived] + impl alloy_sol_types::EventTopic for KeyValue { + #[inline] + fn topic_preimage_length(rust: &Self::RustType) -> usize { + 0usize + + <::alloy_sol_types::sol_data::String as alloy_sol_types::EventTopic>::topic_preimage_length( + &rust.key, + ) + + <::alloy_sol_types::sol_data::String as alloy_sol_types::EventTopic>::topic_preimage_length( + &rust.value, + ) + } + #[inline] + fn encode_topic_preimage( + rust: &Self::RustType, + out: &mut alloy_sol_types::private::Vec, + ) { + out.reserve(::topic_preimage_length(rust)); + <::alloy_sol_types::sol_data::String as alloy_sol_types::EventTopic>::encode_topic_preimage( + &rust.key, + out, + ); + <::alloy_sol_types::sol_data::String as alloy_sol_types::EventTopic>::encode_topic_preimage( + &rust.value, + out, + ); + } + #[inline] + fn encode_topic(rust: &Self::RustType) -> alloy_sol_types::abi::token::WordToken { + let mut out = alloy_sol_types::private::Vec::new(); + ::encode_topic_preimage(rust, &mut out); + alloy_sol_types::abi::token::WordToken(alloy_sol_types::private::keccak256(out)) + } + } + }; + /**```solidity + struct Machine { Kind kind; address addr; KeyValue[] metadata; } + ```*/ + 
#[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct Machine { + #[allow(missing_docs)] + pub kind: ::RustType, + #[allow(missing_docs)] + pub addr: ::alloy_sol_types::private::Address, + #[allow(missing_docs)] + pub metadata: + ::alloy_sol_types::private::Vec<::RustType>, + } + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + const _: () = { + use ::alloy_sol_types; + #[doc(hidden)] + type UnderlyingSolTuple<'a> = ( + Kind, + ::alloy_sol_types::sol_data::Address, + ::alloy_sol_types::sol_data::Array, + ); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = ( + ::RustType, + ::alloy_sol_types::private::Address, + ::alloy_sol_types::private::Vec<::RustType>, + ); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: Machine) -> Self { + (value.kind, value.addr, value.metadata) + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for Machine { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self { + kind: tuple.0, + addr: tuple.1, + metadata: tuple.2, + } + } + } + #[automatically_derived] + impl alloy_sol_types::SolValue for Machine { + type SolType = Self; + } + #[automatically_derived] + impl alloy_sol_types::private::SolTypeValue for Machine { + #[inline] + fn stv_to_tokens(&self) -> ::Token<'_> { + ( + ::tokenize(&self.kind), + <::alloy_sol_types::sol_data::Address as alloy_sol_types::SolType>::tokenize( + &self.addr, + ), + <::alloy_sol_types::sol_data::Array< + KeyValue, + > as alloy_sol_types::SolType>::tokenize(&self.metadata), + ) + } + #[inline] + fn stv_abi_encoded_size(&self) -> usize { + if let Some(size) = ::ENCODED_SIZE { + return size; + } + let tuple = + as ::core::convert::From>::from(self.clone()); + as alloy_sol_types::SolType>::abi_encoded_size(&tuple) + } + #[inline] + fn stv_eip712_data_word(&self) -> alloy_sol_types::Word { + ::eip712_hash_struct(self) + } + #[inline] + fn stv_abi_encode_packed_to(&self, out: &mut alloy_sol_types::private::Vec) { + let tuple = + as ::core::convert::From>::from(self.clone()); + as alloy_sol_types::SolType>::abi_encode_packed_to( + &tuple, out, + ) + } + #[inline] + fn stv_abi_packed_encoded_size(&self) -> usize { + if let Some(size) = ::PACKED_ENCODED_SIZE { + return size; + } + let tuple = + as ::core::convert::From>::from(self.clone()); + as alloy_sol_types::SolType>::abi_packed_encoded_size( + &tuple, + ) + } + } + #[automatically_derived] + impl alloy_sol_types::SolType for Machine { + type RustType = Self; + type Token<'a> = as alloy_sol_types::SolType>::Token<'a>; + const SOL_NAME: &'static str = ::NAME; + const ENCODED_SIZE: Option = + as alloy_sol_types::SolType>::ENCODED_SIZE; + const PACKED_ENCODED_SIZE: Option = + as alloy_sol_types::SolType>::PACKED_ENCODED_SIZE; + #[inline] + fn valid_token(token: &Self::Token<'_>) -> bool { + as alloy_sol_types::SolType>::valid_token(token) + } + #[inline] + fn detokenize(token: Self::Token<'_>) -> Self::RustType { + let tuple = as alloy_sol_types::SolType>::detokenize(token); + >>::from(tuple) + } + } + #[automatically_derived] + impl alloy_sol_types::SolStruct for Machine { + const NAME: &'static str = "Machine"; + #[inline] + fn eip712_root_type() -> 
alloy_sol_types::private::Cow<'static, str> { + alloy_sol_types::private::Cow::Borrowed( + "Machine(uint8 kind,address addr,KeyValue[] metadata)", + ) + } + #[inline] + fn eip712_components( + ) -> alloy_sol_types::private::Vec> + { + let mut components = alloy_sol_types::private::Vec::with_capacity(1); + components.push(::eip712_root_type()); + components.extend(::eip712_components()); + components + } + #[inline] + fn eip712_encode_data(&self) -> alloy_sol_types::private::Vec { + [ + ::eip712_data_word(&self.kind).0, + <::alloy_sol_types::sol_data::Address as alloy_sol_types::SolType>::eip712_data_word( + &self.addr, + ) + .0, + <::alloy_sol_types::sol_data::Array< + KeyValue, + > as alloy_sol_types::SolType>::eip712_data_word(&self.metadata) + .0, + ] + .concat() + } + } + #[automatically_derived] + impl alloy_sol_types::EventTopic for Machine { + #[inline] + fn topic_preimage_length(rust: &Self::RustType) -> usize { + 0usize + + ::topic_preimage_length( + &rust.kind, + ) + + <::alloy_sol_types::sol_data::Address as alloy_sol_types::EventTopic>::topic_preimage_length( + &rust.addr, + ) + + <::alloy_sol_types::sol_data::Array< + KeyValue, + > as alloy_sol_types::EventTopic>::topic_preimage_length( + &rust.metadata, + ) + } + #[inline] + fn encode_topic_preimage( + rust: &Self::RustType, + out: &mut alloy_sol_types::private::Vec, + ) { + out.reserve(::topic_preimage_length(rust)); + ::encode_topic_preimage(&rust.kind, out); + <::alloy_sol_types::sol_data::Address as alloy_sol_types::EventTopic>::encode_topic_preimage( + &rust.addr, + out, + ); + <::alloy_sol_types::sol_data::Array< + KeyValue, + > as alloy_sol_types::EventTopic>::encode_topic_preimage( + &rust.metadata, + out, + ); + } + #[inline] + fn encode_topic(rust: &Self::RustType) -> alloy_sol_types::abi::token::WordToken { + let mut out = alloy_sol_types::private::Vec::new(); + ::encode_topic_preimage(rust, &mut out); + alloy_sol_types::abi::token::WordToken(alloy_sol_types::private::keccak256(out)) + } + } + }; + /**Event with signature `MachineCreated(uint8,address,bytes)` and selector `0x78344973573899e5da988496ab97476b3702ecfca371c6b25a61460f989d40d1`. 
+ ```solidity + event MachineCreated(uint8 indexed kind, address indexed owner, bytes metadata); + ```*/ + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + #[derive(Clone)] + pub struct MachineCreated { + #[allow(missing_docs)] + pub kind: u8, + #[allow(missing_docs)] + pub owner: ::alloy_sol_types::private::Address, + #[allow(missing_docs)] + pub metadata: ::alloy_sol_types::private::Bytes, + } + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + const _: () = { + use ::alloy_sol_types; + #[automatically_derived] + impl alloy_sol_types::SolEvent for MachineCreated { + type DataTuple<'a> = (::alloy_sol_types::sol_data::Bytes,); + type DataToken<'a> = as alloy_sol_types::SolType>::Token<'a>; + type TopicList = ( + alloy_sol_types::sol_data::FixedBytes<32>, + ::alloy_sol_types::sol_data::Uint<8>, + ::alloy_sol_types::sol_data::Address, + ); + const SIGNATURE: &'static str = "MachineCreated(uint8,address,bytes)"; + const SIGNATURE_HASH: alloy_sol_types::private::B256 = + alloy_sol_types::private::B256::new([ + 120u8, 52u8, 73u8, 115u8, 87u8, 56u8, 153u8, 229u8, 218u8, 152u8, 132u8, 150u8, + 171u8, 151u8, 71u8, 107u8, 55u8, 2u8, 236u8, 252u8, 163u8, 113u8, 198u8, 178u8, + 90u8, 97u8, 70u8, 15u8, 152u8, 157u8, 64u8, 209u8, + ]); + const ANONYMOUS: bool = false; + #[allow(unused_variables)] + #[inline] + fn new( + topics: ::RustType, + data: as alloy_sol_types::SolType>::RustType, + ) -> Self { + Self { + kind: topics.1, + owner: topics.2, + metadata: data.0, + } + } + #[inline] + fn check_signature( + topics: &::RustType, + ) -> alloy_sol_types::Result<()> { + if topics.0 != Self::SIGNATURE_HASH { + return Err(alloy_sol_types::Error::invalid_event_signature_hash( + Self::SIGNATURE, + topics.0, + Self::SIGNATURE_HASH, + )); + } + Ok(()) + } + #[inline] + fn tokenize_body(&self) -> Self::DataToken<'_> { + ( + <::alloy_sol_types::sol_data::Bytes as alloy_sol_types::SolType>::tokenize( + &self.metadata, + ), + ) + } + #[inline] + fn topics(&self) -> ::RustType { + ( + Self::SIGNATURE_HASH.into(), + self.kind.clone(), + self.owner.clone(), + ) + } + #[inline] + fn encode_topics_raw( + &self, + out: &mut [alloy_sol_types::abi::token::WordToken], + ) -> alloy_sol_types::Result<()> { + if out.len() < ::COUNT { + return Err(alloy_sol_types::Error::Overrun); + } + out[0usize] = alloy_sol_types::abi::token::WordToken(Self::SIGNATURE_HASH); + out[1usize] = <::alloy_sol_types::sol_data::Uint< + 8, + > as alloy_sol_types::EventTopic>::encode_topic(&self.kind); + out[2usize] = <::alloy_sol_types::sol_data::Address as alloy_sol_types::EventTopic>::encode_topic( + &self.owner, + ); + Ok(()) + } + } + #[automatically_derived] + impl alloy_sol_types::private::IntoLogData for MachineCreated { + fn to_log_data(&self) -> alloy_sol_types::private::LogData { + From::from(self) + } + fn into_log_data(self) -> alloy_sol_types::private::LogData { + From::from(&self) + } + } + #[automatically_derived] + impl From<&MachineCreated> for alloy_sol_types::private::LogData { + #[inline] + fn from(this: &MachineCreated) -> alloy_sol_types::private::LogData { + alloy_sol_types::SolEvent::encode_log_data(this) + } + } + }; + /**Event with signature `MachineInitialized(uint8,address)` and selector `0x8f7252642373d5f0b89a0c5cd9cd242e5cd5bb1a36aec623756e4f52a8c1ea6e`. 
+ ```solidity + event MachineInitialized(uint8 indexed kind, address machineAddress); + ```*/ + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + #[derive(Clone)] + pub struct MachineInitialized { + #[allow(missing_docs)] + pub kind: u8, + #[allow(missing_docs)] + pub machineAddress: ::alloy_sol_types::private::Address, + } + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + const _: () = { + use ::alloy_sol_types; + #[automatically_derived] + impl alloy_sol_types::SolEvent for MachineInitialized { + type DataTuple<'a> = (::alloy_sol_types::sol_data::Address,); + type DataToken<'a> = as alloy_sol_types::SolType>::Token<'a>; + type TopicList = ( + alloy_sol_types::sol_data::FixedBytes<32>, + ::alloy_sol_types::sol_data::Uint<8>, + ); + const SIGNATURE: &'static str = "MachineInitialized(uint8,address)"; + const SIGNATURE_HASH: alloy_sol_types::private::B256 = + alloy_sol_types::private::B256::new([ + 143u8, 114u8, 82u8, 100u8, 35u8, 115u8, 213u8, 240u8, 184u8, 154u8, 12u8, 92u8, + 217u8, 205u8, 36u8, 46u8, 92u8, 213u8, 187u8, 26u8, 54u8, 174u8, 198u8, 35u8, + 117u8, 110u8, 79u8, 82u8, 168u8, 193u8, 234u8, 110u8, + ]); + const ANONYMOUS: bool = false; + #[allow(unused_variables)] + #[inline] + fn new( + topics: ::RustType, + data: as alloy_sol_types::SolType>::RustType, + ) -> Self { + Self { + kind: topics.1, + machineAddress: data.0, + } + } + #[inline] + fn check_signature( + topics: &::RustType, + ) -> alloy_sol_types::Result<()> { + if topics.0 != Self::SIGNATURE_HASH { + return Err(alloy_sol_types::Error::invalid_event_signature_hash( + Self::SIGNATURE, + topics.0, + Self::SIGNATURE_HASH, + )); + } + Ok(()) + } + #[inline] + fn tokenize_body(&self) -> Self::DataToken<'_> { + ( + <::alloy_sol_types::sol_data::Address as alloy_sol_types::SolType>::tokenize( + &self.machineAddress, + ), + ) + } + #[inline] + fn topics(&self) -> ::RustType { + (Self::SIGNATURE_HASH.into(), self.kind.clone()) + } + #[inline] + fn encode_topics_raw( + &self, + out: &mut [alloy_sol_types::abi::token::WordToken], + ) -> alloy_sol_types::Result<()> { + if out.len() < ::COUNT { + return Err(alloy_sol_types::Error::Overrun); + } + out[0usize] = alloy_sol_types::abi::token::WordToken(Self::SIGNATURE_HASH); + out[1usize] = <::alloy_sol_types::sol_data::Uint< + 8, + > as alloy_sol_types::EventTopic>::encode_topic(&self.kind); + Ok(()) + } + } + #[automatically_derived] + impl alloy_sol_types::private::IntoLogData for MachineInitialized { + fn to_log_data(&self) -> alloy_sol_types::private::LogData { + From::from(self) + } + fn into_log_data(self) -> alloy_sol_types::private::LogData { + From::from(&self) + } + } + #[automatically_derived] + impl From<&MachineInitialized> for alloy_sol_types::private::LogData { + #[inline] + fn from(this: &MachineInitialized) -> alloy_sol_types::private::LogData { + alloy_sol_types::SolEvent::encode_log_data(this) + } + } + }; + /**Function with signature `createBucket()` and selector `0x4aa82ff5`. + ```solidity + function createBucket() external returns (address); + ```*/ + #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct createBucket_0Call {} + ///Container type for the return parameters of the [`createBucket()`](createBucket_0Call) function. 
+ #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct createBucket_0Return { + #[allow(missing_docs)] + pub _0: ::alloy_sol_types::private::Address, + } + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + const _: () = { + use ::alloy_sol_types; + { + #[doc(hidden)] + type UnderlyingSolTuple<'a> = (); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = (); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: createBucket_0Call) -> Self { + () + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for createBucket_0Call { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self {} + } + } + } + { + #[doc(hidden)] + type UnderlyingSolTuple<'a> = (::alloy_sol_types::sol_data::Address,); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = (::alloy_sol_types::private::Address,); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: createBucket_0Return) -> Self { + (value._0,) + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for createBucket_0Return { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self { _0: tuple.0 } + } + } + } + #[automatically_derived] + impl alloy_sol_types::SolCall for createBucket_0Call { + type Parameters<'a> = (); + type Token<'a> = as alloy_sol_types::SolType>::Token<'a>; + type Return = createBucket_0Return; + type ReturnTuple<'a> = (::alloy_sol_types::sol_data::Address,); + type ReturnToken<'a> = as alloy_sol_types::SolType>::Token<'a>; + const SIGNATURE: &'static str = "createBucket()"; + const SELECTOR: [u8; 4] = [74u8, 168u8, 47u8, 245u8]; + #[inline] + fn new<'a>( + tuple: as alloy_sol_types::SolType>::RustType, + ) -> Self { + tuple.into() + } + #[inline] + fn tokenize(&self) -> Self::Token<'_> { + () + } + #[inline] + fn abi_decode_returns( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + as alloy_sol_types::SolType>::abi_decode_sequence( + data, validate, + ) + .map(Into::into) + } + } + }; + /**Function with signature `createBucket(address,(string,string)[])` and selector `0xe129ed90`. + ```solidity + function createBucket(address owner, KeyValue[] memory metadata) external returns (address); + ```*/ + #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct createBucket_1Call { + #[allow(missing_docs)] + pub owner: ::alloy_sol_types::private::Address, + #[allow(missing_docs)] + pub metadata: + ::alloy_sol_types::private::Vec<::RustType>, + } + ///Container type for the return parameters of the [`createBucket(address,(string,string)[])`](createBucket_1Call) function. 
+ #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct createBucket_1Return { + #[allow(missing_docs)] + pub _0: ::alloy_sol_types::private::Address, + } + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + const _: () = { + use ::alloy_sol_types; + { + #[doc(hidden)] + type UnderlyingSolTuple<'a> = ( + ::alloy_sol_types::sol_data::Address, + ::alloy_sol_types::sol_data::Array, + ); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = ( + ::alloy_sol_types::private::Address, + ::alloy_sol_types::private::Vec<::RustType>, + ); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: createBucket_1Call) -> Self { + (value.owner, value.metadata) + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for createBucket_1Call { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self { + owner: tuple.0, + metadata: tuple.1, + } + } + } + } + { + #[doc(hidden)] + type UnderlyingSolTuple<'a> = (::alloy_sol_types::sol_data::Address,); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = (::alloy_sol_types::private::Address,); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: createBucket_1Return) -> Self { + (value._0,) + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for createBucket_1Return { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self { _0: tuple.0 } + } + } + } + #[automatically_derived] + impl alloy_sol_types::SolCall for createBucket_1Call { + type Parameters<'a> = ( + ::alloy_sol_types::sol_data::Address, + ::alloy_sol_types::sol_data::Array, + ); + type Token<'a> = as alloy_sol_types::SolType>::Token<'a>; + type Return = createBucket_1Return; + type ReturnTuple<'a> = (::alloy_sol_types::sol_data::Address,); + type ReturnToken<'a> = as alloy_sol_types::SolType>::Token<'a>; + const SIGNATURE: &'static str = "createBucket(address,(string,string)[])"; + const SELECTOR: [u8; 4] = [225u8, 41u8, 237u8, 144u8]; + #[inline] + fn new<'a>( + tuple: as alloy_sol_types::SolType>::RustType, + ) -> Self { + tuple.into() + } + #[inline] + fn tokenize(&self) -> Self::Token<'_> { + ( + <::alloy_sol_types::sol_data::Address as alloy_sol_types::SolType>::tokenize( + &self.owner, + ), + <::alloy_sol_types::sol_data::Array< + KeyValue, + > as alloy_sol_types::SolType>::tokenize(&self.metadata), + ) + } + #[inline] + fn abi_decode_returns( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + as alloy_sol_types::SolType>::abi_decode_sequence( + data, validate, + ) + .map(Into::into) + } + } + }; + /**Function with signature `createBucket(address)` and selector `0xf6d6c420`. 
+ ```solidity + function createBucket(address owner) external returns (address); + ```*/ + #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct createBucket_2Call { + #[allow(missing_docs)] + pub owner: ::alloy_sol_types::private::Address, + } + ///Container type for the return parameters of the [`createBucket(address)`](createBucket_2Call) function. + #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct createBucket_2Return { + #[allow(missing_docs)] + pub _0: ::alloy_sol_types::private::Address, + } + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + const _: () = { + use ::alloy_sol_types; + { + #[doc(hidden)] + type UnderlyingSolTuple<'a> = (::alloy_sol_types::sol_data::Address,); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = (::alloy_sol_types::private::Address,); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: createBucket_2Call) -> Self { + (value.owner,) + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for createBucket_2Call { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self { owner: tuple.0 } + } + } + } + { + #[doc(hidden)] + type UnderlyingSolTuple<'a> = (::alloy_sol_types::sol_data::Address,); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = (::alloy_sol_types::private::Address,); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: createBucket_2Return) -> Self { + (value._0,) + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for createBucket_2Return { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self { _0: tuple.0 } + } + } + } + #[automatically_derived] + impl alloy_sol_types::SolCall for createBucket_2Call { + type Parameters<'a> = (::alloy_sol_types::sol_data::Address,); + type Token<'a> = as alloy_sol_types::SolType>::Token<'a>; + type Return = createBucket_2Return; + type ReturnTuple<'a> = (::alloy_sol_types::sol_data::Address,); + type ReturnToken<'a> = as alloy_sol_types::SolType>::Token<'a>; + const SIGNATURE: &'static str = "createBucket(address)"; + const SELECTOR: [u8; 4] = [246u8, 214u8, 196u8, 32u8]; + #[inline] + fn new<'a>( + tuple: as alloy_sol_types::SolType>::RustType, + ) -> Self { + tuple.into() + } + #[inline] + fn tokenize(&self) -> Self::Token<'_> { + ( + <::alloy_sol_types::sol_data::Address as alloy_sol_types::SolType>::tokenize( + &self.owner, + ), + ) + } + #[inline] + fn abi_decode_returns( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + as alloy_sol_types::SolType>::abi_decode_sequence( + data, validate, + ) + .map(Into::into) + } + } + }; + /**Function with signature `listBuckets()` and selector `0x63c244c2`. 
+ ```solidity + function listBuckets() external view returns (Machine[] memory); + ```*/ + #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct listBuckets_0Call {} + ///Container type for the return parameters of the [`listBuckets()`](listBuckets_0Call) function. + #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct listBuckets_0Return { + #[allow(missing_docs)] + pub _0: ::alloy_sol_types::private::Vec<::RustType>, + } + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + const _: () = { + use ::alloy_sol_types; + { + #[doc(hidden)] + type UnderlyingSolTuple<'a> = (); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = (); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: listBuckets_0Call) -> Self { + () + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for listBuckets_0Call { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self {} + } + } + } + { + #[doc(hidden)] + type UnderlyingSolTuple<'a> = (::alloy_sol_types::sol_data::Array,); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = ( + ::alloy_sol_types::private::Vec<::RustType>, + ); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: listBuckets_0Return) -> Self { + (value._0,) + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for listBuckets_0Return { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self { _0: tuple.0 } + } + } + } + #[automatically_derived] + impl alloy_sol_types::SolCall for listBuckets_0Call { + type Parameters<'a> = (); + type Token<'a> = as alloy_sol_types::SolType>::Token<'a>; + type Return = listBuckets_0Return; + type ReturnTuple<'a> = (::alloy_sol_types::sol_data::Array,); + type ReturnToken<'a> = as alloy_sol_types::SolType>::Token<'a>; + const SIGNATURE: &'static str = "listBuckets()"; + const SELECTOR: [u8; 4] = [99u8, 194u8, 68u8, 194u8]; + #[inline] + fn new<'a>( + tuple: as alloy_sol_types::SolType>::RustType, + ) -> Self { + tuple.into() + } + #[inline] + fn tokenize(&self) -> Self::Token<'_> { + () + } + #[inline] + fn abi_decode_returns( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + as alloy_sol_types::SolType>::abi_decode_sequence( + data, validate, + ) + .map(Into::into) + } + } + }; + /**Function with signature `listBuckets(address)` and selector `0xd120303f`. + ```solidity + function listBuckets(address owner) external view returns (Machine[] memory); + ```*/ + #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct listBuckets_1Call { + #[allow(missing_docs)] + pub owner: ::alloy_sol_types::private::Address, + } + ///Container type for the return parameters of the [`listBuckets(address)`](listBuckets_1Call) function. 
+ #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct listBuckets_1Return { + #[allow(missing_docs)] + pub _0: ::alloy_sol_types::private::Vec<::RustType>, + } + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + const _: () = { + use ::alloy_sol_types; + { + #[doc(hidden)] + type UnderlyingSolTuple<'a> = (::alloy_sol_types::sol_data::Address,); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = (::alloy_sol_types::private::Address,); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: listBuckets_1Call) -> Self { + (value.owner,) + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for listBuckets_1Call { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self { owner: tuple.0 } + } + } + } + { + #[doc(hidden)] + type UnderlyingSolTuple<'a> = (::alloy_sol_types::sol_data::Array,); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = ( + ::alloy_sol_types::private::Vec<::RustType>, + ); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: listBuckets_1Return) -> Self { + (value._0,) + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for listBuckets_1Return { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self { _0: tuple.0 } + } + } + } + #[automatically_derived] + impl alloy_sol_types::SolCall for listBuckets_1Call { + type Parameters<'a> = (::alloy_sol_types::sol_data::Address,); + type Token<'a> = as alloy_sol_types::SolType>::Token<'a>; + type Return = listBuckets_1Return; + type ReturnTuple<'a> = (::alloy_sol_types::sol_data::Array,); + type ReturnToken<'a> = as alloy_sol_types::SolType>::Token<'a>; + const SIGNATURE: &'static str = "listBuckets(address)"; + const SELECTOR: [u8; 4] = [209u8, 32u8, 48u8, 63u8]; + #[inline] + fn new<'a>( + tuple: as alloy_sol_types::SolType>::RustType, + ) -> Self { + tuple.into() + } + #[inline] + fn tokenize(&self) -> Self::Token<'_> { + ( + <::alloy_sol_types::sol_data::Address as alloy_sol_types::SolType>::tokenize( + &self.owner, + ), + ) + } + #[inline] + fn abi_decode_returns( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + as alloy_sol_types::SolType>::abi_decode_sequence( + data, validate, + ) + .map(Into::into) + } + } + }; + ///Container for all the [`IMachineFacade`](self) function calls. + pub enum IMachineFacadeCalls { + #[allow(missing_docs)] + createBucket_0(createBucket_0Call), + #[allow(missing_docs)] + createBucket_1(createBucket_1Call), + #[allow(missing_docs)] + createBucket_2(createBucket_2Call), + #[allow(missing_docs)] + listBuckets_0(listBuckets_0Call), + #[allow(missing_docs)] + listBuckets_1(listBuckets_1Call), + } + #[automatically_derived] + impl IMachineFacadeCalls { + /// All the selectors of this enum. + /// + /// Note that the selectors might not be in the same order as the variants. + /// No guarantees are made about the order of the selectors. 
+ /// + /// Prefer using `SolInterface` methods instead. + pub const SELECTORS: &'static [[u8; 4usize]] = &[ + [74u8, 168u8, 47u8, 245u8], + [99u8, 194u8, 68u8, 194u8], + [209u8, 32u8, 48u8, 63u8], + [225u8, 41u8, 237u8, 144u8], + [246u8, 214u8, 196u8, 32u8], + ]; + } + #[automatically_derived] + impl alloy_sol_types::SolInterface for IMachineFacadeCalls { + const NAME: &'static str = "IMachineFacadeCalls"; + const MIN_DATA_LENGTH: usize = 0usize; + const COUNT: usize = 5usize; + #[inline] + fn selector(&self) -> [u8; 4] { + match self { + Self::createBucket_0(_) => { + ::SELECTOR + } + Self::createBucket_1(_) => { + ::SELECTOR + } + Self::createBucket_2(_) => { + ::SELECTOR + } + Self::listBuckets_0(_) => ::SELECTOR, + Self::listBuckets_1(_) => ::SELECTOR, + } + } + #[inline] + fn selector_at(i: usize) -> ::core::option::Option<[u8; 4]> { + Self::SELECTORS.get(i).copied() + } + #[inline] + fn valid_selector(selector: [u8; 4]) -> bool { + Self::SELECTORS.binary_search(&selector).is_ok() + } + #[inline] + #[allow(non_snake_case)] + fn abi_decode_raw( + selector: [u8; 4], + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + static DECODE_SHIMS: &[fn( + &[u8], + bool, + ) + -> alloy_sol_types::Result] = &[ + { + fn createBucket_0( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + ::abi_decode_raw( + data, validate, + ) + .map(IMachineFacadeCalls::createBucket_0) + } + createBucket_0 + }, + { + fn listBuckets_0( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + ::abi_decode_raw( + data, validate, + ) + .map(IMachineFacadeCalls::listBuckets_0) + } + listBuckets_0 + }, + { + fn listBuckets_1( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + ::abi_decode_raw( + data, validate, + ) + .map(IMachineFacadeCalls::listBuckets_1) + } + listBuckets_1 + }, + { + fn createBucket_1( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + ::abi_decode_raw( + data, validate, + ) + .map(IMachineFacadeCalls::createBucket_1) + } + createBucket_1 + }, + { + fn createBucket_2( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + ::abi_decode_raw( + data, validate, + ) + .map(IMachineFacadeCalls::createBucket_2) + } + createBucket_2 + }, + ]; + let Ok(idx) = Self::SELECTORS.binary_search(&selector) else { + return Err(alloy_sol_types::Error::unknown_selector( + ::NAME, + selector, + )); + }; + DECODE_SHIMS[idx](data, validate) + } + #[inline] + fn abi_encoded_size(&self) -> usize { + match self { + Self::createBucket_0(inner) => { + ::abi_encoded_size(inner) + } + Self::createBucket_1(inner) => { + ::abi_encoded_size(inner) + } + Self::createBucket_2(inner) => { + ::abi_encoded_size(inner) + } + Self::listBuckets_0(inner) => { + ::abi_encoded_size(inner) + } + Self::listBuckets_1(inner) => { + ::abi_encoded_size(inner) + } + } + } + #[inline] + fn abi_encode_raw(&self, out: &mut alloy_sol_types::private::Vec) { + match self { + Self::createBucket_0(inner) => { + ::abi_encode_raw(inner, out) + } + Self::createBucket_1(inner) => { + ::abi_encode_raw(inner, out) + } + Self::createBucket_2(inner) => { + ::abi_encode_raw(inner, out) + } + Self::listBuckets_0(inner) => { + ::abi_encode_raw(inner, out) + } + Self::listBuckets_1(inner) => { + ::abi_encode_raw(inner, out) + } + } + } + } + ///Container for all the [`IMachineFacade`](self) events. 
+ pub enum IMachineFacadeEvents { + #[allow(missing_docs)] + MachineCreated(MachineCreated), + #[allow(missing_docs)] + MachineInitialized(MachineInitialized), + } + #[automatically_derived] + impl IMachineFacadeEvents { + /// All the selectors of this enum. + /// + /// Note that the selectors might not be in the same order as the variants. + /// No guarantees are made about the order of the selectors. + /// + /// Prefer using `SolInterface` methods instead. + pub const SELECTORS: &'static [[u8; 32usize]] = &[ + [ + 120u8, 52u8, 73u8, 115u8, 87u8, 56u8, 153u8, 229u8, 218u8, 152u8, 132u8, 150u8, + 171u8, 151u8, 71u8, 107u8, 55u8, 2u8, 236u8, 252u8, 163u8, 113u8, 198u8, 178u8, + 90u8, 97u8, 70u8, 15u8, 152u8, 157u8, 64u8, 209u8, + ], + [ + 143u8, 114u8, 82u8, 100u8, 35u8, 115u8, 213u8, 240u8, 184u8, 154u8, 12u8, 92u8, + 217u8, 205u8, 36u8, 46u8, 92u8, 213u8, 187u8, 26u8, 54u8, 174u8, 198u8, 35u8, + 117u8, 110u8, 79u8, 82u8, 168u8, 193u8, 234u8, 110u8, + ], + ]; + } + #[automatically_derived] + impl alloy_sol_types::SolEventInterface for IMachineFacadeEvents { + const NAME: &'static str = "IMachineFacadeEvents"; + const COUNT: usize = 2usize; + fn decode_raw_log( + topics: &[alloy_sol_types::Word], + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + match topics.first().copied() { + Some(::SIGNATURE_HASH) => { + ::decode_raw_log( + topics, data, validate, + ) + .map(Self::MachineCreated) + } + Some(::SIGNATURE_HASH) => { + ::decode_raw_log( + topics, data, validate, + ) + .map(Self::MachineInitialized) + } + _ => alloy_sol_types::private::Err(alloy_sol_types::Error::InvalidLog { + name: ::NAME, + log: alloy_sol_types::private::Box::new( + alloy_sol_types::private::LogData::new_unchecked( + topics.to_vec(), + data.to_vec().into(), + ), + ), + }), + } + } + } + #[automatically_derived] + impl alloy_sol_types::private::IntoLogData for IMachineFacadeEvents { + fn to_log_data(&self) -> alloy_sol_types::private::LogData { + match self { + Self::MachineCreated(inner) => { + alloy_sol_types::private::IntoLogData::to_log_data(inner) + } + Self::MachineInitialized(inner) => { + alloy_sol_types::private::IntoLogData::to_log_data(inner) + } + } + } + fn into_log_data(self) -> alloy_sol_types::private::LogData { + match self { + Self::MachineCreated(inner) => { + alloy_sol_types::private::IntoLogData::into_log_data(inner) + } + Self::MachineInitialized(inner) => { + alloy_sol_types::private::IntoLogData::into_log_data(inner) + } + } + } + } +} diff --git a/ipc-storage/sol-facade/crates/facade/src/machine_facade/mod.rs b/ipc-storage/sol-facade/crates/facade/src/machine_facade/mod.rs new file mode 100644 index 0000000000..d8129e4521 --- /dev/null +++ b/ipc-storage/sol-facade/crates/facade/src/machine_facade/mod.rs @@ -0,0 +1,8 @@ +// Copyright 2022-2024 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT +#![allow(unused_imports, clippy::all, rustdoc::all)] +//! This module contains the sol! generated bindings for solidity contracts. +//! This is autogenerated code. +//! Do not manually edit these files. +//! These files may be overwritten by the codegen system at any time. 
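+// Illustrative usage sketch (not part of the sol! generated output): one way the
+// call structs from `imachinefacade.rs` can be turned into calldata, assuming the
+// standard `alloy_sol_types::SolCall::abi_encode` provided method. The helper name
+// and the placeholder `owner` argument are hypothetical and shown only to relate
+// the overloaded `createBucket`/`listBuckets` functions to their distinct structs
+// and selectors.
+#[allow(dead_code)]
+fn _example_machine_calldata(owner: ::alloy_sol_types::private::Address) -> (Vec<u8>, Vec<u8>) {
+    use crate::machine_facade::imachinefacade::IMachineFacade::{
+        createBucket_2Call, listBuckets_0Call,
+    };
+    use ::alloy_sol_types::SolCall;
+    // `createBucket(address)` encodes as selector 0xf6d6c420 followed by the ABI-encoded owner.
+    let create = createBucket_2Call { owner }.abi_encode();
+    // `listBuckets()` takes no arguments, so its calldata is just selector 0x63c244c2.
+    let list = listBuckets_0Call {}.abi_encode();
+    (create, list)
+}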
+pub mod r#imachinefacade; diff --git a/ipc-storage/sol-facade/crates/facade/src/timehub_facade/itimehubfacade.rs b/ipc-storage/sol-facade/crates/facade/src/timehub_facade/itimehubfacade.rs new file mode 100644 index 0000000000..a84c7dd2a9 --- /dev/null +++ b/ipc-storage/sol-facade/crates/facade/src/timehub_facade/itimehubfacade.rs @@ -0,0 +1,1103 @@ +// Copyright 2022-2024 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT +/** + +Generated by the following Solidity interface... +```solidity +interface ITimehubFacade { + event EventPushed(uint256 index, uint256 timestamp, bytes cid); + + function getCount() external view returns (uint64); + function getLeafAt(uint64 index) external view returns (uint64 timestamp, bytes memory witnessed); + function getPeaks() external view returns (bytes[] memory cids); + function getRoot() external view returns (bytes memory cid); + function push(bytes memory cid) external returns (bytes memory root, uint64 index); +} +``` + +...which was generated by the following JSON ABI: +```json +[ + { + "type": "function", + "name": "getCount", + "inputs": [], + "outputs": [ + { + "name": "", + "type": "uint64", + "internalType": "uint64" + } + ], + "stateMutability": "view" + }, + { + "type": "function", + "name": "getLeafAt", + "inputs": [ + { + "name": "index", + "type": "uint64", + "internalType": "uint64" + } + ], + "outputs": [ + { + "name": "timestamp", + "type": "uint64", + "internalType": "uint64" + }, + { + "name": "witnessed", + "type": "bytes", + "internalType": "bytes" + } + ], + "stateMutability": "view" + }, + { + "type": "function", + "name": "getPeaks", + "inputs": [], + "outputs": [ + { + "name": "cids", + "type": "bytes[]", + "internalType": "bytes[]" + } + ], + "stateMutability": "view" + }, + { + "type": "function", + "name": "getRoot", + "inputs": [], + "outputs": [ + { + "name": "cid", + "type": "bytes", + "internalType": "bytes" + } + ], + "stateMutability": "view" + }, + { + "type": "function", + "name": "push", + "inputs": [ + { + "name": "cid", + "type": "bytes", + "internalType": "bytes" + } + ], + "outputs": [ + { + "name": "root", + "type": "bytes", + "internalType": "bytes" + }, + { + "name": "index", + "type": "uint64", + "internalType": "uint64" + } + ], + "stateMutability": "nonpayable" + }, + { + "type": "event", + "name": "EventPushed", + "inputs": [ + { + "name": "index", + "type": "uint256", + "indexed": false, + "internalType": "uint256" + }, + { + "name": "timestamp", + "type": "uint256", + "indexed": false, + "internalType": "uint256" + }, + { + "name": "cid", + "type": "bytes", + "indexed": false, + "internalType": "bytes" + } + ], + "anonymous": false + } +] +```*/ +#[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style, + clippy::empty_structs_with_brackets +)] +pub mod ITimehubFacade { + use super::*; + use ::alloy_sol_types; + /// The creation / init bytecode of the contract. + /// + /// ```text + ///0x + /// ``` + #[rustfmt::skip] + #[allow(clippy::all)] + pub static BYTECODE: alloy_sol_types::private::Bytes = alloy_sol_types::private::Bytes::from_static( + b"", + ); + /// The runtime bytecode of the contract, as deployed on the network. 
+ /// + /// ```text + ///0x + /// ``` + #[rustfmt::skip] + #[allow(clippy::all)] + pub static DEPLOYED_BYTECODE: alloy_sol_types::private::Bytes = alloy_sol_types::private::Bytes::from_static( + b"", + ); + /**Event with signature `EventPushed(uint256,uint256,bytes)` and selector `0x9f2453a8c6b2912a42d606880c3eeaadcc940925c2af1349422a17b816155415`. + ```solidity + event EventPushed(uint256 index, uint256 timestamp, bytes cid); + ```*/ + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + #[derive(Clone)] + pub struct EventPushed { + #[allow(missing_docs)] + pub index: ::alloy_sol_types::private::primitives::aliases::U256, + #[allow(missing_docs)] + pub timestamp: ::alloy_sol_types::private::primitives::aliases::U256, + #[allow(missing_docs)] + pub cid: ::alloy_sol_types::private::Bytes, + } + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + const _: () = { + use ::alloy_sol_types; + #[automatically_derived] + impl alloy_sol_types::SolEvent for EventPushed { + type DataTuple<'a> = ( + ::alloy_sol_types::sol_data::Uint<256>, + ::alloy_sol_types::sol_data::Uint<256>, + ::alloy_sol_types::sol_data::Bytes, + ); + type DataToken<'a> = as alloy_sol_types::SolType>::Token<'a>; + type TopicList = (alloy_sol_types::sol_data::FixedBytes<32>,); + const SIGNATURE: &'static str = "EventPushed(uint256,uint256,bytes)"; + const SIGNATURE_HASH: alloy_sol_types::private::B256 = + alloy_sol_types::private::B256::new([ + 159u8, 36u8, 83u8, 168u8, 198u8, 178u8, 145u8, 42u8, 66u8, 214u8, 6u8, 136u8, + 12u8, 62u8, 234u8, 173u8, 204u8, 148u8, 9u8, 37u8, 194u8, 175u8, 19u8, 73u8, + 66u8, 42u8, 23u8, 184u8, 22u8, 21u8, 84u8, 21u8, + ]); + const ANONYMOUS: bool = false; + #[allow(unused_variables)] + #[inline] + fn new( + topics: ::RustType, + data: as alloy_sol_types::SolType>::RustType, + ) -> Self { + Self { + index: data.0, + timestamp: data.1, + cid: data.2, + } + } + #[inline] + fn check_signature( + topics: &::RustType, + ) -> alloy_sol_types::Result<()> { + if topics.0 != Self::SIGNATURE_HASH { + return Err(alloy_sol_types::Error::invalid_event_signature_hash( + Self::SIGNATURE, + topics.0, + Self::SIGNATURE_HASH, + )); + } + Ok(()) + } + #[inline] + fn tokenize_body(&self) -> Self::DataToken<'_> { + ( + <::alloy_sol_types::sol_data::Uint<256> as alloy_sol_types::SolType>::tokenize( + &self.index, + ), + <::alloy_sol_types::sol_data::Uint<256> as alloy_sol_types::SolType>::tokenize( + &self.timestamp, + ), + <::alloy_sol_types::sol_data::Bytes as alloy_sol_types::SolType>::tokenize( + &self.cid, + ), + ) + } + #[inline] + fn topics(&self) -> ::RustType { + (Self::SIGNATURE_HASH.into(),) + } + #[inline] + fn encode_topics_raw( + &self, + out: &mut [alloy_sol_types::abi::token::WordToken], + ) -> alloy_sol_types::Result<()> { + if out.len() < ::COUNT { + return Err(alloy_sol_types::Error::Overrun); + } + out[0usize] = alloy_sol_types::abi::token::WordToken(Self::SIGNATURE_HASH); + Ok(()) + } + } + #[automatically_derived] + impl alloy_sol_types::private::IntoLogData for EventPushed { + fn to_log_data(&self) -> alloy_sol_types::private::LogData { + From::from(self) + } + fn into_log_data(self) -> alloy_sol_types::private::LogData { + From::from(&self) + } + } + #[automatically_derived] + impl From<&EventPushed> for alloy_sol_types::private::LogData { + #[inline] + fn from(this: &EventPushed) -> alloy_sol_types::private::LogData { + alloy_sol_types::SolEvent::encode_log_data(this) + } + } + }; + 
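+    // Illustrative note (not part of the generated bindings): every `SELECTOR`
+    // constant below is the first four bytes of keccak256 of the corresponding
+    // `SIGNATURE` string. A minimal check, assuming the `sha3` crate is available
+    // (any keccak-256 implementation would do):
+    #[allow(dead_code)]
+    fn _selector_of(signature: &str) -> [u8; 4] {
+        use sha3::{Digest, Keccak256};
+        let hash = Keccak256::digest(signature.as_bytes());
+        [hash[0], hash[1], hash[2], hash[3]]
+    }
+    // For example, `_selector_of("getCount()")` yields [0xa8, 0x7d, 0x94, 0x2c],
+    // matching `getCountCall::SELECTOR` defined below.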
/**Function with signature `getCount()` and selector `0xa87d942c`. + ```solidity + function getCount() external view returns (uint64); + ```*/ + #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct getCountCall {} + ///Container type for the return parameters of the [`getCount()`](getCountCall) function. + #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct getCountReturn { + #[allow(missing_docs)] + pub _0: u64, + } + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + const _: () = { + use ::alloy_sol_types; + { + #[doc(hidden)] + type UnderlyingSolTuple<'a> = (); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = (); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: getCountCall) -> Self { + () + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for getCountCall { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self {} + } + } + } + { + #[doc(hidden)] + type UnderlyingSolTuple<'a> = (::alloy_sol_types::sol_data::Uint<64>,); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = (u64,); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: getCountReturn) -> Self { + (value._0,) + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for getCountReturn { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self { _0: tuple.0 } + } + } + } + #[automatically_derived] + impl alloy_sol_types::SolCall for getCountCall { + type Parameters<'a> = (); + type Token<'a> = as alloy_sol_types::SolType>::Token<'a>; + type Return = getCountReturn; + type ReturnTuple<'a> = (::alloy_sol_types::sol_data::Uint<64>,); + type ReturnToken<'a> = as alloy_sol_types::SolType>::Token<'a>; + const SIGNATURE: &'static str = "getCount()"; + const SELECTOR: [u8; 4] = [168u8, 125u8, 148u8, 44u8]; + #[inline] + fn new<'a>( + tuple: as alloy_sol_types::SolType>::RustType, + ) -> Self { + tuple.into() + } + #[inline] + fn tokenize(&self) -> Self::Token<'_> { + () + } + #[inline] + fn abi_decode_returns( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + as alloy_sol_types::SolType>::abi_decode_sequence( + data, validate, + ) + .map(Into::into) + } + } + }; + /**Function with signature `getLeafAt(uint64)` and selector `0x19fa4966`. + ```solidity + function getLeafAt(uint64 index) external view returns (uint64 timestamp, bytes memory witnessed); + ```*/ + #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct getLeafAtCall { + #[allow(missing_docs)] + pub index: u64, + } + ///Container type for the return parameters of the [`getLeafAt(uint64)`](getLeafAtCall) function. 
+ #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct getLeafAtReturn { + #[allow(missing_docs)] + pub timestamp: u64, + #[allow(missing_docs)] + pub witnessed: ::alloy_sol_types::private::Bytes, + } + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + const _: () = { + use ::alloy_sol_types; + { + #[doc(hidden)] + type UnderlyingSolTuple<'a> = (::alloy_sol_types::sol_data::Uint<64>,); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = (u64,); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: getLeafAtCall) -> Self { + (value.index,) + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for getLeafAtCall { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self { index: tuple.0 } + } + } + } + { + #[doc(hidden)] + type UnderlyingSolTuple<'a> = ( + ::alloy_sol_types::sol_data::Uint<64>, + ::alloy_sol_types::sol_data::Bytes, + ); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = (u64, ::alloy_sol_types::private::Bytes); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: getLeafAtReturn) -> Self { + (value.timestamp, value.witnessed) + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for getLeafAtReturn { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self { + timestamp: tuple.0, + witnessed: tuple.1, + } + } + } + } + #[automatically_derived] + impl alloy_sol_types::SolCall for getLeafAtCall { + type Parameters<'a> = (::alloy_sol_types::sol_data::Uint<64>,); + type Token<'a> = as alloy_sol_types::SolType>::Token<'a>; + type Return = getLeafAtReturn; + type ReturnTuple<'a> = ( + ::alloy_sol_types::sol_data::Uint<64>, + ::alloy_sol_types::sol_data::Bytes, + ); + type ReturnToken<'a> = as alloy_sol_types::SolType>::Token<'a>; + const SIGNATURE: &'static str = "getLeafAt(uint64)"; + const SELECTOR: [u8; 4] = [25u8, 250u8, 73u8, 102u8]; + #[inline] + fn new<'a>( + tuple: as alloy_sol_types::SolType>::RustType, + ) -> Self { + tuple.into() + } + #[inline] + fn tokenize(&self) -> Self::Token<'_> { + ( + <::alloy_sol_types::sol_data::Uint<64> as alloy_sol_types::SolType>::tokenize( + &self.index, + ), + ) + } + #[inline] + fn abi_decode_returns( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + as alloy_sol_types::SolType>::abi_decode_sequence( + data, validate, + ) + .map(Into::into) + } + } + }; + /**Function with signature `getPeaks()` and selector `0x0ae06fba`. + ```solidity + function getPeaks() external view returns (bytes[] memory cids); + ```*/ + #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct getPeaksCall {} + ///Container type for the return parameters of the [`getPeaks()`](getPeaksCall) function. 
+ #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct getPeaksReturn { + #[allow(missing_docs)] + pub cids: ::alloy_sol_types::private::Vec<::alloy_sol_types::private::Bytes>, + } + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + const _: () = { + use ::alloy_sol_types; + { + #[doc(hidden)] + type UnderlyingSolTuple<'a> = (); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = (); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: getPeaksCall) -> Self { + () + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for getPeaksCall { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self {} + } + } + } + { + #[doc(hidden)] + type UnderlyingSolTuple<'a> = + (::alloy_sol_types::sol_data::Array<::alloy_sol_types::sol_data::Bytes>,); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = + (::alloy_sol_types::private::Vec<::alloy_sol_types::private::Bytes>,); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: getPeaksReturn) -> Self { + (value.cids,) + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for getPeaksReturn { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self { cids: tuple.0 } + } + } + } + #[automatically_derived] + impl alloy_sol_types::SolCall for getPeaksCall { + type Parameters<'a> = (); + type Token<'a> = as alloy_sol_types::SolType>::Token<'a>; + type Return = getPeaksReturn; + type ReturnTuple<'a> = + (::alloy_sol_types::sol_data::Array<::alloy_sol_types::sol_data::Bytes>,); + type ReturnToken<'a> = as alloy_sol_types::SolType>::Token<'a>; + const SIGNATURE: &'static str = "getPeaks()"; + const SELECTOR: [u8; 4] = [10u8, 224u8, 111u8, 186u8]; + #[inline] + fn new<'a>( + tuple: as alloy_sol_types::SolType>::RustType, + ) -> Self { + tuple.into() + } + #[inline] + fn tokenize(&self) -> Self::Token<'_> { + () + } + #[inline] + fn abi_decode_returns( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + as alloy_sol_types::SolType>::abi_decode_sequence( + data, validate, + ) + .map(Into::into) + } + } + }; + /**Function with signature `getRoot()` and selector `0x5ca1e165`. + ```solidity + function getRoot() external view returns (bytes memory cid); + ```*/ + #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct getRootCall {} + ///Container type for the return parameters of the [`getRoot()`](getRootCall) function. 
+ #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct getRootReturn { + #[allow(missing_docs)] + pub cid: ::alloy_sol_types::private::Bytes, + } + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + const _: () = { + use ::alloy_sol_types; + { + #[doc(hidden)] + type UnderlyingSolTuple<'a> = (); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = (); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: getRootCall) -> Self { + () + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for getRootCall { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self {} + } + } + } + { + #[doc(hidden)] + type UnderlyingSolTuple<'a> = (::alloy_sol_types::sol_data::Bytes,); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = (::alloy_sol_types::private::Bytes,); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: getRootReturn) -> Self { + (value.cid,) + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for getRootReturn { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self { cid: tuple.0 } + } + } + } + #[automatically_derived] + impl alloy_sol_types::SolCall for getRootCall { + type Parameters<'a> = (); + type Token<'a> = as alloy_sol_types::SolType>::Token<'a>; + type Return = getRootReturn; + type ReturnTuple<'a> = (::alloy_sol_types::sol_data::Bytes,); + type ReturnToken<'a> = as alloy_sol_types::SolType>::Token<'a>; + const SIGNATURE: &'static str = "getRoot()"; + const SELECTOR: [u8; 4] = [92u8, 161u8, 225u8, 101u8]; + #[inline] + fn new<'a>( + tuple: as alloy_sol_types::SolType>::RustType, + ) -> Self { + tuple.into() + } + #[inline] + fn tokenize(&self) -> Self::Token<'_> { + () + } + #[inline] + fn abi_decode_returns( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + as alloy_sol_types::SolType>::abi_decode_sequence( + data, validate, + ) + .map(Into::into) + } + } + }; + /**Function with signature `push(bytes)` and selector `0x7dacda03`. + ```solidity + function push(bytes memory cid) external returns (bytes memory root, uint64 index); + ```*/ + #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct pushCall { + #[allow(missing_docs)] + pub cid: ::alloy_sol_types::private::Bytes, + } + ///Container type for the return parameters of the [`push(bytes)`](pushCall) function. 
+ #[allow(non_camel_case_types, non_snake_case, clippy::pub_underscore_fields)] + #[derive(Clone)] + pub struct pushReturn { + #[allow(missing_docs)] + pub root: ::alloy_sol_types::private::Bytes, + #[allow(missing_docs)] + pub index: u64, + } + #[allow( + non_camel_case_types, + non_snake_case, + clippy::pub_underscore_fields, + clippy::style + )] + const _: () = { + use ::alloy_sol_types; + { + #[doc(hidden)] + type UnderlyingSolTuple<'a> = (::alloy_sol_types::sol_data::Bytes,); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = (::alloy_sol_types::private::Bytes,); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: pushCall) -> Self { + (value.cid,) + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for pushCall { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self { cid: tuple.0 } + } + } + } + { + #[doc(hidden)] + type UnderlyingSolTuple<'a> = ( + ::alloy_sol_types::sol_data::Bytes, + ::alloy_sol_types::sol_data::Uint<64>, + ); + #[doc(hidden)] + type UnderlyingRustTuple<'a> = (::alloy_sol_types::private::Bytes, u64); + #[cfg(test)] + #[allow(dead_code, unreachable_patterns)] + fn _type_assertion(_t: alloy_sol_types::private::AssertTypeEq) { + match _t { + alloy_sol_types::private::AssertTypeEq::< + ::RustType, + >(_) => {} + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From for UnderlyingRustTuple<'_> { + fn from(value: pushReturn) -> Self { + (value.root, value.index) + } + } + #[automatically_derived] + #[doc(hidden)] + impl ::core::convert::From> for pushReturn { + fn from(tuple: UnderlyingRustTuple<'_>) -> Self { + Self { + root: tuple.0, + index: tuple.1, + } + } + } + } + #[automatically_derived] + impl alloy_sol_types::SolCall for pushCall { + type Parameters<'a> = (::alloy_sol_types::sol_data::Bytes,); + type Token<'a> = as alloy_sol_types::SolType>::Token<'a>; + type Return = pushReturn; + type ReturnTuple<'a> = ( + ::alloy_sol_types::sol_data::Bytes, + ::alloy_sol_types::sol_data::Uint<64>, + ); + type ReturnToken<'a> = as alloy_sol_types::SolType>::Token<'a>; + const SIGNATURE: &'static str = "push(bytes)"; + const SELECTOR: [u8; 4] = [125u8, 172u8, 218u8, 3u8]; + #[inline] + fn new<'a>( + tuple: as alloy_sol_types::SolType>::RustType, + ) -> Self { + tuple.into() + } + #[inline] + fn tokenize(&self) -> Self::Token<'_> { + ( + <::alloy_sol_types::sol_data::Bytes as alloy_sol_types::SolType>::tokenize( + &self.cid, + ), + ) + } + #[inline] + fn abi_decode_returns( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + as alloy_sol_types::SolType>::abi_decode_sequence( + data, validate, + ) + .map(Into::into) + } + } + }; + ///Container for all the [`ITimehubFacade`](self) function calls. + pub enum ITimehubFacadeCalls { + #[allow(missing_docs)] + getCount(getCountCall), + #[allow(missing_docs)] + getLeafAt(getLeafAtCall), + #[allow(missing_docs)] + getPeaks(getPeaksCall), + #[allow(missing_docs)] + getRoot(getRootCall), + #[allow(missing_docs)] + push(pushCall), + } + #[automatically_derived] + impl ITimehubFacadeCalls { + /// All the selectors of this enum. + /// + /// Note that the selectors might not be in the same order as the variants. + /// No guarantees are made about the order of the selectors. 
+ /// + /// Prefer using `SolInterface` methods instead. + pub const SELECTORS: &'static [[u8; 4usize]] = &[ + [10u8, 224u8, 111u8, 186u8], + [25u8, 250u8, 73u8, 102u8], + [92u8, 161u8, 225u8, 101u8], + [125u8, 172u8, 218u8, 3u8], + [168u8, 125u8, 148u8, 44u8], + ]; + } + #[automatically_derived] + impl alloy_sol_types::SolInterface for ITimehubFacadeCalls { + const NAME: &'static str = "ITimehubFacadeCalls"; + const MIN_DATA_LENGTH: usize = 0usize; + const COUNT: usize = 5usize; + #[inline] + fn selector(&self) -> [u8; 4] { + match self { + Self::getCount(_) => ::SELECTOR, + Self::getLeafAt(_) => ::SELECTOR, + Self::getPeaks(_) => ::SELECTOR, + Self::getRoot(_) => ::SELECTOR, + Self::push(_) => ::SELECTOR, + } + } + #[inline] + fn selector_at(i: usize) -> ::core::option::Option<[u8; 4]> { + Self::SELECTORS.get(i).copied() + } + #[inline] + fn valid_selector(selector: [u8; 4]) -> bool { + Self::SELECTORS.binary_search(&selector).is_ok() + } + #[inline] + #[allow(non_snake_case)] + fn abi_decode_raw( + selector: [u8; 4], + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + static DECODE_SHIMS: &[fn( + &[u8], + bool, + ) + -> alloy_sol_types::Result] = &[ + { + fn getPeaks( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + ::abi_decode_raw(data, validate) + .map(ITimehubFacadeCalls::getPeaks) + } + getPeaks + }, + { + fn getLeafAt( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + ::abi_decode_raw(data, validate) + .map(ITimehubFacadeCalls::getLeafAt) + } + getLeafAt + }, + { + fn getRoot( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + ::abi_decode_raw(data, validate) + .map(ITimehubFacadeCalls::getRoot) + } + getRoot + }, + { + fn push( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + ::abi_decode_raw(data, validate) + .map(ITimehubFacadeCalls::push) + } + push + }, + { + fn getCount( + data: &[u8], + validate: bool, + ) -> alloy_sol_types::Result { + ::abi_decode_raw(data, validate) + .map(ITimehubFacadeCalls::getCount) + } + getCount + }, + ]; + let Ok(idx) = Self::SELECTORS.binary_search(&selector) else { + return Err(alloy_sol_types::Error::unknown_selector( + ::NAME, + selector, + )); + }; + DECODE_SHIMS[idx](data, validate) + } + #[inline] + fn abi_encoded_size(&self) -> usize { + match self { + Self::getCount(inner) => { + ::abi_encoded_size(inner) + } + Self::getLeafAt(inner) => { + ::abi_encoded_size(inner) + } + Self::getPeaks(inner) => { + ::abi_encoded_size(inner) + } + Self::getRoot(inner) => { + ::abi_encoded_size(inner) + } + Self::push(inner) => { + ::abi_encoded_size(inner) + } + } + } + #[inline] + fn abi_encode_raw(&self, out: &mut alloy_sol_types::private::Vec) { + match self { + Self::getCount(inner) => { + ::abi_encode_raw(inner, out) + } + Self::getLeafAt(inner) => { + ::abi_encode_raw(inner, out) + } + Self::getPeaks(inner) => { + ::abi_encode_raw(inner, out) + } + Self::getRoot(inner) => { + ::abi_encode_raw(inner, out) + } + Self::push(inner) => { + ::abi_encode_raw(inner, out) + } + } + } + } + ///Container for all the [`ITimehubFacade`](self) events. + pub enum ITimehubFacadeEvents { + #[allow(missing_docs)] + EventPushed(EventPushed), + } + #[automatically_derived] + impl ITimehubFacadeEvents { + /// All the selectors of this enum. + /// + /// Note that the selectors might not be in the same order as the variants. + /// No guarantees are made about the order of the selectors. + /// + /// Prefer using `SolInterface` methods instead. 
+        pub const SELECTORS: &'static [[u8; 32usize]] = &[[
+            159u8, 36u8, 83u8, 168u8, 198u8, 178u8, 145u8, 42u8, 66u8, 214u8, 6u8, 136u8, 12u8,
+            62u8, 234u8, 173u8, 204u8, 148u8, 9u8, 37u8, 194u8, 175u8, 19u8, 73u8, 66u8, 42u8,
+            23u8, 184u8, 22u8, 21u8, 84u8, 21u8,
+        ]];
+    }
+    #[automatically_derived]
+    impl alloy_sol_types::SolEventInterface for ITimehubFacadeEvents {
+        const NAME: &'static str = "ITimehubFacadeEvents";
+        const COUNT: usize = 1usize;
+        fn decode_raw_log(
+            topics: &[alloy_sol_types::Word],
+            data: &[u8],
+            validate: bool,
+        ) -> alloy_sol_types::Result<Self> {
+            match topics.first().copied() {
+                Some(<EventPushed as alloy_sol_types::SolEvent>::SIGNATURE_HASH) => {
+                    <EventPushed as alloy_sol_types::SolEvent>::decode_raw_log(
+                        topics, data, validate,
+                    )
+                    .map(Self::EventPushed)
+                }
+                _ => alloy_sol_types::private::Err(alloy_sol_types::Error::InvalidLog {
+                    name: <Self as alloy_sol_types::SolEventInterface>::NAME,
+                    log: alloy_sol_types::private::Box::new(
+                        alloy_sol_types::private::LogData::new_unchecked(
+                            topics.to_vec(),
+                            data.to_vec().into(),
+                        ),
+                    ),
+                }),
+            }
+        }
+    }
+    #[automatically_derived]
+    impl alloy_sol_types::private::IntoLogData for ITimehubFacadeEvents {
+        fn to_log_data(&self) -> alloy_sol_types::private::LogData {
+            match self {
+                Self::EventPushed(inner) => {
+                    alloy_sol_types::private::IntoLogData::to_log_data(inner)
+                }
+            }
+        }
+        fn into_log_data(self) -> alloy_sol_types::private::LogData {
+            match self {
+                Self::EventPushed(inner) => {
+                    alloy_sol_types::private::IntoLogData::into_log_data(inner)
+                }
+            }
+        }
+    }
+}
diff --git a/ipc-storage/sol-facade/crates/facade/src/timehub_facade/mod.rs b/ipc-storage/sol-facade/crates/facade/src/timehub_facade/mod.rs
new file mode 100644
index 0000000000..4a4ebc0668
--- /dev/null
+++ b/ipc-storage/sol-facade/crates/facade/src/timehub_facade/mod.rs
@@ -0,0 +1,8 @@
+// Copyright 2022-2024 Protocol Labs
+// SPDX-License-Identifier: Apache-2.0, MIT
+#![allow(unused_imports, clippy::all, rustdoc::all)]
+//! This module contains the sol! generated bindings for solidity contracts.
+//! This is autogenerated code.
+//! Do not manually edit these files.
+//! These files may be overwritten by the codegen system at any time.
+pub mod r#itimehubfacade;
diff --git a/ipc-storage/sol-facade/crates/facade/src/types.rs b/ipc-storage/sol-facade/crates/facade/src/types.rs
new file mode 100644
index 0000000000..5ee51bf8d4
--- /dev/null
+++ b/ipc-storage/sol-facade/crates/facade/src/types.rs
@@ -0,0 +1,170 @@
+// Copyright 2022-2024 Protocol Labs
+// Copyright 2025 Recall Contributors
+// SPDX-License-Identifier: Apache-2.0, MIT
+
+use std::fmt;
+
+use alloy_primitives::{Sign, I256, U256};
+use anyhow::anyhow;
+use fvm_shared::{
+    address::{Address as FvmAddress, Payload},
+    bigint::{BigInt, BigUint, Sign as BigSign},
+    econ::TokenAmount,
+    ActorID,
+};
+
+pub use alloy_primitives::Address;
+pub use alloy_sol_types::SolCall;
+pub use alloy_sol_types::SolInterface;
+
+const EAM_ACTOR_ID: ActorID = 10;
+
+/// Fixed-size uninterpreted hash type with 20 bytes (160 bits) size.
+#[derive(Default)]
+pub struct H160([u8; 20]);
+
+impl H160 {
+    pub fn from_slice(slice: &[u8]) -> Self {
+        if slice.len() != 20 {
+            panic!("slice length must be exactly 20 bytes");
+        }
+        let mut buf = [0u8; 20];
+        buf.copy_from_slice(slice);
+        H160(buf)
+    }
+
+    pub fn from_actor_id(id: ActorID) -> Self {
+        let mut buf = [0u8; 20];
+        buf[0] = 0xff;
+        buf[12..].copy_from_slice(&id.to_be_bytes());
+        H160(buf)
+    }
+
+    pub fn to_fixed_bytes(&self) -> [u8; 20] {
+        self.0
+    }
+
+    /// Return true if it is a "0x00" address.
+    pub fn is_null(&self) -> bool {
+        self.0 == [0; 20]
+    }
+
+    pub fn as_option(&self) -> Option<H160> {
+        if self.is_null() {
+            None
+        } else {
+            Some(H160(self.0))
+        }
+    }
+}
+
+impl TryFrom<&[u8]> for H160 {
+    type Error = anyhow::Error;
+    fn try_from(slice: &[u8]) -> Result<Self, Self::Error> {
+        if slice.len() != 20 {
+            return Err(anyhow!("slice length must be exactly 20 bytes"));
+        }
+        let mut buf = [0u8; 20];
+        buf.copy_from_slice(slice);
+        Ok(H160(buf))
+    }
+}
+
+impl fmt::Debug for H160 {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(f, "H160({:?})", &self.0)
+    }
+}
+
+impl TryFrom<FvmAddress> for H160 {
+    type Error = anyhow::Error;
+
+    fn try_from(value: FvmAddress) -> Result<Self, Self::Error> {
+        match value.payload() {
+            Payload::Delegated(d)
+                if d.namespace() == EAM_ACTOR_ID && d.subaddress().len() == 20 =>
+            {
+                Ok(H160::from_slice(d.subaddress()))
+            }
+            Payload::ID(id) => Ok(H160::from_actor_id(*id)),
+            _ => Err(anyhow!("not an evm address: {}", value)),
+        }
+    }
+}
+
+impl From<H160> for FvmAddress {
+    fn from(value: H160) -> Self {
+        // Copied from fil_actors_evm_shared
+        let bytes = value.to_fixed_bytes();
+        if bytes[0] == 0xff && bytes[1..12].iter().all(|&b| b == 0x00) {
+            let id = u64::from_be_bytes(bytes[12..].try_into().unwrap());
+            FvmAddress::new_id(id)
+        } else {
+            FvmAddress::new_delegated(EAM_ACTOR_ID, bytes.as_slice()).unwrap()
+        }
+    }
+}
+
+impl From<Address> for H160 {
+    fn from(address: Address) -> Self {
+        H160::from_slice(address.as_ref())
+    }
+}
+
+impl From<H160> for Address {
+    fn from(value: H160) -> Self {
+        Address::from(value.to_fixed_bytes())
+    }
+}
+
+#[derive(Default)]
+pub struct BigUintWrapper(pub BigUint);
+
+impl From<TokenAmount> for BigUintWrapper {
+    fn from(value: TokenAmount) -> Self {
+        let signed: BigInt = value.atto().clone();
+        let unsigned = signed.to_biguint().unwrap_or_default();
+        BigUintWrapper(unsigned)
+    }
+}
+
+impl From<U256> for BigUintWrapper {
+    fn from(value: U256) -> Self {
+        BigUintWrapper(BigUint::from_bytes_be(
+            &value.to_be_bytes::<{ U256::BYTES }>(),
+        ))
+    }
+}
+
+impl From<BigUintWrapper> for TokenAmount {
+    fn from(value: BigUintWrapper) -> Self {
+        TokenAmount::from_atto(value.0)
+    }
+}
+
+impl From<BigUintWrapper> for U256 {
+    fn from(value: BigUintWrapper) -> Self {
+        let digits = value.0.to_u64_digits();
+        match U256::overflowing_from_limbs_slice(&digits) {
+            (n, false) => n,
+            (_, true) => U256::MAX,
+        }
+    }
+}
+
+pub struct BigIntWrapper(pub BigInt);
+
+impl From<BigIntWrapper> for I256 {
+    fn from(value: BigIntWrapper) -> Self {
+        let (sign, digits) = value.0.to_u64_digits();
+        let sign = match sign {
+            BigSign::Minus => Sign::Negative,
+            BigSign::NoSign | BigSign::Plus => Sign::Positive,
+        };
+        let uint = U256::saturating_from_limbs_slice(&digits);
+        match I256::overflowing_from_sign_and_abs(sign, uint) {
+            (n, false) => n,
+            (_, true) => I256::MAX,
+        }
+    }
+}
diff --git a/ipc/provider/src/config/mod.rs b/ipc/provider/src/config/mod.rs
index cbb9810995..baa4a9ea3b 100644
--- a/ipc/provider/src/config/mod.rs
+++ b/ipc/provider/src/config/mod.rs
@@ -67,8 +67,7 @@ impl Config {
             )
         })?;
 
-        let config: Config =
-            Config::from_toml_str(contents.as_str()).context("failed to parse config TOML")?;
+        let config: Config = Config::from_toml_str(contents.as_str())?;
 
         Ok(config)
     }
diff --git a/ipc/rust-toolchain.toml b/ipc/rust-toolchain.toml
deleted file mode 100644
index a59cf37c5f..0000000000
--- a/ipc/rust-toolchain.toml
+++ /dev/null
@@ -1,4 +0,0 @@
-[toolchain]
-channel = "stable"
-components = ["clippy", "llvm-tools", "rustfmt"]
-targets = ["wasm32-unknown-unknown"]
diff --git a/patches/netwatch/.cargo-ok b/patches/netwatch/.cargo-ok
new file mode 100644
index 0000000000..5f8b795830
--- /dev/null
+++ b/patches/netwatch/.cargo-ok
@@ -0,0 +1 @@
+{"v":1}
\ No newline at end of file
diff --git a/patches/netwatch/.cargo_vcs_info.json b/patches/netwatch/.cargo_vcs_info.json
new file mode 100644
index 0000000000..412c60033f
--- /dev/null
+++ b/patches/netwatch/.cargo_vcs_info.json
@@ -0,0 +1,6 @@
+{
+  "git": {
+    "sha1": "c17eb28b7d4bdd8d206a84c9d746f53c18c1f8a5"
+  },
+  "path_in_vcs": "netwatch"
+}
\ No newline at end of file
diff --git a/patches/netwatch/Cargo.lock b/patches/netwatch/Cargo.lock
new file mode 100644
index 0000000000..0ba491c770
--- /dev/null
+++ b/patches/netwatch/Cargo.lock
@@ -0,0 +1,2003 @@
+# This file is automatically @generated by Cargo.
+# It is not intended for manual editing.
+version = 3 + +[[package]] +name = "addr2line" +version = "0.24.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dfbe277e56a376000877090da837660b4427aad530e3028d44e0bffe4f89a1c1" +dependencies = [ + "gimli", +] + +[[package]] +name = "adler2" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "512761e0bb2578dd7380c6baaa0f4ce03e84f95e960231d1dec8bf4d7d6e2627" + +[[package]] +name = "aho-corasick" +version = "1.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e60d3430d3a69478ad0993f19238d2df97c507009a52b3c10addcd7f6bcb916" +dependencies = [ + "memchr", +] + +[[package]] +name = "android-tzdata" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e999941b234f3131b00bc13c22d06e8c5ff726d1b6318ac7eb276997bbb4fef0" + +[[package]] +name = "android_system_properties" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "819e7219dbd41043ac279b19830f2efc897156490d7fd6ea916720117ee66311" +dependencies = [ + "libc", +] + +[[package]] +name = "anyhow" +version = "1.0.98" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e16d2d3311acee920a9eb8d33b8cbc1787ce4a264e85f964c2404b969bdcd487" + +[[package]] +name = "anymap2" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d301b3b94cb4b2f23d7917810addbbaff90738e0ca2be692bd027e70d7e0330c" + +[[package]] +name = "atomic-waker" +version = "1.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1505bd5d3d116872e7271a6d4e16d81d0c8570876c8de68093a09ac269d8aac0" + +[[package]] +name = "autocfg" +version = "1.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ace50bade8e6234aa140d9a2f552bbee1db4d353f69b8217bc503490fc1a9f26" + +[[package]] +name = "backtrace" +version = "0.3.74" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8d82cb332cdfaed17ae235a638438ac4d4839913cc2af585c3c6746e8f8bee1a" +dependencies = [ + "addr2line", + "cfg-if", + "libc", + "miniz_oxide", + "object", + "rustc-demangle", + "windows-targets 0.52.6", +] + +[[package]] +name = "bincode" +version = "1.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b1f45e9417d87227c7a56d22e471c6206462cba514c7590c09aff4cf6d1ddcad" +dependencies = [ + "serde", +] + +[[package]] +name = "bitflags" +version = "1.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" + +[[package]] +name = "bitflags" +version = "2.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c8214115b7bf84099f1309324e63141d4c5d7cc26862f97a0a857dbefe165bd" + +[[package]] +name = "bumpalo" +version = "3.17.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1628fb46dfa0b37568d12e5edd512553eccf6a22a78e8bde00bb4aed84d5bdbf" + +[[package]] +name = "byteorder" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" + +[[package]] +name = "bytes" +version = "1.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d71b6127be86fdcfddb610f7182ac57211d4b18a3e9c82eb2d17662f2227ad6a" + +[[package]] +name = "cc" +version = "1.2.19" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e3a13707ac958681c13b39b458c073d0d9bc8a22cb1b2f4c8e55eb72c13f362" +dependencies = [ + "shlex", +] + +[[package]] +name = "cfg-if" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" + +[[package]] +name = "cfg_aliases" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "613afe47fcd5fac7ccf1db93babcb082c5994d996f20b8b159f2ad1658eb5724" + +[[package]] +name = "chrono" +version = "0.4.40" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1a7964611d71df112cb1730f2ee67324fcf4d0fc6606acbbe9bfe06df124637c" +dependencies = [ + "android-tzdata", + "iana-time-zone", + "num-traits", + "serde", + "windows-link", +] + +[[package]] +name = "cordyceps" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a0392f465ceba1713d30708f61c160ebf4dc1cf86bb166039d16b11ad4f3b5b6" +dependencies = [ + "loom", + "tracing", +] + +[[package]] +name = "core-foundation" +version = "0.9.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "91e195e091a93c46f7102ec7818a2aa394e1e1771c3ab4825963fa03e45afb8f" +dependencies = [ + "core-foundation-sys", + "libc", +] + +[[package]] +name = "core-foundation-sys" +version = "0.8.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b" + +[[package]] +name = "deranged" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c9e6a11ca8224451684bc0d7d5a7adbf8f2fd6887261a1cfc3c0432f9d4068e" +dependencies = [ + "powerfmt", +] + +[[package]] +name = "derive_more" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4a9b99b9cbbe49445b21764dc0625032a89b145a2642e67603e1c936f5458d05" +dependencies = [ + "derive_more-impl", +] + +[[package]] +name = "derive_more-impl" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cb7330aeadfbe296029522e6c40f315320aba36fc43a5b3632f3795348f3bd22" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.100", + "unicode-xid", +] + +[[package]] +name = "diatomic-waker" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ab03c107fafeb3ee9f5925686dbb7a73bc76e3932abb0d2b365cb64b169cf04c" + +[[package]] +name = "dlopen2" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09b4f5f101177ff01b8ec4ecc81eead416a8aa42819a2869311b3420fa114ffa" +dependencies = [ + "libc", + "once_cell", + "winapi", +] + +[[package]] +name = "equivalent" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "877a4ace8713b0bcf2a4e7eec82529c029f1d0619886d18145fea96c3ffe5c0f" + +[[package]] +name = "fastrand" +version = "2.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "37909eebbb50d72f9059c3b6d82c0463f2ff062c9e95845c43a6c9c0355411be" + +[[package]] +name = "fnv" +version = "1.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" + +[[package]] +name = "form_urlencoded" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"e13624c2627564efccf4934284bdd98cbaa14e79b0b5a141218e507b3a823456" +dependencies = [ + "percent-encoding", +] + +[[package]] +name = "futures" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "65bc07b1a8bc7c85c5f2e110c476c7389b4554ba72af57d8445ea63a576b0876" +dependencies = [ + "futures-channel", + "futures-core", + "futures-executor", + "futures-io", + "futures-sink", + "futures-task", + "futures-util", +] + +[[package]] +name = "futures-buffered" +version = "0.2.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fe940397c8b744b9c2c974791c2c08bca2c3242ce0290393249e98f215a00472" +dependencies = [ + "cordyceps", + "diatomic-waker", + "futures-core", + "pin-project-lite", + "spin", +] + +[[package]] +name = "futures-channel" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2dff15bf788c671c1934e366d07e30c1814a8ef514e1af724a602e8a2fbe1b10" +dependencies = [ + "futures-core", + "futures-sink", +] + +[[package]] +name = "futures-core" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05f29059c0c2090612e8d742178b0580d2dc940c837851ad723096f87af6663e" + +[[package]] +name = "futures-executor" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e28d1d997f585e54aebc3f97d39e72338912123a67330d723fdbb564d646c9f" +dependencies = [ + "futures-core", + "futures-task", + "futures-util", +] + +[[package]] +name = "futures-io" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9e5c1b78ca4aae1ac06c48a526a655760685149f0d465d21f37abfe57ce075c6" + +[[package]] +name = "futures-lite" +version = "2.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f5edaec856126859abb19ed65f39e90fea3a9574b9707f13539acf4abf7eb532" +dependencies = [ + "fastrand", + "futures-core", + "futures-io", + "parking", + "pin-project-lite", +] + +[[package]] +name = "futures-macro" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.100", +] + +[[package]] +name = "futures-sink" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e575fab7d1e0dcb8d0c7bcf9a63ee213816ab51902e6d244a95819acacf1d4f7" + +[[package]] +name = "futures-task" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f90f7dce0722e95104fcb095585910c0977252f286e354b5e3bd38902cd99988" + +[[package]] +name = "futures-util" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9fa08315bb612088cc391249efdc3bc77536f16c91f6cf495e6fbe85b20a4a81" +dependencies = [ + "futures-channel", + "futures-core", + "futures-io", + "futures-macro", + "futures-sink", + "futures-task", + "memchr", + "pin-project-lite", + "pin-utils", + "slab", +] + +[[package]] +name = "generator" +version = "0.8.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cc6bd114ceda131d3b1d665eba35788690ad37f5916457286b32ab6fd3c438dd" +dependencies = [ + "cfg-if", + "libc", + "log", + "rustversion", + "windows 0.58.0", +] + +[[package]] +name = "gimli" +version = "0.31.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"07e28edb80900c19c28f1072f2e8aeca7fa06b23cd4169cefe1af5aa3260783f" + +[[package]] +name = "gloo" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "28999cda5ef6916ffd33fb4a7b87e1de633c47c0dc6d97905fee1cdaa142b94d" +dependencies = [ + "gloo-console", + "gloo-dialogs", + "gloo-events", + "gloo-file", + "gloo-history", + "gloo-net", + "gloo-render", + "gloo-storage", + "gloo-timers", + "gloo-utils", + "gloo-worker", +] + +[[package]] +name = "gloo-console" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "82b7ce3c05debe147233596904981848862b068862e9ec3e34be446077190d3f" +dependencies = [ + "gloo-utils", + "js-sys", + "serde", + "wasm-bindgen", + "web-sys", +] + +[[package]] +name = "gloo-dialogs" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "67062364ac72d27f08445a46cab428188e2e224ec9e37efdba48ae8c289002e6" +dependencies = [ + "wasm-bindgen", + "web-sys", +] + +[[package]] +name = "gloo-events" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "68b107f8abed8105e4182de63845afcc7b69c098b7852a813ea7462a320992fc" +dependencies = [ + "wasm-bindgen", + "web-sys", +] + +[[package]] +name = "gloo-file" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a8d5564e570a38b43d78bdc063374a0c3098c4f0d64005b12f9bbe87e869b6d7" +dependencies = [ + "gloo-events", + "js-sys", + "wasm-bindgen", + "web-sys", +] + +[[package]] +name = "gloo-history" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85725d90bf0ed47063b3930ef28e863658a7905989e9929a8708aab74a1d5e7f" +dependencies = [ + "gloo-events", + "gloo-utils", + "serde", + "serde-wasm-bindgen", + "serde_urlencoded", + "thiserror 1.0.69", + "wasm-bindgen", + "web-sys", +] + +[[package]] +name = "gloo-net" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a66b4e3c7d9ed8d315fd6b97c8b1f74a7c6ecbbc2320e65ae7ed38b7068cc620" +dependencies = [ + "futures-channel", + "futures-core", + "futures-sink", + "gloo-utils", + "http", + "js-sys", + "pin-project", + "serde", + "serde_json", + "thiserror 1.0.69", + "wasm-bindgen", + "wasm-bindgen-futures", + "web-sys", +] + +[[package]] +name = "gloo-render" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2fd9306aef67cfd4449823aadcd14e3958e0800aa2183955a309112a84ec7764" +dependencies = [ + "wasm-bindgen", + "web-sys", +] + +[[package]] +name = "gloo-storage" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5d6ab60bf5dbfd6f0ed1f7843da31b41010515c745735c970e821945ca91e480" +dependencies = [ + "gloo-utils", + "js-sys", + "serde", + "serde_json", + "thiserror 1.0.69", + "wasm-bindgen", + "web-sys", +] + +[[package]] +name = "gloo-timers" +version = "0.2.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b995a66bb87bebce9a0f4a95aed01daca4872c050bfcb21653361c03bc35e5c" +dependencies = [ + "js-sys", + "wasm-bindgen", +] + +[[package]] +name = "gloo-utils" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "037fcb07216cb3a30f7292bd0176b050b7b9a052ba830ef7d5d65f6dc64ba58e" +dependencies = [ + "js-sys", + "serde", + "serde_json", + "wasm-bindgen", + "web-sys", +] + +[[package]] +name = "gloo-worker" +version = "0.2.1" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "13471584da78061a28306d1359dd0178d8d6fc1c7c80e5e35d27260346e0516a" +dependencies = [ + "anymap2", + "bincode", + "gloo-console", + "gloo-utils", + "js-sys", + "serde", + "wasm-bindgen", + "wasm-bindgen-futures", + "web-sys", +] + +[[package]] +name = "hashbrown" +version = "0.14.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e5274423e17b7c9fc20b6e7e208532f9b19825d82dfd615708b70edd83df41f1" + +[[package]] +name = "hashbrown" +version = "0.15.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bf151400ff0baff5465007dd2f3e717f3fe502074ca563069ce3a6629d07b289" + +[[package]] +name = "heck" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea" + +[[package]] +name = "http" +version = "0.2.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "601cbb57e577e2f5ef5be8e7b83f0f63994f25aa94d673e54a92d5c516d101f1" +dependencies = [ + "bytes", + "fnv", + "itoa", +] + +[[package]] +name = "iana-time-zone" +version = "0.1.63" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b0c919e5debc312ad217002b8048a17b7d83f80703865bbfcfebb0458b0b27d8" +dependencies = [ + "android_system_properties", + "core-foundation-sys", + "iana-time-zone-haiku", + "js-sys", + "log", + "wasm-bindgen", + "windows-core 0.61.0", +] + +[[package]] +name = "iana-time-zone-haiku" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f31827a206f56af32e590ba56d5d2d085f558508192593743f16b2306495269f" +dependencies = [ + "cc", +] + +[[package]] +name = "indexmap" +version = "2.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cea70ddb795996207ad57735b50c5982d8844f38ba9ee5f1aedcfb708a2aa11e" +dependencies = [ + "equivalent", + "hashbrown 0.15.2", +] + +[[package]] +name = "ipnet" +version = "2.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "469fb0b9cefa57e3ef31275ee7cacb78f2fdca44e4765491884a2b119d4eb130" + +[[package]] +name = "iroh-quinn-udp" +version = "0.5.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c53afaa1049f7c83ea1331f5ebb9e6ebc5fdd69c468b7a22dd598b02c9bcc973" +dependencies = [ + "cfg_aliases", + "libc", + "once_cell", + "socket2", + "tracing", + "windows-sys 0.59.0", +] + +[[package]] +name = "itoa" +version = "1.0.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4a5f13b858c8d314ee3e8f639011f7ccefe71f97f96e50151fb991f267928e2c" + +[[package]] +name = "js-sys" +version = "0.3.77" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1cfaf33c695fc6e08064efbc1f72ec937429614f25eef83af942d0e227c3a28f" +dependencies = [ + "once_cell", + "wasm-bindgen", +] + +[[package]] +name = "lazy_static" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" + +[[package]] +name = "libc" +version = "0.2.172" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d750af042f7ef4f724306de029d18836c26c1765a54a6a3f094cbd23a7267ffa" + +[[package]] +name = "log" +version = "0.4.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "13dc2df351e3202783a1fe0d44375f7295ffb4049267b0f3018346dc122a1d94" 
+ +[[package]] +name = "loom" +version = "0.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "419e0dc8046cb947daa77eb95ae174acfbddb7673b4151f56d1eed8e93fbfaca" +dependencies = [ + "cfg-if", + "generator", + "scoped-tls", + "tracing", + "tracing-subscriber", +] + +[[package]] +name = "matchers" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8263075bb86c5a1b1427b5ae862e8889656f126e9f77c484496e8b47cf5c5558" +dependencies = [ + "regex-automata 0.1.10", +] + +[[package]] +name = "memchr" +version = "2.7.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3" + +[[package]] +name = "minicov" +version = "0.3.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f27fe9f1cc3c22e1687f9446c2083c4c5fc7f0bcf1c7a86bdbded14985895b4b" +dependencies = [ + "cc", + "walkdir", +] + +[[package]] +name = "miniz_oxide" +version = "0.8.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3be647b768db090acb35d5ec5db2b0e1f1de11133ca123b9eacf5137868f892a" +dependencies = [ + "adler2", +] + +[[package]] +name = "mio" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2886843bf800fba2e3377cff24abf6379b4c4d5c6681eaf9ea5b0d15090450bd" +dependencies = [ + "libc", + "wasi", + "windows-sys 0.52.0", +] + +[[package]] +name = "n0-future" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7bb0e5d99e681ab3c938842b96fcb41bf8a7bb4bfdb11ccbd653a7e83e06c794" +dependencies = [ + "cfg_aliases", + "derive_more", + "futures-buffered", + "futures-lite", + "futures-util", + "js-sys", + "pin-project", + "send_wrapper", + "tokio", + "tokio-util", + "wasm-bindgen", + "wasm-bindgen-futures", + "web-time", +] + +[[package]] +name = "nested_enum_utils" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "43fa9161ed44d30e9702fe42bd78693bceac0fed02f647da749f36109023d3a3" +dependencies = [ + "proc-macro-crate", + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "netdev" +version = "0.31.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f901362e84cd407be6f8cd9d3a46bccf09136b095792785401ea7d283c79b91d" +dependencies = [ + "dlopen2", + "ipnet", + "libc", + "netlink-packet-core", + "netlink-packet-route 0.17.1", + "netlink-sys", + "once_cell", + "system-configuration", + "windows-sys 0.52.0", +] + +[[package]] +name = "netlink-packet-core" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72724faf704479d67b388da142b186f916188505e7e0b26719019c525882eda4" +dependencies = [ + "anyhow", + "byteorder", + "netlink-packet-utils", +] + +[[package]] +name = "netlink-packet-route" +version = "0.17.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "053998cea5a306971f88580d0829e90f270f940befd7cf928da179d4187a5a66" +dependencies = [ + "anyhow", + "bitflags 1.3.2", + "byteorder", + "libc", + "netlink-packet-core", + "netlink-packet-utils", +] + +[[package]] +name = "netlink-packet-route" +version = "0.23.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0800eae8638a299eaa67476e1c6b6692922273e0f7939fd188fc861c837b9cd2" +dependencies = [ + "anyhow", + "bitflags 2.9.0", + "byteorder", + "libc", + "log", + "netlink-packet-core", + 
"netlink-packet-utils", +] + +[[package]] +name = "netlink-packet-utils" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0ede8a08c71ad5a95cdd0e4e52facd37190977039a4704eb82a283f713747d34" +dependencies = [ + "anyhow", + "byteorder", + "paste", + "thiserror 1.0.69", +] + +[[package]] +name = "netlink-proto" +version = "0.11.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72452e012c2f8d612410d89eea01e2d9b56205274abb35d53f60200b2ec41d60" +dependencies = [ + "bytes", + "futures", + "log", + "netlink-packet-core", + "netlink-sys", + "thiserror 2.0.12", +] + +[[package]] +name = "netlink-sys" +version = "0.8.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "16c903aa70590cb93691bf97a767c8d1d6122d2cc9070433deb3bbf36ce8bd23" +dependencies = [ + "bytes", + "futures", + "libc", + "log", + "tokio", +] + +[[package]] +name = "netwatch" +version = "0.5.0" +dependencies = [ + "atomic-waker", + "bytes", + "cfg_aliases", + "derive_more", + "iroh-quinn-udp", + "js-sys", + "libc", + "n0-future", + "nested_enum_utils", + "netdev", + "netlink-packet-core", + "netlink-packet-route 0.23.0", + "netlink-proto", + "netlink-sys", + "serde", + "snafu", + "socket2", + "testresult", + "time", + "tokio", + "tokio-util", + "tracing", + "tracing-subscriber", + "tracing-subscriber-wasm", + "wasm-bindgen-test", + "web-sys", + "windows 0.59.0", + "windows-result 0.3.2", + "wmi", +] + +[[package]] +name = "nu-ansi-term" +version = "0.46.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "77a8165726e8236064dbb45459242600304b42a5ea24ee2948e18e023bf7ba84" +dependencies = [ + "overload", + "winapi", +] + +[[package]] +name = "num-conv" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "51d515d32fb182ee37cda2ccdcb92950d6a3c2893aa280e540671c2cd0f3b1d9" + +[[package]] +name = "num-traits" +version = "0.2.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841" +dependencies = [ + "autocfg", +] + +[[package]] +name = "object" +version = "0.36.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "62948e14d923ea95ea2c7c86c71013138b66525b86bdc08d2dcc262bdb497b87" +dependencies = [ + "memchr", +] + +[[package]] +name = "once_cell" +version = "1.21.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42f5e15c9953c5e4ccceeb2e7382a716482c34515315f7b03532b8b4e8393d2d" + +[[package]] +name = "overload" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39" + +[[package]] +name = "parking" +version = "2.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f38d5652c16fde515bb1ecef450ab0f6a219d619a7274976324d5e377f7dceba" + +[[package]] +name = "paste" +version = "1.0.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "57c0d7b74b563b49d38dae00a0c37d4d6de9b432382b2892f0574ddcae73fd0a" + +[[package]] +name = "percent-encoding" +version = "2.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" + +[[package]] +name = "pin-project" +version = "1.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"677f1add503faace112b9f1373e43e9e054bfdd22ff1a63c1bc485eaec6a6a8a" +dependencies = [ + "pin-project-internal", +] + +[[package]] +name = "pin-project-internal" +version = "1.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6e918e4ff8c4549eb882f14b3a4bc8c8bc93de829416eacf579f1207a8fbf861" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.100", +] + +[[package]] +name = "pin-project-lite" +version = "0.2.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3b3cff922bd51709b605d9ead9aa71031d81447142d828eb4a6eba76fe619f9b" + +[[package]] +name = "pin-utils" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" + +[[package]] +name = "powerfmt" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "439ee305def115ba05938db6eb1644ff94165c5ab5e9420d1c1bcedbba909391" + +[[package]] +name = "proc-macro-crate" +version = "3.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "edce586971a4dfaa28950c6f18ed55e0406c1ab88bbce2c6f6293a7aaba73d35" +dependencies = [ + "toml_edit", +] + +[[package]] +name = "proc-macro2" +version = "1.0.95" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "02b3e5e68a3a1a02aad3ec490a98007cbc13c37cbe84a3cd7b8e406d76e7f778" +dependencies = [ + "unicode-ident", +] + +[[package]] +name = "quote" +version = "1.0.40" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1885c039570dc00dcb4ff087a89e185fd56bae234ddc7f056a945bf36467248d" +dependencies = [ + "proc-macro2", +] + +[[package]] +name = "regex" +version = "1.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b544ef1b4eac5dc2db33ea63606ae9ffcfac26c1416a2806ae0bf5f56b201191" +dependencies = [ + "aho-corasick", + "memchr", + "regex-automata 0.4.9", + "regex-syntax 0.8.5", +] + +[[package]] +name = "regex-automata" +version = "0.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c230d73fb8d8c1b9c0b3135c5142a8acee3a0558fb8db5cf1cb65f8d7862132" +dependencies = [ + "regex-syntax 0.6.29", +] + +[[package]] +name = "regex-automata" +version = "0.4.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "809e8dc61f6de73b46c85f4c96486310fe304c434cfa43669d7b40f711150908" +dependencies = [ + "aho-corasick", + "memchr", + "regex-syntax 0.8.5", +] + +[[package]] +name = "regex-syntax" +version = "0.6.29" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" + +[[package]] +name = "regex-syntax" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c" + +[[package]] +name = "rustc-demangle" +version = "0.1.24" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "719b953e2095829ee67db738b3bfa9fa368c94900df327b3f07fe6e794d2fe1f" + +[[package]] +name = "rustversion" +version = "1.0.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eded382c5f5f786b989652c49544c4877d9f015cc22e145a5ea8ea66c2921cd2" + +[[package]] +name = "ryu" +version = "1.0.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "28d3b2b1366ec20994f1fd18c3c594f05c5dd4bc44d8bb0c1c632c8d6829481f" + 
+[[package]] +name = "same-file" +version = "1.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502" +dependencies = [ + "winapi-util", +] + +[[package]] +name = "scoped-tls" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e1cf6437eb19a8f4a6cc0f7dca544973b0b78843adbfeb3683d1a94a0024a294" + +[[package]] +name = "send_wrapper" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cd0b0ec5f1c1ca621c432a25813d8d60c88abe6d3e08a3eb9cf37d97a0fe3d73" + +[[package]] +name = "serde" +version = "1.0.219" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5f0e2c6ed6606019b4e29e69dbaba95b11854410e5347d525002456dbbb786b6" +dependencies = [ + "serde_derive", +] + +[[package]] +name = "serde-wasm-bindgen" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f3b143e2833c57ab9ad3ea280d21fd34e285a42837aeb0ee301f4f41890fa00e" +dependencies = [ + "js-sys", + "serde", + "wasm-bindgen", +] + +[[package]] +name = "serde_derive" +version = "1.0.219" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b0276cf7f2c73365f7157c8123c21cd9a50fbbd844757af28ca1f5925fc2a00" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.100", +] + +[[package]] +name = "serde_json" +version = "1.0.140" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "20068b6e96dc6c9bd23e01df8827e6c7e1f2fddd43c21810382803c136b99373" +dependencies = [ + "itoa", + "memchr", + "ryu", + "serde", +] + +[[package]] +name = "serde_urlencoded" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3491c14715ca2294c4d6a88f15e84739788c1d030eed8c110436aafdaa2f3fd" +dependencies = [ + "form_urlencoded", + "itoa", + "ryu", + "serde", +] + +[[package]] +name = "sharded-slab" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f40ca3c46823713e0d4209592e8d6e826aa57e928f09752619fc696c499637f6" +dependencies = [ + "lazy_static", +] + +[[package]] +name = "shlex" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" + +[[package]] +name = "signal-hook-registry" +version = "1.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a9e9e0b4211b72e7b8b6e85c807d36c212bdb33ea8587f7569562a84df5465b1" +dependencies = [ + "libc", +] + +[[package]] +name = "slab" +version = "0.4.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f92a496fb766b417c996b9c5e57daf2f7ad3b0bebe1ccfca4856390e3d3bb67" +dependencies = [ + "autocfg", +] + +[[package]] +name = "smallvec" +version = "1.15.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8917285742e9f3e1683f0a9c4e6b57960b7314d0b08d30d1ecd426713ee2eee9" + +[[package]] +name = "snafu" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "223891c85e2a29c3fe8fb900c1fae5e69c2e42415e3177752e8718475efa5019" +dependencies = [ + "snafu-derive", +] + +[[package]] +name = "snafu-derive" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "03c3c6b7927ffe7ecaa769ee0e3994da3b8cafc8f444578982c83ecb161af917" +dependencies = [ + "heck", + "proc-macro2", + "quote", + 
"syn 2.0.100", +] + +[[package]] +name = "socket2" +version = "0.5.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4f5fd57c80058a56cf5c777ab8a126398ece8e442983605d280a44ce79d0edef" +dependencies = [ + "libc", + "windows-sys 0.52.0", +] + +[[package]] +name = "spin" +version = "0.9.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67" + +[[package]] +name = "syn" +version = "1.0.109" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] +name = "syn" +version = "2.0.100" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b09a44accad81e1ba1cd74a32461ba89dee89095ba17b32f5d03683b1b1fc2a0" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] +name = "system-configuration" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3c879d448e9d986b661742763247d3693ed13609438cf3d006f51f5368a5ba6b" +dependencies = [ + "bitflags 2.9.0", + "core-foundation", + "system-configuration-sys", +] + +[[package]] +name = "system-configuration-sys" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e1d1b10ced5ca923a1fcb8d03e96b8d3268065d724548c0211415ff6ac6bac4" +dependencies = [ + "core-foundation-sys", + "libc", +] + +[[package]] +name = "testresult" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "614b328ff036a4ef882c61570f72918f7e9c5bee1da33f8e7f91e01daee7e56c" + +[[package]] +name = "thiserror" +version = "1.0.69" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6aaf5339b578ea85b50e080feb250a3e8ae8cfcdff9a461c9ec2904bc923f52" +dependencies = [ + "thiserror-impl 1.0.69", +] + +[[package]] +name = "thiserror" +version = "2.0.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "567b8a2dae586314f7be2a752ec7474332959c6460e02bde30d702a66d488708" +dependencies = [ + "thiserror-impl 2.0.12", +] + +[[package]] +name = "thiserror-impl" +version = "1.0.69" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.100", +] + +[[package]] +name = "thiserror-impl" +version = "2.0.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f7cf42b4507d8ea322120659672cf1b9dbb93f8f2d4ecfd6e51350ff5b17a1d" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.100", +] + +[[package]] +name = "thread_local" +version = "1.1.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b9ef9bad013ada3808854ceac7b46812a6465ba368859a37e2100283d2d719c" +dependencies = [ + "cfg-if", + "once_cell", +] + +[[package]] +name = "time" +version = "0.3.41" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a7619e19bc266e0f9c5e6686659d394bc57973859340060a69221e57dbc0c40" +dependencies = [ + "deranged", + "num-conv", + "powerfmt", + "serde", + "time-core", +] + +[[package]] +name = "time-core" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c9e9a38711f559d9e3ce1cdb06dd7c5b8ea546bc90052da6d06bb76da74bb07c" + +[[package]] +name 
= "tokio" +version = "1.44.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e6b88822cbe49de4185e3a4cbf8321dd487cf5fe0c5c65695fef6346371e9c48" +dependencies = [ + "backtrace", + "bytes", + "libc", + "mio", + "pin-project-lite", + "signal-hook-registry", + "socket2", + "tokio-macros", + "windows-sys 0.52.0", +] + +[[package]] +name = "tokio-macros" +version = "2.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6e06d43f1345a3bcd39f6a56dbb7dcab2ba47e68e8ac134855e7e2bdbaf8cab8" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.100", +] + +[[package]] +name = "tokio-util" +version = "0.7.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6b9590b93e6fcc1739458317cccd391ad3955e2bde8913edf6f95f9e65a8f034" +dependencies = [ + "bytes", + "futures-core", + "futures-sink", + "futures-util", + "hashbrown 0.14.5", + "pin-project-lite", + "tokio", +] + +[[package]] +name = "toml_datetime" +version = "0.6.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0dd7358ecb8fc2f8d014bf86f6f638ce72ba252a2c3a2572f2a795f1d23efb41" + +[[package]] +name = "toml_edit" +version = "0.22.24" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "17b4795ff5edd201c7cd6dca065ae59972ce77d1b80fa0a84d94950ece7d1474" +dependencies = [ + "indexmap", + "toml_datetime", + "winnow", +] + +[[package]] +name = "tracing" +version = "0.1.41" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "784e0ac535deb450455cbfa28a6f0df145ea1bb7ae51b821cf5e7927fdcfbdd0" +dependencies = [ + "log", + "pin-project-lite", + "tracing-attributes", + "tracing-core", +] + +[[package]] +name = "tracing-attributes" +version = "0.1.28" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "395ae124c09f9e6918a2310af6038fba074bcf474ac352496d5910dd59a2226d" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.100", +] + +[[package]] +name = "tracing-core" +version = "0.1.33" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e672c95779cf947c5311f83787af4fa8fffd12fb27e4993211a84bdfd9610f9c" +dependencies = [ + "once_cell", + "valuable", +] + +[[package]] +name = "tracing-log" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ee855f1f400bd0e5c02d150ae5de3840039a3f54b025156404e34c23c03f47c3" +dependencies = [ + "log", + "once_cell", + "tracing-core", +] + +[[package]] +name = "tracing-subscriber" +version = "0.3.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e8189decb5ac0fa7bc8b96b7cb9b2701d60d48805aca84a238004d665fcc4008" +dependencies = [ + "matchers", + "nu-ansi-term", + "once_cell", + "regex", + "sharded-slab", + "smallvec", + "thread_local", + "tracing", + "tracing-core", + "tracing-log", +] + +[[package]] +name = "tracing-subscriber-wasm" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "79804e80980173c6c8e53d98508eb24a2dbc4ee17a3e8d2ca8e5bad6bf13a898" +dependencies = [ + "gloo", + "tracing", + "tracing-subscriber", +] + +[[package]] +name = "unicode-ident" +version = "1.0.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a5f39404a5da50712a4c1eecf25e90dd62b613502b7e925fd4e4d19b5c96512" + +[[package]] +name = "unicode-xid" +version = "0.2.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"ebc1c04c71510c7f702b52b7c350734c9ff1295c464a03335b00bb84fc54f853" + +[[package]] +name = "valuable" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba73ea9cf16a25df0c8caa16c51acb937d5712a8429db78a3ee29d5dcacd3a65" + +[[package]] +name = "walkdir" +version = "2.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "29790946404f91d9c5d06f9874efddea1dc06c5efe94541a7d6863108e3a5e4b" +dependencies = [ + "same-file", + "winapi-util", +] + +[[package]] +name = "wasi" +version = "0.11.0+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" + +[[package]] +name = "wasm-bindgen" +version = "0.2.100" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1edc8929d7499fc4e8f0be2262a241556cfc54a0bea223790e71446f2aab1ef5" +dependencies = [ + "cfg-if", + "once_cell", + "rustversion", + "wasm-bindgen-macro", +] + +[[package]] +name = "wasm-bindgen-backend" +version = "0.2.100" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2f0a0651a5c2bc21487bde11ee802ccaf4c51935d0d3d42a6101f98161700bc6" +dependencies = [ + "bumpalo", + "log", + "proc-macro2", + "quote", + "syn 2.0.100", + "wasm-bindgen-shared", +] + +[[package]] +name = "wasm-bindgen-futures" +version = "0.4.50" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "555d470ec0bc3bb57890405e5d4322cc9ea83cebb085523ced7be4144dac1e61" +dependencies = [ + "cfg-if", + "js-sys", + "once_cell", + "wasm-bindgen", + "web-sys", +] + +[[package]] +name = "wasm-bindgen-macro" +version = "0.2.100" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7fe63fc6d09ed3792bd0897b314f53de8e16568c2b3f7982f468c0bf9bd0b407" +dependencies = [ + "quote", + "wasm-bindgen-macro-support", +] + +[[package]] +name = "wasm-bindgen-macro-support" +version = "0.2.100" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ae87ea40c9f689fc23f209965b6fb8a99ad69aeeb0231408be24920604395de" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.100", + "wasm-bindgen-backend", + "wasm-bindgen-shared", +] + +[[package]] +name = "wasm-bindgen-shared" +version = "0.2.100" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1a05d73b933a847d6cccdda8f838a22ff101ad9bf93e33684f39c1f5f0eece3d" +dependencies = [ + "unicode-ident", +] + +[[package]] +name = "wasm-bindgen-test" +version = "0.3.50" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "66c8d5e33ca3b6d9fa3b4676d774c5778031d27a578c2b007f905acf816152c3" +dependencies = [ + "js-sys", + "minicov", + "wasm-bindgen", + "wasm-bindgen-futures", + "wasm-bindgen-test-macro", +] + +[[package]] +name = "wasm-bindgen-test-macro" +version = "0.3.50" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "17d5042cc5fa009658f9a7333ef24291b1291a25b6382dd68862a7f3b969f69b" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.100", +] + +[[package]] +name = "web-sys" +version = "0.3.77" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "33b6dd2ef9186f1f2072e409e99cd22a975331a6b3591b12c764e0e55c60d5d2" +dependencies = [ + "js-sys", + "wasm-bindgen", +] + +[[package]] +name = "web-time" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"5a6580f308b1fad9207618087a65c04e7a10bc77e02c8e84e9b00dd4b12fa0bb" +dependencies = [ + "js-sys", + "wasm-bindgen", +] + +[[package]] +name = "winapi" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" +dependencies = [ + "winapi-i686-pc-windows-gnu", + "winapi-x86_64-pc-windows-gnu", +] + +[[package]] +name = "winapi-i686-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" + +[[package]] +name = "winapi-util" +version = "0.1.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cf221c93e13a30d793f7645a0e7762c55d169dbb0a49671918a2319d289b10bb" +dependencies = [ + "windows-sys 0.59.0", +] + +[[package]] +name = "winapi-x86_64-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" + +[[package]] +name = "windows" +version = "0.58.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dd04d41d93c4992d421894c18c8b43496aa748dd4c081bac0dc93eb0489272b6" +dependencies = [ + "windows-core 0.58.0", + "windows-targets 0.52.6", +] + +[[package]] +name = "windows" +version = "0.59.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f919aee0a93304be7f62e8e5027811bbba96bcb1de84d6618be56e43f8a32a1" +dependencies = [ + "windows-core 0.59.0", + "windows-targets 0.53.0", +] + +[[package]] +name = "windows-core" +version = "0.58.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ba6d44ec8c2591c134257ce647b7ea6b20335bf6379a27dac5f1641fcf59f99" +dependencies = [ + "windows-implement 0.58.0", + "windows-interface 0.58.0", + "windows-result 0.2.0", + "windows-strings 0.1.0", + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-core" +version = "0.59.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "810ce18ed2112484b0d4e15d022e5f598113e220c53e373fb31e67e21670c1ce" +dependencies = [ + "windows-implement 0.59.0", + "windows-interface 0.59.1", + "windows-result 0.3.2", + "windows-strings 0.3.1", + "windows-targets 0.53.0", +] + +[[package]] +name = "windows-core" +version = "0.61.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4763c1de310c86d75a878046489e2e5ba02c649d185f21c67d4cf8a56d098980" +dependencies = [ + "windows-implement 0.60.0", + "windows-interface 0.59.1", + "windows-link", + "windows-result 0.3.2", + "windows-strings 0.4.0", +] + +[[package]] +name = "windows-implement" +version = "0.58.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2bbd5b46c938e506ecbce286b6628a02171d56153ba733b6c741fc627ec9579b" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.100", +] + +[[package]] +name = "windows-implement" +version = "0.59.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "83577b051e2f49a058c308f17f273b570a6a758386fc291b5f6a934dd84e48c1" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.100", +] + +[[package]] +name = "windows-implement" +version = "0.60.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a47fddd13af08290e67f4acabf4b459f647552718f683a7b415d290ac744a836" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.100", +] + +[[package]] +name = 
"windows-interface" +version = "0.58.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "053c4c462dc91d3b1504c6fe5a726dd15e216ba718e84a0e46a88fbe5ded3515" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.100", +] + +[[package]] +name = "windows-interface" +version = "0.59.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bd9211b69f8dcdfa817bfd14bf1c97c9188afa36f4750130fcdf3f400eca9fa8" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.100", +] + +[[package]] +name = "windows-link" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "76840935b766e1b0a05c0066835fb9ec80071d4c09a16f6bd5f7e655e3c14c38" + +[[package]] +name = "windows-result" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d1043d8214f791817bab27572aaa8af63732e11bf84aa21a45a78d6c317ae0e" +dependencies = [ + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-result" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c64fd11a4fd95df68efcfee5f44a294fe71b8bc6a91993e2791938abcc712252" +dependencies = [ + "windows-link", +] + +[[package]] +name = "windows-strings" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4cd9b125c486025df0eabcb585e62173c6c9eddcec5d117d3b6e8c30e2ee4d10" +dependencies = [ + "windows-result 0.2.0", + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-strings" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "87fa48cc5d406560701792be122a10132491cff9d0aeb23583cc2dcafc847319" +dependencies = [ + "windows-link", +] + +[[package]] +name = "windows-strings" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a2ba9642430ee452d5a7aa78d72907ebe8cfda358e8cb7918a2050581322f97" +dependencies = [ + "windows-link", +] + +[[package]] +name = "windows-sys" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" +dependencies = [ + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-sys" +version = "0.59.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b" +dependencies = [ + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-targets" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973" +dependencies = [ + "windows_aarch64_gnullvm 0.52.6", + "windows_aarch64_msvc 0.52.6", + "windows_i686_gnu 0.52.6", + "windows_i686_gnullvm 0.52.6", + "windows_i686_msvc 0.52.6", + "windows_x86_64_gnu 0.52.6", + "windows_x86_64_gnullvm 0.52.6", + "windows_x86_64_msvc 0.52.6", +] + +[[package]] +name = "windows-targets" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b1e4c7e8ceaaf9cb7d7507c974735728ab453b67ef8f18febdd7c11fe59dca8b" +dependencies = [ + "windows_aarch64_gnullvm 0.53.0", + "windows_aarch64_msvc 0.53.0", + "windows_i686_gnu 0.53.0", + "windows_i686_gnullvm 0.53.0", + "windows_i686_msvc 0.53.0", + "windows_x86_64_gnu 0.53.0", + "windows_x86_64_gnullvm 0.53.0", + "windows_x86_64_msvc 0.53.0", +] + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.52.6" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "86b8d5f90ddd19cb4a147a5fa63ca848db3df085e25fee3cc10b39b6eebae764" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c7651a1f62a11b8cbd5e0d42526e55f2c99886c77e007179efff86c2b137e66c" + +[[package]] +name = "windows_i686_gnu" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" + +[[package]] +name = "windows_i686_gnu" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c1dc67659d35f387f5f6c479dc4e28f1d4bb90ddd1a5d3da2e5d97b42d6272c3" + +[[package]] +name = "windows_i686_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" + +[[package]] +name = "windows_i686_gnullvm" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ce6ccbdedbf6d6354471319e781c0dfef054c81fbc7cf83f338a4296c0cae11" + +[[package]] +name = "windows_i686_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" + +[[package]] +name = "windows_i686_msvc" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "581fee95406bb13382d2f65cd4a908ca7b1e4c2f1917f143ba16efe98a589b5d" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2e55b5ac9ea33f2fc1716d1742db15574fd6fc8dadc51caab1c16a3d3b4190ba" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0a6e035dd0599267ce1ee132e51c27dd29437f63325753051e71dd9e42406c57" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "271414315aff87387382ec3d271b52d7ae78726f5d44ac98b4f4030c91880486" + +[[package]] +name = "winnow" +version = "0.7.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "63d3fcd9bba44b03821e7d699eeee959f3126dcc4aa8e4ae18ec617c2a5cea10" +dependencies = [ + "memchr", +] + +[[package]] +name = "wmi" +version 
= "0.14.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7787dacdd8e71cbc104658aade4009300777f9b5fda6a75f19145fedb8a18e71" +dependencies = [ + "chrono", + "futures", + "log", + "serde", + "thiserror 2.0.12", + "windows 0.59.0", + "windows-core 0.59.0", +] diff --git a/patches/netwatch/Cargo.toml b/patches/netwatch/Cargo.toml new file mode 100644 index 0000000000..64a392179b --- /dev/null +++ b/patches/netwatch/Cargo.toml @@ -0,0 +1,201 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g., crates.io) dependencies. +# +# If you are reading this file be aware that the original Cargo.toml +# will likely look very different (and much more reasonable). +# See Cargo.toml.orig for the original contents. + +[package] +edition = "2021" +rust-version = "1.81" +name = "netwatch" +version = "0.5.0" +authors = ["n0 team"] +build = "build.rs" +autolib = false +autobins = false +autoexamples = false +autotests = false +autobenches = false +description = "Cross-platform monitoring for network interface changes" +readme = "README.md" +keywords = [ + "networking", + "interfaces", +] +license = "MIT OR Apache-2.0" +repository = "https://github.com/n0-computer/net-tools" + +[package.metadata.docs.rs] +all-features = true +rustdoc-args = [ + "--cfg", + "iroh_docsrs", +] + +[lib] +name = "netwatch" +path = "src/lib.rs" + +[[test]] +name = "smoke" +path = "tests/smoke.rs" + +[dependencies.atomic-waker] +version = "1.1.2" + +[dependencies.bytes] +version = "1.7" + +[dependencies.n0-future] +version = "0.1.3" + +[dependencies.nested_enum_utils] +version = "0.2.0" + +[dependencies.snafu] +version = "0.8.5" + +[dependencies.time] +version = "0.3.20" + +[dependencies.tokio] +version = "1" +features = [ + "io-util", + "macros", + "sync", + "time", +] + +[dependencies.tokio-util] +version = "0.7" +features = ["rt"] + +[dependencies.tracing] +version = "0.1" + +[dev-dependencies.testresult] +version = "0.4.1" + +[dev-dependencies.tracing-subscriber] +version = "0.3" +features = ["env-filter"] + +[build-dependencies.cfg_aliases] +version = "0.2.1" + +[target.'cfg(all(target_family = "wasm", target_os = "unknown"))'.dependencies.derive_more] +version = "1.0.0" +features = ["display"] + +[target.'cfg(all(target_family = "wasm", target_os = "unknown"))'.dependencies.js-sys] +version = "0.3" + +[target.'cfg(all(target_family = "wasm", target_os = "unknown"))'.dependencies.web-sys] +version = "0.3.70" +features = [ + "EventListener", + "EventTarget", +] + +[target.'cfg(all(target_family = "wasm", target_os = "unknown"))'.dev-dependencies.tracing-subscriber-wasm] +version = "0.1.0" + +[target.'cfg(all(target_family = "wasm", target_os = "unknown"))'.dev-dependencies.wasm-bindgen-test] +version = "0.3" + +[target.'cfg(any(target_os = "linux", target_os = "android"))'.dependencies.netlink-packet-core] +version = "0.7.0" + +[target.'cfg(any(target_os = "linux", target_os = "android"))'.dependencies.netlink-packet-route] +version = "0.23.0" + +[target.'cfg(any(target_os = "linux", target_os = "android"))'.dependencies.netlink-proto] +version = "0.11.5" + +[target.'cfg(any(target_os = "linux", target_os = "android"))'.dependencies.netlink-sys] +version = "0.8.7" + +[target.'cfg(not(all(target_family = "wasm", target_os = "unknown")))'.dependencies.libc] +version = "0.2.139" + 
+[target.'cfg(not(all(target_family = "wasm", target_os = "unknown")))'.dependencies.netdev] +version = "0.31.0" + +[target.'cfg(not(all(target_family = "wasm", target_os = "unknown")))'.dependencies.quinn-udp] +version = "0.5.5" +package = "iroh-quinn-udp" + +[target.'cfg(not(all(target_family = "wasm", target_os = "unknown")))'.dependencies.socket2] +version = "0.5.3" + +[target.'cfg(not(all(target_family = "wasm", target_os = "unknown")))'.dependencies.tokio] +version = "1" +features = [ + "io-util", + "macros", + "sync", + "rt", + "net", + "fs", + "io-std", + "signal", + "process", + "time", +] + +[target.'cfg(not(all(target_family = "wasm", target_os = "unknown")))'.dev-dependencies.tokio] +version = "1" +features = [ + "io-util", + "sync", + "rt", + "net", + "fs", + "macros", + "time", + "test-util", +] + +[target.'cfg(target_os = "android")'.dependencies.derive_more] +version = "1.0.0" +features = ["display"] + +[target.'cfg(target_os = "windows")'.dependencies.derive_more] +version = "1.0.0" +features = ["debug"] + +[target.'cfg(target_os = "windows")'.dependencies.serde] +version = "1" +features = ["derive"] + +[target.'cfg(target_os = "windows")'.dependencies.windows] +version = "0.59" +features = [ + "Win32_NetworkManagement_IpHelper", + "Win32_Foundation", + "Win32_NetworkManagement_Ndis", + "Win32_Networking_WinSock", +] + +[target.'cfg(target_os = "windows")'.dependencies.windows-result] +version = "0.3" + +[target.'cfg(target_os = "windows")'.dependencies.wmi] +version = "0.14" + +[lints.clippy] +unused-async = "warn" + +[lints.rust] +missing_debug_implementations = "warn" + +[lints.rust.unexpected_cfgs] +level = "warn" +priority = 0 +check-cfg = ["cfg(iroh_docsrs)"] diff --git a/patches/netwatch/Cargo.toml.orig b/patches/netwatch/Cargo.toml.orig new file mode 100644 index 0000000000..2925b87429 --- /dev/null +++ b/patches/netwatch/Cargo.toml.orig @@ -0,0 +1,102 @@ +[package] +name = "netwatch" +version = "0.5.0" +readme = "README.md" +description = "Cross-platform monitoring for network interface changes" +license = "MIT OR Apache-2.0" +authors = ["n0 team"] +repository = "https://github.com/n0-computer/net-tools" +keywords = ["networking", "interfaces"] +edition = "2021" + +# Sadly this also needs to be updated in .github/workflows/ci.yml +rust-version = "1.81" + +[lints] +workspace = true + +[dependencies] +atomic-waker = "1.1.2" +bytes = "1.7" +n0-future = "0.1.3" +nested_enum_utils = "0.2.0" +snafu = "0.8.5" +time = "0.3.20" +tokio = { version = "1", features = [ + "io-util", + "macros", + "sync", + "time", +] } +tokio-util = { version = "0.7", features = ["rt"] } +tracing = "0.1" + +# non-browser dependencies +[target.'cfg(not(all(target_family = "wasm", target_os = "unknown")))'.dependencies] +quinn-udp = { package = "iroh-quinn-udp", version = "0.5.5" } +libc = "0.2.139" +netdev = "0.31.0" +socket2 = "0.5.3" +tokio = { version = "1", features = [ + "io-util", + "macros", + "sync", + "rt", + "net", + "fs", + "io-std", + "signal", + "process", + "time", +] } + +[target.'cfg(any(target_os = "linux", target_os = "android"))'.dependencies] +netlink-packet-route = "0.23.0" +netlink-packet-core = "0.7.0" +netlink-proto = "0.11.5" +netlink-sys = "0.8.7" + +[target.'cfg(target_os = "android")'.dependencies] +derive_more = { version = "1.0.0", features = ["display"] } + +[target.'cfg(target_os = "windows")'.dependencies] +wmi = "0.14" +windows = { version = "0.59", features = ["Win32_NetworkManagement_IpHelper", "Win32_Foundation", "Win32_NetworkManagement_Ndis", 
"Win32_Networking_WinSock"] } +windows-result = "0.3" +serde = { version = "1", features = ["derive"] } +derive_more = { version = "1.0.0", features = ["debug"] } + +# wasm-in-browser dependencies +[target.'cfg(all(target_family = "wasm", target_os = "unknown"))'.dependencies] +derive_more = { version = "1.0.0", features = ["display"] } +js-sys = "0.3" +web-sys = { version = "0.3.70", features = ["EventListener", "EventTarget"] } + +[dev-dependencies] +testresult = "0.4.1" +tracing-subscriber = { version = "0.3", features = ["env-filter"] } + +# *non*-wasm-in-browser test/dev dependencies +[target.'cfg(not(all(target_family = "wasm", target_os = "unknown")))'.dev-dependencies] +tokio = { version = "1", features = [ + "io-util", + "sync", + "rt", + "net", + "fs", + "macros", + "time", + "test-util", +] } + +# wasm-in-browser test/dev dependencies +[target.'cfg(all(target_family = "wasm", target_os = "unknown"))'.dev-dependencies] +tracing-subscriber-wasm = "0.1.0" +wasm-bindgen-test = "0.3" + +[build-dependencies] +cfg_aliases = { version = "0.2.1" } + +[package.metadata.docs.rs] +all-features = true +rustdoc-args = ["--cfg", "iroh_docsrs"] diff --git a/patches/netwatch/README.md b/patches/netwatch/README.md new file mode 100644 index 0000000000..e0c8f39b05 --- /dev/null +++ b/patches/netwatch/README.md @@ -0,0 +1,24 @@ +# Netwatch + +`netwatch` is a cross-platform library for monitoring of networking interfaces +and route changes. + +Used in [iroh](https://github.com/n0-computer/iroh), created with love by the +[n0 team](https://n0.computer/). + +# License + +This project is licensed under either of + + * Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or + http://www.apache.org/licenses/LICENSE-2.0) + * MIT license ([LICENSE-MIT](LICENSE-MIT) or + http://opensource.org/licenses/MIT) + +at your option. + +### Contribution + +Unless you explicitly state otherwise, any contribution intentionally submitted +for inclusion in this project by you, as defined in the Apache-2.0 license, +shall be dual licensed as above, without any additional terms or conditions. diff --git a/patches/netwatch/build.rs b/patches/netwatch/build.rs new file mode 100644 index 0000000000..39db4fa856 --- /dev/null +++ b/patches/netwatch/build.rs @@ -0,0 +1,11 @@ +// Copyright 2022-2024 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT +use cfg_aliases::cfg_aliases; + +fn main() { + // Setup cfg aliases + cfg_aliases! { + // Convenience aliases + wasm_browser: { all(target_family = "wasm", target_os = "unknown") }, + } +} diff --git a/patches/netwatch/src/interfaces.rs b/patches/netwatch/src/interfaces.rs new file mode 100644 index 0000000000..fb8f77b01d --- /dev/null +++ b/patches/netwatch/src/interfaces.rs @@ -0,0 +1,412 @@ +// Copyright 2022-2024 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT +//! Contains helpers for looking up system network interfaces. 
+ +use std::{collections::HashMap, fmt, net::IpAddr}; + +#[cfg(any( + target_os = "freebsd", + target_os = "openbsd", + target_os = "netbsd", + target_os = "macos", + target_os = "ios" +))] +pub(super) mod bsd; +#[cfg(any(target_os = "linux", target_os = "android"))] +mod linux; +#[cfg(target_os = "windows")] +mod windows; + +pub(crate) use netdev::ipnet::{Ipv4Net, Ipv6Net}; + +#[cfg(any( + target_os = "freebsd", + target_os = "openbsd", + target_os = "netbsd", + target_os = "macos", + target_os = "ios" +))] +use self::bsd::default_route; +#[cfg(any(target_os = "linux", target_os = "android"))] +use self::linux::default_route; +#[cfg(target_os = "windows")] +use self::windows::default_route; +use crate::ip::{is_private_v6, is_up}; + +/// Represents a network interface. +#[derive(Debug)] +pub struct Interface { + iface: netdev::interface::Interface, +} + +impl fmt::Display for Interface { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!( + f, + "{}. {} {:?} ipv4={:?} ipv6={:?}", + self.iface.index, self.iface.name, self.iface.if_type, self.iface.ipv4, self.iface.ipv6 + ) + } +} + +impl PartialEq for Interface { + fn eq(&self, other: &Self) -> bool { + self.iface.index == other.iface.index + && self.iface.name == other.iface.name + && self.iface.flags == other.iface.flags + && self.iface.mac_addr.as_ref().map(|a| a.octets()) + == other.iface.mac_addr.as_ref().map(|a| a.octets()) + } +} + +impl Eq for Interface {} + +impl Interface { + /// Is this interface up? + pub(crate) fn is_up(&self) -> bool { + is_up(&self.iface) + } + + /// The name of the interface. + pub(crate) fn name(&self) -> &str { + &self.iface.name + } + + /// A list of all ip addresses of this interface. + pub fn addrs(&self) -> impl Iterator<Item = IpNet> + '_ { + self.iface + .ipv4 + .iter() + .cloned() + .map(IpNet::V4) + .chain(self.iface.ipv6.iter().cloned().map(IpNet::V6)) + } + + /// Creates a fake interface for usage in tests. + /// + /// This allows tests to be independent of the host interfaces. + pub(crate) fn fake() -> Self { + use std::net::Ipv4Addr; + + use netdev::{interface::InterfaceType, mac::MacAddr, NetworkDevice}; + + Self { + iface: netdev::Interface { + index: 2, + name: String::from("wifi0"), + friendly_name: None, + description: None, + if_type: InterfaceType::Ethernet, + mac_addr: Some(MacAddr::new(2, 3, 4, 5, 6, 7)), + ipv4: vec![Ipv4Net::new(Ipv4Addr::new(192, 168, 0, 189), 24).unwrap()], + ipv6: vec![], + flags: 69699, + transmit_speed: None, + receive_speed: None, + gateway: Some(NetworkDevice { + mac_addr: MacAddr::new(2, 3, 4, 5, 6, 8), + ipv4: vec![Ipv4Addr::from([192, 168, 0, 1])], + ipv6: vec![], + }), + dns_servers: vec![], + default: false, + }, + } + } +} + +/// Structure of an IP network, either IPv4 or IPv6. +#[derive(Clone, Debug)] +pub enum IpNet { + /// Structure of IPv4 Network. + V4(Ipv4Net), + /// Structure of IPv6 Network. + V6(Ipv6Net), +} + +impl PartialEq for IpNet { + fn eq(&self, other: &Self) -> bool { + match (self, other) { + (IpNet::V4(a), IpNet::V4(b)) => { + a.addr() == b.addr() + && a.prefix_len() == b.prefix_len() + && a.netmask() == b.netmask() + } + (IpNet::V6(a), IpNet::V6(b)) => { + a.addr() == b.addr() + && a.prefix_len() == b.prefix_len() + && a.netmask() == b.netmask() + } + _ => false, + } + } +} +impl Eq for IpNet {} + +impl IpNet { + /// The IP address of this structure.
+ pub fn addr(&self) -> IpAddr { + match self { + IpNet::V4(a) => IpAddr::V4(a.addr()), + IpNet::V6(a) => IpAddr::V6(a.addr()), + } + } +} + +/// Intended to store the state of the machine's network interfaces, routing table, and +/// other network configuration. For now it's pretty basic. +#[derive(Debug, PartialEq, Eq)] +pub struct State { + /// Maps from an interface name to the interface. + pub interfaces: HashMap<String, Interface>, + + /// Whether this machine has an IPv6 Global or Unique Local Address + /// which might provide connectivity. + pub have_v6: bool, + + /// Whether the machine has some non-localhost, non-link-local IPv4 address. + pub have_v4: bool, + + /// Whether the current network interface is considered "expensive", which currently means LTE/etc + /// instead of Wifi. This field is not populated by `get_state`. + pub(crate) is_expensive: bool, + + /// The interface name for the machine's default route. + /// + /// It is not yet populated on all OSes. + /// + /// When set, its value is the map key into `interface` and `interface_ips`. + pub(crate) default_route_interface: Option<String>, + + /// The HTTP proxy to use, if any. + pub(crate) http_proxy: Option<String>, + + /// The URL to the Proxy Autoconfig URL, if applicable. + pub(crate) pac: Option<String>, +} + +impl fmt::Display for State { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let mut ifaces: Vec<_> = self.interfaces.values().collect(); + ifaces.sort_by_key(|iface| iface.iface.index); + for iface in ifaces { + write!(f, "{iface}")?; + if let Some(ref default_if) = self.default_route_interface { + if iface.name() == default_if { + write!(f, " (default)")?; + } + } + if f.alternate() { + writeln!(f)?; + } else { + write!(f, "; ")?; + } + } + Ok(()) + } +} + +impl State { + /// Returns the state of all the current machine's network interfaces. + /// + /// It does not set the returned `State.is_expensive`. The caller can populate that. + pub async fn new() -> Self { + let mut interfaces = HashMap::new(); + let mut have_v6 = false; + let mut have_v4 = false; + + let ifaces = netdev::interface::get_interfaces(); + for iface in ifaces { + let ni = Interface { iface }; + let if_up = ni.is_up(); + let name = ni.iface.name.clone(); + let pfxs: Vec<_> = ni.addrs().collect(); + + if if_up { + for pfx in &pfxs { + if pfx.addr().is_loopback() { + continue; + } + have_v6 |= is_usable_v6(&pfx.addr()); + have_v4 |= is_usable_v4(&pfx.addr()); + } + } + + interfaces.insert(name, ni); + } + + let default_route_interface = default_route_interface().await; + + State { + interfaces, + have_v4, + have_v6, + is_expensive: false, + default_route_interface, + http_proxy: None, + pac: None, + } + } + + /// Creates a fake interface state for usage in tests. + /// + /// This allows tests to be independent of the host interfaces. + pub fn fake() -> Self { + let fake = Interface::fake(); + let ifname = fake.iface.name.clone(); + Self { + interfaces: [(ifname.clone(), fake)].into_iter().collect(), + have_v6: true, + have_v4: true, + is_expensive: false, + default_route_interface: Some(ifname), + http_proxy: None, + pac: None, + } + } +} + +/// Reports whether ip is a usable IPv4 address which should have Internet connectivity. +/// +/// Globally routable and private IPv4 addresses are always Usable, and link local +/// 169.254.x.x addresses are usable in some environments. +fn is_usable_v4(ip: &IpAddr) -> bool { + if !ip.is_ipv4() || ip.is_loopback() { + return false; + } + + true +} + +/// Reports whether ip is a usable IPv6 address which should have Internet connectivity.
+/// +/// Globally routable IPv6 addresses are always Usable, and Unique Local Addresses +/// (fc00::/7) are in some environments used with address translation. +/// +/// We consider all 2000::/3 addresses to be routable, which is the interpretation of +/// +/// as well. However this probably includes some addresses which should not be routed, +/// e.g. documentation addresses. See also +/// for an +/// alternative implementation which is both stricter and laxer in some regards. +fn is_usable_v6(ip: &IpAddr) -> bool { + match ip { + IpAddr::V6(ip) => { + // V6 Global1 2000::/3 + let mask: u16 = 0b1110_0000_0000_0000; + let base: u16 = 0x2000; + let segment1 = ip.segments()[0]; + if (base & mask) == (segment1 & mask) { + return true; + } + + is_private_v6(ip) + } + IpAddr::V4(_) => false, + } +} + +/// The details about a default route. +#[derive(Debug, Clone)] +pub struct DefaultRouteDetails { + /// The interface name. + /// It's like "eth0" (Linux), "Ethernet 2" (Windows), "en0" (macOS). + pub interface_name: String, +} + +impl DefaultRouteDetails { + /// Reads the default route from the current system and returns the details. + pub async fn new() -> Option<Self> { + default_route().await + } +} + +/// Like `DefaultRouteDetails::new` but only returns the interface name. +pub async fn default_route_interface() -> Option<String> { + DefaultRouteDetails::new().await.map(|v| v.interface_name) +} + +/// Likely IPs of the residential router, and the ip address of the current +/// machine using it. +#[derive(Debug, Clone)] +pub struct HomeRouter { + /// Ip of the router. + pub gateway: IpAddr, + /// Our local Ip if known. + pub my_ip: Option<IpAddr>, +} + +impl HomeRouter { + /// Returns the likely IP of the residential router, which will always + /// be a private address, if found. + /// In addition, it returns the IP address of the current machine on + /// the LAN using that gateway. + /// This is used as the destination for UPnP, NAT-PMP, PCP, etc queries.
+ pub fn new() -> Option<Self> { + let gateway = Self::get_default_gateway()?; + let my_ip = netdev::interface::get_local_ipaddr(); + + Some(HomeRouter { gateway, my_ip }) + } + + #[cfg(any( + target_os = "freebsd", + target_os = "openbsd", + target_os = "netbsd", + target_os = "macos", + target_os = "ios" + ))] + fn get_default_gateway() -> Option<IpAddr> { + // netdev doesn't work yet + // See: https://github.com/shellrow/default-net/issues/34 + bsd::likely_home_router() + } + + #[cfg(any(target_os = "linux", target_os = "android", target_os = "windows"))] + fn get_default_gateway() -> Option<IpAddr> { + let gateway = netdev::get_default_gateway().ok()?; + gateway + .ipv4 + .iter() + .cloned() + .map(IpAddr::V4) + .chain(gateway.ipv6.iter().cloned().map(IpAddr::V6)) + .next() + } +} + +#[cfg(test)] +mod tests { + use std::net::Ipv6Addr; + + use super::*; + + #[tokio::test] + async fn test_default_route() { + let default_route = DefaultRouteDetails::new() + .await + .expect("missing default route"); + println!("default_route: {:#?}", default_route); + } + + #[tokio::test] + async fn test_likely_home_router() { + let home_router = HomeRouter::new().expect("missing home router"); + println!("home router: {:#?}", home_router); + } + + #[test] + fn test_is_usable_v6() { + let loopback = Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 0x1); + assert!(!is_usable_v6(&loopback.into())); + + let link_local = Ipv6Addr::new(0xfe80, 0, 0, 0, 0xcbc9, 0x6aff, 0x5b07, 0x4a9e); + assert!(!is_usable_v6(&link_local.into())); + + let relay_use1 = Ipv6Addr::new(0x2a01, 0x4ff, 0xf0, 0xc4a1, 0, 0, 0, 0x1); + assert!(is_usable_v6(&relay_use1.into())); + + let random_2603 = Ipv6Addr::new(0x2603, 0x3ff, 0xf1, 0xc3aa, 0x1, 0x2, 0x3, 0x1); + assert!(is_usable_v6(&random_2603.into())); + } +} diff --git a/patches/netwatch/src/interfaces/bsd.rs b/patches/netwatch/src/interfaces/bsd.rs new file mode 100644 index 0000000000..8efec2a99c --- /dev/null +++ b/patches/netwatch/src/interfaces/bsd.rs @@ -0,0 +1,1120 @@ +// Copyright 2022-2024 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT +//!
Based on + +#![allow(unused)] + +use std::{ + collections::HashMap, + net::{IpAddr, Ipv4Addr, Ipv6Addr}, + sync::LazyLock, +}; + +use libc::{c_int, uintptr_t, AF_INET, AF_INET6, AF_LINK, AF_ROUTE, AF_UNSPEC, CTL_NET}; +#[cfg(any(target_os = "macos", target_os = "ios"))] +use libc::{ + NET_RT_DUMP, RTAX_BRD, RTAX_DST, RTAX_GATEWAY, RTAX_MAX, RTAX_NETMASK, RTA_IFP, RTF_GATEWAY, +}; +use nested_enum_utils::common_fields; +use snafu::{Backtrace, IntoError, OptionExt, Snafu}; +use tracing::warn; + +use super::DefaultRouteDetails; + +#[cfg(target_os = "freebsd")] +mod freebsd; +#[cfg(target_os = "freebsd")] +pub(crate) use self::freebsd::*; +#[cfg(target_os = "netbsd")] +mod netbsd; +#[cfg(target_os = "netbsd")] +pub(crate) use self::netbsd::*; +#[cfg(target_os = "openbsd")] +mod openbsd; +#[cfg(target_os = "openbsd")] +pub(crate) use self::openbsd::*; + +#[cfg(any(target_os = "macos", target_os = "ios"))] +mod macos; +#[cfg(any(target_os = "macos", target_os = "ios"))] +use self::macos::*; + +pub async fn default_route() -> Option { + let idx = default_route_interface_index()?; + let interfaces = netdev::get_interfaces(); + let iface = interfaces.into_iter().find(|i| i.index == idx)?; + + Some(DefaultRouteDetails { + interface_name: iface.name, + }) +} + +pub fn likely_home_router() -> Option { + let rib = fetch_routing_table()?; + let msgs = parse_routing_table(&rib)?; + for rm in msgs { + if !is_default_gateway(&rm) { + continue; + } + + if let Some(gw) = rm.addrs.get(RTAX_GATEWAY as usize) { + if let Addr::Inet4 { ip } = gw { + return Some(IpAddr::V4(*ip)); + } + + if let Addr::Inet6 { ip, .. } = gw { + return Some(IpAddr::V6(*ip)); + } + } + } + None +} + +/// Returns the index of the network interface that +/// owns the default route. It returns the first IPv4 or IPv6 default route it +/// finds (it does not prefer one or the other). +fn default_route_interface_index() -> Option { + // $ netstat -nr + // Routing tables + // Internet: + // Destination Gateway Flags Netif Expire + // default 10.0.0.1 UGSc en0 <-- want this one + // default 10.0.0.1 UGScI en1 + + // From man netstat: + // U RTF_UP Route usable + // G RTF_GATEWAY Destination requires forwarding by intermediary + // S RTF_STATIC Manually added + // c RTF_PRCLONING Protocol-specified generate new routes on use + // I RTF_IFSCOPE Route is associated with an interface scope + + let rib = fetch_routing_table()?; + let msgs = parse_routing_table(&rib)?; + for rm in msgs { + if is_default_gateway(&rm) { + return Some(rm.index as u32); + } + } + None +} + +const V4_DEFAULT: [u8; 4] = [0u8; 4]; +const V6_DEFAULT: [u8; 16] = [0u8; 16]; + +fn is_default_gateway(rm: &RouteMessage) -> bool { + if rm.flags & RTF_GATEWAY as u32 == 0 { + return false; + } + + #[cfg(any(target_os = "macos", target_os = "ios"))] + if rm.flags & libc::RTF_IFSCOPE as u32 != 0 { + return false; + } + + // Addrs is [RTAX_DST, RTAX_GATEWAY, RTAX_NETMASK, ...] + if rm.addrs.len() <= RTAX_NETMASK as usize { + return false; + } + + let Some(dst) = rm.addrs.get(RTAX_DST as usize) else { + return false; + }; + let Some(netmask) = rm.addrs.get(RTAX_NETMASK as usize) else { + return false; + }; + + match (dst, netmask) { + (Addr::Inet4 { ip: dst }, Addr::Inet4 { ip: netmask }) => { + if dst.octets() == V4_DEFAULT && netmask.octets() == V4_DEFAULT { + return true; + } + } + (Addr::Inet6 { ip: dst, .. }, Addr::Inet6 { ip: netmask, .. 
}) => { + if dst.octets() == V6_DEFAULT && netmask.octets() == V6_DEFAULT { + return true; + } + } + _ => {} + } + false +} + +#[cfg(any(target_os = "freebsd", target_os = "openbsd", target_os = "netbsd"))] +fn fetch_routing_table() -> Option> { + match fetch_rib(AF_UNSPEC, libc::NET_RT_DUMP, 0) { + Ok(res) => Some(res), + Err(err) => { + warn!("fetch_rib failed: {:?}", err); + None + } + } +} + +#[cfg(any(target_os = "freebsd", target_os = "openbsd", target_os = "netbsd"))] +fn parse_routing_table(rib: &[u8]) -> Option> { + match parse_rib(libc::NET_RT_IFLIST, rib) { + Ok(res) => { + let res = res + .into_iter() + .filter_map(|m| match m { + WireMessage::Route(r) => Some(r), + _ => None, + }) + .collect(); + Some(res) + } + Err(err) => { + warn!("parse_rib failed: {:?}", err); + None + } + } +} + +#[cfg(any(target_os = "macos", target_os = "ios",))] +fn fetch_routing_table() -> Option> { + const NET_RT_DUMP2: i32 = 7; + match fetch_rib(libc::AF_UNSPEC, NET_RT_DUMP2, 0) { + Ok(res) => Some(res), + Err(err) => { + warn!("fetch_rib failed: {:?}", err); + None + } + } +} + +#[cfg(any(target_os = "macos", target_os = "ios",))] +fn parse_routing_table(rib: &[u8]) -> Option> { + match parse_rib(libc::NET_RT_IFLIST2, rib) { + Ok(res) => { + let res = res + .into_iter() + .filter_map(|m| match m { + WireMessage::Route(r) => Some(r), + _ => None, + }) + .collect(); + Some(res) + } + Err(err) => { + warn!("parse_rib failed: {:?}", err); + None + } + } +} + +#[cfg(any(target_os = "macos", target_os = "ios"))] +const fn is_valid_rib_type(typ: RIBType) -> bool { + const NET_RT_STAT: RIBType = 4; + const NET_RT_TRASH: RIBType = 5; + if typ == NET_RT_STAT || typ == NET_RT_TRASH { + return false; + } + true +} + +#[cfg(any(target_os = "freebsd", target_os = "netbsd"))] +const fn is_valid_rib_type(typ: RIBType) -> bool { + true +} + +#[cfg(target_os = "openbsd")] +const fn is_valid_rib_type(typ: RIBType) -> bool { + if typ == NET_RT_STATS || typ == NET_RT_TABLE { + return false; + } + true +} + +#[derive(Debug, Copy, Clone)] +struct WireFormat { + /// offset of header extension + ext_off: usize, + /// offset of message body + body_off: usize, + typ: MessageType, +} + +#[derive(Debug)] +pub enum WireMessage { + Route(RouteMessage), + Interface(InterfaceMessage), + InterfaceAddr(InterfaceAddrMessage), + InterfaceMulticastAddr(InterfaceMulticastAddrMessage), + InterfaceAnnounce(InterfaceAnnounceMessage), +} + +/// Safely convert a some bytes from a slice into a u16. +fn u16_from_ne_range( + data: &[u8], + range: impl std::slice::SliceIndex<[u8], Output = [u8]>, +) -> Result { + data.get(range) + .and_then(|s| TryInto::<[u8; 2]>::try_into(s).ok()) + .map(u16::from_ne_bytes) + .context(MessageTooShortSnafu) +} + +/// Safely convert some bytes from a slice into a u32. +fn u32_from_ne_range( + data: &[u8], + range: impl std::slice::SliceIndex<[u8], Output = [u8]>, +) -> Result { + data.get(range) + .and_then(|s| TryInto::<[u8; 4]>::try_into(s).ok()) + .map(u32::from_ne_bytes) + .context(MessageTooShortSnafu) +} + +impl WireFormat { + fn parse(&self, _typ: RIBType, data: &[u8]) -> Result, RouteError> { + match self.typ { + #[cfg(any( + target_os = "freebsd", + target_os = "netbsd", + target_os = "macos", + target_os = "ios" + ))] + MessageType::Route => { + snafu::ensure!(data.len() >= self.body_off, MessageTooShortSnafu); + let l = u16_from_ne_range(data, ..2)?; + snafu::ensure!(data.len() >= l as usize, InvalidMessageSnafu); + let attrs: i32 = u32_from_ne_range(data, 12..16)? 
+ .try_into() + .map_err(|_| InvalidMessageSnafu.build())?; + let addrs = parse_addrs(attrs, parse_kernel_inet_addr, &data[self.body_off..])?; + let mut m = RouteMessage { + version: data[2] as _, + r#type: data[3] as _, + flags: u32_from_ne_range(data, 8..12)?, + index: u16_from_ne_range(data, 4..6)?, + id: u32_from_ne_range(data, 16..20)? as _, + seq: u32_from_ne_range(data, 20..24)?, + ext_off: self.ext_off, + error: None, + addrs, + }; + let errno = u32_from_ne_range(data, 28..32)?; + if errno != 0 { + m.error = Some(std::io::Error::from_raw_os_error(errno as _)); + } + + Ok(Some(WireMessage::Route(m))) + } + #[cfg(target_os = "openbsd")] + MessageType::Route => { + snafu::ensure!(data.len() >= self.body_off, MessageTooShortSnafu); + let l = u16_from_ne_range(data, ..2)?; + snafu::ensure!(data.len() >= l as usize, InvalidMessageSnafu); + let ll = u16_from_ne_range(data, 4..6)? as usize; + snafu::ensure!(data.len() >= ll as usize, InvalidMessageSnafu); + + let addrs = parse_addrs( + u32_from_ne_range(data, 12..16)? as _, + parse_kernel_inet_addr, + &data[ll..], + )?; + + let mut m = RouteMessage { + version: data[2] as _, + r#type: data[3] as _, + flags: u32_from_ne_range(data, 16..20)?, + index: u16_from_ne_range(data, 6..8)?, + id: u32_from_ne_range(data, 24..28)? as _, + seq: u32_from_ne_range(data, 28..32)?, + ext_off: self.ext_off, + error: None, + addrs, + }; + let errno = u32_from_ne_range(data, 32..36)?; + if errno != 0 { + m.error = Some(std::io::Error::from_raw_os_error(errno as _)); + } + + Ok(Some(WireMessage::Route(m))) + } + MessageType::Interface => { + snafu::ensure!(data.len() >= self.body_off, MessageTooShortSnafu); + let l = u16_from_ne_range(data, 0..2)?; + snafu::ensure!(data.len() >= l as usize, InvalidMessageSnafu); + + let attrs = u32_from_ne_range(data, 4..8)?; + if attrs as c_int & RTA_IFP == 0 { + return Ok(None); + } + let addr = parse_link_addr(&data[self.body_off..])?; + let name = addr.name().map(|s| s.to_string()); + let m = InterfaceMessage { + version: data[2] as _, + r#type: data[3] as _, + flags: u32_from_ne_range(data, 8..12)? as _, + index: u16_from_ne_range(data, 12..14)? as _, + ext_off: self.ext_off, + addr_rtax_ifp: addr, + name, + }; + + Ok(Some(WireMessage::Interface(m))) + } + MessageType::InterfaceAddr => { + snafu::ensure!(data.len() >= self.body_off, MessageTooShortSnafu); + let l = u16_from_ne_range(data, ..2)?; + snafu::ensure!(data.len() >= l as usize, InvalidMessageSnafu); + + #[cfg(target_os = "netbsd")] + let index = u16_from_ne_range(data, 16..18)?; + #[cfg(not(target_os = "netbsd"))] + let index = u16_from_ne_range(data, 12..14)?; + + let addrs = parse_addrs( + u32_from_ne_range(data, 4..8)? as _, + parse_kernel_inet_addr, + &data[self.body_off..], + )?; + + let m = InterfaceAddrMessage { + version: data[2] as _, + r#type: data[3] as _, + flags: u32_from_ne_range(data, 8..12)? as _, + index: index as _, + addrs, + }; + Ok(Some(WireMessage::InterfaceAddr(m))) + } + MessageType::InterfaceMulticastAddr => { + snafu::ensure!(data.len() >= self.body_off, MessageTooShortSnafu); + let l = u16_from_ne_range(data, ..2)?; + snafu::ensure!(data.len() >= l as usize, InvalidMessageSnafu); + + let addrs = parse_addrs( + u32_from_ne_range(data, 4..8)? as _, + parse_kernel_inet_addr, + &data[self.body_off..], + )?; + let m = InterfaceMulticastAddrMessage { + version: data[2] as _, + r#type: data[3] as _, + flags: u32_from_ne_range(data, 8..12)? as _, + index: u16_from_ne_range(data, 12..14)? 
as _, + addrs, + }; + Ok(Some(WireMessage::InterfaceMulticastAddr(m))) + } + MessageType::InterfaceAnnounce => { + snafu::ensure!(data.len() >= self.body_off, MessageTooShortSnafu); + let l = u16_from_ne_range(data, ..2)?; + snafu::ensure!(data.len() >= l as usize, InvalidMessageSnafu); + + let mut name = String::new(); + for i in 0..16 { + if data[6 + i] != 0 { + continue; + } + name = std::str::from_utf8(&data[6..6 + i]) + .map_err(|_| InvalidAddressSnafu.build())? + .to_string(); + break; + } + + let m = InterfaceAnnounceMessage { + version: data[2] as _, + r#type: data[3] as _, + index: u16_from_ne_range(data, 4..6)? as _, + what: u16_from_ne_range(data, 22..24)? as _, + name, + }; + + Ok(Some(WireMessage::InterfaceAnnounce(m))) + } + } + } +} + +#[derive(Debug, Copy, Clone)] +enum MessageType { + Route, + Interface, + InterfaceAddr, + InterfaceMulticastAddr, + InterfaceAnnounce, +} + +static ROUTING_STACK: LazyLock = LazyLock::new(probe_routing_stack); + +struct RoutingStack { + rtm_version: i32, + kernel_align: usize, + wire_formats: HashMap, +} + +/// Parses b as a routing information base and returns a list of routing messages. +pub fn parse_rib(typ: RIBType, data: &[u8]) -> Result, RouteError> { + snafu::ensure!( + is_valid_rib_type(typ), + InvalidRibTypeSnafu { rib_type: typ } + ); + + let mut msgs = Vec::new(); + let mut nmsgs = 0; + let mut nskips = 0; + let mut b = data; + + while b.len() > 4 { + nmsgs += 1; + let l = u16_from_ne_range(b, ..2)?; + snafu::ensure!(l != 0, InvalidMessageSnafu); + snafu::ensure!(b.len() >= l as usize, MessageTooShortSnafu); + if b[2] as i32 != ROUTING_STACK.rtm_version { + // b = b[l:]; + continue; + } + match ROUTING_STACK.wire_formats.get(&(b[3] as i32)) { + Some(w) => { + let m = w.parse(typ, &b[..l as usize])?; + match m { + Some(m) => { + msgs.push(m); + } + None => { + nskips += 1; + } + } + } + None => { + nskips += 1; + } + } + b = &b[l as usize..]; + } + + // We failed to parse any of the messages - version mismatch? + snafu::ensure!(nmsgs == msgs.len() + nskips, MessageMismatchSnafu); + + Ok(msgs) +} + +/// A RouteMessage represents a message conveying an address prefix, a +/// nexthop address and an output interface. +/// +/// Unlike other messages, this message can be used to query adjacency +/// information for the given address prefix, to add a new route, and +/// to delete or modify the existing route from the routing information +/// base inside the kernel by writing and reading route messages on a +/// routing socket. +/// +/// For the manipulation of routing information, the route message must +/// contain appropriate fields that include: +/// +/// Version = +/// Type = +/// Flags = +/// Index = +/// ID = +/// Seq = +/// Addrs = +#[derive(Debug)] +pub struct RouteMessage { + /// message version + pub version: isize, + /// message type + pub r#type: isize, + /// route flags + pub flags: u32, + /// interface index when attached + pub index: u16, + /// sender's identifier; usually process ID + pub id: uintptr_t, + /// sequence number + pub seq: u32, + // error on requested operation + pub error: Option, + // addresses + pub addrs: Vec, + // offset of header extension + ext_off: usize, + // raw: []byte // raw message +} + +/// An interface message. 
+#[derive(Debug)] +pub struct InterfaceMessage { + /// Message version + pub version: isize, + /// Message type + pub r#type: isize, + // Interface flags + pub flags: isize, + // interface index + pub index: isize, + /// Interface name + pub name: Option, + /// Addresses + pub addr_rtax_ifp: Addr, + /// Offset of header extension + pub ext_off: usize, +} + +/// An interface address message. +#[derive(Debug)] +pub struct InterfaceAddrMessage { + /// Message version + pub version: isize, + /// Message type + pub r#type: isize, + /// Interface flags + pub flags: isize, + /// Interface index + pub index: isize, + /// Addresses + pub addrs: Vec, +} + +/// Interface multicast address message. +#[derive(Debug)] +pub struct InterfaceMulticastAddrMessage { + /// message version + pub version: isize, + /// message type + pub r#type: isize, + /// interface flags + pub flags: isize, + /// interface index + pub index: isize, + /// addresses + pub addrs: Vec, +} + +/// Interface announce message. +#[derive(Debug)] +pub struct InterfaceAnnounceMessage { + /// message version + pub version: isize, + /// message type + pub r#type: isize, + /// interface index + pub index: isize, + /// interface name + pub name: String, + /// what type of announcement + pub what: isize, +} + +/// Represents a type of routing information base. +type RIBType = i32; + +#[common_fields({ + backtrace: Option, +})] +#[derive(Debug, Snafu)] +#[non_exhaustive] +pub enum RouteError { + #[snafu(display("message mismatch"))] + MessageMismatch {}, + #[snafu(display("message too short"))] + MessageTooShort {}, + #[snafu(display("invalid message"))] + InvalidMessage {}, + #[snafu(display("invalid address"))] + InvalidAddress {}, + #[snafu(display("invalid rib type {rib_type}"))] + InvalidRibType { rib_type: RIBType }, + #[snafu(display("io error calling '{name}'"))] + Io { + source: std::io::Error, + name: &'static str, + }, +} + +/// FetchRIB fetches a routing information base from the operating system. +/// +/// The provided af must be an address family. +/// +/// The provided arg must be a RIBType-specific argument. +/// When RIBType is related to routes, arg might be a set of route +/// flags. When RIBType is related to network interfaces, arg might be +/// an interface index or a set of interface flags. In most cases, zero +/// means a wildcard. +fn fetch_rib(af: i32, typ: RIBType, arg: i32) -> Result, RouteError> { + let mut round = 0; + loop { + round += 1; + + let mut mib: [i32; 6] = [CTL_NET, AF_ROUTE, 0, af, typ, arg]; + let mut n: libc::size_t = 0; + let err = unsafe { + libc::sysctl( + mib.as_mut_ptr() as *mut _, + 6, + std::ptr::null_mut(), + &mut n, + std::ptr::null_mut(), + 0, + ) + }; + if err != 0 { + return Err(IoSnafu { name: "sysctl" }.into_error(std::io::Error::last_os_error())); + } + if n == 0 { + // nothing available + return Ok(Vec::new()); + } + let mut b = vec![0u8; n]; + let err = unsafe { + libc::sysctl( + mib.as_mut_ptr() as _, + 6, + b.as_mut_ptr() as _, + &mut n, + std::ptr::null_mut(), + 0, + ) + }; + if err != 0 { + // If the sysctl failed because the data got larger + // between the two sysctl calls, try a few times + // before failing. (golang.org/issue/45736). 
+ let io_err = std::io::Error::last_os_error(); + const MAX_TRIES: usize = 3; + if io_err.raw_os_error().unwrap_or_default() == libc::ENOMEM && round < MAX_TRIES { + continue; + } + return Err(IoSnafu { name: "sysctl" }.into_error(io_err)); + } + // Truncate b, to the new length + b.truncate(n); + + return Ok(b); + } +} + +/// Represents an address associated with packet routing. +#[derive(Debug, Clone, PartialEq, Eq)] +pub enum Addr { + /// Represents a link-layer address. + Link { + /// interface index when attached + index: i32, + /// interface name when attached + name: Option, + /// link-layer address when attached + addr: Option>, + }, + /// Represents an internet address for IPv4. + Inet4 { ip: Ipv4Addr }, + /// Represents an internet address for IPv6. + Inet6 { ip: Ipv6Addr, zone: u32 }, + /// Represents an address of various operating system-specific features. + Default { + af: i32, + /// raw format of address + raw: Box<[u8]>, + }, +} + +impl Addr { + pub fn family(&self) -> i32 { + match self { + Addr::Link { .. } => AF_LINK, + Addr::Inet4 { .. } => AF_INET, + Addr::Inet6 { .. } => AF_INET6, + Addr::Default { af, .. } => *af, + } + } + + pub fn name(&self) -> Option<&str> { + match self { + Addr::Link { name, .. } => name.as_ref().map(|s| s.as_str()), + _ => None, + } + } + + pub fn ip(&self) -> Option { + match self { + Addr::Inet4 { ip } => Some(IpAddr::V4(*ip)), + Addr::Inet6 { ip, .. } => { + // TODO: how to add the zone? + Some(IpAddr::V6(*ip)) + } + _ => None, + } + } +} + +fn roundup(l: usize) -> usize { + if l == 0 { + return ROUTING_STACK.kernel_align; + } + let mut x = l + ROUTING_STACK.kernel_align - 1; + x &= !(ROUTING_STACK.kernel_align - 1); + x +} + +fn parse_addrs(attrs: i32, default_fn: F, data: &[u8]) -> Result, RouteError> +where + F: Fn(i32, &[u8]) -> Result<(i32, Addr), RouteError>, +{ + let mut addrs = Vec::with_capacity(RTAX_MAX as usize); + let af = AF_UNSPEC; + + let mut b = data; + for i in 0..RTAX_MAX as usize { + if b.len() < roundup(0) { + break; + } + + if attrs & (1 << i) == 0 { + continue; + } + if i <= RTAX_BRD as usize { + match b[1] as i32 { + AF_LINK => { + let a = parse_link_addr(b)?; + addrs.push(a); + let l = roundup(b[0] as usize); + snafu::ensure!(b.len() >= l, MessageTooShortSnafu); + b = &b[l..]; + } + AF_INET | AF_INET6 => { + let af = b[1] as i32; + let a = parse_inet_addr(af, b)?; + addrs.push(a); + let l = roundup(b[0] as usize); + snafu::ensure!(b.len() >= l, MessageTooShortSnafu); + b = &b[l..]; + } + _ => { + let (l, a) = default_fn(af, b)?; + addrs.push(a); + let ll = roundup(l as usize); + if b.len() < ll { + b = &b[l as usize..]; + } else { + b = &b[ll..]; + } + } + } + } else { + let a = parse_default_addr(b)?; + addrs.push(a); + let l = roundup(b[0] as usize); + snafu::ensure!(b.len() >= l, MessageTooShortSnafu); + b = &b[l..]; + } + } + // The only remaining bytes in b should be alignment. + // However, under some circumstances DragonFly BSD appears to put + // more addresses in the message than are indicated in the address + // bitmask, so don't check for this. + Ok(addrs) +} + +/// Parses `b` as an internet address for IPv4 or IPv6. 
+fn parse_inet_addr(af: i32, b: &[u8]) -> Result { + match af { + AF_INET => { + snafu::ensure!(b.len() >= SIZEOF_SOCKADDR_INET, InvalidAddressSnafu); + + let ip = Ipv4Addr::new(b[4], b[5], b[6], b[7]); + Ok(Addr::Inet4 { ip }) + } + AF_INET6 => { + snafu::ensure!(b.len() >= SIZEOF_SOCKADDR_INET6, InvalidAddressSnafu); + + let mut zone = u32_from_ne_range(b, 24..28)?; + let mut oc: [u8; 16] = b + .get(8..24) + .and_then(|s| TryInto::<[u8; 16]>::try_into(s).ok()) + .context(InvalidMessageSnafu)?; + if oc[0] == 0xfe && oc[1] & 0xc0 == 0x80 + || oc[0] == 0xff && (oc[1] & 0x0f == 0x01 || oc[1] & 0x0f == 0x02) + { + // KAME based IPv6 protocol stack usually + // embeds the interface index in the + // interface-local or link-local address as + // the kernel-internal form. + // NOTE: This is the only place in which uses big-endian. Is that right? + let id = oc + .get(2..4) + .and_then(|s| TryInto::<[u8; 2]>::try_into(s).ok()) + .map(u16::from_be_bytes) + .context(InvalidMessageSnafu)? as u32; + if id != 0 { + zone = id; + oc[2] = 0; + oc[3] = 0; + } + } + Ok(Addr::Inet6 { + ip: Ipv6Addr::from(oc), + zone, + }) + } + _ => Err(InvalidAddressSnafu.build()), + } +} + +/// Parses b as an internet address in conventional BSD kernel form. +fn parse_kernel_inet_addr(af: i32, b: &[u8]) -> Result<(i32, Addr), RouteError> { + // The encoding looks similar to the NLRI encoding. + // +----------------------------+ + // | Length (1 octet) | + // +----------------------------+ + // | Address prefix (variable) | + // +----------------------------+ + // + // The differences between the kernel form and the NLRI + // encoding are: + // + // - The length field of the kernel form indicates the prefix + // length in bytes, not in bits + // + // - In the kernel form, zero value of the length field + // doesn't mean 0.0.0.0/0 or ::/0 + // + // - The kernel form appends leading bytes to the prefix field + // to make the tuple to be conformed with + // the routing message boundary + let mut l = b[0] as usize; + + #[cfg(any(target_os = "macos", target_os = "ios"))] + { + // On Darwin, an address in the kernel form is also used as a message filler. + if l == 0 || b.len() > roundup(l) { + l = roundup(l) + } + } + #[cfg(not(any(target_os = "macos", target_os = "ios")))] + { + l = roundup(l); + } + + snafu::ensure!(b.len() >= l, InvalidAddressSnafu); + // Don't reorder case expressions. + // The case expressions for IPv6 must come first. 
+ const OFF4: usize = 4; // offset of in_addr + const OFF6: usize = 8; // offset of in6_addr + + let addr = if b[0] as usize == SIZEOF_SOCKADDR_INET6 { + let octets: [u8; 16] = b + .get(OFF6..OFF6 + 16) + .and_then(|s| TryInto::try_into(s).ok()) + .context(InvalidMessageSnafu)?; + let ip = Ipv6Addr::from(octets); + Addr::Inet6 { ip, zone: 0 } + } else if af == AF_INET6 { + let mut octets = [0u8; 16]; + if l - 1 < OFF6 { + octets[..l - 1].copy_from_slice(&b[1..l]); + } else { + octets.copy_from_slice(&b[l - OFF6..l]); + } + let ip = Ipv6Addr::from(octets); + Addr::Inet6 { ip, zone: 0 } + } else if b[0] as usize == SIZEOF_SOCKADDR_INET { + let octets: [u8; 4] = b + .get(OFF4..OFF4 + 4) + .and_then(|s| TryInto::try_into(s).ok()) + .context(InvalidMessageSnafu)?; + let ip = Ipv4Addr::from(octets); + Addr::Inet4 { ip } + } else { + // an old fashion, AF_UNSPEC or unknown means AF_INET + let mut octets = [0u8; 4]; + if l - 1 < OFF4 { + octets[..l - 1].copy_from_slice(&b[1..l]); + } else { + octets.copy_from_slice(&b[l - OFF4..l]); + } + let ip = Ipv4Addr::from(octets); + Addr::Inet4 { ip } + }; + + Ok((b[0] as _, addr)) +} + +fn parse_link_addr(b: &[u8]) -> Result { + snafu::ensure!(b.len() >= 8, InvalidAddressSnafu); + let (_, mut a) = parse_kernel_link_addr(AF_LINK, &b[4..])?; + + if let Addr::Link { index, .. } = &mut a { + *index = u16_from_ne_range(b, 2..4)? as _; + } + + Ok(a) +} + +// Parses b as a link-layer address in conventional BSD kernel form. +fn parse_kernel_link_addr(_: i32, b: &[u8]) -> Result<(usize, Addr), RouteError> { + // The encoding looks like the following: + // +----------------------------+ + // | Type (1 octet) | + // +----------------------------+ + // | Name length (1 octet) | + // +----------------------------+ + // | Address length (1 octet) | + // +----------------------------+ + // | Selector length (1 octet) | + // +----------------------------+ + // | Data (variable) | + // +----------------------------+ + // + // On some platforms, all-bit-one of length field means "don't + // care". + let mut nlen = b[1] as usize; + let mut alen = b[2] as usize; + let mut slen = b[3] as usize; + + if nlen == 0xff { + nlen = 0; + } + if alen == 0xff { + alen = 0; + } + if slen == 0xff { + slen = 0; + } + + let l = 4 + nlen + alen + slen; + snafu::ensure!(b.len() >= l, InvalidAddressSnafu); + let mut data = &b[4..]; + + let name = if nlen > 0 { + let name = std::str::from_utf8(&data[..nlen]) + .map_err(|_| InvalidAddressSnafu.build())? 
+ .to_string(); + data = &data[nlen..]; + Some(name) + } else { + None + }; + + let addr = if alen > 0 { + Some(data[..alen].to_vec().into_boxed_slice()) + } else { + None + }; + + let a = Addr::Link { + index: 0, + name, + addr, + }; + + Ok((l, a)) +} + +fn parse_default_addr(b: &[u8]) -> Result { + snafu::ensure!( + b.len() >= 2 && b.len() >= b[0] as usize, + InvalidAddressSnafu + ); + Ok(Addr::Default { + af: b[1] as _, + raw: b[..b[0] as usize].to_vec().into_boxed_slice(), + }) +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_fetch_parse_routing_table() { + let rib_raw = fetch_routing_table().unwrap(); + assert!(!rib_raw.is_empty()); + println!("got rib: {}", rib_raw.len()); + let rib_parsed = parse_routing_table(&rib_raw).unwrap(); + println!("got {} entries", rib_parsed.len()); + assert!(!rib_parsed.is_empty()); + } + + struct ParseAddrsTest { + attrs: i32, + #[allow(clippy::type_complexity)] + parse_fn: Box Result<(i32, Addr), RouteError>>, + b: Vec, + addrs: Vec, + } + + #[test] + #[cfg(target_endian = "little")] + fn test_parse_addrs() { + #[cfg(any(target_os = "macos", target_os = "ios"))] + use libc::{RTA_BRD, RTA_DST, RTA_GATEWAY, RTA_IFA, RTA_IFP, RTA_NETMASK}; + + let parse_addrs_little_endian_tests = [ + ParseAddrsTest { + attrs: RTA_DST | RTA_GATEWAY | RTA_NETMASK | RTA_BRD, + parse_fn: Box::new(parse_kernel_inet_addr), + b: vec![ + 0x38, 0x12, 0x0, 0x0, 0xff, 0xff, 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x38, 0x12, 0x2, 0x0, 0x6, 0x3, + 0x6, 0x0, 0x65, 0x6d, 0x31, 0x0, 0xc, 0x29, 0x66, 0x2c, 0xdc, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x0, 0x10, 0x2, 0x0, 0x0, 0xac, 0x10, 0xdc, 0xb4, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x10, 0x2, 0x0, 0x0, 0xac, 0x10, 0xdc, 0xff, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + ], + addrs: vec![ + Addr::Link { + index: 0, + name: None, + addr: None, + }, + Addr::Link { + index: 2, + name: Some("em1".to_string()), + addr: Some(vec![0x00, 0x0c, 0x29, 0x66, 0x2c, 0xdc].into_boxed_slice()), + }, + Addr::Inet4 { + ip: Ipv4Addr::from([172, 16, 220, 180]), + }, + /*nil, + nil, + nil, + nil,*/ + Addr::Inet4 { + ip: Ipv4Addr::from([172, 16, 220, 255]), + }, + ], + }, + ParseAddrsTest { + attrs: RTA_NETMASK | RTA_IFP | RTA_IFA, + parse_fn: Box::new(parse_kernel_inet_addr), + b: vec![ + 0x7, 0x0, 0x0, 0x0, 0xff, 0xff, 0xff, 0x0, 0x18, 0x12, 0xa, 0x0, 0x87, 0x8, + 0x0, 0x0, 0x76, 0x6c, 0x61, 0x6e, 0x35, 0x36, 0x38, 0x32, 0x0, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x0, 0x10, 0x2, 0x0, 0x0, 0xa9, 0xfe, 0x0, 0x1, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x0, 0x0, + ], + addrs: vec![ + // nil, + // nil, + Addr::Inet4 { + ip: Ipv4Addr::from([255, 255, 255, 0]), + }, + // nil, + Addr::Link { + index: 10, + name: Some("vlan5682".to_string()), + addr: None, + }, + Addr::Inet4 { + ip: Ipv4Addr::from([169, 254, 0, 1]), + }, + // nil, + // nil, + ], + }, + ]; + + for (i, tt) in parse_addrs_little_endian_tests.into_iter().enumerate() { + let addrs = parse_addrs(tt.attrs, tt.parse_fn, &tt.b) + .unwrap_or_else(|_| panic!("failed {}", i)); + + assert_eq!(addrs, tt.addrs, "{}", i); + } + } +} diff --git a/patches/netwatch/src/interfaces/bsd/freebsd.rs 
b/patches/netwatch/src/interfaces/bsd/freebsd.rs new file mode 100644 index 0000000000..2114978700 --- /dev/null +++ b/patches/netwatch/src/interfaces/bsd/freebsd.rs @@ -0,0 +1,328 @@ +// Copyright 2022-2024 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT +use libc::c_int; + +use super::{MessageType, RoutingStack, WireFormat}; + +// Missing constants from libc. +// https://github.com/rust-lang/libc/issues/3711 + +// net/route.h +pub const RTF_GATEWAY: c_int = 0x2; +pub const RTAX_DST: c_int = 0; +pub const RTAX_GATEWAY: c_int = 1; +pub const RTAX_NETMASK: c_int = 2; +pub const RTAX_IFP: c_int = 4; +pub const RTAX_BRD: c_int = 7; +pub const RTAX_MAX: c_int = 8; +pub const RTM_VERSION: c_int = 5; +pub const RTA_DST: c_int = 0x1; +pub const RTA_GATEWAY: c_int = 0x2; +pub const RTA_NETMASK: c_int = 0x4; +pub const RTA_GENMASK: c_int = 0x8; +pub const RTA_IFP: c_int = 0x10; +pub const RTA_IFA: c_int = 0x20; +pub const RTA_AUTHOR: c_int = 0x40; +pub const RTA_BRD: c_int = 0x80; + +// Message types +pub const RTM_ADD: c_int = 0x1; +pub const RTM_DELETE: c_int = 0x2; +pub const RTM_CHANGE: c_int = 0x3; +pub const RTM_GET: c_int = 0x4; +pub const RTM_LOSING: c_int = 0x5; +pub const RTM_REDIRECT: c_int = 0x6; +pub const RTM_MISS: c_int = 0x7; +pub const RTM_LOCK: c_int = 0x8; +pub const RTM_OLDADD: c_int = 0x9; +pub const RTM_OLDDEL: c_int = 0xa; +pub const RTM_RESOLVE: c_int = 0xb; +pub const RTM_NEWADDR: c_int = 0xc; +pub const RTM_DELADDR: c_int = 0xd; +pub const RTM_IFINFO: c_int = 0xe; +pub const RTM_NEWMADDR: c_int = 0xf; +pub const RTM_DELMADDR: c_int = 0x10; +pub const RTM_IFANNOUNCE: c_int = 0x11; +pub const RTM_IEEE80211: c_int = 0x12; + +// Hardcoded based on the generated values here: https://cs.opensource.google/go/x/net/+/master:route/zsys_freebsd_amd64.go +#[cfg(target_arch = "x86_64")] +pub use self::amd64::*; +#[cfg(target_arch = "x86_64")] +mod amd64 { + pub const SIZEOF_IF_MSGHDRL_FREE_BSD10: usize = 0xb0; + pub const SIZEOF_IFA_MSGHDR_FREE_BSD10: usize = 0x14; + pub const SIZEOF_IFA_MSGHDRL_FREE_BSD10: usize = 0xb0; + pub const SIZEOF_IFMA_MSGHDR_FREE_BSD10: usize = 0x10; + pub const SIZEOF_IF_ANNOUNCEMSGHDR_FREE_BSD10: usize = 0x18; + + pub const SIZEOF_RT_MSGHDR_FREE_BSD10: usize = 0x98; + pub const SIZEOF_RT_METRICS_FREE_BSD10: usize = 0x70; + + pub const SIZEOF_IF_MSGHDR_FREE_BSD7: usize = 0xa8; + pub const SIZEOF_IF_MSGHDR_FREE_BSD8: usize = 0xa8; + pub const SIZEOF_IF_MSGHDR_FREE_BSD9: usize = 0xa8; + pub const SIZEOF_IF_MSGHDR_FREE_BSD10: usize = 0xa8; + pub const SIZEOF_IF_MSGHDR_FREE_BSD11: usize = 0xa8; + + pub const SIZEOF_IF_DATA_FREE_BSD7: usize = 0x98; + pub const SIZEOF_IF_DATA_FREE_BSD8: usize = 0x98; + pub const SIZEOF_IF_DATA_FREE_BSD9: usize = 0x98; + pub const SIZEOF_IF_DATA_FREE_BSD10: usize = 0x98; + pub const SIZEOF_IF_DATA_FREE_BSD11: usize = 0x98; + + pub const SIZEOF_IF_MSGHDRL_FREE_BSD10_EMU: usize = 0xb0; + pub const SIZEOF_IFA_MSGHDR_FREE_BSD10_EMU: usize = 0x14; + pub const SIZEOF_IFA_MSGHDRL_FREE_BSD10_EMU: usize = 0xb0; + pub const SIZEOF_IFMA_MSGHDR_FREE_BSD10_EMU: usize = 0x10; + pub const SIZEOF_IF_ANNOUNCEMSGHDR_FREE_BSD10_EMU: usize = 0x18; + + pub const SIZEOF_RT_MSGHDR_FREE_BSD10_EMU: usize = 0x98; + pub const SIZEOF_RT_METRICS_FREE_BSD10_EMU: usize = 0x70; + + pub const SIZEOF_IF_MSGHDR_FREE_BSD7_EMU: usize = 0xa8; + pub const SIZEOF_IF_MSGHDR_FREE_BSD8_EMU: usize = 0xa8; + pub const SIZEOF_IF_MSGHDR_FREE_BSD9_EMU: usize = 0xa8; + pub const SIZEOF_IF_MSGHDR_FREE_BSD10_EMU: usize = 0xa8; + pub const 
SIZEOF_IF_MSGHDR_FREE_BSD11_EMU: usize = 0xa8; + + pub const SIZEOF_IF_DATA_FREE_BSD7_EMU: usize = 0x98; + pub const SIZEOF_IF_DATA_FREE_BSD8_EMU: usize = 0x98; + pub const SIZEOF_IF_DATA_FREE_BSD9_EMU: usize = 0x98; + pub const SIZEOF_IF_DATA_FREE_BSD10_EMU: usize = 0x98; + pub const SIZEOF_IF_DATA_FREE_BSD11_EMU: usize = 0x98; + + pub const SIZEOF_SOCKADDR_STORAGE: usize = 0x80; + pub const SIZEOF_SOCKADDR_INET: usize = 0x10; + pub const SIZEOF_SOCKADDR_INET6: usize = 0x1c; +} + +// Hardcoded based on the generated values here: https://cs.opensource.google/go/x/net/+/master:route/zsys_freebsd_386.go +#[cfg(target_arch = "x86")] +pub use self::i686::*; +#[cfg(target_arch = "x86")] +mod i686 { + pub const SIZEOF_IF_MSGHDRL_FREE_BSD10: usize = 0x68; + pub const SIZEOF_IFA_MSGHDR_FREE_BSD10: usize = 0x14; + pub const SIZEOF_IFA_MSGHDRL_FREE_BSD10: usize = 0x6c; + pub const SIZEOF_IFMA_MSGHDR_FREE_BSD10: usize = 0x10; + pub const SIZEOF_IF_ANNOUNCEMSGHDR_FREE_BSD10: usize = 0x18; + + pub const SIZEOF_RT_MSGHDR_FREE_BSD10: usize = 0x5c; + pub const SIZEOF_RT_METRICS_FREE_BSD10: usize = 0x38; + + pub const SIZEOF_IF_MSGHDR_FREE_BSD7: usize = 0x60; + pub const SIZEOF_IF_MSGHDR_FREE_BSD8: usize = 0x60; + pub const SIZEOF_IF_MSGHDR_FREE_BSD9: usize = 0x60; + pub const SIZEOF_IF_MSGHDR_FREE_BSD10: usize = 0x64; + pub const SIZEOF_IF_MSGHDR_FREE_BSD11: usize = 0xa8; + + pub const SIZEOF_IF_DATA_FREE_BSD7: usize = 0x50; + pub const SIZEOF_IF_DATA_FREE_BSD8: usize = 0x50; + pub const SIZEOF_IF_DATA_FREE_BSD9: usize = 0x50; + pub const SIZEOF_IF_DATA_FREE_BSD10: usize = 0x54; + pub const SIZEOF_IF_DATA_FREE_BSD11: usize = 0x98; + + // MODIFIED BY HAND FOR 386 EMULATION ON AMD64 + // 386 EMULATION USES THE UNDERLYING RAW DATA LAYOUT + + pub const SIZEOF_IF_MSGHDRL_FREE_BSD10_EMU: usize = 0xb0; + pub const SIZEOF_IFA_MSGHDR_FREE_BSD10_EMU: usize = 0x14; + pub const SIZEOF_IFA_MSGHDRL_FREE_BSD10_EMU: usize = 0xb0; + pub const SIZEOF_IFMA_MSGHDR_FREE_BSD10_EMU: usize = 0x10; + pub const SIZEOF_IF_ANNOUNCEMSGHDR_FREE_BSD10_EMU: usize = 0x18; + + pub const SIZEOF_RT_MSGHDR_FREE_BSD10_EMU: usize = 0x98; + pub const SIZEOF_RT_METRICS_FREE_BSD10_EMU: usize = 0x70; + + pub const SIZEOF_IF_MSGHDR_FREE_BSD7_EMU: usize = 0xa8; + pub const SIZEOF_IF_MSGHDR_FREE_BSD8_EMU: usize = 0xa8; + pub const SIZEOF_IF_MSGHDR_FREE_BSD9_EMU: usize = 0xa8; + pub const SIZEOF_IF_MSGHDR_FREE_BSD10_EMU: usize = 0xa8; + pub const SIZEOF_IF_MSGHDR_FREE_BSD11_EMU: usize = 0xa8; + + pub const SIZEOF_IF_DATA_FREE_BSD7_EMU: usize = 0x98; + pub const SIZEOF_IF_DATA_FREE_BSD8_EMU: usize = 0x98; + pub const SIZEOF_IF_DATA_FREE_BSD9_EMU: usize = 0x98; + pub const SIZEOF_IF_DATA_FREE_BSD10_EMU: usize = 0x98; + pub const SIZEOF_IF_DATA_FREE_BSD11_EMU: usize = 0x98; + + pub const SIZEOF_SOCKADDR_STORAGE: usize = 0x80; + pub const SIZEOF_SOCKADDR_INET: usize = 0x10; + pub const SIZEOF_SOCKADDR_INET6: usize = 0x1c; +} + +// Hardcoded based on the generated values here: https://cs.opensource.google/go/x/net/+/master:route/zsys_freebsd_arm.go +#[cfg(target_arch = "arm")] +pub use self::arm::*; +#[cfg(target_arch = "arm")] +mod arm { + pub const SIZEOF_IF_MSGHDRL_FREE_BSD10: usize = 0x68; + pub const SIZEOF_IFA_MSGHDR_FREE_BSD10: usize = 0x14; + pub const SIZEOF_IFA_MSGHDRL_FREE_BSD10: usize = 0x6c; + pub const SIZEOF_IFMA_MSGHDR_FREE_BSD10: usize = 0x10; + pub const SIZEOF_IF_ANNOUNCEMSGHDR_FREE_BSD10: usize = 0x18; + + pub const SIZEOF_RT_MSGHDR_FREE_BSD10: usize = 0x5c; + pub const SIZEOF_RT_METRICS_FREE_BSD10: usize = 0x38; + + pub const 
SIZEOF_IF_MSGHDR_FREE_BSD7: usize = 0x70; + pub const SIZEOF_IF_MSGHDR_FREE_BSD8: usize = 0x70; + pub const SIZEOF_IF_MSGHDR_FREE_BSD9: usize = 0x70; + pub const SIZEOF_IF_MSGHDR_FREE_BSD10: usize = 0x70; + pub const SIZEOF_IF_MSGHDR_FREE_BSD11: usize = 0xa8; + + pub const SIZEOF_IF_DATA_FREE_BSD7: usize = 0x60; + pub const SIZEOF_IF_DATA_FREE_BSD8: usize = 0x60; + pub const SIZEOF_IF_DATA_FREE_BSD9: usize = 0x60; + pub const SIZEOF_IF_DATA_FREE_BSD10: usize = 0x60; + pub const SIZEOF_IF_DATA_FREE_BSD11: usize = 0x98; + + pub const SIZEOF_IF_MSGHDRL_FREE_BSD10_EMU: usize = 0x68; + pub const SIZEOF_IFA_MSGHDR_FREE_BSD10_EMU: usize = 0x14; + pub const SIZEOF_IFA_MSGHDRL_FREE_BSD10_EMU: usize = 0x6c; + pub const SIZEOF_IFMA_MSGHDR_FREE_BSD10_EMU: usize = 0x10; + pub const SIZEOF_IF_ANNOUNCEMSGHDR_FREE_BSD10_EMU: usize = 0x18; + + pub const SIZEOF_RT_MSGHDR_FREE_BSD10_EMU: usize = 0x5c; + pub const SIZEOF_RT_METRICS_FREE_BSD10_EMU: usize = 0x38; + + pub const SIZEOF_IF_MSGHDR_FREE_BSD7_EMU: usize = 0x70; + pub const SIZEOF_IF_MSGHDR_FREE_BSD8_EMU: usize = 0x70; + pub const SIZEOF_IF_MSGHDR_FREE_BSD9_EMU: usize = 0x70; + pub const SIZEOF_IF_MSGHDR_FREE_BSD10_EMU: usize = 0x70; + pub const SIZEOF_IF_MSGHDR_FREE_BSD11_EMU: usize = 0xa8; + + pub const SIZEOF_IF_DATA_FREE_BSD7_EMU: usize = 0x60; + pub const SIZEOF_IF_DATA_FREE_BSD8_EMU: usize = 0x60; + pub const SIZEOF_IF_DATA_FREE_BSD9_EMU: usize = 0x60; + pub const SIZEOF_IF_DATA_FREE_BSD10_EMU: usize = 0x60; + pub const SIZEOF_IF_DATA_FREE_BSD11_EMU: usize = 0x98; + + pub const SIZEOF_SOCKADDR_STORAGE: usize = 0x80; + pub const SIZEOF_SOCKADDR_INET: usize = 0x10; + pub const SIZEOF_SOCKADDR_INET6: usize = 0x1c; +} + +// Hardcoded based on the generated values here: https://cs.opensource.google/go/x/net/+/master:route/zsys_freebsd_arm.go +#[cfg(target_arch = "aarch64")] +pub use self::arm64::*; +#[cfg(target_arch = "aarch64")] +mod arm64 { + pub const SIZEOF_IF_MSGHDRL_FREE_BSD10: usize = 0xb0; + pub const SIZEOF_IFA_MSGHDR_FREE_BSD10: usize = 0x14; + pub const SIZEOF_IFA_MSGHDRL_FREE_BSD10: usize = 0xb0; + pub const SIZEOF_IFMA_MSGHDR_FREE_BSD10: usize = 0x10; + pub const SIZEOF_IF_ANNOUNCEMSGHDR_FREE_BSD10: usize = 0x18; + + pub const SIZEOF_RT_MSGHDR_FREE_BSD10: usize = 0x98; + pub const SIZEOF_RT_METRICS_FREE_BSD10: usize = 0x70; + + pub const SIZEOF_IF_MSGHDR_FREE_BSD7: usize = 0xa8; + pub const SIZEOF_IF_MSGHDR_FREE_BSD8: usize = 0xa8; + pub const SIZEOF_IF_MSGHDR_FREE_BSD9: usize = 0xa8; + pub const SIZEOF_IF_MSGHDR_FREE_BSD10: usize = 0xa8; + pub const SIZEOF_IF_MSGHDR_FREE_BSD11: usize = 0xa8; + + pub const SIZEOF_IF_DATA_FREE_BSD7: usize = 0x98; + pub const SIZEOF_IF_DATA_FREE_BSD8: usize = 0x98; + pub const SIZEOF_IF_DATA_FREE_BSD9: usize = 0x98; + pub const SIZEOF_IF_DATA_FREE_BSD10: usize = 0x98; + pub const SIZEOF_IF_DATA_FREE_BSD11: usize = 0x98; + + pub const SIZEOF_IF_MSGHDRL_FREE_BSD10_EMU: usize = 0xb0; + pub const SIZEOF_IFA_MSGHDR_FREE_BSD10_EMU: usize = 0x14; + pub const SIZEOF_IFA_MSGHDRL_FREE_BSD10_EMU: usize = 0xb0; + pub const SIZEOF_IFMA_MSGHDR_FREE_BSD10_EMU: usize = 0x10; + pub const SIZEOF_IF_ANNOUNCEMSGHDR_FREE_BSD10_EMU: usize = 0x18; + + pub const SIZEOF_RT_MSGHDR_FREE_BSD10_EMU: usize = 0x98; + pub const SIZEOF_RT_METRICS_FREE_BSD10_EMU: usize = 0x70; + + pub const SIZEOF_IF_MSGHDR_FREE_BSD7_EMU: usize = 0xa8; + pub const SIZEOF_IF_MSGHDR_FREE_BSD8_EMU: usize = 0xa8; + pub const SIZEOF_IF_MSGHDR_FREE_BSD9_EMU: usize = 0xa8; + pub const SIZEOF_IF_MSGHDR_FREE_BSD10_EMU: usize = 0xa8; + pub const 
SIZEOF_IF_MSGHDR_FREE_BSD11_EMU: usize = 0xa8; + + pub const SIZEOF_IF_DATA_FREE_BSD7_EMU: usize = 0x98; + pub const SIZEOF_IF_DATA_FREE_BSD8_EMU: usize = 0x98; + pub const SIZEOF_IF_DATA_FREE_BSD9_EMU: usize = 0x98; + pub const SIZEOF_IF_DATA_FREE_BSD10_EMU: usize = 0x98; + pub const SIZEOF_IF_DATA_FREE_BSD11_EMU: usize = 0x98; + + pub const SIZEOF_SOCKADDR_STORAGE: usize = 0x80; + pub const SIZEOF_SOCKADDR_INET: usize = 0x10; + pub const SIZEOF_SOCKADDR_INET6: usize = 0x1c; +} + +/// 386 emulation on amd64 +fn detect_compat_freebsd32() -> bool { + // TODO: implement detection when someone actually needs it + false +} + +pub(super) fn probe_routing_stack() -> RoutingStack { + let rtm_version = RTM_VERSION; + + // Currently only BSD11 support is implemented. + // At the time of this writing rust supports 10 and 11, if this is a problem + // please file an issue. + + let (rtm, ifm, ifam, ifmam, ifanm) = if detect_compat_freebsd32() { + unimplemented!() + } else { + let rtm = WireFormat { + ext_off: SIZEOF_RT_MSGHDR_FREE_BSD10 - SIZEOF_RT_METRICS_FREE_BSD10, + body_off: SIZEOF_RT_MSGHDR_FREE_BSD10, + typ: MessageType::Route, + }; + let ifm = WireFormat { + ext_off: 16, + body_off: SIZEOF_IF_MSGHDR_FREE_BSD11, + typ: MessageType::Interface, + }; + let ifam = WireFormat { + ext_off: SIZEOF_IFA_MSGHDR_FREE_BSD10, + body_off: SIZEOF_IFA_MSGHDR_FREE_BSD10, + typ: MessageType::InterfaceAddr, + }; + let ifmam = WireFormat { + ext_off: SIZEOF_IFMA_MSGHDR_FREE_BSD10, + body_off: SIZEOF_IFMA_MSGHDR_FREE_BSD10, + typ: MessageType::InterfaceMulticastAddr, + }; + let ifanm = WireFormat { + ext_off: SIZEOF_IF_ANNOUNCEMSGHDR_FREE_BSD10, + body_off: SIZEOF_IF_ANNOUNCEMSGHDR_FREE_BSD10, + typ: MessageType::InterfaceAnnounce, + }; + (rtm, ifm, ifam, ifmam, ifanm) + }; + + let wire_formats = [ + (RTM_ADD, rtm), + (RTM_DELETE, rtm), + (RTM_CHANGE, rtm), + (RTM_GET, rtm), + (RTM_LOSING, rtm), + (RTM_REDIRECT, rtm), + (RTM_MISS, rtm), + (RTM_LOCK, rtm), + (RTM_RESOLVE, rtm), + (RTM_NEWADDR, ifam), + (RTM_DELADDR, ifam), + (RTM_IFINFO, ifm), + (RTM_NEWMADDR, ifmam), + (RTM_DELMADDR, ifmam), + (RTM_IFANNOUNCE, ifanm), + (RTM_IEEE80211, ifanm), + ] + .into_iter() + .collect(); + RoutingStack { + rtm_version, + wire_formats, + kernel_align: 4, + } +} diff --git a/patches/netwatch/src/interfaces/bsd/macos.rs b/patches/netwatch/src/interfaces/bsd/macos.rs new file mode 100644 index 0000000000..7f5cf4a9d5 --- /dev/null +++ b/patches/netwatch/src/interfaces/bsd/macos.rs @@ -0,0 +1,88 @@ +// Copyright 2022-2024 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT +use super::{MessageType, RoutingStack, WireFormat}; + +// Hardcoded based on the generated values here: https://cs.opensource.google/go/x/net/+/master:route/zsys_darwin.go +const SIZEOF_IF_MSGHDR_DARWIN15: usize = 0x70; +const SIZEOF_IFA_MSGHDR_DARWIN15: usize = 0x14; +const SIZEOF_IFMA_MSGHDR_DARWIN15: usize = 0x10; +const SIZEOF_IF_MSGHDR2_DARWIN15: usize = 0xa0; +const SIZEOF_IFMA_MSGHDR2_DARWIN15: usize = 0x14; +const SIZEOF_IF_DATA_DARWIN15: usize = 0x60; +const SIZEOF_IF_DATA64_DARWIN15: usize = 0x80; + +const SIZEOF_RT_MSGHDR_DARWIN15: usize = 0x5c; +const SIZEOF_RT_MSGHDR2_DARWIN15: usize = 0x5c; +const SIZEOF_RT_METRICS_DARWIN15: usize = 0x38; + +const SIZEOF_SOCKADDR_STORAGE: usize = 0x80; +pub(super) const SIZEOF_SOCKADDR_INET: usize = 0x10; +pub(super) const SIZEOF_SOCKADDR_INET6: usize = 0x1c; + +pub(super) fn probe_routing_stack() -> RoutingStack { + let rtm_version = libc::RTM_VERSION; + + let rtm = WireFormat { + ext_off: 36, + body_off: 
SIZEOF_RT_MSGHDR_DARWIN15, + typ: MessageType::Route, + }; + let rtm2 = WireFormat { + ext_off: 36, + body_off: SIZEOF_RT_MSGHDR2_DARWIN15, + typ: MessageType::Route, + }; + let ifm = WireFormat { + ext_off: 16, + body_off: SIZEOF_IF_MSGHDR_DARWIN15, + typ: MessageType::Interface, + }; + let ifm2 = WireFormat { + ext_off: 32, + body_off: SIZEOF_IF_MSGHDR2_DARWIN15, + typ: MessageType::Interface, + }; + let ifam = WireFormat { + ext_off: SIZEOF_IFA_MSGHDR_DARWIN15, + body_off: SIZEOF_IFA_MSGHDR_DARWIN15, + typ: MessageType::InterfaceAddr, + }; + let ifmam = WireFormat { + ext_off: SIZEOF_IFMA_MSGHDR_DARWIN15, + body_off: SIZEOF_IFMA_MSGHDR_DARWIN15, + typ: MessageType::InterfaceMulticastAddr, + }; + let ifmam2 = WireFormat { + ext_off: SIZEOF_IFMA_MSGHDR2_DARWIN15, + body_off: SIZEOF_IFMA_MSGHDR2_DARWIN15, + typ: MessageType::InterfaceMulticastAddr, + }; + + let wire_formats = [ + (libc::RTM_ADD, rtm), + (libc::RTM_DELETE, rtm), + (libc::RTM_CHANGE, rtm), + (libc::RTM_GET, rtm), + (libc::RTM_LOSING, rtm), + (libc::RTM_REDIRECT, rtm), + (libc::RTM_MISS, rtm), + (libc::RTM_LOCK, rtm), + (libc::RTM_RESOLVE, rtm), + (libc::RTM_NEWADDR, ifam), + (libc::RTM_DELADDR, ifam), + (libc::RTM_IFINFO, ifm), + (libc::RTM_NEWMADDR, ifmam), + (libc::RTM_DELMADDR, ifmam), + (libc::RTM_IFINFO2, ifm2), + (libc::RTM_NEWMADDR2, ifmam2), + (libc::RTM_GET2, rtm2), + ] + .into_iter() + .collect(); + + RoutingStack { + rtm_version, + wire_formats, + kernel_align: 4, + } +} diff --git a/patches/netwatch/src/interfaces/bsd/netbsd.rs b/patches/netwatch/src/interfaces/bsd/netbsd.rs new file mode 100644 index 0000000000..69f6111ca8 --- /dev/null +++ b/patches/netwatch/src/interfaces/bsd/netbsd.rs @@ -0,0 +1,117 @@ +// Copyright 2022-2024 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT +use libc::c_int; + +use super::{MessageType, RoutingStack, WireFormat}; + +// Missing constants from libc. 
+// https://github.com/rust-lang/libc/issues/3711 + +// net/route.h +pub const RTF_GATEWAY: c_int = 0x2; +pub const RTAX_DST: c_int = 0; +pub const RTAX_GATEWAY: c_int = 1; +pub const RTAX_NETMASK: c_int = 2; +pub const RTAX_IFP: c_int = 4; +pub const RTAX_BRD: c_int = 7; +pub const RTAX_MAX: c_int = 9; +pub const RTM_VERSION: c_int = 4; +pub const RTA_DST: c_int = 0x1; +pub const RTA_GATEWAY: c_int = 0x2; +pub const RTA_NETMASK: c_int = 0x4; +pub const RTA_GENMASK: c_int = 0x8; +pub const RTA_IFP: c_int = 0x10; +pub const RTA_IFA: c_int = 0x20; +pub const RTA_AUTHOR: c_int = 0x40; +pub const RTA_BRD: c_int = 0x80; + +// Message types +pub const RTM_ADD: c_int = 0x1; +pub const RTM_DELETE: c_int = 0x2; +pub const RTM_CHANGE: c_int = 0x3; +pub const RTM_GET: c_int = 0x4; +pub const RTM_LOSING: c_int = 0x5; +pub const RTM_REDIRECT: c_int = 0x6; +pub const RTM_MISS: c_int = 0x7; +pub const RTM_LOCK: c_int = 0x8; +pub const RTM_OLDADD: c_int = 0x9; +pub const RTM_OLDDEL: c_int = 0xa; +// pub const RTM_RESOLVE: c_int = 0xb; +pub const RTM_ONEWADDR: c_int = 0xc; +pub const RTM_ODELADDR: c_int = 0xd; +pub const RTM_OOIFINFO: c_int = 0xe; +pub const RTM_OIFINFO: c_int = 0xf; +pub const RTM_NEWMADDR: c_int = 0xf; +pub const RTM_IFANNOUNCE: c_int = 0x10; +pub const RTM_IEEE80211: c_int = 0x11; +pub const RTM_SETGATE: c_int = 0x12; + +pub const RTM_LLINFO_UPD: c_int = 0x13; + +pub const RTM_IFINFO: c_int = 0x14; +pub const RTM_OCHGADDR: c_int = 0x15; +pub const RTM_NEWADDR: c_int = 0x16; +pub const RTM_DELADDR: c_int = 0x17; +pub const RTM_CHGADDR: c_int = 0x18; + +// Hardcoded based on the generated values here: https://cs.opensource.google/go/x/net/+/master:route/zsys_netbsd.go + +pub(super) const SIZEOF_IF_MSGHDR_NET_BSD7: usize = 0x98; +pub(super) const SIZEOF_IFA_MSGHDR_NET_BSD7: usize = 0x18; +pub(super) const SIZEOF_IF_ANNOUNCEMSGHDR_NET_BSD7: usize = 0x18; + +pub(super) const SIZEOF_RT_MSGHDR_NET_BSD7: usize = 0x78; +pub(super) const SIZEOF_RT_METRICS_NET_BSD7: usize = 0x50; + +pub(super) const SIZEOF_SOCKADDR_STORAGE: usize = 0x80; +pub(super) const SIZEOF_SOCKADDR_INET: usize = 0x10; +pub(super) const SIZEOF_SOCKADDR_INET6: usize = 0x1c; + +pub(super) fn probe_routing_stack() -> RoutingStack { + let rtm_version = RTM_VERSION; + + let rtm = WireFormat { + ext_off: 40, + body_off: SIZEOF_RT_MSGHDR_NET_BSD7, + typ: MessageType::Route, + }; + let ifm = WireFormat { + ext_off: 16, + body_off: SIZEOF_IF_MSGHDR_NET_BSD7, + typ: MessageType::Interface, + }; + let ifam = WireFormat { + ext_off: SIZEOF_IFA_MSGHDR_NET_BSD7, + body_off: SIZEOF_IFA_MSGHDR_NET_BSD7, + typ: MessageType::InterfaceAddr, + }; + let ifannm = WireFormat { + ext_off: SIZEOF_IF_ANNOUNCEMSGHDR_NET_BSD7, + body_off: SIZEOF_IF_ANNOUNCEMSGHDR_NET_BSD7, + typ: MessageType::InterfaceAnnounce, + }; + + let wire_formats = [ + (RTM_ADD, rtm), + (RTM_DELETE, rtm), + (RTM_CHANGE, rtm), + (RTM_GET, rtm), + (RTM_LOSING, rtm), + (RTM_REDIRECT, rtm), + (RTM_MISS, rtm), + (RTM_LOCK, rtm), + (RTM_NEWADDR, ifam), + (RTM_DELADDR, ifam), + (RTM_IFANNOUNCE, ifannm), + (RTM_IFINFO, ifm), + ] + .into_iter() + .collect(); + + // NetBSD 6 and above kernels require 64-bit aligned access to routing facilities. 
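+    // Hence `kernel_align: 8` below: when the shared parser walks the socket addresses in
+    // a message it advances in 8-byte steps, so e.g. a 28-byte sockaddr_in6 is treated as
+    // occupying 32 bytes, whereas FreeBSD and macOS use 4-byte alignment.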
+ RoutingStack { + rtm_version, + wire_formats, + kernel_align: 8, + } +} diff --git a/patches/netwatch/src/interfaces/bsd/openbsd.rs b/patches/netwatch/src/interfaces/bsd/openbsd.rs new file mode 100644 index 0000000000..e806542f86 --- /dev/null +++ b/patches/netwatch/src/interfaces/bsd/openbsd.rs @@ -0,0 +1,107 @@ +// Copyright 2022-2024 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT +use libc::c_int; + +use super::{MessageType, RoutingStack, WireFormat}; + +// Missing constants from libc. +// https://github.com/rust-lang/libc/issues/3711 + +// net/route.h +pub const RTF_GATEWAY: c_int = 0x2; +pub const RTAX_DST: c_int = 0; +pub const RTAX_GATEWAY: c_int = 1; +pub const RTAX_NETMASK: c_int = 2; +pub const RTAX_IFP: c_int = 4; +pub const RTAX_BRD: c_int = 7; +pub const RTAX_MAX: c_int = 15; +pub const RTM_VERSION: c_int = 5; +pub const RTA_DST: c_int = 0x1; +pub const RTA_GATEWAY: c_int = 0x2; +pub const RTA_NETMASK: c_int = 0x4; +pub const RTA_GENMASK: c_int = 0x8; +pub const RTA_IFP: c_int = 0x10; +pub const RTA_IFA: c_int = 0x20; +pub const RTA_AUTHOR: c_int = 0x40; +pub const RTA_BRD: c_int = 0x80; + +// Message types +pub const RTM_ADD: c_int = 0x1; +pub const RTM_DELETE: c_int = 0x2; +pub const RTM_CHANGE: c_int = 0x3; +pub const RTM_GET: c_int = 0x4; +pub const RTM_LOSING: c_int = 0x5; +pub const RTM_REDIRECT: c_int = 0x6; +pub const RTM_MISS: c_int = 0x7; +pub const RTM_RESOLVE: c_int = 0xb; +pub const RTM_NEWADDR: c_int = 0xc; +pub const RTM_DELADDR: c_int = 0xd; +pub const RTM_IFINFO: c_int = 0xe; +pub const RTM_IFANNOUNCE: c_int = 0xf; +pub const RTM_DESYNC: c_int = 0x10; +pub const RTM_INVALIDATE: c_int = 0x11; +pub const RTM_BFD: c_int = 0x12; +pub const RTM_PROPOSAL: c_int = 0x13; +pub const RTM_CHGADDRATTR: c_int = 0x14; +pub const RTM_80211INFO: c_int = 0x15; +pub const RTM_SOURCE: c_int = 0x16; + +// socket.h +pub const NET_RT_STATS: c_int = 5; +pub const NET_RT_TABLE: c_int = 5; + +pub const SIZEOF_SOCKADDR_STORAGE: usize = 0x80; +pub const SIZEOF_SOCKADDR_INET: usize = 0x10; +pub const SIZEOF_SOCKADDR_INET6: usize = 0x1c; + +// Hardcoded based on the generated values here: https://cs.opensource.google/go/x/net/+/master:route/sys_openbsd.go + +pub(super) fn probe_routing_stack() -> RoutingStack { + let rtm_version = RTM_VERSION; + + let rtm = WireFormat { + ext_off: 0, + body_off: 0, + typ: MessageType::Route, + }; + let ifm = WireFormat { + ext_off: 0, + body_off: 0, + typ: MessageType::Interface, + }; + let ifam = WireFormat { + ext_off: 0, + body_off: 0, + typ: MessageType::InterfaceAddr, + }; + let ifannm = WireFormat { + ext_off: 0, + body_off: 0, + typ: MessageType::InterfaceAnnounce, + }; + + let wire_formats = [ + (RTM_ADD, rtm), + (RTM_DELETE, rtm), + (RTM_CHANGE, rtm), + (RTM_GET, rtm), + (RTM_LOSING, rtm), + (RTM_REDIRECT, rtm), + (RTM_MISS, rtm), + (RTM_RESOLVE, rtm), + (RTM_NEWADDR, ifam), + (RTM_DELADDR, ifam), + (RTM_IFINFO, ifm), + (RTM_IFANNOUNCE, ifannm), + (RTM_DESYNC, ifannm), + ] + .into_iter() + .collect(); + + // NetBSD 6 and above kernels require 64-bit aligned access to routing facilities. + RoutingStack { + rtm_version, + wire_formats, + kernel_align: 8, + } +} diff --git a/patches/netwatch/src/interfaces/linux.rs b/patches/netwatch/src/interfaces/linux.rs new file mode 100644 index 0000000000..7e42b4a6a9 --- /dev/null +++ b/patches/netwatch/src/interfaces/linux.rs @@ -0,0 +1,340 @@ +// Copyright 2022-2024 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT +//! Linux-specific network interfaces implementations. 
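+//!
+//! Default route discovery first parses `/proc/net/route`; only if that read fails does it
+//! fall back to an rtnetlink query (or to invoking `/system/bin/ip` on Android).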
+ +use nested_enum_utils::common_fields; +use snafu::{Backtrace, OptionExt, ResultExt, Snafu}; +use tokio::{ + fs::File, + io::{AsyncBufReadExt, BufReader}, +}; + +use super::DefaultRouteDetails; + +#[common_fields({ + backtrace: Option, +})] +#[derive(Debug, Snafu)] +#[snafu(visibility(pub(super)))] +#[non_exhaustive] +pub enum Error { + #[snafu(display("IO"))] + Io { source: std::io::Error }, + #[cfg(not(target_os = "android"))] + #[snafu(display("no netlink response"))] + NoResponse {}, + #[cfg(not(target_os = "android"))] + #[snafu(display("interface not found"))] + InterfaceNotFound {}, + #[snafu(display("iface field is missing"))] + MissingIfaceField {}, + #[snafu(display("destination field is missing"))] + MissingDestinationField {}, + #[snafu(display("mask field is missing"))] + MissingMaskField {}, + #[cfg(not(target_os = "android"))] + #[snafu(display("netlink"))] + Netlink { + source: netlink_proto::Error, + }, + #[cfg(not(target_os = "android"))] + #[snafu(display("unexpected netlink message"))] + UnexpectedNetlinkMessage {}, + #[cfg(not(target_os = "android"))] + #[snafu(display("netlink error message: {message:?}"))] + NetlinkErrorMessage { + message: netlink_packet_core::error::ErrorMessage, + }, +} + +pub async fn default_route() -> Option { + let route = default_route_proc().await; + if let Ok(route) = route { + return route; + } + + #[cfg(target_os = "android")] + let res = android::default_route().await; + + #[cfg(not(target_os = "android"))] + let res = sane::default_route().await; + + res.ok().flatten() +} + +const PROC_NET_ROUTE_PATH: &str = "/proc/net/route"; + +async fn default_route_proc() -> Result, Error> { + const ZERO_ADDR: &str = "00000000"; + let file = File::open(PROC_NET_ROUTE_PATH).await.context(IoSnafu)?; + + // Explicitly set capacity, this is min(4096, DEFAULT_BUF_SIZE): + // https://github.com/google/gvisor/issues/5732 + // On a regular Linux kernel you can read the first 128 bytes of /proc/net/route, + // then come back later to read the next 128 bytes and so on. + // + // In Google Cloud Run, where /proc/net/route comes from gVisor, you have to + // read it all at once. If you read only the first few bytes then the second + // read returns 0 bytes no matter how much originally appeared to be in the file. + // + // At the time of this writing (Mar 2021) Google Cloud Run has eth0 and eth1 + // with a 384 byte /proc/net/route. We allocate a large buffer to ensure we'll + // read it all in one call. + let reader = BufReader::with_capacity(8 * 1024, file); + let mut lines_iter = reader.lines(); + while let Some(line) = lines_iter.next_line().await.context(IoSnafu)? { + if !line.contains(ZERO_ADDR) { + continue; + } + let mut fields = line.split_ascii_whitespace(); + let iface = fields.next().context(MissingIfaceFieldSnafu)?; + let destination = fields.next().context(MissingDestinationFieldSnafu)?; + let mask = fields.nth(5).context(MissingMaskFieldSnafu)?; + // if iface.starts_with("tailscale") || iface.starts_with("wg") { + // continue; + // } + if destination == ZERO_ADDR && mask == ZERO_ADDR { + return Ok(Some(DefaultRouteDetails { + interface_name: iface.to_string(), + })); + } + } + Ok(None) +} + +#[cfg(target_os = "android")] +mod android { + use tokio::process::Command; + + use super::*; + + /// Try find the default route by parsing the "ip route" command output. + /// + /// We use this on Android where /proc/net/route can be missing entries or have locked-down + /// permissions. See also comments in . 
+ pub async fn default_route() -> Result, Error> { + let output = Command::new("/system/bin/ip") + .args(["route", "show", "table", "0"]) + .kill_on_drop(true) + .output() + .await + .context(IoSnafu)?; + let stdout = std::string::String::from_utf8_lossy(&output.stdout); + let details = parse_android_ip_route(&stdout).map(|iface| DefaultRouteDetails { + interface_name: iface.to_string(), + }); + Ok(details) + } +} + +#[cfg(not(target_os = "android"))] +mod sane { + use n0_future::{Either, StreamExt, TryStream}; + use netlink_packet_core::{NetlinkMessage, NLM_F_DUMP, NLM_F_REQUEST}; + use netlink_packet_route::{ + link::{LinkAttribute, LinkMessage}, + route::{RouteAttribute, RouteHeader, RouteMessage, RouteProtocol, RouteScope, RouteType}, + AddressFamily, RouteNetlinkMessage, + }; + use netlink_sys::protocols::NETLINK_ROUTE; + use snafu::IntoError; + use tracing::{info_span, Instrument}; + + use super::*; + + type Handle = netlink_proto::ConnectionHandle; + + macro_rules! try_rtnl { + ($msg: expr, $message_type:path) => {{ + use netlink_packet_core::NetlinkPayload; + use netlink_packet_route::RouteNetlinkMessage; + + let (_header, payload) = $msg.into_parts(); + match payload { + NetlinkPayload::InnerMessage($message_type(msg)) => msg, + NetlinkPayload::Error(err) => { + return Err(NetlinkErrorMessageSnafu { message: err }.build()) + } + _ => return Err(UnexpectedNetlinkMessageSnafu.build()), + } + }}; + } + + pub async fn default_route() -> Result, Error> { + let (connection, handle, _receiver) = + netlink_proto::new_connection::(NETLINK_ROUTE).context(IoSnafu)?; + + let task = tokio::spawn(connection.instrument(info_span!("netlink.conn"))); + + let default = default_route_netlink_family(&handle, AddressFamily::Inet).await?; + let default = match default { + Some(default) => Some(default), + None => { + default_route_netlink_family(&handle, netlink_packet_route::AddressFamily::Inet6) + .await? + } + }; + task.abort(); + task.await.ok(); + Ok(default.map(|(name, _index)| DefaultRouteDetails { + interface_name: name, + })) + } + + fn get_route( + handle: Handle, + message: RouteMessage, + ) -> impl TryStream { + let mut req = NetlinkMessage::from(RouteNetlinkMessage::GetRoute(message)); + req.header.flags = NLM_F_REQUEST | NLM_F_DUMP; + + match handle.request(req, netlink_proto::sys::SocketAddr::new(0, 0)) { + Ok(response) => Either::Left( + response.map(move |msg| Ok(try_rtnl!(msg, RouteNetlinkMessage::NewRoute))), + ), + Err(e) => Either::Right(n0_future::stream::once::>(Err( + NetlinkSnafu.into_error(e), + ))), + } + } + + fn create_route_message(family: netlink_packet_route::AddressFamily) -> RouteMessage { + let mut message = RouteMessage::default(); + message.header.table = RouteHeader::RT_TABLE_MAIN; + message.header.protocol = RouteProtocol::Static; + message.header.scope = RouteScope::Universe; + message.header.kind = RouteType::Unicast; + message.header.address_family = family; + message + } + + /// Returns the `(name, index)` of the interface for the default route. + async fn default_route_netlink_family( + handle: &Handle, + family: netlink_packet_route::AddressFamily, + ) -> Result, Error> { + let msg = create_route_message(family); + let mut routes = get_route(handle.clone(), msg); + + while let Some(route) = routes.try_next().await? { + let route_attrs = route.attributes; + + if !route_attrs + .iter() + .any(|attr| matches!(attr, RouteAttribute::Gateway(_))) + { + // A default route has a gateway. 
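+                // Routes without a gateway attribute are directly-connected (on-link)
+                // routes, so they cannot be the default route we are looking for.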
+ continue; + } + + if route.header.destination_prefix_length > 0 { + // A default route has no destination prefix length because it needs to route all + // destinations. + continue; + } + + let index = route_attrs.iter().find_map(|attr| match attr { + RouteAttribute::Oif(index) => Some(*index), + _ => None, + }); + + if let Some(index) = index { + if index == 0 { + continue; + } + let name = iface_by_index(handle, index).await?; + return Ok(Some((name, index))); + } + } + Ok(None) + } + + fn get_link( + handle: Handle, + message: LinkMessage, + ) -> impl TryStream { + let mut req = NetlinkMessage::from(RouteNetlinkMessage::GetLink(message)); + req.header.flags = NLM_F_REQUEST; + + match handle.request(req, netlink_proto::sys::SocketAddr::new(0, 0)) { + Ok(response) => Either::Left( + response.map(move |msg| Ok(try_rtnl!(msg, RouteNetlinkMessage::NewLink))), + ), + Err(e) => Either::Right(n0_future::stream::once::>(Err( + NetlinkSnafu.into_error(e), + ))), + } + } + + fn create_link_get_message(index: u32) -> LinkMessage { + let mut message = LinkMessage::default(); + message.header.index = index; + message + } + + async fn iface_by_index(handle: &Handle, index: u32) -> Result { + let message = create_link_get_message(index); + let mut links = get_link(handle.clone(), message); + let msg = links.try_next().await?.context(NoResponseSnafu)?; + + for nla in msg.attributes { + if let LinkAttribute::IfName(name) = nla { + return Ok(name); + } + } + Err(InterfaceNotFoundSnafu.build()) + } + + #[cfg(test)] + mod tests { + use super::*; + + #[tokio::test] + async fn test_default_route_netlink() { + let route = default_route().await.unwrap(); + // assert!(route.is_some()); + if let Some(route) = route { + assert!(!route.interface_name.is_empty()); + } + } + } +} + +/// Parses the output of the android `/system/bin/ip` command for the default route. +/// +/// Searches for line like `default via 10.0.2.2. dev radio0 table 1016 proto static mtu +/// 1500` +#[cfg(any(target_os = "android", test))] +fn parse_android_ip_route(stdout: &str) -> Option<&str> { + for line in stdout.lines() { + if !line.starts_with("default via") { + continue; + } + let mut fields = line.split_ascii_whitespace(); + if let Some(_dev) = fields.find(|s: &&str| *s == "dev") { + return fields.next(); + } + } + None +} + +#[cfg(test)] +mod tests { + use super::*; + + #[tokio::test] + async fn test_default_route_proc() { + let route = default_route_proc().await.unwrap(); + // assert!(route.is_some()); + if let Some(route) = route { + assert!(!route.interface_name.is_empty()); + } + } + + #[test] + fn test_parse_android_ip_route() { + let stdout = "default via 10.0.2.2. dev radio0 table 1016 proto static mtu 1500"; + let iface = parse_android_ip_route(stdout).unwrap(); + assert_eq!(iface, "radio0"); + } +} diff --git a/patches/netwatch/src/interfaces/wasm_browser.rs b/patches/netwatch/src/interfaces/wasm_browser.rs new file mode 100644 index 0000000000..0000504854 --- /dev/null +++ b/patches/netwatch/src/interfaces/wasm_browser.rs @@ -0,0 +1,120 @@ +// Copyright 2022-2024 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT +use std::{collections::HashMap, fmt}; + +use js_sys::{JsString, Reflect}; + +pub const BROWSER_INTERFACE: &str = "browserif"; + +/// Represents a network interface. 
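+///
+/// In the browser there is only ever a single pseudo-interface, named `BROWSER_INTERFACE`,
+/// whose up/down state mirrors `globalThis.navigator.onLine`.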
+#[derive(Debug, PartialEq, Eq)] +pub struct Interface { + is_up: bool, +} + +impl fmt::Display for Interface { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "navigator.onLine={}", self.is_up) + } +} + +impl Interface { + async fn new() -> Self { + let is_up = Self::is_up(); + tracing::debug!(onLine = is_up, "Fetched globalThis.navigator.onLine"); + Self { + is_up: is_up.unwrap_or(true), + } + } + + fn is_up() -> Option { + let navigator = Reflect::get( + js_sys::global().as_ref(), + JsString::from("navigator").as_ref(), + ) + .ok()?; + + let is_up = Reflect::get(&navigator, JsString::from("onLine").as_ref()).ok()?; + + is_up.as_bool() + } + + /// The name of the interface. + pub(crate) fn name(&self) -> &str { + BROWSER_INTERFACE + } +} + +/// Intended to store the state of the machine's network interfaces, routing table, and +/// other network configuration. For now it's pretty basic. +#[derive(Debug, PartialEq, Eq)] +pub struct State { + /// Maps from an interface name interface. + pub interfaces: HashMap, + + /// Whether this machine has an IPv6 Global or Unique Local Address + /// which might provide connectivity. + pub have_v6: bool, + + /// Whether the machine has some non-localhost, non-link-local IPv4 address. + pub have_v4: bool, + + //// Whether the current network interface is considered "expensive", which currently means LTE/etc + /// instead of Wifi. This field is not populated by `get_state`. + pub(crate) is_expensive: bool, + + /// The interface name for the machine's default route. + /// + /// It is not yet populated on all OSes. + /// + /// When set, its value is the map key into `interface` and `interface_ips`. + pub(crate) default_route_interface: Option, + + /// The HTTP proxy to use, if any. + pub(crate) http_proxy: Option, + + /// The URL to the Proxy Autoconfig URL, if applicable. + pub(crate) pac: Option, +} + +impl fmt::Display for State { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + for iface in self.interfaces.values() { + write!(f, "{iface}")?; + if let Some(ref default_if) = self.default_route_interface { + if iface.name() == default_if { + write!(f, " (default)")?; + } + } + if f.alternate() { + writeln!(f)?; + } else { + write!(f, "; ")?; + } + } + Ok(()) + } +} + +impl State { + /// Returns the state of all the current machine's network interfaces. + /// + /// It does not set the returned `State.is_expensive`. The caller can populate that. 
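+    ///
+    /// Illustrative usage (hypothetical caller code):
+    ///
+    /// ```ignore
+    /// let state = State::new().await;
+    /// // In the browser this reduces to a single entry keyed by `BROWSER_INTERFACE`.
+    /// assert!(state.interfaces.contains_key(BROWSER_INTERFACE));
+    /// ```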
+ pub async fn new() -> Self { + let mut interfaces = HashMap::new(); + let have_v6 = false; + let have_v4 = false; + + interfaces.insert(BROWSER_INTERFACE.to_string(), Interface::new().await); + + State { + interfaces, + have_v4, + have_v6, + is_expensive: false, + default_route_interface: Some(BROWSER_INTERFACE.to_string()), + http_proxy: None, + pac: None, + } + } +} diff --git a/patches/netwatch/src/interfaces/windows.rs b/patches/netwatch/src/interfaces/windows.rs new file mode 100644 index 0000000000..7d515be4c7 --- /dev/null +++ b/patches/netwatch/src/interfaces/windows.rs @@ -0,0 +1,60 @@ +// Copyright 2022-2024 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT +use std::collections::HashMap; + +use nested_enum_utils::common_fields; +use serde::Deserialize; +use snafu::{Backtrace, OptionExt, ResultExt, Snafu}; +use tracing::warn; +use wmi::{query::FilterValue, COMLibrary, WMIConnection}; + +use super::DefaultRouteDetails; + +/// API Docs: +#[derive(Deserialize, Debug)] +#[allow(non_camel_case_types, non_snake_case)] +struct Win32_IP4RouteTable { + Name: String, +} + +#[common_fields({ + backtrace: Option, +})] +#[derive(Debug, Snafu)] +#[non_exhaustive] +pub enum Error { + #[allow(dead_code)] // not sure why we have this here? + #[snafu(display("IO"))] + Io { source: std::io::Error }, + #[snafu(display("not route found"))] + NoRoute {}, + #[snafu(display("WMI"))] + Wmi { source: wmi::WMIError }, +} + +fn get_default_route() -> Result { + let com_con = COMLibrary::new().context(WmiSnafu)?; + let wmi_con = WMIConnection::new(com_con).context(WmiSnafu)?; + + let query: HashMap<_, _> = [("Destination".into(), FilterValue::Str("0.0.0.0"))].into(); + let route: Win32_IP4RouteTable = wmi_con + .filtered_query(&query) + .context(WmiSnafu)? + .drain(..) + .next() + .context(NoRouteSnafu)?; + + Ok(DefaultRouteDetails { + interface_name: route.Name, + }) +} + +pub async fn default_route() -> Option { + match get_default_route() { + Ok(route) => Some(route), + Err(err) => { + warn!("failed to retrieve default route: {:#?}", err); + None + } + } +} diff --git a/patches/netwatch/src/ip.rs b/patches/netwatch/src/ip.rs new file mode 100644 index 0000000000..4fccc36e02 --- /dev/null +++ b/patches/netwatch/src/ip.rs @@ -0,0 +1,161 @@ +// Copyright 2022-2024 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT +//! IP address related utilities. + +#[cfg(not(wasm_browser))] +use std::net::IpAddr; +use std::net::Ipv6Addr; + +#[cfg(not(wasm_browser))] +const IFF_UP: u32 = 0x1; +#[cfg(not(wasm_browser))] +const IFF_LOOPBACK: u32 = 0x8; + +/// List of machine's IP addresses. +#[cfg(not(wasm_browser))] +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct LocalAddresses { + /// Loopback addresses. + pub loopback: Vec, + /// Regular addresses. + pub regular: Vec, +} + +#[cfg(not(wasm_browser))] +impl Default for LocalAddresses { + fn default() -> Self { + Self::new() + } +} + +#[cfg(not(wasm_browser))] +impl LocalAddresses { + /// Returns the machine's IP addresses. + /// If there are no regular addresses it will return any IPv4 linklocal or IPv6 unique local + /// addresses because we know of environments where these are used with NAT to provide connectivity. 
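+    ///
+    /// Illustrative usage (hypothetical caller code):
+    ///
+    /// ```ignore
+    /// let addrs = LocalAddresses::new();
+    /// // `regular` holds the sorted, non-loopback candidate addresses.
+    /// for ip in &addrs.regular {
+    ///     println!("local candidate: {ip}");
+    /// }
+    /// ```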
+ pub fn new() -> Self { + let ifaces = netdev::interface::get_interfaces(); + + let mut loopback = Vec::new(); + let mut regular4 = Vec::new(); + let mut regular6 = Vec::new(); + let mut linklocal4 = Vec::new(); + let mut ula6 = Vec::new(); + + for iface in ifaces { + if !is_up(&iface) { + // Skip down interfaces + continue; + } + let ifc_is_loopback = is_loopback(&iface); + let addrs = iface + .ipv4 + .iter() + .map(|a| IpAddr::V4(a.addr())) + .chain(iface.ipv6.iter().map(|a| IpAddr::V6(a.addr()))); + + for ip in addrs { + let ip = ip.to_canonical(); + + if ip.is_loopback() || ifc_is_loopback { + loopback.push(ip); + } else if is_link_local(ip) { + if ip.is_ipv4() { + linklocal4.push(ip); + } + + // We know of no cases where the IPv6 fe80:: addresses + // are used to provide WAN connectivity. It is also very + // common for users to have no IPv6 WAN connectivity, + // but their OS supports IPv6 so they have an fe80:: + // address. We don't want to report all of those + // IPv6 LL to Control. + } else if ip.is_ipv6() && is_private(&ip) { + // Google Cloud Run uses NAT with IPv6 Unique + // Local Addresses to provide IPv6 connectivity. + ula6.push(ip); + } else if ip.is_ipv4() { + regular4.push(ip); + } else { + regular6.push(ip); + } + } + } + + if regular4.is_empty() && regular6.is_empty() { + // if we have no usable IP addresses then be willing to accept + // addresses we otherwise wouldn't, like: + // + 169.254.x.x (AWS Lambda uses NAT with these) + // + IPv6 ULA (Google Cloud Run uses these with address translation) + regular4 = linklocal4; + regular6 = ula6; + } + let mut regular = regular4; + regular.extend(regular6); + + regular.sort(); + loopback.sort(); + + LocalAddresses { loopback, regular } + } +} + +#[cfg(not(wasm_browser))] +pub(crate) const fn is_up(interface: &netdev::Interface) -> bool { + interface.flags & IFF_UP != 0 +} + +#[cfg(not(wasm_browser))] +pub(crate) const fn is_loopback(interface: &netdev::Interface) -> bool { + interface.flags & IFF_LOOPBACK != 0 +} + +/// Reports whether ip is a private address, according to RFC 1918 +/// (IPv4 addresses) and RFC 4193 (IPv6 addresses). That is, it reports whether +/// ip is in 10.0.0.0/8, 172.16.0.0/12, 192.168.0.0/16, or fc00::/7. +#[cfg(not(wasm_browser))] +pub(crate) fn is_private(ip: &IpAddr) -> bool { + match ip { + IpAddr::V4(ip) => { + // RFC 1918 allocates 10.0.0.0/8, 172.16.0.0/12, and 192.168.0.0/16 as + // private IPv4 address subnets. + let octets = ip.octets(); + octets[0] == 10 + || (octets[0] == 172 && octets[1] & 0xf0 == 16) + || (octets[0] == 192 && octets[1] == 168) + } + IpAddr::V6(ip) => is_private_v6(ip), + } +} + +#[cfg(not(wasm_browser))] +pub(crate) fn is_private_v6(ip: &Ipv6Addr) -> bool { + // RFC 4193 allocates fc00::/7 as the unique local unicast IPv6 address subnet. + ip.octets()[0] & 0xfe == 0xfc +} + +#[cfg(not(wasm_browser))] +pub(super) fn is_link_local(ip: IpAddr) -> bool { + match ip { + IpAddr::V4(ip) => ip.is_link_local(), + IpAddr::V6(ip) => is_unicast_link_local(ip), + } +} + +/// Returns true if the address is a unicast address with link-local scope, as defined in RFC 4291. 
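+///
+/// For example, `fe80::1` is unicast link-local, while the unique-local address `fd00::1`
+/// and the multicast address `ff02::1` are not.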
+// Copied from std lib, not stable yet +pub const fn is_unicast_link_local(addr: Ipv6Addr) -> bool { + (addr.segments()[0] & 0xffc0) == 0xfe80 +} + +#[cfg(test)] +mod tests { + #[cfg(not(wasm_browser))] + #[test] + fn test_local_addresses() { + let addrs = super::LocalAddresses::new(); + dbg!(&addrs); + assert!(!addrs.loopback.is_empty()); + assert!(!addrs.regular.is_empty()); + } +} diff --git a/patches/netwatch/src/ip_family.rs b/patches/netwatch/src/ip_family.rs new file mode 100644 index 0000000000..e7f8ee4ccc --- /dev/null +++ b/patches/netwatch/src/ip_family.rs @@ -0,0 +1,49 @@ +// Copyright 2022-2024 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT +use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; + +/// Ip family selection between Ipv4 and Ipv6. +#[derive(Debug, Copy, Clone, PartialEq, Eq)] +pub enum IpFamily { + /// Ipv4 + V4, + /// Ipv6 + V6, +} + +impl From for IpFamily { + fn from(value: IpAddr) -> Self { + match value { + IpAddr::V4(_) => Self::V4, + IpAddr::V6(_) => Self::V6, + } + } +} + +impl IpFamily { + /// Returns the matching default address. + pub fn unspecified_addr(&self) -> IpAddr { + match self { + Self::V4 => Ipv4Addr::UNSPECIFIED.into(), + Self::V6 => Ipv6Addr::UNSPECIFIED.into(), + } + } + + /// Returns the matching localhost address. + pub fn local_addr(&self) -> IpAddr { + match self { + Self::V4 => Ipv4Addr::LOCALHOST.into(), + Self::V6 => Ipv6Addr::LOCALHOST.into(), + } + } +} + +#[cfg(not(wasm_browser))] +impl From for socket2::Domain { + fn from(value: IpFamily) -> Self { + match value { + IpFamily::V4 => socket2::Domain::IPV4, + IpFamily::V6 => socket2::Domain::IPV6, + } + } +} diff --git a/patches/netwatch/src/lib.rs b/patches/netwatch/src/lib.rs new file mode 100644 index 0000000000..70078e9275 --- /dev/null +++ b/patches/netwatch/src/lib.rs @@ -0,0 +1,15 @@ +// Copyright 2022-2024 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT +//! Networking related utilities + +#[cfg_attr(wasm_browser, path = "interfaces/wasm_browser.rs")] +pub mod interfaces; +pub mod ip; +mod ip_family; +pub mod netmon; +#[cfg(not(wasm_browser))] +mod udp; + +pub use self::ip_family::IpFamily; +#[cfg(not(wasm_browser))] +pub use self::udp::UdpSocket; diff --git a/patches/netwatch/src/netmon.rs b/patches/netwatch/src/netmon.rs new file mode 100644 index 0000000000..cf7f02d51e --- /dev/null +++ b/patches/netwatch/src/netmon.rs @@ -0,0 +1,133 @@ +// Copyright 2022-2024 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT +//! Monitoring of networking interfaces and route changes. + +use n0_future::{ + boxed::BoxFuture, + task::{self, AbortOnDropHandle}, +}; +use nested_enum_utils::common_fields; +use snafu::{Backtrace, ResultExt, Snafu}; +use tokio::sync::{mpsc, oneshot}; + +mod actor; +#[cfg(target_os = "android")] +mod android; +#[cfg(any( + target_os = "freebsd", + target_os = "openbsd", + target_os = "netbsd", + target_os = "macos", + target_os = "ios" +))] +mod bsd; +#[cfg(target_os = "linux")] +mod linux; +#[cfg(wasm_browser)] +mod wasm_browser; +#[cfg(target_os = "windows")] +mod windows; + +pub use self::actor::CallbackToken; +use self::actor::{Actor, ActorMessage}; + +/// Monitors networking interface and route changes. +#[derive(Debug)] +pub struct Monitor { + /// Task handle for the monitor task. 
+ _handle: AbortOnDropHandle<()>, + actor_tx: mpsc::Sender, +} + +#[common_fields({ + backtrace: Option, +})] +#[derive(Debug, Snafu)] +#[non_exhaustive] +pub enum Error { + #[snafu(display("channel closed"))] + ChannelClosed {}, + #[snafu(display("actor error"))] + Actor { source: actor::Error }, +} + +impl From> for Error { + fn from(_value: mpsc::error::SendError) -> Self { + ChannelClosedSnafu.build() + } +} + +impl From for Error { + fn from(_value: oneshot::error::RecvError) -> Self { + ChannelClosedSnafu.build() + } +} + +impl Monitor { + /// Create a new monitor. + pub async fn new() -> Result { + let actor = Actor::new().await.context(ActorSnafu)?; + let actor_tx = actor.subscribe(); + + let handle = task::spawn(async move { + actor.run().await; + }); + + Ok(Monitor { + _handle: AbortOnDropHandle::new(handle), + actor_tx, + }) + } + + /// Subscribe to network changes. + pub async fn subscribe(&self, callback: F) -> Result + where + F: Fn(bool) -> BoxFuture<()> + 'static + Sync + Send, + { + let (s, r) = oneshot::channel(); + self.actor_tx + .send(ActorMessage::Subscribe(Box::new(callback), s)) + .await?; + let token = r.await?; + Ok(token) + } + + /// Unsubscribe a callback from network changes, using the provided token. + pub async fn unsubscribe(&self, token: CallbackToken) -> Result<(), Error> { + let (s, r) = oneshot::channel(); + self.actor_tx + .send(ActorMessage::Unsubscribe(token, s)) + .await?; + r.await?; + Ok(()) + } + + /// Potential change detected outside + pub async fn network_change(&self) -> Result<(), Error> { + self.actor_tx.send(ActorMessage::NetworkChange).await?; + Ok(()) + } +} + +#[cfg(test)] +mod tests { + use n0_future::future::FutureExt; + + use super::*; + + #[tokio::test] + async fn test_smoke_monitor() { + let mon = Monitor::new().await.unwrap(); + let _token = mon + .subscribe(|is_major| { + async move { + println!("CHANGE DETECTED: {}", is_major); + } + .boxed() + }) + .await + .unwrap(); + + tokio::time::sleep(std::time::Duration::from_secs(15)).await; + } +} diff --git a/patches/netwatch/src/netmon/actor.rs b/patches/netwatch/src/netmon/actor.rs new file mode 100644 index 0000000000..de0eeea7ac --- /dev/null +++ b/patches/netwatch/src/netmon/actor.rs @@ -0,0 +1,276 @@ +// Copyright 2022-2024 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT +use std::{collections::HashMap, sync::Arc}; + +use n0_future::{ + boxed::BoxFuture, + task, + time::{self, Duration, Instant}, +}; +#[cfg(not(wasm_browser))] +use os::is_interesting_interface; +pub(super) use os::Error; +use os::RouteMonitor; +use tokio::sync::{mpsc, oneshot}; +use tracing::{debug, trace}; + +#[cfg(target_os = "android")] +use super::android as os; +#[cfg(any( + target_os = "freebsd", + target_os = "openbsd", + target_os = "netbsd", + target_os = "macos", + target_os = "ios" +))] +use super::bsd as os; +#[cfg(target_os = "linux")] +use super::linux as os; +#[cfg(wasm_browser)] +use super::wasm_browser as os; +#[cfg(target_os = "windows")] +use super::windows as os; +use crate::interfaces::State; +#[cfg(not(wasm_browser))] +use crate::{interfaces::IpNet, ip::is_link_local}; + +/// The message sent by the OS specific monitors. +#[derive(Debug, Copy, Clone)] +pub(super) enum NetworkMessage { + /// A change was detected. + #[allow(dead_code)] + Change, +} + +/// How often we execute a check for big jumps in wall time. 
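+/// A jump of more than 150% of this interval between checks is taken as the machine having
+/// just woken from sleep, and is reported to subscribers as a major change.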
+#[cfg(not(any(target_os = "ios", target_os = "android")))] +const POLL_WALL_TIME_INTERVAL: Duration = Duration::from_secs(15); +/// Set background polling time to 1h to effectively disable it on mobile, +/// to avoid increased battery usage. Sleep detection won't work this way there. +#[cfg(any(target_os = "ios", target_os = "android"))] +const POLL_WALL_TIME_INTERVAL: Duration = Duration::from_secs(60 * 60); +const MON_CHAN_CAPACITY: usize = 16; +const ACTOR_CHAN_CAPACITY: usize = 16; + +pub(super) struct Actor { + /// Latest known interface state. + interface_state: State, + /// Latest observed wall time. + wall_time: Instant, + /// OS specific monitor. + #[allow(dead_code)] + route_monitor: RouteMonitor, + mon_receiver: mpsc::Receiver, + actor_receiver: mpsc::Receiver, + actor_sender: mpsc::Sender, + /// Callback registry. + callbacks: HashMap>, + callback_token: u64, +} + +/// Token to remove a callback +#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] +pub struct CallbackToken(u64); + +/// Callbacks that get notified about changes. +pub(super) type Callback = Box BoxFuture<()> + Sync + Send + 'static>; + +pub(super) enum ActorMessage { + Subscribe(Callback, oneshot::Sender), + Unsubscribe(CallbackToken, oneshot::Sender<()>), + NetworkChange, +} + +impl Actor { + pub(super) async fn new() -> Result { + let interface_state = State::new().await; + let wall_time = Instant::now(); + + let (mon_sender, mon_receiver) = mpsc::channel(MON_CHAN_CAPACITY); + let route_monitor = RouteMonitor::new(mon_sender)?; + let (actor_sender, actor_receiver) = mpsc::channel(ACTOR_CHAN_CAPACITY); + + Ok(Actor { + interface_state, + wall_time, + route_monitor, + mon_receiver, + actor_receiver, + actor_sender, + callbacks: Default::default(), + callback_token: 0, + }) + } + + pub(super) fn subscribe(&self) -> mpsc::Sender { + self.actor_sender.clone() + } + + pub(super) async fn run(mut self) { + const DEBOUNCE: Duration = Duration::from_millis(250); + + let mut last_event = None; + let mut debounce_interval = time::interval(DEBOUNCE); + let mut wall_time_interval = time::interval(POLL_WALL_TIME_INTERVAL); + + loop { + tokio::select! 
{ + biased; + + _ = debounce_interval.tick() => { + if let Some(time_jumped) = last_event.take() { + self.handle_potential_change(time_jumped).await; + } + } + _ = wall_time_interval.tick() => { + trace!("tick: wall_time_interval"); + if self.check_wall_time_advance() { + // Trigger potential change + last_event.replace(true); + debounce_interval.reset_immediately(); + } + } + event = self.mon_receiver.recv() => { + match event { + Some(NetworkMessage::Change) => { + trace!("network activity detected"); + last_event.replace(false); + debounce_interval.reset_immediately(); + } + None => { + debug!("shutting down, network monitor receiver gone"); + break; + } + } + } + msg = self.actor_receiver.recv() => { + match msg { + Some(ActorMessage::Subscribe(callback, s)) => { + let token = self.next_callback_token(); + self.callbacks.insert(token, Arc::new(callback)); + s.send(token).ok(); + } + Some(ActorMessage::Unsubscribe(token, s)) => { + self.callbacks.remove(&token); + s.send(()).ok(); + } + Some(ActorMessage::NetworkChange) => { + trace!("external network activity detected"); + last_event.replace(false); + debounce_interval.reset_immediately(); + } + None => { + debug!("shutting down, actor receiver gone"); + break; + } + } + } + } + } + } + + fn next_callback_token(&mut self) -> CallbackToken { + let token = CallbackToken(self.callback_token); + self.callback_token += 1; + token + } + + async fn handle_potential_change(&mut self, time_jumped: bool) { + trace!("potential change"); + + let new_state = State::new().await; + let old_state = &self.interface_state; + + // No major changes, continue on + if !time_jumped && old_state == &new_state { + debug!("no changes detected"); + return; + } + + let is_major = is_major_change(old_state, &new_state) || time_jumped; + + if is_major { + self.interface_state = new_state; + } + + debug!("triggering {} callbacks", self.callbacks.len()); + for cb in self.callbacks.values() { + let cb = cb.clone(); + task::spawn(async move { + cb(is_major).await; + }); + } + } + + /// Reports whether wall time jumped more than 150% + /// of `POLL_WALL_TIME_INTERVAL`, indicating we probably just came out of sleep. + fn check_wall_time_advance(&mut self) -> bool { + let now = Instant::now(); + let jumped = if let Some(elapsed) = now.checked_duration_since(self.wall_time) { + elapsed > POLL_WALL_TIME_INTERVAL * 3 / 2 + } else { + false + }; + + self.wall_time = now; + jumped + } +} + +#[cfg(wasm_browser)] +fn is_major_change(s1: &State, s2: &State) -> bool { + // All changes are major. + // In the browser, there only are changes from online to offline + s1 != s2 +} + +#[cfg(not(wasm_browser))] +fn is_major_change(s1: &State, s2: &State) -> bool { + if s1.have_v6 != s2.have_v6 + || s1.have_v4 != s2.have_v4 + || s1.is_expensive != s2.is_expensive + || s1.default_route_interface != s2.default_route_interface + || s1.http_proxy != s2.http_proxy + || s1.pac != s2.pac + { + return true; + } + + for (iname, i) in &s1.interfaces { + if !is_interesting_interface(i.name()) { + continue; + } + let Some(i2) = s2.interfaces.get(iname) else { + return true; + }; + if i != i2 || !prefixes_major_equal(i.addrs(), i2.addrs()) { + return true; + } + } + + false +} + +/// Checks whether `a` and `b` are equal after ignoring uninteresting +/// things, like link-local, loopback and multicast addresses. 
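+///
+/// The remaining "interesting" prefixes are compared pairwise in iteration order; a prefix
+/// that only the longer of the two lists contains is ignored by the comparison.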
+#[cfg(not(wasm_browser))]
+fn prefixes_major_equal(a: impl Iterator<Item = IpNet>, b: impl Iterator<Item = IpNet>) -> bool {
+    fn is_interesting(p: &IpNet) -> bool {
+        let a = p.addr();
+        if is_link_local(a) || a.is_loopback() || a.is_multicast() {
+            return false;
+        }
+        true
+    }
+
+    let a = a.filter(is_interesting);
+    let b = b.filter(is_interesting);
+
+    for (a, b) in a.zip(b) {
+        if a != b {
+            return false;
+        }
+    }
+
+    true
+}
diff --git a/patches/netwatch/src/netmon/android.rs b/patches/netwatch/src/netmon/android.rs
new file mode 100644
index 0000000000..089327170a
--- /dev/null
+++ b/patches/netwatch/src/netmon/android.rs
@@ -0,0 +1,28 @@
+// Copyright 2022-2024 Protocol Labs
+// SPDX-License-Identifier: Apache-2.0, MIT
+use tokio::sync::mpsc;
+
+use super::actor::NetworkMessage;
+
+#[derive(Debug, derive_more::Display)]
+#[display("error")]
+pub struct Error;
+
+impl std::error::Error for Error {}
+
+#[derive(Debug)]
+pub(super) struct RouteMonitor {
+    _sender: mpsc::Sender<NetworkMessage>,
+}
+
+impl RouteMonitor {
+    pub(super) fn new(_sender: mpsc::Sender<NetworkMessage>) -> Result<Self, Error> {
+        // Very sad monitor. Android doesn't allow us to do this
+
+        Ok(RouteMonitor { _sender })
+    }
+}
+
+pub(super) fn is_interesting_interface(_name: &str) -> bool {
+    true
+}
diff --git a/patches/netwatch/src/netmon/bsd.rs b/patches/netwatch/src/netmon/bsd.rs
new file mode 100644
index 0000000000..8cc3498849
--- /dev/null
+++ b/patches/netwatch/src/netmon/bsd.rs
@@ -0,0 +1,138 @@
+// Copyright 2022-2024 Protocol Labs
+// SPDX-License-Identifier: Apache-2.0, MIT
+#[cfg(any(target_os = "macos", target_os = "ios"))]
+use libc::{RTAX_DST, RTAX_IFP};
+use snafu::{Backtrace, ResultExt, Snafu};
+use tokio::{io::AsyncReadExt, sync::mpsc};
+use tokio_util::task::AbortOnDropHandle;
+use tracing::{trace, warn};
+
+use super::actor::NetworkMessage;
+#[cfg(any(target_os = "freebsd", target_os = "netbsd", target_os = "openbsd"))]
+use crate::interfaces::bsd::{RTAX_DST, RTAX_IFP};
+use crate::{interfaces::bsd::WireMessage, ip::is_link_local};
+
+#[derive(Debug)]
+pub(super) struct RouteMonitor {
+    _handle: AbortOnDropHandle<()>,
+}
+
+#[derive(Debug, Snafu)]
+#[non_exhaustive]
+pub enum Error {
+    #[snafu(display("IO"))]
+    Io {
+        source: std::io::Error,
+        backtrace: Option<Backtrace>,
+    },
+}
+
+fn create_socket() -> std::io::Result<tokio::net::UnixStream> {
+    use std::os::fd::{FromRawFd, IntoRawFd};
+
+    // socket2 0.5+ compatibility: use raw socket type constant instead of Type::RAW
+    let socket = socket2::Socket::new(libc::AF_ROUTE.into(), socket2::Type::from(libc::SOCK_RAW), None)?;
+    socket.set_nonblocking(true)?;
+
+    // socket2 0.5+ compatibility: explicit conversion through raw file descriptor
+    let fd = socket.into_raw_fd();
+    let socket_std: std::os::unix::net::UnixStream = unsafe { std::os::unix::net::UnixStream::from_raw_fd(fd) };
+    let socket: tokio::net::UnixStream = socket_std.try_into()?;
+
+    trace!("AF_ROUTE socket bound");
+
+    Ok(socket)
+}
+
+impl RouteMonitor {
+    pub(super) fn new(sender: mpsc::Sender<NetworkMessage>) -> Result<Self, Error> {
+        let mut socket = create_socket().context(IoSnafu)?;
+        let handle = tokio::task::spawn(async move {
+            trace!("AF_ROUTE monitor started");
+
+            // TODO: cleaner shutdown
+            let mut buffer = vec![0u8; 2048];
+            loop {
+                match socket.read(&mut buffer).await {
+                    Ok(read) => {
+                        trace!("AF_ROUTE: read {} bytes", read);
+                        match super::super::interfaces::bsd::parse_rib(
+                            libc::NET_RT_DUMP,
+                            &buffer[..read],
+                        ) {
+                            Ok(msgs) => {
+                                if contains_interesting_message(&msgs) {
+                                    sender.send(NetworkMessage::Change).await.ok();
+                                }
+                            }
+                            Err(err) => {
+                                warn!("AF_ROUTE: failed to parse rib: {:?}", err);
+                            }
+                        }
+                    }
+                    Err(err) => {
+                        warn!("AF_ROUTE: error reading: {:?}", err);
+                        // recreate socket, as it is likely in an invalid state
+                        // TODO: distinguish between different errors?
+                        match create_socket() {
+                            Ok(new_socket) => {
+                                socket = new_socket;
+                            }
+                            Err(err) => {
+                                warn!("AF_ROUTE: unable to bind a new socket: {:?}", err);
+                                // TODO: what to do here?
+                            }
+                        }
+                    }
+                }
+            }
+        });
+
+        Ok(RouteMonitor {
+            _handle: AbortOnDropHandle::new(handle),
+        })
+    }
+}
+
+fn contains_interesting_message(msgs: &[WireMessage]) -> bool {
+    msgs.iter().any(is_interesting_message)
+}
+
+pub(super) fn is_interesting_message(msg: &WireMessage) -> bool {
+    match msg {
+        WireMessage::InterfaceMulticastAddr(_) => true,
+        WireMessage::Interface(_) => false,
+        WireMessage::InterfaceAddr(msg) => {
+            if let Some(addr) = msg.addrs.get(RTAX_IFP as usize) {
+                if let Some(name) = addr.name() {
+                    if !is_interesting_interface(name) {
+                        return false;
+                    }
+                }
+            }
+            true
+        }
+        WireMessage::Route(msg) => {
+            // Ignore local unicast
+            if let Some(addr) = msg.addrs.get(RTAX_DST as usize) {
+                if let Some(ip) = addr.ip() {
+                    if is_link_local(ip) {
+                        return false;
+                    }
+                }
+            }
+
+            true
+        }
+        WireMessage::InterfaceAnnounce(_) => false,
+    }
+}
+
+pub(super) fn is_interesting_interface(name: &str) -> bool {
+    let base_name = name.trim_end_matches(|c: char| c.is_ascii_digit());
+    if base_name == "llw" || base_name == "awdl" || base_name == "ipsec" {
+        return false;
+    }
+
+    true
+}
diff --git a/patches/netwatch/src/netmon/linux.rs b/patches/netwatch/src/netmon/linux.rs
new file mode 100644
index 0000000000..2b83b25404
--- /dev/null
+++ b/patches/netwatch/src/netmon/linux.rs
@@ -0,0 +1,191 @@
+// Copyright 2022-2024 Protocol Labs
+// SPDX-License-Identifier: Apache-2.0, MIT
+use std::{
+    collections::{HashMap, HashSet},
+    net::IpAddr,
+};
+
+use libc::{
+    RTNLGRP_IPV4_IFADDR, RTNLGRP_IPV4_ROUTE, RTNLGRP_IPV4_RULE, RTNLGRP_IPV6_IFADDR,
+    RTNLGRP_IPV6_ROUTE, RTNLGRP_IPV6_RULE,
+};
+use n0_future::StreamExt;
+use netlink_packet_core::NetlinkPayload;
+use netlink_packet_route::{address, route, RouteNetlinkMessage};
+use netlink_sys::{AsyncSocket, SocketAddr};
+use snafu::{Backtrace, ResultExt, Snafu};
+use tokio::{sync::mpsc, task::JoinHandle};
+use tracing::{trace, warn};
+
+use super::actor::NetworkMessage;
+use crate::ip::is_link_local;
+
+#[derive(Debug)]
+pub(super) struct RouteMonitor {
+    conn_handle: JoinHandle<()>,
+    handle: JoinHandle<()>,
+}
+
+impl Drop for RouteMonitor {
+    fn drop(&mut self) {
+        self.handle.abort();
+        self.conn_handle.abort();
+    }
+}
+
+#[derive(Debug, Snafu)]
+#[non_exhaustive]
+pub enum Error {
+    #[snafu(display("IO"))]
+    Io {
+        source: std::io::Error,
+        backtrace: Option<Backtrace>,
+    },
+}
+
+const fn nl_mgrp(group: u32) -> u32 {
+    if group > 31 {
+        panic!("use netlink_sys::Socket::add_membership() for this group");
+    }
+    if group == 0 {
+        0
+    } else {
+        1 << (group - 1)
+    }
+}
+macro_rules! get_nla {
+    ($msg:expr, $nla:path) => {
+        $msg.attributes.iter().find_map(|nla| match nla {
+            $nla(n) => Some(n),
+            _ => None,
+        })
+    };
+}
+
+impl RouteMonitor {
+    pub(super) fn new(sender: mpsc::Sender<NetworkMessage>) -> Result<Self, Error> {
+        use netlink_sys::protocols::NETLINK_ROUTE;
+
+        let (mut conn, _handle, mut messages) = netlink_proto::new_connection::<
+            netlink_packet_route::RouteNetlinkMessage,
+        >(NETLINK_ROUTE)
+        .context(IoSnafu)?;
+
+        // Specify flags to listen on.
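+        // nl_mgrp() converts an RTNLGRP_* group number into the matching legacy
+        // RTMGRP_*-style bit (1 << (group - 1)), so the individual groups below can
+        // simply be OR'ed together into one multicast-group mask for SocketAddr::new.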
+ let groups = nl_mgrp(RTNLGRP_IPV4_IFADDR) + | nl_mgrp(RTNLGRP_IPV6_IFADDR) + | nl_mgrp(RTNLGRP_IPV4_ROUTE) + | nl_mgrp(RTNLGRP_IPV6_ROUTE) + | nl_mgrp(RTNLGRP_IPV4_RULE) + | nl_mgrp(RTNLGRP_IPV6_RULE); + + let addr = SocketAddr::new(0, groups); + conn.socket_mut() + .socket_mut() + .bind(&addr) + .context(IoSnafu)?; + + let conn_handle = tokio::task::spawn(conn); + + let handle = tokio::task::spawn(async move { + // let mut addr_cache: HashMap>> = HashMap::new(); + let mut addr_cache: HashMap> = HashMap::new(); + + while let Some((message, _)) = messages.next().await { + match message.payload { + NetlinkPayload::Error(err) => { + warn!("error reading netlink payload: {:?}", err); + } + NetlinkPayload::Done(_) => { + trace!("done received, exiting"); + break; + } + NetlinkPayload::InnerMessage(msg) => match msg { + RouteNetlinkMessage::NewAddress(msg) => { + trace!("NEWADDR: {:?}", msg); + let addrs = addr_cache.entry(msg.header.index).or_default(); + if let Some(addr) = get_nla!(msg, address::AddressAttribute::Address) { + if addrs.contains(addr) { + // already cached + continue; + } else { + addrs.insert(*addr); + sender.send(NetworkMessage::Change).await.ok(); + } + } + } + RouteNetlinkMessage::DelAddress(msg) => { + trace!("DELADDR: {:?}", msg); + let addrs = addr_cache.entry(msg.header.index).or_default(); + if let Some(addr) = get_nla!(msg, address::AddressAttribute::Address) { + addrs.remove(addr); + } + sender.send(NetworkMessage::Change).await.ok(); + } + RouteNetlinkMessage::NewRoute(msg) | RouteNetlinkMessage::DelRoute(msg) => { + trace!("ROUTE:: {:?}", msg); + + // Ignore the following messages + let table = get_nla!(msg, route::RouteAttribute::Table) + .copied() + .unwrap_or_default(); + if let Some(dst) = get_nla!(msg, route::RouteAttribute::Destination) { + match dst { + route::RouteAddress::Inet(addr) => { + if (table == 255 || table == 254) + && (addr.is_multicast() + || is_link_local(IpAddr::V4(*addr))) + { + continue; + } + } + route::RouteAddress::Inet6(addr) => { + if (table == 255 || table == 254) + && (addr.is_multicast() + || is_link_local(IpAddr::V6(*addr))) + { + continue; + } + } + _ => {} + } + } + sender.send(NetworkMessage::Change).await.ok(); + } + RouteNetlinkMessage::NewRule(msg) => { + trace!("NEWRULE: {:?}", msg); + sender.send(NetworkMessage::Change).await.ok(); + } + RouteNetlinkMessage::DelRule(msg) => { + trace!("DELRULE: {:?}", msg); + sender.send(NetworkMessage::Change).await.ok(); + } + RouteNetlinkMessage::NewLink(msg) => { + trace!("NEWLINK: {:?}", msg); + // ignored atm + } + RouteNetlinkMessage::DelLink(msg) => { + trace!("DELLINK: {:?}", msg); + // ignored atm + } + msg => { + trace!("unhandled: {:?}", msg); + } + }, + _ => { + // ignore other types + } + } + } + }); + + Ok(RouteMonitor { + handle, + conn_handle, + }) + } +} + +pub(super) fn is_interesting_interface(_name: &str) -> bool { + true +} diff --git a/patches/netwatch/src/netmon/wasm_browser.rs b/patches/netwatch/src/netmon/wasm_browser.rs new file mode 100644 index 0000000000..fdcc32f51c --- /dev/null +++ b/patches/netwatch/src/netmon/wasm_browser.rs @@ -0,0 +1,88 @@ +// Copyright 2022-2024 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT +use js_sys::{ + wasm_bindgen::{prelude::Closure, JsCast}, + Function, +}; +use n0_future::task; +use tokio::sync::mpsc; +use web_sys::{EventListener, EventTarget}; + +use super::actor::NetworkMessage; + +#[derive(Debug, derive_more::Display)] +#[display("error")] +pub struct Error; + +impl std::error::Error for Error {} + 
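+// In the browser there is no OS routing socket to watch. The monitor below instead
+// registers listeners for the global "online"/"offline" events and reports every
+// transition as a potential network change.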
+#[derive(Debug)] +pub(super) struct RouteMonitor { + _listeners: Option, +} + +impl RouteMonitor { + pub(super) fn new(sender: mpsc::Sender) -> Result { + let closure: Function = Closure::::new(move || { + tracing::trace!("browser RouteMonitor event triggered"); + // task::spawn is effectively translated into a queueMicrotask in JS + let sender = sender.clone(); + task::spawn(async move { + sender + .send(NetworkMessage::Change) + .await + .inspect_err(|err| { + tracing::debug!(?err, "failed sending NetworkMessage::Change") + }) + }); + }) + .into_js_value() + .unchecked_into(); + // The closure keeps itself alive via reference counting internally + let _listeners = add_event_listeners(&closure); + Ok(RouteMonitor { _listeners }) + } +} + +fn add_event_listeners(f: &Function) -> Option { + let online_listener = EventListener::new(); + online_listener.set_handle_event(f); + let offline_listener = EventListener::new(); + offline_listener.set_handle_event(f); + + // https://developer.mozilla.org/en-US/docs/Web/API/Navigator/onLine#listening_for_changes_in_network_status + let window: EventTarget = js_sys::global().unchecked_into(); + window + .add_event_listener_with_event_listener("online", &online_listener) + .inspect_err(|err| tracing::debug!(?err, "failed adding event listener")) + .ok()?; + + window + .add_event_listener_with_event_listener("offline", &offline_listener) + .inspect_err(|err| tracing::debug!(?err, "failed adding event listener")) + .ok()?; + + Some(Listeners { + online_listener, + offline_listener, + }) +} + +#[derive(Debug)] +struct Listeners { + online_listener: EventListener, + offline_listener: EventListener, +} + +impl Drop for Listeners { + fn drop(&mut self) { + tracing::trace!("Removing online/offline event listeners"); + let window: EventTarget = js_sys::global().unchecked_into(); + window + .remove_event_listener_with_event_listener("online", &self.online_listener) + .ok(); + window + .remove_event_listener_with_event_listener("offline", &self.offline_listener) + .ok(); + } +} diff --git a/patches/netwatch/src/netmon/windows.rs b/patches/netwatch/src/netmon/windows.rs new file mode 100644 index 0000000000..de8d066d5f --- /dev/null +++ b/patches/netwatch/src/netmon/windows.rs @@ -0,0 +1,225 @@ +// Copyright 2022-2024 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT +use std::{collections::HashMap, sync::Arc}; + +use libc::c_void; +use nested_enum_utils::common_fields; +use snafu::{Backtrace, ResultExt, Snafu}; +use tokio::sync::mpsc; +use tracing::{trace, warn}; +use windows::Win32::{ + Foundation::HANDLE as Handle, + NetworkManagement::IpHelper::{ + MIB_IPFORWARD_ROW2, MIB_NOTIFICATION_TYPE, MIB_UNICASTIPADDRESS_ROW, + }, +}; + +use super::actor::NetworkMessage; + +#[derive(Debug)] +pub(super) struct RouteMonitor { + #[allow(dead_code)] + cb_handler: CallbackHandler, +} + +#[common_fields({ + backtrace: Option, +})] +#[derive(Debug, Snafu)] +#[non_exhaustive] +pub enum Error { + #[snafu(display("IO"))] + Io { source: std::io::Error }, + #[snafu(display("win32"))] + Win32 { source: windows_result::Error }, +} + +impl RouteMonitor { + pub(super) fn new(sender: mpsc::Sender) -> Result { + // Register two callbacks with the windows api + let mut cb_handler = CallbackHandler::default(); + + // 1. 
Unicast Address Changes + let s = sender.clone(); + cb_handler.register_unicast_address_change_callback(Box::new(move || { + if let Err(err) = s.blocking_send(NetworkMessage::Change) { + warn!("unable to send: unicast change notification: {:?}", err); + } + }))?; + + // 2. Route Changes + cb_handler.register_route_change_callback(Box::new(move || { + if let Err(err) = sender.blocking_send(NetworkMessage::Change) { + warn!("unable to send: route change notification: {:?}", err); + } + }))?; + + Ok(RouteMonitor { cb_handler }) + } +} + +pub(super) fn is_interesting_interface(_name: &str) -> bool { + true +} + +/// Manages callbacks registered with the win32 networking API. +#[derive(derive_more::Debug, Default)] +struct CallbackHandler { + /// Stores the callbacks and `Handle`s for unicast. + // `Handle` is not hashable, so store the underlying `isize`. + #[debug("HashMap>, + /// Stores the callbacks and `Handle`s for route. + // `Handle` is not hashable, so store the underlying `isize`. + #[debug("HashMap>, +} + +impl Drop for CallbackHandler { + fn drop(&mut self) { + // Make sure to unregister all callbacks left. + let handles: Vec<_> = self + .unicast_callbacks + .keys() + .map(|h| UnicastCallbackHandle(Handle(*h as *mut c_void))) + .collect(); + + for handle in handles { + self.unregister_unicast_address_change_callback(handle).ok(); // best effort + } + + let handles: Vec<_> = self + .route_callbacks + .keys() + .map(|h| RouteCallbackHandle(Handle(*h as *mut c_void))) + .collect(); + + for handle in handles { + self.unregister_route_change_callback(handle).ok(); // best effort + } + } +} + +struct UnicastCallbackHandle(Handle); +type UnicastCallback = Box; + +struct RouteCallbackHandle(Handle); +type RouteCallback = Box; + +impl CallbackHandler { + fn register_unicast_address_change_callback( + &mut self, + cb: UnicastCallback, + ) -> Result { + trace!("registering unicast callback"); + let mut handle = Handle::default(); + let cb = Arc::new(cb); + unsafe { + windows::Win32::NetworkManagement::IpHelper::NotifyUnicastIpAddressChange( + windows::Win32::Networking::WinSock::AF_UNSPEC, + Some(unicast_change_callback), + Some(Arc::as_ptr(&cb) as *const c_void), // context + false, // initial notification, + &mut handle, + ) + .ok() + .context(Win32Snafu)?; + } + + self.unicast_callbacks.insert(handle.0 as isize, cb); + + Ok(UnicastCallbackHandle(handle)) + } + + fn unregister_unicast_address_change_callback( + &mut self, + handle: UnicastCallbackHandle, + ) -> Result<(), Error> { + trace!("unregistering unicast callback"); + if self + .unicast_callbacks + .remove(&(handle.0 .0 as isize)) + .is_some() + { + unsafe { + windows::Win32::NetworkManagement::IpHelper::CancelMibChangeNotify2(handle.0) + .ok() + .context(Win32Snafu)?; + } + } + + Ok(()) + } + + fn register_route_change_callback( + &mut self, + cb: RouteCallback, + ) -> Result { + trace!("registering route change callback"); + let mut handle = Handle::default(); + let cb = Arc::new(cb); + unsafe { + windows::Win32::NetworkManagement::IpHelper::NotifyRouteChange2( + windows::Win32::Networking::WinSock::AF_UNSPEC, + Some(route_change_callback), + Arc::as_ptr(&cb) as *const c_void, // context + false, // initial notification, + &mut handle, + ) + .ok() + .context(Win32Snafu)?; + } + + self.route_callbacks.insert(handle.0 as isize, cb); + + Ok(RouteCallbackHandle(handle)) + } + + fn unregister_route_change_callback( + &mut self, + handle: RouteCallbackHandle, + ) -> Result<(), Error> { + trace!("unregistering route callback"); + if self 
+ .route_callbacks + .remove(&(handle.0 .0 as isize)) + .is_some() + { + unsafe { + windows::Win32::NetworkManagement::IpHelper::CancelMibChangeNotify2(handle.0) + .ok() + .context(Win32Snafu)?; + } + } + + Ok(()) + } +} + +unsafe extern "system" fn unicast_change_callback( + callercontext: *const c_void, + _row: *const MIB_UNICASTIPADDRESS_ROW, + _notificationtype: MIB_NOTIFICATION_TYPE, +) { + if callercontext.is_null() { + // Nothing we can do + return; + } + let callercontext = callercontext as *const UnicastCallback; + let cb = &*callercontext; + cb(); +} + +unsafe extern "system" fn route_change_callback( + callercontext: *const c_void, + _row: *const MIB_IPFORWARD_ROW2, + _notificationtype: MIB_NOTIFICATION_TYPE, +) { + if callercontext.is_null() { + // Nothing we can do + return; + } + let callercontext = callercontext as *const RouteCallback; + let cb = &*callercontext; + cb(); +} diff --git a/patches/netwatch/src/udp.rs b/patches/netwatch/src/udp.rs new file mode 100644 index 0000000000..0276d5d3c0 --- /dev/null +++ b/patches/netwatch/src/udp.rs @@ -0,0 +1,912 @@ +// Copyright 2022-2024 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT +use std::{ + future::Future, + io, + net::SocketAddr, + pin::Pin, + sync::{atomic::AtomicBool, RwLock, RwLockReadGuard, TryLockError}, + task::{Context, Poll}, +}; + +use atomic_waker::AtomicWaker; +use quinn_udp::Transmit; +use tokio::io::Interest; +use tracing::{debug, trace, warn}; + +use super::IpFamily; + +/// Wrapper around a tokio UDP socket. +#[derive(Debug)] +pub struct UdpSocket { + socket: RwLock, + recv_waker: AtomicWaker, + send_waker: AtomicWaker, + /// Set to true, when an error occurred, that means we need to rebind the socket. + is_broken: AtomicBool, +} + +/// UDP socket read/write buffer size (7MB). The value of 7MB is chosen as it +/// is the max supported by a default configuration of macOS. Some platforms will silently clamp the value. +const SOCKET_BUFFER_SIZE: usize = 7 << 20; +impl UdpSocket { + /// Bind only Ipv4 on any interface. + pub fn bind_v4(port: u16) -> io::Result { + Self::bind(IpFamily::V4, port) + } + + /// Bind only Ipv6 on any interface. + pub fn bind_v6(port: u16) -> io::Result { + Self::bind(IpFamily::V6, port) + } + + /// Bind only Ipv4 on localhost. + pub fn bind_local_v4(port: u16) -> io::Result { + Self::bind_local(IpFamily::V4, port) + } + + /// Bind only Ipv6 on localhost. + pub fn bind_local_v6(port: u16) -> io::Result { + Self::bind_local(IpFamily::V6, port) + } + + /// Bind to the given port only on localhost. + pub fn bind_local(network: IpFamily, port: u16) -> io::Result { + let addr = SocketAddr::new(network.local_addr(), port); + Self::bind_raw(addr) + } + + /// Bind to the given port and listen on all interfaces. + pub fn bind(network: IpFamily, port: u16) -> io::Result { + let addr = SocketAddr::new(network.unspecified_addr(), port); + Self::bind_raw(addr) + } + + /// Bind to any provided [`SocketAddr`]. + pub fn bind_full(addr: impl Into) -> io::Result { + Self::bind_raw(addr) + } + + /// Is the socket broken and needs a rebind? + pub fn is_broken(&self) -> bool { + self.is_broken.load(std::sync::atomic::Ordering::Acquire) + } + + /// Marks this socket as needing a rebind + fn mark_broken(&self) { + self.is_broken + .store(true, std::sync::atomic::Ordering::Release); + } + + /// Rebind the underlying socket. 
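+    ///
+    /// This drops the currently bound socket, binds a fresh one to the same local
+    /// address, clears the broken flag, and wakes pending send/recv futures so they
+    /// retry against the new socket.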
+ pub fn rebind(&self) -> io::Result<()> { + { + let mut guard = self.socket.write().unwrap(); + guard.rebind()?; + + // Clear errors + self.is_broken + .store(false, std::sync::atomic::Ordering::Release); + + drop(guard); + } + + // wakeup + self.wake_all(); + + Ok(()) + } + + fn bind_raw(addr: impl Into) -> io::Result { + let socket = SocketState::bind(addr.into())?; + + Ok(UdpSocket { + socket: RwLock::new(socket), + recv_waker: AtomicWaker::default(), + send_waker: AtomicWaker::default(), + is_broken: AtomicBool::new(false), + }) + } + + /// Receives a single datagram message on the socket from the remote address + /// to which it is connected. On success, returns the number of bytes read. + /// + /// The function must be called with valid byte array `buf` of sufficient + /// size to hold the message bytes. If a message is too long to fit in the + /// supplied buffer, excess bytes may be discarded. + /// + /// The [`connect`] method will connect this socket to a remote address. + /// This method will fail if the socket is not connected. + /// + /// [`connect`]: method@Self::connect + pub fn recv<'a, 'b>(&'b self, buffer: &'a mut [u8]) -> RecvFut<'a, 'b> { + RecvFut { + socket: self, + buffer, + } + } + + /// Receives a single datagram message on the socket. On success, returns + /// the number of bytes read and the origin. + /// + /// The function must be called with valid byte array `buf` of sufficient + /// size to hold the message bytes. If a message is too long to fit in the + /// supplied buffer, excess bytes may be discarded. + pub fn recv_from<'a, 'b>(&'b self, buffer: &'a mut [u8]) -> RecvFromFut<'a, 'b> { + RecvFromFut { + socket: self, + buffer, + } + } + + /// Sends data on the socket to the remote address that the socket is + /// connected to. + /// + /// The [`connect`] method will connect this socket to a remote address. + /// This method will fail if the socket is not connected. + /// + /// [`connect`]: method@Self::connect + /// + /// # Return + /// + /// On success, the number of bytes sent is returned, otherwise, the + /// encountered error is returned. + pub fn send<'a, 'b>(&'b self, buffer: &'a [u8]) -> SendFut<'a, 'b> { + SendFut { + socket: self, + buffer, + } + } + + /// Sends data on the socket to the given address. On success, returns the + /// number of bytes written. + pub fn send_to<'a, 'b>(&'b self, buffer: &'a [u8], to: SocketAddr) -> SendToFut<'a, 'b> { + SendToFut { + socket: self, + buffer, + to, + } + } + + /// Connects the UDP socket setting the default destination for send() and + /// limiting packets that are read via `recv` from the address specified in + /// `addr`. + pub fn connect(&self, addr: SocketAddr) -> io::Result<()> { + trace!(%addr, "connecting"); + let guard = self.socket.read().unwrap(); + let (socket_tokio, _state) = guard.try_get_connected()?; + + let sock_ref = socket2::SockRef::from(&socket_tokio); + sock_ref.connect(&socket2::SockAddr::from(addr))?; + + Ok(()) + } + + /// Returns the local address of this socket. + pub fn local_addr(&self) -> io::Result { + let guard = self.socket.read().unwrap(); + let (socket, _state) = guard.try_get_connected()?; + + socket.local_addr() + } + + /// Closes the socket, and waits for the underlying `libc::close` call to be finished. 
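+    ///
+    /// Pending send/recv futures are woken and subsequent operations on the closed
+    /// socket return an error; the potentially blocking close itself runs on a
+    /// `spawn_blocking` task so the async runtime is not stalled.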
+ pub async fn close(&self) { + let socket = self.socket.write().unwrap().close(); + self.wake_all(); + if let Some((sock, _)) = socket { + let std_sock = sock.into_std(); + let res = tokio::runtime::Handle::current() + .spawn_blocking(move || { + // Calls libc::close, which can block + drop(std_sock); + }) + .await; + if let Err(err) = res { + warn!("failed to close socket: {:?}", err); + } + } + } + + /// Check if this socket is closed. + pub fn is_closed(&self) -> bool { + self.socket.read().unwrap().is_closed() + } + + /// Handle potential read errors, updating internal state. + /// + /// Returns `Some(error)` if the error is fatal otherwise `None. + fn handle_read_error(&self, error: io::Error) -> Option { + match error.kind() { + io::ErrorKind::NotConnected => { + // This indicates the underlying socket is broken, and we should attempt to rebind it + self.mark_broken(); + None + } + _ => Some(error), + } + } + + /// Handle potential write errors, updating internal state. + /// + /// Returns `Some(error)` if the error is fatal otherwise `None. + fn handle_write_error(&self, error: io::Error) -> Option { + match error.kind() { + io::ErrorKind::BrokenPipe => { + // This indicates the underlying socket is broken, and we should attempt to rebind it + self.mark_broken(); + None + } + _ => Some(error), + } + } + + /// Try to get a read lock for the sockets, but don't block for trying to acquire it. + fn poll_read_socket( + &self, + waker: &AtomicWaker, + cx: &mut std::task::Context<'_>, + ) -> Poll> { + let guard = match self.socket.try_read() { + Ok(guard) => guard, + Err(TryLockError::Poisoned(e)) => panic!("socket lock poisoned: {e}"), + Err(TryLockError::WouldBlock) => { + waker.register(cx.waker()); + + match self.socket.try_read() { + Ok(guard) => { + // we're actually fine, no need to cause a spurious wakeup + waker.take(); + guard + } + Err(TryLockError::Poisoned(e)) => panic!("socket lock poisoned: {e}"), + Err(TryLockError::WouldBlock) => { + // Ok fine, we registered our waker, the lock is really closed, + // we can return pending. + return Poll::Pending; + } + } + } + }; + Poll::Ready(guard) + } + + fn wake_all(&self) { + self.recv_waker.wake(); + self.send_waker.wake(); + } + + /// Checks if the socket needs a rebind, and if so does it. + /// + /// Returns an error if the rebind is needed, but failed. + fn maybe_rebind(&self) -> io::Result<()> { + if self.is_broken() { + self.rebind()?; + } + Ok(()) + } + + /// Poll for writable + pub fn poll_writable(&self, cx: &mut std::task::Context<'_>) -> Poll> { + loop { + if let Err(err) = self.maybe_rebind() { + return Poll::Ready(Err(err)); + } + + let guard = std::task::ready!(self.poll_read_socket(&self.send_waker, cx)); + let (socket, _state) = guard.try_get_connected()?; + + match socket.poll_send_ready(cx) { + Poll::Pending => { + self.send_waker.register(cx.waker()); + return Poll::Pending; + } + Poll::Ready(Ok(())) => return Poll::Ready(Ok(())), + Poll::Ready(Err(err)) => { + if let Some(err) = self.handle_write_error(err) { + return Poll::Ready(Err(err)); + } + continue; + } + } + } + } + + /// Send a quinn based `Transmit`. 
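+    ///
+    /// This never awaits: if the socket lock is contended or the socket is not
+    /// writable, an `io::ErrorKind::WouldBlock` error is returned. A socket that was
+    /// previously marked broken is rebound before sending.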
+ pub fn try_send_quinn(&self, transmit: &Transmit<'_>) -> io::Result<()> { + loop { + self.maybe_rebind()?; + + let guard = match self.socket.try_read() { + Ok(guard) => guard, + Err(TryLockError::Poisoned(e)) => { + panic!("lock poisoned: {:?}", e); + } + Err(TryLockError::WouldBlock) => { + return Err(io::Error::new(io::ErrorKind::WouldBlock, "")); + } + }; + let (socket, state) = guard.try_get_connected()?; + + let res = socket.try_io(Interest::WRITABLE, || state.send(socket.into(), transmit)); + + match res { + Ok(()) => return Ok(()), + Err(err) => match self.handle_write_error(err) { + Some(err) => return Err(err), + None => { + continue; + } + }, + } + } + } + + /// quinn based `poll_recv` + pub fn poll_recv_quinn( + &self, + cx: &mut Context, + bufs: &mut [io::IoSliceMut<'_>], + meta: &mut [quinn_udp::RecvMeta], + ) -> Poll> { + loop { + if let Err(err) = self.maybe_rebind() { + return Poll::Ready(Err(err)); + } + + let guard = n0_future::ready!(self.poll_read_socket(&self.recv_waker, cx)); + let (socket, state) = guard.try_get_connected()?; + + match socket.poll_recv_ready(cx) { + Poll::Pending => { + self.recv_waker.register(cx.waker()); + return Poll::Pending; + } + Poll::Ready(Ok(())) => { + // We are ready to read, continue + } + Poll::Ready(Err(err)) => match self.handle_read_error(err) { + Some(err) => return Poll::Ready(Err(err)), + None => { + continue; + } + }, + } + + let res = socket.try_io(Interest::READABLE, || state.recv(socket.into(), bufs, meta)); + match res { + Ok(count) => { + for meta in meta.iter().take(count) { + trace!( + src = %meta.addr, + len = meta.len, + count = meta.len / meta.stride, + dst = %meta.dst_ip.map(|x| x.to_string()).unwrap_or_default(), + "UDP recv" + ); + } + return Poll::Ready(Ok(count)); + } + Err(err) => { + // ignore spurious wakeups + if err.kind() == io::ErrorKind::WouldBlock { + continue; + } + match self.handle_read_error(err) { + Some(err) => return Poll::Ready(Err(err)), + None => { + continue; + } + } + } + } + } + } + + /// Whether transmitted datagrams might get fragmented by the IP layer + /// + /// Returns `false` on targets which employ e.g. the `IPV6_DONTFRAG` socket option. + pub fn may_fragment(&self) -> bool { + let guard = self.socket.read().unwrap(); + guard.may_fragment() + } + + /// The maximum amount of segments which can be transmitted if a platform + /// supports Generic Send Offload (GSO). + /// + /// This is 1 if the platform doesn't support GSO. Subject to change if errors are detected + /// while using GSO. + pub fn max_gso_segments(&self) -> usize { + let guard = self.socket.read().unwrap(); + guard.max_gso_segments() + } + + /// The number of segments to read when GRO is enabled. Used as a factor to + /// compute the receive buffer size. + /// + /// Returns 1 if the platform doesn't support GRO. 
+ pub fn gro_segments(&self) -> usize { + let guard = self.socket.read().unwrap(); + guard.gro_segments() + } +} + +/// Receive future +#[derive(Debug)] +pub struct RecvFut<'a, 'b> { + socket: &'b UdpSocket, + buffer: &'a mut [u8], +} + +impl Future for RecvFut<'_, '_> { + type Output = io::Result; + + fn poll(mut self: Pin<&mut Self>, cx: &mut std::task::Context<'_>) -> Poll { + let Self { socket, buffer } = &mut *self; + + loop { + if let Err(err) = socket.maybe_rebind() { + return Poll::Ready(Err(err)); + } + + let guard = n0_future::ready!(socket.poll_read_socket(&socket.recv_waker, cx)); + let (inner_socket, _state) = guard.try_get_connected()?; + + match inner_socket.poll_recv_ready(cx) { + Poll::Pending => { + self.socket.recv_waker.register(cx.waker()); + return Poll::Pending; + } + Poll::Ready(Ok(())) => { + let res = inner_socket.try_recv(buffer); + if let Err(err) = res { + if err.kind() == io::ErrorKind::WouldBlock { + continue; + } + if let Some(err) = socket.handle_read_error(err) { + return Poll::Ready(Err(err)); + } + continue; + } + return Poll::Ready(res); + } + Poll::Ready(Err(err)) => { + if let Some(err) = socket.handle_read_error(err) { + return Poll::Ready(Err(err)); + } + continue; + } + } + } + } +} + +/// Receive future +#[derive(Debug)] +pub struct RecvFromFut<'a, 'b> { + socket: &'b UdpSocket, + buffer: &'a mut [u8], +} + +impl Future for RecvFromFut<'_, '_> { + type Output = io::Result<(usize, SocketAddr)>; + + fn poll(mut self: Pin<&mut Self>, cx: &mut std::task::Context<'_>) -> Poll { + let Self { socket, buffer } = &mut *self; + + loop { + if let Err(err) = socket.maybe_rebind() { + return Poll::Ready(Err(err)); + } + + let guard = n0_future::ready!(socket.poll_read_socket(&socket.recv_waker, cx)); + let (inner_socket, _state) = guard.try_get_connected()?; + + match inner_socket.poll_recv_ready(cx) { + Poll::Pending => { + self.socket.recv_waker.register(cx.waker()); + return Poll::Pending; + } + Poll::Ready(Ok(())) => { + let res = inner_socket.try_recv_from(buffer); + if let Err(err) = res { + if err.kind() == io::ErrorKind::WouldBlock { + continue; + } + if let Some(err) = socket.handle_read_error(err) { + return Poll::Ready(Err(err)); + } + continue; + } + return Poll::Ready(res); + } + Poll::Ready(Err(err)) => { + if let Some(err) = socket.handle_read_error(err) { + return Poll::Ready(Err(err)); + } + continue; + } + } + } + } +} + +/// Send future +#[derive(Debug)] +pub struct SendFut<'a, 'b> { + socket: &'b UdpSocket, + buffer: &'a [u8], +} + +impl Future for SendFut<'_, '_> { + type Output = io::Result; + + fn poll(self: Pin<&mut Self>, cx: &mut std::task::Context<'_>) -> Poll { + loop { + if let Err(err) = self.socket.maybe_rebind() { + return Poll::Ready(Err(err)); + } + + let guard = + n0_future::ready!(self.socket.poll_read_socket(&self.socket.send_waker, cx)); + let (socket, _state) = guard.try_get_connected()?; + + match socket.poll_send_ready(cx) { + Poll::Pending => { + self.socket.send_waker.register(cx.waker()); + return Poll::Pending; + } + Poll::Ready(Ok(())) => { + let res = socket.try_send(self.buffer); + if let Err(err) = res { + if err.kind() == io::ErrorKind::WouldBlock { + continue; + } + if let Some(err) = self.socket.handle_write_error(err) { + return Poll::Ready(Err(err)); + } + continue; + } + return Poll::Ready(res); + } + Poll::Ready(Err(err)) => { + if let Some(err) = self.socket.handle_write_error(err) { + return Poll::Ready(Err(err)); + } + continue; + } + } + } + } +} + +/// Send future +#[derive(Debug)] +pub struct 
SendToFut<'a, 'b> { + socket: &'b UdpSocket, + buffer: &'a [u8], + to: SocketAddr, +} + +impl Future for SendToFut<'_, '_> { + type Output = io::Result; + + fn poll(self: Pin<&mut Self>, cx: &mut std::task::Context<'_>) -> Poll { + loop { + if let Err(err) = self.socket.maybe_rebind() { + return Poll::Ready(Err(err)); + } + + let guard = + n0_future::ready!(self.socket.poll_read_socket(&self.socket.send_waker, cx)); + let (socket, _state) = guard.try_get_connected()?; + + match socket.poll_send_ready(cx) { + Poll::Pending => { + self.socket.send_waker.register(cx.waker()); + return Poll::Pending; + } + Poll::Ready(Ok(())) => { + let res = socket.try_send_to(self.buffer, self.to); + if let Err(err) = res { + if err.kind() == io::ErrorKind::WouldBlock { + continue; + } + + if let Some(err) = self.socket.handle_write_error(err) { + return Poll::Ready(Err(err)); + } + continue; + } + return Poll::Ready(res); + } + Poll::Ready(Err(err)) => { + if let Some(err) = self.socket.handle_write_error(err) { + return Poll::Ready(Err(err)); + } + continue; + } + } + } + } +} + +#[derive(Debug)] +enum SocketState { + Connected { + socket: tokio::net::UdpSocket, + state: quinn_udp::UdpSocketState, + /// The addr we are binding to. + addr: SocketAddr, + }, + Closed { + last_max_gso_segments: usize, + last_gro_segments: usize, + last_may_fragment: bool, + }, +} + +impl SocketState { + fn try_get_connected( + &self, + ) -> io::Result<(&tokio::net::UdpSocket, &quinn_udp::UdpSocketState)> { + match self { + Self::Connected { + socket, + state, + addr: _, + } => Ok((socket, state)), + Self::Closed { .. } => { + warn!("socket closed"); + Err(io::Error::new(io::ErrorKind::BrokenPipe, "socket closed")) + } + } + } + + fn bind(addr: SocketAddr) -> io::Result { + let network = IpFamily::from(addr.ip()); + let socket = socket2::Socket::new( + network.into(), + socket2::Type::DGRAM, + Some(socket2::Protocol::UDP), + )?; + + if let Err(err) = socket.set_recv_buffer_size(SOCKET_BUFFER_SIZE) { + debug!( + "failed to set recv_buffer_size to {}: {:?}", + SOCKET_BUFFER_SIZE, err + ); + } + if let Err(err) = socket.set_send_buffer_size(SOCKET_BUFFER_SIZE) { + debug!( + "failed to set send_buffer_size to {}: {:?}", + SOCKET_BUFFER_SIZE, err + ); + } + if network == IpFamily::V6 { + // Avoid dualstack + socket.set_only_v6(true)?; + } + + // Binding must happen before calling quinn, otherwise `local_addr` + // is not yet available on all OSes. + socket.bind(&addr.into())?; + + // Ensure nonblocking + socket.set_nonblocking(true)?; + + let socket: std::net::UdpSocket = socket.into(); + + // Convert into tokio UdpSocket + let socket = tokio::net::UdpSocket::from_std(socket)?; + let socket_ref = quinn_udp::UdpSockRef::from(&socket); + let socket_state = quinn_udp::UdpSocketState::new(socket_ref)?; + + let local_addr = socket.local_addr()?; + if addr.port() != 0 && local_addr.port() != addr.port() { + return Err(io::Error::new( + io::ErrorKind::Other, + format!( + "wrong port bound: {:?}: wanted: {} got {}", + network, + addr.port(), + local_addr.port(), + ), + )); + } + + Ok(Self::Connected { + socket, + state: socket_state, + addr: local_addr, + }) + } + + fn rebind(&mut self) -> io::Result<()> { + let (addr, closed_state) = match self { + Self::Connected { state, addr, .. } => { + let s = SocketState::Closed { + last_max_gso_segments: state.max_gso_segments(), + last_gro_segments: state.gro_segments(), + last_may_fragment: state.may_fragment(), + }; + (*addr, s) + } + Self::Closed { .. 
} => { + return Err(io::Error::new( + io::ErrorKind::Other, + "socket is closed and cannot be rebound", + )); + } + }; + debug!("rebinding {}", addr); + + *self = closed_state; + *self = Self::bind(addr)?; + + Ok(()) + } + + fn is_closed(&self) -> bool { + matches!(self, Self::Closed { .. }) + } + + fn close(&mut self) -> Option<(tokio::net::UdpSocket, quinn_udp::UdpSocketState)> { + match self { + Self::Connected { state, .. } => { + let s = SocketState::Closed { + last_max_gso_segments: state.max_gso_segments(), + last_gro_segments: state.gro_segments(), + last_may_fragment: state.may_fragment(), + }; + let Self::Connected { socket, state, .. } = std::mem::replace(self, s) else { + unreachable!("just checked"); + }; + Some((socket, state)) + } + Self::Closed { .. } => None, + } + } + + fn may_fragment(&self) -> bool { + match self { + Self::Connected { state, .. } => state.may_fragment(), + Self::Closed { + last_may_fragment, .. + } => *last_may_fragment, + } + } + + fn max_gso_segments(&self) -> usize { + match self { + Self::Connected { state, .. } => state.max_gso_segments(), + Self::Closed { + last_max_gso_segments, + .. + } => *last_max_gso_segments, + } + } + + fn gro_segments(&self) -> usize { + match self { + Self::Connected { state, .. } => state.gro_segments(), + Self::Closed { + last_gro_segments, .. + } => *last_gro_segments, + } + } +} + +impl Drop for UdpSocket { + fn drop(&mut self) { + trace!("dropping UdpSocket"); + if let Some((socket, _)) = self.socket.write().unwrap().close() { + if let Ok(handle) = tokio::runtime::Handle::try_current() { + // No wakeup after dropping write lock here, since we're getting dropped. + // this will be empty if `close` was called before + let std_sock = socket.into_std(); + handle.spawn_blocking(move || { + // Calls libc::close, which can block + drop(std_sock); + }); + } + } + } +} + +#[cfg(test)] +mod tests { + use testresult::TestResult; + + use super::*; + + #[tokio::test] + async fn test_reconnect() -> TestResult { + let (s_b, mut r_b) = tokio::sync::mpsc::channel(16); + let handle_a = tokio::task::spawn(async move { + let socket = UdpSocket::bind_local(IpFamily::V4, 0)?; + let addr = socket.local_addr()?; + s_b.send(addr).await?; + println!("socket bound to {:?}", addr); + + let mut buffer = [0u8; 16]; + for i in 0..100 { + println!("-- tick {i}"); + let read = socket.recv_from(&mut buffer).await; + match read { + Ok((count, addr)) => { + println!("got {:?}", &buffer[..count]); + println!("sending {:?} to {:?}", &buffer[..count], addr); + socket.send_to(&buffer[..count], addr).await?; + } + Err(err) => { + eprintln!("error reading: {:?}", err); + } + } + } + socket.close().await; + Ok::<_, testresult::TestError>(()) + }); + + let socket = UdpSocket::bind_local(IpFamily::V4, 0)?; + let first_addr = socket.local_addr()?; + println!("socket2 bound to {:?}", socket.local_addr()?); + let addr = r_b.recv().await.unwrap(); + + let mut buffer = [0u8; 16]; + for i in 0u8..100 { + println!("round one - {}", i); + socket.send_to(&[i][..], addr).await?; + let (count, from) = socket.recv_from(&mut buffer).await?; + assert_eq!(addr, from); + assert_eq!(count, 1); + assert_eq!(buffer[0], i); + + // check for errors + assert!(!socket.is_broken()); + + // rebind + socket.rebind()?; + + // check that the socket has the same address as before + assert_eq!(socket.local_addr()?, first_addr); + } + + handle_a.await.ok(); + + Ok(()) + } + + #[tokio::test] + async fn test_udp_mark_broken() -> TestResult { + let socket_a = UdpSocket::bind_local(IpFamily::V4, 
0)?; + let addr_a = socket_a.local_addr()?; + println!("socket bound to {:?}", addr_a); + + let socket_b = UdpSocket::bind_local(IpFamily::V4, 0)?; + let addr_b = socket_b.local_addr()?; + println!("socket bound to {:?}", addr_b); + + let handle = tokio::task::spawn(async move { + let mut buffer = [0u8; 16]; + for _ in 0..2 { + match socket_b.recv_from(&mut buffer).await { + Ok((count, addr)) => { + println!("got {:?} from {:?}", &buffer[..count], addr); + } + Err(err) => { + eprintln!("error recv: {:?}", err); + } + } + } + }); + socket_a.send_to(&[0][..], addr_b).await?; + socket_a.mark_broken(); + assert!(socket_a.is_broken()); + socket_a.send_to(&[0][..], addr_b).await?; + assert!(!socket_a.is_broken()); + + handle.await?; + Ok(()) + } +} diff --git a/patches/netwatch/tests/smoke.rs b/patches/netwatch/tests/smoke.rs new file mode 100644 index 0000000000..deefb299d4 --- /dev/null +++ b/patches/netwatch/tests/smoke.rs @@ -0,0 +1,75 @@ +// Copyright 2022-2024 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT +//! A very basic smoke test for netwatch, to make sure it doesn't error out immediately +//! in Wasm at all. +//! +//! We can't test browsers easily, because that would mean we need control over turning +//! the browser online/offline. +//! +//! However, this gives us a minimum guarantee that the Wasm build doesn't break fully. +use n0_future::FutureExt; +use netwatch::netmon; +use testresult::TestResult; +#[cfg(not(wasm_browser))] +use tokio::test; +#[cfg(wasm_browser)] +use wasm_bindgen_test::wasm_bindgen_test as test; + +// Enable this if you want to run these tests in the browser. +// Unfortunately it's either-or: Enable this and you can run in the browser, disable to run in nodejs. +// #[cfg(wasm_browser)] +// wasm_bindgen_test::wasm_bindgen_test_configure!(run_in_browser); + +#[test] +async fn smoke_test() -> TestResult { + setup_logging(); + + tracing::info!("Creating netmon::Monitor"); + let monitor = netmon::Monitor::new().await?; + tracing::info!("netmon::Monitor created."); + + // Unfortunately this doesn't do anything in node.js, because it doesn't have + // globalThis.navigator.onLine or globalThis.addEventListener("online"/"offline", ...) APIs, + // so this is more of a test to see if we gracefully handle these situations & if our + // .wasm files are without "env" imports. + tracing::info!("subscribing to netmon callback"); + let token = monitor + .subscribe(|is_major| { + async move { + tracing::info!(is_major, "network change"); + } + .boxed() + }) + .await?; + tracing::info!("successfully subscribed to netmon callback"); + + tracing::info!("unsubscribing"); + monitor.unsubscribe(token).await?; + tracing::info!("unsubscribed"); + + tracing::info!("dropping netmon::Monitor"); + drop(monitor); + tracing::info!("dropped."); + + Ok(()) +} + +#[cfg(wasm_browser)] +fn setup_logging() { + tracing_subscriber::fmt() + .with_max_level(tracing::level_filters::LevelFilter::DEBUG) + .with_writer( + // To avoide trace events in the browser from showing their JS backtrace + tracing_subscriber_wasm::MakeConsoleWriter::default() + .map_trace_level_to(tracing::Level::DEBUG), + ) + // If we don't do this in the browser, we get a runtime error. + .without_time() + .with_ansi(false) + .init(); +} + +#[cfg(not(wasm_browser))] +fn setup_logging() { + tracing_subscriber::fmt().init(); +}
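
For reference, a minimal usage sketch of the `UdpSocket` wrapper introduced in this patch (not part of the diff itself). It assumes the crate re-exports `UdpSocket` and `IpFamily` at its root, as upstream netwatch does; everything else follows the API shown above.

use netwatch::{IpFamily, UdpSocket};

#[tokio::main]
async fn main() -> std::io::Result<()> {
    // Import paths are an assumption; adjust to wherever the patched crate
    // actually re-exports UdpSocket and IpFamily.
    let a = UdpSocket::bind_local(IpFamily::V4, 0)?;
    let b = UdpSocket::bind_local(IpFamily::V4, 0)?;

    // Send a datagram from `a` to `b` and read it back.
    a.send_to(b"hello", b.local_addr()?).await?;
    let mut buf = [0u8; 16];
    let (n, from) = b.recv_from(&mut buf).await?;
    assert_eq!(&buf[..n], b"hello");
    assert_eq!(from, a.local_addr()?);

    // An explicit rebind keeps the same local address; a socket marked broken
    // is rebound automatically on its next send/recv.
    a.rebind()?;
    assert_eq!(a.local_addr()?, from);

    // Close both sockets and wait for the underlying close(2) calls to finish.
    a.close().await;
    b.close().await;
    Ok(())
}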