diff --git a/packages/migration-claim/sp1/Cargo.lock b/packages/migration-claim/sp1/Cargo.lock index 13c6e55..81e12e3 100644 --- a/packages/migration-claim/sp1/Cargo.lock +++ b/packages/migration-claim/sp1/Cargo.lock @@ -752,6 +752,189 @@ dependencies = [ "serde", ] +[[package]] +name = "ascii-canvas" +version = "3.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8824ecca2e851cec16968d54a01dd372ef8f95b244fb84b84e70128be347c3c6" +dependencies = [ + "term", +] + +[[package]] +name = "assert-json-diff" +version = "2.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "47e4f2b81832e72834d7518d8487a0396a28cc408186a2e8854c0f98011faf12" +dependencies = [ + "serde", + "serde_json", +] + +[[package]] +name = "async-attributes" +version = "1.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a3203e79f4dd9bdda415ed03cf14dae5a2bf775c683a00f94e9cd1faf0f596e5" +dependencies = [ + "quote", + "syn 1.0.109", +] + +[[package]] +name = "async-channel" +version = "1.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "81953c529336010edd6d8e358f886d9581267795c61b19475b71314bffa46d35" +dependencies = [ + "concurrent-queue", + "event-listener 2.5.3", + "futures-core", +] + +[[package]] +name = "async-channel" +version = "2.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "924ed96dd52d1b75e9c1a3e6275715fd320f5f9439fb5a4a11fa51f4221158d2" +dependencies = [ + "concurrent-queue", + "event-listener-strategy", + "futures-core", + "pin-project-lite", +] + +[[package]] +name = "async-executor" +version = "1.13.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "497c00e0fd83a72a79a39fcbd8e3e2f055d6f6c7e025f3b3d91f4f8e76527fb8" +dependencies = [ + "async-task", + "concurrent-queue", + "fastrand", + "futures-lite", + "pin-project-lite", + "slab", +] + +[[package]] +name = "async-global-executor" +version = "2.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05b1b633a2115cd122d73b955eadd9916c18c8f510ec9cd1686404c60ad1c29c" +dependencies = [ + "async-channel 2.5.0", + "async-executor", + "async-io", + "async-lock", + "blocking", + "futures-lite", + "once_cell", +] + +[[package]] +name = "async-io" +version = "2.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "456b8a8feb6f42d237746d4b3e9a178494627745c3c56c6ea55d92ba50d026fc" +dependencies = [ + "autocfg", + "cfg-if", + "concurrent-queue", + "futures-io", + "futures-lite", + "parking", + "polling", + "rustix", + "slab", + "windows-sys 0.61.2", +] + +[[package]] +name = "async-lock" +version = "3.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "290f7f2596bd5b78a9fec8088ccd89180d7f9f55b94b0576823bbbdc72ee8311" +dependencies = [ + "event-listener 5.4.1", + "event-listener-strategy", + "pin-project-lite", +] + +[[package]] +name = "async-object-pool" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "333c456b97c3f2d50604e8b2624253b7f787208cb72eb75e64b0ad11b221652c" +dependencies = [ + "async-std", +] + +[[package]] +name = "async-process" +version = "2.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fc50921ec0055cdd8a16de48773bfeec5c972598674347252c0399676be7da75" +dependencies = [ + "async-channel 2.5.0", + "async-io", + "async-lock", + "async-signal", + "async-task", + "blocking", + "cfg-if", + 
"event-listener 5.4.1", + "futures-lite", + "rustix", +] + +[[package]] +name = "async-signal" +version = "0.2.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "43c070bbf59cd3570b6b2dd54cd772527c7c3620fce8be898406dd3ed6adc64c" +dependencies = [ + "async-io", + "async-lock", + "atomic-waker", + "cfg-if", + "futures-core", + "futures-io", + "rustix", + "signal-hook-registry", + "slab", + "windows-sys 0.61.2", +] + +[[package]] +name = "async-std" +version = "1.13.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2c8e079a4ab67ae52b7403632e4618815d6db36d2a010cfe41b02c1b1578f93b" +dependencies = [ + "async-attributes", + "async-channel 1.9.0", + "async-global-executor", + "async-io", + "async-lock", + "async-process", + "crossbeam-utils", + "futures-channel", + "futures-core", + "futures-io", + "futures-lite", + "gloo-timers", + "kv-log-macro", + "log", + "memchr", + "once_cell", + "pin-project-lite", + "pin-utils", + "slab", + "wasm-bindgen-futures", +] + [[package]] name = "async-stream" version = "0.3.6" @@ -774,6 +957,12 @@ dependencies = [ "syn 2.0.111", ] +[[package]] +name = "async-task" +version = "4.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b75356056920673b02621b35afd0f7dda9306d03c79a30f5c56c44cf256e3de" + [[package]] name = "async-trait" version = "0.1.89" @@ -1278,6 +1467,12 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4c7f02d4ea65f2c1853089ffd8d2787bdbc63de2f0d29dedbcf8ccdfa0ccd4cf" +[[package]] +name = "base64" +version = "0.21.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9d297deb1925b89f2ccc13d7635fa0714f12c87adce1c75356b39ca9b7178567" + [[package]] name = "base64" version = "0.22.1" @@ -1300,6 +1495,17 @@ version = "1.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0e050f626429857a27ddccb31e0aca21356bfa709c04041aefddac081a8f068a" +[[package]] +name = "basic-cookies" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "67bd8fd42c16bdb08688243dc5f0cc117a3ca9efeeaba3a345a18a6159ad96f7" +dependencies = [ + "lalrpop", + "lalrpop-util", + "regex", +] + [[package]] name = "bincode" version = "1.3.3" @@ -1329,15 +1535,30 @@ dependencies = [ "syn 2.0.111", ] +[[package]] +name = "bit-set" +version = "0.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0700ddab506f33b20a03b13996eccd309a48e5ff77d0d95926aa0210fb4e95f1" +dependencies = [ + "bit-vec 0.6.3", +] + [[package]] name = "bit-set" version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "08807e080ed7f9d5433fa9b275196cfc35414f66a0c79d864dc51a0d825231a3" dependencies = [ - "bit-vec", + "bit-vec 0.8.0", ] +[[package]] +name = "bit-vec" +version = "0.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "349f9b6a179ed607305526ca489b34ad0a41aed5f7980fa90eb03160b69598fb" + [[package]] name = "bit-vec" version = "0.8.0" @@ -1429,6 +1650,19 @@ dependencies = [ "objc2", ] +[[package]] +name = "blocking" +version = "1.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e83f8d02be6967315521be875afa792a316e28d57b5a2d401897e2a7921b7f21" +dependencies = [ + "async-channel 2.5.0", + "async-task", + "futures-io", + "futures-lite", + "piper", +] + [[package]] name = "bls12_381" version = "0.7.1" @@ -1720,6 +1954,15 @@ version = "1.0.4" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "b05b61dc5112cbb17e4b6cd61790d9845d13888356391624cbe7e41efeac1e75" +[[package]] +name = "concurrent-queue" +version = "2.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4ca0197aee26d1ae37445ee532fefce43251d24cc7c166799f4d46817f1d3973" +dependencies = [ + "crossbeam-utils", +] + [[package]] name = "console" version = "0.15.11" @@ -2146,6 +2389,16 @@ dependencies = [ "dirs-sys", ] +[[package]] +name = "dirs-next" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b98cf8ebf19c3d1b223e151f99a4f9f0690dca41414773390fc824184ac833e1" +dependencies = [ + "cfg-if", + "dirs-sys-next", +] + [[package]] name = "dirs-sys" version = "0.4.1" @@ -2158,6 +2411,17 @@ dependencies = [ "windows-sys 0.48.0", ] +[[package]] +name = "dirs-sys-next" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4ebda144c4fe02d1f7ea1a7d9641b6fc6b580adcfa024ae48797ecdeb6825b4d" +dependencies = [ + "libc", + "redox_users", + "winapi", +] + [[package]] name = "dispatch2" version = "0.3.0" @@ -2269,6 +2533,15 @@ dependencies = [ "zeroize", ] +[[package]] +name = "ena" +version = "0.14.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3d248bdd43ce613d87415282f69b9bb99d947d290b10962dd6c56233312c2ad5" +dependencies = [ + "log", +] + [[package]] name = "encode_unicode" version = "1.0.0" @@ -2332,6 +2605,33 @@ dependencies = [ "windows-sys 0.61.2", ] +[[package]] +name = "event-listener" +version = "2.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0206175f82b8d6bf6652ff7d71a1e27fd2e4efde587fd368662814d6ec1d9ce0" + +[[package]] +name = "event-listener" +version = "5.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e13b66accf52311f30a0db42147dadea9850cb48cd070028831ae5f5d4b856ab" +dependencies = [ + "concurrent-queue", + "parking", + "pin-project-lite", +] + +[[package]] +name = "event-listener-strategy" +version = "0.5.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8be9f3dfaaffdae2972880079a491a1a8bb7cbed0b8dd7a347f668b4150a3b93" +dependencies = [ + "event-listener 5.4.1", + "pin-project-lite", +] + [[package]] name = "eventsource-stream" version = "0.2.3" @@ -2444,6 +2744,12 @@ dependencies = [ "static_assertions", ] +[[package]] +name = "fixedbitset" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0ce7134b9999ecaf8bcd65542e436736ef32ddca1b3e06094cb6ec5755203b80" + [[package]] name = "fnv" version = "1.0.7" @@ -2531,6 +2837,19 @@ version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9e5c1b78ca4aae1ac06c48a526a655760685149f0d465d21f37abfe57ce075c6" +[[package]] +name = "futures-lite" +version = "2.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f78e10609fe0e0b3f4157ffab1876319b5b0db102a2c60dc4626306dc46b44ad" +dependencies = [ + "fastrand", + "futures-core", + "futures-io", + "parking", + "pin-project-lite", +] + [[package]] name = "futures-macro" version = "0.3.31" @@ -2660,6 +2979,18 @@ version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0cc23270f6e1808e30a928bdc84dea0b9b4136a8bc82338574f23baf47bbd280" +[[package]] +name = "gloo-timers" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"bbb143cf96099802033e0d4f4963b19fd2e0b728bcf076cd9cf7f6634f092994" +dependencies = [ + "futures-channel", + "futures-core", + "js-sys", + "wasm-bindgen", +] + [[package]] name = "group" version = "0.12.1" @@ -2901,6 +3232,34 @@ version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9" +[[package]] +name = "httpmock" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "08ec9586ee0910472dec1a1f0f8acf52f0fdde93aea74d70d4a3107b4be0fd5b" +dependencies = [ + "assert-json-diff", + "async-object-pool", + "async-std", + "async-trait", + "base64 0.21.7", + "basic-cookies", + "crossbeam-utils", + "form_urlencoded", + "futures-util", + "hyper 0.14.32", + "lazy_static", + "levenshtein", + "log", + "regex", + "serde", + "serde_json", + "serde_regex", + "similar", + "tokio", + "url", +] + [[package]] name = "hyper" version = "0.14.32" @@ -3000,7 +3359,7 @@ version = "0.1.19" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "727805d60e7938b76b826a6ef209eb70eaa1812794f9424d4a4e2d740662df5f" dependencies = [ - "base64", + "base64 0.22.1", "bytes", "futures-channel", "futures-core", @@ -3252,6 +3611,15 @@ dependencies = [ "either", ] +[[package]] +name = "itertools" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b1c173a5686ce8bfa551b3563d0c2170bf24ca44da99c7ca4bfdab5418c3fe57" +dependencies = [ + "either", +] + [[package]] name = "itertools" version = "0.12.1" @@ -3353,6 +3721,46 @@ dependencies = [ "sha3-asm", ] +[[package]] +name = "kv-log-macro" +version = "1.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0de8b303297635ad57c9f5059fd9cee7a47f8e8daa09df0fcd07dd39fb22977f" +dependencies = [ + "log", +] + +[[package]] +name = "lalrpop" +version = "0.20.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "55cb077ad656299f160924eb2912aa147d7339ea7d69e1b5517326fdcec3c1ca" +dependencies = [ + "ascii-canvas", + "bit-set 0.5.3", + "ena", + "itertools 0.11.0", + "lalrpop-util", + "petgraph", + "pico-args", + "regex", + "regex-syntax", + "string_cache", + "term", + "tiny-keccak", + "unicode-xid", + "walkdir", +] + +[[package]] +name = "lalrpop-util" +version = "0.20.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "507460a910eb7b32ee961886ff48539633b788a36b65692b95f225b844c82553" +dependencies = [ + "regex-automata", +] + [[package]] name = "lazy_static" version = "1.5.0" @@ -3362,6 +3770,12 @@ dependencies = [ "spin", ] +[[package]] +name = "levenshtein" +version = "1.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "db13adb97ab515a3691f56e4dbab09283d0b86cb45abd991d8634a9d6f501760" + [[package]] name = "libc" version = "0.2.178" @@ -3420,6 +3834,9 @@ name = "log" version = "0.4.29" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5e5032e24019045c762d3c0f28f5b6b8bbf38563a65908389bf7978758920897" +dependencies = [ + "value-bag", +] [[package]] name = "lru" @@ -3518,6 +3935,12 @@ dependencies = [ "windows-sys 0.61.2", ] +[[package]] +name = "new_debug_unreachable" +version = "1.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "650eef8c711430f1a879fdd01d4745a7deea475becfb90269c06775983bbf086" + [[package]] name = "nix" version = "0.30.1" @@ -4072,6 +4495,12 @@ dependencies = [ "syn 2.0.111", ] +[[package]] 
+name = "parking" +version = "2.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f38d5652c16fde515bb1ecef450ab0f6a219d619a7274976324d5e377f7dceba" + [[package]] name = "parking_lot" version = "0.12.5" @@ -4162,6 +4591,31 @@ dependencies = [ "ucd-trie", ] +[[package]] +name = "petgraph" +version = "0.6.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b4c5cc86750666a3ed20bdaf5ca2a0344f9c67674cae0515bec2da16fbaa47db" +dependencies = [ + "fixedbitset", + "indexmap 2.12.1", +] + +[[package]] +name = "phf_shared" +version = "0.11.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "67eabc2ef2a60eb7faa00097bd1ffdb5bd28e62bf39990626a582201b7a754e5" +dependencies = [ + "siphasher", +] + +[[package]] +name = "pico-args" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5be167a7af36ee22fe3115051bc51f6e6c7054c9348e28deb4f49bd6f705a315" + [[package]] name = "pin-project" version = "1.1.10" @@ -4194,6 +4648,17 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" +[[package]] +name = "piper" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "96c8c490f422ef9a4efd2cb5b42b76c8613d7e7dfc1caf667b8a3350a5acc066" +dependencies = [ + "atomic-waker", + "fastrand", + "futures-io", +] + [[package]] name = "pkcs8" version = "0.10.2" @@ -4204,6 +4669,20 @@ dependencies = [ "spki", ] +[[package]] +name = "polling" +version = "3.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5d0e4f59085d47d8241c88ead0f274e8a0cb551f3625263c05eb8dd897c34218" +dependencies = [ + "cfg-if", + "concurrent-queue", + "hermit-abi", + "pin-project-lite", + "rustix", + "windows-sys 0.61.2", +] + [[package]] name = "portable-atomic" version = "1.12.0" @@ -4234,6 +4713,12 @@ dependencies = [ "zerocopy", ] +[[package]] +name = "precomputed-hash" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "925383efa346730478fb4838dbe9137d2a47675ad789c546d150a6e1dd4ab31c" + [[package]] name = "prettyplease" version = "0.2.37" @@ -4320,8 +4805,8 @@ version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bee689443a2bd0a16ab0348b52ee43e3b2d1b1f931c8aa5c9f8de4c86fbe8c40" dependencies = [ - "bit-set", - "bit-vec", + "bit-set 0.8.0", + "bit-vec 0.8.0", "bitflags", "num-traits", "rand 0.9.2", @@ -4640,7 +5125,7 @@ version = "0.12.28" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "eddd3ca559203180a307f12d114c268abf583f59b03cb906fd0b3ff8646c1147" dependencies = [ - "base64", + "base64 0.22.1", "bytes", "futures-channel", "futures-core", @@ -4930,6 +5415,15 @@ version = "1.0.22" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a50f4cf475b65d88e057964e0e9bb1f0aa9bbb2036dc65c64596b42932536984" +[[package]] +name = "same-file" +version = "1.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502" +dependencies = [ + "winapi-util", +] + [[package]] name = "scale-info" version = "2.11.6" @@ -5197,6 +5691,16 @@ dependencies = [ "serde_core", ] +[[package]] +name = "serde_regex" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"a8136f1a4ea815d7eac4101cfd0b16dc0cb5e1fe1b8609dfd728058656b7badf" +dependencies = [ + "regex", + "serde", +] + [[package]] name = "serde_spanned" version = "0.6.9" @@ -5224,7 +5728,7 @@ version = "3.16.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4fa237f2807440d238e0364a218270b98f767a00d3dada77b1c53ae88940e2e7" dependencies = [ - "base64", + "base64 0.22.1", "chrono", "hex", "indexmap 1.9.3", @@ -5350,6 +5854,18 @@ dependencies = [ "rand_core 0.6.4", ] +[[package]] +name = "similar" +version = "2.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbbb5d9659141646ae647b42fe094daf6c6192d1620870b449d9557f748b2daa" + +[[package]] +name = "siphasher" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "56199f7ddabf13fe5074ce809e7d3f42b42ae711800501b5b16ea82ad029c39d" + [[package]] name = "size" version = "0.4.1" @@ -5983,6 +6499,18 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f" +[[package]] +name = "string_cache" +version = "0.8.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bf776ba3fa74f83bf4b63c3dcbbf82173db2632ed8452cb2d891d33f459de70f" +dependencies = [ + "new_debug_unreachable", + "parking_lot", + "phf_shared", + "precomputed-hash", +] + [[package]] name = "strsim" version = "0.11.1" @@ -6134,6 +6662,17 @@ dependencies = [ "windows-sys 0.61.2", ] +[[package]] +name = "term" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c59df8ac95d96ff9bede18eb7300b0fda5e5d8d90960e76f8e14ae765eedbf1f" +dependencies = [ + "dirs-next", + "rustversion", + "winapi", +] + [[package]] name = "thiserror" version = "1.0.69" @@ -6266,9 +6805,12 @@ dependencies = [ "anyhow", "axum", "hex", + "httpmock", + "merlin", "primitive-types", "reqwest", "rustls 0.23.35", + "schnorrkel", "serde", "serde_json", "sp1-sdk", @@ -6443,7 +6985,7 @@ dependencies = [ "async-stream", "async-trait", "axum", - "base64", + "base64 0.22.1", "bytes", "h2 0.4.12", "http 1.4.0", @@ -6771,6 +7313,12 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ba73ea9cf16a25df0c8caa16c51acb937d5712a8429db78a3ee29d5dcacd3a65" +[[package]] +name = "value-bag" +version = "1.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7ba6f5989077681266825251a52748b8c1d8a4ad098cc37e440103d0ea717fc0" + [[package]] name = "vec_map" version = "0.8.2" @@ -6801,6 +7349,16 @@ dependencies = [ "libc", ] +[[package]] +name = "walkdir" +version = "2.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "29790946404f91d9c5d06f9874efddea1dc06c5efe94541a7d6863108e3a5e4b" +dependencies = [ + "same-file", + "winapi-util", +] + [[package]] name = "want" version = "0.3.1" @@ -6941,6 +7499,15 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" +[[package]] +name = "winapi-util" +version = "0.1.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c2a7b1c03c876122aa43f3020e6c3c3ee5c05081c9a00739faf7503aeba10d22" +dependencies = [ + "windows-sys 0.61.2", +] + [[package]] name = "winapi-x86_64-pc-windows-gnu" version = "0.4.0" diff --git a/packages/migration-claim/sp1/merkle-tree.json 
b/packages/migration-claim/sp1/merkle-tree.json new file mode 120000 index 0000000..b8e81b5 --- /dev/null +++ b/packages/migration-claim/sp1/merkle-tree.json @@ -0,0 +1 @@ +../merkle-tree.json \ No newline at end of file diff --git a/packages/migration-claim/sp1/prover-api/Cargo.toml b/packages/migration-claim/sp1/prover-api/Cargo.toml index 9a56bbe..d849306 100644 --- a/packages/migration-claim/sp1/prover-api/Cargo.toml +++ b/packages/migration-claim/sp1/prover-api/Cargo.toml @@ -10,8 +10,10 @@ alloy-sol-types = { workspace = true } anyhow = { workspace = true } axum = "0.7" hex = { workspace = true } +merlin = { workspace = true } primitive-types = "0.12" reqwest = { version = "0.12", default-features = false, features = ["blocking", "json", "rustls-tls"] } +schnorrkel = { workspace = true } serde = { workspace = true } serde_json = "1.0" sp1-sdk = { workspace = true } @@ -22,3 +24,6 @@ tracing = "0.1" tracing-subscriber = { version = "0.3", features = ["fmt"] } uuid = { version = "1", features = ["v4"] } rustls = { version = "0.23", features = ["ring"] } + +[dev-dependencies] +httpmock = "0.7" diff --git a/packages/migration-claim/sp1/prover-api/Dockerfile b/packages/migration-claim/sp1/prover-api/Dockerfile index 6da741e..ac5cbed 100644 --- a/packages/migration-claim/sp1/prover-api/Dockerfile +++ b/packages/migration-claim/sp1/prover-api/Dockerfile @@ -16,8 +16,17 @@ RUN apt-get update \ && apt-get install -y --no-install-recommends ca-certificates libssl3 \ && rm -rf /var/lib/apt/lists/* +# Create app directory structure +WORKDIR /app + +# Copy eligibility data (can be overridden via volume mount at runtime) +COPY packages/migration-claim/sp1/merkle-tree.json /app/data/merkle-tree.json + +# Copy binary COPY --from=builder /repo/packages/migration-claim/sp1/target/release/tnt-claim-prover-api /usr/local/bin/tnt-claim-prover-api +# Set default eligibility path (overridable via ELIGIBILITY_FILE env var) +ENV ELIGIBILITY_FILE=/app/data/merkle-tree.json ENV PORT=8080 EXPOSE 8080 diff --git a/packages/migration-claim/sp1/prover-api/scripts/test-api.sh b/packages/migration-claim/sp1/prover-api/scripts/test-api.sh new file mode 100755 index 0000000..089c5d0 --- /dev/null +++ b/packages/migration-claim/sp1/prover-api/scripts/test-api.sh @@ -0,0 +1,223 @@ +#!/bin/bash +# Integration test script for SP1 Prover API +# Run this script after starting the API server + +set -e + +API_URL="${API_URL:-http://localhost:8080}" + +# Colors for output +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +NC='\033[0m' # No Color + +pass() { + echo -e "${GREEN}PASS${NC}: $1" +} + +fail() { + echo -e "${RED}FAIL${NC}: $1" + exit 1 +} + +warn() { + echo -e "${YELLOW}WARN${NC}: $1" +} + +# Test data +VALID_SS58="5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY" +VALID_SIGNATURE="0x$(printf 'ab%.0s' {1..64})" +VALID_EVM_ADDRESS="0x742d35Cc6634C0532925a3b844Bc9e7595f4a3b2" +VALID_CHALLENGE="0x$(printf '12%.0s' {1..32})" +VALID_AMOUNT="1000000000000000000" + +echo "======================================" +echo "SP1 Prover API Integration Tests" +echo "API URL: $API_URL" +echo "======================================" +echo + +# Test 1: Health endpoint +echo "Test 1: Health endpoint" +HEALTH=$(curl -s "$API_URL/health") +if echo "$HEALTH" | grep -q '"status":"ok"'; then + pass "Health endpoint returns ok" + echo " Response: $HEALTH" +else + fail "Health endpoint failed: $HEALTH" +fi +echo + +# Test 2: Missing required fields +echo "Test 2: Missing required fields -> 400" +RESPONSE=$(curl -s -w "\n%{http_code}" 
-X POST "$API_URL" \ + -H "Content-Type: application/json" \ + -d '{"ss58Address": ""}') +HTTP_CODE=$(echo "$RESPONSE" | tail -n1) +BODY=$(echo "$RESPONSE" | head -n-1) + +if [ "$HTTP_CODE" = "400" ]; then + pass "Missing fields returns 400" + echo " Response: $BODY" +else + fail "Expected 400, got $HTTP_CODE: $BODY" +fi +echo + +# Test 3: Invalid signature length +echo "Test 3: Invalid signature length -> 400" +RESPONSE=$(curl -s -w "\n%{http_code}" -X POST "$API_URL" \ + -H "Content-Type: application/json" \ + -d "{ + \"ss58Address\": \"$VALID_SS58\", + \"signature\": \"0x1234\", + \"evmAddress\": \"$VALID_EVM_ADDRESS\", + \"challenge\": \"$VALID_CHALLENGE\", + \"amount\": \"$VALID_AMOUNT\" + }") +HTTP_CODE=$(echo "$RESPONSE" | tail -n1) +BODY=$(echo "$RESPONSE" | head -n-1) + +if [ "$HTTP_CODE" = "400" ]; then + pass "Invalid signature length returns 400" + echo " Response: $BODY" +else + fail "Expected 400, got $HTTP_CODE: $BODY" +fi +echo + +# Test 4: Invalid amount (hex instead of decimal) +echo "Test 4: Invalid amount format -> 400" +RESPONSE=$(curl -s -w "\n%{http_code}" -X POST "$API_URL" \ + -H "Content-Type: application/json" \ + -d "{ + \"ss58Address\": \"$VALID_SS58\", + \"signature\": \"$VALID_SIGNATURE\", + \"evmAddress\": \"$VALID_EVM_ADDRESS\", + \"challenge\": \"$VALID_CHALLENGE\", + \"amount\": \"0x1234\" + }") +HTTP_CODE=$(echo "$RESPONSE" | tail -n1) +BODY=$(echo "$RESPONSE" | head -n-1) + +if [ "$HTTP_CODE" = "400" ]; then + pass "Hex amount returns 400" + echo " Response: $BODY" +else + fail "Expected 400, got $HTTP_CODE: $BODY" +fi +echo + +# Test 5: Invalid SS58 address +echo "Test 5: Invalid SS58 address -> 400" +RESPONSE=$(curl -s -w "\n%{http_code}" -X POST "$API_URL" \ + -H "Content-Type: application/json" \ + -d "{ + \"ss58Address\": \"invalid_address_here\", + \"signature\": \"$VALID_SIGNATURE\", + \"evmAddress\": \"$VALID_EVM_ADDRESS\", + \"challenge\": \"$VALID_CHALLENGE\", + \"amount\": \"$VALID_AMOUNT\" + }") +HTTP_CODE=$(echo "$RESPONSE" | tail -n1) +BODY=$(echo "$RESPONSE" | head -n-1) + +if [ "$HTTP_CODE" = "400" ]; then + pass "Invalid SS58 address returns 400" + echo " Response: $BODY" +else + fail "Expected 400, got $HTTP_CODE: $BODY" +fi +echo + +# Test 6: Unknown job ID -> 404 +echo "Test 6: Unknown job ID -> 404" +RESPONSE=$(curl -s -w "\n%{http_code}" "$API_URL/status/non-existent-job-id") +HTTP_CODE=$(echo "$RESPONSE" | tail -n1) +BODY=$(echo "$RESPONSE" | head -n-1) + +if [ "$HTTP_CODE" = "404" ]; then + pass "Unknown job ID returns 404" + echo " Response: $BODY" +else + fail "Expected 404, got $HTTP_CODE: $BODY" +fi +echo + +# Test 7: Valid request (only if in mock mode) +echo "Test 7: Valid request submission" +if echo "$HEALTH" | grep -q '"prover_mode":"mock"'; then + RESPONSE=$(curl -s -w "\n%{http_code}" -X POST "$API_URL" \ + -H "Content-Type: application/json" \ + -d "{ + \"ss58Address\": \"$VALID_SS58\", + \"signature\": \"$VALID_SIGNATURE\", + \"evmAddress\": \"$VALID_EVM_ADDRESS\", + \"challenge\": \"$VALID_CHALLENGE\", + \"amount\": \"$VALID_AMOUNT\" + }") + HTTP_CODE=$(echo "$RESPONSE" | tail -n1) + BODY=$(echo "$RESPONSE" | head -n-1) + + if [ "$HTTP_CODE" = "200" ]; then + pass "Valid request accepted" + echo " Response: $BODY" + + # Extract job ID and poll status + JOB_ID=$(echo "$BODY" | grep -o '"jobId":"[^"]*"' | cut -d'"' -f4) + if [ -n "$JOB_ID" ]; then + echo " Polling status for job: $JOB_ID" + for i in {1..10}; do + STATUS=$(curl -s "$API_URL/status/$JOB_ID") + echo " Status: $STATUS" + if echo "$STATUS" | grep -q 
'"status":"completed"'; then + pass "Job completed successfully" + break + elif echo "$STATUS" | grep -q '"status":"failed"'; then + warn "Job failed (expected in mock mode without proper setup)" + break + fi + sleep 1 + done + fi + else + fail "Expected 200, got $HTTP_CODE: $BODY" + fi +else + warn "Skipping - API is not in mock mode (prover_mode != mock)" +fi +echo + +# Test 8: Rate limiting test +echo "Test 8: Rate limiting" +if echo "$HEALTH" | grep -q '"prover_mode":"mock"'; then + echo " Sending multiple rapid requests..." + for i in {1..5}; do + RESPONSE=$(curl -s -w "\n%{http_code}" -X POST "$API_URL" \ + -H "Content-Type: application/json" \ + -d "{ + \"ss58Address\": \"$VALID_SS58\", + \"signature\": \"$VALID_SIGNATURE\", + \"evmAddress\": \"$VALID_EVM_ADDRESS\", + \"challenge\": \"$VALID_CHALLENGE\", + \"amount\": \"$VALID_AMOUNT\" + }") + HTTP_CODE=$(echo "$RESPONSE" | tail -n1) + BODY=$(echo "$RESPONSE" | head -n-1) + echo " Request $i: HTTP $HTTP_CODE" + + if [ "$HTTP_CODE" = "429" ]; then + pass "Rate limiting kicked in on request $i" + echo " Response: $BODY" + break + fi + done +else + warn "Skipping - API is not in mock mode" +fi +echo + +echo "======================================" +echo "Integration tests completed" +echo "======================================" diff --git a/packages/migration-claim/sp1/prover-api/src/cache.rs b/packages/migration-claim/sp1/prover-api/src/cache.rs new file mode 100644 index 0000000..33247e0 --- /dev/null +++ b/packages/migration-claim/sp1/prover-api/src/cache.rs @@ -0,0 +1,158 @@ +use std::collections::HashMap; +use std::sync::Arc; +use tokio::sync::Mutex; +use tracing::info; + +use crate::types::CachedProof; + +/// Proof cache for deduplication +pub struct ProofCache { + cache: Arc>>, + ttl_seconds: u64, +} + +impl ProofCache { + pub fn new(cache: Arc>>, ttl_seconds: u64) -> Self { + Self { cache, ttl_seconds } + } + + /// Get a cached proof if it exists and is not expired + pub async fn get(&self, key: &str) -> Option { + let cache = self.cache.lock().await; + if let Some(proof) = cache.get(key) { + if !proof.is_expired(self.ttl_seconds) { + return Some(proof.clone()); + } + } + None + } + + /// Clean up expired entries + pub async fn cleanup(&self) -> usize { + let mut cache = self.cache.lock().await; + let before = cache.len(); + cache.retain(|_, v| !v.is_expired(self.ttl_seconds)); + let removed = before - cache.len(); + if removed > 0 { + info!("Cache cleanup: removed {} expired entries", removed); + } + removed + } +} + +/// Start a background task to periodically clean up the cache +pub fn start_cache_cleanup_task( + cache: Arc>>, + ttl_seconds: u64, + cleanup_interval_seconds: u64, +) { + let proof_cache = ProofCache::new(cache, ttl_seconds); + tokio::spawn(async move { + let mut interval = + tokio::time::interval(std::time::Duration::from_secs(cleanup_interval_seconds)); + loop { + interval.tick().await; + proof_cache.cleanup().await; + } + }); +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::types::now_ts; + + #[tokio::test] + async fn test_cache_get() { + let cache = Arc::new(Mutex::new(HashMap::new())); + + // Insert directly into the HashMap (as production code does) + { + let mut c = cache.lock().await; + c.insert( + "key1".to_string(), + CachedProof::new("proof1".to_string(), "values1".to_string()), + ); + } + + let proof_cache = ProofCache::new(cache, 60); + let result = proof_cache.get("key1").await; + assert!(result.is_some()); + let cached = result.unwrap(); + assert_eq!(cached.zk_proof, "proof1"); + 
assert_eq!(cached.public_values, "values1"); + } + + #[tokio::test] + async fn test_cache_miss() { + let cache = Arc::new(Mutex::new(HashMap::new())); + let proof_cache = ProofCache::new(cache, 60); + + let result = proof_cache.get("nonexistent").await; + assert!(result.is_none()); + } + + #[tokio::test] + async fn test_cache_expired() { + let cache = Arc::new(Mutex::new(HashMap::new())); + + // Insert an already-expired entry + { + let mut c = cache.lock().await; + c.insert( + "expired_key".to_string(), + CachedProof { + zk_proof: "old_proof".to_string(), + public_values: "old_values".to_string(), + created_at: now_ts() - 120, // 2 minutes ago + }, + ); + } + + let proof_cache = ProofCache::new(cache, 60); // 1 minute TTL + + // Should return None because entry is expired + let result = proof_cache.get("expired_key").await; + assert!(result.is_none()); + } + + #[tokio::test] + async fn test_cache_cleanup() { + let cache = Arc::new(Mutex::new(HashMap::new())); + + // Insert a mix of fresh and expired entries + { + let mut c = cache.lock().await; + c.insert( + "fresh".to_string(), + CachedProof::new("proof".to_string(), "values".to_string()), + ); + c.insert( + "expired1".to_string(), + CachedProof { + zk_proof: "old".to_string(), + public_values: "old".to_string(), + created_at: now_ts() - 120, + }, + ); + c.insert( + "expired2".to_string(), + CachedProof { + zk_proof: "old".to_string(), + public_values: "old".to_string(), + created_at: now_ts() - 200, + }, + ); + } + + let proof_cache = ProofCache::new(cache.clone(), 60); + + assert_eq!(cache.lock().await.len(), 3); + let removed = proof_cache.cleanup().await; + assert_eq!(removed, 2); + assert_eq!(cache.lock().await.len(), 1); + + // Fresh entry should still be there + assert!(proof_cache.get("fresh").await.is_some()); + } +} diff --git a/packages/migration-claim/sp1/prover-api/src/config.rs b/packages/migration-claim/sp1/prover-api/src/config.rs new file mode 100644 index 0000000..6aaae70 --- /dev/null +++ b/packages/migration-claim/sp1/prover-api/src/config.rs @@ -0,0 +1,300 @@ +use std::env; + +use crate::types::{AppConfig, ClaimContractConfig, VerifyOnchainConfig}; +use crate::validation::parse_hex_bytes; + +/// Load configuration from environment variables +pub fn load_config() -> Result { + let prover_mode = env::var("SP1_PROVER").unwrap_or_else(|_| "network".to_string()); + let allow_mock = env::var("ALLOW_MOCK") + .map(|value| value == "true") + .unwrap_or(false); + + // Validate prover mode + if prover_mode == "mock" && !allow_mock { + return Err("SP1_PROVER=mock is disabled. Set ALLOW_MOCK=true to enable.".to_string()); + } + + if prover_mode == "network" && env::var("NETWORK_PRIVATE_KEY").is_err() { + return Err("NETWORK_PRIVATE_KEY is required when SP1_PROVER=network.".to_string()); + } + + // Parse verify settings + let verify_proof = env::var("VERIFY_PROOF") + .map(|value| value == "true") + .unwrap_or(false); + + let verify_onchain = env::var("VERIFY_ONCHAIN") + .map(|value| value == "true") + .unwrap_or(false); + + // Enforce verification in production (SP1_PROVER=network) + if prover_mode == "network" && !verify_proof { + return Err( + "VERIFY_PROOF=true is required when SP1_PROVER=network for production safety." 
+ .to_string(), + ); + } + + // Parse RPC timeout early (needed for verify_onchain_config) + let rpc_timeout_seconds = env::var("RPC_TIMEOUT_SECONDS") + .ok() + .and_then(|v| v.parse().ok()) + .unwrap_or(10); + + // Parse on-chain verification config + let verify_onchain_config = if verify_onchain { + let rpc_url = env::var("VERIFY_ONCHAIN_RPC_URL") + .or_else(|_| env::var("RPC_URL")) + .unwrap_or_else(|_| "http://localhost:8545".to_string()); + + let verifier_address = env::var("SP1_VERIFIER_ADDRESS") + .unwrap_or_else(|_| "0x397A5f7f3dBd538f23DE225B51f532c34448dA9B".to_string()); + + let program_vkey = env::var("SP1_PROGRAM_VKEY") + .map_err(|_| "SP1_PROGRAM_VKEY is required when VERIFY_ONCHAIN=true")?; + + let program_vkey = parse_hex_bytes::<32>(&program_vkey) + .map_err(|e| format!("Invalid SP1_PROGRAM_VKEY: {e}"))?; + + let verifier_bytes = parse_hex_bytes::<20>(&verifier_address) + .map_err(|e| format!("Invalid SP1_VERIFIER_ADDRESS: {e}"))?; + + Some(VerifyOnchainConfig { + rpc_url, + verifier_address: verifier_bytes, + program_vkey, + timeout_seconds: rpc_timeout_seconds, + }) + } else { + None + }; + + // Parse claim contract config + let claim_contract = match env::var("CLAIM_CONTRACT_ADDRESS") { + Ok(address) => { + let rpc_url = env::var("CLAIM_CONTRACT_RPC_URL") + .or_else(|_| env::var("VERIFY_ONCHAIN_RPC_URL")) + .or_else(|_| env::var("RPC_URL")) + .unwrap_or_else(|_| "http://localhost:8545".to_string()); + + let contract_bytes = parse_hex_bytes::<20>(&address) + .map_err(|e| format!("Invalid CLAIM_CONTRACT_ADDRESS: {e}"))?; + + Some(ClaimContractConfig { + rpc_url, + contract_address: contract_bytes, + }) + } + Err(_) => None, + }; + + // Parse numeric settings with defaults + let cache_ttl_seconds = env::var("CACHE_TTL_SECONDS") + .ok() + .and_then(|v| v.parse().ok()) + .unwrap_or(3600); // 1 hour - proofs are deterministic and expensive + + let rate_limit_window_seconds = env::var("RATE_LIMIT_WINDOW_SECONDS") + .ok() + .and_then(|v| v.parse().ok()) + .unwrap_or(300); // 5 minutes + + let rate_limit_max_requests = env::var("RATE_LIMIT_MAX_REQUESTS") + .ok() + .and_then(|v| v.parse().ok()) + .unwrap_or(3); + + let queue_capacity = env::var("QUEUE_CAPACITY") + .ok() + .and_then(|v| v.parse().ok()) + .unwrap_or(50); // ~1 hour max wait with 4 workers + + let worker_count = env::var("WORKER_COUNT") + .ok() + .and_then(|v| v.parse().ok()) + .unwrap_or(4); + + let proof_timeout_seconds = env::var("PROOF_TIMEOUT_SECONDS") + .ok() + .and_then(|v| v.parse().ok()) + .unwrap_or(600); // 10 minutes + + let max_body_bytes = env::var("MAX_BODY_BYTES") + .ok() + .and_then(|v| v.parse().ok()) + .unwrap_or(4096); // 4 KB + + let jobs_ttl_seconds = env::var("JOBS_TTL_SECONDS") + .ok() + .and_then(|v| v.parse().ok()) + .unwrap_or(3600); // 1 hour - let users come back for their proof + + // IP-based rate limiting (separate from pubkey rate limiting) + let ip_rate_limit_window_seconds = env::var("IP_RATE_LIMIT_WINDOW_SECONDS") + .ok() + .and_then(|v| v.parse().ok()) + .unwrap_or(60); // 1 minute + + let ip_rate_limit_max_requests = env::var("IP_RATE_LIMIT_MAX_REQUESTS") + .ok() + .and_then(|v| v.parse().ok()) + .unwrap_or(10); // 10 requests per minute per IP + + // Eligibility file path (default works for local dev, Dockerfile overrides for production) + let eligibility_file = + env::var("ELIGIBILITY_FILE").unwrap_or_else(|_| "../merkle-tree.json".to_string()); + + // Signature verification (enabled by default, can be disabled for testing) + let verify_signatures = 
env::var("VERIFY_SIGNATURES") + .map(|v| v != "false") + .unwrap_or(true); + + // Trust proxy headers (X-Forwarded-For, X-Real-IP) for IP extraction + // Default: false for security. Set to true when behind a trusted reverse proxy. + let trust_proxy_headers = env::var("TRUST_PROXY_HEADERS") + .map(|v| v == "true") + .unwrap_or(false); + + Ok(AppConfig { + prover_mode, + verify_proof, + verify_onchain: verify_onchain_config, + claim_contract, + cache_ttl_seconds, + rate_limit_window_seconds, + rate_limit_max_requests, + ip_rate_limit_window_seconds, + ip_rate_limit_max_requests, + queue_capacity, + worker_count, + proof_timeout_seconds, + rpc_timeout_seconds, + max_body_bytes, + jobs_ttl_seconds, + eligibility_file, + verify_signatures, + trust_proxy_headers, + }) +} + +/// Validate CORS configuration +pub fn validate_cors(prover_mode: &str) -> Result, String> { + let cors_origins = env::var("CORS_ALLOWED_ORIGINS").ok(); + + match &cors_origins { + Some(origins) if !origins.is_empty() => { + let trimmed = origins.trim(); + + // Handle wildcard - return None to use Any mode + if trimmed == "*" { + return Ok(None); + } + + // Validate that origins are parseable + let valid: Vec<_> = origins + .split(',') + .filter_map(|s| { + let trimmed = s.trim(); + if trimmed.is_empty() { + None + } else { + Some(trimmed.to_string()) + } + }) + .collect(); + + if valid.is_empty() { + return Err("CORS_ALLOWED_ORIGINS contains no valid origins".to_string()); + } + + Ok(Some(origins.clone())) + } + _ if prover_mode == "network" => { + Err("CORS_ALLOWED_ORIGINS is required when SP1_PROVER=network".to_string()) + } + _ => Ok(None), // Allow all origins for local/mock testing + } +} + +/// Get the port from environment +pub fn get_port() -> u16 { + env::var("PORT") + .ok() + .and_then(|value| value.parse().ok()) + .unwrap_or(8080) +} + +#[cfg(test)] +mod tests { + use super::*; + use std::env; + + // Note: These tests modify environment variables so they should be run serially + // In practice, you'd use a test framework that handles this + + #[allow(dead_code)] + fn clear_env_vars() { + env::remove_var("SP1_PROVER"); + env::remove_var("ALLOW_MOCK"); + env::remove_var("NETWORK_PRIVATE_KEY"); + env::remove_var("VERIFY_PROOF"); + env::remove_var("VERIFY_ONCHAIN"); + env::remove_var("CORS_ALLOWED_ORIGINS"); + env::remove_var("CLAIM_CONTRACT_ADDRESS"); + env::remove_var("SP1_PROGRAM_VKEY"); + env::remove_var("CACHE_TTL_SECONDS"); + env::remove_var("RATE_LIMIT_WINDOW_SECONDS"); + env::remove_var("RATE_LIMIT_MAX_REQUESTS"); + env::remove_var("QUEUE_CAPACITY"); + env::remove_var("WORKER_COUNT"); + env::remove_var("PROOF_TIMEOUT_SECONDS"); + env::remove_var("RPC_TIMEOUT_SECONDS"); + } + + // These tests use the same env var so must be run together to avoid race conditions + #[test] + fn test_validate_cors_all_cases() { + // Allow all origins in mock mode (no env var) + env::remove_var("CORS_ALLOWED_ORIGINS"); + let result = validate_cors("mock"); + assert!(result.is_ok(), "mock mode should allow no CORS config"); + + // Required in network/production mode + let result = validate_cors("network"); + assert!(result.is_err(), "network mode should require CORS config"); + assert!(result.unwrap_err().contains("CORS_ALLOWED_ORIGINS")); + + // Empty origins not allowed + env::set_var("CORS_ALLOWED_ORIGINS", " "); + let result = validate_cors("network"); + assert!(result.is_err(), "empty origins should be rejected"); + + // Valid origins work + env::set_var("CORS_ALLOWED_ORIGINS", "http://localhost:3000"); + let result = 
validate_cors("network"); + assert!(result.is_ok(), "valid origins should work"); + + // Cleanup + env::remove_var("CORS_ALLOWED_ORIGINS"); + } + + // These tests use the same env var so must be run together to avoid race conditions + #[test] + fn test_get_port_all_cases() { + // Default case + env::remove_var("PORT"); + assert_eq!(get_port(), 8080, "default port should be 8080"); + + // Custom valid port + env::set_var("PORT", "9000"); + assert_eq!(get_port(), 9000, "custom port should work"); + + // Invalid port uses default + env::set_var("PORT", "not_a_number"); + assert_eq!(get_port(), 8080, "invalid port should use default"); + + // Cleanup + env::remove_var("PORT"); + } +} diff --git a/packages/migration-claim/sp1/prover-api/src/eligibility.rs b/packages/migration-claim/sp1/prover-api/src/eligibility.rs new file mode 100644 index 0000000..840aba3 --- /dev/null +++ b/packages/migration-claim/sp1/prover-api/src/eligibility.rs @@ -0,0 +1,195 @@ +//! Eligibility data loading and verification +//! +//! Loads the merkle tree JSON file at startup and provides O(1) lookup +//! to check if a public key is eligible for migration claims. +//! +//! Uses pubkey-based lookup to handle SS58 addresses with different network prefixes. + +use alloy_primitives::U256; +use serde::Deserialize; +use std::collections::HashMap; +use std::fs; +use tracing::info; + +/// Entry in the merkle tree for an eligible address +#[derive(Debug, Clone)] +pub struct EligibilityEntry { + /// Eligible balance in wei + pub balance: U256, +} + +/// Eligibility data loaded from merkle-tree.json +pub struct EligibilityData { + /// Entries keyed by lowercase hex pubkey (e.g., "0xabcd...") for O(1) lookup + /// This handles SS58 addresses with different network prefixes correctly + entries_by_pubkey: HashMap, +} + +/// JSON structure for parsing merkle-tree.json +#[derive(Deserialize)] +struct MerkleTreeJson { + /// Entries by pubkey (preferred for lookup - handles different SS58 prefixes) + #[serde(rename = "entriesByPubkey")] + entries_by_pubkey: Option>, + /// Entries by SS58 address (fallback if entriesByPubkey not present) + entries: HashMap, +} + +/// JSON structure for individual merkle tree entries (by SS58) +#[derive(Deserialize)] +struct MerkleEntryJson { + balance: String, + /// Pubkey in hex format (lowercase, with 0x prefix) + pubkey: Option, +} + +/// JSON structure for entries by pubkey +#[derive(Deserialize)] +struct MerkleEntryByPubkeyJson { + balance: String, +} + +impl EligibilityData { + /// Load eligibility data from a merkle-tree.json file + /// + /// Prefers `entriesByPubkey` for lookup (handles different SS58 prefixes). + /// Falls back to `entries` if `entriesByPubkey` is not present. 
+ /// + /// # Arguments + /// * `path` - Path to the merkle-tree.json file + /// + /// # Returns + /// * `Ok(EligibilityData)` - Loaded eligibility data + /// * `Err(String)` - Error message if loading fails + pub fn load_from_file(path: &str) -> Result { + // Read file + let content = fs::read_to_string(path) + .map_err(|e| format!("Failed to read eligibility file '{}': {}", path, e))?; + + // Parse JSON + let merkle_tree: MerkleTreeJson = serde_json::from_str(&content) + .map_err(|e| format!("Failed to parse eligibility JSON: {}", e))?; + + let mut entries_by_pubkey = HashMap::new(); + + // Prefer entriesByPubkey if available (already keyed by pubkey) + if let Some(by_pubkey) = merkle_tree.entries_by_pubkey { + for (pubkey, entry) in by_pubkey { + let balance = U256::from_str_radix(&entry.balance, 10).map_err(|e| { + format!( + "Invalid balance for pubkey '{}': {} (value: {})", + pubkey, e, entry.balance + ) + })?; + + // Normalize pubkey to lowercase + let normalized_pubkey = pubkey.to_lowercase(); + entries_by_pubkey.insert(normalized_pubkey, EligibilityEntry { balance }); + } + + info!( + "Loaded {} eligible addresses (by pubkey)", + entries_by_pubkey.len() + ); + } else { + // Fallback: use entries and extract pubkey from each entry + for (ss58_address, entry) in merkle_tree.entries { + let balance = U256::from_str_radix(&entry.balance, 10).map_err(|e| { + format!( + "Invalid balance for address '{}': {} (value: {})", + ss58_address, e, entry.balance + ) + })?; + + // Get pubkey from entry or skip if not available + if let Some(pubkey) = entry.pubkey { + let normalized_pubkey = pubkey.to_lowercase(); + entries_by_pubkey.insert(normalized_pubkey, EligibilityEntry { balance }); + } + } + + info!( + "Loaded {} eligible addresses (from entries with pubkey)", + entries_by_pubkey.len() + ); + } + + Ok(Self { entries_by_pubkey }) + } + + /// Check if a public key is eligible + /// + /// # Arguments + /// * `pubkey` - 32-byte public key + #[inline] + pub fn is_eligible_by_pubkey(&self, pubkey: &[u8; 32]) -> bool { + let hex_pubkey = format!("0x{}", hex::encode(pubkey)); + self.entries_by_pubkey.contains_key(&hex_pubkey) + } + + /// Verify that the requested amount matches the eligible balance + /// + /// # Arguments + /// * `pubkey` - 32-byte public key + /// * `amount` - Requested amount + /// + /// Returns true if the pubkey is eligible AND the amount matches exactly. 
+ pub fn verify_amount_by_pubkey(&self, pubkey: &[u8; 32], amount: &U256) -> bool { + let hex_pubkey = format!("0x{}", hex::encode(pubkey)); + self.entries_by_pubkey + .get(&hex_pubkey) + .map(|entry| &entry.balance == amount) + .unwrap_or(false) + } + + /// Get the number of eligible addresses + #[inline] + pub fn entry_count(&self) -> usize { + self.entries_by_pubkey.len() + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_eligibility_data_lookup() { + let mut entries_by_pubkey = HashMap::new(); + // Test pubkey (32 bytes of 0xab) + let test_pubkey = [0xab; 32]; + let hex_pubkey = format!("0x{}", hex::encode(test_pubkey)); + + entries_by_pubkey.insert( + hex_pubkey, + EligibilityEntry { + balance: U256::from(1000u64), + }, + ); + + let data = EligibilityData { entries_by_pubkey }; + + assert!(data.is_eligible_by_pubkey(&test_pubkey)); + assert!(!data.is_eligible_by_pubkey(&[0xcd; 32])); // Different pubkey + } + + #[test] + fn test_verify_amount() { + let mut entries_by_pubkey = HashMap::new(); + let test_pubkey = [0xab; 32]; + let hex_pubkey = format!("0x{}", hex::encode(test_pubkey)); + + entries_by_pubkey.insert( + hex_pubkey, + EligibilityEntry { + balance: U256::from(1000u64), + }, + ); + + let data = EligibilityData { entries_by_pubkey }; + + assert!(data.verify_amount_by_pubkey(&test_pubkey, &U256::from(1000u64))); + assert!(!data.verify_amount_by_pubkey(&test_pubkey, &U256::from(999u64))); + assert!(!data.verify_amount_by_pubkey(&[0xcd; 32], &U256::from(1000u64))); + } +} diff --git a/packages/migration-claim/sp1/prover-api/src/handlers.rs b/packages/migration-claim/sp1/prover-api/src/handlers.rs new file mode 100644 index 0000000..b02c686 --- /dev/null +++ b/packages/migration-claim/sp1/prover-api/src/handlers.rs @@ -0,0 +1,536 @@ +use alloy_primitives::U256; +use axum::{ + extract::{ConnectInfo, Path, State}, + http::{HeaderMap, StatusCode}, + response::IntoResponse, + Json, +}; +use std::net::SocketAddr; +use tracing::{error, info, warn}; +use uuid::Uuid; + +use crate::cache::ProofCache; +use crate::prover::check_already_claimed; +use crate::queue::{EnqueueResult, JobQueue}; +use crate::rate_limit::{RateLimitResult, RateLimiter}; +use crate::signature::verify_signature; +use crate::types::{ + error_codes, AppState, HealthResponse, JobEntry, JobMessage, JobResponse, JobStatus, + ProveRequest, StatusResponse, +}; +use crate::validation::{ + cache_key, extract_client_ip, rate_limit_key_ip, rate_limit_key_pubkey, validate_request, +}; + +/// Submit a new proof generation job +pub async fn submit_job( + State(state): State, + ConnectInfo(addr): ConnectInfo, + headers: HeaderMap, + Json(request): Json, +) -> Result, (StatusCode, Json)> { + // 0. Extract client IP for rate limiting + let client_ip = extract_client_ip( + &headers, + Some(&addr.to_string()), + state.config.trust_proxy_headers, + ); + + // 1. Check IP-based rate limit FIRST (cheapest check, catches scanners/bots) + let ip_rate_limit_key = rate_limit_key_ip(&client_ip); + let ip_limiter = RateLimiter::new( + state.ip_rate_limits.clone(), + state.config.ip_rate_limit_window_seconds, + state.config.ip_rate_limit_max_requests, + ); + + match ip_limiter.check_and_update(&ip_rate_limit_key).await { + RateLimitResult::Limited { retry_after } => { + warn!( + "IP rate limited: {} (retry after {}s)", + client_ip, retry_after + ); + return Err(( + StatusCode::TOO_MANY_REQUESTS, + Json(StatusResponse::failed_with_retry( + error_codes::RATE_LIMITED, + format!( + "Too many requests from your IP. 
Please wait {} seconds.", + retry_after + ), + retry_after, + )), + )); + } + RateLimitResult::Allowed => {} + } + + // 2. Validate input format + let validated = validate_request(&request).map_err(|e| { + warn!("Invalid request: {}", e.message); + ( + StatusCode::BAD_REQUEST, + Json(StatusResponse::failed( + error_codes::INVALID_INPUT, + e.message, + )), + ) + })?; + + // 3. Check eligibility by pubkey (handles different SS58 prefixes) + if !state.eligibility.is_eligible_by_pubkey(&validated.pubkey) { + warn!("Ineligible address: {}", request.ss58_address); + return Err(( + StatusCode::FORBIDDEN, + Json(StatusResponse::failed( + error_codes::NOT_ELIGIBLE, + "This address is not eligible for migration claims".to_string(), + )), + )); + } + + // 4. Verify amount matches the eligible balance (by pubkey) + let request_amount = U256::from_be_bytes(validated.amount); + if !state + .eligibility + .verify_amount_by_pubkey(&validated.pubkey, &request_amount) + { + warn!( + "Amount mismatch for {}: requested {} but eligible for different amount", + request.ss58_address, request_amount + ); + return Err(( + StatusCode::FORBIDDEN, + Json(StatusResponse::failed( + error_codes::AMOUNT_MISMATCH, + "Requested amount does not match the eligible balance".to_string(), + )), + )); + } + + // 5. Verify signature (if enabled) + if state.config.verify_signatures { + if let Err(e) = verify_signature( + &validated.pubkey, + &validated.signature, + &validated.challenge, + ) { + warn!("Invalid signature for {}: {}", request.ss58_address, e); + return Err(( + StatusCode::UNAUTHORIZED, + Json(StatusResponse::failed( + error_codes::INVALID_SIGNATURE, + "Signature verification failed. Please sign with the correct key.".to_string(), + )), + )); + } + } + + // 6. Check per-pubkey rate limit (existing, for abuse prevention) + let rate_limit_key = rate_limit_key_pubkey(&validated.pubkey); + let rate_limiter = RateLimiter::new( + state.rate_limits.clone(), + state.config.rate_limit_window_seconds, + state.config.rate_limit_max_requests, + ); + + match rate_limiter.check_and_update(&rate_limit_key).await { + RateLimitResult::Limited { retry_after } => { + warn!( + "Rate limited: {} (retry after {}s)", + request.ss58_address, retry_after + ); + return Err(( + StatusCode::TOO_MANY_REQUESTS, + Json(StatusResponse::failed_with_retry( + error_codes::RATE_LIMITED, + format!( + "Too many requests. Please wait {} seconds before trying again.", + retry_after + ), + retry_after, + )), + )); + } + RateLimitResult::Allowed => {} + } + + // 7. Check cache for existing proof + let cache_key = cache_key(&request); + let proof_cache = ProofCache::new(state.cache.clone(), state.config.cache_ttl_seconds); + + if let Some(cached) = proof_cache.get(&cache_key).await { + info!("Cache hit for {}", request.ss58_address); + // Return a synthetic completed job with the cached proof + let job_id = Uuid::new_v4().to_string(); + { + let mut jobs = state.jobs.lock().await; + jobs.insert( + job_id.clone(), + JobEntry::new(JobStatus::Completed { + zk_proof: cached.zk_proof, + public_values: cached.public_values, + }), + ); + } + return Ok(Json(JobResponse { job_id })); + } + + // 8. 
Check if user has already claimed on-chain + if let Some(ref claim_config) = state.config.claim_contract { + match check_already_claimed( + claim_config, + &request.ss58_address, + state.config.rpc_timeout_seconds, + ) + .await + { + Ok(true) => { + info!( + "Rejecting request: {} has already claimed", + request.ss58_address + ); + return Err(( + StatusCode::CONFLICT, + Json(StatusResponse::failed( + error_codes::ALREADY_CLAIMED, + "This address has already claimed tokens".to_string(), + )), + )); + } + Ok(false) => { + // User hasn't claimed, proceed + } + Err(e) => { + error!( + "Failed to check claim status for {}: {}", + request.ss58_address, e + ); + return Err(( + StatusCode::SERVICE_UNAVAILABLE, + Json(StatusResponse::failed( + error_codes::RPC_UNAVAILABLE, + format!("Unable to verify claim status: {e}. Please try again."), + )), + )); + } + } + } + + // 9. Create job and try to enqueue + let job_id = Uuid::new_v4().to_string(); + + // Try to enqueue if we have a queue + if let (Some(sender), Some(queue_size)) = + (state.job_sender.as_ref(), state.queue_size.as_ref()) + { + let queue = JobQueue::from_sender( + sender.clone(), + state.config.queue_capacity, + queue_size.clone(), + ); + + match queue + .try_enqueue(JobMessage { + job_id: job_id.clone(), + request: request.clone(), + }) + .await + { + EnqueueResult::Queued => { + // Create pending job entry + { + let mut jobs = state.jobs.lock().await; + jobs.insert(job_id.clone(), JobEntry::new(JobStatus::Pending)); + } + info!("Job {} queued for {}", job_id, request.ss58_address); + } + EnqueueResult::QueueFull => { + warn!("Queue full, rejecting job for {}", request.ss58_address); + return Err(( + StatusCode::SERVICE_UNAVAILABLE, + Json(StatusResponse::failed( + error_codes::QUEUE_FULL, + "Server is at capacity. Please try again later.".to_string(), + )), + )); + } + } + } else if state.job_sender.is_some() { + error!("Job sender configured but queue size counter is missing"); + return Err(( + StatusCode::INTERNAL_SERVER_ERROR, + Json(StatusResponse::failed( + error_codes::INTERNAL_ERROR, + "Server misconfiguration".to_string(), + )), + )); + } else { + // Legacy mode: spawn task directly (for backward compatibility during transition) + { + let mut jobs = state.jobs.lock().await; + jobs.insert(job_id.clone(), JobEntry::new(JobStatus::Pending)); + } + + let jobs = state.jobs.clone(); + let cache = state.cache.clone(); + let verify_proof = state.config.verify_proof; + let verify_onchain = state.config.verify_onchain.clone(); + let prover_mode = state.config.prover_mode.clone(); + let job_id_clone = job_id.clone(); + let cache_key_clone = cache_key.clone(); + let config = state.config.clone(); + let ss58_address = request.ss58_address.clone(); + let metrics = state.metrics.clone(); + + tokio::spawn(async move { + use crate::prover::generate_proof; + use crate::types::{now_ts, CachedProof}; + + // Update to running + { + let mut jobs = jobs.lock().await; + if let Some(entry) = jobs.get_mut(&job_id_clone) { + entry.status = JobStatus::Running; + entry.updated_at = now_ts(); + } + } + + // Generate proof with timeout + let timeout = std::time::Duration::from_secs(config.proof_timeout_seconds); + let prover_mode_clone = prover_mode.clone(); + let handle = tokio::task::spawn_blocking(move || { + generate_proof(request, verify_proof, verify_onchain, &prover_mode_clone) + }); + tokio::pin!(handle); + let result = tokio::select! 
{ + biased; + res = &mut handle => Some(res), + _ = tokio::time::sleep(timeout) => None, + }; + let timed_out = result.is_none(); + + let final_status = match result { + Some(Ok(Ok((zk_proof, public_values)))) => { + metrics.record_completion(); + // Store in cache + { + let mut c = cache.lock().await; + c.insert( + cache_key_clone.clone(), + CachedProof::new(zk_proof.clone(), public_values.clone()), + ); + } + JobStatus::Completed { + zk_proof, + public_values, + } + } + Some(Ok(Err(err))) => { + error!("Proof generation failed for job {}: {}", job_id_clone, err); + JobStatus::Failed { + error: format!("{}: {}", error_codes::PROOF_FAILED, err), + } + } + Some(Err(join_err)) => { + error!("Job {} panicked: {}", job_id_clone, join_err); + JobStatus::Failed { + error: format!("{}: task panicked", error_codes::INTERNAL_ERROR), + } + } + None => { + metrics.record_timeout(); + error!("Job {} timed out after {:?}", job_id_clone, timeout); + JobStatus::Failed { + error: format!( + "{}: proof generation exceeded {} seconds", + error_codes::TIMEOUT, + timeout.as_secs() + ), + } + } + }; + + // Update job status + { + let mut jobs = jobs.lock().await; + if let Some(entry) = jobs.get_mut(&job_id_clone) { + entry.status = final_status; + entry.updated_at = now_ts(); + } + } + + if timed_out { + match handle.await { + Ok(Ok((zk_proof, public_values))) => { + metrics.record_completion(); + { + let mut c = cache.lock().await; + c.insert( + cache_key_clone, + CachedProof::new(zk_proof.clone(), public_values.clone()), + ); + } + let mut jobs = jobs.lock().await; + if let Some(entry) = jobs.get_mut(&job_id_clone) { + entry.status = JobStatus::Completed { + zk_proof, + public_values, + }; + entry.updated_at = now_ts(); + } + } + Ok(Err(err)) => { + error!( + "Proof generation failed for job {} after timeout: {}", + job_id_clone, err + ); + let mut jobs = jobs.lock().await; + if let Some(entry) = jobs.get_mut(&job_id_clone) { + entry.status = JobStatus::Failed { + error: format!("{}: {}", error_codes::PROOF_FAILED, err), + }; + entry.updated_at = now_ts(); + } + } + Err(join_err) => { + error!( + "Job {} panicked after timeout: {}", + job_id_clone, join_err + ); + let mut jobs = jobs.lock().await; + if let Some(entry) = jobs.get_mut(&job_id_clone) { + entry.status = JobStatus::Failed { + error: format!("{}: task panicked", error_codes::INTERNAL_ERROR), + }; + entry.updated_at = now_ts(); + } + } + } + metrics.decrement_timed_out_still_running(); + } + }); + + info!("Job {} spawned for {}", job_id, ss58_address); + } + + Ok(Json(JobResponse { job_id })) +} + +/// Get job status +pub async fn job_status( + State(state): State, + Path(job_id): Path, +) -> Result, (StatusCode, Json)> { + let jobs = state.jobs.lock().await; + match jobs.get(&job_id) { + Some(job) => Ok(Json(status_from_job(&job.status))), + None => Err(( + StatusCode::NOT_FOUND, + Json(StatusResponse::failed( + error_codes::NOT_FOUND, + "Job not found".to_string(), + )), + )), + } +} + +/// Health check endpoint +pub async fn health(State(state): State) -> impl IntoResponse { + let jobs = state.jobs.lock().await; + let cache = state.cache.lock().await; + + let queue_size = state + .queue_size + .as_ref() + .map(|counter| counter.load(std::sync::atomic::Ordering::SeqCst)) + .unwrap_or(0); + + let response = HealthResponse { + status: "ok".to_string(), + prover_mode: state.config.prover_mode.clone(), + verify_proof: state.config.verify_proof, + verify_onchain: state.config.verify_onchain.is_some(), + jobs: jobs.len(), + cache_size: cache.len(), + 
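+        // Reported as 0 in legacy mode, where no job queue is configured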
queue_size, + queue_capacity: state.config.queue_capacity, + proof_metrics: state.metrics.snapshot(), + }; + (StatusCode::OK, Json(response)) +} + +/// Convert job status to response +fn status_from_job(status: &JobStatus) -> StatusResponse { + match status { + JobStatus::Pending => StatusResponse::pending(), + JobStatus::Running => StatusResponse::running(), + JobStatus::Completed { + zk_proof, + public_values, + } => StatusResponse::completed(zk_proof.clone(), public_values.clone()), + JobStatus::Failed { error } => { + // Parse the error code from the error message if present + let (code, message) = if let Some(idx) = error.find(':') { + let code = &error[..idx]; + let msg = error[idx + 1..].trim(); + (code, msg.to_string()) + } else { + (error_codes::PROOF_FAILED, error.clone()) + }; + StatusResponse::failed(code, message) + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_status_from_job_pending() { + let status = JobStatus::Pending; + let resp = status_from_job(&status); + assert_eq!(resp.status, "pending"); + } + + #[test] + fn test_status_from_job_running() { + let status = JobStatus::Running; + let resp = status_from_job(&status); + assert_eq!(resp.status, "running"); + } + + #[test] + fn test_status_from_job_completed() { + let status = JobStatus::Completed { + zk_proof: "0x123".to_string(), + public_values: "0x456".to_string(), + }; + let resp = status_from_job(&status); + assert_eq!(resp.status, "completed"); + assert_eq!(resp.zk_proof, Some("0x123".to_string())); + assert_eq!(resp.public_values, Some("0x456".to_string())); + } + + #[test] + fn test_status_from_job_failed_with_code() { + let status = JobStatus::Failed { + error: "timeout: proof generation exceeded 600 seconds".to_string(), + }; + let resp = status_from_job(&status); + assert_eq!(resp.status, "failed"); + assert_eq!(resp.code, Some("timeout".to_string())); + assert!(resp.error.unwrap().contains("600 seconds")); + } + + #[test] + fn test_status_from_job_failed_without_code() { + let status = JobStatus::Failed { + error: "some error without code".to_string(), + }; + let resp = status_from_job(&status); + assert_eq!(resp.status, "failed"); + assert_eq!(resp.code, Some(error_codes::PROOF_FAILED.to_string())); + } +} diff --git a/packages/migration-claim/sp1/prover-api/src/jobs.rs b/packages/migration-claim/sp1/prover-api/src/jobs.rs new file mode 100644 index 0000000..74d2381 --- /dev/null +++ b/packages/migration-claim/sp1/prover-api/src/jobs.rs @@ -0,0 +1,240 @@ +use std::collections::HashMap; +use std::sync::Arc; +use tokio::sync::Mutex; +use tracing::info; + +use crate::types::{now_ts, JobEntry, JobStatus}; + +/// Job manager for TTL-based cleanup +pub struct JobManager { + jobs: Arc>>, + ttl_seconds: u64, +} + +impl JobManager { + pub fn new(jobs: Arc>>, ttl_seconds: u64) -> Self { + Self { jobs, ttl_seconds } + } + + /// Clean up old completed/failed jobs + pub async fn cleanup(&self) -> usize { + let mut jobs = self.jobs.lock().await; + let before = jobs.len(); + let now = now_ts(); + + jobs.retain(|_, entry| match entry.status { + JobStatus::Completed { .. } | JobStatus::Failed { .. 
} => { + now - entry.updated_at < self.ttl_seconds + } + JobStatus::Pending | JobStatus::Running => true, + }); + + let removed = before - jobs.len(); + if removed > 0 { + info!("Jobs cleanup: removed {} old entries", removed); + } + removed + } +} + +/// Start a background task to periodically clean up old jobs +pub fn start_jobs_cleanup_task( + jobs: Arc>>, + ttl_seconds: u64, + cleanup_interval_seconds: u64, +) { + let job_manager = JobManager::new(jobs, ttl_seconds); + tokio::spawn(async move { + let mut interval = + tokio::time::interval(std::time::Duration::from_secs(cleanup_interval_seconds)); + loop { + interval.tick().await; + job_manager.cleanup().await; + } + }); +} + +#[cfg(test)] +mod tests { + use super::*; + + #[tokio::test] + async fn test_cleanup_keeps_pending_jobs() { + let jobs = Arc::new(Mutex::new(HashMap::new())); + + // Insert an old pending job + { + let mut j = jobs.lock().await; + j.insert( + "pending_job".to_string(), + JobEntry { + status: JobStatus::Pending, + updated_at: now_ts() - 1000, // Old job + }, + ); + } + + let manager = JobManager::new(jobs.clone(), 60); + let removed = manager.cleanup().await; + + assert_eq!(removed, 0); + assert_eq!(jobs.lock().await.len(), 1); + } + + #[tokio::test] + async fn test_cleanup_keeps_running_jobs() { + let jobs = Arc::new(Mutex::new(HashMap::new())); + + // Insert an old running job + { + let mut j = jobs.lock().await; + j.insert( + "running_job".to_string(), + JobEntry { + status: JobStatus::Running, + updated_at: now_ts() - 1000, // Old job + }, + ); + } + + let manager = JobManager::new(jobs.clone(), 60); + let removed = manager.cleanup().await; + + assert_eq!(removed, 0); + assert_eq!(jobs.lock().await.len(), 1); + } + + #[tokio::test] + async fn test_cleanup_removes_old_completed_jobs() { + let jobs = Arc::new(Mutex::new(HashMap::new())); + + // Insert an old completed job + { + let mut j = jobs.lock().await; + j.insert( + "old_completed".to_string(), + JobEntry { + status: JobStatus::Completed { + zk_proof: "0x123".to_string(), + public_values: "0x456".to_string(), + }, + updated_at: now_ts() - 120, // 2 minutes ago + }, + ); + // Insert a fresh completed job + j.insert( + "fresh_completed".to_string(), + JobEntry { + status: JobStatus::Completed { + zk_proof: "0x789".to_string(), + public_values: "0xabc".to_string(), + }, + updated_at: now_ts() - 30, // 30 seconds ago + }, + ); + } + + let manager = JobManager::new(jobs.clone(), 60); // 1 minute TTL + let removed = manager.cleanup().await; + + assert_eq!(removed, 1); + assert_eq!(jobs.lock().await.len(), 1); + + // Verify the fresh one is still there + let j = jobs.lock().await; + assert!(j.contains_key("fresh_completed")); + assert!(!j.contains_key("old_completed")); + } + + #[tokio::test] + async fn test_cleanup_removes_old_failed_jobs() { + let jobs = Arc::new(Mutex::new(HashMap::new())); + + // Insert an old failed job + { + let mut j = jobs.lock().await; + j.insert( + "old_failed".to_string(), + JobEntry { + status: JobStatus::Failed { + error: "some error".to_string(), + }, + updated_at: now_ts() - 120, // 2 minutes ago + }, + ); + } + + let manager = JobManager::new(jobs.clone(), 60); // 1 minute TTL + let removed = manager.cleanup().await; + + assert_eq!(removed, 1); + assert_eq!(jobs.lock().await.len(), 0); + } + + #[tokio::test] + async fn test_mixed_cleanup() { + let jobs = Arc::new(Mutex::new(HashMap::new())); + let now = now_ts(); + + { + let mut j = jobs.lock().await; + // Old jobs + j.insert( + "old_pending".to_string(), + JobEntry { + status: 
JobStatus::Pending, + updated_at: now - 200, + }, + ); + j.insert( + "old_running".to_string(), + JobEntry { + status: JobStatus::Running, + updated_at: now - 200, + }, + ); + j.insert( + "old_completed".to_string(), + JobEntry { + status: JobStatus::Completed { + zk_proof: "0x".to_string(), + public_values: "0x".to_string(), + }, + updated_at: now - 200, + }, + ); + j.insert( + "old_failed".to_string(), + JobEntry { + status: JobStatus::Failed { + error: "err".to_string(), + }, + updated_at: now - 200, + }, + ); + // Fresh jobs + j.insert( + "fresh_completed".to_string(), + JobEntry { + status: JobStatus::Completed { + zk_proof: "0x".to_string(), + public_values: "0x".to_string(), + }, + updated_at: now - 30, + }, + ); + } + + let manager = JobManager::new(jobs.clone(), 60); + let removed = manager.cleanup().await; + + // Should remove old completed/failed only; keep pending/running and fresh completed + assert_eq!(removed, 2); + assert_eq!(jobs.lock().await.len(), 3); + + let j = jobs.lock().await; + assert!(j.contains_key("fresh_completed")); + assert!(j.contains_key("old_pending")); + assert!(j.contains_key("old_running")); + } +} diff --git a/packages/migration-claim/sp1/prover-api/src/lib.rs b/packages/migration-claim/sp1/prover-api/src/lib.rs new file mode 100644 index 0000000..d2ad9cb --- /dev/null +++ b/packages/migration-claim/sp1/prover-api/src/lib.rs @@ -0,0 +1,23 @@ +//! SP1 Prover API library +//! +//! This crate provides a REST API for generating ZK proofs using the SP1 SDK. +//! The modules are separated to allow testing of non-SP1-dependent code independently. + +pub mod cache; +pub mod config; +pub mod eligibility; +pub mod handlers; +pub mod jobs; +pub mod prover; +pub mod queue; +pub mod rate_limit; +pub mod signature; +pub mod types; +pub mod validation; + +// Re-export commonly used types +pub use types::{ + error_codes, AppConfig, AppState, CachedProof, ClaimContractConfig, HealthResponse, JobEntry, + JobMessage, JobResponse, JobStatus, ProveRequest, RateLimitEntry, StatusResponse, + VerifyOnchainConfig, +}; diff --git a/packages/migration-claim/sp1/prover-api/src/main.rs b/packages/migration-claim/sp1/prover-api/src/main.rs index 3b51e6c..dbff601 100644 --- a/packages/migration-claim/sp1/prover-api/src/main.rs +++ b/packages/migration-claim/sp1/prover-api/src/main.rs @@ -1,98 +1,34 @@ +mod cache; +mod config; +mod eligibility; +mod handlers; +mod jobs; +mod prover; +mod queue; +mod rate_limit; +mod signature; +mod types; +mod validation; + use axum::{ - extract::{Path, State}, - http::{HeaderValue, StatusCode}, - response::IntoResponse, + extract::DefaultBodyLimit, + http::HeaderValue, routing::{get, post}, - Json, Router, -}; -use alloy_primitives::{Bytes, FixedBytes}; -use alloy_sol_types::{sol, SolCall}; -use alloy_primitives::U256; -use serde::{Deserialize, Serialize}; -use sp1_sdk::{network::NetworkMode, Prover, ProverClient, SP1Stdin}; -use sr25519_claim_lib::{ss58_decode, ProgramInput, PublicValues}; -use std::{ - collections::HashMap, - env, - sync::Arc, - time::{SystemTime, UNIX_EPOCH}, + Router, }; +use std::{collections::HashMap, net::SocketAddr, sync::Arc}; use tokio::sync::Mutex; use tower_http::cors::{Any, CorsLayer}; use tracing::{error, info}; -use uuid::Uuid; - -const ELF: &[u8] = include_bytes!("../../program/elf/riscv32im-succinct-zkvm-elf"); - -#[derive(Clone)] -struct AppState { - jobs: Arc>>, - verify_proof: bool, - verify_onchain: Option, - prover_mode: String, -} - -#[derive(Clone)] -struct VerifyOnchainConfig { - rpc_url: String, - 
verifier_address: [u8; 20], - program_vkey: [u8; 32], -} - -#[derive(Clone)] -struct JobEntry { - status: JobStatus, - updated_at: u64, -} - -#[derive(Clone)] -enum JobStatus { - Pending, - Running, - Completed { zk_proof: String, public_values: String }, - Failed { error: String }, -} - -#[derive(Deserialize)] -struct ProveRequest { - #[serde(rename = "ss58Address")] - ss58_address: String, - signature: String, - #[serde(rename = "evmAddress")] - evm_address: String, - challenge: String, - amount: String, -} -#[derive(Serialize)] -struct JobResponse { - #[serde(rename = "jobId")] - job_id: String, -} - -#[derive(Serialize)] -struct StatusResponse { - status: String, - #[serde(rename = "zkProof", skip_serializing_if = "Option::is_none")] - zk_proof: Option, - #[serde(rename = "publicValues", skip_serializing_if = "Option::is_none")] - public_values: Option, - #[serde(skip_serializing_if = "Option::is_none")] - error: Option, -} - -#[derive(Serialize)] -struct HealthResponse { - status: String, - prover_mode: String, - verify_proof: bool, - verify_onchain: bool, - jobs: usize, -} - -sol! { - function verifyProof(bytes32 programVKey, bytes publicValues, bytes proofBytes) external view; -} +use cache::start_cache_cleanup_task; +use config::{get_port, load_config, validate_cors}; +use eligibility::EligibilityData; +use handlers::{health, job_status, submit_job}; +use jobs::start_jobs_cleanup_task; +use queue::{JobQueue, WorkerPool}; +use rate_limit::start_rate_limit_cleanup_task; +use types::{AppState, CachedProof, JobEntry, ProofMetrics, RateLimitEntry}; #[tokio::main] async fn main() { @@ -103,405 +39,166 @@ async fn main() { tracing_subscriber::fmt::init(); - let port: u16 = env::var("PORT") - .ok() - .and_then(|value| value.parse().ok()) - .unwrap_or(8080); + // Load configuration + let config = match load_config() { + Ok(c) => c, + Err(e) => { + error!("{}", e); + std::process::exit(1); + } + }; - let prover_mode = env::var("SP1_PROVER").unwrap_or_else(|_| "network".to_string()); - let allow_mock = env::var("ALLOW_MOCK") - .map(|value| value == "true") - .unwrap_or(false); - let verify_proof = env::var("VERIFY_PROOF") - .map(|value| value == "true") - .unwrap_or(false); - let verify_onchain = env::var("VERIFY_ONCHAIN") - .map(|value| value == "true") - .unwrap_or(false); - let cors_origins = env::var("CORS_ALLOWED_ORIGINS").ok(); + // Validate CORS configuration + let cors_origins = match validate_cors(&config.prover_mode) { + Ok(origins) => origins, + Err(e) => { + error!("{}", e); + std::process::exit(1); + } + }; - if prover_mode == "mock" && !allow_mock { - error!("SP1_PROVER=mock is disabled. 
Set ALLOW_MOCK=true to enable."); - std::process::exit(1); - } + let port = get_port(); - if prover_mode == "network" && env::var("NETWORK_PRIVATE_KEY").is_err() { - error!("NETWORK_PRIVATE_KEY is required when SP1_PROVER=network."); - std::process::exit(1); - } + // Log configuration + info!("Configuration loaded:"); + info!(" SP1_PROVER={}", config.prover_mode); + info!(" VERIFY_PROOF={}", config.verify_proof); + info!(" VERIFY_ONCHAIN={}", config.verify_onchain.is_some()); + info!( + " CLAIM_CONTRACT={}", + config + .claim_contract + .as_ref() + .map(|c| format!("0x{}", hex::encode(c.contract_address))) + .unwrap_or_else(|| "disabled".to_string()) + ); + info!(" CACHE_TTL_SECONDS={}", config.cache_ttl_seconds); + info!( + " RATE_LIMIT={}/{}s", + config.rate_limit_max_requests, config.rate_limit_window_seconds + ); + info!( + " QUEUE_CAPACITY={} WORKERS={}", + config.queue_capacity, config.worker_count + ); + info!(" PROOF_TIMEOUT_SECONDS={}", config.proof_timeout_seconds); + info!(" MAX_BODY_BYTES={}", config.max_body_bytes); + info!(" JOBS_TTL_SECONDS={}", config.jobs_ttl_seconds); + info!( + " IP_RATE_LIMIT={}/{}s", + config.ip_rate_limit_max_requests, config.ip_rate_limit_window_seconds + ); + info!(" ELIGIBILITY_FILE={}", config.eligibility_file); + info!(" VERIFY_SIGNATURES={}", config.verify_signatures); + info!( + " CORS_ALLOWED_ORIGINS={}", + cors_origins.as_deref().unwrap_or("*") + ); - let verify_onchain_config = if verify_onchain { - let rpc_url = env::var("VERIFY_ONCHAIN_RPC_URL") - .or_else(|_| env::var("RPC_URL")) - .unwrap_or_else(|_| "http://localhost:8545".to_string()); - let verifier_address = env::var("SP1_VERIFIER_ADDRESS") - .unwrap_or_else(|_| "0x397A5f7f3dBd538f23DE225B51f532c34448dA9B".to_string()); - let program_vkey = env::var("SP1_PROGRAM_VKEY").unwrap_or_else(|_| { - error!("SP1_PROGRAM_VKEY is required when VERIFY_ONCHAIN=true"); - std::process::exit(1); - }); - let program_vkey = parse_hex_bytes::<32>(&program_vkey).unwrap_or_else(|err| { - error!("Invalid SP1_PROGRAM_VKEY: {err}"); - std::process::exit(1); - }); - let verifier_bytes = parse_hex_bytes::<20>(&verifier_address).unwrap_or_else(|err| { - error!("Invalid SP1_VERIFIER_ADDRESS: {err}"); + // Load eligibility data + info!("Loading eligibility data from {}", config.eligibility_file); + let eligibility = match EligibilityData::load_from_file(&config.eligibility_file) { + Ok(data) => { + info!("Loaded {} eligible addresses", data.entry_count()); + Arc::new(data) + } + Err(e) => { + error!("Failed to load eligibility data: {}", e); std::process::exit(1); - }); - - Some(VerifyOnchainConfig { - rpc_url, - verifier_address: verifier_bytes, - program_vkey, - }) - } else { - None + } }; + // Initialize shared state + let jobs: Arc>> = Arc::new(Mutex::new(HashMap::new())); + let cache: Arc>> = Arc::new(Mutex::new(HashMap::new())); + let rate_limits: Arc>> = + Arc::new(Mutex::new(HashMap::new())); + let ip_rate_limits: Arc>> = + Arc::new(Mutex::new(HashMap::new())); + let metrics = Arc::new(ProofMetrics::new()); + let config = Arc::new(config); + + // Start cleanup tasks + start_cache_cleanup_task(cache.clone(), config.cache_ttl_seconds, 60); + start_rate_limit_cleanup_task( + rate_limits.clone(), + config.rate_limit_window_seconds, + config.rate_limit_max_requests, + 60, + ); + // IP rate limit cleanup task + start_rate_limit_cleanup_task( + ip_rate_limits.clone(), + config.ip_rate_limit_window_seconds, + config.ip_rate_limit_max_requests, + 60, + ); + start_jobs_cleanup_task(jobs.clone(), 
config.jobs_ttl_seconds, 60); + + // Create job queue and worker pool + let (job_queue, receiver) = JobQueue::new(config.queue_capacity); + let worker_pool = WorkerPool::new(config.worker_count); + + // Start workers + worker_pool.start( + receiver, + jobs.clone(), + cache.clone(), + config.clone(), + job_queue.size_counter(), + metrics.clone(), + ); + let state = AppState { - jobs: Arc::new(Mutex::new(HashMap::new())), - verify_proof, - verify_onchain: verify_onchain_config, - prover_mode: prover_mode.clone(), + jobs, + cache, + rate_limits, + ip_rate_limits, + eligibility, + config: config.clone(), + job_sender: Some(job_queue.sender.clone()), + queue_size: Some(job_queue.size_counter()), + metrics, }; - let cors = match &cors_origins { - Some(origins) if !origins.is_empty() => { + // Configure CORS + let cors = match cors_origins { + Some(origins) => { let origins: Vec = origins .split(',') .filter_map(|s| s.trim().parse().ok()) .collect(); - if origins.is_empty() { - error!("CORS_ALLOWED_ORIGINS contains no valid origins"); - std::process::exit(1); - } CorsLayer::new() .allow_origin(origins) .allow_methods(Any) .allow_headers(Any) } - _ if prover_mode == "network" => { - // Fail-safe: require explicit CORS configuration in production - error!("CORS_ALLOWED_ORIGINS is required when SP1_PROVER=network"); - std::process::exit(1); - } - _ => { - // Allow all origins only for local/mock testing - CorsLayer::new() - .allow_origin(Any) - .allow_methods(Any) - .allow_headers(Any) - } + None => CorsLayer::new() + .allow_origin(Any) + .allow_methods(Any) + .allow_headers(Any), }; + // Build router let app = Router::new() .route("/", post(submit_job)) .route("/status/:job_id", get(job_status)) .route("/health", get(health)) + .layer(DefaultBodyLimit::max(config.max_body_bytes)) .layer(cors) .with_state(state); - let cors_display = cors_origins.as_deref().unwrap_or("*"); info!("SP1 prover API listening on 0.0.0.0:{port}"); - info!( - "SP1_PROVER={prover_mode} VERIFY_PROOF={verify_proof} VERIFY_ONCHAIN={verify_onchain} CORS_ALLOWED_ORIGINS={cors_display}" - ); let listener = tokio::net::TcpListener::bind(("0.0.0.0", port)) .await .expect("Failed to bind port"); - axum::serve(listener, app) - .await - .expect("Server error"); -} - -async fn submit_job( - State(state): State, - Json(request): Json, -) -> Result, (StatusCode, Json)> { - if request.ss58_address.trim().is_empty() - || request.signature.trim().is_empty() - || request.evm_address.trim().is_empty() - || request.challenge.trim().is_empty() - || request.amount.trim().is_empty() - { - return Err(bad_request("Missing required fields")); - } - - if !is_decimal(&request.amount) { - return Err(bad_request("Amount must be a base-10 string")); - } - - let job_id = Uuid::new_v4().to_string(); - { - let mut jobs = state.jobs.lock().await; - jobs.insert( - job_id.clone(), - JobEntry { - status: JobStatus::Pending, - updated_at: now_ts(), - }, - ); - } - - let jobs = Arc::clone(&state.jobs); - let verify_proof = state.verify_proof; - let verify_onchain = state.verify_onchain.clone(); - let job_id_for_response = job_id.clone(); - tokio::spawn(async move { - update_job(&jobs, &job_id, JobStatus::Running).await; - let result = tokio::task::spawn_blocking(move || { - generate_proof(request, verify_proof, verify_onchain) - }) - .await; - match result { - Ok(Ok((zk_proof, public_values))) => update_job(&jobs, &job_id, JobStatus::Completed { zk_proof, public_values }).await, - Ok(Err(err)) => { - update_job(&jobs, &job_id, JobStatus::Failed { error: err 
}).await - } - Err(err) => update_job( - &jobs, - &job_id, - JobStatus::Failed { - error: format!("Job join error: {err}"), - }, - ) - .await, - } - }); - - Ok(Json(JobResponse { job_id: job_id_for_response })) -} - -async fn job_status( - State(state): State, - Path(job_id): Path, -) -> Result, (StatusCode, Json)> { - let jobs = state.jobs.lock().await; - let entry = jobs.get(&job_id); - match entry { - Some(job) => Ok(Json(status_response(&job.status))), - None => Err((StatusCode::NOT_FOUND, Json(StatusResponse { - status: "not_found".to_string(), - zk_proof: None, - public_values: None, - error: Some("Job not found".to_string()), - }))), - } -} - -async fn health(State(state): State) -> impl IntoResponse { - let jobs = state.jobs.lock().await; - let response = HealthResponse { - status: "ok".to_string(), - prover_mode: state.prover_mode.clone(), - verify_proof: state.verify_proof, - verify_onchain: state.verify_onchain.is_some(), - jobs: jobs.len(), - }; - (StatusCode::OK, Json(response)) -} - -fn generate_proof( - request: ProveRequest, - verify_proof: bool, - verify_onchain: Option, -) -> Result<(String, String), String> { - let signature = parse_hex_bytes::<64>(&request.signature).map_err(err_to_string)?; - let evm_address = parse_hex_bytes::<20>(&request.evm_address).map_err(err_to_string)?; - let challenge = parse_hex_bytes::<32>(&request.challenge).map_err(err_to_string)?; - let amount = parse_amount(&request.amount).map_err(err_to_string)?; - let ss58_address = request.ss58_address; - - let input = ProgramInput { - substrate_address: ss58_address.clone(), - signature, - evm_address, - amount, - challenge, - }; - - // Explicitly use Mainnet mode instead of relying on default (Reserved) - // This ensures we use the correct domain for the mainnet network - let client = ProverClient::builder() - .network_for(NetworkMode::Mainnet) - .build(); - let (pk, vk) = client.setup(ELF); - - let mut stdin = SP1Stdin::new(); - stdin.write(&input); - - let proof = client - .prove(&pk, &stdin) - .groth16() - .run() - .map_err(err_to_string)?; - - if verify_proof { - client.verify(&proof, &vk).map_err(err_to_string)?; - } - - // Log the committed public values for debugging - let committed_public_values = proof.public_values.to_vec(); - info!("Committed public values (hex): 0x{}", hex::encode(&committed_public_values)); - info!("Committed public values length: {} bytes", committed_public_values.len()); - - // Decode and log the individual fields - if let Ok(decoded) = PublicValues::abi_decode(&committed_public_values) { - info!("Decoded pubkey: 0x{}", hex::encode(&decoded.pubkey)); - info!("Decoded evm_address: 0x{}", hex::encode(&decoded.evm_address)); - info!("Decoded amount: 0x{}", hex::encode(&decoded.amount)); - info!("Decoded challenge: 0x{}", hex::encode(&decoded.challenge)); - } - - let proof_bytes = proof.bytes(); - let committed_public_values = committed_public_values.clone(); - - if let Some(config) = verify_onchain { - let pubkey = ss58_decode(&ss58_address).map_err(err_to_string)?; - let public_values = PublicValues { - pubkey, - evm_address, - amount, - challenge, - }; - verify_onchain_proof(&config, public_values, proof_bytes.clone()) - .map_err(|err| format!("On-chain verify failed: {err}"))?; - } - // Return both proof and public values - let proof_hex = format!("0x{}", hex::encode(proof_bytes)); - let public_values_hex = format!("0x{}", hex::encode(&committed_public_values)); - Ok((proof_hex, public_values_hex)) -} - -fn verify_onchain_proof( - config: &VerifyOnchainConfig, - 
public_values: PublicValues, - proof: Vec, -) -> Result<(), String> { - let call = verifyProofCall { - programVKey: FixedBytes::<32>::from_slice(&config.program_vkey), - publicValues: Bytes::from(public_values.abi_encode()), - proofBytes: Bytes::from(proof), - }; - let data = format!("0x{}", hex::encode(call.abi_encode())); - let to = format!("0x{}", hex::encode(config.verifier_address)); - - let payload = serde_json::json!({ - "jsonrpc": "2.0", - "id": 1, - "method": "eth_call", - "params": [ - { "to": to, "data": data }, - "latest" - ] - }); - - let client = reqwest::blocking::Client::new(); - let response = client - .post(&config.rpc_url) - .json(&payload) - .send() - .map_err(err_to_string)?; - - let status = response.status(); - let body: serde_json::Value = response.json().map_err(err_to_string)?; - if !status.is_success() { - return Err(format!("RPC HTTP error {status}")); - } - - if let Some(error) = body.get("error") { - return Err(format!("eth_call reverted: {error}")); - } - - if body.get("result").is_none() { - return Err("Missing eth_call result".to_string()); - } - - Ok(()) -} - -async fn update_job( - jobs: &Arc>>, - job_id: &str, - status: JobStatus, -) { - let mut jobs = jobs.lock().await; - if let Some(entry) = jobs.get_mut(job_id) { - entry.status = status; - entry.updated_at = now_ts(); - } -} - -fn status_response(status: &JobStatus) -> StatusResponse { - match status { - JobStatus::Pending => StatusResponse { - status: "pending".to_string(), - zk_proof: None, - public_values: None, - error: None, - }, - JobStatus::Running => StatusResponse { - status: "running".to_string(), - zk_proof: None, - public_values: None, - error: None, - }, - JobStatus::Completed { zk_proof, public_values } => StatusResponse { - status: "completed".to_string(), - zk_proof: Some(zk_proof.clone()), - public_values: Some(public_values.clone()), - error: None, - }, - JobStatus::Failed { error } => StatusResponse { - status: "failed".to_string(), - zk_proof: None, - public_values: None, - error: Some(error.clone()), - }, - } -} - -fn bad_request(message: &str) -> (StatusCode, Json) { - ( - StatusCode::BAD_REQUEST, - Json(StatusResponse { - status: "failed".to_string(), - zk_proof: None, - public_values: None, - error: Some(message.to_string()), - }), + // Use into_make_service_with_connect_info to enable IP extraction + axum::serve( + listener, + app.into_make_service_with_connect_info::(), ) -} - -fn parse_hex_bytes(value: &str) -> anyhow::Result<[u8; N]> { - let trimmed = value.strip_prefix("0x").unwrap_or(value); - let bytes = hex::decode(trimmed)?; - if bytes.len() != N { - anyhow::bail!("Expected {} bytes, got {}", N, bytes.len()); - } - let mut out = [0u8; N]; - out.copy_from_slice(&bytes); - Ok(out) -} - -fn parse_amount(value: &str) -> anyhow::Result<[u8; 32]> { - let amount: U256 = value.parse().map_err(|_| anyhow::anyhow!("Invalid amount"))?; - Ok(amount.to_be_bytes()) -} - -fn err_to_string(err: impl std::fmt::Display) -> String { - err.to_string() -} - -fn is_decimal(value: &str) -> bool { - !value.is_empty() && value.chars().all(|c| c.is_ascii_digit()) -} - -fn now_ts() -> u64 { - SystemTime::now() - .duration_since(UNIX_EPOCH) - .map(|d| d.as_secs()) - .unwrap_or(0) + .await + .expect("Server error"); } diff --git a/packages/migration-claim/sp1/prover-api/src/prover.rs b/packages/migration-claim/sp1/prover-api/src/prover.rs new file mode 100644 index 0000000..b5945da --- /dev/null +++ b/packages/migration-claim/sp1/prover-api/src/prover.rs @@ -0,0 +1,370 @@ +use 
alloy_primitives::{Bytes, FixedBytes}; +use alloy_sol_types::{sol, SolCall}; +use sp1_sdk::{network::NetworkMode, Prover, ProverClient, SP1Stdin}; +use sr25519_claim_lib::{ss58_decode, ProgramInput, PublicValues}; +use std::time::Duration; +use tracing::info; + +use crate::types::{ClaimContractConfig, ProveRequest, VerifyOnchainConfig}; +use crate::validation::parse_hex_bytes; + +const ELF: &[u8] = include_bytes!("../../program/elf/riscv32im-succinct-zkvm-elf"); + +sol! { + function verifyProof(bytes32 programVKey, bytes publicValues, bytes proofBytes) external view; + function claimed(bytes32 pubkey) external view returns (uint256); +} + +/// Generate a ZK proof for the given request +/// +/// # Arguments +/// * `request` - The proof request containing signature and claim details +/// * `verify_proof` - Whether to verify the proof after generation +/// * `verify_onchain` - Optional on-chain verification config +/// * `prover_mode` - The prover mode: "mock", "local", or "network" +pub fn generate_proof( + request: ProveRequest, + verify_proof: bool, + verify_onchain: Option, + prover_mode: &str, +) -> Result<(String, String), String> { + let signature = parse_hex_bytes::<64>(&request.signature).map_err(err_to_string)?; + let evm_address = parse_hex_bytes::<20>(&request.evm_address).map_err(err_to_string)?; + let challenge = parse_hex_bytes::<32>(&request.challenge).map_err(err_to_string)?; + let amount = crate::validation::parse_amount(&request.amount).map_err(err_to_string)?; + let ss58_address = request.ss58_address; + + let input = ProgramInput { + substrate_address: ss58_address.clone(), + signature, + evm_address, + amount, + challenge, + }; + + let mut stdin = SP1Stdin::new(); + stdin.write(&input); + + // Generate proof based on configured mode + // For network mode, we must explicitly use Mainnet - from_env() defaults to Reserved + // which has an invalid domain for the mainnet network + let (proof, _vk) = match prover_mode { + "mock" => { + info!("Using SP1 mock prover (test mode)"); + std::env::set_var("SP1_PROVER", "mock"); + let client = ProverClient::from_env(); + let (pk, vk) = client.setup(ELF); + let proof = client + .prove(&pk, &stdin) + .groth16() + .run() + .map_err(err_to_string)?; + if verify_proof { + client.verify(&proof, &vk).map_err(err_to_string)?; + } + (proof, vk) + } + "local" => { + info!("Using SP1 local prover"); + std::env::set_var("SP1_PROVER", "local"); + let client = ProverClient::from_env(); + let (pk, vk) = client.setup(ELF); + let proof = client + .prove(&pk, &stdin) + .groth16() + .run() + .map_err(err_to_string)?; + if verify_proof { + client.verify(&proof, &vk).map_err(err_to_string)?; + } + (proof, vk) + } + _ => { + info!("Using SP1 network prover (mainnet)"); + // Explicitly use Mainnet mode - from_env() defaults to Reserved + // which has an invalid domain for the mainnet network + let client = ProverClient::builder() + .network_for(NetworkMode::Mainnet) + .build(); + let (pk, vk) = client.setup(ELF); + let proof = client + .prove(&pk, &stdin) + .groth16() + .run() + .map_err(err_to_string)?; + if verify_proof { + client.verify(&proof, &vk).map_err(err_to_string)?; + } + (proof, vk) + } + }; + + // Log the committed public values for debugging + let committed_public_values = proof.public_values.to_vec(); + info!( + "Committed public values (hex): 0x{}", + hex::encode(&committed_public_values) + ); + info!( + "Committed public values length: {} bytes", + committed_public_values.len() + ); + + // Decode and log the individual fields + if let 
Ok(decoded) = PublicValues::abi_decode(&committed_public_values) { + info!("Decoded pubkey: 0x{}", hex::encode(&decoded.pubkey)); + info!( + "Decoded evm_address: 0x{}", + hex::encode(&decoded.evm_address) + ); + info!("Decoded amount: 0x{}", hex::encode(&decoded.amount)); + info!("Decoded challenge: 0x{}", hex::encode(&decoded.challenge)); + } + + let proof_bytes = proof.bytes(); + + if let Some(config) = verify_onchain { + let pubkey = ss58_decode(&ss58_address).map_err(err_to_string)?; + let public_values = PublicValues { + pubkey, + evm_address, + amount, + challenge, + }; + verify_onchain_proof(&config, public_values, proof_bytes.clone()) + .map_err(|err| format!("On-chain verify failed: {err}"))?; + } + + let proof_hex = format!("0x{}", hex::encode(proof_bytes)); + let public_values_hex = format!("0x{}", hex::encode(&committed_public_values)); + Ok((proof_hex, public_values_hex)) +} + +/// Verify a proof on-chain using eth_call +fn verify_onchain_proof( + config: &VerifyOnchainConfig, + public_values: PublicValues, + proof: Vec, +) -> Result<(), String> { + let call = verifyProofCall { + programVKey: FixedBytes::<32>::from_slice(&config.program_vkey), + publicValues: Bytes::from(public_values.abi_encode()), + proofBytes: Bytes::from(proof), + }; + let data = format!("0x{}", hex::encode(call.abi_encode())); + let to = format!("0x{}", hex::encode(config.verifier_address)); + + let payload = serde_json::json!({ + "jsonrpc": "2.0", + "id": 1, + "method": "eth_call", + "params": [ + { "to": to, "data": data }, + "latest" + ] + }); + + let client = reqwest::blocking::Client::builder() + .timeout(Duration::from_secs(config.timeout_seconds)) + .build() + .map_err(err_to_string)?; + + let response = client + .post(&config.rpc_url) + .json(&payload) + .send() + .map_err(err_to_string)?; + + let status = response.status(); + let body: serde_json::Value = response.json().map_err(err_to_string)?; + if !status.is_success() { + return Err(format!("RPC HTTP error {status}")); + } + + if let Some(error) = body.get("error") { + return Err(format!("eth_call reverted: {error}")); + } + + if body.get("result").is_none() { + return Err("Missing eth_call result".to_string()); + } + + Ok(()) +} + +/// Check if a user has already claimed tokens on-chain +pub async fn check_already_claimed( + config: &ClaimContractConfig, + ss58_address: &str, + timeout_seconds: u64, +) -> Result { + // Decode SS58 to get the 32-byte pubkey + let pubkey = ss58_decode(ss58_address).map_err(err_to_string)?; + + // Build the eth_call for claimed(bytes32) + let call = claimedCall { + pubkey: FixedBytes::<32>::from_slice(&pubkey), + }; + let data = format!("0x{}", hex::encode(call.abi_encode())); + let to = format!("0x{}", hex::encode(config.contract_address)); + + let payload = serde_json::json!({ + "jsonrpc": "2.0", + "id": 1, + "method": "eth_call", + "params": [ + { "to": to, "data": data }, + "latest" + ] + }); + + let client = reqwest::Client::builder() + .timeout(Duration::from_secs(timeout_seconds)) + .build() + .map_err(err_to_string)?; + + let response = client + .post(&config.rpc_url) + .json(&payload) + .send() + .await + .map_err(err_to_string)?; + + let status = response.status(); + let body: serde_json::Value = response.json().await.map_err(err_to_string)?; + + if !status.is_success() { + return Err(format!("RPC HTTP error {status}")); + } + + if let Some(error) = body.get("error") { + return Err(format!("eth_call error: {error}")); + } + + let result = body + .get("result") + .and_then(|v| v.as_str()) + 
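+        // eth_call returns the ABI-encoded uint256 as a 0x-prefixed hex string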
.ok_or("Missing eth_call result")?; + + // Parse the returned uint256 - if > 0, user has claimed + let result_bytes = hex::decode(result.strip_prefix("0x").unwrap_or(result)) + .map_err(|e| format!("Invalid hex result: {e}"))?; + + // uint256 is 32 bytes, check if any byte is non-zero + let claimed_amount = result_bytes.iter().any(|&b| b != 0); + + Ok(claimed_amount) +} + +fn err_to_string(err: impl std::fmt::Display) -> String { + err.to_string() +} + +#[cfg(test)] +mod tests { + use super::*; + use httpmock::{Method::POST, MockServer}; + use serde_json::json; + + // Note: These tests require the SP1 SDK which is not available in unit tests + // Integration tests should be used for actual proof generation + + #[test] + fn test_elf_is_loaded() { + // Just verify the ELF is included + assert!(!ELF.is_empty()); + } + + #[test] + fn test_verify_onchain_proof_success() { + let server = MockServer::start(); + let mock = server.mock(|when, then| { + when.method(POST); + then.status(200).json_body(json!({"result": "0x1"})); + }); + + let config = VerifyOnchainConfig { + rpc_url: server.url("/"), + verifier_address: [0x11; 20], + program_vkey: [0x22; 32], + timeout_seconds: 5, + }; + + let public_values = PublicValues { + pubkey: [0x33; 32], + evm_address: [0x44; 20], + amount: [0x55; 32], + challenge: [0x66; 32], + }; + + let result = verify_onchain_proof(&config, public_values, vec![0xaa, 0xbb]); + assert!(result.is_ok()); + mock.assert(); + } + + #[test] + fn test_verify_onchain_proof_reverted() { + let server = MockServer::start(); + let mock = server.mock(|when, then| { + when.method(POST); + then.status(200) + .json_body(json!({"error": {"code": -32000, "message": "revert"}})); + }); + + let config = VerifyOnchainConfig { + rpc_url: server.url("/"), + verifier_address: [0x11; 20], + program_vkey: [0x22; 32], + timeout_seconds: 5, + }; + + let public_values = PublicValues { + pubkey: [0x33; 32], + evm_address: [0x44; 20], + amount: [0x55; 32], + challenge: [0x66; 32], + }; + + let err = verify_onchain_proof(&config, public_values, vec![0xaa, 0xbb]) + .expect_err("expected eth_call error"); + assert!(err.contains("eth_call reverted")); + mock.assert(); + } + + #[tokio::test] + async fn test_check_already_claimed_true_false() { + let ss58_address = "5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY"; + + let claimed_server = MockServer::start(); + let claimed_mock = claimed_server.mock(|when, then| { + when.method(POST); + then.status(200) + .json_body(json!({"result": format!("0x{}01", "00".repeat(31))})); + }); + let claimed_config = ClaimContractConfig { + rpc_url: claimed_server.url("/"), + contract_address: [0x12; 20], + }; + let claimed = check_already_claimed(&claimed_config, ss58_address, 5) + .await + .unwrap(); + assert!(claimed); + claimed_mock.assert(); + + let unclaimed_server = MockServer::start(); + let unclaimed_mock = unclaimed_server.mock(|when, then| { + when.method(POST); + then.status(200) + .json_body(json!({"result": format!("0x{}", "00".repeat(32))})); + }); + let unclaimed_config = ClaimContractConfig { + rpc_url: unclaimed_server.url("/"), + contract_address: [0x12; 20], + }; + let claimed = check_already_claimed(&unclaimed_config, ss58_address, 5) + .await + .unwrap(); + assert!(!claimed); + unclaimed_mock.assert(); + } +} diff --git a/packages/migration-claim/sp1/prover-api/src/queue.rs b/packages/migration-claim/sp1/prover-api/src/queue.rs new file mode 100644 index 0000000..b1b4f59 --- /dev/null +++ b/packages/migration-claim/sp1/prover-api/src/queue.rs @@ -0,0 
+1,440 @@ +use std::collections::HashMap; +use std::sync::atomic::{AtomicUsize, Ordering}; +use std::sync::Arc; +use std::time::Duration; +use tokio::sync::{mpsc, Mutex, OwnedSemaphorePermit}; +use tracing::{error, info, warn}; + +use crate::prover::generate_proof; +use crate::types::{ + error_codes, AppConfig, CachedProof, JobEntry, JobMessage, JobStatus, ProofMetrics, +}; +use crate::validation::cache_key; + +/// Job queue for managing proof generation work +pub struct JobQueue { + pub sender: mpsc::Sender, + pub queue_size: Arc, + pub capacity: usize, +} + +/// Result of trying to enqueue a job +#[derive(Debug)] +pub enum EnqueueResult { + /// Job was successfully queued + Queued, + /// Queue is full + QueueFull, +} + +impl JobQueue { + /// Create a new job queue with the given capacity + pub fn new(capacity: usize) -> (Self, mpsc::Receiver) { + let (sender, receiver) = mpsc::channel(capacity); + let queue = Self { + sender, + queue_size: Arc::new(AtomicUsize::new(0)), + capacity, + }; + (queue, receiver) + } + + /// Try to enqueue a job + pub async fn try_enqueue(&self, message: JobMessage) -> EnqueueResult { + // Check if queue is full before trying to send + let current_size = self.queue_size.load(Ordering::SeqCst); + if current_size >= self.capacity { + return EnqueueResult::QueueFull; + } + + match self.sender.try_send(message) { + Ok(()) => { + self.queue_size.fetch_add(1, Ordering::SeqCst); + EnqueueResult::Queued + } + Err(mpsc::error::TrySendError::Full(_)) => EnqueueResult::QueueFull, + Err(mpsc::error::TrySendError::Closed(_)) => { + error!("Job queue channel closed"); + EnqueueResult::QueueFull + } + } + } + + /// Get a handle to the queue size counter + pub fn size_counter(&self) -> Arc { + self.queue_size.clone() + } + + /// Create a queue wrapper from an existing sender and shared size counter + pub fn from_sender( + sender: mpsc::Sender, + capacity: usize, + queue_size: Arc, + ) -> Self { + Self { + sender, + queue_size, + capacity, + } + } +} + +/// Worker pool for processing jobs +pub struct WorkerPool { + worker_count: usize, +} + +impl WorkerPool { + pub fn new(worker_count: usize) -> Self { + Self { worker_count } + } + + /// Start workers that process jobs from the receiver + pub fn start( + &self, + mut receiver: mpsc::Receiver, + jobs: Arc>>, + cache: Arc>>, + config: Arc, + queue_size: Arc, + metrics: Arc, + ) { + let worker_count = self.worker_count; + let proof_timeout = Duration::from_secs(config.proof_timeout_seconds); + + // Spawn a task that distributes work to workers + let jobs_clone = jobs.clone(); + let cache_clone = cache.clone(); + let config_clone = config.clone(); + let metrics_clone = metrics.clone(); + + tokio::spawn(async move { + info!("Starting {} workers for job processing", worker_count); + + // Use a semaphore to limit concurrent proof generation + // IMPORTANT: Permits are held until the underlying task completes, + // even after timeout. This prevents unbounded concurrency. 
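+            // A minimal sketch of the owned-permit pattern used below (the permit
+            // count and `do_work` are illustrative, error handling elided):
+            //
+            //     let sem = Arc::new(tokio::sync::Semaphore::new(2));
+            //     let permit = sem.clone().acquire_owned().await.unwrap();
+            //     tokio::spawn(async move {
+            //         let _permit = permit; // slot frees only when this task ends
+            //         do_work().await;      // stand-in for process_job
+            //     });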
+ let semaphore = Arc::new(tokio::sync::Semaphore::new(worker_count)); + + while let Some(message) = receiver.recv().await { + queue_size.fetch_sub(1, Ordering::SeqCst); + + // Acquire an owned permit that can be moved into the spawned task + // The permit will be held until process_job completes (including + // waiting for timed-out blocking tasks to finish) + let permit = match semaphore.clone().acquire_owned().await { + Ok(p) => p, + Err(_) => { + error!("Semaphore closed"); + continue; + } + }; + + let jobs = jobs_clone.clone(); + let cache = cache_clone.clone(); + let config = config_clone.clone(); + let metrics = metrics_clone.clone(); + let timeout = proof_timeout; + + tokio::spawn(async move { + process_job(message, jobs, cache, config, timeout, metrics, permit).await; + }); + } + + warn!("Job receiver closed, workers stopping"); + }); + } +} + +/// Process a single job +/// +/// The `_permit` parameter holds the semaphore permit for the duration of this function. +/// This is critical: the permit is NOT released until the underlying blocking task +/// completes, even if a timeout occurs. This prevents unbounded concurrency. +async fn process_job( + message: JobMessage, + jobs: Arc>>, + cache: Arc>>, + config: Arc, + timeout: Duration, + metrics: Arc, + _permit: OwnedSemaphorePermit, // Held until function returns (after task completes) +) { + let job_id = message.job_id.clone(); + let cache_key = cache_key(&message.request); + + // Update job to running + update_job(&jobs, &job_id, JobStatus::Running).await; + + // Generate proof with timeout + let verify_proof = config.verify_proof; + let verify_onchain = config.verify_onchain.clone(); + let prover_mode = config.prover_mode.clone(); + let request = message.request.clone(); + + // Spawn the blocking task - we keep the handle to track completion after timeout + let handle = tokio::task::spawn_blocking(move || { + generate_proof(request, verify_proof, verify_onchain, &prover_mode) + }); + + // Pin the handle so we can use it with select! and still access it after timeout + tokio::pin!(handle); + + // Race timeout vs completion using select! + // biased ensures we check completion first to avoid spurious timeouts + let result = tokio::select! 
{ + biased; + res = &mut handle => Some(res), + _ = tokio::time::sleep(timeout) => None, + }; + + match result { + Some(Ok(Ok((zk_proof, public_values)))) => { + // Record successful completion + metrics.record_completion(); + + // Store in cache + { + let mut c = cache.lock().await; + c.insert( + cache_key, + CachedProof::new(zk_proof.clone(), public_values.clone()), + ); + } + + // Update job status + update_job( + &jobs, + &job_id, + JobStatus::Completed { + zk_proof, + public_values, + }, + ) + .await; + } + Some(Ok(Err(err))) => { + error!("Proof generation failed for job {}: {}", job_id, err); + update_job( + &jobs, + &job_id, + JobStatus::Failed { + error: format!("{}: {}", error_codes::PROOF_FAILED, err), + }, + ) + .await; + } + Some(Err(join_err)) => { + error!("Job {} panicked: {}", job_id, join_err); + update_job( + &jobs, + &job_id, + JobStatus::Failed { + error: format!("{}: task panicked", error_codes::INTERNAL_ERROR), + }, + ) + .await; + } + None => { + // Timeout fired - mark job as failed immediately for user feedback + metrics.record_timeout(); + + error!( + "Job {} timed out after {:?} - waiting for task to complete before releasing permit", + job_id, + timeout, + ); + + update_job( + &jobs, + &job_id, + JobStatus::Failed { + error: format!( + "{}: proof generation exceeded {} seconds", + error_codes::TIMEOUT, + timeout.as_secs() + ), + }, + ) + .await; + + // CRITICAL: Wait for the blocking task to actually finish before releasing permit. + // This ensures true concurrency limits are enforced. The job is already marked + // as failed for the user, but we prevent resource exhaustion by not starting + // new jobs until this one completes. + // + // Note: spawn_blocking tasks cannot be cancelled. We must wait for completion. + // This may take a long time (SP1 network proofs can take 10+ minutes), but + // it's necessary to prevent unbounded cost and resource usage. + warn!( + "Job {} timed out, holding permit while waiting for blocking task to complete (timed_out_still_running={})", + job_id, + metrics.timed_out_still_running.load(Ordering::Relaxed) + ); + + // Wait for the actual task to complete (handle is still valid after select!) 
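+            // Note: a timed-out spawn_blocking task keeps running on its thread;
+            // dropping the JoinHandle would merely detach it. Awaiting the pinned
+            // handle below picks up whatever result the task eventually produces.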
+ match handle.await { + Ok(Ok((zk_proof, public_values))) => { + metrics.record_completion(); + { + let mut c = cache.lock().await; + c.insert( + cache_key, + CachedProof::new(zk_proof.clone(), public_values.clone()), + ); + } + update_job( + &jobs, + &job_id, + JobStatus::Completed { + zk_proof, + public_values, + }, + ) + .await; + } + Ok(Err(err)) => { + error!("Proof generation failed for job {} after timeout: {}", job_id, err); + update_job( + &jobs, + &job_id, + JobStatus::Failed { + error: format!("{}: {}", error_codes::PROOF_FAILED, err), + }, + ) + .await; + } + Err(join_err) => { + error!( + "Job {} panicked after timeout: {}", + job_id, join_err + ); + update_job( + &jobs, + &job_id, + JobStatus::Failed { + error: format!("{}: task panicked", error_codes::INTERNAL_ERROR), + }, + ) + .await; + } + } + + // Task completed - update metrics + metrics.decrement_timed_out_still_running(); + info!( + "Job {} blocking task finally completed after timeout (timed_out_still_running={})", + job_id, + metrics.timed_out_still_running.load(Ordering::Relaxed) + ); + } + } + // Permit is dropped here, releasing the semaphore slot +} + +/// Update a job's status +async fn update_job(jobs: &Arc>>, job_id: &str, status: JobStatus) { + let mut jobs = jobs.lock().await; + if let Some(entry) = jobs.get_mut(job_id) { + entry.status = status; + entry.updated_at = crate::types::now_ts(); + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::types::ProveRequest; + + fn test_request() -> ProveRequest { + ProveRequest { + ss58_address: "5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY".to_string(), + signature: format!("0x{}", "ab".repeat(64)), + evm_address: "0x742d35Cc6634C0532925a3b844Bc9e7595f4a3b2".to_string(), + challenge: format!("0x{}", "12".repeat(32)), + amount: "1000000000000000000".to_string(), + } + } + + #[tokio::test] + async fn test_queue_enqueue() { + let (queue, _receiver) = JobQueue::new(10); + + let result = queue + .try_enqueue(JobMessage { + job_id: "job1".to_string(), + request: test_request(), + }) + .await; + + assert!(matches!(result, EnqueueResult::Queued)); + assert_eq!(queue.queue_size.load(Ordering::SeqCst), 1); + } + + #[tokio::test] + async fn test_queue_full() { + let (queue, _receiver) = JobQueue::new(2); + + // Fill the queue + queue + .try_enqueue(JobMessage { + job_id: "job1".to_string(), + request: test_request(), + }) + .await; + queue + .try_enqueue(JobMessage { + job_id: "job2".to_string(), + request: test_request(), + }) + .await; + + // Third should fail + let result = queue + .try_enqueue(JobMessage { + job_id: "job3".to_string(), + request: test_request(), + }) + .await; + + assert!(matches!(result, EnqueueResult::QueueFull)); + assert_eq!(queue.queue_size.load(Ordering::SeqCst), 2); + } + + #[tokio::test] + async fn test_queue_dequeue_updates_size() { + let (queue, mut receiver) = JobQueue::new(10); + + queue + .try_enqueue(JobMessage { + job_id: "job1".to_string(), + request: test_request(), + }) + .await; + + assert_eq!(queue.queue_size.load(Ordering::SeqCst), 1); + + // Receive the message and decrement size (simulating worker behavior) + let _msg = receiver.recv().await; + queue.queue_size.fetch_sub(1, Ordering::SeqCst); + + assert_eq!(queue.queue_size.load(Ordering::SeqCst), 0); + } + + #[tokio::test] + async fn test_multiple_enqueues() { + let (queue, _receiver) = JobQueue::new(100); + + for i in 0..50 { + let result = queue + .try_enqueue(JobMessage { + job_id: format!("job{}", i), + request: test_request(), + }) + .await; + 
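+        // try_enqueue bumps the shared size counter; workers decrement it on recv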
assert!(matches!(result, EnqueueResult::Queued)); + } + + assert_eq!(queue.queue_size.load(Ordering::SeqCst), 50); + } +} diff --git a/packages/migration-claim/sp1/prover-api/src/rate_limit.rs b/packages/migration-claim/sp1/prover-api/src/rate_limit.rs new file mode 100644 index 0000000..a6d1583 --- /dev/null +++ b/packages/migration-claim/sp1/prover-api/src/rate_limit.rs @@ -0,0 +1,263 @@ +use std::collections::HashMap; +use std::sync::Arc; +use tokio::sync::Mutex; +use tracing::info; + +use crate::types::{now_ts, RateLimitEntry}; + +/// Rate limiter for controlling request frequency +pub struct RateLimiter { + limits: Arc>>, + window_seconds: u64, + max_requests: u32, +} + +/// Result of rate limit check +#[derive(Debug, Clone)] +pub enum RateLimitResult { + /// Request is allowed + Allowed, + /// Request is rate limited, with seconds until reset + Limited { retry_after: u64 }, +} + +impl RateLimiter { + pub fn new( + limits: Arc>>, + window_seconds: u64, + max_requests: u32, + ) -> Self { + Self { + limits, + window_seconds, + max_requests, + } + } + + /// Check if a request is allowed and update the rate limit state + pub async fn check_and_update(&self, key: &str) -> RateLimitResult { + let mut limits = self.limits.lock().await; + let now = now_ts(); + + match limits.get_mut(key) { + Some(entry) => { + let elapsed = now - entry.last_request_at; + + if elapsed >= self.window_seconds { + // Window expired, reset + entry.last_request_at = now; + entry.request_count = 1; + RateLimitResult::Allowed + } else if entry.request_count >= self.max_requests { + // Rate limited + let retry_after = self.window_seconds - elapsed; + RateLimitResult::Limited { retry_after } + } else { + // Within limit, increment + entry.request_count += 1; + RateLimitResult::Allowed + } + } + None => { + // First request from this key + limits.insert(key.to_string(), RateLimitEntry::new()); + RateLimitResult::Allowed + } + } + } + + /// Clean up expired entries + pub async fn cleanup(&self) -> usize { + let mut limits = self.limits.lock().await; + let before = limits.len(); + + // Remove entries that haven't been used in 2x the window + let expiry_threshold = now_ts() - (self.window_seconds * 2); + limits.retain(|_, v| v.last_request_at > expiry_threshold); + + let removed = before - limits.len(); + if removed > 0 { + info!("Rate limit cleanup: removed {} stale entries", removed); + } + removed + } +} + +/// Start a background task to periodically clean up rate limit entries +pub fn start_rate_limit_cleanup_task( + limits: Arc>>, + window_seconds: u64, + max_requests: u32, + cleanup_interval_seconds: u64, +) { + let limiter = RateLimiter::new(limits, window_seconds, max_requests); + tokio::spawn(async move { + let mut interval = + tokio::time::interval(std::time::Duration::from_secs(cleanup_interval_seconds)); + loop { + interval.tick().await; + limiter.cleanup().await; + } + }); +} + +#[cfg(test)] +mod tests { + use super::*; + + #[tokio::test] + async fn test_first_request_allowed() { + let limits = Arc::new(Mutex::new(HashMap::new())); + let limiter = RateLimiter::new(limits, 60, 3); + + let result = limiter.check_and_update("user1").await; + assert!(matches!(result, RateLimitResult::Allowed)); + } + + #[tokio::test] + async fn test_within_limit_allowed() { + let limits = Arc::new(Mutex::new(HashMap::new())); + let limiter = RateLimiter::new(limits, 60, 3); + + // First 3 requests should be allowed + assert!(matches!( + limiter.check_and_update("user1").await, + RateLimitResult::Allowed + )); + assert!(matches!( + 
limiter.check_and_update("user1").await, + RateLimitResult::Allowed + )); + assert!(matches!( + limiter.check_and_update("user1").await, + RateLimitResult::Allowed + )); + } + + #[tokio::test] + async fn test_exceeds_limit_blocked() { + let limits = Arc::new(Mutex::new(HashMap::new())); + let limiter = RateLimiter::new(limits, 60, 3); + + // Use up the limit + limiter.check_and_update("user1").await; + limiter.check_and_update("user1").await; + limiter.check_and_update("user1").await; + + // 4th request should be blocked + let result = limiter.check_and_update("user1").await; + match result { + RateLimitResult::Limited { retry_after } => { + assert!(retry_after > 0); + assert!(retry_after <= 60); + } + _ => panic!("Expected rate limited"), + } + } + + #[tokio::test] + async fn test_different_users_independent() { + let limits = Arc::new(Mutex::new(HashMap::new())); + let limiter = RateLimiter::new(limits, 60, 1); + + // User1 uses their limit + limiter.check_and_update("user1").await; + + // User2 should still be allowed + let result = limiter.check_and_update("user2").await; + assert!(matches!(result, RateLimitResult::Allowed)); + + // User1 should be blocked + let result = limiter.check_and_update("user1").await; + assert!(matches!(result, RateLimitResult::Limited { .. })); + } + + #[tokio::test] + async fn test_window_reset() { + let limits = Arc::new(Mutex::new(HashMap::new())); + + // Insert an old entry (window expired) + { + let mut l = limits.lock().await; + l.insert( + "user1".to_string(), + RateLimitEntry { + last_request_at: now_ts() - 120, // 2 minutes ago + request_count: 10, + }, + ); + } + + let limiter = RateLimiter::new(limits.clone(), 60, 3); + + // Should be allowed because window expired + let result = limiter.check_and_update("user1").await; + assert!(matches!(result, RateLimitResult::Allowed)); + + // Check that count was reset + let l = limits.lock().await; + assert_eq!(l.get("user1").unwrap().request_count, 1); + } + + #[tokio::test] + async fn test_cleanup() { + let limits = Arc::new(Mutex::new(HashMap::new())); + + // Insert a mix of fresh and stale entries + { + let mut l = limits.lock().await; + l.insert( + "fresh".to_string(), + RateLimitEntry { + last_request_at: now_ts(), + request_count: 1, + }, + ); + l.insert( + "stale".to_string(), + RateLimitEntry { + last_request_at: now_ts() - 300, // 5 minutes ago + request_count: 1, + }, + ); + } + + let limiter = RateLimiter::new(limits.clone(), 60, 3); // 2x window = 120s + + assert_eq!(limits.lock().await.len(), 2); + let removed = limiter.cleanup().await; + assert_eq!(removed, 1); + assert_eq!(limits.lock().await.len(), 1); + } + + #[tokio::test] + async fn test_retry_after_calculation() { + let limits = Arc::new(Mutex::new(HashMap::new())); + + // Insert an entry at specific time + let start_time = now_ts(); + { + let mut l = limits.lock().await; + l.insert( + "user1".to_string(), + RateLimitEntry { + last_request_at: start_time, + request_count: 3, + }, + ); + } + + let limiter = RateLimiter::new(limits, 60, 3); + + // This will check and update, but since limit is reached, will return Limited + let result = limiter.check_and_update("user1").await; + match result { + RateLimitResult::Limited { retry_after } => { + // Should be close to 60 seconds (might be slightly less due to timing) + assert!(retry_after <= 60); + assert!(retry_after >= 59); + } + _ => panic!("Expected rate limited"), + } + } +} diff --git a/packages/migration-claim/sp1/prover-api/src/signature.rs 
diff --git a/packages/migration-claim/sp1/prover-api/src/signature.rs b/packages/migration-claim/sp1/prover-api/src/signature.rs
new file mode 100644
index 0000000..0c6199e
--- /dev/null
+++ b/packages/migration-claim/sp1/prover-api/src/signature.rs
@@ -0,0 +1,95 @@
+//! SR25519 signature verification
+//!
+//! Verifies signatures using the same logic as the SP1 program to ensure
+//! early rejection of invalid signatures before expensive proof generation.
+
+use schnorrkel::{signing_context, PublicKey, Signature};
+
+/// The signing context used by Substrate for SR25519 signatures.
+/// This must match what the polkadot.js extension uses.
+const SUBSTRATE_CONTEXT: &[u8] = b"substrate";
+
+/// Substrate wallet extensions wrap messages with <Bytes>...</Bytes> when signing
+/// with signRaw({ type: 'bytes' }). We must wrap the challenge the same way.
+const WRAP_PREFIX: &[u8] = b"<Bytes>";
+const WRAP_POSTFIX: &[u8] = b"</Bytes>";
+
+/// Verify an SR25519 signature over a challenge
+///
+/// The signature verification matches exactly what the SP1 program does:
+/// 1. Create signing context with "substrate"
+/// 2. Wrap challenge with <Bytes>...</Bytes>
+/// 3. Verify signature against wrapped challenge
+///
+/// # Arguments
+/// * `pubkey` - 32-byte SR25519 public key (derived from SS58 address)
+/// * `signature` - 64-byte SR25519 signature
+/// * `challenge` - 32-byte challenge (the keccak256 hash that was signed)
+///
+/// # Returns
+/// * `Ok(())` - Signature is valid
+/// * `Err(String)` - Signature verification failed with reason
+pub fn verify_signature(
+    pubkey: &[u8; 32],
+    signature: &[u8; 64],
+    challenge: &[u8; 32],
+) -> Result<(), String> {
+    // Parse the SR25519 public key
+    let public_key =
+        PublicKey::from_bytes(pubkey).map_err(|e| format!("Invalid public key: {}", e))?;
+
+    // Parse the signature
+    let sig =
+        Signature::from_bytes(signature).map_err(|e| format!("Invalid signature format: {}", e))?;
+
+    // Create the signing context (must match what was used to sign)
+    let ctx = signing_context(SUBSTRATE_CONTEXT);
+
+    // Wrap the challenge with <Bytes>...</Bytes> as Substrate wallet extensions do
+    // when using signRaw with type: 'bytes'
+    //
+    // IMPORTANT: This must match exactly what the SP1 program does.
+    // The SP1 program wraps the raw challenge bytes, not the hex string.
+    // See: sp1/program/src/main.rs lines 56-63
+    let mut wrapped_challenge =
+        Vec::with_capacity(WRAP_PREFIX.len() + challenge.len() + WRAP_POSTFIX.len());
+    wrapped_challenge.extend_from_slice(WRAP_PREFIX);
+    wrapped_challenge.extend_from_slice(challenge);
+    wrapped_challenge.extend_from_slice(WRAP_POSTFIX);
+
+    // Verify the signature over the wrapped challenge
+    public_key
+        .verify(ctx.bytes(&wrapped_challenge), &sig)
+        .map_err(|_| "Signature verification failed".to_string())
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn test_wrapping_format() {
+        // The wrapping should match the SP1 program exactly
+        let challenge: [u8; 4] = [0x12, 0x34, 0x56, 0x78];
+        let mut wrapped = Vec::new();
+        wrapped.extend_from_slice(WRAP_PREFIX);
+        wrapped.extend_from_slice(&challenge);
+        wrapped.extend_from_slice(WRAP_POSTFIX);
+
+        // Should be: <Bytes> + raw bytes + </Bytes>
+        assert_eq!(wrapped.len(), 7 + 4 + 8); // "<Bytes>" + 4 bytes + "</Bytes>"
+        assert!(wrapped.starts_with(b"<Bytes>"));
+        assert!(wrapped.ends_with(b"</Bytes>"));
+    }
+
+    #[test]
+    fn test_invalid_pubkey() {
+        let invalid_pubkey = [0u8; 32]; // All zeros is not a valid curve point
+        let signature = [0u8; 64];
+        let challenge = [0u8; 32];
+
+        let result = verify_signature(&invalid_pubkey, &signature, &challenge);
+        // Should fail with invalid public key or signature verification failed
+        assert!(result.is_err());
+    }
+}
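To see the wrapping contract end to end, the sketch below signs a challenge the way a wallet would and feeds it through `verify_signature`. It assumes schnorrkel's default RNG features for `Keypair::generate()`; the function itself is illustrative, not part of this diff.

```rust
use schnorrkel::{signing_context, Keypair};

// Round-trip demo: sign over the wrapped bytes with the "substrate" context,
// then verify against the *raw* challenge (the verifier re-wraps internally).
fn signing_roundtrip_demo() {
    let keypair = Keypair::generate();
    let challenge = [0x42u8; 32];

    // Wrap exactly as signRaw({ type: 'bytes' }) does on the wallet side.
    let mut wrapped = WRAP_PREFIX.to_vec();
    wrapped.extend_from_slice(&challenge);
    wrapped.extend_from_slice(WRAP_POSTFIX);

    let sig = keypair.sign(signing_context(SUBSTRATE_CONTEXT).bytes(&wrapped));

    let ok = verify_signature(&keypair.public.to_bytes(), &sig.to_bytes(), &challenge);
    assert!(ok.is_ok());
}
```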
diff --git a/packages/migration-claim/sp1/prover-api/src/types.rs b/packages/migration-claim/sp1/prover-api/src/types.rs
new file mode 100644
index 0000000..423b301
--- /dev/null
+++ b/packages/migration-claim/sp1/prover-api/src/types.rs
@@ -0,0 +1,422 @@
+use crate::eligibility::EligibilityData;
+use serde::{Deserialize, Serialize};
+use std::collections::HashMap;
+use std::sync::atomic::{AtomicUsize, Ordering};
+use std::sync::Arc;
+use std::time::{SystemTime, UNIX_EPOCH};
+use tokio::sync::Mutex;
+
+/// Request payload for proof generation
+#[derive(Debug, Clone, Deserialize)]
+pub struct ProveRequest {
+    #[serde(rename = "ss58Address")]
+    pub ss58_address: String,
+    pub signature: String,
+    #[serde(rename = "evmAddress")]
+    pub evm_address: String,
+    pub challenge: String,
+    pub amount: String,
+}
+
+/// Response with job ID after submission
+#[derive(Debug, Clone, Serialize)]
+pub struct JobResponse {
+    #[serde(rename = "jobId")]
+    pub job_id: String,
+}
+
+/// Status response for job queries
+#[derive(Debug, Clone, Serialize)]
+pub struct StatusResponse {
+    pub status: String,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub code: Option<String>,
+    #[serde(rename = "zkProof", skip_serializing_if = "Option::is_none")]
+    pub zk_proof: Option<String>,
+    #[serde(rename = "publicValues", skip_serializing_if = "Option::is_none")]
+    pub public_values: Option<String>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub error: Option<String>,
+    #[serde(rename = "retryAfter", skip_serializing_if = "Option::is_none")]
+    pub retry_after: Option<u64>,
+}
+
+impl StatusResponse {
+    pub fn pending() -> Self {
+        Self {
+            status: "pending".to_string(),
+            code: None,
+            zk_proof: None,
+            public_values: None,
+            error: None,
+            retry_after: None,
+        }
+    }
+
+    pub fn running() -> Self {
+        Self {
+            status: "running".to_string(),
+            code: None,
+            zk_proof: None,
+            public_values: None,
+            error: None,
+            retry_after: None,
+        }
+    }
+
+    pub fn completed(zk_proof: String, public_values: String) -> Self {
+        Self {
+            status: "completed".to_string(),
+            code: None,
+            zk_proof: Some(zk_proof),
+            public_values: Some(public_values),
+            error: None,
+            retry_after: None,
+        }
+    }
+
+    pub fn failed(code: &str, error: String) -> Self {
+        Self {
+            status: "failed".to_string(),
+            code: Some(code.to_string()),
+            zk_proof: None,
+            public_values: None,
+            error: Some(error),
+            retry_after: None,
+        }
+    }
+
+    pub fn failed_with_retry(code: &str, error: String, retry_after: u64) -> Self {
+        Self {
+            status: "failed".to_string(),
+            code: Some(code.to_string()),
+            zk_proof: None,
+            public_values: None,
+            error: Some(error),
+            retry_after: Some(retry_after),
+        }
+    }
+}
+
+/// Health check response
+#[derive(Debug, Clone, Serialize)]
+pub struct HealthResponse {
+    pub status: String,
+    pub prover_mode: String,
+    pub verify_proof: bool,
+    pub verify_onchain: bool,
+    pub jobs: usize,
+    pub cache_size: usize,
+    pub queue_size: usize,
+    pub queue_capacity: usize,
+    /// Proof generation metrics for monitoring
+    pub proof_metrics: ProofMetricsSnapshot,
+}
+
+/// Job status enum
+#[derive(Debug, Clone)]
+pub enum JobStatus {
+    Pending,
+    Running,
+    Completed {
+        zk_proof: String,
+        public_values: String,
+    },
+    Failed {
+        error: String,
+    },
+}
+
+/// Job entry in the jobs map
+#[derive(Debug, Clone)]
+pub struct JobEntry {
+    pub status: JobStatus,
+    pub updated_at: u64,
+}
+
+impl JobEntry {
+    pub fn new(status: JobStatus) -> Self {
+        Self {
+            status,
+            updated_at: now_ts(),
+        }
+    }
+}
+
+/// Cached proof result
+#[derive(Debug, Clone)]
+pub struct CachedProof {
+    pub zk_proof: String,
+    pub public_values: String,
+    pub created_at: u64,
+}
+
+impl CachedProof {
+    pub fn new(zk_proof: String, public_values: String) -> Self {
+        Self {
+            zk_proof,
+            public_values,
+            created_at: now_ts(),
+        }
+    }
+
+    pub fn is_expired(&self, ttl_seconds: u64) -> bool {
+        now_ts() - self.created_at > ttl_seconds
+    }
+}
+
+/// Rate limit entry
+#[derive(Debug, Clone)]
+pub struct RateLimitEntry {
+    pub last_request_at: u64,
+    pub request_count: u32,
+}
+
+impl RateLimitEntry {
+    pub fn new() -> Self {
+        Self {
+            last_request_at: now_ts(),
+            request_count: 1,
+        }
+    }
+}
+
+impl Default for RateLimitEntry {
+    fn default() -> Self {
+        Self::new()
+    }
+}
+
+/// Metrics for monitoring proof generation
+///
+/// Tracks completions, timeouts, and timed-out tasks that continue running
+/// in the background (since spawn_blocking cannot be cancelled).
+pub struct ProofMetrics {
+    /// Total number of successful proof completions
+    pub total_completions: AtomicUsize,
+    /// Total number of proof timeouts
+    pub total_timeouts: AtomicUsize,
+    /// Number of timed-out tasks still running in background
+    /// Note: This counter increases on timeout and decreases when the task eventually completes
+    pub timed_out_still_running: AtomicUsize,
+}
+
+impl ProofMetrics {
+    pub fn new() -> Self {
+        Self {
+            total_completions: AtomicUsize::new(0),
+            total_timeouts: AtomicUsize::new(0),
+            timed_out_still_running: AtomicUsize::new(0),
+        }
+    }
+
+    /// Record a successful completion
+    pub fn record_completion(&self) {
+        self.total_completions.fetch_add(1, Ordering::Relaxed);
+    }
+
+    /// Record a timeout (task continues running in background)
+    pub fn record_timeout(&self) {
+        self.total_timeouts.fetch_add(1, Ordering::Relaxed);
+        self.timed_out_still_running.fetch_add(1, Ordering::Relaxed);
+    }
+
+    /// Decrement the count of timed-out tasks still running
+    /// Called when a timed-out task eventually completes
+    pub fn decrement_timed_out_still_running(&self) {
+        self.timed_out_still_running.fetch_sub(1, Ordering::Relaxed);
+    }
+
+    /// Get current metrics snapshot
+    pub fn snapshot(&self) -> ProofMetricsSnapshot {
+        ProofMetricsSnapshot {
+            total_completions: self.total_completions.load(Ordering::Relaxed),
+            total_timeouts: self.total_timeouts.load(Ordering::Relaxed),
+            timed_out_still_running: self.timed_out_still_running.load(Ordering::Relaxed),
+        }
+    }
+}
+
+impl Default for ProofMetrics {
+    fn default() -> Self {
+        Self::new()
+    }
+}
+
+/// Serializable snapshot of proof metrics
+#[derive(Debug, Clone, Serialize)]
+pub struct ProofMetricsSnapshot {
+    pub total_completions: usize,
+    pub total_timeouts: usize,
+    pub timed_out_still_running: usize,
+}
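A sketch of the counter protocol these methods imply, assuming a worker that races `spawn_blocking` against a timeout. The worker shape is an assumption; only the `ProofMetrics` calls come from this diff.

```rust
use std::sync::Arc;
use std::time::Duration;

// Hypothetical worker step: on timeout the blocking task is orphaned, so the
// "still running" gauge goes up and must be decremented by whoever observes
// the orphan finishing.
async fn run_with_metrics(metrics: Arc<ProofMetrics>, timeout: Duration) {
    let task = tokio::task::spawn_blocking(|| {
        // ... expensive proof generation ...
    });

    match tokio::time::timeout(timeout, task).await {
        Ok(_join_result) => metrics.record_completion(),
        Err(_elapsed) => {
            // spawn_blocking cannot be cancelled: dropping the JoinHandle
            // detaches the task, which keeps running on the blocking pool.
            metrics.record_timeout();
            // The real worker would presumably arrange for the detached task
            // to call metrics.decrement_timed_out_still_running() on finish.
        }
    }
}
```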
"network".to_string(), + verify_proof: false, + verify_onchain: None, + claim_contract: None, + cache_ttl_seconds: 3600, // 1 hour + rate_limit_window_seconds: 300, // 5 minutes + rate_limit_max_requests: 3, // 3 requests per window + ip_rate_limit_window_seconds: 60, // 1 minute + ip_rate_limit_max_requests: 10, // 10 requests per IP per minute + queue_capacity: 50, + worker_count: 4, + proof_timeout_seconds: 600, // 10 minutes + rpc_timeout_seconds: 10, + max_body_bytes: 4096, // 4 KB + jobs_ttl_seconds: 3600, // 1 hour + eligibility_file: "../merkle-tree.json".to_string(), + verify_signatures: true, // Enabled by default + trust_proxy_headers: false, // Disabled by default for security + } + } +} + +/// On-chain verification configuration +#[derive(Debug, Clone)] +pub struct VerifyOnchainConfig { + pub rpc_url: String, + pub verifier_address: [u8; 20], + pub program_vkey: [u8; 32], + /// RPC timeout in seconds for on-chain verification calls + pub timeout_seconds: u64, +} + +/// Claim contract configuration for checking already-claimed +#[derive(Debug, Clone)] +pub struct ClaimContractConfig { + pub rpc_url: String, + pub contract_address: [u8; 20], +} + +/// Get current unix timestamp +pub fn now_ts() -> u64 { + SystemTime::now() + .duration_since(UNIX_EPOCH) + .map(|d| d.as_secs()) + .unwrap_or(0) +} + +/// Error codes for structured error responses +pub mod error_codes { + pub const INVALID_INPUT: &str = "invalid_input"; + pub const ALREADY_CLAIMED: &str = "already_claimed"; + pub const RATE_LIMITED: &str = "rate_limited"; + pub const QUEUE_FULL: &str = "queue_full"; + pub const TIMEOUT: &str = "timeout"; + pub const RPC_UNAVAILABLE: &str = "rpc_unavailable"; + pub const PROOF_FAILED: &str = "proof_failed"; + pub const NOT_FOUND: &str = "not_found"; + pub const INTERNAL_ERROR: &str = "internal_error"; + /// Address is not in the eligibility list + pub const NOT_ELIGIBLE: &str = "not_eligible"; + /// Requested amount doesn't match the eligible balance + pub const AMOUNT_MISMATCH: &str = "amount_mismatch"; + /// Signature verification failed + pub const INVALID_SIGNATURE: &str = "invalid_signature"; +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_status_response_pending() { + let resp = StatusResponse::pending(); + assert_eq!(resp.status, "pending"); + assert!(resp.zk_proof.is_none()); + assert!(resp.error.is_none()); + } + + #[test] + fn test_status_response_completed() { + let resp = StatusResponse::completed("0x123".to_string(), "0x456".to_string()); + assert_eq!(resp.status, "completed"); + assert_eq!(resp.zk_proof.unwrap(), "0x123"); + assert_eq!(resp.public_values.unwrap(), "0x456"); + } + + #[test] + fn test_status_response_failed() { + let resp = StatusResponse::failed("invalid_input", "bad data".to_string()); + assert_eq!(resp.status, "failed"); + assert_eq!(resp.code.unwrap(), "invalid_input"); + assert_eq!(resp.error.unwrap(), "bad data"); + } + + #[test] + fn test_cached_proof_expiry() { + let proof = CachedProof { + zk_proof: "0x".to_string(), + public_values: "0x".to_string(), + created_at: now_ts() - 100, + }; + assert!(!proof.is_expired(200)); + assert!(proof.is_expired(50)); + } + + #[test] + fn test_rate_limit_entry_new() { + let entry = RateLimitEntry::new(); + assert_eq!(entry.request_count, 1); + assert!(entry.last_request_at > 0); + } +} diff --git a/packages/migration-claim/sp1/prover-api/src/validation.rs b/packages/migration-claim/sp1/prover-api/src/validation.rs new file mode 100644 index 0000000..ed8f5f1 --- /dev/null +++ 
b/packages/migration-claim/sp1/prover-api/src/validation.rs @@ -0,0 +1,478 @@ +use alloy_primitives::U256; +use axum::http::HeaderMap; +use sr25519_claim_lib::ss58_decode; + +use crate::types::{error_codes, ProveRequest}; + +/// Validation error with code and message +#[derive(Debug, Clone)] +pub struct ValidationError { + pub code: String, + pub message: String, +} + +impl ValidationError { + pub fn new(code: &str, message: impl Into) -> Self { + Self { + code: code.to_string(), + message: message.into(), + } + } + + pub fn invalid_input(message: impl Into) -> Self { + Self::new(error_codes::INVALID_INPUT, message) + } +} + +impl std::fmt::Display for ValidationError { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{}: {}", self.code, self.message) + } +} + +impl std::error::Error for ValidationError {} + +/// Validated proof request with parsed fields +#[derive(Debug, Clone)] +pub struct ValidatedRequest { + pub pubkey: [u8; 32], + pub signature: [u8; 64], + pub challenge: [u8; 32], + pub amount: [u8; 32], +} + +/// Validate and parse a proof request +pub fn validate_request(request: &ProveRequest) -> Result { + // Check for empty fields + if request.ss58_address.trim().is_empty() { + return Err(ValidationError::invalid_input("ss58Address is required")); + } + if request.signature.trim().is_empty() { + return Err(ValidationError::invalid_input("signature is required")); + } + if request.evm_address.trim().is_empty() { + return Err(ValidationError::invalid_input("evmAddress is required")); + } + if request.challenge.trim().is_empty() { + return Err(ValidationError::invalid_input("challenge is required")); + } + if request.amount.trim().is_empty() { + return Err(ValidationError::invalid_input("amount is required")); + } + + // Validate amount is decimal + if !is_decimal(&request.amount) { + return Err(ValidationError::invalid_input( + "amount must be a base-10 decimal string", + )); + } + + // Parse and validate SS58 address + let pubkey = ss58_decode(&request.ss58_address) + .map_err(|e| ValidationError::invalid_input(format!("Invalid ss58Address: {e}")))?; + + // Parse and validate signature (64 bytes) + let signature = parse_hex_bytes::<64>(&request.signature) + .map_err(|e| ValidationError::invalid_input(format!("Invalid signature: {e}")))?; + + // Validate EVM address format (20 bytes) - we check but don't store + parse_hex_bytes::<20>(&request.evm_address) + .map_err(|e| ValidationError::invalid_input(format!("Invalid evmAddress: {e}")))?; + + // Parse and validate challenge (32 bytes) + let challenge = parse_hex_bytes::<32>(&request.challenge) + .map_err(|e| ValidationError::invalid_input(format!("Invalid challenge: {e}")))?; + + // Parse amount + let amount = parse_amount(&request.amount) + .map_err(|e| ValidationError::invalid_input(format!("Invalid amount: {e}")))?; + + Ok(ValidatedRequest { + pubkey, + signature, + challenge, + amount, + }) +} + +/// Parse hex string to fixed-size byte array +pub fn parse_hex_bytes(value: &str) -> Result<[u8; N], String> { + let trimmed = value.strip_prefix("0x").unwrap_or(value); + let bytes = hex::decode(trimmed).map_err(|e| format!("invalid hex: {e}"))?; + if bytes.len() != N { + return Err(format!("expected {} bytes, got {}", N, bytes.len())); + } + let mut out = [0u8; N]; + out.copy_from_slice(&bytes); + Ok(out) +} + +/// Parse decimal string to U256 bytes +pub fn parse_amount(value: &str) -> Result<[u8; 32], String> { + let amount: U256 = value.parse().map_err(|_| "invalid decimal number")?; + 
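For reference, the serde attributes on `StatusResponse` produce camelCase keys and omit `None` fields entirely. A quick sketch of the resulting JSON (assumes `serde_json`, which this workspace already pulls in; the function is illustrative):

```rust
// What clients actually see on the wire for two representative states.
fn status_json_demo() {
    let done = StatusResponse::completed("0xproof".into(), "0xvals".into());
    assert_eq!(
        serde_json::to_value(&done).unwrap(),
        serde_json::json!({
            "status": "completed",
            "zkProof": "0xproof",
            "publicValues": "0xvals",
        })
    );

    let limited = StatusResponse::failed_with_retry("rate_limited", "slow down".into(), 42);
    assert_eq!(
        serde_json::to_value(&limited).unwrap(),
        serde_json::json!({
            "status": "failed",
            "code": "rate_limited",
            "error": "slow down",
            "retryAfter": 42,
        })
    );
}
```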
diff --git a/packages/migration-claim/sp1/prover-api/src/validation.rs b/packages/migration-claim/sp1/prover-api/src/validation.rs
new file mode 100644
index 0000000..ed8f5f1
--- /dev/null
+++ b/packages/migration-claim/sp1/prover-api/src/validation.rs
@@ -0,0 +1,478 @@
+use alloy_primitives::U256;
+use axum::http::HeaderMap;
+use sr25519_claim_lib::ss58_decode;
+
+use crate::types::{error_codes, ProveRequest};
+
+/// Validation error with code and message
+#[derive(Debug, Clone)]
+pub struct ValidationError {
+    pub code: String,
+    pub message: String,
+}
+
+impl ValidationError {
+    pub fn new(code: &str, message: impl Into<String>) -> Self {
+        Self {
+            code: code.to_string(),
+            message: message.into(),
+        }
+    }
+
+    pub fn invalid_input(message: impl Into<String>) -> Self {
+        Self::new(error_codes::INVALID_INPUT, message)
+    }
+}
+
+impl std::fmt::Display for ValidationError {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        write!(f, "{}: {}", self.code, self.message)
+    }
+}
+
+impl std::error::Error for ValidationError {}
+
+/// Validated proof request with parsed fields
+#[derive(Debug, Clone)]
+pub struct ValidatedRequest {
+    pub pubkey: [u8; 32],
+    pub signature: [u8; 64],
+    pub challenge: [u8; 32],
+    pub amount: [u8; 32],
+}
+
+/// Validate and parse a proof request
+pub fn validate_request(request: &ProveRequest) -> Result<ValidatedRequest, ValidationError> {
+    // Check for empty fields
+    if request.ss58_address.trim().is_empty() {
+        return Err(ValidationError::invalid_input("ss58Address is required"));
+    }
+    if request.signature.trim().is_empty() {
+        return Err(ValidationError::invalid_input("signature is required"));
+    }
+    if request.evm_address.trim().is_empty() {
+        return Err(ValidationError::invalid_input("evmAddress is required"));
+    }
+    if request.challenge.trim().is_empty() {
+        return Err(ValidationError::invalid_input("challenge is required"));
+    }
+    if request.amount.trim().is_empty() {
+        return Err(ValidationError::invalid_input("amount is required"));
+    }
+
+    // Validate amount is decimal
+    if !is_decimal(&request.amount) {
+        return Err(ValidationError::invalid_input(
+            "amount must be a base-10 decimal string",
+        ));
+    }
+
+    // Parse and validate SS58 address
+    let pubkey = ss58_decode(&request.ss58_address)
+        .map_err(|e| ValidationError::invalid_input(format!("Invalid ss58Address: {e}")))?;
+
+    // Parse and validate signature (64 bytes)
+    let signature = parse_hex_bytes::<64>(&request.signature)
+        .map_err(|e| ValidationError::invalid_input(format!("Invalid signature: {e}")))?;
+
+    // Validate EVM address format (20 bytes) - we check but don't store
+    parse_hex_bytes::<20>(&request.evm_address)
+        .map_err(|e| ValidationError::invalid_input(format!("Invalid evmAddress: {e}")))?;
+
+    // Parse and validate challenge (32 bytes)
+    let challenge = parse_hex_bytes::<32>(&request.challenge)
+        .map_err(|e| ValidationError::invalid_input(format!("Invalid challenge: {e}")))?;
+
+    // Parse amount
+    let amount = parse_amount(&request.amount)
+        .map_err(|e| ValidationError::invalid_input(format!("Invalid amount: {e}")))?;
+
+    Ok(ValidatedRequest {
+        pubkey,
+        signature,
+        challenge,
+        amount,
+    })
+}
+
+/// Parse hex string to fixed-size byte array
+pub fn parse_hex_bytes<const N: usize>(value: &str) -> Result<[u8; N], String> {
+    let trimmed = value.strip_prefix("0x").unwrap_or(value);
+    let bytes = hex::decode(trimmed).map_err(|e| format!("invalid hex: {e}"))?;
+    if bytes.len() != N {
+        return Err(format!("expected {} bytes, got {}", N, bytes.len()));
+    }
+    let mut out = [0u8; N];
+    out.copy_from_slice(&bytes);
+    Ok(out)
+}
+
+/// Parse decimal string to U256 bytes
+pub fn parse_amount(value: &str) -> Result<[u8; 32], String> {
+    let amount: U256 = value.parse().map_err(|_| "invalid decimal number")?;
+    Ok(amount.to_be_bytes())
+}
+
+/// Check if a string is a valid decimal number
+pub fn is_decimal(value: &str) -> bool {
+    !value.is_empty() && value.chars().all(|c| c.is_ascii_digit())
+}
+
+/// Generate cache key from request fields
+pub fn cache_key(request: &ProveRequest) -> String {
+    format!(
+        "{}|{}|{}|{}",
+        request.ss58_address, request.evm_address, request.amount, request.challenge
+    )
+}
+
+/// Generate rate limit key from pubkey
+pub fn rate_limit_key_pubkey(pubkey: &[u8; 32]) -> String {
+    format!("pubkey:{}", hex::encode(pubkey))
+}
+
+/// Generate rate limit key from IP address
+pub fn rate_limit_key_ip(ip: &str) -> String {
+    format!("ip:{}", ip)
+}
+
+/// Extract client IP from headers or socket address
+///
+/// When `trust_proxy` is true:
+///   Priority: X-Forwarded-For (first IP) → X-Real-IP → socket address (IP only)
+/// When `trust_proxy` is false:
+///   Only uses socket address (IP only) - ignores potentially spoofed headers
+///
+/// The socket address port is always stripped to ensure consistent rate limiting
+/// (ephemeral ports change per connection).
+pub fn extract_client_ip(
+    headers: &HeaderMap,
+    socket_addr: Option<&str>,
+    trust_proxy: bool,
+) -> String {
+    // Only check proxy headers if we trust them (i.e., we're behind a trusted reverse proxy)
+    if trust_proxy {
+        // Check X-Forwarded-For (first IP in chain is the original client)
+        if let Some(xff) = headers.get("x-forwarded-for") {
+            if let Ok(value) = xff.to_str() {
+                if let Some(first_ip) = value.split(',').next() {
+                    let trimmed = first_ip.trim();
+                    if !trimmed.is_empty() {
+                        return trimmed.to_string();
+                    }
+                }
+            }
+        }
+
+        // Check X-Real-IP
+        if let Some(xri) = headers.get("x-real-ip") {
+            if let Ok(value) = xri.to_str() {
+                let trimmed = value.trim();
+                if !trimmed.is_empty() {
+                    return trimmed.to_string();
+                }
+            }
+        }
+    }
+
+    // Fallback to socket address, stripping the port
+    // Socket address format: "192.168.1.1:54321" or "[::1]:54321" for IPv6
+    socket_addr
+        .map(strip_port)
+        .unwrap_or_else(|| "unknown".to_string())
+}
+
+/// Strip the port from a socket address string
+/// Handles both IPv4 ("192.168.1.1:8080") and IPv6 ("[::1]:8080") formats
+fn strip_port(addr: &str) -> String {
+    // Check for IPv6 with brackets: [::1]:8080
+    if addr.starts_with('[') {
+        // Find the closing bracket and return everything up to and including it
+        if let Some(bracket_end) = addr.find(']') {
+            return addr[..=bracket_end].to_string();
+        }
+    }
+
+    // For IPv4 or unbracketed addresses, split on the last colon
+    // (the last colon is the port separator for IPv4; IPv6 without brackets is rare)
+    if let Some(last_colon) = addr.rfind(':') {
+        // Verify the part after the colon looks like a port (all digits)
+        let after_colon = &addr[last_colon + 1..];
+        if !after_colon.is_empty() && after_colon.chars().all(|c| c.is_ascii_digit()) {
+            return addr[..last_colon].to_string();
+        }
+    }
+
+    // No port found, return as-is
+    addr.to_string()
+}
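How these helpers are meant to compose inside a handler, shown as a sketch only: the function name and the idea that axum's `ConnectInfo` supplies the socket address are assumptions, while `validate_request`, `extract_client_ip`, and the key builders come from this diff.

```rust
use axum::http::HeaderMap;
use std::net::SocketAddr;

// Hypothetical pre-flight step: reject malformed input, then derive the two
// rate-limit keys (per-pubkey and per-IP) that the limiter maps consume.
fn derive_rate_limit_keys(
    headers: &HeaderMap,
    socket: SocketAddr, // e.g. from axum's ConnectInfo extractor
    trust_proxy: bool,  // from AppConfig::trust_proxy_headers
    request: &ProveRequest,
) -> Result<(String, String), ValidationError> {
    let validated = validate_request(request)?;

    // SocketAddr renders as "ip:port"; extract_client_ip strips the port.
    let socket_str = socket.to_string();
    let ip = extract_client_ip(headers, Some(socket_str.as_str()), trust_proxy);

    Ok((
        rate_limit_key_pubkey(&validated.pubkey),
        rate_limit_key_ip(&ip),
    ))
}
```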
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    fn valid_request() -> ProveRequest {
+        ProveRequest {
+            ss58_address: "5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY".to_string(),
+            signature: format!("0x{}", "ab".repeat(64)),
+            evm_address: "0x742d35Cc6634C0532925a3b844Bc9e7595f4a3b2".to_string(),
+            challenge: format!("0x{}", "12".repeat(32)),
+            amount: "1000000000000000000".to_string(),
+        }
+    }
+
+    #[test]
+    fn test_validate_request_valid() {
+        let request = valid_request();
+        let result = validate_request(&request);
+        assert!(result.is_ok());
+        let validated = result.unwrap();
+        assert_eq!(validated.signature.len(), 64);
+        assert_eq!(validated.challenge.len(), 32);
+        assert_eq!(validated.amount.len(), 32);
+        assert_eq!(validated.pubkey.len(), 32);
+    }
+
+    #[test]
+    fn test_validate_request_empty_ss58() {
+        let mut request = valid_request();
+        request.ss58_address = "".to_string();
+        let result = validate_request(&request);
+        assert!(result.is_err());
+        assert!(result.unwrap_err().message.contains("ss58Address"));
+    }
+
+    #[test]
+    fn test_validate_request_empty_signature() {
+        let mut request = valid_request();
+        request.signature = " ".to_string();
+        let result = validate_request(&request);
+        assert!(result.is_err());
+        assert!(result.unwrap_err().message.contains("signature"));
+    }
+
+    #[test]
+    fn test_validate_request_invalid_ss58() {
+        let mut request = valid_request();
+        request.ss58_address = "invalid_address".to_string();
+        let result = validate_request(&request);
+        assert!(result.is_err());
+        assert!(result.unwrap_err().message.contains("ss58Address"));
+    }
+
+    #[test]
+    fn test_validate_request_wrong_signature_length() {
+        let mut request = valid_request();
+        request.signature = "0x1234".to_string(); // Only 2 bytes
+        let result = validate_request(&request);
+        assert!(result.is_err());
+        let err = result.unwrap_err();
+        assert!(err.message.contains("signature"));
+        assert!(err.message.contains("64 bytes"));
+    }
+
+    #[test]
+    fn test_validate_request_wrong_evm_address_length() {
+        let mut request = valid_request();
+        request.evm_address = "0x1234".to_string(); // Only 2 bytes
+        let result = validate_request(&request);
+        assert!(result.is_err());
+        let err = result.unwrap_err();
+        assert!(err.message.contains("evmAddress"));
+        assert!(err.message.contains("20 bytes"));
+    }
+
+    #[test]
+    fn test_validate_request_wrong_challenge_length() {
+        let mut request = valid_request();
+        request.challenge = "0x1234".to_string(); // Only 2 bytes
+        let result = validate_request(&request);
+        assert!(result.is_err());
+        let err = result.unwrap_err();
+        assert!(err.message.contains("challenge"));
+        assert!(err.message.contains("32 bytes"));
+    }
+
+    #[test]
+    fn test_validate_request_hex_amount() {
+        let mut request = valid_request();
+        request.amount = "0x1234".to_string(); // Hex, not decimal
+        let result = validate_request(&request);
+        assert!(result.is_err());
+        assert!(result.unwrap_err().message.contains("amount"));
+    }
+
+    #[test]
+    fn test_validate_request_negative_amount() {
+        let mut request = valid_request();
+        request.amount = "-100".to_string();
+        let result = validate_request(&request);
+        assert!(result.is_err());
+        assert!(result.unwrap_err().message.contains("amount"));
+    }
+
+    #[test]
+    fn test_validate_request_invalid_hex() {
+        let mut request = valid_request();
+        request.signature = "0xGGGG".to_string(); // Invalid hex
+        let result = validate_request(&request);
+        assert!(result.is_err());
+        assert!(result.unwrap_err().message.contains("signature"));
+    }
+
+    #[test]
+    fn test_parse_hex_bytes_with_prefix() {
+        let result = parse_hex_bytes::<4>("0xdeadbeef");
+        assert!(result.is_ok());
+        assert_eq!(result.unwrap(), [0xde, 0xad, 0xbe, 0xef]);
+    }
+
+    #[test]
+    fn test_parse_hex_bytes_without_prefix() {
+        let result = parse_hex_bytes::<4>("deadbeef");
+        assert!(result.is_ok());
+        assert_eq!(result.unwrap(), [0xde, 0xad, 0xbe, 0xef]);
+    }
+
+    #[test]
+    fn test_parse_hex_bytes_wrong_length() {
+        let result = parse_hex_bytes::<4>("0x1234");
+        assert!(result.is_err());
+        assert!(result.unwrap_err().contains("expected 4 bytes"));
+    }
+
+    #[test]
+    fn test_is_decimal() {
+        assert!(is_decimal("123456789"));
+        assert!(is_decimal("0"));
+        assert!(is_decimal("1000000000000000000"));
+        assert!(!is_decimal(""));
+        assert!(!is_decimal("-100"));
+        assert!(!is_decimal("12.34"));
+        assert!(!is_decimal("0x1234"));
+        assert!(!is_decimal("abc"));
+        assert!(!is_decimal("123abc"));
+    }
+
+    #[test]
+    fn test_parse_amount() {
+        let result = parse_amount("1000000000000000000");
+        assert!(result.is_ok());
+        let bytes = result.unwrap();
+        // 1e18 = 0x0de0b6b3a7640000
+        assert_eq!(
+            &bytes[24..],
+            &[0x0d, 0xe0, 0xb6, 0xb3, 0xa7, 0x64, 0x00, 0x00]
+        );
+    }
+
+    #[test]
+    fn test_cache_key() {
+        let request = valid_request();
+        let key = cache_key(&request);
+        assert!(key.contains(&request.ss58_address));
+        assert!(key.contains(&request.evm_address));
+        assert!(key.contains(&request.amount));
+        assert!(key.contains(&request.challenge));
+    }
+
+    #[test]
+    fn test_rate_limit_key_pubkey() {
+        let pubkey = [0xab; 32];
+        let key = rate_limit_key_pubkey(&pubkey);
+        assert!(key.starts_with("pubkey:"));
+        assert!(key.contains(&hex::encode(&pubkey)));
+    }
+
+    #[test]
+    fn test_rate_limit_key_ip() {
+        let key = rate_limit_key_ip("192.168.1.1");
+        assert_eq!(key, "ip:192.168.1.1");
+    }
+
+    #[test]
+    fn test_extract_client_ip_xff_trusted() {
+        let mut headers = HeaderMap::new();
+        headers.insert(
+            "x-forwarded-for",
+            "203.0.113.195, 70.41.3.18, 150.172.238.178"
+                .parse()
+                .unwrap(),
+        );
+        // With trust_proxy=true, use X-Forwarded-For
+        let ip = extract_client_ip(&headers, Some("127.0.0.1:8080"), true);
+        assert_eq!(ip, "203.0.113.195");
+    }
+
+    #[test]
+    fn test_extract_client_ip_xff_untrusted() {
+        let mut headers = HeaderMap::new();
+        headers.insert(
+            "x-forwarded-for",
+            "203.0.113.195, 70.41.3.18, 150.172.238.178"
+                .parse()
+                .unwrap(),
+        );
+        // With trust_proxy=false, ignore X-Forwarded-For and use the socket address
+        let ip = extract_client_ip(&headers, Some("127.0.0.1:8080"), false);
+        assert_eq!(ip, "127.0.0.1");
+    }
+
+    #[test]
+    fn test_extract_client_ip_xri_trusted() {
+        let mut headers = HeaderMap::new();
+        headers.insert("x-real-ip", "203.0.113.195".parse().unwrap());
+        let ip = extract_client_ip(&headers, Some("127.0.0.1:8080"), true);
+        assert_eq!(ip, "203.0.113.195");
+    }
+
+    #[test]
+    fn test_extract_client_ip_xri_untrusted() {
+        let mut headers = HeaderMap::new();
+        headers.insert("x-real-ip", "203.0.113.195".parse().unwrap());
+        // With trust_proxy=false, ignore X-Real-IP and use the socket address
+        let ip = extract_client_ip(&headers, Some("127.0.0.1:8080"), false);
+        assert_eq!(ip, "127.0.0.1");
+    }
+
+    #[test]
+    fn test_extract_client_ip_socket_strips_port() {
+        let headers = HeaderMap::new();
+        // Port should be stripped from the socket address
+        let ip = extract_client_ip(&headers, Some("192.168.1.100:54321"), false);
+        assert_eq!(ip, "192.168.1.100");
+    }
+
+    #[test]
+    fn test_extract_client_ip_ipv6_strips_port() {
+        let headers = HeaderMap::new();
+        // IPv6 with brackets should strip the port correctly
+        let ip = extract_client_ip(&headers, Some("[::1]:8080"), false);
+        assert_eq!(ip, "[::1]");
+    }
+
+    #[test]
+    fn test_extract_client_ip_unknown() {
+        let headers = HeaderMap::new();
+        let ip = extract_client_ip(&headers, None, false);
+        assert_eq!(ip, "unknown");
+    }
+
+    #[test]
+    fn test_strip_port_ipv4() {
+        assert_eq!(strip_port("192.168.1.1:8080"), "192.168.1.1");
+        assert_eq!(strip_port("10.0.0.1:54321"), "10.0.0.1");
+    }
+
+    #[test]
+    fn test_strip_port_ipv6() {
+        assert_eq!(strip_port("[::1]:8080"), "[::1]");
+        assert_eq!(strip_port("[2001:db8::1]:443"), "[2001:db8::1]");
+    }
+
+    #[test]
+    fn test_strip_port_no_port() {
+        assert_eq!(strip_port("192.168.1.1"), "192.168.1.1");
+        assert_eq!(strip_port("[::1]"), "[::1]");
+    }
+}
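One caveat implied by the tests above: `strip_port` assumes IPv4 or bracketed IPv6, so an unbracketed IPv6 address loses its final group (it looks like a port). This matters little in practice, since `SocketAddr::to_string()` brackets IPv6, but it is worth knowing. A small demo of the current behavior, not a proposed change:

```rust
// Illustration only: how strip_port treats unbracketed IPv6 input today.
fn unbracketed_ipv6_caveat() {
    // The trailing group parses as a "port" and is stripped.
    assert_eq!(strip_port("2001:db8::1"), "2001:db8:");
    assert_eq!(strip_port("::1"), ":");
    // The bracketed form, as produced by SocketAddr::to_string(), is handled correctly.
    assert_eq!(strip_port("[2001:db8::1]:443"), "[2001:db8::1]");
}
```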