diff --git a/Cargo.lock b/Cargo.lock index 25200f7..f17944a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2,21 +2,6 @@ # It is not intended for manual editing. version = 4 -[[package]] -name = "addr2line" -version = "0.22.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e4503c46a5c0c7844e948c9a4d6acd9f50cccb4de1c48eb9e291ea17470c678" -dependencies = [ - "gimli", -] - -[[package]] -name = "adler" -version = "1.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" - [[package]] name = "aho-corasick" version = "1.1.3" @@ -90,6 +75,24 @@ dependencies = [ "windows-sys 0.52.0", ] +[[package]] +name = "anyhow" +version = "1.0.100" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a23eb6b1614318a8071c9b2521f36b424b2c83db5eb3a0fead4a6c0809af6e61" + +[[package]] +name = "arrayref" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "76a2e8124351fda1ef8aaaa3bbd7ebbcb486bbcd4225aca0aa0d84bb2db8fecb" + +[[package]] +name = "arrayvec" +version = "0.7.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7c02d123df017efcdfbd739ef81735b36c5ba83ec3c59c80a9d7ecc718f92e50" + [[package]] name = "async-trait" version = "0.1.81" @@ -171,21 +174,6 @@ dependencies = [ "tracing", ] -[[package]] -name = "backtrace" -version = "0.3.73" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5cc23269a4f8976d0a4d2e7109211a419fe30e8d88d677cd60b6bc79c5732e0a" -dependencies = [ - "addr2line", - "cc", - "cfg-if", - "libc", - "miniz_oxide", - "object", - "rustc-demangle", -] - [[package]] name = "base64" version = "0.21.7" @@ -204,6 +192,19 @@ version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b048fb63fd8b5923fc5aa7b340d8e156aec7ec02f0c78fa8a6ddc2613f6f71de" +[[package]] +name = "blake3" +version = "1.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3888aaa89e4b2a40fca9848e400f6a658a5a3978de7be858e209cafa8be9a4a0" +dependencies = [ + "arrayref", + "arrayvec", + "cc", + "cfg-if", + "constant_time_eq", +] + [[package]] name = "block-buffer" version = "0.10.4" @@ -233,20 +234,28 @@ dependencies = [ name = "blockfrost-gateway" version = "1.3.3" dependencies = [ + "anyhow", "axum", "base64 0.21.7", + "blake3", "blockfrost", + "bytes", "chrono", "clap", "colored", "deadpool-diesel", "diesel", "diesel_migrations", + "dirs", "dotenvy", "futures", "futures-util", + "getrandom 0.3.4", "hex", "hyper", + "kameo", + "machine-uid", + "nix", "rand", "reqwest", "rstest", @@ -255,6 +264,7 @@ dependencies = [ "thiserror 1.0.63", "tokio", "tokio-tungstenite", + "tokio-util", "toml 0.9.5", "tracing", "tracing-subscriber", @@ -294,9 +304,13 @@ checksum = "8318a53db07bb3f8dca91a600466bdb3f2eaadeedfdbcf02e1accbad9271ba50" [[package]] name = "cc" -version = "1.1.10" +version = "1.2.50" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e9e8aabfac534be767c909e0690571677d49f41bd8465ae876fe043d52ba5292" +checksum = "9f50d563227a1c37cc0a263f64eca3334388c01c5e4c4861a9def205c614383c" +dependencies = [ + "find-msvc-tools", + "shlex", +] [[package]] name = "cfg-if" @@ -304,6 +318,12 @@ version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" +[[package]] +name = "cfg_aliases" +version = "0.2.1" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "613afe47fcd5fac7ccf1db93babcb082c5994d996f20b8b159f2ad1658eb5724" + [[package]] name = "chrono" version = "0.4.38" @@ -375,6 +395,12 @@ dependencies = [ "windows-sys 0.48.0", ] +[[package]] +name = "constant_time_eq" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7c74b8349d32d297c9134b8c88677813a227df8f779daa29bfc29c183fe3dca6" + [[package]] name = "core-foundation" version = "0.9.4" @@ -558,12 +584,39 @@ dependencies = [ "crypto-common", ] +[[package]] +name = "dirs" +version = "6.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c3e8aa94d75141228480295a7d0e7feb620b1a5ad9f12bc40be62411e38cce4e" +dependencies = [ + "dirs-sys", +] + +[[package]] +name = "dirs-sys" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e01a3366d27ee9890022452ee61b2b63a67e6f13f58900b651ff5665f0bb1fab" +dependencies = [ + "libc", + "option-ext", + "redox_users", + "windows-sys 0.61.2", +] + [[package]] name = "dotenvy" version = "0.15.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1aaf95b3e5c8f23aa320147307562d361db0ae0d51242340f558153b4eb2439b" +[[package]] +name = "downcast-rs" +version = "2.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "117240f60069e65410b3ae1bb213295bd828f707b5bec6596a1afc8793ce0cbc" + [[package]] name = "dsl_auto_type" version = "0.1.2" @@ -578,6 +631,12 @@ dependencies = [ "syn", ] +[[package]] +name = "dyn-clone" +version = "1.0.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d0881ea181b1df73ff77ffaaf9c7544ecc11e82fba9b5f27b262a3c73a332555" + [[package]] name = "either" version = "1.13.0" @@ -615,6 +674,12 @@ version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9fc0510504f03c51ada170672ac806f1f105a88aa97a5281117e1ddc3368e51a" +[[package]] +name = "find-msvc-tools" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3a3076410a55c90011c298b04d0cfa770b00fa04e1e3c97d3f6c9de105a03844" + [[package]] name = "fnv" version = "1.0.7" @@ -757,15 +822,23 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c4567c8db10ae91089c99af84c68c38da3ec2f087c3f82960bcdbf3656b6f4d7" dependencies = [ "cfg-if", + "js-sys", "libc", "wasi", + "wasm-bindgen", ] [[package]] -name = "gimli" -version = "0.29.0" +name = "getrandom" +version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "40ecd4077b5ae9fd2e9e169b102c6c330d0605168eb0e8bf79952b256dbefffd" +checksum = "899def5c37c4fd7b2664648c28120ecec138e4d395b459e5ca34f9cce2dd77fd" +dependencies = [ + "cfg-if", + "libc", + "r-efi", + "wasip2", +] [[package]] name = "glob" @@ -904,6 +977,7 @@ dependencies = [ "tokio", "tokio-rustls", "tower-service", + "webpki-roots 0.26.11", ] [[package]] @@ -935,7 +1009,7 @@ dependencies = [ "http-body", "hyper", "pin-project-lite", - "socket2", + "socket2 0.5.7", "tokio", "tower-service", "tracing", @@ -1029,6 +1103,33 @@ dependencies = [ "wasm-bindgen", ] +[[package]] +name = "kameo" +version = "0.19.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c4af7638c67029fd6821d02813c3913c803784648725d4df4082c9b91d7cbb1" +dependencies = [ + "downcast-rs", + "dyn-clone", + "futures", + "kameo_macros", + "serde", + "tokio", + "tracing", +] + +[[package]] 
+name = "kameo_macros" +version = "0.19.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a13c324e2d8c8e126e63e66087448b4267e263e6cb8770c56d10a9d0d279d9e2" +dependencies = [ + "heck", + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "lazy_static" version = "1.5.0" @@ -1037,9 +1138,19 @@ checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" [[package]] name = "libc" -version = "0.2.155" +version = "0.2.178" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97b3888a4aecf77e811145cadf6eef5901f4782c53886191b2f693f24761847c" +checksum = "37c93d8daa9d8a012fd8ab92f088405fb202ea0b6ab73ee2482ae66af4f42091" + +[[package]] +name = "libredox" +version = "0.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "416f7e718bdb06000964960ffa43b4335ad4012ae8b99060261aa4a8088d5ccb" +dependencies = [ + "bitflags", + "libc", +] [[package]] name = "linux-raw-sys" @@ -1063,6 +1174,17 @@ version = "0.4.22" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a7a70ba024b9dc04c27ea2f0c0548feb474ec5c54bba33a7f72f873a39d07b24" +[[package]] +name = "machine-uid" +version = "0.5.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7d7217d573cdb141d6da43113b098172e057d39915d79c4bdedbc3aacd46bd96" +dependencies = [ + "libc", + "windows-registry 0.6.1", + "windows-sys 0.61.2", +] + [[package]] name = "matchers" version = "0.1.0" @@ -1121,15 +1243,6 @@ dependencies = [ "unicase", ] -[[package]] -name = "miniz_oxide" -version = "0.7.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b8a240ddb74feaf34a79a7add65a741f3167852fba007066dcac1ca548d89c08" -dependencies = [ - "adler", -] - [[package]] name = "mio" version = "1.0.1" @@ -1159,6 +1272,18 @@ dependencies = [ "tempfile", ] +[[package]] +name = "nix" +version = "0.30.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "74523f3a35e05aba87a1d978330aef40f67b0304ac79c1c00b294c9830543db6" +dependencies = [ + "bitflags", + "cfg-if", + "cfg_aliases", + "libc", +] + [[package]] name = "nu-ansi-term" version = "0.46.0" @@ -1194,15 +1319,6 @@ dependencies = [ "libc", ] -[[package]] -name = "object" -version = "0.36.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "27b64972346851a39438c60b341ebc01bba47464ae329e55cf343eb93964efd9" -dependencies = [ - "memchr", -] - [[package]] name = "once_cell" version = "1.19.0" @@ -1253,6 +1369,12 @@ dependencies = [ "vcpkg", ] +[[package]] +name = "option-ext" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "04744f49eae99ab78e0d5c0b603ab218f515ea8cfe5a456d7629ad883a3b6e7d" + [[package]] name = "overload" version = "0.1.1" @@ -1348,6 +1470,58 @@ dependencies = [ "unicode-ident", ] +[[package]] +name = "quinn" +version = "0.11.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "62e96808277ec6f97351a2380e6c25114bc9e67037775464979f3037c92d05ef" +dependencies = [ + "bytes", + "pin-project-lite", + "quinn-proto", + "quinn-udp", + "rustc-hash", + "rustls", + "socket2 0.5.7", + "thiserror 2.0.11", + "tokio", + "tracing", +] + +[[package]] +name = "quinn-proto" +version = "0.11.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a2fe5ef3495d7d2e377ff17b1a8ce2ee2ec2a18cde8b6ad6619d65d0701c135d" +dependencies = [ + "bytes", + "getrandom 0.2.15", + "rand", + "ring", + "rustc-hash", + 
"rustls", + "rustls-pki-types", + "slab", + "thiserror 2.0.11", + "tinyvec", + "tracing", + "web-time", +] + +[[package]] +name = "quinn-udp" +version = "0.5.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "addec6a0dcad8a8d96a771f815f0eaf55f9d1805756410b39f5fa81332574cbd" +dependencies = [ + "cfg_aliases", + "libc", + "once_cell", + "socket2 0.6.1", + "tracing", + "windows-sys 0.60.2", +] + [[package]] name = "quote" version = "1.0.40" @@ -1357,6 +1531,12 @@ dependencies = [ "proc-macro2", ] +[[package]] +name = "r-efi" +version = "5.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "69cdb34c158ceb288df11e18b4bd39de994f6657d83847bdffdbd7f346754b0f" + [[package]] name = "rand" version = "0.8.5" @@ -1384,7 +1564,7 @@ version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" dependencies = [ - "getrandom", + "getrandom 0.2.15", ] [[package]] @@ -1396,6 +1576,17 @@ dependencies = [ "bitflags", ] +[[package]] +name = "redox_users" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a4e608c6638b9c18977b00b475ac1f28d14e84b27d8d42f70e0bf1e3dec127ac" +dependencies = [ + "getrandom 0.2.15", + "libredox", + "thiserror 2.0.11", +] + [[package]] name = "regex" version = "1.10.6" @@ -1455,6 +1646,7 @@ dependencies = [ "base64 0.22.1", "bytes", "encoding_rs", + "futures-channel", "futures-core", "futures-util", "h2", @@ -1474,7 +1666,10 @@ dependencies = [ "once_cell", "percent-encoding", "pin-project-lite", + "quinn", + "rustls", "rustls-pemfile", + "rustls-pki-types", "serde", "serde_json", "serde_urlencoded", @@ -1482,13 +1677,15 @@ dependencies = [ "system-configuration", "tokio", "tokio-native-tls", + "tokio-rustls", "tower", "tower-service", "url", "wasm-bindgen", "wasm-bindgen-futures", "web-sys", - "windows-registry", + "webpki-roots 0.26.11", + "windows-registry 0.2.0", ] [[package]] @@ -1499,7 +1696,7 @@ checksum = "c17fa4cb658e3583423e915b9f3acc01cceaee1860e33d59ebae66adc3a2dc0d" dependencies = [ "cc", "cfg-if", - "getrandom", + "getrandom 0.2.15", "libc", "spin", "untrusted", @@ -1536,10 +1733,10 @@ dependencies = [ ] [[package]] -name = "rustc-demangle" -version = "0.1.24" +name = "rustc-hash" +version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "719b953e2095829ee67db738b3bfa9fa368c94900df327b3f07fe6e794d2fe1f" +checksum = "357703d41365b4b27c590e3ed91eabb1b663f07c4c084095e60cbed4362dff0d" [[package]] name = "rustc_version" @@ -1570,6 +1767,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c58f8c84392efc0a126acce10fa59ff7b3d2ac06ab451a33f2741989b806b044" dependencies = [ "once_cell", + "ring", "rustls-pki-types", "rustls-webpki", "subtle", @@ -1591,6 +1789,9 @@ name = "rustls-pki-types" version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fc0a2ce646f8655401bb81e7927b812614bd5d91dbc968696be50603510fcaf0" +dependencies = [ + "web-time", +] [[package]] name = "rustls-webpki" @@ -1781,6 +1982,12 @@ dependencies = [ "lazy_static", ] +[[package]] +name = "shlex" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" + [[package]] name = "signal-hook-registry" version = "1.4.2" @@ -1815,6 +2022,16 @@ dependencies = [ "windows-sys 0.52.0", ] +[[package]] +name = 
"socket2" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "17129e116933cf371d018bb80ae557e889637989d8638274fb25622827b03881" +dependencies = [ + "libc", + "windows-sys 0.60.2", +] + [[package]] name = "spin" version = "0.9.8" @@ -1985,27 +2202,27 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.39.2" +version = "1.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "daa4fb1bc778bd6f04cbfc4bb2d06a7396a8f299dc33ea1900cedaa316f467b1" +checksum = "ff360e02eab121e0bc37a2d3b4d4dc622e6eda3a8e5253d5435ecf5bd4c68408" dependencies = [ - "backtrace", "bytes", "libc", "mio", "parking_lot", "pin-project-lite", "signal-hook-registry", - "socket2", + "socket2 0.6.1", "tokio-macros", - "windows-sys 0.52.0", + "tracing", + "windows-sys 0.61.2", ] [[package]] name = "tokio-macros" -version = "2.4.0" +version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "693d596312e88961bc67d7f1f97af8a70227d9f90c31bba5806eec004978d752" +checksum = "af407857209536a95c8e56f8231ef2c2e2aff839b22e07a1ffcbc617e9db9fa5" dependencies = [ "proc-macro2", "quote", @@ -2323,7 +2540,7 @@ version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "81dfa00651efa65069b0b6b651f4aaa31ba9e3c3ce0137aaad053604ee7e0314" dependencies = [ - "getrandom", + "getrandom 0.2.15", "serde", ] @@ -2360,6 +2577,15 @@ version = "0.11.0+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" +[[package]] +name = "wasip2" +version = "1.0.1+wasi-0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0562428422c63773dad2c345a1882263bbf4d65cf3f42e90921f787ef5ad58e7" +dependencies = [ + "wit-bindgen", +] + [[package]] name = "wasm-bindgen" version = "0.2.92" @@ -2436,6 +2662,34 @@ dependencies = [ "wasm-bindgen", ] +[[package]] +name = "web-time" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a6580f308b1fad9207618087a65c04e7a10bc77e02c8e84e9b00dd4b12fa0bb" +dependencies = [ + "js-sys", + "wasm-bindgen", +] + +[[package]] +name = "webpki-roots" +version = "0.26.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "521bc38abb08001b01866da9f51eb7c5d647a19260e00054a8c7fd5f9e57f7a9" +dependencies = [ + "webpki-roots 1.0.3", +] + +[[package]] +name = "webpki-roots" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "32b130c0d2d49f8b6889abc456e795e82525204f27c42cf767cf0d7734e089b8" +dependencies = [ + "rustls-pki-types", +] + [[package]] name = "winapi" version = "0.3.9" @@ -2467,17 +2721,34 @@ dependencies = [ "windows-targets 0.52.6", ] +[[package]] +name = "windows-link" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0805222e57f7521d6a62e36fa9163bc891acd422f971defe97d64e70d0a4fe5" + [[package]] name = "windows-registry" version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e400001bb720a623c1c69032f8e3e4cf09984deec740f007dd2b03ec864804b0" dependencies = [ - "windows-result", - "windows-strings", + "windows-result 0.2.0", + "windows-strings 0.1.0", "windows-targets 0.52.6", ] +[[package]] +name = "windows-registry" +version = "0.6.1" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "02752bf7fbdcce7f2a27a742f798510f3e5ad88dbe84871e5168e2120c3d5720" +dependencies = [ + "windows-link", + "windows-result 0.4.1", + "windows-strings 0.5.1", +] + [[package]] name = "windows-result" version = "0.2.0" @@ -2487,16 +2758,34 @@ dependencies = [ "windows-targets 0.52.6", ] +[[package]] +name = "windows-result" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7781fa89eaf60850ac3d2da7af8e5242a5ea78d1a11c49bf2910bb5a73853eb5" +dependencies = [ + "windows-link", +] + [[package]] name = "windows-strings" version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4cd9b125c486025df0eabcb585e62173c6c9eddcec5d117d3b6e8c30e2ee4d10" dependencies = [ - "windows-result", + "windows-result 0.2.0", "windows-targets 0.52.6", ] +[[package]] +name = "windows-strings" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7837d08f69c77cf6b07689544538e017c1bfcf57e34b4c0ff58e6c2cd3b37091" +dependencies = [ + "windows-link", +] + [[package]] name = "windows-sys" version = "0.48.0" @@ -2524,6 +2813,24 @@ dependencies = [ "windows-targets 0.52.6", ] +[[package]] +name = "windows-sys" +version = "0.60.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f2f500e4d28234f72040990ec9d39e3a6b950f9f22d3dba18416c35882612bcb" +dependencies = [ + "windows-targets 0.53.5", +] + +[[package]] +name = "windows-sys" +version = "0.61.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ae137229bcbd6cdf0f7b80a31df61766145077ddf49416a728b02cb3921ff3fc" +dependencies = [ + "windows-link", +] + [[package]] name = "windows-targets" version = "0.48.5" @@ -2548,13 +2855,30 @@ dependencies = [ "windows_aarch64_gnullvm 0.52.6", "windows_aarch64_msvc 0.52.6", "windows_i686_gnu 0.52.6", - "windows_i686_gnullvm", + "windows_i686_gnullvm 0.52.6", "windows_i686_msvc 0.52.6", "windows_x86_64_gnu 0.52.6", "windows_x86_64_gnullvm 0.52.6", "windows_x86_64_msvc 0.52.6", ] +[[package]] +name = "windows-targets" +version = "0.53.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4945f9f551b88e0d65f3db0bc25c33b8acea4d9e41163edf90dcd0b19f9069f3" +dependencies = [ + "windows-link", + "windows_aarch64_gnullvm 0.53.1", + "windows_aarch64_msvc 0.53.1", + "windows_i686_gnu 0.53.1", + "windows_i686_gnullvm 0.53.1", + "windows_i686_msvc 0.53.1", + "windows_x86_64_gnu 0.53.1", + "windows_x86_64_gnullvm 0.53.1", + "windows_x86_64_msvc 0.53.1", +] + [[package]] name = "windows_aarch64_gnullvm" version = "0.48.5" @@ -2567,6 +2891,12 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a9d8416fa8b42f5c947f8482c43e7d89e73a173cead56d044f6a56104a6d1b53" + [[package]] name = "windows_aarch64_msvc" version = "0.48.5" @@ -2579,6 +2909,12 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" +[[package]] +name = "windows_aarch64_msvc" +version = "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b9d782e804c2f632e395708e99a94275910eb9100b2114651e04744e9b125006" + [[package]] name = 
"windows_i686_gnu" version = "0.48.5" @@ -2591,12 +2927,24 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" +[[package]] +name = "windows_i686_gnu" +version = "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "960e6da069d81e09becb0ca57a65220ddff016ff2d6af6a223cf372a506593a3" + [[package]] name = "windows_i686_gnullvm" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" +[[package]] +name = "windows_i686_gnullvm" +version = "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fa7359d10048f68ab8b09fa71c3daccfb0e9b559aed648a8f95469c27057180c" + [[package]] name = "windows_i686_msvc" version = "0.48.5" @@ -2609,6 +2957,12 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" +[[package]] +name = "windows_i686_msvc" +version = "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e7ac75179f18232fe9c285163565a57ef8d3c89254a30685b57d83a38d326c2" + [[package]] name = "windows_x86_64_gnu" version = "0.48.5" @@ -2621,6 +2975,12 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" +[[package]] +name = "windows_x86_64_gnu" +version = "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c3842cdd74a865a8066ab39c8a7a473c0778a3f29370b5fd6b4b9aa7df4a499" + [[package]] name = "windows_x86_64_gnullvm" version = "0.48.5" @@ -2633,6 +2993,12 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0ffa179e2d07eee8ad8f57493436566c7cc30ac536a3379fdf008f47f6bb7ae1" + [[package]] name = "windows_x86_64_msvc" version = "0.48.5" @@ -2645,6 +3011,12 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" +[[package]] +name = "windows_x86_64_msvc" +version = "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d6bbff5f0aada427a1e5a6da5f1f98158182f26556f345ac9e04d36d0ebed650" + [[package]] name = "winnow" version = "0.7.12" @@ -2654,6 +3026,12 @@ dependencies = [ "memchr", ] +[[package]] +name = "wit-bindgen" +version = "0.46.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f17a85883d4e6d00e8a97c586de764dabcc06133f7f1d55dce5cdc070ad7fe59" + [[package]] name = "zerocopy" version = "0.7.35" diff --git a/Cargo.toml b/Cargo.toml index 600090f..55806ed 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -6,11 +6,19 @@ publish = false edition = "2021" build = "build.rs" +[features] +default = [] +dev_mock_db = [] + [dependencies] +anyhow = "1.0.98" axum = { version = "0.7.5", features = ["ws"] } tokio = { version = "1.39.2", features = ["full"] } +tokio-util = "0.7" futures = "0.3" futures-util = "0.3" +kameo = "0.19" +bytes = "1" tokio-tungstenite = "0.24" tungstenite = "0.24" tracing = "0.1.40" @@ -22,6 +30,7 @@ serde = { 
version = "1.0.219", features = ["derive"] } serde_json = "1.0.122" colored = "2.1.0" clap = { version = "4.5.14", features = ["derive"] } +dirs = "6.0.0" toml = "0.9.5" thiserror = "1.0.63" chrono = { version = "0.4.3", features = ["serde"] } @@ -33,6 +42,12 @@ rand = "0.8.5" base64 = "0.21" uuid = "1.10" hyper = "1.4.1" +machine-uid = "0.5" +blake3 = "1" +getrandom = "0.3" + +[target.'cfg(unix)'.dependencies] +nix = { version = "0.30", default-features = false, features = ["signal"] } [lib] name = "blockfrost_gateway" @@ -40,3 +55,11 @@ path = "src/lib.rs" [dev-dependencies] rstest = "0.26.1" + +[build-dependencies] +serde = { version = "1", features = ["derive"] } +serde_json = "1" +reqwest = { version = "0.12", default-features = false, features = [ + "blocking", + "rustls-tls", +] } diff --git a/build.rs b/build.rs index e57b485..0e744c9 100644 --- a/build.rs +++ b/build.rs @@ -1,5 +1,6 @@ fn main() { git_revision::set(); + hydra_scripts_id::set(); } mod git_revision { @@ -27,9 +28,176 @@ mod git_revision { .args(["rev-parse", "HEAD"]) .output() .expect("git-rev-parse"); - String::from_utf8_lossy(&git_rev_parse.stdout).trim().to_string() + String::from_utf8_lossy(&git_rev_parse.stdout) + .trim() + .to_string() }; println!("cargo:rustc-env={}={}", GIT_REVISION, revision); } } + +mod hydra_scripts_id { + use std::{ + collections::HashMap, + env, fs, + path::{Path, PathBuf}, + time::Duration, + }; + + pub fn set() { + println!("cargo:rerun-if-env-changed=HYDRA_SCRIPTS_TX_ID_MAINNET"); + println!("cargo:rerun-if-env-changed=HYDRA_SCRIPTS_TX_ID_PREPROD"); + println!("cargo:rerun-if-env-changed=HYDRA_SCRIPTS_TX_ID_PREVIEW"); + + // If user already provided the values at build time, honor them and avoid network. + if let (Ok(m), Ok(p), Ok(v)) = ( + env::var("HYDRA_SCRIPTS_TX_ID_MAINNET"), + env::var("HYDRA_SCRIPTS_TX_ID_PREPROD"), + env::var("HYDRA_SCRIPTS_TX_ID_PREVIEW"), + ) { + set_envs(&m, &p, &v, None, None); + return; + } + + let manifest_dir = PathBuf::from(env::var_os("CARGO_MANIFEST_DIR").unwrap()); + let flake_lock = find_upwards(&manifest_dir, "flake.lock").unwrap_or_else(|| { + panic!( + "Could not find flake.lock by walking up from {}", + manifest_dir.display() + ) + }); + + println!("cargo:rerun-if-changed={}", flake_lock.display()); + + let flake_lock_json = fs::read_to_string(&flake_lock) + .unwrap_or_else(|e| panic!("Failed to read {}: {e}", flake_lock.display())); + + let (rev, href) = read_hydra_rev_and_ref(&flake_lock_json); + + let url = format!( + "https://raw.githubusercontent.com/cardano-scaling/hydra/{}/hydra-node/networks.json", + rev + ); + + let networks_json = fetch_cached(&url, &rev); + let networks_map: HashMap> = + serde_json::from_str(&networks_json).unwrap_or_else(|e| { + panic!("Failed to parse networks.json downloaded from {url}: {e}") + }); + + let mainnet = lookup(&networks_map, "mainnet", &href); + let preprod = lookup(&networks_map, "preprod", &href); + let preview = lookup(&networks_map, "preview", &href); + + set_envs(&mainnet, &preprod, &preview, Some(&rev), Some(&href)); + } + + fn set_envs( + mainnet: &str, + preprod: &str, + preview: &str, + rev: Option<&str>, + href: Option<&str>, + ) { + // These are what your env!("...") will see. 
+ println!("cargo:rustc-env=HYDRA_SCRIPTS_TX_ID_MAINNET={mainnet}"); + println!("cargo:rustc-env=HYDRA_SCRIPTS_TX_ID_PREPROD={preprod}"); + println!("cargo:rustc-env=HYDRA_SCRIPTS_TX_ID_PREVIEW={preview}"); + + // Extra metadata (optional, but often handy) + if let Some(rev) = rev { + println!("cargo:rustc-env=HYDRA_INPUT_REV={rev}"); + } + if let Some(href) = href { + println!("cargo:rustc-env=HYDRA_INPUT_REF={href}"); + } + } + + fn read_hydra_rev_and_ref(flake_lock_json: &str) -> (String, String) { + let v: serde_json::Value = serde_json::from_str(flake_lock_json) + .unwrap_or_else(|e| panic!("Failed to parse flake.lock JSON: {e}")); + + let rev = v + .pointer("/nodes/hydra/locked/rev") + .and_then(|x| x.as_str()) + .unwrap_or_else(|| panic!("flake.lock missing /nodes/hydra/locked/rev")) + .to_string(); + + let href = v + .pointer("/nodes/hydra/original/ref") + .and_then(|x| x.as_str()) + .unwrap_or_else(|| panic!("flake.lock missing /nodes/hydra/original/ref")) + .to_string(); + + (rev, href) + } + + fn lookup( + networks: &HashMap>, + network: &str, + href: &str, + ) -> String { + networks + .get(network) + .unwrap_or_else(|| panic!("networks.json missing top-level key {network:?}")) + .get(href) + .cloned() + .unwrap_or_else(|| { + let mut versions: Vec<_> = networks[network].keys().cloned().collect(); + versions.sort(); + panic!( + "networks.json has no entry for network {network:?} version/ref {href:?}. \ +Available versions: {}", + versions.join(", ") + ) + }) + } + + fn fetch_cached(url: &str, rev: &str) -> String { + let out_dir = PathBuf::from(env::var_os("OUT_DIR").unwrap()); + let cache_path = out_dir.join(format!("hydra-networks.{rev}.json")); + + // If it's already in OUT_DIR, reuse it (build scripts can run a lot). + if let Ok(s) = fs::read_to_string(&cache_path) { + return s; + } + + let client = reqwest::blocking::Client::builder() + .timeout(Duration::from_secs(20)) + .user_agent("cargo-build-script (hydra networks.json fetch)") + .build() + .expect("Failed to build reqwest client"); + + let resp = client + .get(url) + .send() + .unwrap_or_else(|e| panic!("Failed to GET {url}: {e}")); + + if !resp.status().is_success() { + panic!("GET {url} failed with status {}", resp.status()); + } + + let text = resp + .text() + .unwrap_or_else(|e| panic!("Failed to read response body from {url}: {e}")); + + // Best-effort cache; ignore failures. 
+ let _ = fs::write(&cache_path, &text); + + text + } + + fn find_upwards(start: &Path, file_name: &str) -> Option { + let mut dir = Some(start); + + while let Some(d) = dir { + let candidate = d.join(file_name); + if candidate.is_file() { + return Some(candidate); + } + dir = d.parent(); + } + None + } +} diff --git a/config/development.toml b/config/development.toml index 3a1e04c..b4293fb 100644 --- a/config/development.toml +++ b/config/development.toml @@ -7,5 +7,14 @@ url = 'https://api.domain.com' connection_string = 'postgresql://user:pass@host:port/db' [blockfrost] -project_id = 'BLOCKFROST_PROJECT_ID' +project_id = 'preview_BLOCKFROST_PROJECT_ID' nft_asset = '4213fc3eac8c781ac85514dd1de9aaabcd5a3a81cc2df4f413b9b295' + +[hydra] +max_concurrent_hydra_nodes = 2 +cardano_signing_key = "/home/mw/.config/blockfrost-platform/hydra/tmp_their_keys/payment.sk" +node_socket_path = "/home/mw/.local/share/blockfrost-platform/preview/node.socket" +commit_ada = 3.0 +lovelace_per_request = 100_000 +requests_per_microtransaction = 10 +microtransactions_per_fanout = 2 diff --git a/deny.toml b/deny.toml index 0f0ba10..cec73ea 100644 --- a/deny.toml +++ b/deny.toml @@ -1,5 +1,22 @@ [licenses] # See for list of possible licenses -allow = ["Apache-2.0", "BSD-3-Clause", "MIT", "MPL-2.0", "Zlib", "Unicode-3.0"] +allow = [ + "Apache-2.0", + "BSD-2-Clause", + "BSD-3-Clause", + "MIT", + "MPL-2.0", + "Zlib", + "Unicode-3.0", + "ISC", + "CDLA-Permissive-2.0", + "OpenSSL", +] private = { ignore = true } confidence-threshold = 0.8 + +[[licenses.clarify]] +name = "ring" +version = "0.17.8" +expression = "Apache-2.0 AND ISC AND MIT AND OpenSSL" +license-files = [{ path = "LICENSE", hash = 0xbd0eed23 }] diff --git a/flake.lock b/flake.lock index bfa0584..6db9b2e 100644 --- a/flake.lock +++ b/flake.lock @@ -16,13 +16,30 @@ "type": "github" } }, + "cardano-node": { + "flake": false, + "locked": { + "lastModified": 1763736877, + "narHash": "sha256-c1a6DzDlm+wzwa85TWeOFrPEldsfjiZw7+DcMMW9nc4=", + "owner": "IntersectMBO", + "repo": "cardano-node", + "rev": "6c034ec038d8d276a3595e10e2d38643f09bd1f2", + "type": "github" + }, + "original": { + "owner": "IntersectMBO", + "ref": "10.5.3", + "repo": "cardano-node", + "type": "github" + } + }, "crane": { "locked": { - "lastModified": 1727316705, - "narHash": "sha256-/mumx8AQ5xFuCJqxCIOFCHTVlxHkMT21idpbgbm/TIE=", + "lastModified": 1765145449, + "narHash": "sha256-aBVHGWWRzSpfL++LubA0CwOOQ64WNLegrYHwsVuVN7A=", "owner": "ipetkov", "repo": "crane", - "rev": "5b03654ce046b5167e7b0bccbd8244cb56c16f0e", + "rev": "69f538cdce5955fcd47abfed4395dc6d5194c1c5", "type": "github" }, "original": { @@ -59,11 +76,11 @@ "rust-analyzer-src": "rust-analyzer-src" }, "locked": { - "lastModified": 1737527504, - "narHash": "sha256-Z8S5gLPdIYeKwBXDaSxlJ72ZmiilYhu3418h3RSQZA0=", + "lastModified": 1765435813, + "narHash": "sha256-C6tT7K1Lx6VsYw1BY5S3OavtapUvEnDQtmQB5DSgbCc=", "owner": "nix-community", "repo": "fenix", - "rev": "aa13f23e3e91b95377a693ac655bbc6545ebec0d", + "rev": "6399553b7a300c77e7f07342904eb696a5b6bf9d", "type": "github" }, "original": { @@ -72,6 +89,22 @@ "type": "github" } }, + "flake-compat": { + "flake": false, + "locked": { + "lastModified": 1761588595, + "narHash": "sha256-XKUZz9zewJNUj46b4AJdiRZJAvSZ0Dqj2BNfXvFlJC4=", + "owner": "edolstra", + "repo": "flake-compat", + "rev": "f387cd2afec9419c8ee37694406ca490c3f34ee5", + "type": "github" + }, + "original": { + "owner": "edolstra", + "repo": "flake-compat", + "type": "github" + } + }, "flake-parts": { "inputs": { "nixpkgs-lib": 
"nixpkgs-lib" @@ -90,18 +123,35 @@ "type": "github" } }, + "hydra": { + "flake": false, + "locked": { + "lastModified": 1759947091, + "narHash": "sha256-V9VBA5cFLcZ/M8g12Bzye5tVSVW3uoUIaRm+Ws0mFbo=", + "owner": "cardano-scaling", + "repo": "hydra", + "rev": "b5e33b55e9fba442c562f82cec6c36b1716d9847", + "type": "github" + }, + "original": { + "owner": "cardano-scaling", + "ref": "1.0.0", + "repo": "hydra", + "type": "github" + } + }, "nixpkgs": { "locked": { - "lastModified": 1731890469, - "narHash": "sha256-D1FNZ70NmQEwNxpSSdTXCSklBH1z2isPR84J6DQrJGs=", + "lastModified": 1765363881, + "narHash": "sha256-3C3xWn8/2Zzr7sxVBmpc1H1QfxjNfta5IMFe3O9ZEPw=", "owner": "nixos", "repo": "nixpkgs", - "rev": "5083ec887760adfe12af64830a66807423a859a7", + "rev": "d2b1213bf5ec5e62d96b003ab4b5cbc42abfc0d0", "type": "github" }, "original": { "owner": "nixos", - "ref": "nixpkgs-unstable", + "ref": "nixos-25.05", "repo": "nixpkgs", "type": "github" } @@ -121,10 +171,13 @@ "root": { "inputs": { "advisory-db": "advisory-db", + "cardano-node": "cardano-node", "crane": "crane", "devshell": "devshell", "fenix": "fenix", + "flake-compat": "flake-compat", "flake-parts": "flake-parts", + "hydra": "hydra", "nixpkgs": "nixpkgs", "treefmt-nix": "treefmt-nix" } @@ -132,11 +185,11 @@ "rust-analyzer-src": { "flake": false, "locked": { - "lastModified": 1737453499, - "narHash": "sha256-fa5AJI9mjFU2oVXqdCq2oA2pripAXbHzkUkewJRQpxA=", + "lastModified": 1765400135, + "narHash": "sha256-D3+4hfNwUhG0fdCpDhOASLwEQ1jKuHi4mV72up4kLQM=", "owner": "rust-lang", "repo": "rust-analyzer", - "rev": "0b68402d781955d526b80e5d479e9e47addb4075", + "rev": "fface27171988b3d605ef45cf986c25533116f7e", "type": "github" }, "original": { @@ -153,11 +206,11 @@ ] }, "locked": { - "lastModified": 1727431250, - "narHash": "sha256-uGRlRT47ecicF9iLD1G3g43jn2e+b5KaMptb59LHnvM=", + "lastModified": 1762938485, + "narHash": "sha256-AlEObg0syDl+Spi4LsZIBrjw+snSVU4T8MOeuZJUJjM=", "owner": "numtide", "repo": "treefmt-nix", - "rev": "879b29ae9a0378904fbbefe0dadaed43c8905754", + "rev": "5b4ee75aeefd1e2d5a1cc43cf6ba65eba75e83e4", "type": "github" }, "original": { diff --git a/flake.nix b/flake.nix index 041508b..b95c598 100644 --- a/flake.nix +++ b/flake.nix @@ -1,16 +1,37 @@ { inputs = { - nixpkgs.url = "github:nixos/nixpkgs/nixpkgs-unstable"; + nixpkgs.url = "github:nixos/nixpkgs/nixos-25.05"; flake-parts.url = "github:hercules-ci/flake-parts"; - treefmt-nix.url = "github:numtide/treefmt-nix"; - treefmt-nix.inputs.nixpkgs.follows = "nixpkgs"; + flake-compat = { + url = "github:edolstra/flake-compat"; + flake = false; + }; + treefmt-nix = { + url = "github:numtide/treefmt-nix"; + inputs.nixpkgs.follows = "nixpkgs"; + }; crane.url = "github:ipetkov/crane"; - fenix.url = "github:nix-community/fenix"; - fenix.inputs.nixpkgs.follows = "nixpkgs"; - devshell.url = "github:numtide/devshell"; - devshell.inputs.nixpkgs.follows = "nixpkgs"; - advisory-db.url = "github:rustsec/advisory-db"; - advisory-db.flake = false; + fenix = { + url = "github:nix-community/fenix"; + inputs.nixpkgs.follows = "nixpkgs"; + }; + cardano-node = { + # Temporarily for `cardano-cli`, for `hydra-node`: + url = "github:IntersectMBO/cardano-node/10.5.3"; + flake = false; # otherwise, +2k dependencies we don’t really use + }; + hydra = { + url = "github:cardano-scaling/hydra/1.0.0"; + flake = false; + }; + devshell = { + url = "github:numtide/devshell"; + inputs.nixpkgs.follows = "nixpkgs"; + }; + advisory-db = { + url = "github:rustsec/advisory-db"; + flake = false; + }; }; outputs = inputs: let @@ 
-43,7 +64,7 @@ alejandra.enable = true; # Nix prettier.enable = true; rustfmt.enable = true; - rustfmt.package = internal.rustfmt; + rustfmt.package = internal.rustPackages.rustfmt; yamlfmt.enable = pkgs.system != "x86_64-darwin"; # a treefmt-nix+yamlfmt bug on Intel Macs taplo.enable = true; # TOML shfmt.enable = true; diff --git a/nix/devshells.nix b/nix/devshells.nix index 2dd6a93..0c6133b 100644 --- a/nix/devshells.nix +++ b/nix/devshells.nix @@ -14,7 +14,10 @@ in { ]; devshell.packages = - [pkgs.unixtools.xxd] + [ + pkgs.unixtools.xxd + internal.rustPackages.clippy + ] ++ lib.optionals pkgs.stdenv.isLinux [ pkgs.pkg-config ] @@ -24,10 +27,19 @@ in { commands = [ {package = inputs.self.formatter.${pkgs.system};} - {package = config.language.rust.packageSet.cargo;} + { + name = "cargo"; + package = internal.rustPackages.cargo; + } {package = pkgs.cargo-nextest;} - {package = pkgs.rust-analyzer;} + # TODO: add .envrc.local with node env. exports + { + name = "cardano-cli"; + package = internal.cardano-cli; + } + {package = internal.rustPackages.rust-analyzer;} {package = pkgs.doctl;} + {package = internal.hydra-node;} ]; language.c = { @@ -36,13 +48,34 @@ in { then pkgs.gcc else pkgs.clang; includes = internal.commonArgs.buildInputs; + libraries = internal.commonArgs.buildInputs; + }; + + language.rust = { + packageSet = internal.rustPackages; + tools = ["cargo" "rustfmt"]; # The rest is provided below. + enableDefaultToolchain = true; }; - language.rust.packageSet = - pkgs.rustPackages - // { - inherit (internal) rustfmt; - }; + env = + internal.hydraScriptsEnvVars + ++ lib.optionals pkgs.stdenv.isDarwin [ + { + name = "LIBCLANG_PATH"; + value = internal.commonArgs.LIBCLANG_PATH; + } + ] + ++ lib.optionals pkgs.stdenv.isLinux [ + # Embed runtime libs in `RPATH`: + { + name = "RUSTFLAGS"; + eval = ''"-Clink-arg=-fuse-ld=bfd -Clink-arg=-Wl,-rpath,$(pkg-config --variable=libdir openssl libpq | tr ' ' :)"''; + } + { + name = "LD_LIBRARY_PATH"; + eval = lib.mkForce ""; + } + ]; devshell.motd = '' diff --git a/nix/internal/unix.nix b/nix/internal/unix.nix index ce55551..1660779 100644 --- a/nix/internal/unix.nix +++ b/nix/internal/unix.nix @@ -16,7 +16,9 @@ assert builtins.elem targetSystem ["x86_64-linux" "aarch64-linux" "aarch64-darwi ) {inherit inputs targetSystem unix;}; in extendForTarget rec { - craneLib = inputs.crane.mkLib pkgs; + rustPackages = inputs.fenix.packages.${pkgs.system}.stable; + + craneLib = (inputs.crane.mkLib pkgs).overrideToolchain rustPackages.toolchain; src = lib.cleanSourceWith { src = lib.cleanSource ../../.; @@ -27,24 +29,33 @@ in name = "source"; }; - commonArgs = { - inherit src; - strictDeps = true; - nativeBuildInputs = lib.optionals pkgs.stdenv.isLinux [ - pkgs.pkg-config - ]; - buildInputs = - [pkgs.postgresql] - ++ lib.optionals pkgs.stdenv.isLinux [ - pkgs.openssl - ] - ++ lib.optionals pkgs.stdenv.isDarwin [ - pkgs.libiconv - pkgs.darwin.apple_sdk_12_3.frameworks.SystemConfiguration - pkgs.darwin.apple_sdk_12_3.frameworks.Security - pkgs.darwin.apple_sdk_12_3.frameworks.CoreFoundation + commonArgs = + { + inherit src; + strictDeps = true; + nativeBuildInputs = lib.optionals pkgs.stdenv.isLinux [ + pkgs.pkg-config ]; - }; + buildInputs = + [pkgs.postgresql] + ++ lib.optionals pkgs.stdenv.isLinux [ + pkgs.openssl + ] + ++ lib.optionals pkgs.stdenv.isDarwin [ + pkgs.libiconv + pkgs.darwin.apple_sdk_12_3.frameworks.SystemConfiguration + pkgs.darwin.apple_sdk_12_3.frameworks.Security + pkgs.darwin.apple_sdk_12_3.frameworks.CoreFoundation + ]; + } + // 
lib.optionalAttrs pkgs.stdenv.isDarwin { + # for bindgen, used by libproc, used by metrics_process + LIBCLANG_PATH = "${lib.getLib pkgs.llvmPackages.libclang}/lib"; + } + // lib.optionalAttrs pkgs.stdenv.isLinux { + # The linker bundled with Fenix has wrong interpreter path, and it fails with ENOENT, so: + RUSTFLAGS = "-Clink-arg=-fuse-ld=bfd"; + }; # For better caching: cargoArtifacts = craneLib.buildDepsOnly commonArgs; @@ -58,10 +69,14 @@ in inherit cargoArtifacts GIT_REVISION; doCheck = false; # we run tests with `cargo-nextest` below meta.mainProgram = packageName; - }); - - # We use a newer `rustfmt`: - inherit (inputs.fenix.packages.${pkgs.system}.stable) rustfmt; + postInstall = '' + mv $out/bin $out/libexec + mkdir -p $out/bin + ( cd $out/bin && ln -s ../libexec/${packageName} ./ ; ) + ln -s ${hydra-node}/bin/hydra-node $out/libexec/ + ''; + } + // (builtins.listToAttrs hydraScriptsEnvVars)); cargoChecks = { cargo-clippy = craneLib.cargoClippy (commonArgs @@ -69,13 +84,15 @@ in inherit cargoArtifacts GIT_REVISION; # Maybe also add `--deny clippy::pedantic`? cargoClippyExtraArgs = "--all-targets --all-features -- --deny warnings"; - }); + } + // (builtins.listToAttrs hydraScriptsEnvVars)); cargo-doc = craneLib.cargoDoc (commonArgs // { inherit cargoArtifacts GIT_REVISION; RUSTDOCFLAGS = "-D warnings"; - }); + } + // (builtins.listToAttrs hydraScriptsEnvVars)); cargo-audit = craneLib.cargoAudit { inherit src; @@ -89,7 +106,8 @@ in cargo-test = craneLib.cargoNextest (commonArgs // { inherit cargoArtifacts GIT_REVISION; - }); + } + // (builtins.listToAttrs hydraScriptsEnvVars)); }; nixChecks = { @@ -143,4 +161,54 @@ in exit $ec ''; }; + + hydra-flake = (import inputs.flake-compat {src = inputs.hydra;}).defaultNix; + + hydraVersion = hydra-flake.legacyPackages.${targetSystem}.hydra-node.identifier.version; + + hydraNetworksJson = builtins.path { + path = hydra-flake + "/hydra-node/networks.json"; + }; + + hydraScriptsEnvVars = map (network: { + name = "HYDRA_SCRIPTS_TX_ID_${lib.strings.toUpper network}"; + value = (builtins.fromJSON (builtins.readFile hydraNetworksJson)).${network}.${hydraVersion}; + }) ["mainnet" "preprod" "preview"]; + + hydra-node = lib.recursiveUpdate hydra-flake.packages.${targetSystem}.hydra-node { + meta.description = "Layer 2 scalability solution for Cardano"; + }; + + cardano-node-flake = let + unpatched = inputs.cardano-node; + in + (import inputs.flake-compat { + src = + if targetSystem != "aarch64-darwin" && targetSystem != "aarch64-linux" + then unpatched + else { + outPath = toString (pkgs.runCommand "source" {} '' + cp -r ${unpatched} $out + chmod -R +w $out + cd $out + echo ${lib.escapeShellArg (builtins.toJSON [targetSystem])} >$out/nix/supported-systems.nix + ${lib.optionalString (targetSystem == "aarch64-linux") '' + sed -r 's/"-fexternal-interpreter"//g' -i $out/nix/haskell.nix + ''} + ''); + inherit (unpatched) rev shortRev lastModified lastModifiedDate; + }; + }) + .defaultNix; + + cardano-node-packages = + { + x86_64-linux = cardano-node-flake.hydraJobs.x86_64-linux.musl; + inherit (cardano-node-flake.packages) x86_64-darwin aarch64-darwin aarch64-linux; + } + .${ + targetSystem + }; + + inherit (cardano-node-packages) cardano-cli; } diff --git a/rustfmt.toml b/rustfmt.toml index 379f405..33912ff 100644 --- a/rustfmt.toml +++ b/rustfmt.toml @@ -1,17 +1,14 @@ -# General hard_tabs = false tab_spaces = 4 -newline_style = "Unix" -max_width = 120 +max_width = 100 -# Control Flow fn_call_width = 60 + match_block_trailing_comma = true -# Ordering 
reorder_imports = true reorder_modules = true -# Misc use_try_shorthand = true use_field_init_shorthand = true +style_edition = "2024" diff --git a/src/api/register.rs b/src/api/register.rs index f65f301..b4969b2 100644 --- a/src/api/register.rs +++ b/src/api/register.rs @@ -59,7 +59,7 @@ pub async fn route( let is_testnet_address = payload.reward_address.starts_with("addr_test"); - if config.server.is_testnet { + if config.server.network.is_testnet() { if !is_testnet_address { return Err(APIError::Validation( "Network and address mismatch: mainnet address provided on testnet".to_string(), @@ -92,7 +92,12 @@ pub async fn route( .and_then(|val| val.to_str().ok()) { // multiple ips are provided, take the first. - ip_header_value.split(',').next().unwrap_or("").trim().to_string() + ip_header_value + .split(',') + .next() + .unwrap_or("") + .trim() + .to_string() } else { // fallback to the IP from the connection info (useful for localhost testing) addr.ip().to_string() @@ -160,7 +165,11 @@ pub async fn route( }; let token = load_balancer - .new_access_token(asset.asset_name, payload.api_prefix) + .new_access_token( + asset.asset_name, + payload.api_prefix, + &payload.reward_address, + ) .await; let success_response = ResponseSuccess { diff --git a/src/blockfrost.rs b/src/blockfrost.rs index 0073be5..b3b1d8b 100644 --- a/src/blockfrost.rs +++ b/src/blockfrost.rs @@ -1,6 +1,6 @@ use crate::errors::APIError; +use crate::types::AssetName; use blockfrost::{BlockFrostSettings, BlockfrostAPI as bf_sdk}; -use serde::{Deserialize, Serialize}; #[derive(Clone)] pub struct BlockfrostAPI { @@ -8,14 +8,6 @@ pub struct BlockfrostAPI { policy_id_size: usize, } -#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)] -pub struct AssetName(pub String); -impl AssetName { - pub fn as_str(&self) -> &str { - &self.0 - } -} - pub struct Asset { pub asset_name: AssetName, } @@ -39,8 +31,8 @@ impl BlockfrostAPI { } let asset_hex = &unit[self.policy_id_size..]; - let decoded = - hex::decode(asset_hex).map_err(|err| APIError::License(format!("Hex decoding failed: {}", err)))?; + let decoded = hex::decode(asset_hex) + .map_err(|err| APIError::License(format!("Hex decoding failed: {}", err)))?; let asset_name = AssetName(String::from_utf8_lossy(&decoded).to_string()); @@ -49,17 +41,27 @@ impl BlockfrostAPI { // Check if NFT exists at the address pub async fn nft_exists(&self, address: &str, asset: &str) -> Result<Asset, APIError> { + if cfg!(feature = "dev_mock_db") { + return Ok(Asset { + asset_name: AssetName("IcebreakerX".to_string()), + }); + } + let bf_result = self .api .addresses(address) .await .map_err(|err| APIError::License(err.to_string()))?; - let found_asset = bf_result.amount.iter().filter(|x| x.unit != "lovelace").find(|x| { - x.unit.len() >= self.policy_id_size - && &x.unit[..self.policy_id_size] == asset - && x.quantity.parse::<u64>().unwrap_or(0) > 0 - }); + let found_asset = bf_result + .amount + .iter() + .filter(|x| x.unit != "lovelace") + .find(|x| { + x.unit.len() >= self.policy_id_size + && &x.unit[..self.policy_id_size] == asset + && x.quantity.parse::<u64>().unwrap_or(0) > 0 + }); let found_asset_unit = match found_asset { Some(a) => &a.unit, diff --git a/src/config.rs b/src/config.rs index 52418ee..54269fa 100644 --- a/src/config.rs +++ b/src/config.rs @@ -1,3 +1,5 @@ +use crate::types::Network; +use anyhow::{Result, anyhow}; use clap::Parser; use serde::{Deserialize, Deserializer}; use std::env::var; @@ -49,7 +51,7 @@ pub struct Server { pub address: String, #[serde(deserialize_with =
"deserialize_log_level")] pub log_level: Level, - pub is_testnet: bool, + pub network: Network, pub url: Option, } @@ -63,6 +65,7 @@ pub struct ConfigInput { pub server: ServerInput, pub database: DbInput, pub blockfrost: BlockfrostInput, + pub hydra: Option, } #[derive(Debug, Deserialize, Clone)] @@ -70,6 +73,7 @@ pub struct Config { pub server: Server, pub database: Db, pub blockfrost: Blockfrost, + pub hydra: Option, } #[derive(Debug, Deserialize, Clone)] @@ -78,9 +82,25 @@ pub struct Blockfrost { pub nft_asset: String, } +#[derive(Debug, Deserialize, Clone)] +pub struct HydraConfig { + pub cardano_signing_key: PathBuf, + pub max_concurrent_hydra_nodes: u64, + pub node_socket_path: PathBuf, + /// How much to commit from [`Self::cardano_signing_key`] when starting a new L2 session. + pub commit_ada: f64, + /// How much is a single request worth? + pub lovelace_per_request: u64, + /// How many requests to bundle for a single microtransaction payment on L2. + pub requests_per_microtransaction: u64, + /// How many L2 microtransactions until we flush to L1. + pub microtransactions_per_fanout: u64, +} + pub fn load_config(path: PathBuf) -> Config { let config_file_content = fs::read_to_string(path).expect("Reading config failed"); - let toml_config: ConfigInput = toml::from_str(&config_file_content).expect("Config file is invalid"); + let toml_config: ConfigInput = + toml::from_str(&config_file_content).expect("Config file is invalid"); let log_level = match toml_config.server.log_level.to_lowercase().as_str() { "debug" => Level::DEBUG, @@ -105,16 +125,19 @@ pub fn load_config(path: PathBuf) -> Config { Some(file_path) => read_to_string(file_path) .expect("Failed to read project ID file") .to_string(), - None => toml_config.blockfrost.project_id.expect("project_id must be provided"), + None => toml_config + .blockfrost + .project_id + .expect("project_id must be provided"), }; - let is_testnet = project_id.contains("preview"); + let network = network_from_project_id(&project_id).unwrap(); let config = Config { server: Server { address: toml_config.server.address, log_level, - is_testnet, + network, url: toml_config.server.url, }, database: Db { connection_string }, @@ -122,19 +145,35 @@ pub fn load_config(path: PathBuf) -> Config { project_id, nft_asset: toml_config.blockfrost.nft_asset, }, + hydra: toml_config.hydra, }; override_with_env(config) } +fn network_from_project_id(project_id: &str) -> Result { + if project_id.starts_with("mainnet") { + Ok(Network::Mainnet) + } else if project_id.starts_with("preprod") { + Ok(Network::Preprod) + } else if project_id.starts_with("preview") { + Ok(Network::Preview) + } else { + Err(anyhow!( + "cannot infer Cardano network from the Blockfrost project id" + )) + } +} + fn override_with_env(config: Config) -> Config { let server_url = var("SERVER_URL").ok().or(config.server.url.clone()); let server_address = var("SERVER_ADDRESS").unwrap_or(config.server.address); - let log_level_str = var("SERVER_LOG_LEVEL").unwrap_or_else(|_| config.server.log_level.to_string()); + let log_level_str = + var("SERVER_LOG_LEVEL").unwrap_or_else(|_| config.server.log_level.to_string()); let db_connection = var("DB_CONNECTION_STRING").unwrap_or(config.database.connection_string); let project_id = var("BLOCKFROST_PROJECT_ID").unwrap_or(config.blockfrost.project_id); let nft_asset = var("BLOCKFROST_NFT_ASSET").unwrap_or(config.blockfrost.nft_asset); - let is_testnet = project_id.contains("preview"); + let network = network_from_project_id(&project_id).unwrap(); let 
final_log_level = match log_level_str.to_lowercase().as_str() { "debug" => Level::DEBUG, @@ -149,12 +188,16 @@ fn override_with_env(config: Config) -> Config { server: Server { address: server_address, log_level: final_log_level, - is_testnet, + network, url: server_url, }, database: Db { connection_string: db_connection, }, - blockfrost: Blockfrost { project_id, nft_asset }, + blockfrost: Blockfrost { + project_id, + nft_asset, + }, + hydra: config.hydra, } } diff --git a/src/db.rs b/src/db.rs index 45e9b54..a78e270 100644 --- a/src/db.rs +++ b/src/db.rs @@ -5,7 +5,7 @@ use crate::{ }; use deadpool_diesel::postgres::{Manager, Pool}; use diesel::prelude::*; -use diesel_migrations::{embed_migrations, EmbeddedMigrations, MigrationHarness}; +use diesel_migrations::{EmbeddedMigrations, MigrationHarness, embed_migrations}; use schema::users::dsl::*; pub const MIGRATIONS: EmbeddedMigrations = embed_migrations!("migrations/"); @@ -18,9 +18,15 @@ pub struct DB { impl DB { pub async fn new(database_url: &str) -> Self { let manager = Manager::new(database_url, deadpool_diesel::Runtime::Tokio1); - let pool = Pool::builder(manager).build().expect("Failed to create pool."); - let connection = pool.get().await.expect("Failed to get a connection."); + let pool = Pool::builder(manager) + .build() + .expect("Failed to create pool."); + + if cfg!(feature = "dev_mock_db") { + return Self { pool }; + } + let connection = pool.get().await.expect("Failed to get a connection."); connection .interact(|c| c.run_pending_migrations(MIGRATIONS).map(|_| ())) .await @@ -31,6 +37,17 @@ } pub async fn insert_request(&self, request: RequestNewItem) -> Result<Request, APIError> { + if cfg!(feature = "dev_mock_db") { + return Ok(Request { + id: 42, + route: request.route, + mode: request.mode, + ip_address: request.ip_address, + port: request.port, + reward_address: request.reward_address, + }); + } + let db_pool = self.pool.get().await?; let result = db_pool @@ -46,10 +63,29 @@ } pub async fn authorize_user(&self, secret_param: String) -> Result<User, APIError> { + if cfg!(feature = "dev_mock_db") { + return Ok(User { + id: 31337, + created_at: chrono::NaiveDateTime::parse_from_str( + "2015-09-05 23:56:04", + "%Y-%m-%d %H:%M:%S", + ) + .unwrap(), + user_id: 31337, + email: "xxx@xxx.xxx".to_string(), + secret: "xxxxxxxx".to_string(), + }); + } + let db_pool = self.pool.get().await?; let user_result: Option<User> = db_pool - .interact(|db_pool| users.filter(secret.eq(secret_param)).first::<User>(db_pool).optional()) + .interact(|db_pool| { + users + .filter(secret.eq(secret_param)) + .first::<User>(db_pool) + .optional() + }) .await??; if let Some(user) = user_result { diff --git a/src/errors.rs b/src/errors.rs index c851460..34508d5 100644 --- a/src/errors.rs +++ b/src/errors.rs @@ -1,5 +1,5 @@ use axum::response::{IntoResponse, Response}; -use axum::{http, Json}; +use axum::{Json, http}; use http::StatusCode; use serde::{Deserialize, Serialize}; use std::net::SocketAddr; @@ -78,7 +78,9 @@ impl IntoResponse for APIError { details: "You are not authorized to access the registration.".to_string(), }, ), - APIError::DatabaseConnection(_) | APIError::DatabaseQuery(_) | APIError::DatabaseInteraction(_) => ( + APIError::DatabaseConnection(_) + | APIError::DatabaseQuery(_) + | APIError::DatabaseInteraction(_) => ( StatusCode::INTERNAL_SERVER_ERROR, ApiError { status: "failed".to_string(), diff --git a/src/find_libexec.rs b/src/find_libexec.rs new file mode 100644 index 0000000..5e7eb9a --- /dev/null +++ b/src/find_libexec.rs @@ -0,0 +1,94 @@ +use std::{ + env, +
path::{Path, PathBuf}, + process::Command, +}; + +use tracing::debug; + +/// Searches for a “libexec” executable in multiple expected directories. +/// +/// These are executables we use sort of like libraries, without linking them +/// into our executable. E.g. `hydra-node`, `testgen-hs`. +/// +/// # Arguments +/// +/// * `exe_name` - The name of the executable (without `.exe` on Windows). +/// +/// * `env_name` - Allow overriding the path to the executable with this +/// environment variable name. +/// +/// * `test_args` - Arguments to a test invocation of the found command (to +/// check that it really is executable). Maybe in the future we should have a +/// lambda to actually look at the output of this invocation? +/// +pub fn find_libexec(exe_name: &str, env_name: &str, test_args: &[&str]) -> Result<String, String> { + let env_var_dir: Option<PathBuf> = env::var(env_name) + .ok() + .and_then(|a| PathBuf::from(a).parent().map(|a| a.to_path_buf())); + + // This is the most important one for relocatable directories (that keep the initial + // structure) on Windows, Linux, macOS: + let current_exe_dir: Option<PathBuf> = + std::fs::canonicalize(env::current_exe().map_err(|e| e.to_string())?) + .map_err(|e| e.to_string())? + .parent() + .map(|a| a.to_path_buf().join(exe_name)); + + // Similar, but accounts for the `nix-bundle-exe` structure on Linux: + let current_package_dir: Option<PathBuf> = current_exe_dir + .clone() + .and_then(|a| a.parent().map(PathBuf::from)) + .and_then(|a| a.parent().map(PathBuf::from)); + + let cargo_target_dir: Option<PathBuf> = env::var("CARGO_MANIFEST_DIR") + .ok() + .map(|root| PathBuf::from(root).join("target/testgen-hs/extracted/testgen-hs")); + + let docker_path: Option<PathBuf> = Some(PathBuf::from(format!("/app/{exe_name}"))); + + let system_path: Vec<PathBuf> = env::var("PATH") + .map(|p| env::split_paths(&p).collect()) + .unwrap_or_default(); + + let search_path: Vec<PathBuf> = vec![ + env_var_dir, + current_exe_dir, + current_package_dir, + cargo_target_dir, + docker_path, + ] + .into_iter() + .flatten() + .chain(system_path) + .collect(); + + let extension = if cfg!(target_os = "windows") { + ".exe" + } else { + "" + }; + + let exe_name_ext = format!("{exe_name}{extension}"); + + debug!("{} search directories = {:?}", exe_name_ext, search_path); + + // Checks if the path is runnable. Adjust for platform specifics if needed. + // TODO: check that the --version matches what we expect.
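+ // Note: `output()` returns `Ok` whenever the process can be spawned and run to completion, even with a non-zero exit status, so this only verifies that the file is genuinely executable, not that `test_args` succeeded.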
+    let is_our_executable =
+        |path: &Path| -> bool { Command::new(path).args(test_args).output().is_ok() };
+
+    // Look in each candidate directory to find a matching file
+    for candidate in &search_path {
+        let path = candidate.join(&exe_name_ext);
+
+        if path.is_file() && is_our_executable(path.as_path()) {
+            return Ok(path.to_string_lossy().to_string());
+        }
+    }
+
+    Err(format!(
+        "No valid `{}` binary found in {:?}.",
+        exe_name_ext, &search_path
+    ))
+}
diff --git a/src/hydra.rs b/src/hydra.rs
new file mode 100644
index 0000000..9080a16
--- /dev/null
+++ b/src/hydra.rs
@@ -0,0 +1,871 @@
+use crate::config::HydraConfig as HydraTomlConfig;
+use crate::types::{AssetName, Network};
+use anyhow::{Result, anyhow};
+use serde::Deserialize;
+use std::path::PathBuf;
+use std::sync::Arc;
+use std::time::Duration;
+use tokio::sync::mpsc;
+use tracing::{debug, error, info, warn};
+
+pub mod tunnel;
+pub mod tunnel2;
+pub mod verifications;
+
+// FIXME: this should most probably be back to the default of 600 seconds:
+const CONTESTATION_PERIOD_SECONDS: Duration = Duration::from_secs(60);
+
+// FIXME: shouldn’t this be multiplied by `max_concurrent_hydra_nodes`?
+const MIN_FUEL_LOVELACE: u64 = 15_000_000;
+
+// TODO: At least on Preview that is. Where does this come from exactly?
+const MIN_LOVELACE_PER_TRANSACTION: u64 = 840_450;
+
+/// After cloning, it still represents the same set of [`HydraController`]s.
+#[derive(Clone, Debug)]
+pub struct HydrasManager {
+    config: HydraConfig,
+    /// This is `Arc<Arc<()>>` because we want all clones of the controller to only hold a single copy.
+    #[allow(clippy::redundant_allocation)]
+    controller_counter: Arc<Arc<()>>,
+}
+
+impl HydrasManager {
+    pub async fn new(config: &HydraTomlConfig, network: &Network) -> Result<Self> {
+        // Let’s add some ε of 1% just to be sure about rounding etc.
+        let minimal_commit: f64 = 1.01
+            * (config.lovelace_per_request
+                * config.requests_per_microtransaction
+                * config.microtransactions_per_fanout
+                + MIN_LOVELACE_PER_TRANSACTION) as f64
+            / 1_000_000.0;
+        if config.commit_ada < minimal_commit {
+            Err(anyhow!(
+                "hydras-manager: Please make sure that configured commit_ada ≥ lovelace_per_request * requests_per_microtransaction * microtransactions_per_fanout + {}.",
+                MIN_LOVELACE_PER_TRANSACTION as f64 / 1_000_000.0
+            ))?
+        }
+
+        let microtransaction_lovelace: u64 =
+            config.lovelace_per_request * config.requests_per_microtransaction;
+        if microtransaction_lovelace < MIN_LOVELACE_PER_TRANSACTION {
+            Err(anyhow!(
+                "hydras-manager: Please make sure that each microtransaction will be larger than {} lovelace. Currently it would be {}.",
+                MIN_LOVELACE_PER_TRANSACTION,
+                microtransaction_lovelace,
+            ))?
+        }
+
+        Ok(Self {
+            config: HydraConfig::load(config.clone(), network).await?,
+            controller_counter: Arc::new(Arc::new(())),
+        })
+    }
+
+    pub async fn initialize_key_exchange(
+        &self,
+        originator: &AssetName,
+        req: KeyExchangeRequest,
+    ) -> Result<KeyExchangeResponse> {
+        if req.accepted_platform_h2h_port.is_some() {
+            Err(anyhow!(
+                "`accepted_platform_h2h_port` must not be set in `initialize_key_exchange`"
+            ))?
+        }
+
+        let cur_count = Arc::strong_count(self.controller_counter.as_ref()).saturating_sub(1); // subtract the manager
+        if cur_count as u64 >= self.config.toml.max_concurrent_hydra_nodes {
+            let err = anyhow!(
+                "Too many concurrent `hydra-node`s already running. You can increase the limit in config."
+            );
+            warn!("{}", err);
+            Err(err)?
+        }
+
+        let have_funds: f64 = self
+            .config
+            .lovelace_on_addr(&self.config.gateway_cardano_addr)
+            .await?
as f64 + / 1_000_000.0; + let required_funds_ada: f64 = + self.config.toml.commit_ada + (MIN_FUEL_LOVELACE as f64 / 1_000_000.0); + if have_funds < required_funds_ada { + let err = anyhow!( + "hydra-controller: {}: {} ADA is too little for the Hydra L1 fees and committed funds on the enterprise address associated with {:?}. Please provide at least {} ADA", + originator.as_str(), + have_funds, + self.config.toml.cardano_signing_key, + required_funds_ada, + ); + error!("{}", err); + Err(err)? + } + info!( + "hydra-controller: {}: funds on cardano_signing_key: {:?} ADA", + originator.as_str(), + have_funds + ); + + use verifications::{find_free_tcp_port, read_json_file}; + + let config_dir = mk_config_dir(&self.config.network, originator)?; + self.config.gen_hydra_keys(&config_dir).await?; + + Ok(KeyExchangeResponse { + machine_id: verifications::hashed_machine_id(), + gateway_cardano_vkey: self.config.gateway_cardano_vkey.clone(), + gateway_hydra_vkey: read_json_file(&config_dir.join("hydra.vk"))?, + hydra_scripts_tx_id: hydra_scripts_tx_id(&self.config.network).to_string(), + protocol_parameters: self.config.protocol_parameters.clone(), + contestation_period: CONTESTATION_PERIOD_SECONDS, + proposed_platform_h2h_port: find_free_tcp_port().await?, + gateway_h2h_port: find_free_tcp_port().await?, + kex_done: false, + }) + } + + /// You should first call [`Self::initialize_key_exchange`], and then this + /// function with the initial request/response pair. + pub async fn spawn_new( + &self, + originator: &AssetName, + reward_addr: &str, + initial: (KeyExchangeRequest, KeyExchangeResponse), + final_req: KeyExchangeRequest, + ) -> Result<(HydraController, KeyExchangeResponse)> { + if initial.0 + != (KeyExchangeRequest { + accepted_platform_h2h_port: None, + ..final_req.clone() + }) + { + Err(anyhow!( + "The 2nd `KeyExchangeRequest` must be the same as the 1st one." + ))? + } + + if final_req.accepted_platform_h2h_port != Some(initial.1.proposed_platform_h2h_port) { + Err(anyhow!( + "The Platform must accept the same port that was proposed to it." + ))? + } + + // Clone first, to prevent the nastier race condition: + let maybe_new = Arc::clone(self.controller_counter.as_ref()); + let new_count = Arc::strong_count(self.controller_counter.as_ref()).saturating_sub(1); // subtract the manager + if new_count as u64 > self.config.toml.max_concurrent_hydra_nodes { + Err(anyhow!( + "Too many concurrent `hydra-node`s already running. You can increase the limit in config." + ))? + } + + if !(matches!( + verifications::is_tcp_port_free(initial.1.gateway_h2h_port).await, + Ok(true) + ) && matches!( + verifications::is_tcp_port_free(initial.1.proposed_platform_h2h_port).await, + Ok(true) + )) { + Err(anyhow!( + "The exchanged ports are no longer free on the gateway, please perform another KEx." + ))? 
+        }
+
+        let final_resp = KeyExchangeResponse {
+            kex_done: true,
+            ..initial.1
+        };
+
+        let ctl = HydraController::spawn(
+            self.config.clone(),
+            originator.clone(),
+            reward_addr.to_string(),
+            maybe_new,
+            final_req,
+            final_resp.clone(),
+        )
+        .await?;
+
+        Ok((ctl, final_resp))
+    }
+}
+
+#[derive(Debug, Deserialize, Clone)]
+struct HydraConfig {
+    pub toml: HydraTomlConfig,
+    pub network: Network,
+    pub hydra_node_exe: String,
+    pub cardano_cli_exe: String,
+    pub gateway_cardano_vkey: serde_json::Value,
+    pub gateway_cardano_addr: String,
+    pub protocol_parameters: serde_json::Value,
+}
+
+impl HydraConfig {
+    pub async fn load(toml: HydraTomlConfig, network: &Network) -> Result<Self> {
+        let hydra_node_exe =
+            crate::find_libexec::find_libexec("hydra-node", "HYDRA_NODE_PATH", &["--version"])
+                .map_err(|e| anyhow!(e))?;
+        let cardano_cli_exe =
+            crate::find_libexec::find_libexec("cardano-cli", "CARDANO_CLI_PATH", &["version"])
+                .map_err(|e| anyhow!(e))?;
+        let self_ = Self {
+            toml,
+            network: network.clone(),
+            hydra_node_exe,
+            cardano_cli_exe,
+            gateway_cardano_vkey: serde_json::Value::Null,
+            gateway_cardano_addr: String::new(),
+            protocol_parameters: serde_json::Value::Null,
+        };
+        let gateway_cardano_addr = self_
+            .derive_enterprise_address_from_skey(&self_.toml.cardano_signing_key)
+            .await?;
+        let gateway_cardano_vkey = self_
+            .derive_vkey_from_skey(&self_.toml.cardano_signing_key)
+            .await?;
+        let protocol_parameters = self_.gen_protocol_parameters().await?;
+        let self_ = Self {
+            gateway_cardano_vkey,
+            gateway_cardano_addr,
+            protocol_parameters,
+            ..self_
+        };
+        Ok(self_)
+    }
+}
+
+/// Runs a `hydra-node` and sets up an L2 network with the Platform for microtransactions.
+///
+/// You can safely clone it, and the clone will represent the same `hydra-node` etc.
+#[derive(Clone)]
+pub struct HydraController {
+    event_tx: mpsc::Sender<Event>,
+    originator: AssetName,
+    _controller_counter: Arc<()>,
+}
+
+// FIXME: send a Quit event on `drop()` of all controller instances
+
+#[derive(serde::Deserialize, serde::Serialize, Debug, PartialEq, Eq, Clone)]
+pub struct KeyExchangeRequest {
+    pub machine_id: String,
+    pub platform_cardano_vkey: serde_json::Value,
+    pub platform_hydra_vkey: serde_json::Value,
+    pub accepted_platform_h2h_port: Option<u16>,
+}
+
+#[derive(serde::Deserialize, serde::Serialize, Debug, PartialEq, Eq, Clone)]
+pub struct KeyExchangeResponse {
+    pub machine_id: String,
+    pub gateway_cardano_vkey: serde_json::Value,
+    pub gateway_hydra_vkey: serde_json::Value,
+    pub hydra_scripts_tx_id: String,
+    pub protocol_parameters: serde_json::Value,
+    pub contestation_period: Duration,
+    /// Unfortunately the ports have to be the same on both sides, so
+    /// since we’re tunneling through the WebSocket, and our hosts are
+    /// both 127.0.0.1, the Gateway has to propose the port on the
+    /// Platform, too (as both sides open both ports).
+    pub proposed_platform_h2h_port: u16,
+    pub gateway_h2h_port: u16,
+    /// This being set to `true` means that the ceremony is successful, and the
+    /// Gateway is going to start its own `hydra-node`, and the Platform should too.
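+    ///
+    /// A sketch of the intended two-round ceremony, as implemented by
+    /// `HydrasManager::initialize_key_exchange` and `HydrasManager::spawn_new`:
+    ///
+    /// ```text
+    /// Platform                                                    Gateway
+    ///   |-- KeyExchangeRequest { accepted_platform_h2h_port: None } -->|
+    ///   |<----- KeyExchangeResponse { kex_done: false, ports, keys } --|
+    ///   |-- KeyExchangeRequest { accepted_platform_h2h_port: Some } -->|
+    ///   |<------------------ KeyExchangeResponse { kex_done: true } --|
+    /// ```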
+    pub kex_done: bool,
+}
+
+impl HydraController {
+    async fn spawn(
+        config: HydraConfig,
+        originator: AssetName,
+        reward_addr: String,
+        controller_counter: Arc<()>,
+        kex_req: KeyExchangeRequest,
+        kex_resp: KeyExchangeResponse,
+    ) -> Result<Self> {
+        let event_tx =
+            State::spawn(config, originator.clone(), reward_addr, kex_req, kex_resp).await?;
+        Ok(Self {
+            event_tx,
+            originator,
+            _controller_counter: controller_counter,
+        })
+    }
+
+    // FIXME: this is too primitive
+    pub fn is_alive(&self) -> bool {
+        !self.event_tx.is_closed()
+    }
+
+    pub async fn account_one_request(&self) {
+        self.event_tx
+            .send(Event::AccountOneRequest)
+            .await
+            .unwrap_or_else(|_| {
+                error!(
+                    "hydra-controller: {}: failed to account one request: event channel closed",
+                    self.originator.as_str()
+                )
+            })
+    }
+
+    pub async fn terminate(&self) {
+        let _ = self.event_tx.send(Event::Terminate).await;
+    }
+}
+
+enum Event {
+    Restart,
+    Terminate,
+    TryToInitHead,
+    FundCommitAddr,
+    TryToCommit,
+    WaitForOpen,
+    AccountOneRequest,
+    WaitForUtxoCount,
+    TryToClose,
+    WaitForClosed { retries_before_reclose: u64 },
+    DoFanout,
+    WaitForIdleAfterClose,
+}
+
+fn mk_config_dir(network: &Network, originator: &AssetName) -> Result<PathBuf> {
+    let config_dir = dirs::config_dir()
+        .ok_or(anyhow!("`dirs::config_dir()` returned `None`"))?
+        .join("blockfrost-gateway")
+        .join("hydra")
+        .join(network.as_str())
+        .join(originator.as_str());
+    std::fs::create_dir_all(&config_dir)?;
+    Ok(config_dir)
+}
+
+// FIXME: don’t construct all key and other paths manually, keep them in a single place
+struct State {
+    config: HydraConfig,
+    originator: AssetName,
+    reward_addr: String,
+    config_dir: PathBuf,
+    event_tx: mpsc::Sender<Event>,
+    kex_req: KeyExchangeRequest,
+    kex_resp: KeyExchangeResponse,
+    api_port: u16,
+    metrics_port: u16,
+    hydra_peers_connected: bool, // FIXME: they can become disconnected…
+    hydra_head_open: bool,
+    accounted_requests: u64,
+    sent_microtransactions: u64,
+    commit_wallet_skey: PathBuf,
+    commit_wallet_addr: String,
+    is_closing: bool,
+    hydra_pid: Option<u32>,
+}
+
+impl State {
+    const RESTART_DELAY: Duration = Duration::from_secs(5);
+
+    async fn spawn(
+        config: HydraConfig,
+        originator: AssetName,
+        reward_addr: String,
+        kex_req: KeyExchangeRequest,
+        kex_resp: KeyExchangeResponse,
+    ) -> Result<mpsc::Sender<Event>> {
+        let config_dir = mk_config_dir(&config.network, &originator)?;
+
+        let (event_tx, mut event_rx) = mpsc::channel::<Event>(32);
+
+        let mut self_ = Self {
+            config,
+            originator,
+            reward_addr,
+            config_dir,
+            event_tx: event_tx.clone(),
+            kex_req,
+            kex_resp,
+            api_port: 0,
+            metrics_port: 0,
+            hydra_peers_connected: false,
+            hydra_head_open: false,
+            accounted_requests: 0,
+            sent_microtransactions: 0,
+            commit_wallet_skey: PathBuf::new(),
+            commit_wallet_addr: String::new(),
+            is_closing: false,
+            hydra_pid: None,
+        };
+
+        self_.send(Event::Restart).await;
+
+        tokio::spawn(async move {
+            while let Some(event) = event_rx.recv().await {
+                match self_.process_event(event).await {
+                    Ok(()) => (),
+                    Err(err) => {
+                        error!(
+                            "hydra-controller: {}: error: {}; will restart in {:?}…",
+                            self_.originator.as_str(),
+                            err,
+                            Self::RESTART_DELAY
+                        );
+                        tokio::time::sleep(Self::RESTART_DELAY).await;
+                        self_.send(Event::Restart).await;
+                    },
+                }
+            }
+        });
+
+        Ok(event_tx)
+    }
+
+    async fn send(&self, event: Event) {
+        self.event_tx
+            .send(event)
+            .await
+            .expect("we never close the event receiver");
+    }
+
+    async fn send_delayed(&self, event: Event, delay: Duration) {
+        let event_tx = self.event_tx.clone();
+        tokio::spawn(async move
{ + tokio::time::sleep(delay).await; + event_tx.send(event).await + }); + } + + async fn process_event(&mut self, event: Event) -> Result<()> { + match event { + Event::Restart => { + info!("hydra-controller: {}: starting…", self.originator.as_str()); + self.start_hydra_node().await?; + self.send_delayed(Event::TryToInitHead, Duration::from_secs(1)) + .await + }, + + Event::Terminate => { + if let Some(pid) = self.hydra_pid { + verifications::sigterm(pid)? + } + }, + + Event::TryToInitHead => { + let ready = verifications::prometheus_metric_at_least( + &format!("http://127.0.0.1:{}/metrics", self.metrics_port), + "hydra_head_peers_connected", + 1.0, + ) + .await; + + info!( + "hydra-controller: {}: waiting for hydras to connect: ready={:?}", + self.originator.as_str(), + ready + ); + + if matches!(ready, Ok(true)) { + self.hydra_peers_connected = true; + + verifications::send_one_websocket_msg( + &format!("ws://127.0.0.1:{}/", self.api_port), + serde_json::json!({"tag":"Init"}), + Duration::from_secs(2), + ) + .await?; + + self.send_delayed(Event::FundCommitAddr, Duration::from_secs(3)) + .await + } else { + self.send_delayed(Event::TryToInitHead, Duration::from_secs(1)) + .await + } + }, + + Event::FundCommitAddr => { + let status = verifications::fetch_head_tag(self.api_port).await?; + + info!( + "hydra-controller: {}: waiting for the Initial head status: status={:?}", + self.originator.as_str(), + status + ); + + if status == "Initial" || status == "Open" { + let commit_wallet = self.config_dir.join("commit-funds"); + self.commit_wallet_skey = commit_wallet.with_extension("sk"); + + if !std::fs::exists(&self.commit_wallet_skey)? { + if status == "Open" { + Err(anyhow!( + "Head status is Open, but there’s no commit wallet anymore; this shouldn’t really happen, we don’t yet know how to handle it" + ))? 
+ } + + self.config.new_cardano_keypair(&commit_wallet).await?; + } + + self.commit_wallet_addr = self + .config + .derive_enterprise_address_from_skey(&self.commit_wallet_skey) + .await?; + + if status == "Initial" { + self.config + .fund_address( + &self.config.gateway_cardano_addr, + &self.commit_wallet_addr, + (self.config.toml.commit_ada * 1_000_000.0).round() as u64, + &self.config.toml.cardano_signing_key, + ) + .await?; + + self.send_delayed(Event::TryToCommit, Duration::from_secs(3)) + .await + } else if status == "Open" { + warn!( + "hydra-controller: {}: turns out the Head is already Open, skipping Commit", + self.originator.as_str(), + ); + self.send_delayed(Event::WaitForOpen, Duration::from_secs(3)) + .await + } + } else { + self.send_delayed(Event::FundCommitAddr, Duration::from_secs(3)) + .await + } + }, + + Event::TryToCommit => { + let commit_wallet_lovelace = self + .config + .lovelace_on_addr(&self.commit_wallet_addr) + .await?; + + let lovelace_needed = 0.99 * self.config.toml.commit_ada * 1_000_000.0; + + info!( + "hydra-controller: {}: waiting for enough lovelace (> {}) to appear on the commit address: lovelace={:?}", + self.originator.as_str(), + lovelace_needed.round(), + commit_wallet_lovelace + ); + + if commit_wallet_lovelace as f64 >= lovelace_needed { + info!( + "hydra-controller: {}: submitting a Commit transaction to join the Hydra Head", + self.originator.as_str() + ); + self.config + .commit_all_utxo_to_hydra( + &self.commit_wallet_addr, + self.api_port, + &self.commit_wallet_skey, + ) + .await?; + + self.send_delayed(Event::WaitForOpen, Duration::from_secs(3)) + .await + } else { + self.send_delayed(Event::TryToCommit, Duration::from_secs(3)) + .await + } + }, + + Event::WaitForOpen => { + let status = verifications::fetch_head_tag(self.api_port).await?; + info!( + "hydra-controller: {}: waiting for the Open head status: status={:?}", + self.originator.as_str(), + status + ); + if status == "Open" { + self.hydra_head_open = true; + } else { + self.send_delayed(Event::WaitForOpen, Duration::from_secs(3)) + .await + } + }, + + Event::AccountOneRequest => { + self.accounted_requests += 1; + + if self.accounted_requests >= self.config.toml.requests_per_microtransaction { + if self.is_closing { + warn!( + "hydra-controller: {}: would send a microtransaction, but the Hydra Head state is currently closing for `Fanout` (backlog of requests: {})", + self.originator.as_str(), + self.accounted_requests + ) + } else if self.hydra_head_open { + info!( + "hydra-controller: {}: sending a microtransaction", + self.originator.as_str() + ); + let amount_lovelace: u64 = + self.accounted_requests * self.config.toml.lovelace_per_request; + self.config + .send_hydra_transaction( + self.api_port, + &self.commit_wallet_addr, + &self.reward_addr, + &self.commit_wallet_skey, + amount_lovelace, + ) + .await?; + + self.accounted_requests = 0; + self.sent_microtransactions += 1; + + if self.sent_microtransactions + >= self.config.toml.microtransactions_per_fanout + { + self.is_closing = true; + self.send_delayed(Event::WaitForUtxoCount, Duration::from_secs(3)) + .await; + } + } else { + warn!( + "hydra-controller: {}: would send a microtransaction, but the Hydra Head state is still not `Open` (backlog of requests: {})", + self.originator.as_str(), + self.accounted_requests + ) + } + } + }, + + Event::WaitForUtxoCount => { + // XXX: `1 +`, because we also have the source UTxO of the `commit_wallet` + let expected_count = 1 + self.sent_microtransactions; + let current_count = 
self.config.hydra_utxo_count(self.api_port).await?; + + if current_count >= expected_count { + info!( + "hydra-controller: {}: got correct L2 UTxO count, will Close now…", + self.originator.as_str() + ); + self.send_delayed(Event::TryToClose, Duration::from_secs(1)) + .await; + } else { + warn!( + "hydra-controller: {}: still have incorrect L2 UTxO count: {}, expected {}", + self.originator.as_str(), + current_count, + expected_count + ); + self.send_delayed(Event::WaitForUtxoCount, Duration::from_secs(3)) + .await; + } + }, + + Event::TryToClose => { + verifications::send_one_websocket_msg( + &format!("ws://127.0.0.1:{}", self.api_port), + serde_json::json!({"tag":"Close"}), + Duration::from_secs(2), + ) + .await?; + self.send_delayed( + Event::WaitForClosed { + retries_before_reclose: 10, + }, + Duration::from_secs(3), + ) + .await; + }, + + Event::WaitForClosed { + retries_before_reclose, + } => { + let status = verifications::fetch_head_tag(self.api_port).await?; + info!( + "hydra-controller: {}: waiting for the Closed head status: status={:?}", + self.originator.as_str(), + status + ); + if status == "Closed" { + let invalidity_period = (2 + 1) * CONTESTATION_PERIOD_SECONDS; + info!( + "hydra-controller: {}: will wait through the invalidity period ({:?}) before requesting `Fanout`", + self.originator.as_str(), + invalidity_period, + ); + self.send_delayed(Event::DoFanout, invalidity_period).await + } else { + self.send_delayed( + if retries_before_reclose <= 1 { + Event::TryToClose + } else { + Event::WaitForClosed { + retries_before_reclose: retries_before_reclose - 1, + } + }, + Duration::from_secs(3), + ) + .await + } + }, + + Event::DoFanout => { + info!( + "hydra-controller: {}: requesting `Fanout`", + self.originator.as_str(), + ); + verifications::send_one_websocket_msg( + &format!("ws://127.0.0.1:{}", self.api_port), + serde_json::json!({"tag":"Fanout"}), + Duration::from_secs(2), + ) + .await?; + self.send_delayed(Event::WaitForIdleAfterClose, Duration::from_secs(3)) + .await; + }, + + Event::WaitForIdleAfterClose => { + let status = verifications::fetch_head_tag(self.api_port).await?; + info!( + "hydra-controller: {}: waiting for the Idle head status (after Fanout): status={:?}", + self.originator.as_str(), + status + ); + if status == "Idle" { + info!( + "hydra-controller: {}: re-initializing the Hydra Head for another L2 session", + self.originator.as_str(), + ); + + self.send_delayed(Event::TryToInitHead, Duration::from_secs(3)) + .await; + } else { + self.send_delayed(Event::WaitForIdleAfterClose, Duration::from_secs(3)) + .await; + } + }, + } + Ok(()) + } + + async fn start_hydra_node(&mut self) -> Result<()> { + use std::process::Stdio; + use tokio::io::{AsyncBufReadExt, BufReader}; + + self.api_port = verifications::find_free_tcp_port().await?; + self.metrics_port = verifications::find_free_tcp_port().await?; + + // FIXME: somehow do shutdown once we’re killed + // cf. + // cf. + // TODO: Write a ticket in `hydra-node`. 
+
+        let protocol_parameters_path = self.config_dir.join("protocol-parameters.json");
+        verifications::write_json_if_changed(
+            &protocol_parameters_path,
+            &self.kex_resp.protocol_parameters,
+        )?;
+
+        let platform_hydra_vkey_path = self.config_dir.join("platform-hydra.vk");
+        verifications::write_json_if_changed(
+            &platform_hydra_vkey_path,
+            &self.kex_req.platform_hydra_vkey,
+        )?;
+
+        let platform_cardano_vkey_path = self.config_dir.join("platform-payment.vk");
+        verifications::write_json_if_changed(
+            &platform_cardano_vkey_path,
+            &self.kex_req.platform_cardano_vkey,
+        )?;
+
+        let mut child = tokio::process::Command::new(&self.config.hydra_node_exe)
+            .arg("--node-id")
+            .arg("platform-node")
+            .arg("--persistence-dir")
+            .arg(self.config_dir.join("persistence"))
+            .arg("--cardano-signing-key")
+            .arg(&self.config.toml.cardano_signing_key) // FIXME: copy it somewhere else in case the source file changes
+            .arg("--hydra-signing-key")
+            .arg(self.config_dir.join("hydra.sk"))
+            .arg("--hydra-scripts-tx-id")
+            .arg(&self.kex_resp.hydra_scripts_tx_id)
+            .arg("--ledger-protocol-parameters")
+            .arg(&protocol_parameters_path) // FIXME: copy it somewhere else in case the source file changes
+            .arg("--contestation-period")
+            .arg(format!("{}s", self.kex_resp.contestation_period.as_secs()))
+            .args(if self.config.network == Network::Mainnet {
+                vec!["--mainnet".to_string()]
+            } else {
+                vec![
+                    "--testnet-magic".to_string(),
+                    format!("{}", self.config.network.network_magic()),
+                ]
+            })
+            .arg("--node-socket")
+            .arg(&self.config.toml.node_socket_path)
+            .arg("--api-port")
+            .arg(format!("{}", self.api_port))
+            .arg("--api-host")
+            .arg("127.0.0.1")
+            .arg("--listen")
+            .arg(format!("127.0.0.1:{}", self.kex_resp.gateway_h2h_port))
+            .arg("--peer")
+            .arg(format!("127.0.0.1:{}", self.kex_resp.proposed_platform_h2h_port))
+            .arg("--monitoring-port")
+            .arg(format!("{}", self.metrics_port))
+            .arg("--hydra-verification-key")
+            .arg(platform_hydra_vkey_path)
+            .arg("--cardano-verification-key")
+            .arg(platform_cardano_vkey_path)
+            .stdin(Stdio::null()) // FIXME: try an empty pipe, and see if it exits on our `kill -9`
+            .stdout(Stdio::piped())
+            .stderr(Stdio::piped())
+            .spawn()?;
+
+        self.hydra_pid = child.id();
+
+        let stdout = child.stdout.take().expect("child stdout");
+        let stderr = child.stderr.take().expect("child stderr");
+
+        tokio::spawn(async move {
+            let mut lines = BufReader::new(stdout).lines();
+            while let Ok(Some(line)) = lines.next_line().await {
+                debug!("hydra-node: {}", line);
+            }
+            debug!("hydra-node: stdout closed");
+        });
+
+        tokio::spawn(async move {
+            let mut lines = BufReader::new(stderr).lines();
+            while let Ok(Some(line)) = lines.next_line().await {
+                info!("hydra-node: {}", line);
+            }
+            info!("hydra-node: stderr closed");
+        });
+
+        let event_tx = self.event_tx.clone();
+        tokio::spawn(async move {
+            match child.wait().await {
+                Ok(status) => {
+                    warn!("hydra-node: exited: {}", status);
+                    tokio::time::sleep(Self::RESTART_DELAY).await;
+                    event_tx
+                        .send(Event::Restart)
+                        .await
+                        .expect("we never close the event receiver");
+                },
+                Err(e) => {
+                    error!("hydra-node: failed to wait: {e}");
+                },
+            }
+        });
+
+        Ok(())
+    }
+}
+
+pub fn hydra_scripts_tx_id(network: &Network) -> &'static str {
+    // FIXME: also define them in a `build.rs` script without Nix – consult
+    // `flake.lock` to get the exact Hydra version.
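+    //
+    // For illustration only, such a `build.rs` could inject the values via
+    // Cargo's `rustc-env` directive (the env names match the `env!` calls
+    // below; the `*_tx_id` bindings are hypothetical):
+    //
+    //     println!("cargo:rustc-env=HYDRA_SCRIPTS_TX_ID_MAINNET={mainnet_tx_id}");
+    //     println!("cargo:rustc-env=HYDRA_SCRIPTS_TX_ID_PREPROD={preprod_tx_id}");
+    //     println!("cargo:rustc-env=HYDRA_SCRIPTS_TX_ID_PREVIEW={preview_tx_id}");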
+    use Network::*;
+    match network {
+        Mainnet => env!("HYDRA_SCRIPTS_TX_ID_MAINNET"),
+        Preprod => env!("HYDRA_SCRIPTS_TX_ID_PREPROD"),
+        Preview => env!("HYDRA_SCRIPTS_TX_ID_PREVIEW"),
+    }
+}
diff --git a/src/hydra/tunnel.rs b/src/hydra/tunnel.rs
new file mode 100644
index 0000000..1debbb4
--- /dev/null
+++ b/src/hydra/tunnel.rs
@@ -0,0 +1,274 @@
+use anyhow::Result;
+use bytes::{Bytes, BytesMut};
+use std::sync::atomic::{AtomicU64, Ordering};
+use tokio::sync::mpsc;
+use tokio_util::sync::CancellationToken;
+
+pub enum TunnelEventThere {
+    Connect {
+        id: u64,
+        write_back: mpsc::Sender<TunnelEventBack>,
+    },
+    Write {
+        id: u64,
+        chunk: Bytes,
+    },
+    Disconnect {
+        id: u64,
+        reason: Option<std::io::Error>,
+    },
+}
+
+pub enum TunnelEventBack {
+    Write { chunk: Bytes },
+    Disconnect { reason: Option<std::io::Error> },
+}
+
+pub mod connect_here {
+    use super::*;
+    use std::collections::HashMap;
+    use tokio::io::{AsyncReadExt, AsyncWriteExt};
+    use tokio::net::TcpStream;
+    use tokio::task::JoinSet;
+
+    enum ConnCmd {
+        Write(Bytes),
+        Disconnect(Option<std::io::Error>),
+    }
+
+    pub async fn run_tunnel(
+        connect_port: u16,
+        mut event_rx: mpsc::Receiver<TunnelEventThere>,
+        cancel: CancellationToken,
+    ) -> Result<()> {
+        let mut conns: HashMap<u64, mpsc::Sender<ConnCmd>> = HashMap::new();
+        let mut joinset: JoinSet<u64> = JoinSet::new();
+
+        loop {
+            tokio::select! {
+                _ = cancel.cancelled() => break,
+
+                // Route incoming tunnel events to per-connection tasks.
+                ev = event_rx.recv() => {
+                    let Some(ev) = ev else { break; };
+
+                    match ev {
+                        TunnelEventThere::Connect { id, write_back } => {
+                            // If a duplicate id appears, drop the old sender (old task will see recv(None) and exit).
+                            conns.remove(&id);
+
+                            let (cmd_tx, cmd_rx) = mpsc::channel::<ConnCmd>(128);
+                            conns.insert(id, cmd_tx);
+
+                            let cancel_conn = cancel.clone();
+                            joinset.spawn(async move {
+                                run_one_connection(connect_port, id, write_back, cmd_rx, cancel_conn).await;
+                                id
+                            });
+                        }
+
+                        TunnelEventThere::Write { id, chunk } => {
+                            if let Some(tx) = conns.get(&id) {
+                                if tx.send(ConnCmd::Write(chunk)).await.is_err() {
+                                    conns.remove(&id);
+                                }
+                            }
+                        }
+
+                        TunnelEventThere::Disconnect { id, reason } => {
+                            if let Some(tx) = conns.remove(&id) {
+                                // Best-effort; if it fails, the task is already gone.
+                                let _ = tx.send(ConnCmd::Disconnect(reason)).await;
+                            }
+                        }
+                    }
+                }
+
+                // Reap finished per-connection tasks and drop their routing entry.
+                res = joinset.join_next(), if !joinset.is_empty() => {
+                    if let Some(Ok(id)) = res {
+                        conns.remove(&id);
+                    }
+                }
+            }
+        }
+
+        // Stop all remaining connections (dropping the senders makes cmd_rx.recv() return None).
+        conns.clear();
+        while joinset.join_next().await.is_some() {}
+
+        Ok(())
+    }
+
+    async fn run_one_connection(
+        connect_port: u16,
+        _id: u64,
+        write_back: mpsc::Sender<TunnelEventBack>,
+        mut cmd_rx: mpsc::Receiver<ConnCmd>,
+        cancel: CancellationToken,
+    ) {
+        let mut sock = match TcpStream::connect(("127.0.0.1", connect_port)).await {
+            Ok(s) => s,
+            Err(e) => {
+                let _ = write_back
+                    .send(TunnelEventBack::Disconnect { reason: Some(e) })
+                    .await;
+                return;
+            },
+        };
+
+        let mut buf = BytesMut::with_capacity(8 * 1024);
+        let cancel_err =
+            || std::io::Error::new(std::io::ErrorKind::Interrupted, "tunnel cancelled");
+
+        let mut notify_disconnect = true;
+        let reason: Option<std::io::Error> = loop {
+            tokio::select! {
+                _ = cancel.cancelled() => break Some(cancel_err()),
+
+                rv = async {
+                    buf.clear();
+                    buf.reserve(8 * 1024);
+                    sock.read_buf(&mut buf).await
+                } => {
+                    match rv {
+                        Ok(0) => break None, // clean EOF
+                        Ok(_) => {
+                            let chunk = buf.split().freeze();
+                            if write_back.send(TunnelEventBack::Write { chunk }).await.is_err() {
+                                // Other side is gone; no point continuing.
+                                notify_disconnect = false;
+                                break None;
+                            }
+                        }
+                        Err(e) => break Some(e),
+                    }
+                }
+
+                cmd = cmd_rx.recv() => {
+                    match cmd {
+                        Some(ConnCmd::Write(chunk)) => {
+                            if let Err(e) = sock.write_all(&chunk).await {
+                                break Some(e);
+                            }
+                        }
+                        Some(ConnCmd::Disconnect(r)) => {
+                            // Peer initiated; don't bother echoing a Disconnect back.
+                            notify_disconnect = false;
+                            break r;
+                        }
+                        None => {
+                            // Router dropped; exit quietly.
+                            notify_disconnect = false;
+                            break None;
+                        }
+                    }
+                }
+            }
+        };
+
+        if notify_disconnect {
+            let _ = write_back
+                .send(TunnelEventBack::Disconnect { reason })
+                .await;
+        }
+
+        let _ = sock.shutdown().await;
+    }
+}
+
+pub mod listen_here {
+    use super::*;
+
+    static NEXT_CONNECTION_ID: AtomicU64 = AtomicU64::new(1);
+
+    pub async fn run_tunnel(
+        listen_port: u16,
+        event_tx_: mpsc::Sender<TunnelEventThere>,
+        cancel: CancellationToken,
+    ) -> Result<()> {
+        use tokio::io::{AsyncReadExt, AsyncWriteExt};
+
+        let listener = tokio::net::TcpListener::bind(("127.0.0.1", listen_port)).await?;
+
+        loop {
+            let (mut sock, _peer) = tokio::select! {
+                _ = cancel.cancelled() => break,
+                res = listener.accept() => res?,
+            };
+
+            let event_tx = event_tx_.clone();
+            let cancel_conn = cancel.clone();
+
+            tokio::spawn(async move {
+                let conn_id = NEXT_CONNECTION_ID.fetch_add(1, Ordering::Relaxed);
+                let (event_back_tx, mut event_back_rx) = mpsc::channel::<TunnelEventBack>(128);
+
+                // If the tunnel receiver is gone, close the socket/task.
+                if event_tx
+                    .send(TunnelEventThere::Connect {
+                        id: conn_id,
+                        write_back: event_back_tx,
+                    })
+                    .await
+                    .is_err()
+                {
+                    let _ = sock.shutdown().await;
+                    return;
+                }
+
+                let mut buf = BytesMut::with_capacity(8 * 1024);
+                let cancel_err =
+                    || std::io::Error::new(std::io::ErrorKind::Interrupted, "tunnel cancelled");
+
+                let reason: Option<std::io::Error> = loop {
+                    tokio::select! {
+                        _ = cancel_conn.cancelled() => break Some(cancel_err()),
+
+                        rv = async {
+                            buf.clear();
+                            buf.reserve(8 * 1024);
+                            sock.read_buf(&mut buf).await
+                        } => {
+                            match rv {
+                                Ok(0) => break None, // clean EOF
+                                Ok(_) => {
+                                    let chunk = buf.split().freeze(); // no copy
+                                    if event_tx.send(TunnelEventThere::Write { id: conn_id, chunk }).await.is_err() {
+                                        let _ = sock.shutdown().await;
+                                        return;
+                                    }
+                                }
+                                Err(e) => break Some(e),
+                            }
+                        }
+
+                        event_back = event_back_rx.recv() => {
+                            match event_back {
+                                Some(TunnelEventBack::Write { chunk }) => {
+                                    if let Err(e) = sock.write_all(&chunk).await {
+                                        break Some(e);
+                                    }
+                                }
+                                Some(TunnelEventBack::Disconnect { reason }) => break reason,
+                                None => break None, // all back-senders dropped
+                            }
+                        }
+                    }
+                };
+
+                // Best-effort; if it fails, the tunnel is already gone.
+                let _ = event_tx
+                    .send(TunnelEventThere::Disconnect {
+                        id: conn_id,
+                        reason,
+                    })
+                    .await;
+
+                let _ = sock.shutdown().await;
+            });
+        }
+
+        Ok(())
+    }
+}
diff --git a/src/hydra/tunnel2.rs b/src/hydra/tunnel2.rs
new file mode 100644
index 0000000..9b783d1
--- /dev/null
+++ b/src/hydra/tunnel2.rs
@@ -0,0 +1,336 @@
+use anyhow::Result;
+use base64::{Engine as _, engine::general_purpose::STANDARD as B64};
+use bytes::{Bytes, BytesMut};
+use serde::{Deserialize, Serialize};
+use std::{
+    collections::HashMap,
+    net::{IpAddr, Ipv4Addr, SocketAddr},
+    sync::{
+        Arc,
+        atomic::{AtomicU64, Ordering},
+    },
+};
+use tokio::{
+    io::{AsyncReadExt, AsyncWriteExt},
+    net::{TcpListener, TcpStream},
+    sync::{Mutex, mpsc},
+};
+use tokio_util::sync::CancellationToken;
+
+/// JSON-serializable tunnel messages (base64 for buffers).
+///
+/// Plug into a WebSocket protocol as e.g. `WsProto::HydraTunnel(TunnelMsg)`.
+#[derive(Debug, Clone, Serialize, Deserialize)]
+#[serde(tag = "t", rename_all = "snake_case")]
+pub enum TunnelMsg {
+    /// Ask peer to open its *configured* local service port for stream `id`.
+    Open { id: u64 },
+
+    /// Bytes for connection `id` encoded as base64 string.
+    Data { id: u64, b64: String },
+
+    /// Close stream `id`. `code` is small+stable, `msg` is optional.
+    Close {
+        id: u64,
+        code: u8,
+        msg: Option<String>,
+    },
+}
+
+pub mod close_code {
+    pub const CLEAN: u8 = 0;
+    pub const IO: u8 = 1;
+    pub const CANCELLED: u8 = 2;
+    pub const PROTOCOL: u8 = 3;
+}
+
+/// Tunnel config.
+#[derive(Debug, Clone)]
+pub struct TunnelConfig {
+    /// Host used for local TCP connects when peer sends Open.
+    pub local_connect_host: IpAddr,
+
+    /// The *only* local port that the peer is allowed to connect to (via Open).
+    pub expose_port: u16,
+
+    /// If true, set bit 63 in all locally-allocated IDs.
+    /// Set opposite values on the two peers to avoid ID collisions.
+    pub id_prefix_bit: bool,
+
+    /// Outbound TunnelMsg buffer (what the WebSocket event loop drains).
+    pub outbound_capacity: usize,
+
+    /// Per-connection command channel capacity.
+    pub per_conn_cmd_capacity: usize,
+
+    /// Max bytes per TCP read.
+    pub read_chunk: usize,
+}
+
+impl Default for TunnelConfig {
+    fn default() -> Self {
+        Self {
+            local_connect_host: IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)),
+            expose_port: 0,
+            id_prefix_bit: false,
+            outbound_capacity: 256,
+            per_conn_cmd_capacity: 128,
+            read_chunk: 8 * 1024,
+        }
+    }
+}
+
+enum ConnCmd {
+    Write(Bytes),
+    /// Close local TCP. If notify_peer=false, don’t emit a Close back.
+    Close {
+        notify_peer: bool,
+    },
+}
+
+struct Inner {
+    cfg: TunnelConfig,
+    cancel: CancellationToken,
+    out_tx: mpsc::Sender<TunnelMsg>,
+    conns: Mutex<HashMap<u64, mpsc::Sender<ConnCmd>>>,
+    next_id: AtomicU64,
+}
+
+/// Cloneable handle kept in a WebSocket connection state/event-loop.
+#[derive(Clone)]
+pub struct Tunnel {
+    inner: Arc<Inner>,
+}
+
+impl Tunnel {
+    /// Create tunnel + outbound receiver (to drain in the WebSocket event loop).
+    pub fn new(cfg: TunnelConfig, cancel: CancellationToken) -> (Self, mpsc::Receiver<TunnelMsg>) {
+        let (out_tx, out_rx) = mpsc::channel(cfg.outbound_capacity);
+
+        let prefix = if cfg.id_prefix_bit { 1u64 << 63 } else { 0 };
+        let next_id = AtomicU64::new(1 | prefix);
+
+        let inner = Arc::new(Inner {
+            cfg,
+            cancel,
+            out_tx,
+            conns: Mutex::new(HashMap::new()),
+            next_id,
+        });
+
+        (Self { inner }, out_rx)
+    }
+
+    pub fn cancel(&self) {
+        self.inner.cancel.cancel();
+    }
+
+    /// Call this from the WebSocket event loop when it receives a tunnel message.
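+    ///
+    /// Integration sketch, where `WsProto` is the hypothetical wrapper enum
+    /// suggested in the module docs above:
+    ///
+    /// ```ignore
+    /// match incoming {
+    ///     WsProto::HydraTunnel(msg) => tunnel.on_msg(msg).await?,
+    ///     _ => { /* other protocol variants */ }
+    /// }
+    /// ```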
+    pub async fn on_msg(&self, msg: TunnelMsg) -> Result<()> {
+        match msg {
+            TunnelMsg::Open { id } => {
+                // Always connect to the single configured local port.
+                let addr = SocketAddr::new(
+                    self.inner.cfg.local_connect_host,
+                    self.inner.cfg.expose_port,
+                );
+                match TcpStream::connect(addr).await {
+                    Ok(sock) => self.attach_stream_with_id(id, sock).await?,
+                    Err(e) => {
+                        let _ = self
+                            .inner
+                            .out_tx
+                            .send(TunnelMsg::Close {
+                                id,
+                                code: close_code::IO,
+                                msg: Some(e.to_string()),
+                            })
+                            .await;
+                    },
+                }
+            },
+
+            TunnelMsg::Data { id, b64 } => {
+                let bytes = match B64.decode(b64.as_bytes()) {
+                    Ok(v) => Bytes::from(v),
+                    Err(_) => {
+                        let _ = self
+                            .inner
+                            .out_tx
+                            .send(TunnelMsg::Close {
+                                id,
+                                code: close_code::PROTOCOL,
+                                msg: Some("invalid base64".into()),
+                            })
+                            .await;
+                        return Ok(());
+                    },
+                };
+
+                let tx = { self.inner.conns.lock().await.get(&id).cloned() };
+                if let Some(tx) = tx {
+                    let _ = tx.send(ConnCmd::Write(bytes)).await;
+                }
+            },
+
+            TunnelMsg::Close { id, .. } => {
+                let tx = { self.inner.conns.lock().await.remove(&id) };
+                if let Some(tx) = tx {
+                    let _ = tx.send(ConnCmd::Close { notify_peer: false }).await;
+                }
+            },
+        }
+
+        Ok(())
+    }
+
+    /// Spawn a TCP listener on *this* side. Each accepted local TCP connection becomes
+    /// a tunneled stream to the peer’s configured `expose_port`.
+    pub async fn spawn_listener(&self, listen_port: u16) -> Result<()> {
+        let listener = TcpListener::bind((self.inner.cfg.local_connect_host, listen_port)).await?;
+        let this = self.clone();
+
+        tokio::spawn(async move {
+            loop {
+                let (mut sock, _) = tokio::select! {
+                    _ = this.inner.cancel.cancelled() => break,
+                    res = listener.accept() => match res { Ok(v) => v, Err(_) => break }
+                };
+
+                let id = this.alloc_local_id();
+
+                // Ask peer to open its configured port.
+                if this
+                    .inner
+                    .out_tx
+                    .send(TunnelMsg::Open { id })
+                    .await
+                    .is_err()
+                {
+                    let _ = sock.shutdown().await;
+                    break;
+                }
+
+                // Attach local accepted socket.
+                if this.attach_stream_with_id(id, sock).await.is_err() {
+                    let _ = this
+                        .inner
+                        .out_tx
+                        .send(TunnelMsg::Close {
+                            id,
+                            code: close_code::PROTOCOL,
+                            msg: Some("attach failed".into()),
+                        })
+                        .await;
+                }
+            }
+        });
+
+        Ok(())
+    }
+
+    /// If you already accepted a TcpStream elsewhere and want to tunnel it:
+    /// sends Open (no port) and returns the allocated id.
+    ///
+    /// *Warning*: it’s a little controversial, but trivial to add. Probably
+    /// shouldn’t be used.
+    pub async fn attach_stream(&self, sock: TcpStream) -> Result<u64> {
+        let id = self.alloc_local_id();
+        self.inner.out_tx.send(TunnelMsg::Open { id }).await?;
+        self.attach_stream_with_id(id, sock).await?;
+        Ok(id)
+    }
+
+    fn alloc_local_id(&self) -> u64 {
+        let prefix = if self.inner.cfg.id_prefix_bit {
+            1u64 << 63
+        } else {
+            0
+        };
+        let base = self.inner.next_id.fetch_add(1, Ordering::Relaxed) & !(1u64 << 63);
+        base | prefix
+    }
+
+    async fn attach_stream_with_id(&self, id: u64, sock: TcpStream) -> Result<()> {
+        let (cmd_tx, mut cmd_rx) = mpsc::channel::<ConnCmd>(self.inner.cfg.per_conn_cmd_capacity);
+
+        // Insert route, replacing duplicates (and closing old).
+        {
+            let mut m = self.inner.conns.lock().await;
+            if let Some(old) = m.insert(id, cmd_tx) {
+                let _ = old.send(ConnCmd::Close { notify_peer: false }).await;
+            }
+        }
+
+        let out_tx = self.inner.out_tx.clone();
+        let cancel = self.inner.cancel.clone();
+        let cfg = self.inner.cfg.clone();
+        let inner = Arc::clone(&self.inner);
+
+        tokio::spawn(async move {
+            let mut sock = sock;
+            let mut buf = BytesMut::with_capacity(cfg.read_chunk);
+            let mut notify_peer_close = true;
+
+            let close_reason: Option<(u8, Option<String>)> = loop {
+                tokio::select! {
+                    _ = cancel.cancelled() => {
+                        break Some((close_code::CANCELLED, Some("cancelled".into())));
+                    }
+
+                    // TCP -> WS (encode bytes as base64)
+                    rv = async {
+                        buf.clear();
+                        buf.reserve(cfg.read_chunk);
+                        sock.read_buf(&mut buf).await
+                    } => {
+                        match rv {
+                            Ok(0) => break Some((close_code::CLEAN, None)), // EOF
+                            Ok(_) => {
+                                let chunk = buf.split().freeze();
+                                let b64 = B64.encode(&chunk);
+                                if out_tx.send(TunnelMsg::Data { id, b64 }).await.is_err() {
+                                    notify_peer_close = false;
+                                    break None;
+                                }
+                            }
+                            Err(e) => break Some((close_code::IO, Some(e.to_string()))),
+                        }
+                    }
+
+                    // WS -> TCP (decode already done in on_msg)
+                    cmd = cmd_rx.recv() => {
+                        match cmd {
+                            Some(ConnCmd::Write(chunk)) => {
+                                if let Err(e) = sock.write_all(&chunk).await {
+                                    break Some((close_code::IO, Some(e.to_string())));
+                                }
+                            }
+                            Some(ConnCmd::Close { notify_peer }) => {
+                                notify_peer_close = notify_peer;
+                                break Some((close_code::CLEAN, None));
+                            }
+                            None => {
+                                notify_peer_close = false;
+                                break None;
+                            }
+                        }
+                    }
+                }
+            };
+
+            // Remove route
+            let _ = inner.conns.lock().await.remove(&id);
+
+            if notify_peer_close {
+                let (code, msg) =
+                    close_reason.unwrap_or((close_code::CANCELLED, Some("closed".into())));
+                let _ = out_tx.send(TunnelMsg::Close { id, code, msg }).await;
+            }
+
+            let _ = sock.shutdown().await;
+        });
+
+        Ok(())
+    }
+}
diff --git a/src/hydra/verifications.rs b/src/hydra/verifications.rs
new file mode 100644
index 0000000..de926ac
--- /dev/null
+++ b/src/hydra/verifications.rs
@@ -0,0 +1,809 @@
+use anyhow::{Result, anyhow};
+use serde_json::Value;
+use std::path::Path;
+use tracing::info;
+
+use crate::types::Network;
+
+/// FIXME: don’t use `cardano-cli`.
+///
+/// FIXME: proper errors, not `anyhow!`
+impl super::HydraConfig {
+    /// Generates Hydra keys if they don’t exist.
+    pub(super) async fn gen_hydra_keys(&self, target_dir: &Path) -> Result<()> {
+        std::fs::create_dir_all(target_dir)?;
+
+        let key_path = target_dir.join("hydra.sk");
+
+        if !key_path.exists() {
+            info!("hydra-controller: generating hydra keys");
+
+            let status = tokio::process::Command::new(&self.hydra_node_exe)
+                .arg("gen-hydra-key")
+                .arg("--output-file")
+                .arg(target_dir.join("hydra"))
+                .status()
+                .await?;
+
+            if !status.success() {
+                Err(anyhow!("gen-hydra-key failed with status: {status}"))?;
+            }
+        } else {
+            info!("hydra-controller: hydra keys already exist");
+        }
+
+        Ok(())
+    }
+
+    fn cardano_cli_env(&self) -> Vec<(&str, String)> {
+        vec![
+            (
+                "CARDANO_NODE_SOCKET_PATH",
+                self.toml.node_socket_path.to_string_lossy().to_string(),
+            ),
+            (
+                "CARDANO_NODE_NETWORK_ID",
+                match &self.network {
+                    Network::Mainnet => self.network.as_str().to_string(),
+                    other => other.network_magic().to_string(),
+                },
+            ),
+        ]
+    }
+
+    /// Queries the current L1 protocol parameters and zeroes out all
+    /// transaction fees, producing the ledger parameters the Hydra Head runs with.
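+    ///
+    /// Illustrative effect (abridged; the non-zero inputs are examples only):
+    ///
+    /// ```text
+    /// { "txFeeFixed": 155381, "txFeePerByte": 44, ... }
+    ///   becomes
+    /// { "txFeeFixed": 0, "txFeePerByte": 0,
+    ///   "executionUnitPrices": { "priceMemory": 0, "priceSteps": 0 }, ... }
+    /// ```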
+    pub(super) async fn gen_protocol_parameters(&self) -> Result<serde_json::Value> {
+        use serde_json::Value;
+
+        let output = tokio::process::Command::new(&self.cardano_cli_exe)
+            .envs(self.cardano_cli_env())
+            .args(["query", "protocol-parameters"])
+            .output()
+            .await?;
+
+        if !output.status.success() {
+            Err(anyhow!(
+                "cardano-cli failed with status: {} (stdout: {}) (stderr: {})",
+                output.status,
+                String::from_utf8_lossy(&output.stdout).trim(),
+                String::from_utf8_lossy(&output.stderr).trim()
+            ))?;
+        }
+
+        let mut params: Value = serde_json::from_slice(&output.stdout)?;
+
+        // .txFeeFixed := 0
+        // .txFeePerByte := 0
+        if let Some(obj) = params.as_object_mut() {
+            obj.insert("txFeeFixed".to_string(), 0.into());
+            obj.insert("txFeePerByte".to_string(), 0.into());
+
+            // .executionUnitPrices.priceMemory := 0
+            // .executionUnitPrices.priceSteps := 0
+            if let Some(exec_prices) = obj
+                .get_mut("executionUnitPrices")
+                .and_then(Value::as_object_mut)
+            {
+                exec_prices.insert("priceMemory".to_string(), 0.into());
+                exec_prices.insert("priceSteps".to_string(), 0.into());
+            }
+        }
+
+        Ok(params)
+    }
+
+    /// Check how much lovelace is available on an address.
+    pub(super) async fn lovelace_on_addr(&self, address: &str) -> Result<u64> {
+        let utxo_json = self.query_utxo_json(address).await?;
+        Self::sum_lovelace_from_utxo_json(&utxo_json)
+    }
+
+    pub(super) async fn derive_vkey_from_skey(
+        &self,
+        skey_path: &Path,
+    ) -> Result<serde_json::Value> {
+        let vkey_output = tokio::process::Command::new(&self.cardano_cli_exe)
+            .envs(self.cardano_cli_env())
+            .args(["key", "verification-key", "--signing-key-file"])
+            .arg(skey_path)
+            .args(["--verification-key-file", "/dev/stdout"])
+            .output()
+            .await?;
+        Ok(serde_json::from_slice(&vkey_output.stdout)?)
+    }
+
+    pub(super) async fn derive_enterprise_address_from_skey(
+        &self,
+        skey_path: &Path,
+    ) -> Result<String> {
+        let vkey_output = tokio::process::Command::new(&self.cardano_cli_exe)
+            .envs(self.cardano_cli_env())
+            .args(["key", "verification-key", "--signing-key-file"])
+            .arg(skey_path)
+            .args(["--verification-key-file", "/dev/stdout"])
+            .output()
+            .await?;
+
+        if !vkey_output.status.success() {
+            return Err(anyhow!(
+                "cardano-cli key verification-key failed: {}",
+                String::from_utf8_lossy(&vkey_output.stderr)
+            ));
+        }
+
+        let mut child = tokio::process::Command::new(&self.cardano_cli_exe)
+            .envs(self.cardano_cli_env())
+            .args([
+                "address",
+                "build",
+                "--payment-verification-key-file",
+                "/dev/stdin",
+            ])
+            .stdin(std::process::Stdio::piped())
+            .stdout(std::process::Stdio::piped())
+            .spawn()?;
+
+        {
+            let stdin = child.stdin.as_mut().ok_or(anyhow!(
+                "failed to open stdin for cardano-cli address build"
+            ))?;
+            use tokio::io::AsyncWriteExt;
+            stdin.write_all(&vkey_output.stdout).await?;
+        }
+
+        let addr_output = child.wait_with_output().await?;
+        if !addr_output.status.success() {
+            Err(anyhow!(
+                "cardano-cli address build failed: {}",
+                String::from_utf8_lossy(&addr_output.stderr)
+            ))?;
+        }
+
+        let address = String::from_utf8(addr_output.stdout)?.trim().to_string();
+        if address.is_empty() {
+            return Err(anyhow!("derived address is empty"));
+        }
+
+        Ok(address)
+    }
+
+    pub(super) async fn query_utxo_json(&self, address: &str) -> Result<serde_json::Value> {
+        let utxo_json = self
+            .cardano_cli_capture(
+                &[
+                    "query",
+                    "utxo",
+                    "--address",
+                    address,
+                    "--out-file",
+                    "/dev/stdout",
+                ],
+                None,
+            )
+            .await?
+            .0;
+        Ok(utxo_json)
+    }
+
+    pub async fn new_cardano_keypair(&self, base_path: &Path) -> Result<()> {
+        let output = tokio::process::Command::new(&self.cardano_cli_exe)
+            .envs(self.cardano_cli_env())
+            .args(["address", "key-gen", "--verification-key-file"])
+            .arg(base_path.with_extension("vk"))
+            .arg("--signing-key-file")
+            .arg(base_path.with_extension("sk"))
+            .output()
+            .await?;
+
+        if !output.status.success() {
+            return Err(anyhow!(
+                "cardano-cli address key-gen failed: {}",
+                String::from_utf8_lossy(&output.stderr)
+            ));
+        }
+
+        Ok(())
+    }
+
+    fn sum_lovelace_from_utxo_json(json: &serde_json::Value) -> Result<u64> {
+        let obj = json
+            .as_object()
+            .ok_or(anyhow!("UTxO JSON root is not an object"))?;
+
+        let mut total: u64 = 0;
+
+        for (_txin, utxo) in obj {
+            if let Some(value_obj) = utxo.get("value").and_then(|v| v.as_object()) {
+                if let Some(lovelace_val) = value_obj.get("lovelace") {
+                    total = total
+                        .checked_add(Self::as_u64(lovelace_val)?)
+                        .ok_or(anyhow!("cannot add"))?;
+                    continue;
+                }
+            }
+
+            if let Some(amount_arr) = utxo.get("amount").and_then(|v| v.as_array()) {
+                if let Some(lovelace_val) = amount_arr.first() {
+                    total = total
+                        .checked_add(Self::as_u64(lovelace_val)?)
+                        .ok_or(anyhow!("cannot add"))?;
+                }
+            }
+        }
+
+        Ok(total)
+    }
+
+    /// Convert a JSON value into u64, allowing either number or string.
+    fn as_u64(v: &Value) -> Result<u64> {
+        if let Some(n) = v.as_u64() {
+            return Ok(n);
+        }
+        if let Some(s) = v.as_str() {
+            return Ok(s.parse()?);
+        }
+        Err(anyhow!("lovelace value is neither u64 nor string"))
+    }
+
+    async fn cardano_cli_capture(
+        &self,
+        args: &[&str],
+        stdin_bytes: Option<&[u8]>,
+    ) -> Result<(serde_json::Value, Vec<u8>)> {
+        use tokio::io::AsyncWriteExt;
+
+        let mut cmd = tokio::process::Command::new(&self.cardano_cli_exe);
+        cmd.envs(self.cardano_cli_env());
+        cmd.args(args)
+            .stdout(std::process::Stdio::piped())
+            .stderr(std::process::Stdio::piped());
+
+        if stdin_bytes.is_some() {
+            cmd.stdin(std::process::Stdio::piped());
+        } else {
+            cmd.stdin(std::process::Stdio::null());
+        }
+
+        let mut child = cmd.spawn()?;
+
+        if let Some(bytes) = stdin_bytes {
+            let mut stdin = child
+                .stdin
+                .take()
+                .ok_or_else(|| anyhow!("failed to open stdin pipe"))?;
+            stdin.write_all(bytes).await?;
+            stdin.shutdown().await?;
+        }
+
+        let out = child.wait_with_output().await?;
+
+        if !out.status.success() {
+            return Err(anyhow!(
+                "cardano-cli failed (exit={}):\nstdout: {}\nstderr: {}",
+                out.status,
+                String::from_utf8_lossy(&out.stdout).trim(),
+                String::from_utf8_lossy(&out.stderr).trim(),
+            ));
+        }
+
+        let (json, rest) = parse_first_json_and_rest(&out.stdout)?;
+        Ok((json, rest))
+    }
+
+    pub(super) async fn fund_address(
+        &self,
+        addr_from: &str,
+        addr_to: &str,
+        amount_lovelace: u64,
+        payment_skey_path: &Path,
+    ) -> Result<()> {
+        let utxo_json = self
+            .cardano_cli_capture(
+                &[
+                    "query",
+                    "utxo",
+                    "--address",
+                    addr_from,
+                    "--out-file",
+                    "/dev/stdout",
+                ],
+                None,
+            )
+            .await?
+            .0;
+
+        // XXX: we’re only taking the first 200 UTxOs below, because the test address on
+        // CI has too many of them, and we’d hit `MaxTxSizeUTxO`.
+        let obj = utxo_json
+            .as_object()
+            .ok_or_else(|| anyhow!("UTxO JSON is not an object"))?;
+
+        let tx_in_keys: Vec<&str> = obj.keys().take(200).map(|k| k.as_str()).collect();
+        if tx_in_keys.is_empty() {
+            Err(anyhow!("no UTxOs found for addr_from"))?
+        }
+
+        let tx_out = format!("{addr_to}+{amount_lovelace}");
+        let mut build_args: Vec<String> =
+            vec!["latest".into(), "transaction".into(), "build".into()];
+        for k in &tx_in_keys {
+            build_args.push("--tx-in".into());
+            build_args.push((*k).into());
+        }
+        build_args.extend([
+            "--change-address".into(),
+            addr_from.into(),
+            "--tx-out".into(),
+            tx_out,
+            "--out-file".into(),
+            "/dev/stdout".into(),
+        ]);
+
+        let build_args_ref: Vec<&str> = build_args.iter().map(|s| s.as_str()).collect();
+        let tx_json = self.cardano_cli_capture(&build_args_ref, None).await?.0;
+
+        let skey = payment_skey_path
+            .to_str()
+            .ok_or_else(|| anyhow!("payment_skey_path is not valid UTF-8"))?;
+
+        let tx_signed = self
+            .cardano_cli_capture(
+                &[
+                    "latest",
+                    "transaction",
+                    "sign",
+                    "--tx-file",
+                    "/dev/stdin",
+                    "--signing-key-file",
+                    skey,
+                    "--out-file",
+                    "/dev/stdout",
+                ],
+                Some(&serde_json::to_vec(&tx_json)?),
+            )
+            .await?
+            .0;
+
+        let _ = self
+            .cardano_cli_capture(
+                &["latest", "transaction", "submit", "--tx-file", "/dev/stdin"],
+                Some(&serde_json::to_vec(&tx_signed)?),
+            )
+            .await?
+            .0;
+
+        Ok(())
+    }
+
+    pub(super) async fn commit_all_utxo_to_hydra(
+        &self,
+        from_addr: &str,
+        hydra_api_port: u16,
+        commit_funds_skey: &Path,
+    ) -> Result<()> {
+        use anyhow::Context;
+        use reqwest::header;
+
+        let utxo_json = self.query_utxo_json(from_addr).await?;
+        let utxo_body = serde_json::to_vec(&utxo_json).context("failed to serialize utxo JSON")?;
+
+        let url = format!("http://127.0.0.1:{}/commit", hydra_api_port);
+        let client = reqwest::Client::new();
+        let resp = client
+            .post(url)
+            .header(header::CONTENT_TYPE, "application/json")
+            .body(utxo_body)
+            .send()
+            .await
+            .context("failed to POST /commit to hydra-node")?;
+
+        if !resp.status().is_success() {
+            let status = resp.status();
+            let body = resp.bytes().await.unwrap_or_default();
+            return Err(anyhow!(
+                "hydra /commit failed with {}: {}",
+                status,
+                String::from_utf8_lossy(&body)
+            ));
+        }
+
+        let commit_tx_bytes = resp
+            .bytes()
+            .await
+            .context("failed to read hydra /commit response body")?
+            .to_vec();
+
+        let _: serde_json::Value = serde_json::from_slice(&commit_tx_bytes)
+            .context("hydra /commit response was not valid JSON")?;
+
+        let signed_tx = self
+            .cardano_cli_capture(
+                &[
+                    "latest",
+                    "transaction",
+                    "sign",
+                    "--tx-file",
+                    "/dev/stdin",
+                    "--signing-key-file",
+                    commit_funds_skey
+                        .to_str()
+                        .ok_or_else(|| anyhow!("commit_funds_skey is not valid UTF-8"))?,
+                    "--out-file",
+                    "/dev/stdout",
+                ],
+                Some(&commit_tx_bytes),
+            )
+            .await?
+            .0;
+
+        let _ = self
+            .cardano_cli_capture(
+                &["latest", "transaction", "submit", "--tx-file", "/dev/stdin"],
+                Some(&serde_json::to_vec(&signed_tx)?),
+            )
+            .await?
+            .0;
+        Ok(())
+    }
+
+    pub(super) async fn send_hydra_transaction(
+        &self,
+        hydra_api_port: u16,
+        sender_addr: &str,
+        receiver_addr: &str,
+        sender_skey_path: &Path,
+        amount_lovelace: u64,
+    ) -> Result<()> {
+        use anyhow::Context;
+
+        let snapshot_url = format!("http://127.0.0.1:{}/snapshot/utxo", hydra_api_port);
+        let utxo: Value = reqwest::Client::new()
+            .get(&snapshot_url)
+            .send()
+            .await?
+            .error_for_status()?
+            .json()
+            .await
+            .context("snapshot/utxo: failed to decode JSON")?;
+
+        let utxo_obj = utxo
+            .as_object()
+            .context("snapshot/utxo: expected top-level JSON object")?;
+
+        let mut filtered: serde_json::Map<String, Value> = serde_json::Map::new();
+        for (k, v) in utxo_obj.iter() {
+            if v.get("address").and_then(Value::as_str) == Some(sender_addr) {
+                filtered.insert(k.clone(), v.clone());
+            }
+        }
+
+        let (tx_in, chosen_entry) = filtered
+            .iter()
+            .next()
+            .map(|(k, v)| (k.clone(), v.clone()))
+            .ok_or_else(|| anyhow!("no UTxO found for sender address"))?;
+
+        let lovelace_total = chosen_entry
+            .pointer("/value/lovelace")
+            .and_then(Value::as_u64)
+            .context("utxo entry: expected .value.lovelace as integer")?;
+
+        if lovelace_total < amount_lovelace {
+            return Err(anyhow!(
+                "insufficient lovelace in selected UTxO: {} < {}",
+                lovelace_total,
+                amount_lovelace
+            ));
+        }
+
+        let change = lovelace_total - amount_lovelace;
+
+        let tx_body: serde_json::Value = {
+            let args: &[&str] = &[
+                "latest",
+                "transaction",
+                "build-raw",
+                "--tx-in",
+                &tx_in,
+                "--tx-out",
+                &format!("{}+{}", receiver_addr, amount_lovelace),
+                "--tx-out",
+                &format!("{}+{}", sender_addr, change),
+                "--fee",
+                "0",
+                "--out-file",
+                "/dev/stdout",
+            ];
+            self.cardano_cli_capture(args, None).await?.0
+        };
+
+        let tx_signed: serde_json::Value = {
+            let skey_str = sender_skey_path
+                .to_str()
+                .context("sender_skey_path is not valid UTF-8")?;
+
+            let args: &[&str] = &[
+                "latest",
+                "transaction",
+                "sign",
+                "--tx-body-file",
+                "/dev/stdin",
+                "--signing-key-file",
+                skey_str,
+                "--out-file",
+                "/dev/stdout",
+            ];
+
+            self.cardano_cli_capture(args, Some(&serde_json::to_vec(&tx_body)?))
+                .await?
+                .0
+        };
+
+        let payload = serde_json::json!({
+            "tag": "NewTx",
+            "transaction": tx_signed,
+        });
+
+        tracing::info!(
+            "hydra-controller: sending WebSocket payload: {}",
+            serde_json::to_string(&payload)?
+        );
+
+        let ws_url = format!("ws://127.0.0.1:{}/", hydra_api_port);
+        send_one_websocket_msg(&ws_url, payload, std::time::Duration::from_secs(2)).await?;
+
+        Ok(())
+    }
+
+    pub(super) async fn hydra_utxo_count(&self, hydra_api_port: u16) -> Result<u64> {
+        use anyhow::Context;
+
+        let url = format!("http://127.0.0.1:{}/snapshot/utxo", hydra_api_port);
+
+        let v: Value = reqwest::Client::new()
+            .get(&url)
+            .send()
+            .await?
+            .error_for_status()?
+            .json()
+            .await
+            .context("snapshot/utxo: failed to decode JSON")?;
+
+        v.as_object()
+            .context("snapshot/utxo: expected top-level JSON object")?
+            .len()
+            .try_into()
+            .context("utxo length does not fit into u64 (?)")
+    }
+}
+
+/// Reads a JSON file from disk.
+pub fn read_json_file(path: &Path) -> Result<serde_json::Value> {
+    let contents = std::fs::read_to_string(path)?;
+    let json: serde_json::Value = serde_json::from_str(&contents)?;
+    Ok(json)
+}
+
+/// Writes `json` to `path` (pretty-printed) **only if** the JSON content differs
+/// from what is already on disk. Returns `true` if the file was written.
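+///
+/// Usage sketch (mirrors how `start_hydra_node` persists the exchanged
+/// parameters; the `config_dir` and `params` bindings are assumed context):
+///
+/// ```ignore
+/// let wrote = write_json_if_changed(&config_dir.join("protocol-parameters.json"), &params)?;
+/// ```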
+pub fn write_json_if_changed(path: &Path, json: &serde_json::Value) -> Result<bool> {
+    use std::fs::File;
+    use std::io::Write;
+
+    if path.exists() {
+        if let Ok(existing_str) = std::fs::read_to_string(path) {
+            if let Ok(existing_json) = serde_json::from_str::<serde_json::Value>(&existing_str) {
+                if existing_json == *json {
+                    return Ok(false);
+                }
+            }
+        }
+    }
+
+    if let Some(parent) = path.parent() {
+        if !parent.as_os_str().is_empty() {
+            std::fs::create_dir_all(parent)?;
+        }
+    }
+
+    let mut file = File::create(path)?;
+    serde_json::to_writer_pretty(&mut file, json)?;
+    file.write_all(b"\n")?;
+
+    Ok(true)
+}
+
+/// Finds a free TCP port by binding to port 0 and letting the OS pick one.
+pub async fn find_free_tcp_port() -> std::io::Result<u16> {
+    let listener = tokio::net::TcpListener::bind(("127.0.0.1", 0)).await?;
+    let port = listener.local_addr()?.port();
+    drop(listener);
+    Ok(port)
+}
+
+/// Returns `Ok(true)` if `port` can be bound on 127.0.0.1 (so it's free),
+/// `Ok(false)` if it's already in use, and `Err(_)` for other IO errors.
+pub async fn is_tcp_port_free(port: u16) -> std::io::Result<bool> {
+    match tokio::net::TcpListener::bind(("127.0.0.1", port)).await {
+        Ok(listener) => {
+            drop(listener);
+            Ok(true)
+        },
+        Err(e) if e.kind() == std::io::ErrorKind::AddrInUse => Ok(false),
+        Err(e) => Err(e),
+    }
+}
+
+/// Checks whether the Prometheus `metric` at `url` is greater than or equal to `threshold`.
+pub async fn prometheus_metric_at_least(url: &str, metric: &str, threshold: f64) -> Result<bool> {
+    let client = reqwest::Client::new();
+    let body = client
+        .get(url)
+        .send()
+        .await?
+        .error_for_status()?
+        .text()
+        .await?;
+
+    let mut found_any = false;
+    let mut max_value: Option<f64> = None;
+
+    for line in body.lines() {
+        let line = line.trim();
+        if line.is_empty() || line.starts_with('#') {
+            continue;
+        }
+
+        // Exposition format: metric_name{labels} value [timestamp]
+        let mut parts = line.split_whitespace();
+        let name_and_labels = match parts.next() {
+            Some(x) => x,
+            None => continue,
+        };
+
+        let name = name_and_labels.split('{').next().unwrap_or(name_and_labels);
+        if name != metric {
+            continue;
+        }
+
+        let value_str = match parts.next() {
+            Some(v) => v,
+            None => continue,
+        };
+
+        let value: f64 = value_str.parse()?;
+        found_any = true;
+        max_value = Some(max_value.map_or(value, |m| m.max(value)));
+    }
+
+    if !found_any {
+        return Err(anyhow!("metric {metric} not found in /metrics output"));
+    }
+
+    Ok(max_value.unwrap_or(f64::NEG_INFINITY) >= threshold)
+}
+
+/// Sends a single WebSocket message, and waits a bit before closing the
+/// connection cleanly. Particularly useful for Hydra.
+pub async fn send_one_websocket_msg(
+    url: &str,
+    payload: serde_json::Value,
+    wait_before_close: std::time::Duration,
+) -> Result<()> {
+    use futures_util::{SinkExt, StreamExt};
+    use tokio_tungstenite::{connect_async, tungstenite::protocol::Message};
+
+    let (ws_stream, _resp) = connect_async(url).await?;
+    let (mut write, mut read) = ws_stream.split();
+
+    write.send(Message::Text(payload.to_string())).await?;
+
+    tokio::time::sleep(wait_before_close).await;
+
+    write.send(Message::Close(None)).await?;
+
+    // Drain until we observe the close handshake (or the peer drops):
+    while let Some(msg) = read.next().await {
+        match msg? {
+/// Sends a single WebSocket message, and waits a bit before closing the
+/// connection cleanly. Particularly useful for Hydra.
+pub async fn send_one_websocket_msg(
+    url: &str,
+    payload: serde_json::Value,
+    wait_before_close: std::time::Duration,
+) -> Result<()> {
+    use futures_util::{SinkExt, StreamExt};
+    use tokio_tungstenite::{connect_async, tungstenite::protocol::Message};
+
+    let (ws_stream, _resp) = connect_async(url).await?;
+    let (mut write, mut read) = ws_stream.split();
+
+    write.send(Message::Text(payload.to_string())).await?;
+
+    tokio::time::sleep(wait_before_close).await;
+
+    write.send(Message::Close(None)).await?;
+
+    // Drain until we observe the close handshake (or the peer drops):
+    while let Some(msg) = read.next().await {
+        match msg? {
+            Message::Close(_) => break,
+            Message::Text(msg) => {
+                tracing::info!("hydra-controller: got WebSocket message: {}", msg)
+            },
+            msg => tracing::info!("hydra-controller: got WebSocket message: {:?}", msg),
+        }
+    }
+
+    Ok(())
+}
+
+pub async fn fetch_head_tag(hydra_api_port: u16) -> Result<String> {
+    let url = format!("http://127.0.0.1:{}/head", hydra_api_port);
+
+    let v: serde_json::Value = reqwest::get(url).await?.error_for_status()?.json().await?;
+
+    v.get("tag")
+        .ok_or(anyhow!("missing tag"))
+        .and_then(|a| a.as_str().ok_or(anyhow!("tag is not a string")))
+        .map(|a| a.to_string())
+}
+
+/// Parses the first JSON value from e.g. stdout, and returns the remainder.
+fn parse_first_json_and_rest(stdout: &[u8]) -> Result<(serde_json::Value, Vec<u8>)> {
+    let mut start = stdout
+        .iter()
+        .position(|b| !b.is_ascii_whitespace())
+        .unwrap_or(0);
+
+    if !matches!(stdout.get(start), Some(b'{') | Some(b'[')) {
+        if let Some(i) = stdout.iter().position(|&b| b == b'{' || b == b'[') {
+            start = i;
+        }
+    }
+
+    let mut it =
+        serde_json::Deserializer::from_slice(&stdout[start..]).into_iter::<serde_json::Value>();
+
+    let first = it
+        .next()
+        .ok_or_else(|| anyhow!("no JSON value found in stdout"))?
+        .map_err(|e| anyhow!("failed to parse first JSON value from stdout: {e}"))?;
+
+    let consumed = it.byte_offset(); // bytes consumed by the first JSON value
+    let rest = stdout[start + consumed..].to_vec();
+
+    Ok((first, rest))
+}
+
+#[cfg(unix)]
+pub fn sigterm(pid: u32) -> Result<()> {
+    use nix::sys::signal::{Signal, kill};
+    use nix::unistd::Pid;
+    Ok(kill(Pid::from_raw(pid as i32), Signal::SIGTERM)?)
+}
+
+#[cfg(windows)]
+pub fn sigterm(_pid: u32) -> Result<()> {
+    Err(anyhow!("sigterm is not supported on Windows"))
+}
+
+/// We use this for `localhost` tests, to detect whether the Gateway and the
+/// Platform are running on the same host. If they are, we cannot set up a
+/// [`crate::hydra::tunnel2::Tunnel`], because the ports are already taken.
+pub fn hashed_machine_id() -> String {
+    const MACHINE_ID_NAMESPACE: &str = "blockfrost.machine-id.v1";
+
+    let mut hasher = blake3::Hasher::new();
+    hasher.update(MACHINE_ID_NAMESPACE.as_bytes());
+    hasher.update(b":");
+
+    match machine_uid::get() {
+        Ok(id) => {
+            hasher.update(id.as_bytes());
+        },
+        Err(e) => {
+            tracing::warn!(error = ?e, "machine_uid::get() failed; falling back to random bytes");
+            let mut fallback = [0u8; 32];
+            getrandom::fill(&mut fallback)
+                .expect("getrandom::fill shouldn’t fail in normal circumstances");
+            hasher.update(&fallback);
+        },
+    }
+
+    hasher.finalize().to_hex().to_string()
+}
+
+#[cfg(test)]
+mod tests {
+    use anyhow::Result;
+
+    #[test]
+    fn parse_first_json_and_rest() -> Result<()> {
+        let input = r#"
+{"cborHex":"84a300d9010"}
+blah blah
+"#;
+        let (obj, rest) = super::parse_first_json_and_rest(String::from(input).as_bytes())?;
+        assert_eq!(obj, serde_json::json!({"cborHex":"84a300d9010"}));
+        assert_eq!(rest, String::from("\nblah blah\n").as_bytes());
+        Ok(())
+    }
+}
diff --git a/src/lib.rs b/src/lib.rs
index c8b77ab..ae37612 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -1,3 +1,12 @@
+pub mod api;
 pub mod blockfrost;
+pub mod config;
+pub mod db;
 pub mod errors;
+pub mod find_libexec;
+pub mod hydra;
 pub mod load_balancer;
+pub mod models;
+pub mod payload;
+pub mod schema;
+pub mod types;
diff --git a/src/load_balancer.rs b/src/load_balancer.rs
index e1c725e..c82b63f 100644
--- a/src/load_balancer.rs
+++ b/src/load_balancer.rs
@@ -1,10 +1,12 @@
-use crate::blockfrost::AssetName;
 use crate::errors::APIError;
+use crate::hydra;
+use crate::types::AssetName;
 use serde::{Deserialize, Serialize};
 use std::collections::HashMap;
-use std::sync::{atomic, Arc};
-use tokio::sync::{mpsc, oneshot, Mutex};
+use std::sync::{Arc, atomic};
+use tokio::sync::{Mutex, mpsc, oneshot};
 use tokio::task::JoinHandle;
+use tokio_util::sync::CancellationToken;
 use tracing::{error, info, warn};
 use uuid::Uuid;
 
@@ -53,14 +55,19 @@ pub enum JsonRequestMethod {
 
 #[derive(Serialize, Deserialize, Debug)]
 pub enum LoadBalancerMessage {
     Request(JsonRequest),
+    HydraKExResponse(hydra::KeyExchangeResponse),
+    HydraTunnel(hydra::tunnel2::TunnelMsg),
     Ping(u64),
     Pong(u64),
+    Error { code: u64, msg: String },
 }
 
 /// The WebSocket messages that we receive.
 #[derive(Serialize, Deserialize, Debug)]
 pub enum RelayMessage {
     Response(JsonResponse),
+    HydraKExRequest(hydra::KeyExchangeRequest),
+    HydraTunnel(hydra::tunnel2::TunnelMsg),
     Ping(u64),
     Pong(u64),
 }
@@ -70,11 +77,13 @@ pub struct LoadBalancerState {
     pub access_tokens: Arc<Mutex<HashMap<AccessToken, AccessTokenState>>>,
     pub active_relays: Arc<Mutex<HashMap<Uuid, RelayState>>>,
     pub background_worker: Arc<JoinHandle<()>>,
+    pub hydras: Option<hydra::HydrasManager>,
 }
 
 #[derive(Debug)]
 pub struct AccessTokenState {
     pub name: AssetName,
+    pub reward_addr: String,
     pub api_prefix: Uuid,
     pub expires: std::time::Instant,
 }
@@ -102,7 +111,7 @@ pub struct RequestState {
 }
 
 impl LoadBalancerState {
-    pub async fn new() -> LoadBalancerState {
+    pub async fn new(hydras: Option<hydra::HydrasManager>) -> LoadBalancerState {
         let access_tokens = Arc::new(Mutex::new(HashMap::new()));
         let active_relays = Arc::new(Mutex::new(HashMap::new()));
         let background_worker = Arc::new(tokio::spawn(Self::clean_up_expired_tokens_periodically(
@@ -113,16 +122,23 @@ impl LoadBalancerState {
             access_tokens,
             active_relays,
             background_worker,
+            hydras,
         }
     }
 
-    pub async fn new_access_token(&self, name: AssetName, api_prefix: Uuid) -> AccessToken {
+    pub async fn new_access_token(
+        &self,
+        name: AssetName,
+        api_prefix: Uuid,
+        reward_addr: &str,
+    ) -> AccessToken {
         let expires = std::time::Instant::now() + ACCESS_TOKEN_TIMEOUT;
         let token = random_token();
         self.access_tokens.lock().await.insert(
             token.clone(),
             AccessTokenState {
                 name,
+                reward_addr: reward_addr.to_string(),
                 api_prefix,
                 expires,
             },
@@ -144,14 +160,18 @@ impl LoadBalancerState {
         Ok(state)
     }
 
-    async fn clean_up_expired_tokens_periodically(access_tokens: Arc<Mutex<HashMap<AccessToken, AccessTokenState>>>) {
+    async fn clean_up_expired_tokens_periodically(
+        access_tokens: Arc<Mutex<HashMap<AccessToken, AccessTokenState>>>,
+    ) {
         loop {
             tokio::time::sleep(std::time::Duration::from_secs(60)).await;
             Self::clean_up_expired_tokens(&access_tokens).await;
         }
     }
 
-    async fn clean_up_expired_tokens(access_tokens: &Arc<Mutex<HashMap<AccessToken, AccessTokenState>>>) {
+    async fn clean_up_expired_tokens(
+        access_tokens: &Arc<Mutex<HashMap<AccessToken, AccessTokenState>>>,
+    ) {
         let now = std::time::Instant::now();
 
         access_tokens.lock().await.retain(|_, state| {
@@ -179,7 +199,7 @@
 
 /// Generates a random Base64-encoded string. Used for generating access tokens.
 pub fn random_token() -> AccessToken {
-    use base64::{engine::general_purpose, Engine as _};
+    use base64::{Engine as _, engine::general_purpose};
     use rand::RngCore;
     let mut bytes = [0u8; 32];
     rand::thread_rng().fill_bytes(&mut bytes);
@@ -192,10 +212,10 @@ pub mod api {
     use super::*;
     use crate::errors::APIError;
     use axum::{
+        Extension,
         extract::{Path, Request, WebSocketUpgrade},
         http::{HeaderMap, StatusCode},
         response::IntoResponse,
-        Extension,
     };
     use tokio::sync::oneshot;
     use uuid::Uuid;
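The periodic cleanup scheduled above boils down to a `retain` over each token's deadline; the same shape in isolation:

```rust
use std::collections::HashMap;
use std::time::{Duration, Instant};

fn main() {
    let now = Instant::now();
    let mut tokens: HashMap<&str, Instant> = HashMap::new();
    tokens.insert("expired", now); // deadline already reached
    tokens.insert("valid", now + Duration::from_secs(3600));

    // Same retain-on-deadline shape as clean_up_expired_tokens:
    tokens.retain(|_, expires| *expires > now);

    assert!(tokens.contains_key("valid"));
    assert!(!tokens.contains_key("expired"));
}
```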
@@ -247,7 +267,9 @@ pub mod api {
 
     /// This route shows some stats about all relays connected with a WebSocket,
     /// and their RTT (round-trip time).
-    pub async fn stats_route(Extension(load_balancer): Extension<LoadBalancerState>) -> impl IntoResponse {
+    pub async fn stats_route(
+        Extension(load_balancer): Extension<LoadBalancerState>,
+    ) -> impl IntoResponse {
         let mut rv: HashMap<AssetName, RelayStats> = HashMap::new();
         let now_chrono = chrono::Utc::now();
         let now_instant = std::time::Instant::now();
@@ -257,11 +279,18 @@ pub mod api {
                 relay_state.name.clone(),
                 RelayStats {
                     api_prefix: *api_prefix,
-                    network_rtt_seconds: relay_state.network_rtt.lock().await.map(|a| a.as_secs_f64()),
+                    network_rtt_seconds: relay_state
+                        .network_rtt
+                        .lock()
+                        .await
+                        .map(|a| a.as_secs_f64()),
                     connected_since: now_chrono - (now_instant - relay_state.connected_since),
                     requests_sent: relay_state.requests_sent.load(atomic::Ordering::SeqCst),
-                    responses_received: relay_state.responses_received.load(atomic::Ordering::SeqCst),
-                    requests_in_progress: relay_state.requests_in_progress.lock().await.len() as u64,
+                    responses_received: relay_state
+                        .responses_received
+                        .load(atomic::Ordering::SeqCst),
+                    requests_in_progress: relay_state.requests_in_progress.lock().await.len()
+                        as u64,
                 },
             );
         }
@@ -288,18 +317,19 @@ pub mod api {
             )
         })?;
 
-        let (new_request_channel, relay_name): (mpsc::Sender<RequestState>, AssetName) = load_balancer
-            .active_relays
-            .lock()
-            .await
-            .get(&api_prefix)
-            .ok_or_else(|| {
-                (
-                    StatusCode::NOT_FOUND,
-                    format!("relay {} not found for request: {}", api_prefix, rest),
-                )
-            })
-            .map(|rs| (rs.new_request_channel.clone(), rs.name.clone()))?;
+        let (new_request_channel, relay_name): (mpsc::Sender<RequestState>, AssetName) =
+            load_balancer
+                .active_relays
+                .lock()
+                .await
+                .get(&api_prefix)
+                .ok_or_else(|| {
+                    (
+                        StatusCode::NOT_FOUND,
+                        format!("relay {} not found for request: {}", api_prefix, rest),
+                    )
+                })
+                .map(|rs| (rs.new_request_channel.clone(), rs.name.clone()))?;
 
         let json_req = request_to_json(req, rest.clone(), &relay_name).await?;
 
@@ -359,10 +389,6 @@ pub mod event_loop {
     use super::*;
     use axum::extract::ws::{Message, WebSocket};
     use axum::http::StatusCode;
-    use futures_util::{
-        sink::SinkExt,
-        stream::{SplitSink, StreamExt},
-    };
 
     /// For clarity, let’s have a single connection 'event_loop per WebSocket
     /// connection, with the following events:
@@ -374,8 +400,13 @@
     }
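The `run` function that follows funnels every input source (incoming HTTP requests, relay WebSocket messages, finish signals, ping timers) into a single mpsc channel, so one task owns all per-connection state without extra locking. A minimal standalone sketch of that fan-in wiring (event names are illustrative):

```rust
use tokio::sync::mpsc;

#[derive(Debug)]
enum Event {
    Request(String),
    Finish(String),
}

#[tokio::main]
async fn main() {
    // One consumer, many producers: each source gets a clone of `event_tx`.
    let (event_tx, mut event_rx) = mpsc::channel::<Event>(64);

    let tx = event_tx.clone();
    tokio::spawn(async move {
        let _ = tx.send(Event::Request("GET /blocks/latest".into())).await;
    });

    let tx = event_tx.clone();
    tokio::spawn(async move {
        let _ = tx.send(Event::Finish("relay disconnected".into())).await;
    });

    drop(event_tx); // the loop ends once every producer is gone

    while let Some(ev) = event_rx.recv().await {
        match ev {
            Event::Request(r) => println!("pass on request: {r}"),
            Event::Finish(why) => {
                println!("finishing: {why}");
                break;
            },
        }
    }
}
```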
 
     /// Top-level logic of a single WebSocket connection with a relay.
-    pub async fn run(load_balancer: LoadBalancerState, token_state: AccessTokenState, socket: WebSocket) {
+    pub async fn run(
+        load_balancer: LoadBalancerState,
+        token_state: AccessTokenState,
+        socket: WebSocket,
+    ) {
         let asset_name = &token_state.name;
+        let reward_addr = token_state.reward_addr.clone();
 
         // Allow only 1 connection per NFT:
         disconnect_existing_sessions_of(&token_state, &load_balancer).await;
@@ -388,7 +419,8 @@
         let (event_tx, mut event_rx) = mpsc::channel::<LBEvent>(64);
         let (request_tx, request_task) = wire_requests(event_tx.clone()).await;
         let (finish_tx, finish_task) = wire_do_finish(event_tx.clone()).await;
-        let (mut socket_tx, response_task) = wire_responses(event_tx.clone(), socket, asset_name).await;
+        let (socket_tx, response_task, arbitrary_msg_task) =
+            wire_responses(event_tx.clone(), socket, asset_name).await;
 
         let relay_state = RelayState {
             name: token_state.name.clone(),
@@ -431,6 +463,13 @@
         let mut last_ping_id: u64 = 0;
         let mut disconnection_reason = None;
 
+        let mut initial_hydra_kex: Option<(hydra::KeyExchangeRequest, hydra::KeyExchangeResponse)> =
+            None;
+        let mut hydra_controller: Option = None;
+
+        let tunnel_cancellation = CancellationToken::new();
+        let mut tunnel_controller: Option = None;
+
         // The actual connection event loop:
         'event_loop: while let Some(msg) = event_rx.recv().await {
             match msg {
@@ -440,7 +479,7 @@
                 },
 
                 LBEvent::NewRequest(request) => {
-                    if pass_on_request(request, &relay_state, asset_name, &mut socket_tx)
+                    if pass_on_request(request, &relay_state, asset_name, &socket_tx)
                         .await
                         .is_err()
                     {
@@ -448,18 +487,122 @@
                     }
                 },
 
+                LBEvent::NewRelayMessage(RelayMessage::HydraTunnel(tun_msg)) => {
+                    if let Some(tunnel_ctl) = &tunnel_controller {
+                        match tunnel_ctl.on_msg(tun_msg).await {
+                            Ok(()) => (),
+                            Err(err) => error!(
+                                "hydra-tunnel: got an error when passing message through WebSocket: {err}; ignoring"
+                            ),
+                        }
+                    }
+                },
+
+                LBEvent::NewRelayMessage(RelayMessage::HydraKExRequest(req)) => {
+                    let already_exists = match &hydra_controller {
+                        None => false,
+                        Some(ctl) => ctl.is_alive(),
+                    };
+
+                    let reply = match (
+                        already_exists,
+                        &load_balancer.hydras,
+                        &req.accepted_platform_h2h_port,
+                        initial_hydra_kex.take(),
+                    ) {
+                        (true, _, _, _) => LoadBalancerMessage::Error {
+                            code: 538,
+                            msg: "Hydra controller already exists on this connection".to_string(),
+                        },
+                        (false, None, _, _) => LoadBalancerMessage::Error {
+                            code: 536,
+                            msg: "Hydra micropayments not supported".to_string(),
+                        },
+                        (false, Some(hydras), Some(_accepted_port), Some(initial_kex)) => {
+                            let platform_machine_id = req.machine_id.clone();
+                            match hydras
+                                .spawn_new(asset_name, &reward_addr, initial_kex, req)
+                                .await
+                            {
+                                Ok((ctl, resp)) => {
+                                    hydra_controller = Some(ctl);
+
+                                    // Only start the TCP-over-WebSocket tunnels if we’re running
+                                    // on different machines:
+                                    if platform_machine_id != resp.machine_id {
+                                        let (tunnel_ctl, mut tunnel_rx) =
+                                            hydra::tunnel2::Tunnel::new(
+                                                hydra::tunnel2::TunnelConfig {
+                                                    expose_port: resp.gateway_h2h_port,
+                                                    id_prefix_bit: true,
+                                                    ..(hydra::tunnel2::TunnelConfig::default())
+                                                },
+                                                tunnel_cancellation.clone(),
+                                            );
+
+                                        tunnel_ctl.spawn_listener(resp.proposed_platform_h2h_port).await.expect("FIXME: this really shouldn’t fail, unless we hit the TOCTOU race condition…");
+
+                                        let socket_tx_ = socket_tx.clone();
+                                        let asset_name_ = asset_name.clone();
+                                        tokio::spawn(async move {
+                                            while let Some(tun_msg) = tunnel_rx.recv().await {
+                                                if send_json_msg(
+                                                    &socket_tx_,
+                                                    &LoadBalancerMessage::HydraTunnel(tun_msg),
+                                                    &asset_name_,
+                                                )
+                                                .await
+                                                .is_err()
+                                                {
+                                                    break;
+                                                }
+                                            }
+                                        });
+
+                                        tunnel_controller = Some(tunnel_ctl);
+                                    }
+
+                                    LoadBalancerMessage::HydraKExResponse(resp)
+                                },
+                                Err(err) => LoadBalancerMessage::Error {
+                                    code: 537,
+                                    msg: format!("Hydra micropayments setup error: {err}"),
+                                },
+                            }
+                        },
+                        (false, Some(hydras), _, _) => {
+                            match hydras
+                                .initialize_key_exchange(asset_name, req.clone())
+                                .await
+                            {
+                                Ok(resp) => {
+                                    initial_hydra_kex = Some((req, resp.clone()));
+                                    LoadBalancerMessage::HydraKExResponse(resp)
+                                },
+                                Err(err) => LoadBalancerMessage::Error {
+                                    code: 537,
+                                    msg: format!("Hydra micropayments setup error: {err}"),
+                                },
+                            }
+                        },
+                    };
+
+                    if send_json_msg(&socket_tx, &reply, asset_name).await.is_err() {
+                        break 'event_loop;
+                    }
+                },
+
                 LBEvent::NewRelayMessage(RelayMessage::Response(response)) => {
+                    if let Some(ctl) = &hydra_controller {
+                        ctl.account_one_request().await
+                    }
                     pass_on_response(response, &relay_state, asset_name).await;
                 },
 
                 LBEvent::NewRelayMessage(RelayMessage::Ping(ping_id)) => {
-                    if send_json_msg(
-                        &mut socket_tx,
-                        &LoadBalancerMessage::Pong(ping_id),
-                        asset_name,
-                    )
-                    .await
-                    .is_err()
+                    if send_json_msg(&socket_tx, &LoadBalancerMessage::Pong(ping_id), asset_name)
+                        .await
+                        .is_err()
                     {
                         break 'event_loop;
                     }
@@ -487,7 +630,7 @@
                     last_ping_id += 1;
                     last_ping_sent_at = Some(std::time::Instant::now());
                     if send_json_msg(
-                        &mut socket_tx,
+                        &socket_tx,
                         &LoadBalancerMessage::Ping(last_ping_id),
                         asset_name,
                     )
@@ -501,7 +644,15 @@
             }
         }
 
-        let disconnection_reason_ = disconnection_reason.clone().unwrap_or("reason unknown".to_string());
+        if let Some(ctl) = hydra_controller {
+            ctl.terminate().await
+        }
+
+        tunnel_cancellation.cancel();
+
+        let disconnection_reason_ = disconnection_reason
+            .clone()
+            .unwrap_or("reason unknown".to_string());
 
         warn!(
             "load balancer: {}: connection event loop finished: {}",
@@ -520,7 +671,11 @@
 
         // Stop ingress of new requests to this already broken connection by
        // deleting its producer (`request_tx`) from the `LoadBalancerState`:
-        load_balancer.active_relays.lock().await.remove(&token_state.api_prefix);
+        load_balancer
+            .active_relays
+            .lock()
+            .await
+            .remove(&token_state.api_prefix);
 
         // Fail all remaining requests for this relay that possibly are still on
         // the channel after `break 'event_loop`.
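The teardown above hinges on `tokio_util::sync::CancellationToken`: a single `cancel()` at the end of the event loop stops every tunnel task that holds a clone. The pattern in isolation:

```rust
use tokio_util::sync::CancellationToken;

#[tokio::main]
async fn main() {
    let tunnel_cancellation = CancellationToken::new();

    // Each tunnel task races its work against cancellation:
    let token = tunnel_cancellation.clone();
    let worker = tokio::spawn(async move {
        tokio::select! {
            _ = token.cancelled() => println!("tunnel: shutting down"),
            _ = tokio::time::sleep(std::time::Duration::from_secs(3600)) => {
                println!("tunnel: finished on its own");
            },
        }
    });

    // When the WebSocket connection ends, one call reaches all clones:
    tunnel_cancellation.cancel();
    worker.await.unwrap();
}
```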
@@ -566,7 +721,13 @@
         }
 
         // Wait for all children to finish:
-        let children = [request_task, finish_task, response_task, clean_up_task];
+        let children = [
+            request_task,
+            finish_task,
+            response_task,
+            arbitrary_msg_task,
+            clean_up_task,
+        ];
         children.iter().for_each(|t| t.abort());
         futures::future::join_all(children).await;
 
@@ -574,7 +735,10 @@
     }
 
     /// We currently want to allow only a single connection per NFT:
-    async fn disconnect_existing_sessions_of(token_state: &AccessTokenState, load_balancer: &LoadBalancerState) {
+    async fn disconnect_existing_sessions_of(
+        token_state: &AccessTokenState,
+        load_balancer: &LoadBalancerState,
+    ) {
         let mut other_do_finish_tx: Vec<mpsc::Sender<String>> = Vec::with_capacity(1);
         load_balancer
             .active_relays
@@ -636,7 +800,9 @@
     }
 
     /// Wire HTTP requests to the connection 'event_loop:
-    async fn wire_requests(event_tx: mpsc::Sender<LBEvent>) -> (mpsc::Sender<RequestState>, JoinHandle<()>) {
+    async fn wire_requests(
+        event_tx: mpsc::Sender<LBEvent>,
+    ) -> (mpsc::Sender<RequestState>, JoinHandle<()>) {
         let (tx, mut rx) = mpsc::channel(64);
         let task = tokio::spawn(async move {
             while let Some(msg) = rx.recv().await {
@@ -649,7 +815,9 @@
     }
 
     /// Wire `do_finish` signals to the connection 'event_loop:
-    async fn wire_do_finish(event_tx: mpsc::Sender<LBEvent>) -> (mpsc::Sender<String>, JoinHandle<()>) {
+    async fn wire_do_finish(
+        event_tx: mpsc::Sender<LBEvent>,
+    ) -> (mpsc::Sender<String>, JoinHandle<()>) {
         let (tx, mut rx) = mpsc::channel(64);
         let task = tokio::spawn(async move {
             while let Some(msg) = rx.recv().await {
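The next hunk rewrites `wire_responses` to hand out a cloneable `mpsc::Sender<Message>` backed by a dedicated writer task, instead of the WebSocket's `SplitSink`: a sink has exactly one owner, while a sender can be cloned into the tunnel forwarder and the event loop alike. A minimal sketch of that single-writer pattern (a `Vec<String>` stands in for the sink):

```rust
use tokio::sync::mpsc;

#[tokio::main]
async fn main() {
    // `msg_tx` is cheap to clone; only the writer task touches the sink.
    let (msg_tx, mut msg_rx) = mpsc::channel::<String>(64);

    let writer = tokio::spawn(async move {
        let mut sink: Vec<String> = Vec::new(); // stand-in for the SplitSink
        while let Some(msg) = msg_rx.recv().await {
            sink.push(msg);
        }
        sink
    });

    let tunnel_tx = msg_tx.clone();
    tokio::spawn(async move {
        let _ = tunnel_tx.send("HydraTunnel(...)".to_string()).await;
    })
    .await
    .unwrap();

    msg_tx.send("Pong(1)".to_string()).await.unwrap();
    drop(msg_tx); // closing the last sender ends the writer loop

    assert_eq!(writer.await.unwrap().len(), 2);
}
```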
@@ -666,11 +834,13 @@
         event_tx: mpsc::Sender<LBEvent>,
         socket: WebSocket,
         asset_name: &AssetName,
-    ) -> (SplitSink<WebSocket, Message>, JoinHandle<()>) {
-        let (tx, mut rx) = socket.split();
-        let asset_name = asset_name.clone();
-        let task = tokio::spawn(async move {
-            while let Some(Ok(msg)) = rx.next().await {
+    ) -> (mpsc::Sender<Message>, JoinHandle<()>, JoinHandle<()>) {
+        use futures_util::{sink::SinkExt, stream::StreamExt};
+        let (msg_tx, mut msg_rx) = mpsc::channel::<Message>(64);
+        let (mut sock_tx, mut sock_rx) = socket.split();
+        let asset_name_ = asset_name.clone();
+        let response_task = tokio::spawn(async move {
+            while let Some(Ok(msg)) = sock_rx.next().await {
                 match msg {
                     Message::Text(text) => {
                         match serde_json::from_str::<RelayMessage>(&text) {
@@ -681,7 +851,7 @@
                             },
                             Err(err) => warn!(
                                 "load balancer: {}: received unparsable text message: {:?}: {:?}",
-                                asset_name.as_str(),
+                                asset_name_.as_str(),
                                 text,
                                 err,
                             ),
@@ -690,31 +860,50 @@
                     Message::Binary(bin) => {
                         warn!(
                             "load balancer: {}: received unexpected binary message: {:?}",
-                            asset_name.as_str(),
+                            asset_name_.as_str(),
                             hex::encode(bin),
                         );
                     },
                     Message::Close(frame) => {
                         warn!(
                             "load balancer: {}: relay disconnected (CloseFrame: {:?})",
-                            asset_name.as_str(),
+                            asset_name_.as_str(),
                             frame,
                         );
-                        let _ignored_failure: Result<_, _> =
-                            event_tx.send(LBEvent::Finish("relay disconnected".to_string())).await;
+                        let _ignored_failure: Result<_, _> = event_tx
+                            .send(LBEvent::Finish("relay disconnected".to_string()))
+                            .await;
                         break;
                     },
                     Message::Ping(_) | Message::Pong(_) => {},
                 }
             }
         });
-        (tx, task)
+        let asset_name_ = asset_name.clone();
+        let arbitrary_msg_task = tokio::spawn(async move {
+            while let Some(msg) = msg_rx.recv().await {
+                match sock_tx.send(msg).await {
+                    Ok(()) => (),
+                    Err(err) => {
+                        error!(
+                            "load balancer: {}: error when sending a message: {:?}",
+                            asset_name_.as_str(),
+                            err
+                        );
+                        // Something wrong with the socket, let’s break the 'event_loop
+                        // (eventually, by closing `msg_rx`):
+                        break;
+                    },
+                }
+            }
+        });
+        (msg_tx, response_task, arbitrary_msg_task)
     }
 
     /// Sends a JSON message to a WebSocket. `Err(_)` is returned when you
     /// need to break the 'event_loop, because the connection is already broken.
     async fn send_json_msg<J>(
-        socket_tx: &mut SplitSink<WebSocket, Message>,
+        socket_tx: &mpsc::Sender<Message>,
         msg: &J,
         asset_name: &AssetName,
     ) -> Result<(), String>
@@ -727,7 +916,7 @@
             Ok(_) => Ok(()),
             Err(err) => {
                 error!(
-                    "load balancer: {}: error when sending a Pong: {:?}",
+                    "load balancer: {}: error when sending a message: {:?}",
                     asset_name.as_str(),
                     err
                 );
@@ -750,12 +939,23 @@
     }
 
     /// Passes a WebSocket response on to the original HTTP requester.
-    async fn pass_on_response(response: JsonResponse, relay_state: &RelayState, asset_name: &AssetName) {
+    async fn pass_on_response(
+        response: JsonResponse,
+        relay_state: &RelayState,
+        asset_name: &AssetName,
+    ) {
         let request_id = response.id.clone();
 
-        match relay_state.requests_in_progress.lock().await.remove(&request_id) {
+        match relay_state
+            .requests_in_progress
+            .lock()
+            .await
+            .remove(&request_id)
+        {
             Some(request_state) => {
-                relay_state.responses_received.fetch_add(1, atomic::Ordering::SeqCst);
+                relay_state
+                    .responses_received
+                    .fetch_add(1, atomic::Ordering::SeqCst);
                 match request_state.respond_to.send(response) {
                     Ok(_) => (),
                     Err(_) => warn!(
@@ -779,7 +979,7 @@
         request: RequestState,
         relay_state: &RelayState,
         asset_name: &AssetName,
-        socket_tx: &mut SplitSink<WebSocket, Message>,
+        socket_tx: &mpsc::Sender<Message>,
     ) -> Result<(), String> {
         let request_id = request.underlying.id.clone();
         let (request, json) = serialize_request(request);
@@ -796,13 +996,20 @@
 
         match send_result {
             Ok(_) => {
-                relay_state.requests_sent.fetch_add(1, atomic::Ordering::SeqCst);
+                relay_state
+                    .requests_sent
+                    .fetch_add(1, atomic::Ordering::SeqCst);
 
                 Ok(())
             },
             Err(err) => {
                 let err = format!("error when sending request to relay: {:?}", err);
 
-                if let Some(request) = relay_state.requests_in_progress.lock().await.remove(&request_id) {
+                if let Some(request) = relay_state
+                    .requests_in_progress
+                    .lock()
+                    .await
+                    .remove(&request_id)
+                {
                     fail_request(request, StatusCode::BAD_REQUEST, &err, asset_name).await;
                 }
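`pass_on_request` parks a oneshot sender in `requests_in_progress` under the request id, and `pass_on_response` completes it, which is how a WebSocket reply finds its way back to the waiting HTTP handler. The correlation pattern in isolation:

```rust
use std::collections::HashMap;
use tokio::sync::oneshot;

#[tokio::main]
async fn main() {
    let mut in_progress: HashMap<String, oneshot::Sender<String>> = HashMap::new();

    // Request side: park the responder under the request id.
    let (respond_to, response_rx) = oneshot::channel();
    in_progress.insert("req-1".to_string(), respond_to);

    // Response side: look the id up and complete the oneshot.
    if let Some(respond_to) = in_progress.remove("req-1") {
        let _ = respond_to.send("200 OK".to_string());
    }

    assert_eq!(response_rx.await.unwrap(), "200 OK");
}
```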
@@ -813,7 +1020,12 @@
     }
 
     /// Returns a failure to the HTTP client of a given [`RequestState`].
-    async fn fail_request(request: RequestState, code: StatusCode, why: &str, asset_name: &AssetName) {
+    async fn fail_request(
+        request: RequestState,
+        code: StatusCode,
+        why: &str,
+        asset_name: &AssetName,
+    ) {
         let request_id = request.underlying.id.clone();
         error!(
             "load balancer: {}: failing request with {}: {}: {:?}",
@@ -869,19 +1081,21 @@ async fn request_to_json(
         .collect();
 
     let body = request.into_body();
-    let body_bytes = axum::body::to_bytes(body, MAX_BODY_BYTES).await.map_err(|err| {
-        (
-            StatusCode::BAD_REQUEST,
-            format!(
-                "failed to read body bytes for request to {}: {}: {:?}",
-                relay_name.as_str(),
-                path_override,
-                err
-            ),
-        )
-    })?;
+    let body_bytes = axum::body::to_bytes(body, MAX_BODY_BYTES)
+        .await
+        .map_err(|err| {
+            (
+                StatusCode::BAD_REQUEST,
+                format!(
+                    "failed to read body bytes for request to {}: {}: {:?}",
+                    relay_name.as_str(),
+                    path_override,
+                    err
+                ),
+            )
+        })?;
 
-    use base64::{engine::general_purpose, Engine as _};
+    use base64::{Engine as _, engine::general_purpose};
     let body_base64 = general_purpose::STANDARD.encode(body_bytes);
 
     Ok(JsonRequest {
@@ -906,17 +1120,20 @@ async fn json_to_response(
         if json.body_base64.is_empty() {
             Body::empty()
         } else {
-            use base64::{engine::general_purpose, Engine as _};
-            let body_bytes: Vec<u8> = general_purpose::STANDARD.decode(json.body_base64).map_err(|err| {
-                (
-                    StatusCode::BAD_GATEWAY,
-                    format!(
-                        "{}: Invalid base64 encoding of response body_base64: {}",
-                        relay_name.as_str(),
-                        err
-                    ),
-                )
-            })?;
+            use base64::{Engine as _, engine::general_purpose};
+            let body_bytes: Vec<u8> =
+                general_purpose::STANDARD
+                    .decode(json.body_base64)
+                    .map_err(|err| {
+                        (
+                            StatusCode::BAD_GATEWAY,
+                            format!(
+                                "{}: Invalid base64 encoding of response body_base64: {}",
+                                relay_name.as_str(),
+                                err
+                            ),
+                        )
+                    })?;
             Body::from(body_bytes)
         }
     };
@@ -955,7 +1172,7 @@ mod tests {
 
     #[tokio::test]
     async fn test_new_creates_empty_state() {
-        let lb = LoadBalancerState::new().await;
+        let lb = LoadBalancerState::new(None).await;
 
         let tokens = lb.access_tokens.lock().await;
         assert!(tokens.is_empty());
@@ -966,10 +1183,10 @@ mod tests {
 
     #[tokio::test]
     async fn test_new_access_token_register() {
-        let lb = LoadBalancerState::new().await;
+        let lb = LoadBalancerState::new(None).await;
         let name = AssetName("x-asset-x".to_string());
         let prefix = Uuid::new_v4();
-        let token = lb.new_access_token(name.clone(), prefix).await;
+        let token = lb.new_access_token(name.clone(), prefix, "addr1…").await;
 
         let state = lb.register(&token.0).await.expect("should register");
         assert_eq!(state.name, name);
@@ -982,14 +1199,14 @@ mod tests {
 
     #[tokio::test]
     async fn test_register_invalid_token() {
-        let lb = LoadBalancerState::new().await;
+        let lb = LoadBalancerState::new(None).await;
         let res = lb.register("invalid").await;
         assert!(matches!(res, Err(APIError::Unauthorized())));
     }
 
     #[tokio::test]
     async fn test_register_expired_token() {
-        let lb = LoadBalancerState::new().await;
+        let lb = LoadBalancerState::new(None).await;
         let name = AssetName("x-asset-x".to_string());
         let prefix = Uuid::new_v4();
         let token = random_token();
@@ -999,6 +1216,7 @@ mod tests {
             token.clone(),
             AccessTokenState {
                 name,
+                reward_addr: "addr1…".to_string(),
                 api_prefix: prefix,
                 expires,
             },
@@ -1010,7 +1228,7 @@ mod tests {
 
     #[tokio::test]
     async fn test_clean_up_expired_tokens_logic() {
-        let lb = LoadBalancerState::new().await;
+        let lb = LoadBalancerState::new(None).await;
         let name = AssetName("x-asset-x".to_string());
         let prefix = Uuid::new_v4();
@@ -1021,6 +1239,7 @@ mod tests {
             token_expired.clone(),
             AccessTokenState {
                 name: name.clone(),
+                reward_addr: "addr1…".to_string(),
                 api_prefix: prefix,
                 expires: expires_expired,
             },
@@ -1034,6 +1253,7 @@ mod tests {
             token_valid.clone(),
             AccessTokenState {
                 name,
+                reward_addr: "addr1…".to_string(),
                 api_prefix: prefix,
                 expires: expires_valid,
             },
diff --git a/src/main.rs b/src/main.rs
index 924cada..721f4c3 100644
--- a/src/main.rs
+++ b/src/main.rs
@@ -1,18 +1,10 @@
-mod api;
-mod blockfrost;
-mod config;
-mod db;
-mod errors;
-mod load_balancer;
-mod models;
-mod payload;
-mod schema;
-
+use anyhow::Result;
 use api::{register, root};
 use axum::{
-    routing::{get, post},
     Extension, Router,
+    routing::{get, post},
 };
+use blockfrost_gateway::{api, blockfrost, config, db, hydra, load_balancer};
 use clap::Parser;
 use colored::Colorize;
 use config::{Args, Config};
@@ -21,7 +13,7 @@
 use std::net::SocketAddr;
 use tracing_subscriber::fmt::format::Format;
 
 #[tokio::main]
-async fn main() {
+async fn main() -> Result<()> {
     dotenvy::dotenv().ok();
 
     let arguments = Args::parse();
@@ -40,7 +32,12 @@
     let pool = DB::new(&config.database.connection_string).await;
     let blockfrost_api = blockfrost::BlockfrostAPI::new(&config.blockfrost.project_id);
 
-    let load_balancer = load_balancer::LoadBalancerState::new().await;
+    let hydras_manager = if let Some(hydra) = &config.hydra {
+        Some(hydra::HydrasManager::new(hydra, &config.server.network).await?)
+    } else {
+        None
+    };
+    let load_balancer = load_balancer::LoadBalancerState::new(hydras_manager).await;
 
     let app = Router::new()
         .route("/", get(root::route))
@@ -88,4 +85,6 @@
         eprintln!("Server error: {}", e);
         std::process::exit(1);
     });
+
+    Ok(())
 }
diff --git a/src/payload.rs b/src/payload.rs
index ac0b390..669236d 100644
--- a/src/payload.rs
+++ b/src/payload.rs
@@ -116,13 +116,20 @@ mod tests {
     fn port_out_of_range_fails(mut valid_payload: Payload, #[case] port: i32) {
         valid_payload.port = port;
 
-        assert_validation_err_contains(valid_payload.validate(), "Port must be between 1 and 65535");
+        assert_validation_err_contains(
+            valid_payload.validate(),
+            "Port must be between 1 and 65535",
+        );
     }
 
     #[rstest]
     #[case("1234567", true)] // 7 -> error
     #[case("12345678", false)] // 8 -> ok
-    fn secret_min_length(mut valid_payload: Payload, #[case] secret: &str, #[case] expect_err: bool) {
+    fn secret_min_length(
+        mut valid_payload: Payload,
+        #[case] secret: &str,
+        #[case] expect_err: bool,
+    ) {
         valid_payload.secret = secret.into();
 
         let res = valid_payload.validate();
diff --git a/src/types.rs b/src/types.rs
new file mode 100644
index 0000000..c47a6c4
--- /dev/null
+++ b/src/types.rs
@@ -0,0 +1,41 @@
+use clap::ValueEnum;
+use serde::{Deserialize, Serialize};
+
+#[derive(Debug, Clone, ValueEnum, Serialize, Deserialize, PartialEq, Eq)]
+#[serde(rename_all = "lowercase")]
+pub enum Network {
+    Mainnet,
+    Preprod,
+    Preview,
+}
+
+impl Network {
+    pub fn network_magic(&self) -> u64 {
+        match self {
+            Self::Mainnet => 764824073,
+            Self::Preprod => 1,
+            Self::Preview => 2,
+        }
+    }
+
+    pub fn is_testnet(&self) -> bool {
+        *self != Self::Mainnet
+    }
+
+    // FIXME: use serde? But it allocs.
+    pub fn as_str(&self) -> &'static str {
+        match self {
+            Self::Mainnet => "mainnet",
+            Self::Preprod => "preprod",
+            Self::Preview => "preview",
+        }
+    }
+}
+
+#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)]
+pub struct AssetName(pub String);
+
+impl AssetName {
+    pub fn as_str(&self) -> &str {
+        &self.0
+    }
+}
diff --git a/tests/common/mod.rs b/tests/common/mod.rs
index ede3b14..14dc58f 100644
--- a/tests/common/mod.rs
+++ b/tests/common/mod.rs
@@ -1,8 +1,8 @@
 use axum::{
-    routing::{any, get},
     Extension, Router,
+    routing::{any, get},
 };
-use blockfrost_gateway::load_balancer::{api, LoadBalancerState};
+use blockfrost_gateway::load_balancer::{LoadBalancerState, api};
 use std::net::SocketAddr;
 use tokio::net::TcpListener;
 use tokio::task::JoinHandle;
diff --git a/tests/load_balancer.rs b/tests/load_balancer.rs
index 88d39ab..67e2629 100644
--- a/tests/load_balancer.rs
+++ b/tests/load_balancer.rs
@@ -3,17 +3,17 @@ use common::*;
 
 use base64::Engine;
 use blockfrost_gateway::{
-    blockfrost::AssetName,
     load_balancer::{JsonResponse, LoadBalancerMessage, LoadBalancerState, RelayMessage},
+    types::AssetName,
 };
 use futures::{SinkExt, StreamExt};
 use std::vec;
-use tungstenite::{handshake::client::generate_key, Message};
+use tungstenite::{Message, handshake::client::generate_key};
 use uuid::Uuid;
 
 #[tokio::test]
 async fn test_websocket_connection_invalid_token() {
-    let lb = LoadBalancerState::new().await;
+    let lb = LoadBalancerState::new(None).await;
 
     let router = build_router(lb.clone()).await;
     let (addr, server_handle) = start_server(router).await;
@@ -34,11 +34,11 @@ async fn test_websocket_connection_invalid_token() {
 
 #[tokio::test]
 async fn test_websocket_request_response_flow() {
-    let lb = LoadBalancerState::new().await;
+    let lb = LoadBalancerState::new(None).await;
     let name = AssetName("test-asset".to_string());
     let prefix = Uuid::new_v4();
 
-    let token = lb.new_access_token(name.clone(), prefix).await;
+    let token = lb.new_access_token(name.clone(), prefix, "addr1…").await;
 
     let router = build_router(lb.clone()).await;
     let (addr, server_handle) = start_server(router).await;
@@ -73,7 +73,8 @@ async fn test_websocket_request_response_flow() {
             id: json_req.id,
             code: 200,
             header: vec![],
-            body_base64: base64::engine::general_purpose::STANDARD.encode(b"test response"),
+            body_base64: base64::engine::general_purpose::STANDARD
+                .encode(b"test response"),
         };
 
         let relay_msg = RelayMessage::Response(response);