diff --git a/Cargo.lock b/Cargo.lock index cfd3531c48..a313e8005a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -923,7 +923,7 @@ dependencies = [ "bitflags 2.9.1", "cexpr", "clang-sys", - "itertools 0.10.5", + "itertools 0.13.0", "proc-macro2 1.0.95", "quote", "regex", @@ -938,7 +938,7 @@ version = "0.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "568b6890865156d9043af490d4c4081c385dd68ea10acd6ca15733d511e6b51c" dependencies = [ - "hmac", + "hmac 0.12.1", "pbkdf2 0.12.2", "rand 0.8.5", "sha2 0.10.9", @@ -953,7 +953,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "db40d3dfbeab4e031d78c844642fa0caa0b0db11ce1607ac9d2986dff1405c69" dependencies = [ "bs58", - "hmac", + "hmac 0.12.1", "k256", "once_cell", "pbkdf2 0.12.2", @@ -1585,7 +1585,7 @@ dependencies = [ "bs58", "coins-core", "digest 0.10.7", - "hmac", + "hmac 0.12.1", "k256", "serde", "sha2 0.10.9", @@ -1600,7 +1600,7 @@ checksum = "3db8fba409ce3dc04f7d804074039eb68b960b0829161f8e06c95fea3f122528" dependencies = [ "bitvec", "coins-bip32", - "hmac", + "hmac 0.12.1", "once_cell", "pbkdf2 0.12.2", "rand 0.8.5", @@ -1888,6 +1888,22 @@ dependencies = [ "typenum", ] +[[package]] +name = "crypto-mac" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "58bcd97a54c7ca5ce2f6eb16f6bede5b0ab5f0055fedc17d2f0b4466e21671ca" +dependencies = [ + "generic-array", + "subtle", +] + +[[package]] +name = "cryptoxide" +version = "0.4.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "382ce8820a5bb815055d3553a610e8cb542b2d767bbacea99038afda96cd760d" + [[package]] name = "ct-codecs" version = "1.1.6" @@ -1903,6 +1919,19 @@ dependencies = [ "cipher 0.4.4", ] +[[package]] +name = "curve25519-dalek" +version = "3.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b9fdf9972b2bd6af2d913799d9ebc165ea4d2e65878e329d9c6b372c4491b61" +dependencies = [ + "byteorder", + 
"digest 0.9.0", + "rand_core 0.5.1", + "subtle", + "zeroize", +] + [[package]] name = "curve25519-dalek" version = "4.1.3" @@ -2337,10 +2366,19 @@ dependencies = [ "elliptic-curve", "rfc6979", "serdect", - "signature", + "signature 2.2.0", "spki", ] +[[package]] +name = "ed25519" +version = "1.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "91cff35c70bba8a626e3185d8cd48cc11b5437e1a5bcd15b9b5fa3c64b6dfee7" +dependencies = [ + "signature 1.6.4", +] + [[package]] name = "ed25519" version = "2.2.3" @@ -2349,7 +2387,7 @@ checksum = "115531babc129696a58c64a4fef0a8bf9e9698629fb97e9e40767d235cfbcd53" dependencies = [ "pkcs8", "serde", - "signature", + "signature 2.2.0", ] [[package]] @@ -2367,14 +2405,26 @@ dependencies = [ "zeroize", ] +[[package]] +name = "ed25519-dalek" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c762bae6dcaf24c4c84667b8579785430908723d5c889f469d76a41d59cc7a9d" +dependencies = [ + "curve25519-dalek 3.2.0", + "ed25519 1.5.3", + "sha2 0.9.9", + "zeroize", +] + [[package]] name = "ed25519-dalek" version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4a3daa8e81a3963a60642bcc1f90a670680bd4a77535faa384e9d1c79d620871" dependencies = [ - "curve25519-dalek", - "ed25519", + "curve25519-dalek 4.1.3", + "ed25519 2.2.3", "rand_core 0.6.4", "serde", "sha2 0.10.9", @@ -2389,8 +2439,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6b49a684b133c4980d7ee783936af771516011c8cd15f429dbda77245e282f03" dependencies = [ "derivation-path", - "ed25519-dalek", - "hmac", + "ed25519-dalek 2.1.1", + "hmac 0.12.1", "sha2 0.10.9", ] @@ -2564,7 +2614,7 @@ dependencies = [ "ctr", "digest 0.10.7", "hex", - "hmac", + "hmac 0.12.1", "pbkdf2 0.11.0", "rand 0.8.5", "scrypt", @@ -3627,6 +3677,16 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"3011d1213f159867b13cfd6ac92d2cd5f1345762c63be3554e84092d85a50bbd" +[[package]] +name = "hmac" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "deae6d9dbb35ec2c502d62b8f7b1c000a0822c3b0794ba36b3149c0a1c840dff" +dependencies = [ + "crypto-mac", + "digest 0.9.0", +] + [[package]] name = "hmac" version = "0.12.1" @@ -4499,14 +4559,15 @@ dependencies = [ "async-stream", "bech32 0.9.1", "bitcoin", + "blake2", "bs58", "byte-unit", "bytes", "crossbeam-channel", "digest 0.10.7", "dirs-next", - "ed25519", - "ed25519-dalek", + "ed25519 2.2.3", + "ed25519-dalek 2.1.1", "ed25519-dalek-bip32", "env_logger", "flex-error", @@ -4526,6 +4587,8 @@ dependencies = [ "num-bigint", "num-rational", "once_cell", + "pallas-codec", + "pallas-primitives", "pbjson-types", "penumbra-sdk-custody", "penumbra-sdk-fee", @@ -4536,6 +4599,7 @@ dependencies = [ "penumbra-sdk-view", "penumbra-sdk-wallet", "prost", + "prost-types", "regex", "reqwest 0.11.27", "retry", @@ -4548,7 +4612,8 @@ dependencies = [ "serde_json", "serial_test", "sha2 0.10.9", - "signature", + "signature 2.2.0", + "slip10", "strum 0.25.0", "subtle-encoding", "tendermint", @@ -5449,7 +5514,7 @@ dependencies = [ "once_cell", "serdect", "sha2 0.10.9", - "signature", + "signature 2.2.0", ] [[package]] @@ -5787,6 +5852,27 @@ version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a" +[[package]] +name = "minicbor" +version = "0.20.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9d15f4203d71fdf90903c2696e55426ac97a363c67b218488a73b534ce7aca10" +dependencies = [ + "half", + "minicbor-derive", +] + +[[package]] +name = "minicbor-derive" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1154809406efdb7982841adb6311b3d095b46f78342dd646736122fe6b19e267" +dependencies = [ + "proc-macro2 1.0.95", + "quote", + 
"syn 1.0.109", +] + [[package]] name = "minimal-lexical" version = "0.2.1" @@ -5956,7 +6042,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9dfd77f274636f722e966c394b381a70233ed4c25150864a4c53d398028a6818" dependencies = [ "base58", - "hmac", + "hmac 0.12.1", "k256", "memzero", "sha2 0.10.9", @@ -6988,6 +7074,48 @@ dependencies = [ "group", ] +[[package]] +name = "pallas-codec" +version = "0.30.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "747279d1bc612986035619a3eaded8f9f4ceae29668aa7a5feae83681a0e93f4" +dependencies = [ + "hex", + "minicbor", + "serde", + "thiserror 1.0.69", +] + +[[package]] +name = "pallas-crypto" +version = "0.30.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1b6f8b08e32c7dbb50302222701ae15ef9ac1a7cc39225ce29c253f6ddab2aa7" +dependencies = [ + "cryptoxide", + "hex", + "pallas-codec", + "rand_core 0.6.4", + "serde", + "thiserror 1.0.69", +] + +[[package]] +name = "pallas-primitives" +version = "0.30.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "24929d461308626183d5bf15290e6315f4cc67fa38a1a66469425919683cceb2" +dependencies = [ + "base58", + "bech32 0.9.1", + "hex", + "log", + "pallas-codec", + "pallas-crypto", + "serde", + "serde_json", +] + [[package]] name = "parity-scale-codec" version = "3.7.5" @@ -7139,7 +7267,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "83a0692ec44e4cf1ef28ca317f14f8f07da2d95ec3fa01f86e4467b725e60917" dependencies = [ "digest 0.10.7", - "hmac", + "hmac 0.12.1", "password-hash 0.4.2", "sha2 0.10.9", ] @@ -7151,7 +7279,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f8ed6a7761f76e3b9f92dfb0a60a6a6477c61024b775147ff0973a02653abaf2" dependencies = [ "digest 0.10.7", - "hmac", + "hmac 0.12.1", "password-hash 0.5.0", ] @@ -7700,7 +7828,7 @@ dependencies = [ "ethnum", "f4jumble", "hex", - "hmac", + "hmac 0.12.1", "ibig", 
"num-bigint", "once_cell", @@ -8731,6 +8859,12 @@ dependencies = [ "rand_core 0.9.3", ] +[[package]] +name = "rand_core" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "90bde5296fc891b0cef12a6d03ddccc162ce7b2aff54160af9338f8d40df6d19" + [[package]] name = "rand_core" version = "0.6.4" @@ -8976,7 +9110,7 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f8dd2a808d456c4a54e300a23e9f5a67e122c3024119acbfd73e3bf664491cb2" dependencies = [ - "hmac", + "hmac 0.12.1", "subtle", ] @@ -9484,7 +9618,7 @@ version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9f9e24d2b632954ded8ab2ef9fea0a0c769ea56ea98bddbafbad22caeeadf45d" dependencies = [ - "hmac", + "hmac 0.12.1", "pbkdf2 0.11.0", "salsa20", "sha2 0.10.9", @@ -9929,6 +10063,12 @@ dependencies = [ "libc", ] +[[package]] +name = "signature" +version = "1.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "74233d3b3b2f6d4b006dc19dee745e73e2a6bfb6f93607cd3b02bd5b00797d7c" + [[package]] name = "signature" version = "2.2.0" @@ -9985,6 +10125,17 @@ version = "0.4.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "04dc19736151f35336d325007ac991178d504a119863a2fcb3758cdb5e52c50d" +[[package]] +name = "slip10" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "28724a6e6f70b0cb115c580891483da6f3aa99e6a353598303a57f89d23aa6bc" +dependencies = [ + "ed25519-dalek 1.0.1", + "hmac 0.9.0", + "sha2 0.9.9", +] + [[package]] name = "slip10_ed25519" version = "0.1.3" @@ -10336,7 +10487,7 @@ checksum = "fc997743ecfd4864bbca8170d68d9b2bee24653b034210752c2d883ef4b838b1" dependencies = [ "bytes", "digest 0.10.7", - "ed25519", + "ed25519 2.2.3", "ed25519-consensus", "flex-error", "futures", @@ -10350,7 +10501,7 @@ dependencies = [ "serde_json", "serde_repr", "sha2 0.10.9", - "signature", + "signature 
2.2.0", "subtle", "subtle-encoding", "tendermint-proto", @@ -10667,7 +10818,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "62cc94d358b5a1e84a5cb9109f559aa3c4d634d2b1b4de3d0fa4adc7c78e2861" dependencies = [ "anyhow", - "hmac", + "hmac 0.12.1", "once_cell", "pbkdf2 0.11.0", "rand 0.8.5", @@ -12235,7 +12386,7 @@ dependencies = [ "crc32fast", "crossbeam-utils", "flate2", - "hmac", + "hmac 0.12.1", "pbkdf2 0.11.0", "sha1", "time", diff --git a/README.md b/README.md index 9d8f0d496c..424ad3775f 100644 --- a/README.md +++ b/README.md @@ -135,3 +135,49 @@ Unless required by applicable law or agreed to in writing, software distributed [cosmos-shield]: https://img.shields.io/static/v1?label=&labelColor=1B1E36&color=1B1E36&message=cosmos%20ecosystem&style=for-the-badge&logo=data:image/svg+xml;base64,PD94bWwgdmVyc2lvbj0iMS4wIiBlbmNvZGluZz0idXRmLTgiPz4KPCEtLSBHZW5lcmF0b3I6IEFkb2JlIElsbHVzdHJhdG9yIDI0LjMuMCwgU1ZHIEV4cG9ydCBQbHVnLUluIC4gU1ZHIFZlcnNpb246IDYuMDAgQnVpbGQgMCkgIC0tPgo8c3ZnIHZlcnNpb249IjEuMSIgaWQ9IkxheWVyXzEiIHhtbG5zPSJodHRwOi8vd3d3LnczLm9yZy8yMDAwL3N2ZyIgeG1sbnM6eGxpbms9Imh0dHA6Ly93d3cudzMub3JnLzE5OTkveGxpbmsiIHg9IjBweCIgeT0iMHB4IgoJIHZpZXdCb3g9IjAgMCAyNTAwIDI1MDAiIHN0eWxlPSJlbmFibGUtYmFja2dyb3VuZDpuZXcgMCAwIDI1MDAgMjUwMDsiIHhtbDpzcGFjZT0icHJlc2VydmUiPgo8c3R5bGUgdHlwZT0idGV4dC9jc3MiPgoJLnN0MHtmaWxsOiM2RjczOTA7fQoJLnN0MXtmaWxsOiNCN0I5Qzg7fQo8L3N0eWxlPgo8cGF0aCBjbGFzcz0ic3QwIiBkPSJNMTI1Mi42LDE1OS41Yy0xMzQuOSwwLTI0NC4zLDQ4OS40LTI0NC4zLDEwOTMuMXMxMDkuNCwxMDkzLjEsMjQ0LjMsMTA5My4xczI0NC4zLTQ4OS40LDI0NC4zLTEwOTMuMQoJUzEzODcuNSwxNTkuNSwxMjUyLjYsMTU5LjV6IE0xMjY5LjQsMjI4NGMtMTUuNCwyMC42LTMwLjksNS4xLTMwLjksNS4xYy02Mi4xLTcyLTkzLjItMjA1LjgtOTMuMi0yMDUuOAoJYy0xMDguNy0zNDkuOC04Mi44LTExMDAuOC04Mi44LTExMDAuOGM1MS4xLTU5Ni4yLDE0NC03MzcuMSwxNzUuNi03NjguNGM2LjctNi42LDE3LjEtNy40LDI0LjctMmM0NS45LDMyLjUsODQuNCwxNjguNSw4NC40LDE2OC41CgljMTEzLjYsNDIxLjgsMTAzLjMsODE3LjksMTAzLjMsODE3LjljMTAuMywzNDQuNy01Ni45LDczMC41LTU2LjksNzMwLjVDMTM0MS45LDIyMjIuMiwxMjY5LjQsMjI4N
CwxMjY5LjQsMjI4NHoiLz4KPHBhdGggY2xhc3M9InN0MCIgZD0iTTIyMDAuNyw3MDguNmMtNjcuMi0xMTcuMS01NDYuMSwzMS42LTEwNzAsMzMycy04OTMuNSw2MzguOS04MjYuMyw3NTUuOXM1NDYuMS0zMS42LDEwNzAtMzMyCglTMjI2Ny44LDgyNS42LDIyMDAuNyw3MDguNkwyMjAwLjcsNzA4LjZ6IE0zNjYuNCwxNzgwLjRjLTI1LjctMy4yLTE5LjktMjQuNC0xOS45LTI0LjRjMzEuNi04OS43LDEzMi0xODMuMiwxMzItMTgzLjIKCWMyNDkuNC0yNjguNCw5MTMuOC02MTkuNyw5MTMuOC02MTkuN2M1NDIuNS0yNTIuNCw3MTEuMS0yNDEuOCw3NTMuOC0yMzBjOS4xLDIuNSwxNSwxMS4yLDE0LDIwLjZjLTUuMSw1Ni0xMDQuMiwxNTctMTA0LjIsMTU3CgljLTMwOS4xLDMwOC42LTY1Ny44LDQ5Ni44LTY1Ny44LDQ5Ni44Yy0yOTMuOCwxODAuNS02NjEuOSwzMTQuMS02NjEuOSwzMTQuMUM0NTYsMTgxMi42LDM2Ni40LDE3ODAuNCwzNjYuNCwxNzgwLjRMMzY2LjQsMTc4MC40CglMMzY2LjQsMTc4MC40eiIvPgo8cGF0aCBjbGFzcz0ic3QwIiBkPSJNMjE5OC40LDE4MDAuNGM2Ny43LTExNi44LTMwMC45LTQ1Ni44LTgyMy03NTkuNVMzNzQuNCw1ODcuOCwzMDYuOCw3MDQuN3MzMDAuOSw0NTYuOCw4MjMuMyw3NTkuNQoJUzIxMzAuNywxOTE3LjQsMjE5OC40LDE4MDAuNHogTTM1MS42LDc0OS44Yy0xMC0yMy43LDExLjEtMjkuNCwxMS4xLTI5LjRjOTMuNS0xNy42LDIyNC43LDIyLjYsMjI0LjcsMjIuNgoJYzM1Ny4yLDgxLjMsOTk0LDQ4MC4yLDk5NCw0ODAuMmM0OTAuMywzNDMuMSw1NjUuNSw0OTQuMiw1NzYuOCw1MzcuMWMyLjQsOS4xLTIuMiwxOC42LTEwLjcsMjIuNGMtNTEuMSwyMy40LTE4OC4xLTExLjUtMTg4LjEtMTEuNQoJYy00MjIuMS0xMTMuMi03NTkuNi0zMjAuNS03NTkuNi0zMjAuNWMtMzAzLjMtMTYzLjYtNjAzLjItNDE1LjMtNjAzLjItNDE1LjNjLTIyNy45LTE5MS45LTI0NS0yODUuNC0yNDUtMjg1LjRMMzUxLjYsNzQ5Ljh6Ii8+CjxjaXJjbGUgY2xhc3M9InN0MSIgY3g9IjEyNTAiIGN5PSIxMjUwIiByPSIxMjguNiIvPgo8ZWxsaXBzZSBjbGFzcz0ic3QxIiBjeD0iMTc3Ny4zIiBjeT0iNzU2LjIiIHJ4PSI3NC42IiByeT0iNzcuMiIvPgo8ZWxsaXBzZSBjbGFzcz0ic3QxIiBjeD0iNTUzIiBjeT0iMTAxOC41IiByeD0iNzQuNiIgcnk9Ijc3LjIiLz4KPGVsbGlwc2UgY2xhc3M9InN0MSIgY3g9IjEwOTguMiIgY3k9IjE5NjUiIHJ4PSI3NC42IiByeT0iNzcuMiIvPgo8L3N2Zz4K [cosmos-link]: https://cosmos.network + +## Note on Cardano Integration + +The Cosmos SDK chains follow a standard pattern: + +```rust +// How Cosmos chains work: +impl SigningKeyPair for Secp256k1KeyPair { + // Creates key from mnemonic + fn from_mnemonic( + mnemonic: &str, + hd_path: &StandardHDPath, + address_type: &AddressType, + 
account_prefix: &str, + ) -> Result { + let private_key = private_key_from_mnemonic(mnemonic, hd_path)?; + let public_key = Xpub::from_priv(&Secp256k1::signing_only(), &private_key); + let address = get_address(&public_key.public_key, address_type); + let account = encode_address(account_prefix, &address)?; + + Ok(Self { + private_key, + public_key, + address_type, + account, + }) + } + + // Must be Serialize + Deserialize for storage + fn account(&self) -> String { self.account.clone() } + fn sign(&self, message: &[u8]) -> Vec { /* ... */ } +} +``` +Where keys are stored in `~/.hermes/keys/{chain-id}/keyring-test/{key-name}.json` serialized as JSON including the mnemonic, then get loaded on demand. The usage would be: + +`hermes keys add --chain cosmos-hub --mnemonic-file ~/mnemonic.txt` +`hermes keys list --chain cosmos-hub` +`hermes keys delete --chain cosmos-hub --key-name my-key` + +### Cardano Light Client Model + +On the Cosmos side, Cardano is tracked using a single client type, `08-cardano`, with headers encoded as `/ibc.lightclients.mithril.v1.MithrilHeader`. The header carries Mithril-certified evidence for a HostState update transaction, which allows the verifier to extract the committed 32-byte `ibc_state_root` and store it in consensus state. Membership and non-membership then use standard ICS-23 proofs (protobuf `ibc.core.commitment.v1.MerkleProof` bytes) against that `ibc_state_root`. + +Height semantics follow Mithril transaction snapshots: `Height.revision_height` is treated as a Cardano block number (as surfaced by db-sync and the Mithril snapshot `block_number`), not a Cardano slot. Because Mithril certificates are checkpoint-based, Hermes may need to wait after a Cardano transaction is included until that inclusion is covered by a certified snapshot before it can safely build or use proofs at that height. 
+ +This design intentionally keeps header progression and state proof verification under a single IBC client identifier, matching the core IBC connection and channel machinery. diff --git a/crates/relayer-cli/src/commands/keys/add.rs b/crates/relayer-cli/src/commands/keys/add.rs index 24eddd94f9..da3bd49b0a 100644 --- a/crates/relayer-cli/src/commands/keys/add.rs +++ b/crates/relayer-cli/src/commands/keys/add.rs @@ -10,7 +10,7 @@ use abscissa_core::{Command, Runnable}; use eyre::eyre; use hdpath::StandardHDPath; use ibc_relayer::{ - chain::namada::wallet::CliWalletUtils, + chain::{cardano::signing_key_pair::CardanoSigningKeyPair, namada::wallet::CliWalletUtils}, config::{ChainConfig, Config}, keyring::{ AnySigningKeyPair, KeyRing, NamadaKeyPair, Secp256k1KeyPair, SigningKeyPair, @@ -253,6 +253,23 @@ pub fn add_key( namada_key.into() } ChainConfig::Penumbra(_) => unimplemented!("no key storage support for penumbra"), + ChainConfig::Cardano(config) => { + let mut keyring = KeyRing::new( + config.key_store_type, + "cardano", // account_prefix not used for Cardano + &config.id, + &config.key_store_folder, + )?; + + check_key_exists(&keyring, key_name, overwrite); + + let key_contents = + fs::read_to_string(file).map_err(|_| eyre!("error reading the key file"))?; + let key_pair = CardanoSigningKeyPair::from_seed_file(&key_contents, hd_path)?; + + keyring.add_key(key_name, key_pair.clone())?; + key_pair.into() + } }; Ok(key_pair) @@ -295,6 +312,26 @@ pub fn restore_key( )); } ChainConfig::Penumbra(_) => return Err(eyre!("no key storage support for penumbra")), + ChainConfig::Cardano(config) => { + let mut keyring = KeyRing::new( + config.key_store_type, + "cardano", // account_prefix not used for Cardano + &config.id, + &config.key_store_folder, + )?; + + check_key_exists(&keyring, key_name, overwrite); + + let key_pair = CardanoSigningKeyPair::from_mnemonic( + &mnemonic_content, + hdpath, + &ibc_relayer::config::AddressType::Cosmos, // Not used for Cardano + "cardano", 
// Not used for Cardano + )?; + + keyring.add_key(key_name, key_pair.clone())?; + key_pair.into() + } }; Ok(key_pair) diff --git a/crates/relayer-cli/src/commands/keys/balance.rs b/crates/relayer-cli/src/commands/keys/balance.rs index b8af7b0e80..afcca40db1 100644 --- a/crates/relayer-cli/src/commands/keys/balance.rs +++ b/crates/relayer-cli/src/commands/keys/balance.rs @@ -81,6 +81,7 @@ fn get_balance(chain: impl ChainHandle, key_name: Option, denom: Option< chain_config.key_name } ChainConfig::Penumbra(_) => unimplemented!("not yet supported for penumbra"), + ChainConfig::Cardano(chain_config) => chain_config.key_name, } }); @@ -106,6 +107,7 @@ fn get_balances(chain: impl ChainHandle, key_name: Option) { chain_config.key_name } ChainConfig::Penumbra(_) => unimplemented!("not yet supported for penumbra"), + ChainConfig::Cardano(chain_config) => chain_config.key_name, } }); diff --git a/crates/relayer-cli/src/commands/keys/delete.rs b/crates/relayer-cli/src/commands/keys/delete.rs index c5dc2be65f..dbc093d4aa 100644 --- a/crates/relayer-cli/src/commands/keys/delete.rs +++ b/crates/relayer-cli/src/commands/keys/delete.rs @@ -3,6 +3,7 @@ use abscissa_core::{Command, Runnable}; use eyre::eyre; use ibc_relayer::{ + chain::cardano::signing_key_pair::CardanoSigningKeyPair, config::{ChainConfig, Config}, keyring::{KeyRing, Store}, }; @@ -129,6 +130,15 @@ pub fn delete_key(config: &ChainConfig, key_name: &str) -> eyre::Result<()> { keyring.remove_key(key_name)?; } ChainConfig::Penumbra(_) => unimplemented!("no key support for penumbra"), + ChainConfig::Cardano(config) => { + let mut keyring: KeyRing = KeyRing::new( + config.key_store_type, + "cardano", + &config.id, + &config.key_store_folder, + )?; + keyring.remove_key(key_name)?; + } } Ok(()) } @@ -156,6 +166,18 @@ pub fn delete_all_keys(config: &ChainConfig) -> eyre::Result<()> { } } ChainConfig::Penumbra(_) => unimplemented!("no key support for penumbra"), + ChainConfig::Cardano(config) => { + let mut keyring: KeyRing 
= KeyRing::new( + config.key_store_type, + "cardano", + &config.id, + &config.key_store_folder, + )?; + let keys = keyring.keys()?; + for (key_name, _) in keys { + keyring.remove_key(&key_name)?; + } + } } Ok(()) } diff --git a/crates/relayer-cli/src/commands/listen.rs b/crates/relayer-cli/src/commands/listen.rs index a83afb599b..ff5581b84f 100644 --- a/crates/relayer-cli/src/commands/listen.rs +++ b/crates/relayer-cli/src/commands/listen.rs @@ -208,6 +208,9 @@ fn subscribe( let subscription = monitor_tx.subscribe()?; Ok(subscription) } + ChainConfig::Cardano(_) => Err(eyre!( + "event subscription is not implemented for Cardano; requires Gateway-backed event source support in `hermes listen`" + )), } } @@ -218,6 +221,11 @@ fn detect_compatibility_mode( let rpc_addr = match config { ChainConfig::CosmosSdk(config) | ChainConfig::Namada(config) => config.rpc_addr.clone(), ChainConfig::Penumbra(config) => config.rpc_addr.clone(), + ChainConfig::Cardano(_) => { + return Err(eyre!( + "compatibility mode detection is not applicable for Cardano (no Tendermint RPC)" + )); + } }; let client = HttpClient::builder(rpc_addr.try_into()?) @@ -232,6 +240,11 @@ fn detect_compatibility_mode( let status = rt.block_on(client.status())?; penumbra::util::compat_mode_from_version(&config.compat_mode, status.node_info.version)? 
} + ChainConfig::Cardano(_) => { + return Err(eyre!( + "compatibility mode detection is not applicable for Cardano (no Tendermint RPC)" + )); + } }; Ok(compat_mode) diff --git a/crates/relayer-cli/src/commands/tx/client.rs b/crates/relayer-cli/src/commands/tx/client.rs index 7cf09d4c9a..f81f86a5c9 100644 --- a/crates/relayer-cli/src/commands/tx/client.rs +++ b/crates/relayer-cli/src/commands/tx/client.rs @@ -212,6 +212,11 @@ impl Runnable for TxUpdateClientCmd { ChainConfig::Penumbra(chain_config) => { chain_config.genesis_restart = Some(restart_params) } + ChainConfig::Cardano(_) => Output::error( + "genesis restart parameters are not supported for Cardano chains" + .to_string(), + ) + .exit(), }, None => { Output::error(format!( diff --git a/crates/relayer-types/src/clients/ics08_cardano/client_state.rs b/crates/relayer-types/src/clients/ics08_cardano/client_state.rs new file mode 100644 index 0000000000..59826a4276 --- /dev/null +++ b/crates/relayer-types/src/clients/ics08_cardano/client_state.rs @@ -0,0 +1,259 @@ +use std::time::Duration; + +use prost::Message; +use serde_derive::{Deserialize, Serialize}; + +use ibc_proto::google::protobuf::Any; +use ibc_proto::Protobuf; + +use crate::clients::ics08_cardano::error::Error; +use crate::clients::ics08_cardano::raw; +use crate::core::ics02_client::client_state::ClientState as Ics2ClientState; +use crate::core::ics02_client::client_type::ClientType; +use crate::core::ics02_client::error::Error as Ics02Error; +use crate::core::ics24_host::identifier::ChainId; +use crate::Height; + +pub const MITHRIL_CLIENT_STATE_TYPE_URL: &str = "/ibc.lightclients.mithril.v1.ClientState"; + +type RawClientState = raw::ClientState; +type RawHeight = raw::Height; + +#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] +pub struct ClientState { + pub chain_id: ChainId, + pub latest_height: Height, + pub frozen_height: Option, + pub current_epoch: u64, + pub trusting_period: Duration, + pub protocol_parameters: 
raw::MithrilProtocolParameters, + pub upgrade_path: Vec, + pub host_state_nft_policy_id: Vec, + pub host_state_nft_token_name: Vec, +} + +impl Ics2ClientState for ClientState { + fn chain_id(&self) -> ChainId { + self.chain_id.clone() + } + + fn client_type(&self) -> ClientType { + ClientType::Cardano + } + + fn latest_height(&self) -> Height { + self.latest_height + } + + fn frozen_height(&self) -> Option { + self.frozen_height + } + + fn expired(&self, _elapsed: Duration) -> bool { + // The Cosmos-sidechain Mithril client currently disables expiry (see Go implementation). + false + } +} + +impl Protobuf for ClientState {} + +impl TryFrom for ClientState { + type Error = Error; + + fn try_from(raw: RawClientState) -> Result { + let RawClientState { + chain_id: raw_chain_id, + latest_height, + frozen_height, + current_epoch, + trusting_period, + protocol_parameters, + upgrade_path, + host_state_nft_policy_id, + host_state_nft_token_name, + } = raw; + + // `ChainId` parsing is infallible in Hermes. + let chain_id = ChainId::from_string(&raw_chain_id); + + let latest_height = latest_height + .ok_or_else(|| Error::missing_field("latest_height"))? 
+ .try_into()?; + + let frozen_height = frozen_height.and_then(|h| h.try_into().ok()); + + let trusting_period = trusting_period + .and_then(|d| duration_from_proto(d).ok()) + .ok_or_else(|| Error::missing_field("trusting_period"))?; + + let protocol_parameters = + protocol_parameters.ok_or_else(|| Error::missing_field("protocol_parameters"))?; + + if host_state_nft_policy_id.is_empty() { + return Err(Error::missing_field("host_state_nft_policy_id")); + } + + if host_state_nft_policy_id.len() != 28 { + return Err(Error::invalid_field( + "host_state_nft_policy_id", + format!("expected 28 bytes, got {}", host_state_nft_policy_id.len()), + )); + } + + Ok(Self { + chain_id, + latest_height, + frozen_height, + current_epoch, + trusting_period, + protocol_parameters, + upgrade_path, + host_state_nft_policy_id, + host_state_nft_token_name, + }) + } +} + +impl From for RawClientState { + fn from(value: ClientState) -> Self { + RawClientState { + chain_id: value.chain_id.to_string(), + latest_height: Some(value.latest_height.into()), + frozen_height: value.frozen_height.map(Into::into), + current_epoch: value.current_epoch, + trusting_period: Some(duration_to_proto(value.trusting_period)), + protocol_parameters: Some(value.protocol_parameters), + upgrade_path: value.upgrade_path, + host_state_nft_policy_id: value.host_state_nft_policy_id, + host_state_nft_token_name: value.host_state_nft_token_name, + } + } +} + +impl TryFrom for Height { + type Error = Error; + + fn try_from(raw: RawHeight) -> Result { + Height::new(raw.revision_number, raw.revision_height).map_err(|e| { + Error::height_conversion(format!( + "failed to construct height from revision_number={}, revision_height={}: {e}", + raw.revision_number, raw.revision_height + )) + }) + } +} + +impl From for RawHeight { + fn from(value: Height) -> Self { + RawHeight { + revision_number: value.revision_number(), + revision_height: value.revision_height(), + } + } +} + +fn duration_from_proto(d: 
ibc_proto::google::protobuf::Duration) -> Result { + let secs = u64::try_from(d.seconds) + .map_err(|_| Error::timestamp_conversion("negative duration seconds".to_string()))?; + + let nanos = u32::try_from(d.nanos) + .map_err(|_| Error::timestamp_conversion("negative duration nanos".to_string()))?; + + Ok(Duration::new(secs, nanos)) +} + +fn duration_to_proto(d: Duration) -> ibc_proto::google::protobuf::Duration { + ibc_proto::google::protobuf::Duration { + seconds: d.as_secs() as i64, + nanos: d.subsec_nanos() as i32, + } +} + +impl Protobuf for ClientState {} + +impl TryFrom for ClientState { + type Error = Ics02Error; + + fn try_from(raw_any: Any) -> Result { + use core::ops::Deref; + + fn decode_state(bytes: &[u8]) -> Result { + RawClientState::decode(bytes) + .map_err(Error::decode)? + .try_into() + } + + match raw_any.type_url.as_str() { + MITHRIL_CLIENT_STATE_TYPE_URL => { + decode_state(raw_any.value.deref()).map_err(Into::into) + } + _ => Err(Ics02Error::unknown_client_state_type(raw_any.type_url)), + } + } +} + +impl From for Any { + fn from(value: ClientState) -> Self { + Any { + type_url: MITHRIL_CLIENT_STATE_TYPE_URL.to_string(), + value: Protobuf::::encode_vec(value), + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + use test_log::test; + + fn raw_protocol_parameters() -> raw::MithrilProtocolParameters { + raw::MithrilProtocolParameters { + k: 1, + m: 2, + phi_f: None, + } + } + + fn raw_client_state() -> raw::ClientState { + raw::ClientState { + chain_id: "chain-1".to_string(), + latest_height: Some(raw::Height { + revision_number: 0, + revision_height: 10, + }), + frozen_height: None, + current_epoch: 0, + trusting_period: Some(ibc_proto::google::protobuf::Duration { + seconds: 3600, + nanos: 0, + }), + protocol_parameters: Some(raw_protocol_parameters()), + upgrade_path: vec![], + host_state_nft_policy_id: vec![0; 28], + host_state_nft_token_name: b"host_state_nft".to_vec(), + } + } + + #[test] + fn mithril_client_state_any_roundtrip() { 
+ let state = ClientState::try_from(raw_client_state()).unwrap(); + let any: Any = state.clone().into(); + let decoded = ClientState::try_from(any).unwrap(); + + assert_eq!(decoded, state); + assert_eq!(decoded.latest_height.revision_number(), 0); + assert_eq!(decoded.latest_height.revision_height(), 10); + } + + #[test] + fn mithril_client_state_invalid_policy_id_length_fails() { + let mut raw = raw_client_state(); + raw.host_state_nft_policy_id = vec![0; 27]; + + let err = ClientState::try_from(raw).unwrap_err(); + assert!(err + .to_string() + .contains("invalid field host_state_nft_policy_id: expected 28 bytes, got 27")); + } +} diff --git a/crates/relayer-types/src/clients/ics08_cardano/consensus_state.rs b/crates/relayer-types/src/clients/ics08_cardano/consensus_state.rs new file mode 100644 index 0000000000..8bbca0af8d --- /dev/null +++ b/crates/relayer-types/src/clients/ics08_cardano/consensus_state.rs @@ -0,0 +1,188 @@ +use prost::Message; +use serde_derive::{Deserialize, Serialize}; + +use ibc_proto::google::protobuf::Any; +use ibc_proto::Protobuf; + +use crate::clients::ics08_cardano::error::Error; +use crate::clients::ics08_cardano::raw; +use crate::core::ics02_client::client_type::ClientType; +use crate::core::ics02_client::consensus_state::ConsensusState as Ics2ConsensusState; +use crate::core::ics02_client::error::Error as Ics02Error; +use crate::core::ics23_commitment::commitment::CommitmentRoot; +use crate::timestamp::Timestamp; + +pub const MITHRIL_CONSENSUS_STATE_TYPE_URL: &str = "/ibc.lightclients.mithril.v1.ConsensusState"; + +type RawConsensusState = raw::ConsensusState; + +#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] +pub struct ConsensusState { + pub root: CommitmentRoot, + pub timestamp: u64, + pub first_cert_hash_latest_epoch: raw::MithrilCertificate, + pub latest_cert_hash_tx_snapshot: String, +} + +impl ConsensusState { + pub fn new( + root: CommitmentRoot, + timestamp: u64, + first_cert_hash_latest_epoch: 
raw::MithrilCertificate, + latest_cert_hash_tx_snapshot: String, + ) -> Self { + Self { + root, + timestamp, + first_cert_hash_latest_epoch, + latest_cert_hash_tx_snapshot, + } + } +} + +impl Ics2ConsensusState for ConsensusState { + fn client_type(&self) -> ClientType { + ClientType::Cardano + } + + fn root(&self) -> &CommitmentRoot { + &self.root + } + + fn timestamp(&self) -> Timestamp { + Timestamp::from_nanoseconds(self.timestamp).unwrap_or_else(|_| Timestamp::none()) + } +} + +impl Protobuf for ConsensusState {} + +impl TryFrom for ConsensusState { + type Error = Error; + + fn try_from(raw: RawConsensusState) -> Result { + let RawConsensusState { + timestamp, + first_cert_hash_latest_epoch, + latest_cert_hash_tx_snapshot, + ibc_state_root, + } = raw; + + let first = first_cert_hash_latest_epoch + .ok_or_else(|| Error::missing_field("first_cert_hash_latest_epoch"))?; + + if ibc_state_root.is_empty() { + return Err(Error::missing_field("ibc_state_root")); + } + + if ibc_state_root.len() != 32 { + return Err(Error::invalid_field( + "ibc_state_root", + format!("expected 32 bytes, got {}", ibc_state_root.len()), + )); + } + + let root = CommitmentRoot::from_bytes(&ibc_state_root); + + Ok(Self::new( + root, + timestamp, + first, + latest_cert_hash_tx_snapshot, + )) + } +} + +impl From for RawConsensusState { + fn from(value: ConsensusState) -> Self { + RawConsensusState { + timestamp: value.timestamp, + first_cert_hash_latest_epoch: Some(value.first_cert_hash_latest_epoch), + latest_cert_hash_tx_snapshot: value.latest_cert_hash_tx_snapshot, + ibc_state_root: value.root.as_bytes().to_vec(), + } + } +} + +impl Protobuf for ConsensusState {} + +impl TryFrom for ConsensusState { + type Error = Ics02Error; + + fn try_from(raw_any: Any) -> Result { + use core::ops::Deref; + + fn decode_state(bytes: &[u8]) -> Result { + RawConsensusState::decode(bytes) + .map_err(Error::decode)? 
+ .try_into() + } + + match raw_any.type_url.as_str() { + MITHRIL_CONSENSUS_STATE_TYPE_URL => { + decode_state(raw_any.value.deref()).map_err(Into::into) + } + _ => Err(Ics02Error::unknown_consensus_state_type(raw_any.type_url)), + } + } +} + +impl From for Any { + fn from(value: ConsensusState) -> Self { + Any { + type_url: MITHRIL_CONSENSUS_STATE_TYPE_URL.to_string(), + value: Protobuf::::encode_vec(value), + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + use test_log::test; + + fn raw_certificate() -> raw::MithrilCertificate { + raw::MithrilCertificate { + hash: "cert_hash".to_string(), + previous_hash: "".to_string(), + epoch: 0, + signed_entity_type: None, + metadata: None, + protocol_message: None, + signed_message: "".to_string(), + aggregate_verification_key: "".to_string(), + multi_signature: "".to_string(), + genesis_signature: "".to_string(), + } + } + + fn raw_consensus_state() -> raw::ConsensusState { + raw::ConsensusState { + timestamp: 1, + first_cert_hash_latest_epoch: Some(raw_certificate()), + latest_cert_hash_tx_snapshot: "latest".to_string(), + ibc_state_root: vec![0u8; 32], + } + } + + #[test] + fn mithril_consensus_state_any_roundtrip() { + let state = ConsensusState::try_from(raw_consensus_state()).unwrap(); + let any: Any = state.clone().into(); + let decoded = ConsensusState::try_from(any).unwrap(); + + assert_eq!(decoded, state); + assert_eq!(decoded.root.as_bytes().len(), 32); + } + + #[test] + fn mithril_consensus_state_invalid_root_length_fails() { + let mut raw = raw_consensus_state(); + raw.ibc_state_root = vec![0u8; 31]; + + let err = ConsensusState::try_from(raw).unwrap_err(); + assert!(err + .to_string() + .contains("invalid field ibc_state_root: expected 32 bytes, got 31")); + } +} diff --git a/crates/relayer-types/src/clients/ics08_cardano/error.rs b/crates/relayer-types/src/clients/ics08_cardano/error.rs new file mode 100644 index 0000000000..1f2ea2e4d7 --- /dev/null +++ 
b/crates/relayer-types/src/clients/ics08_cardano/error.rs @@ -0,0 +1,48 @@ +use flex_error::{define_error, TraceError}; + +use crate::core::ics02_client::error::Error as Ics02Error; +use crate::core::ics24_host::error::ValidationError; + +define_error! { + #[derive(Debug, PartialEq, Eq)] + Error { + MissingField + { field: &'static str } + |e| { format_args!("missing required field: {}", e.field) }, + + InvalidField + { field: &'static str, reason: String } + |e| { format_args!("invalid field {}: {}", e.field, e.reason) }, + + InvalidHeight + { height: u64 } + |e| { format_args!("invalid Mithril header height: {}", e.height) }, + + InvalidTimestamp + { value: String } + |e| { format_args!("invalid Mithril header timestamp: {}", e.value) }, + + Decode + [ TraceError ] + |_| { "decode error" }, + + InvalidChainId + { value: String } + [ ValidationError ] + |e| { format_args!("invalid chain id: {}", e.value) }, + + HeightConversion + { reason: String } + |e| { format_args!("height conversion error: {}", e.reason) }, + + TimestampConversion + { reason: String } + |e| { format_args!("timestamp conversion error: {}", e.reason) }, + } +} + +impl From for Ics02Error { + fn from(e: Error) -> Self { + Self::client_specific(e.to_string()) + } +} diff --git a/crates/relayer-types/src/clients/ics08_cardano/header.rs b/crates/relayer-types/src/clients/ics08_cardano/header.rs new file mode 100644 index 0000000000..f2a4fcf1b8 --- /dev/null +++ b/crates/relayer-types/src/clients/ics08_cardano/header.rs @@ -0,0 +1,276 @@ +use bytes::Buf; +use ibc_proto::google::protobuf::Any; +use ibc_proto::Protobuf; +use prost::Message; +use serde_derive::{Deserialize, Serialize}; + +use crate::clients::ics08_cardano::error::Error; +use crate::clients::ics08_cardano::raw; +use crate::core::ics02_client::client_type::ClientType; +use crate::core::ics02_client::error::Error as Ics02Error; +use crate::timestamp::Timestamp; +use crate::Height; + +pub const MITHRIL_HEADER_TYPE_URL: &str = 
"/ibc.lightclients.mithril.v1.MithrilHeader"; + +type RawHeader = raw::MithrilHeader; + +/// Cardano Mithril header (Cosmos-sidechain light client). +#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] +pub struct Header { + pub height: Height, + pub timestamp: Timestamp, + pub mithril_stake_distribution: raw::MithrilStakeDistribution, + pub mithril_stake_distribution_certificate: raw::MithrilCertificate, + pub transaction_snapshot: raw::CardanoTransactionSnapshot, + pub transaction_snapshot_certificate: raw::MithrilCertificate, + pub previous_mithril_stake_distribution_certificates: Vec, + pub host_state_tx_hash: String, + pub host_state_tx_body_cbor: Vec, + pub host_state_tx_output_index: u32, + pub host_state_tx_proof: Vec, +} + +impl crate::core::ics02_client::header::Header for Header { + fn client_type(&self) -> ClientType { + ClientType::Cardano + } + + fn height(&self) -> Height { + self.height + } + + fn timestamp(&self) -> Timestamp { + self.timestamp + } +} + +impl Protobuf for Header {} + +impl TryFrom for Header { + type Error = Error; + + fn try_from(raw: RawHeader) -> Result { + let RawHeader { + mithril_stake_distribution, + mithril_stake_distribution_certificate, + transaction_snapshot, + transaction_snapshot_certificate, + previous_mithril_stake_distribution_certificates, + host_state_tx_hash, + host_state_tx_body_cbor, + host_state_tx_output_index, + host_state_tx_proof, + } = raw; + + let transaction_snapshot: raw::CardanoTransactionSnapshot = + transaction_snapshot.ok_or_else(|| Error::missing_field("transaction_snapshot"))?; + + let transaction_snapshot_certificate: raw::MithrilCertificate = + transaction_snapshot_certificate + .ok_or_else(|| Error::missing_field("transaction_snapshot_certificate"))?; + + // IBC heights are `(revision_number, revision_height)`. + // For Cardano we use `revision_number = 0` and interpret `revision_height` as the + // Cardano block number from the Mithril transaction snapshot (not a slot number). 
+ let height = Height::new(0, transaction_snapshot.block_number).map_err(|e| { + Error::height_conversion(format!( + "failed to construct height from block_number {}: {e}", + transaction_snapshot.block_number + )) + })?; + + let timestamp = { + let metadata = transaction_snapshot_certificate + .metadata + .as_ref() + .ok_or_else(|| Error::missing_field("transaction_snapshot_certificate.metadata"))?; + + let sealed_at = metadata.sealed_at.trim(); + if sealed_at.is_empty() { + return Err(Error::invalid_timestamp(sealed_at.to_string())); + } + + // RFC3339 with optional sub-second precision, matching the Go client. + let ts = time::OffsetDateTime::parse( + sealed_at, + &time::format_description::well_known::Rfc3339, + ) + .map_err(|_| Error::invalid_timestamp(sealed_at.to_string()))?; + + let nanos: i128 = ts.unix_timestamp_nanos(); + if nanos <= 0 { + return Err(Error::invalid_timestamp(sealed_at.to_string())); + } + + let nanos_u64: u64 = nanos + .try_into() + .map_err(|_| Error::timestamp_conversion("timestamp out of range".to_string()))?; + + Timestamp::from_nanoseconds(nanos_u64) + .map_err(|e| Error::timestamp_conversion(e.to_string()))? + }; + + if host_state_tx_body_cbor.is_empty() { + return Err(Error::missing_field("host_state_tx_body_cbor")); + } + + if host_state_tx_proof.is_empty() { + return Err(Error::missing_field("host_state_tx_proof")); + } + + Ok(Self { + height, + timestamp, + mithril_stake_distribution: mithril_stake_distribution + .ok_or_else(|| Error::missing_field("mithril_stake_distribution"))?, + mithril_stake_distribution_certificate: mithril_stake_distribution_certificate + .ok_or_else(|| Error::missing_field("mithril_stake_distribution_certificate"))?, + transaction_snapshot, + transaction_snapshot_certificate, + previous_mithril_stake_distribution_certificates, + host_state_tx_hash, + host_state_tx_body_cbor, + host_state_tx_output_index, + host_state_tx_proof, + }) + } +} + +impl From
for RawHeader { + fn from(value: Header) -> Self { + RawHeader { + mithril_stake_distribution: Some(value.mithril_stake_distribution), + mithril_stake_distribution_certificate: Some( + value.mithril_stake_distribution_certificate, + ), + transaction_snapshot: Some(value.transaction_snapshot), + transaction_snapshot_certificate: Some(value.transaction_snapshot_certificate), + previous_mithril_stake_distribution_certificates: value + .previous_mithril_stake_distribution_certificates, + host_state_tx_hash: value.host_state_tx_hash, + host_state_tx_body_cbor: value.host_state_tx_body_cbor, + host_state_tx_output_index: value.host_state_tx_output_index, + host_state_tx_proof: value.host_state_tx_proof, + } + } +} + +impl Protobuf for Header {} + +impl TryFrom for Header { + type Error = Ics02Error; + + fn try_from(raw_any: Any) -> Result { + use core::ops::Deref; + + fn decode_header(buf: B) -> Result { + RawHeader::decode(buf).map_err(Error::decode)?.try_into() + } + + match raw_any.type_url.as_str() { + MITHRIL_HEADER_TYPE_URL => decode_header(raw_any.value.deref()).map_err(Into::into), + _ => Err(Ics02Error::unknown_header_type(raw_any.type_url)), + } + } +} + +impl From
for Any { + fn from(header: Header) -> Self { + Any { + type_url: MITHRIL_HEADER_TYPE_URL.to_string(), + value: Protobuf::::encode_vec(header), + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + use test_log::test; + + fn raw_protocol_parameters() -> raw::MithrilProtocolParameters { + raw::MithrilProtocolParameters { + k: 1, + m: 2, + phi_f: None, + } + } + + fn raw_certificate(sealed_at: &str) -> raw::MithrilCertificate { + raw::MithrilCertificate { + hash: "cert_hash".to_string(), + previous_hash: "".to_string(), + epoch: 0, + signed_entity_type: None, + metadata: Some(raw::CertificateMetadata { + network: "testnet".to_string(), + protocol_version: "v1".to_string(), + protocol_parameters: Some(raw_protocol_parameters()), + initiated_at: "2024-01-01T00:00:00Z".to_string(), + sealed_at: sealed_at.to_string(), + signers: vec![], + }), + protocol_message: None, + signed_message: "".to_string(), + aggregate_verification_key: "".to_string(), + multi_signature: "".to_string(), + genesis_signature: "".to_string(), + } + } + + fn raw_stake_distribution() -> raw::MithrilStakeDistribution { + raw::MithrilStakeDistribution { + epoch: 0, + signers_with_stake: vec![], + hash: "stake_dist_hash".to_string(), + certificate_hash: "stake_dist_cert_hash".to_string(), + created_at: 0, + protocol_parameter: Some(raw_protocol_parameters()), + } + } + + fn raw_header(block_number: u64) -> raw::MithrilHeader { + raw::MithrilHeader { + mithril_stake_distribution: Some(raw_stake_distribution()), + mithril_stake_distribution_certificate: Some(raw_certificate("2024-01-01T00:00:00Z")), + transaction_snapshot: Some(raw::CardanoTransactionSnapshot { + merkle_root: "merkle_root".to_string(), + epoch: 0, + block_number, + hash: "tx_snapshot_hash".to_string(), + certificate_hash: "tx_snapshot_cert_hash".to_string(), + created_at: "2024-01-01T00:00:00Z".to_string(), + }), + transaction_snapshot_certificate: Some(raw_certificate("2024-01-01T00:00:00Z")), + 
previous_mithril_stake_distribution_certificates: vec![], + host_state_tx_hash: "host_state_tx_hash".to_string(), + host_state_tx_body_cbor: vec![0x01], + host_state_tx_output_index: 0, + host_state_tx_proof: vec![0x02], + } + } + + #[test] + fn mithril_header_any_roundtrip() { + let header = Header::try_from(raw_header(10)).unwrap(); + let any: Any = header.clone().into(); + let decoded = Header::try_from(any).unwrap(); + + assert_eq!(decoded, header); + assert_eq!(decoded.height.revision_number(), 0); + assert_eq!(decoded.height.revision_height(), 10); + } + + #[test] + fn mithril_header_missing_transaction_snapshot_fails() { + let mut raw = raw_header(10); + raw.transaction_snapshot = None; + + let err = Header::try_from(raw).unwrap_err(); + assert!(err + .to_string() + .contains("missing required field: transaction_snapshot")); + } +} diff --git a/crates/relayer-types/src/clients/ics08_cardano/mod.rs b/crates/relayer-types/src/clients/ics08_cardano/mod.rs new file mode 100644 index 0000000000..d3911d2075 --- /dev/null +++ b/crates/relayer-types/src/clients/ics08_cardano/mod.rs @@ -0,0 +1,14 @@ +//! Cardano light client types (`08-cardano`) +//! +//! Domain types for the Cosmos-side `08-cardano` light client, implemented using Mithril. +//! Protobuf messages live under `ibc.lightclients.mithril.v1.*`. + +pub mod client_state; +pub mod consensus_state; +pub mod error; +pub mod header; +pub mod raw; + +pub use client_state::ClientState; +pub use consensus_state::ConsensusState; +pub use header::Header; diff --git a/crates/relayer-types/src/clients/ics08_cardano/raw.rs b/crates/relayer-types/src/clients/ics08_cardano/raw.rs new file mode 100644 index 0000000000..635b07bc97 --- /dev/null +++ b/crates/relayer-types/src/clients/ics08_cardano/raw.rs @@ -0,0 +1,275 @@ +//! Raw protobuf types for `ibc.lightclients.mithril.v1`. +//! +//! These message definitions mirror `cosmos/sidechain/proto/ibc/lightclients/mithril/v1/mithril.proto`. +//! 
They are intentionally kept local to `ibc-relayer-types` to enable encoding/decoding from +//! `google.protobuf.Any` without requiring upstream `ibc-proto` support. + +use serde_derive::{Deserialize, Serialize}; + +#[derive(Clone, PartialEq, ::prost::Message, Serialize, Deserialize)] +pub struct Height { + #[prost(uint64, tag = "1")] + pub revision_number: u64, + #[prost(uint64, tag = "2")] + pub revision_height: u64, +} + +#[derive(Clone, PartialEq, ::prost::Message, Serialize, Deserialize)] +pub struct ClientState { + #[prost(string, tag = "1")] + pub chain_id: ::prost::alloc::string::String, + #[prost(message, optional, tag = "2")] + pub latest_height: ::core::option::Option, + #[prost(message, optional, tag = "3")] + pub frozen_height: ::core::option::Option, + #[prost(uint64, tag = "4")] + pub current_epoch: u64, + #[prost(message, optional, tag = "5")] + pub trusting_period: ::core::option::Option, + #[prost(message, optional, tag = "6")] + pub protocol_parameters: ::core::option::Option, + #[prost(string, repeated, tag = "7")] + pub upgrade_path: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, + #[prost(bytes = "vec", tag = "8")] + pub host_state_nft_policy_id: ::prost::alloc::vec::Vec, + #[prost(bytes = "vec", tag = "9")] + pub host_state_nft_token_name: ::prost::alloc::vec::Vec, +} + +#[derive(Clone, PartialEq, ::prost::Message, Serialize, Deserialize)] +pub struct ConsensusState { + #[prost(uint64, tag = "1")] + pub timestamp: u64, + #[prost(message, optional, tag = "2")] + pub first_cert_hash_latest_epoch: ::core::option::Option, + #[prost(string, tag = "3")] + pub latest_cert_hash_tx_snapshot: ::prost::alloc::string::String, + #[prost(bytes = "vec", tag = "4")] + pub ibc_state_root: ::prost::alloc::vec::Vec, +} + +#[derive(Clone, PartialEq, ::prost::Message, Serialize, Deserialize)] +pub struct Misbehaviour { + #[prost(string, tag = "1")] + pub client_id: ::prost::alloc::string::String, + #[prost(message, optional, tag = "2")] + pub 
mithril_header_1: ::core::option::Option, + #[prost(message, optional, tag = "3")] + pub mithril_header_2: ::core::option::Option, +} + +#[derive(Clone, PartialEq, ::prost::Message, Serialize, Deserialize)] +pub struct MithrilHeader { + #[prost(message, optional, tag = "1")] + pub mithril_stake_distribution: ::core::option::Option, + #[prost(message, optional, tag = "2")] + pub mithril_stake_distribution_certificate: ::core::option::Option, + #[prost(message, optional, tag = "3")] + pub transaction_snapshot: ::core::option::Option, + #[prost(message, optional, tag = "4")] + pub transaction_snapshot_certificate: ::core::option::Option, + #[prost(message, repeated, tag = "9")] + pub previous_mithril_stake_distribution_certificates: + ::prost::alloc::vec::Vec, + #[prost(string, tag = "5")] + pub host_state_tx_hash: ::prost::alloc::string::String, + #[prost(bytes = "vec", tag = "6")] + pub host_state_tx_body_cbor: ::prost::alloc::vec::Vec, + #[prost(uint32, tag = "7")] + pub host_state_tx_output_index: u32, + #[prost(bytes = "vec", tag = "8")] + pub host_state_tx_proof: ::prost::alloc::vec::Vec, +} + +#[derive(Clone, PartialEq, Eq, ::prost::Message, Serialize, Deserialize)] +pub struct MithrilStakeDistribution { + #[prost(uint64, tag = "1")] + pub epoch: u64, + #[prost(message, repeated, tag = "2")] + pub signers_with_stake: ::prost::alloc::vec::Vec, + #[prost(string, tag = "3")] + pub hash: ::prost::alloc::string::String, + #[prost(string, tag = "4")] + pub certificate_hash: ::prost::alloc::string::String, + #[prost(uint64, tag = "5")] + pub created_at: u64, + #[prost(message, optional, tag = "6")] + pub protocol_parameter: ::core::option::Option, +} + +#[derive(Clone, PartialEq, Eq, ::prost::Message, Serialize, Deserialize)] +pub struct CardanoTransactionSnapshot { + #[prost(string, tag = "1")] + pub merkle_root: ::prost::alloc::string::String, + #[prost(uint64, tag = "2")] + pub epoch: u64, + #[prost(uint64, tag = "3")] + pub block_number: u64, + #[prost(string, tag 
= "4")] + pub hash: ::prost::alloc::string::String, + #[prost(string, tag = "5")] + pub certificate_hash: ::prost::alloc::string::String, + #[prost(string, tag = "6")] + pub created_at: ::prost::alloc::string::String, +} + +#[derive(Clone, PartialEq, Eq, ::prost::Message, Serialize, Deserialize)] +pub struct MithrilCertificate { + #[prost(string, tag = "1")] + pub hash: ::prost::alloc::string::String, + #[prost(string, tag = "2")] + pub previous_hash: ::prost::alloc::string::String, + #[prost(uint64, tag = "3")] + pub epoch: u64, + #[prost(message, optional, tag = "4")] + pub signed_entity_type: ::core::option::Option, + #[prost(message, optional, tag = "5")] + pub metadata: ::core::option::Option, + #[prost(message, optional, tag = "6")] + pub protocol_message: ::core::option::Option, + #[prost(string, tag = "7")] + pub signed_message: ::prost::alloc::string::String, + #[prost(string, tag = "8")] + pub aggregate_verification_key: ::prost::alloc::string::String, + #[prost(string, tag = "9")] + pub multi_signature: ::prost::alloc::string::String, + #[prost(string, tag = "10")] + pub genesis_signature: ::prost::alloc::string::String, +} + +#[derive(Clone, PartialEq, Eq, ::prost::Message, Serialize, Deserialize)] +pub struct CertificateMetadata { + #[prost(string, tag = "1")] + pub network: ::prost::alloc::string::String, + #[prost(string, tag = "2")] + pub protocol_version: ::prost::alloc::string::String, + #[prost(message, optional, tag = "3")] + pub protocol_parameters: ::core::option::Option, + #[prost(string, tag = "4")] + pub initiated_at: ::prost::alloc::string::String, + #[prost(string, tag = "5")] + pub sealed_at: ::prost::alloc::string::String, + #[prost(message, repeated, tag = "6")] + pub signers: ::prost::alloc::vec::Vec, +} + +#[derive(Clone, PartialEq, Eq, ::prost::Message, Serialize, Deserialize)] +pub struct SignerWithStake { + #[prost(string, tag = "1")] + pub party_id: ::prost::alloc::string::String, + #[prost(uint64, tag = "2")] + pub stake: u64, 
+} + +#[derive(Clone, PartialEq, Eq, ::prost::Message, Serialize, Deserialize)] +pub struct ProtocolMessage { + #[prost(message, repeated, tag = "1")] + pub message_parts: ::prost::alloc::vec::Vec, +} + +#[derive(Clone, PartialEq, Eq, ::prost::Message, Serialize, Deserialize)] +pub struct MessagePart { + #[prost(enumeration = "ProtocolMessagePartKey", tag = "1")] + pub protocol_message_part_key: i32, + #[prost(string, tag = "2")] + pub protocol_message_part_value: ::prost::alloc::string::String, +} + +#[derive(Clone, PartialEq, Eq, ::prost::Message, Serialize, Deserialize)] +pub struct MithrilProtocolParameters { + #[prost(uint64, tag = "1")] + pub k: u64, + #[prost(uint64, tag = "2")] + pub m: u64, + #[prost(message, optional, tag = "3")] + pub phi_f: ::core::option::Option, +} + +#[derive( + Clone, + Copy, + Debug, + PartialEq, + Eq, + Hash, + PartialOrd, + Ord, + ::prost::Enumeration, + Serialize, + Deserialize, +)] +#[repr(i32)] +pub enum ProtocolMessagePartKey { + Unspecified = 0, + SnapshotDigest = 1, + CardanoTransactionsMerkleRoot = 2, + NextAggregateVerificationKey = 3, + LatestImmutableFileNumber = 4, + LatestBlockNumber = 5, +} + +#[derive(Clone, PartialEq, ::prost::Message, Serialize, Deserialize)] +pub struct ProtocolGenesisSignature { + #[prost(bytes = "vec", tag = "1")] + pub signature: ::prost::alloc::vec::Vec, +} + +#[derive(Clone, PartialEq, Eq, ::prost::Message, Serialize, Deserialize)] +pub struct SignedEntityType { + #[prost(oneof = "signed_entity_type::Entity", tags = "1, 2, 3, 4")] + pub entity: ::core::option::Option, +} + +pub mod signed_entity_type { + use super::*; + + #[derive(Clone, PartialEq, Eq, ::prost::Oneof, Serialize, Deserialize)] + pub enum Entity { + #[prost(message, tag = "1")] + MithrilStakeDistribution(MithrilStakeDistribution), + #[prost(message, tag = "2")] + CardanoStakeDistribution(CardanoStakeDistribution), + #[prost(message, tag = "3")] + CardanoImmutableFilesFull(CardanoImmutableFilesFull), + #[prost(message, tag = 
"4")] + CardanoTransactions(CardanoTransactions), + } +} + +#[derive(Clone, PartialEq, Eq, ::prost::Message, Serialize, Deserialize)] +pub struct CardanoStakeDistribution { + #[prost(uint64, tag = "1")] + pub epoch: u64, +} + +#[derive(Clone, PartialEq, Eq, ::prost::Message, Serialize, Deserialize)] +pub struct CardanoImmutableFilesFull { + #[prost(message, optional, tag = "1")] + pub beacon: ::core::option::Option, +} + +#[derive(Clone, PartialEq, Eq, ::prost::Message, Serialize, Deserialize)] +pub struct CardanoTransactions { + #[prost(uint64, tag = "1")] + pub epoch: u64, + #[prost(uint64, tag = "2")] + pub block_number: u64, +} + +#[derive(Clone, PartialEq, Eq, ::prost::Message, Serialize, Deserialize)] +pub struct CardanoDbBeacon { + #[prost(string, tag = "1")] + pub network: ::prost::alloc::string::String, + #[prost(uint64, tag = "2")] + pub epoch: u64, + #[prost(uint64, tag = "3")] + pub immutable_file_number: u64, +} + +#[derive(Clone, PartialEq, Eq, ::prost::Message, Serialize, Deserialize)] +pub struct Fraction { + #[prost(uint64, tag = "1")] + pub numerator: u64, + #[prost(uint64, tag = "2")] + pub denominator: u64, +} diff --git a/crates/relayer-types/src/clients/mod.rs b/crates/relayer-types/src/clients/mod.rs index 65ea910b18..de43b9c18a 100644 --- a/crates/relayer-types/src/clients/mod.rs +++ b/crates/relayer-types/src/clients/mod.rs @@ -1,3 +1,4 @@ //! Implementations of client verification algorithms for specific types of chains. 
pub mod ics07_tendermint; +pub mod ics08_cardano; diff --git a/crates/relayer-types/src/core/ics02_client/client_type.rs b/crates/relayer-types/src/core/ics02_client/client_type.rs index 0966d500b5..692b111b00 100644 --- a/crates/relayer-types/src/core/ics02_client/client_type.rs +++ b/crates/relayer-types/src/core/ics02_client/client_type.rs @@ -7,15 +7,20 @@ use super::error::Error; #[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)] pub enum ClientType { Tendermint = 1, + Cardano = 2, } impl ClientType { const TENDERMINT_STR: &'static str = "07-tendermint"; + // Cardano tracking client type. The corresponding protobuf messages are currently under + // `ibc.lightclients.mithril.v1.*` (Mithril). + const CARDANO_STR: &'static str = "08-cardano"; /// Yields the identifier of this client type as a string pub fn as_str(&self) -> &'static str { match self { Self::Tendermint => Self::TENDERMINT_STR, + Self::Cardano => Self::CARDANO_STR, } } } @@ -32,6 +37,7 @@ impl core::str::FromStr for ClientType { fn from_str(s: &str) -> Result { match s { Self::TENDERMINT_STR => Ok(Self::Tendermint), + Self::CARDANO_STR => Ok(Self::Cardano), _ => Err(Error::unknown_client_type(s.to_string())), } diff --git a/crates/relayer-types/src/core/ics02_client/header.rs b/crates/relayer-types/src/core/ics02_client/header.rs index e238fe1d96..c25ee1ea8e 100644 --- a/crates/relayer-types/src/core/ics02_client/header.rs +++ b/crates/relayer-types/src/core/ics02_client/header.rs @@ -4,10 +4,12 @@ use serde::{Deserialize, Serialize}; use ibc_proto::google::protobuf::Any; use ibc_proto::Protobuf; +use prost::Message; use crate::clients::ics07_tendermint::header::{ decode_header as tm_decode_header, Header as TendermintHeader, TENDERMINT_HEADER_TYPE_URL, }; +use crate::clients::ics08_cardano::header::{Header as MithrilHeader, MITHRIL_HEADER_TYPE_URL}; use crate::core::ics02_client::client_type::ClientType; use crate::core::ics02_client::error::Error; use 
crate::timestamp::Timestamp; @@ -28,37 +30,36 @@ pub trait Header: Debug + Send + Sync // Any: From, /// Decodes an encoded header into a known `Header` type, pub fn decode_header(header_bytes: &[u8]) -> Result { - // For now, we only have tendermint; however when there is more than one, we - // can try decoding into all the known types, and return an error only if - // none work - let header: TendermintHeader = - Protobuf::::decode(header_bytes).map_err(Error::invalid_raw_header)?; - - Ok(AnyHeader::Tendermint(header)) + let raw_any: Any = Any::decode(header_bytes).map_err(Error::decode)?; + AnyHeader::try_from(raw_any) } #[derive(Clone, Debug, PartialEq, Eq, Deserialize, Serialize)] #[allow(clippy::large_enum_variant)] pub enum AnyHeader { Tendermint(TendermintHeader), + Mithril(MithrilHeader), } impl Header for AnyHeader { fn client_type(&self) -> ClientType { match self { Self::Tendermint(header) => header.client_type(), + Self::Mithril(header) => header.client_type(), } } fn height(&self) -> Height { match self { Self::Tendermint(header) => header.height(), + Self::Mithril(header) => header.height(), } } fn timestamp(&self) -> Timestamp { match self { Self::Tendermint(header) => header.timestamp(), + Self::Mithril(header) => header.timestamp(), } } } @@ -74,6 +75,10 @@ impl TryFrom for AnyHeader { let val = tm_decode_header(raw.value.as_slice())?; Ok(AnyHeader::Tendermint(val)) } + MITHRIL_HEADER_TYPE_URL => { + let val: MithrilHeader = raw.try_into()?; + Ok(AnyHeader::Mithril(val)) + } _ => Err(Error::unknown_header_type(raw.type_url)), } @@ -89,6 +94,7 @@ impl From for Any { type_url: TENDERMINT_HEADER_TYPE_URL.to_string(), value: Protobuf::::encode_vec(header), }, + AnyHeader::Mithril(header) => header.into(), } } } @@ -98,3 +104,9 @@ impl From for AnyHeader { Self::Tendermint(header) } } + +impl From for AnyHeader { + fn from(header: MithrilHeader) -> Self { + Self::Mithril(header) + } +} diff --git a/crates/relayer-types/src/core/ics24_host/identifier.rs 
b/crates/relayer-types/src/core/ics24_host/identifier.rs index dbe60ea337..124cc48f5b 100644 --- a/crates/relayer-types/src/core/ics24_host/identifier.rs +++ b/crates/relayer-types/src/core/ics24_host/identifier.rs @@ -190,6 +190,7 @@ impl ClientId { pub fn prefix(client_type: ClientType) -> &'static str { match client_type { ClientType::Tendermint => ClientType::Tendermint.as_str(), + ClientType::Cardano => ClientType::Cardano.as_str(), } } diff --git a/crates/relayer/Cargo.toml b/crates/relayer/Cargo.toml index 672c34c00d..a8997ad8bb 100644 --- a/crates/relayer/Cargo.toml +++ b/crates/relayer/Cargo.toml @@ -61,6 +61,7 @@ num-bigint = { workspace = true, features = ["serde"] } num-rational = { workspace = true, features = ["num-bigint", "serde"] } once_cell = { workspace = true } prost = { workspace = true } +prost-types = "0.13" regex = { workspace = true } reqwest = { workspace = true, features = ["rustls-tls-native-roots", "json"] } retry = { workspace = true } @@ -92,6 +93,12 @@ tracing = { workspace = true } tracing-subscriber = { workspace = true, features = ["fmt", "env-filter", "json"] } uuid = { workspace = true, features = ["v4"] } +# Cardano-specific dependencies +slip10 = "0.4" +blake2 = "0.10" +pallas-primitives = "0.30" +pallas-codec = "0.30" + [dev-dependencies] ibc-relayer-types = { workspace = true } serial_test = { workspace = true } diff --git a/crates/relayer/src/chain.rs b/crates/relayer/src/chain.rs index bdee1f430a..b413677acc 100644 --- a/crates/relayer/src/chain.rs +++ b/crates/relayer/src/chain.rs @@ -1,3 +1,4 @@ +pub mod cardano; pub mod client; pub mod client_settings; pub mod cosmos; diff --git a/crates/relayer/src/chain/cardano/chain_handle.rs b/crates/relayer/src/chain/cardano/chain_handle.rs new file mode 100644 index 0000000000..cc67e6aa23 --- /dev/null +++ b/crates/relayer/src/chain/cardano/chain_handle.rs @@ -0,0 +1,7 @@ +//! Chain handle stub +//! +//! Note: In Hermes, the ChainHandle trait is implemented by the framework's +//! 
ChainRuntime. Custom chains implement the ChainEndpoint trait instead. +//! See endpoint.rs for the actual Cardano implementation. + +// This file is kept for historical reference but is not used in the Hermes integration diff --git a/crates/relayer/src/chain/cardano/config.rs b/crates/relayer/src/chain/cardano/config.rs new file mode 100644 index 0000000000..d3899ce47e --- /dev/null +++ b/crates/relayer/src/chain/cardano/config.rs @@ -0,0 +1,156 @@ +//! Configuration for Cardano chain + +use ibc_relayer_types::core::ics24_host::identifier::ChainId; +use serde::{Deserialize, Serialize}; +use std::path::PathBuf; +use std::time::Duration; + +use crate::config::{default, PacketFilter, RefreshRate}; +use crate::keyring::Store; + +/// Minimal configuration for Cardano chain integration +#[derive(Clone, Debug, PartialEq, Deserialize, Serialize)] +#[serde(deny_unknown_fields)] +pub struct CardanoConfig { + /// The chain's network identifier + pub id: ChainId, + + /// Gateway gRPC endpoint URL + pub gateway_url: String, + + /// Network ID (1 = mainnet, 0 = testnet) + pub network_id: u8, + + /// Key name for signing + pub key_name: String, + + /// Keystore type (test, file, etc.) 
+    #[serde(default)]
+    pub key_store_type: Store,
+
+    /// Optional path to keystore folder
+    // NOTE(review): generic parameter was lost in transit; `PathBuf` matches the
+    // `&cardano_config.key_store_folder` usage in `bootstrap` — confirm against config.rs head.
+    pub key_store_folder: Option<PathBuf>,
+
+    /// Account index for CIP-1852 derivation
+    #[serde(default)]
+    pub account: u32,
+
+    /// Maximum block time (for timeout calculations)
+    #[serde(default = "default_max_block_time", with = "humantime_serde")]
+    pub max_block_time: Duration,
+
+    /// Packet filter configuration
+    #[serde(default)]
+    pub packet_filter: PacketFilter,
+
+    /// Optional trust threshold (not used by Cardano but required by config interface)
+    // NOTE(review): inner type inferred as the relayer's `TrustThreshold` — confirm.
+    #[serde(default)]
+    pub trust_threshold: Option<TrustThreshold>,
+
+    /// How many packets to fetch at once from the chain when clearing packets
+    #[serde(default = "default_query_packets_chunk_size")]
+    pub query_packets_chunk_size: usize,
+
+    /// Optional clear interval
+    // NOTE(review): inner type inferred as `u64` (matches Hermes' cosmos config) — confirm.
+    pub clear_interval: Option<u64>,
+
+    /// Clock drift tolerance
+    #[serde(default = "default_clock_drift", with = "humantime_serde")]
+    pub clock_drift: Duration,
+
+    /// The rate at which to refresh the client referencing this chain,
+    /// expressed as a fraction of the trusting period.
+    #[serde(default = "default::client_refresh_rate")]
+    pub client_refresh_rate: RefreshRate,
+
+    /// Event polling interval for monitoring IBC events
+    #[serde(default = "default_event_poll_interval", with = "humantime_serde")]
+    pub event_poll_interval: Option<Duration>,
+
+    /// Maximum amount of time Hermes will wait after a Cardano transaction is included
+    /// until it is also "Mithril-certified".
+    ///
+    /// Important nuance about "height":
+    /// In this Cardano↔Cosmos integration, `Height.revision_height` is treated as a Cardano
+    /// *block number* (as surfaced by `db-sync` and by Mithril's `cardano-transactions`
+    /// snapshots). It is not a Cardano *slot number*.
+    ///
+    /// When Hermes submits a transaction on Cardano, the Gateway returns the inclusion
+    /// block number. Hermes then waits until the Gateway reports a Mithril snapshot
+    /// whose `block_number` is >= that inclusion block number, before proceeding to the
+    /// next IBC step. Without this, Hermes can race ahead and build proofs at a height
+    /// that the Cosmos-side Mithril light client cannot yet verify.
+    #[serde(
+        default = "default_mithril_certification_timeout",
+        with = "humantime_serde"
+    )]
+    pub mithril_certification_timeout: Duration,
+
+    /// Polling interval while waiting for Mithril snapshots to catch up.
+    #[serde(default = "default_mithril_poll_interval", with = "humantime_serde")]
+    pub mithril_poll_interval: Duration,
+
+    /// How often to log progress while waiting for Mithril snapshot catch-up.
+    ///
+    /// This is intentionally an `INFO`-level log, because in many environments the default
+    /// log level is `info` (so `debug` would be invisible and the process would look hung).
+    #[serde(
+        default = "default_mithril_wait_log_interval",
+        with = "humantime_serde"
+    )]
+    pub mithril_wait_log_interval: Duration,
+}
+
+fn default_max_block_time() -> Duration {
+    Duration::from_secs(30)
+}
+
+fn default_query_packets_chunk_size() -> usize {
+    50
+}
+
+fn default_clock_drift() -> Duration {
+    Duration::from_secs(5)
+}
+
+fn default_event_poll_interval() -> Option<Duration> {
+    Some(Duration::from_secs(5))
+}
+
+fn default_mithril_certification_timeout() -> Duration {
+    Duration::from_secs(10 * 60)
+}
+
+fn default_mithril_poll_interval() -> Duration {
+    Duration::from_secs(5)
+}
+
+fn default_mithril_wait_log_interval() -> Duration {
+    Duration::from_secs(30)
+}
+
+impl Default for CardanoConfig {
+    fn default() -> Self {
+        Self {
+            id: ChainId::from_string("cardano-test"),
+            gateway_url: "http://localhost:3001".to_string(),
+            network_id: 0,
+            key_name: "default".to_string(),
+            key_store_type: Store::Test,
+            key_store_folder: None,
+            account: 0,
+            max_block_time: default_max_block_time(),
+            packet_filter: PacketFilter::default(),
+            trust_threshold: None,
+            query_packets_chunk_size: default_query_packets_chunk_size(),
+            clear_interval: None,
+            clock_drift: default_clock_drift(),
+            client_refresh_rate: default::client_refresh_rate(),
+            event_poll_interval: default_event_poll_interval(),
+            mithril_certification_timeout: default_mithril_certification_timeout(),
+            mithril_poll_interval: default_mithril_poll_interval(),
+            mithril_wait_log_interval: default_mithril_wait_log_interval(),
+        }
+    }
+}
diff --git a/crates/relayer/src/chain/cardano/endpoint.rs b/crates/relayer/src/chain/cardano/endpoint.rs
new file mode 100644
index 0000000000..9d9940c017
--- /dev/null
+++ b/crates/relayer/src/chain/cardano/endpoint.rs
@@ -0,0 +1,2508 @@
+//! Cardano ChainEndpoint implementation for Hermes
+//!
+//! This module implements the ChainEndpoint trait required by Hermes for custom chain support.
+
+use super::config::CardanoConfig;
+use super::gateway_client::GatewayClient;
+use super::signing_key_pair::CardanoSigningKeyPair;
+
+use ibc_relayer_types::clients::ics08_cardano::header::Header as MithrilHeader;
+use ibc_relayer_types::clients::ics08_cardano::{
+    client_state::ClientState as MithrilClientState,
+    consensus_state::ConsensusState as MithrilConsensusState,
+};
+
+use crate::account::Balance;
+use crate::chain::client::ClientSettings;
+use crate::chain::cosmos::version::Specs as CosmosSpecs;
+use crate::chain::endpoint::{ChainEndpoint, ChainStatus, HealthCheck};
+use crate::chain::handle::Subscription;
+use crate::chain::requests::{
+    CrossChainQueryRequest, IncludeProof, QueryChannelClientStateRequest, QueryChannelRequest,
+    QueryChannelsRequest, QueryClientConnectionsRequest, QueryClientStateRequest,
+    QueryClientStatesRequest, QueryConnectionChannelsRequest, QueryConnectionRequest,
+    QueryConnectionsRequest, QueryConsensusStateHeightsRequest, QueryConsensusStateRequest,
+    QueryHostConsensusStateRequest, QueryNextSequenceReceiveRequest,
+    QueryPacketAcknowledgementRequest, QueryPacketAcknowledgementsRequest,
+    QueryPacketCommitmentRequest, QueryPacketCommitmentsRequest, QueryPacketEventDataRequest,
+    QueryPacketReceiptRequest, QueryTxRequest, QueryUnreceivedAcksRequest,
+    QueryUnreceivedPacketsRequest, QueryUpgradedClientStateRequest,
+    QueryUpgradedConsensusStateRequest,
+};
+use crate::chain::tracking::TrackedMsgs;
+use crate::chain::version::Specs;
+use crate::client_state::{AnyClientState, IdentifiedAnyClientState};
+use crate::config::{ChainConfig, Error as ConfigError};
+use crate::consensus_state::AnyConsensusState;
+use crate::denom::DenomTrace;
+use crate::error::Error;
+use crate::event::IbcEventWithHeight;
+use crate::keyring::{KeyRing, SigningKeyPair};
+use crate::misbehaviour::MisbehaviourEvidence;
+use ibc_relayer_types::core::ics02_client::events::UpdateClient;
+use ibc_relayer_types::core::ics03_connection::connection::{
+    ConnectionEnd, IdentifiedConnectionEnd,
+};
+use ibc_relayer_types::core::ics04_channel::channel::{ChannelEnd, IdentifiedChannelEnd};
+use ibc_relayer_types::core::ics04_channel::packet::Sequence;
+use ibc_relayer_types::core::ics23_commitment::commitment::CommitmentPrefix;
+use ibc_relayer_types::core::ics23_commitment::commitment::CommitmentRoot;
+use ibc_relayer_types::core::ics23_commitment::merkle::MerkleProof;
+use ibc_relayer_types::core::ics24_host::identifier::{
+    ChainId, ChannelId, ClientId, ConnectionId, PortId,
+};
+use ibc_relayer_types::signer::Signer;
+use ibc_relayer_types::Height as ICSHeight;
+use std::str::FromStr;
+use std::sync::Arc;
+use tendermint_rpc::endpoint::broadcast::tx_sync::Response as TxResponse;
+use tokio::runtime::Runtime as TokioRuntime;
+
+/// Cardano light block (placeholder)
+#[derive(Debug, Clone)]
+pub struct CardanoLightBlock {
+    pub header: MithrilHeader,
+    // NOTE(review): element types were lost in transit; `Vec<u8>` inferred from the
+    // `host_state_nft_*.clone()` usage in `verify_header` — confirm against client_state.rs.
+    pub host_state_nft_policy_id: Vec<u8>,
+    pub host_state_nft_token_name: Vec<u8>,
+}
+
+// CardanoSigningKeyPair is now defined in signing_key_pair.rs
+// From<CardanoSigningKeyPair> for AnySigningKeyPair is implemented in ibc-relayer/src/keyring/any_signing_key_pair.rs
+
+/// Cardano ChainEndpoint implementation
+pub struct CardanoChainEndpoint {
+    config: CardanoConfig,
+    rt: Arc<TokioRuntime>,
+    gateway_client: GatewayClient,
+    keyring: KeyRing<CardanoSigningKeyPair>,
+    // NOTE(review): command-channel type inferred as the relayer's event source command
+    // (`TxEventSourceCmd`); it must match what `CardanoEventSource::new` returns — confirm.
+    event_source_cmd: Option<crate::event::source::TxEventSourceCmd>,
+}
+
+impl CardanoChainEndpoint {
+    /// Sign a transaction using the keyring (private helper method)
+    fn sign_transaction_helper(&self, unsigned_cbor_hex: &str) -> Result<String, Error> {
+        use super::signer;
+
+        // Convert hex to bytes
+        let unsigned_tx_bytes = hex::decode(unsigned_cbor_hex)
+            .map_err(|e| Error::send_tx(format!("Failed to decode unsigned tx hex: {}", e)))?;
+
+        // Get signing key from keyring
+        let key = self
+            .keyring
+            .get_key(&self.config.key_name)
+            .map_err(Error::key_base)?;
+
+        // Get the CardanoSigningKeyPair and extract the CardanoKeyring
+        let signing_key_pair = key
+            .as_any()
+            .downcast_ref::<CardanoSigningKeyPair>()
+            .ok_or_else(|| {
+                Error::send_tx("Failed to downcast to CardanoSigningKeyPair".to_string())
+            })?;
+        let cardano_keyring = signing_key_pair
+            .get_cardano_keyring()
+            .map_err(|e| Error::send_tx(format!("Failed to get CardanoKeyring: {}", e)))?;
+
+        // Sign the transaction
+        let signed_tx_bytes = signer::sign_transaction(&unsigned_tx_bytes, &cardano_keyring)
+            .map_err(|e| Error::send_tx(format!("Failed to sign transaction: {}", e)))?;
+
+        // Convert back to hex
+        Ok(hex::encode(signed_tx_bytes))
+    }
+
+    /// Initialize the event source for monitoring Cardano chain events
+    fn init_event_source(&mut self) -> Result<crate::event::source::TxEventSourceCmd, Error> {
+        use super::event_source::CardanoEventSource;
+        use std::thread;
+        use std::time::Duration;
+
+        tracing::info!("Initializing Cardano event source with polling");
+
+        // Get poll interval from config (default 5 seconds)
+        let poll_interval = self
+            .config
+            .event_poll_interval
+            .unwrap_or_else(|| Duration::from_secs(5));
+
+        let (event_source, monitor_tx) = CardanoEventSource::new(
+            self.config.id.clone(),
+            self.gateway_client.clone(),
+            poll_interval,
+            self.rt.clone(),
+        )
+        .map_err(Error::event_source)?;
+
+        thread::spawn(move || event_source.run());
+
+        tracing::info!(
+            "Event source initialized, polling every {:?}",
+            poll_interval
+        );
+
+        Ok(monitor_tx)
+    }
+
+    /// Wait until the Gateway's "latest height" has caught up to (or passed) a specific
+    /// Cardano transaction inclusion height.
+    ///
+    /// Why this exists:
+    /// - The Gateway returns a transaction `height` in `submit_signed_tx` based on `db-sync`'s
+    ///   `block_no` for the block that included the transaction.
+    /// - Separately, for Cardano↔Cosmos IBC, the Cosmos-side light client only accepts *Mithril-
+    ///   certified* heights. In this integration, we treat the IBC `Height.revision_height` for
+    ///   Cardano as the Mithril Cardano-transactions snapshot `block_number` (a monotonically
+    ///   increasing block number), not as the raw chain tip.
+    ///
+    /// If Hermes proceeds immediately after inclusion, it may query proofs at a height that the
+    /// Cosmos-side client has not yet been updated to, or worse: it may receive proofs that are
+    /// valid for a newer on-chain HostState root but are being verified against an older certified
+    /// root. That shows up as "proof does not match ibc_state_root".
+    ///
+    /// To avoid this class of race, we treat "commit" for Cardano transactions as "included AND
+    /// covered by the latest Mithril transaction snapshot".
+    async fn wait_for_mithril_certified_height(
+        &self,
+        included_height: ICSHeight,
+    ) -> Result<ICSHeight, Error> {
+        let poll_interval = self.config.mithril_poll_interval;
+        let timeout = self.config.mithril_certification_timeout;
+        let log_interval = self.config.mithril_wait_log_interval;
+        let start = tokio::time::Instant::now();
+        let mut last_logged_elapsed = std::time::Duration::from_secs(0);
+        let mut last_latest_height: Option<u64> = None;
+
+        loop {
+            let latest = self
+                .gateway_client
+                .query_latest_height()
+                .await
+                .map_err(|e| Error::query(format!("Gateway query_latest_height failed: {e}")))?;
+
+            // A revision mismatch means the Gateway and this config disagree about which
+            // chain/revision we are on; bail out rather than compare incomparable heights.
+            if latest.revision_number() != included_height.revision_number() {
+                return Err(Error::query(format!(
+                    "gateway returned revision_number={} but expected revision_number={}",
+                    latest.revision_number(),
+                    included_height.revision_number()
+                )));
+            }
+
+            if latest.revision_height() >= included_height.revision_height() {
+                return Ok(latest);
+            }
+
+            let elapsed = start.elapsed();
+            if elapsed >= timeout {
+                return Err(Error::send_tx(format!(
+                    "timed out waiting for Mithril-certified height >= {} (latest={}). \
+                     Note: for Cardano, Height.revision_height is a Mithril snapshot block_number (not a slot).",
+                    included_height, latest,
+                )));
+            }
+
+            let latest_height = latest.revision_height();
+            let required_height = included_height.revision_height();
+            let missing_blocks = required_height.saturating_sub(latest_height);
+
+            // Log when the observed height changes, or periodically while it is stuck,
+            // so the operator can tell a slow snapshot apart from a hung process.
+            let latest_changed = last_latest_height
+                .map(|prev| prev != latest_height)
+                .unwrap_or(true);
+            let should_log =
+                latest_changed || elapsed.saturating_sub(last_logged_elapsed) >= log_interval;
+
+            if should_log {
+                let remaining = timeout.saturating_sub(elapsed);
+                let log_msg = format!(
+                    "Waiting for Mithril snapshot: need >= {} (missing {} blocks), have {}, elapsed={}s, remaining={}s",
+                    included_height,
+                    missing_blocks,
+                    latest,
+                    elapsed.as_secs(),
+                    remaining.as_secs(),
+                );
+
+                // Escalate to WARN once less than one log interval of budget remains.
+                if remaining <= log_interval {
+                    tracing::warn!("{log_msg}");
+                } else {
+                    tracing::info!("{log_msg}");
+                }
+
+                last_logged_elapsed = elapsed;
+            }
+
+            last_latest_height = Some(latest_height);
+            tokio::time::sleep(poll_interval).await;
+        }
+    }
+}
+
+impl ChainEndpoint for CardanoChainEndpoint {
+    type LightBlock = CardanoLightBlock;
+    type Header = MithrilHeader;
+    type ConsensusState = MithrilConsensusState;
+    type ClientState = MithrilClientState;
+    type Time = i64; // Unix timestamp
+    type SigningKeyPair = CardanoSigningKeyPair;
+
+    fn id(&self) -> &ChainId {
+        &self.config.id
+    }
+
+    fn config(&self) -> ChainConfig {
+        ChainConfig::Cardano(self.config.clone())
+    }
+
+    fn bootstrap(config: ChainConfig, rt: Arc<TokioRuntime>) -> Result<Self, Error> {
+        tracing::info!("Bootstrapping Cardano chain endpoint");
+
+        // Extract Cardano-specific config
+        let cardano_config: CardanoConfig = match config {
+            ChainConfig::Cardano(config) => config,
+            _ => {
+                tracing::error!("Invalid config type provided to Cardano bootstrap");
+                return Err(Error::config(ConfigError::wrong_type()));
+            }
+        };
+
+        tracing::info!(
+            "Initializing Cardano endpoint for chain: {}, gateway: {}",
+            cardano_config.id,
cardano_config.gateway_url + ); + + // Initialize Gateway client (async operation, so use rt.block_on) + let gateway_client = rt + .block_on(GatewayClient::new(cardano_config.gateway_url.clone())) + .map_err(|e| { + tracing::error!("Failed to initialize Gateway client: {}", e); + Error::config(ConfigError::wrong_type()) + })?; + + tracing::info!("Gateway client initialized successfully"); + + // Initialize keyring + // Note: Cardano uses "addr" as account prefix (similar to how Cosmos uses prefixes) + let keyring = KeyRing::new( + cardano_config.key_store_type, + "addr", // Cardano address prefix + &cardano_config.id, + &cardano_config.key_store_folder, + ) + .map_err(Error::key_base)?; + + tracing::info!("Keyring initialized successfully"); + + let endpoint = Self { + config: cardano_config, + rt, + gateway_client, + keyring, + event_source_cmd: None, // Initialized lazily on first subscribe() call + }; + + tracing::info!("Cardano chain endpoint bootstrap complete"); + Ok(endpoint) + } + + fn shutdown(self) -> Result<(), Error> { + tracing::info!("Shutting down Cardano chain endpoint"); + Ok(()) + } + + fn health_check(&mut self) -> Result { + match self.rt.block_on(self.gateway_client.query_latest_height()) { + Ok(_) => Ok(HealthCheck::Healthy), + Err(e) => Ok(HealthCheck::Unhealthy(Box::new(Error::query(format!( + "Gateway health check failed: {e}" + ))))), + } + } + + fn subscribe(&mut self) -> Result { + if self.event_source_cmd.is_none() { + self.event_source_cmd = Some(self.init_event_source()?); + } + + let event_source_cmd = self.event_source_cmd.as_ref().ok_or_else(|| { + Error::event_source(crate::event::source::Error::collect_events_failed( + "Cardano event source command missing after initialization".to_string(), + )) + })?; + + let subscription = event_source_cmd.subscribe().map_err(Error::event_source)?; + Ok(subscription) + } + + fn keybase(&self) -> &KeyRing { + &self.keyring + } + + fn keybase_mut(&mut self) -> &mut KeyRing { + &mut self.keyring + 
} + + fn get_signer(&self) -> Result { + let key = self + .keyring + .get_key(&self.config.key_name) + .map_err(Error::key_base)?; + + let cardano_keyring = key.get_cardano_keyring().map_err(Error::key_base)?; + let address = cardano_keyring.address(self.config.network_id); + + Signer::from_str(&address).map_err(|e| { + Error::key_base(crate::keyring::errors::Error::invalid_mnemonic( + anyhow::anyhow!("Invalid signer address: {e}"), + )) + }) + } + + fn get_key(&self) -> Result { + // Get the signing key pair from keyring + self.keyring + .get_key(&self.config.key_name) + .map_err(Error::key_base) + } + + fn version_specs(&self) -> Result { + // TODO: Return Cardano version info + // Return empty Cosmos specs for now (Cardano doesn't use Cosmos SDK) + Ok(Specs::Cosmos(CosmosSpecs { + cosmos_sdk: None, + ibc_go: None, + consensus: None, + })) + } + + fn send_messages_and_wait_commit( + &mut self, + tracked_msgs: TrackedMsgs, + ) -> Result, Error> { + tracing::info!( + "send_messages_and_wait_commit: processing {} messages", + tracked_msgs.msgs.len() + ); + + // Block on async operations using the runtime + self.rt.block_on(async { + let mut all_events = Vec::new(); + + for msg in tracked_msgs.msgs.iter() { + tracing::debug!("Processing message type: {:?}", msg.type_url); + + // Step 1: Build unsigned transaction via Gateway + let unsigned_tx = self + .gateway_client + .build_ibc_tx(&msg.type_url, msg.value.clone()) + .await + .map_err(|e| Error::send_tx(format!("Failed to build transaction: {}", e)))?; + + tracing::debug!("Built unsigned tx: {}", unsigned_tx.description); + + // Step 2: Sign transaction with keyring + let signed_cbor_hex = self.sign_transaction_helper(&unsigned_tx.cbor_hex)?; + + tracing::debug!("Signed transaction, CBOR length: {}", signed_cbor_hex.len()); + + // Step 3: Submit signed transaction via Gateway + let tx_response = self + .gateway_client + .submit_signed_tx(&signed_cbor_hex) + .await + .map_err(|e| Error::send_tx(format!("Failed to 
submit transaction: {}", e)))?; + + // Step 4: Parse events from transaction result + let included_height = tx_response.height.ok_or_else(|| { + Error::send_tx("No height in transaction response".to_string()) + })?; + + tracing::info!( + "Transaction submitted: {} at height {}", + tx_response.tx_hash, + included_height + ); + + // Ensure the transaction is also covered by the latest Mithril transaction snapshot + // before we treat it as "committed" from the perspective of IBC relaying. + let certified_height = self + .wait_for_mithril_certified_height(included_height) + .await?; + if certified_height.revision_height() != included_height.revision_height() { + tracing::info!( + "Transaction {} inclusion height {} is now certified at {}", + tx_response.tx_hash, + included_height, + certified_height + ); + } + + // Log all events for debugging + for event in &tx_response.events { + tracing::debug!( + "Gateway event: type={} attributes={:?}", + event.event_type, + event.attributes + ); + } + + // Convert custom IbcEvent to proto Event format for parsing + let proto_events: Vec = tx_response + .events + .into_iter() + .map(|e| super::generated::ibc::cardano::v1::Event { + r#type: e.event_type, + attributes: e + .attributes + .into_iter() + .map( + |(k, v)| super::generated::ibc::cardano::v1::EventAttribute { + key: k, + value: v, + }, + ) + .collect(), + }) + .collect(); + + // Parse Gateway events into Hermes IbcEvent types + let parsed_events = + super::event_parser::parse_events(proto_events, certified_height) + .map_err(|e| Error::send_tx(format!("Failed to parse events: {}", e)))?; + + tracing::info!("Parsed {} IBC events from transaction", parsed_events.len()); + + // Wrap events with height + let events_with_height: Vec = parsed_events + .into_iter() + .map(|event| IbcEventWithHeight::new(event, certified_height)) + .collect(); + + // Add parsed events to result + all_events.extend(events_with_height); + } + + Ok(all_events) + }) + } + + fn 
send_messages_and_wait_check_tx( + &mut self, + tracked_msgs: TrackedMsgs, + ) -> Result, Error> { + tracing::info!( + "send_messages_and_wait_check_tx: processing {} messages", + tracked_msgs.msgs.len() + ); + + if tracked_msgs.msgs.is_empty() { + return Ok(vec![]); + } + + self.rt.block_on(async { + use bytes::Bytes; + use tendermint::abci::Code; + use tendermint::Hash; + + let mut responses = Vec::with_capacity(tracked_msgs.msgs.len()); + + for msg in tracked_msgs.msgs.iter() { + tracing::debug!("Processing message type: {:?}", msg.type_url); + + let unsigned_tx = self + .gateway_client + .build_ibc_tx(&msg.type_url, msg.value.clone()) + .await + .map_err(|e| Error::send_tx(format!("Failed to build transaction: {e}")))?; + + let signed_cbor_hex = self.sign_transaction_helper(&unsigned_tx.cbor_hex)?; + + let tx_response = self + .gateway_client + .submit_signed_tx(&signed_cbor_hex) + .await + .map_err(|e| Error::send_tx(format!("Failed to submit transaction: {e}")))?; + + let hash = match Hash::from_str(&tx_response.tx_hash.to_ascii_uppercase()) { + Ok(h) => h, + Err(e) => { + tracing::warn!( + "failed to parse tx hash `{}` as Tendermint hash: {e}", + tx_response.tx_hash + ); + Hash::None + } + }; + + responses.push(TxResponse { + codespace: String::new(), + code: Code::Ok, + data: Bytes::new(), + log: format!("submitted tx {}", tx_response.tx_hash), + hash, + }); + } + + Ok(responses) + }) + } + + fn verify_header( + &mut self, + _trusted: ICSHeight, + target: ICSHeight, + client_state: &AnyClientState, + ) -> Result { + // Hermes uses `verify_header()` as part of its generic client update workflow. + // + // For Tendermint clients, this verifies signatures and header continuity off-chain. + // For Cardano, we rely on on-chain verification in the Cosmos-side Mithril light client + // implementation (the chain rejects invalid Mithril headers and proofs). 
+ // + // To keep Hermes functional without coupling it to the full Mithril verification stack + // (which is already implemented in the on-chain client), we treat this as a best-effort + // fetch + structural validation step: + // - fetch the MithrilHeader for `target` from the Gateway + // - return it as a CardanoLightBlock so the relayer can proceed + // + // TODO: Implement optional off-chain verification to avoid broadcasting invalid headers and + // wasting fees, and to enable richer relayer-side diagnostics. + let header = self + .rt + .block_on(self.gateway_client.query_header(target)) + .map_err(|e| { + Error::query(format!("failed to query Cardano header from Gateway: {e}")) + })?; + + let (host_state_nft_policy_id, host_state_nft_token_name) = match client_state { + AnyClientState::Mithril(state) => ( + state.host_state_nft_policy_id.clone(), + state.host_state_nft_token_name.clone(), + ), + _ => { + return Err(Error::query( + "Cardano verify_header requires a Mithril client state".to_string(), + )) + } + }; + + Ok(CardanoLightBlock { + header, + host_state_nft_policy_id, + host_state_nft_token_name, + }) + } + + fn check_misbehaviour( + &mut self, + _update: &UpdateClient, + _client_state: &AnyClientState, + ) -> Result, Error> { + // TODO: Check for Cardano misbehaviour + tracing::warn!("check_misbehaviour: stub implementation"); + Ok(None) + } + + fn query_balance(&self, key_name: Option<&str>, denom: Option<&str>) -> Result { + let denom = denom.unwrap_or("lovelace"); // Cardano's base unit + let key_name = key_name.unwrap_or(&self.config.key_name); + + Err(Error::query(format!( + "Cardano balance query is not implemented (key={key_name}, denom={denom}); requires Gateway UTXO/balance query support" + ))) + } + + fn query_all_balances(&self, key_name: Option<&str>) -> Result, Error> { + let key_name = key_name.unwrap_or(&self.config.key_name); + Err(Error::query(format!( + "Cardano all-balances query is not implemented (key={key_name}); requires 
Gateway UTXO/balance query support" + ))) + } + + fn query_denom_trace(&self, _hash: String) -> Result { + // Not applicable to Cardano (native assets) + tracing::warn!("query_denom_trace: not applicable for Cardano"); + Err(Error::config(ConfigError::wrong_type())) + } + + fn query_commitment_prefix(&self) -> Result { + // Cardano uses "ibc" as commitment prefix + CommitmentPrefix::try_from(b"ibc".to_vec()) + .map_err(|e| Error::query(format!("invalid commitment prefix for Cardano: {e}"))) + } + + fn query_application_status(&self) -> Result { + tracing::debug!("Querying Cardano application status via Gateway"); + + // Query latest height from Gateway + let height = self + .rt + .block_on(self.gateway_client.query_latest_height()) + .map_err(|e| { + tracing::error!("Failed to query latest height: {}", e); + Error::query(format!("Gateway query_latest_height failed: {}", e)) + })?; + + tracing::info!("Cardano chain at height: {}", height); + + let timestamp = match self.rt.block_on(self.gateway_client.query_header(height)) { + Ok(header) => header.timestamp, + Err(e) => { + tracing::warn!( + "Failed to query header at height {height} for timestamp (falling back to local time): {e}" + ); + tendermint::Time::now().into() + } + }; + + Ok(ChainStatus { height, timestamp }) + } + + fn query_clients( + &self, + _request: QueryClientStatesRequest, + ) -> Result, Error> { + tracing::debug!("Querying all clients"); + + // Block on async operation + self.rt.block_on(async { + // Query clients from Gateway + let response_bytes = self.gateway_client + .query_clients() + .await + .map_err(|e| Error::query(format!("Failed to query clients: {}", e)))?; + + // Decode the response + use prost::Message; + use ibc_proto::ibc::core::client::v1::QueryClientStatesResponse; + + let response = QueryClientStatesResponse::decode(&response_bytes[..]) + .map_err(|e| Error::query(format!("Failed to decode clients response: {}", e)))?; + + // Convert proto client states to domain types, 
filtering out unsupported types + let clients: Vec = response + .client_states + .into_iter() + .filter_map(|cs| { + IdentifiedAnyClientState::try_from(cs.clone()) + .map_err(|e| { + let (client_type, client_id) = ( + if let Some(client_state) = &cs.client_state { + client_state.type_url.clone() + } else { + "None".to_string() + }, + &cs.client_id + ); + tracing::warn!( + "Encountered unsupported client type `{}` while scanning client `{}`, skipping the client", + client_type, client_id + ); + tracing::debug!("Failed to parse client state. Error: {}", e); + }) + .ok() + }) + .collect(); + + Ok(clients) + }) + } + + fn query_client_state( + &self, + request: QueryClientStateRequest, + include_proof: IncludeProof, + ) -> Result<(AnyClientState, Option), Error> { + tracing::debug!("Querying client state for: {}", request.client_id); + + let response = self + .rt + .block_on( + self.gateway_client + .query_client_state(request.client_id.as_str()), + ) + .map_err(|e| { + tracing::error!("Failed to query client state: {}", e); + Error::query(format!("Gateway query_client_state failed: {}", e)) + })?; + + let client_state_any = response + .client_state + .ok_or_else(|| Error::query("No client_state in response".to_string()))?; + + let any_client_state: AnyClientState = AnyClientState::try_from(client_state_any.clone()) + .map_err(|e| { + Error::query(format!( + "Failed to decode client state {}: {e}", + client_state_any.type_url + )) + })?; + + let proof = if include_proof == IncludeProof::Yes && !response.proof.is_empty() { + use ibc_proto::ibc::core::commitment::v1::MerkleProof as RawMerkleProof; + use prost::Message; + + let raw_proof = RawMerkleProof::decode(&response.proof[..]) + .map_err(|e| Error::query(format!("Failed to decode proof: {e}")))?; + Some(MerkleProof::from(raw_proof)) + } else { + None + }; + + Ok((any_client_state, proof)) + } + + fn query_consensus_state( + &self, + request: QueryConsensusStateRequest, + include_proof: IncludeProof, + ) -> 
Result<(AnyConsensusState, Option), Error> { + tracing::debug!( + "Querying consensus state for client: {} at height: {:?}", + request.client_id, + request.consensus_height + ); + + let response = self + .rt + .block_on( + self.gateway_client + .query_consensus_state(request.client_id.as_str(), request.consensus_height), + ) + .map_err(|e| { + tracing::error!("Failed to query consensus state: {}", e); + Error::query(format!("Gateway query_consensus_state failed: {}", e)) + })?; + + let consensus_state_any = response + .consensus_state + .ok_or_else(|| Error::query("No consensus_state in response".to_string()))?; + + let any_consensus_state: AnyConsensusState = + AnyConsensusState::try_from(consensus_state_any.clone()).map_err(|e| { + Error::query(format!( + "Failed to decode consensus state {}: {e}", + consensus_state_any.type_url + )) + })?; + + let proof = if include_proof == IncludeProof::Yes && !response.proof.is_empty() { + use ibc_proto::ibc::core::commitment::v1::MerkleProof as RawMerkleProof; + use prost::Message; + + let raw_proof = RawMerkleProof::decode(&response.proof[..]) + .map_err(|e| Error::query(format!("Failed to decode proof: {e}")))?; + Some(MerkleProof::from(raw_proof)) + } else { + None + }; + + Ok((any_consensus_state, proof)) + } + + fn query_consensus_state_heights( + &self, + request: QueryConsensusStateHeightsRequest, + ) -> Result, Error> { + tracing::debug!( + "Querying consensus state heights for client: {}", + request.client_id + ); + + self.rt.block_on(async { + let grpc_request: ibc_proto::ibc::core::client::v1::QueryConsensusStateHeightsRequest = + request.clone().into(); + + let heights_response = self + .gateway_client + .query_consensus_state_heights(grpc_request) + .await; + + let consensus_state_heights = match heights_response { + Ok(res) => res.consensus_state_heights, + Err(heights_err) => { + // Some chains do not implement `ConsensusStateHeights`; fall back to + // `ConsensusStates` and extract the heights. 
+ let states_request: ibc_proto::ibc::core::client::v1::QueryConsensusStatesRequest = + ibc_proto::ibc::core::client::v1::QueryConsensusStatesRequest { + client_id: request.client_id.to_string(), + pagination: request.pagination.map(|p| p.into()), + }; + + let states = self + .gateway_client + .query_consensus_states(states_request) + .await + .map_err(|states_err| { + Error::query(format!( + "Failed to query consensus state heights ({heights_err}) and fallback consensus states ({states_err})" + )) + })?; + + states + .consensus_states + .into_iter() + .filter_map(|cs| cs.height) + .collect() + } + }; + + let mut heights: Vec<_> = consensus_state_heights + .into_iter() + .filter_map(|h| { + ICSHeight::new(h.revision_number, h.revision_height) + .map_err(|e| { + tracing::warn!( + "Failed to parse consensus state height {}-{}: {}", + h.revision_number, + h.revision_height, + e + ); + }) + .ok() + }) + .collect(); + + heights.sort_unstable(); + + Ok(heights) + }) + } + + fn query_upgraded_client_state( + &self, + _request: QueryUpgradedClientStateRequest, + ) -> Result<(AnyClientState, MerkleProof), Error> { + Err(Error::query( + "Cardano upgraded client state query is not implemented".to_string(), + )) + } + + fn query_upgraded_consensus_state( + &self, + _request: QueryUpgradedConsensusStateRequest, + ) -> Result<(AnyConsensusState, MerkleProof), Error> { + Err(Error::query( + "Cardano upgraded consensus state query is not implemented".to_string(), + )) + } + + fn query_connections( + &self, + _request: QueryConnectionsRequest, + ) -> Result, Error> { + tracing::debug!("Querying all connections"); + + // Block on async operation + self.rt.block_on(async { + // Query connections from Gateway + let response_bytes = self + .gateway_client + .query_connections() + .await + .map_err(|e| Error::query(format!("Failed to query connections: {}", e)))?; + + // Decode the response + use ibc_proto::ibc::core::connection::v1::QueryConnectionsResponse; + use prost::Message; + + 
let response = QueryConnectionsResponse::decode(&response_bytes[..]).map_err(|e| { + Error::query(format!("Failed to decode connections response: {}", e)) + })?; + + // Convert proto connections to domain types, filtering out parsing errors + let connections: Vec = response + .connections + .into_iter() + .filter_map(|co| { + IdentifiedConnectionEnd::try_from(co.clone()) + .map_err(|e| { + tracing::warn!( + "Connection with ID {} failed parsing. Error: {}", + co.id, + e + ); + }) + .ok() + }) + .collect(); + + Ok(connections) + }) + } + + fn query_client_connections( + &self, + request: QueryClientConnectionsRequest, + ) -> Result, Error> { + tracing::debug!("Querying connections for client: {}", request.client_id); + + // Block on async operation + self.rt.block_on(async { + // Query client connections from Gateway + let response_bytes = self + .gateway_client + .query_client_connections(&request.client_id.to_string()) + .await + .map_err(|e| { + // If not found, return empty list + if e.to_string().contains("NotFound") { + return Error::query("Client connections not found".to_string()); + } + Error::query(format!("Failed to query client connections: {}", e)) + })?; + + // Decode the response + use ibc_proto::ibc::core::connection::v1::QueryClientConnectionsResponse; + use prost::Message; + use std::str::FromStr; + + let response = + QueryClientConnectionsResponse::decode(&response_bytes[..]).map_err(|e| { + Error::query(format!( + "Failed to decode client connections response: {}", + e + )) + })?; + + // Parse connection_paths strings into ConnectionId instances + let connection_ids: Vec = response + .connection_paths + .iter() + .filter_map(|id| { + ConnectionId::from_str(id) + .map_err(|e| { + tracing::warn!( + "Connection with ID {} failed parsing. 
Error: {}", + id, + e + ); + }) + .ok() + }) + .collect(); + + Ok(connection_ids) + }) + } + + fn query_connection( + &self, + request: QueryConnectionRequest, + include_proof: IncludeProof, + ) -> Result<(ConnectionEnd, Option), Error> { + tracing::info!("Querying connection: {:?}", request.connection_id); + + // Block on async operation + self.rt.block_on(async { + // Query connection from Gateway + let response_bytes = self + .gateway_client + .query_connection(&request.connection_id.to_string()) + .await + .map_err(|e| Error::query(format!("Failed to query connection: {}", e)))?; + + // Decode the response + use ibc_proto::ibc::core::connection::v1::QueryConnectionResponse; + use prost::Message; + + let response = QueryConnectionResponse::decode(&response_bytes[..]).map_err(|e| { + Error::query(format!("Failed to decode connection response: {}", e)) + })?; + + let connection_end = response + .connection + .ok_or_else(|| Error::query("No connection in response".to_string()))?; + + // Convert proto ConnectionEnd to domain ConnectionEnd + let connection = ConnectionEnd::try_from(connection_end) + .map_err(|e| Error::query(format!("Failed to parse ConnectionEnd: {}", e)))?; + + // Parse proof if requested + let proof = if matches!(include_proof, IncludeProof::Yes) { + if !response.proof.is_empty() { + use ibc_proto::ibc::core::commitment::v1::MerkleProof as RawMerkleProof; + let raw_proof = RawMerkleProof::decode(&response.proof[..]) + .map_err(|e| Error::query(format!("Failed to decode proof: {}", e)))?; + Some(MerkleProof::from(raw_proof)) + } else { + None + } + } else { + None + }; + + Ok((connection, proof)) + }) + } + + fn query_connection_channels( + &self, + request: QueryConnectionChannelsRequest, + ) -> Result, Error> { + tracing::debug!( + "Querying channels for connection: {}", + request.connection_id + ); + + // Block on async operation + self.rt.block_on(async { + // Query connection channels from Gateway + let response_bytes = self + .gateway_client 
+ .query_connection_channels(&request.connection_id.to_string()) + .await + .map_err(|e| Error::query(format!("Failed to query connection channels: {}", e)))?; + + // Decode the response + use ibc_proto::ibc::core::channel::v1::QueryConnectionChannelsResponse; + use prost::Message; + + let response = + QueryConnectionChannelsResponse::decode(&response_bytes[..]).map_err(|e| { + Error::query(format!( + "Failed to decode connection channels response: {}", + e + )) + })?; + + // Convert proto channels to domain types, filtering out parsing errors + let channels: Vec = response + .channels + .into_iter() + .filter_map(|ch| { + IdentifiedChannelEnd::try_from(ch.clone()) + .map_err(|e| { + tracing::warn!( + "Channel with port {} and ID {} failed parsing. Error: {}", + ch.port_id, + ch.channel_id, + e + ); + }) + .ok() + }) + .collect(); + + Ok(channels) + }) + } + + fn query_channels( + &self, + _request: QueryChannelsRequest, + ) -> Result, Error> { + tracing::debug!("Querying all channels"); + + // Block on async operation + self.rt.block_on(async { + // Query channels from Gateway + let response_bytes = self + .gateway_client + .query_channels() + .await + .map_err(|e| Error::query(format!("Failed to query channels: {}", e)))?; + + // Decode the response + use ibc_proto::ibc::core::channel::v1::QueryChannelsResponse; + use prost::Message; + + let response = QueryChannelsResponse::decode(&response_bytes[..]) + .map_err(|e| Error::query(format!("Failed to decode channels response: {}", e)))?; + + // Convert proto channels to domain types, filtering out parsing errors + let channels: Vec = response + .channels + .into_iter() + .filter_map(|ch| { + IdentifiedChannelEnd::try_from(ch.clone()) + .map_err(|e| { + tracing::warn!( + "Channel with port {} and ID {} failed parsing. 
Error: {}", + ch.port_id, + ch.channel_id, + e + ); + }) + .ok() + }) + .collect(); + + Ok(channels) + }) + } + + fn query_channel( + &self, + request: QueryChannelRequest, + include_proof: IncludeProof, + ) -> Result<(ChannelEnd, Option), Error> { + tracing::info!( + "Querying channel: port={}, channel={}", + request.port_id, + request.channel_id + ); + + // Block on async operation + self.rt.block_on(async { + // Query channel from Gateway + let response_bytes = self + .gateway_client + .query_channel(request.port_id.as_ref(), request.channel_id.as_ref()) + .await + .map_err(|e| Error::query(format!("Failed to query channel: {}", e)))?; + + // Decode the response + use ibc_proto::ibc::core::channel::v1::QueryChannelResponse; + use prost::Message; + + let response = QueryChannelResponse::decode(&response_bytes[..]) + .map_err(|e| Error::query(format!("Failed to decode channel response: {}", e)))?; + + let channel_proto = response + .channel + .ok_or_else(|| Error::query("No channel in response".to_string()))?; + + // Convert proto Channel to domain ChannelEnd + let channel = ChannelEnd::try_from(channel_proto) + .map_err(|e| Error::query(format!("Failed to parse ChannelEnd: {}", e)))?; + + // Parse proof if requested + let proof = if matches!(include_proof, IncludeProof::Yes) { + if !response.proof.is_empty() { + use ibc_proto::ibc::core::commitment::v1::MerkleProof as RawMerkleProof; + let raw_proof = RawMerkleProof::decode(&response.proof[..]) + .map_err(|e| Error::query(format!("Failed to decode proof: {}", e)))?; + Some(MerkleProof::from(raw_proof)) + } else { + None + } + } else { + None + }; + + Ok((channel, proof)) + }) + } + + fn query_channel_client_state( + &self, + request: QueryChannelClientStateRequest, + ) -> Result, Error> { + tracing::debug!( + "Querying channel client state: port={}, channel={}", + request.port_id, + request.channel_id + ); + + self.rt.block_on(async { + let response_bytes = self + .gateway_client + 
.query_channel_client_state(request.port_id.as_ref(), request.channel_id.as_ref()) + .await + .map_err(|e| Error::query(format!("Failed to query channel client state: {e}")))?; + + use ibc_proto::ibc::core::channel::v1::QueryChannelClientStateResponse; + use prost::Message; + + let response = + QueryChannelClientStateResponse::decode(&response_bytes[..]).map_err(|e| { + Error::query(format!( + "Failed to decode channel client state response: {e}" + )) + })?; + + let identified = response + .identified_client_state + .and_then(|ics| IdentifiedAnyClientState::try_from(ics).ok()); + + Ok(identified) + }) + } + + fn query_packet_commitment( + &self, + request: QueryPacketCommitmentRequest, + include_proof: IncludeProof, + ) -> Result<(Vec, Option), Error> { + tracing::info!( + "Querying packet commitment: port={}, channel={}, sequence={}", + request.port_id, + request.channel_id, + request.sequence + ); + + // Block on async operation + self.rt.block_on(async { + // Query packet commitment from Gateway + let response_bytes = self + .gateway_client + .query_packet_commitment( + request.port_id.as_ref(), + request.channel_id.as_ref(), + request.sequence.into(), + ) + .await + .map_err(|e| Error::query(format!("Failed to query packet commitment: {}", e)))?; + + // Decode the response + use ibc_proto::ibc::core::channel::v1::QueryPacketCommitmentResponse; + use prost::Message; + + let response = + QueryPacketCommitmentResponse::decode(&response_bytes[..]).map_err(|e| { + Error::query(format!( + "Failed to decode packet commitment response: {}", + e + )) + })?; + + // Parse proof if requested + let proof = if matches!(include_proof, IncludeProof::Yes) { + if !response.proof.is_empty() { + use ibc_proto::ibc::core::commitment::v1::MerkleProof as RawMerkleProof; + let raw_proof = RawMerkleProof::decode(&response.proof[..]) + .map_err(|e| Error::query(format!("Failed to decode proof: {}", e)))?; + Some(MerkleProof::from(raw_proof)) + } else { + None + } + } else { + None + }; 
+ + Ok((response.commitment, proof)) + }) + } + + fn query_packet_commitments( + &self, + request: QueryPacketCommitmentsRequest, + ) -> Result<(Vec, ICSHeight), Error> { + tracing::info!( + "Querying packet commitments: port={}, channel={}", + request.port_id, + request.channel_id + ); + + // Block on async operation + self.rt.block_on(async { + // Query packet commitments from Gateway + let response_bytes = self + .gateway_client + .query_packet_commitments(request.port_id.as_ref(), request.channel_id.as_ref()) + .await + .map_err(|e| Error::query(format!("Failed to query packet commitments: {}", e)))?; + + // Decode the response + use ibc_proto::ibc::core::channel::v1::QueryPacketCommitmentsResponse; + use prost::Message; + + let response = + QueryPacketCommitmentsResponse::decode(&response_bytes[..]).map_err(|e| { + Error::query(format!( + "Failed to decode packet commitments response: {}", + e + )) + })?; + + // Extract sequences from packet_states + let sequences: Vec = response + .commitments + .iter() + .map(|state| Sequence::from(state.sequence)) + .collect(); + + // Extract height from response + let height = response.height.ok_or_else(|| { + Error::query("No height in packet commitments response".to_string()) + })?; + + let ics_height = ICSHeight::new(height.revision_number, height.revision_height) + .map_err(|e| Error::query(format!("Invalid height: {}", e)))?; + + Ok((sequences, ics_height)) + }) + } + + fn query_packet_receipt( + &self, + request: QueryPacketReceiptRequest, + include_proof: IncludeProof, + ) -> Result<(Vec, Option), Error> { + tracing::info!( + "Querying packet receipt: port={}, channel={}, sequence={}", + request.port_id, + request.channel_id, + request.sequence + ); + + // Block on async operation + self.rt.block_on(async { + // Query packet receipt from Gateway + let response_bytes = self + .gateway_client + .query_packet_receipt( + request.port_id.as_ref(), + request.channel_id.as_ref(), + request.sequence.into(), + ) + .await + 
.map_err(|e| Error::query(format!("Failed to query packet receipt: {}", e)))?; + + // Decode the response + use ibc_proto::ibc::core::channel::v1::QueryPacketReceiptResponse; + use prost::Message; + + let response = + QueryPacketReceiptResponse::decode(&response_bytes[..]).map_err(|e| { + Error::query(format!("Failed to decode packet receipt response: {}", e)) + })?; + + // The receipt is a boolean - convert to bytes + let receipt_bytes = if response.received { + vec![1u8] + } else { + vec![0u8] + }; + + // Parse proof if requested + let proof = if matches!(include_proof, IncludeProof::Yes) { + if !response.proof.is_empty() { + use ibc_proto::ibc::core::commitment::v1::MerkleProof as RawMerkleProof; + let raw_proof = RawMerkleProof::decode(&response.proof[..]) + .map_err(|e| Error::query(format!("Failed to decode proof: {}", e)))?; + Some(MerkleProof::from(raw_proof)) + } else { + None + } + } else { + None + }; + + Ok((receipt_bytes, proof)) + }) + } + + fn query_unreceived_packets( + &self, + request: QueryUnreceivedPacketsRequest, + ) -> Result, Error> { + tracing::info!( + "Querying unreceived packets: port={}, channel={}", + request.port_id, + request.channel_id + ); + + // Block on async operation + self.rt.block_on(async { + // Query unreceived packets from Gateway + let response_bytes = self + .gateway_client + .query_unreceived_packets( + request.port_id.as_ref(), + request.channel_id.as_ref(), + request + .packet_commitment_sequences + .iter() + .map(|s| (*s).into()) + .collect(), + ) + .await + .map_err(|e| Error::query(format!("Failed to query unreceived packets: {}", e)))?; + + // Decode the response + use ibc_proto::ibc::core::channel::v1::QueryUnreceivedPacketsResponse; + use prost::Message; + + let response = + QueryUnreceivedPacketsResponse::decode(&response_bytes[..]).map_err(|e| { + Error::query(format!( + "Failed to decode unreceived packets response: {}", + e + )) + })?; + + // Extract sequences from response + let sequences: Vec = response + 
.sequences + .iter() + .map(|s| Sequence::from(*s)) + .collect(); + + Ok(sequences) + }) + } + + fn query_packet_acknowledgement( + &self, + request: QueryPacketAcknowledgementRequest, + include_proof: IncludeProof, + ) -> Result<(Vec, Option), Error> { + tracing::info!( + "Querying packet acknowledgement: port={}, channel={}, sequence={}", + request.port_id, + request.channel_id, + request.sequence + ); + + // Block on async operation + self.rt.block_on(async { + // Query packet acknowledgement from Gateway + let response_bytes = self + .gateway_client + .query_packet_acknowledgement( + request.port_id.as_ref(), + request.channel_id.as_ref(), + request.sequence.into(), + ) + .await + .map_err(|e| { + Error::query(format!("Failed to query packet acknowledgement: {}", e)) + })?; + + // Decode the response + use ibc_proto::ibc::core::channel::v1::QueryPacketAcknowledgementResponse; + use prost::Message; + + let response = QueryPacketAcknowledgementResponse::decode(&response_bytes[..]) + .map_err(|e| { + Error::query(format!( + "Failed to decode packet acknowledgement response: {}", + e + )) + })?; + + // Parse proof if requested + let proof = if matches!(include_proof, IncludeProof::Yes) { + if !response.proof.is_empty() { + use ibc_proto::ibc::core::commitment::v1::MerkleProof as RawMerkleProof; + let raw_proof = RawMerkleProof::decode(&response.proof[..]) + .map_err(|e| Error::query(format!("Failed to decode proof: {}", e)))?; + Some(MerkleProof::from(raw_proof)) + } else { + None + } + } else { + None + }; + + Ok((response.acknowledgement, proof)) + }) + } + + fn query_packet_acknowledgements( + &self, + request: QueryPacketAcknowledgementsRequest, + ) -> Result<(Vec, ICSHeight), Error> { + tracing::info!( + "Querying packet acknowledgements: port={}, channel={}", + request.port_id, + request.channel_id + ); + + // Block on async operation + self.rt.block_on(async { + // Query packet acknowledgements from Gateway + let response_bytes = self + .gateway_client + 
.query_packet_acknowledgements( + request.port_id.as_ref(), + request.channel_id.as_ref(), + ) + .await + .map_err(|e| { + Error::query(format!("Failed to query packet acknowledgements: {}", e)) + })?; + + // Decode the response + use ibc_proto::ibc::core::channel::v1::QueryPacketAcknowledgementsResponse; + use prost::Message; + + let response = QueryPacketAcknowledgementsResponse::decode(&response_bytes[..]) + .map_err(|e| { + Error::query(format!( + "Failed to decode packet acknowledgements response: {}", + e + )) + })?; + + // Extract sequences from acknowledgements + let sequences: Vec = response + .acknowledgements + .iter() + .map(|ack| Sequence::from(ack.sequence)) + .collect(); + + // Extract height from response + let height = response.height.ok_or_else(|| { + Error::query("No height in packet acknowledgements response".to_string()) + })?; + + let ics_height = ICSHeight::new(height.revision_number, height.revision_height) + .map_err(|e| Error::query(format!("Invalid height: {}", e)))?; + + Ok((sequences, ics_height)) + }) + } + + fn query_unreceived_acknowledgements( + &self, + request: QueryUnreceivedAcksRequest, + ) -> Result, Error> { + tracing::info!( + "Querying unreceived acknowledgements: port={}, channel={}", + request.port_id, + request.channel_id + ); + + // Block on async operation + self.rt.block_on(async { + // Query unreceived acknowledgements from Gateway + let response_bytes = self + .gateway_client + .query_unreceived_acknowledgements( + request.port_id.as_ref(), + request.channel_id.as_ref(), + request + .packet_ack_sequences + .iter() + .map(|s| (*s).into()) + .collect(), + ) + .await + .map_err(|e| { + Error::query(format!( + "Failed to query unreceived acknowledgements: {}", + e + )) + })?; + + // Decode the response + use ibc_proto::ibc::core::channel::v1::QueryUnreceivedAcksResponse; + use prost::Message; + + let response = + QueryUnreceivedAcksResponse::decode(&response_bytes[..]).map_err(|e| { + Error::query(format!("Failed to 
decode unreceived acks response: {}", e)) + })?; + + // Extract sequences from response + let sequences: Vec = response + .sequences + .iter() + .map(|s| Sequence::from(*s)) + .collect(); + + Ok(sequences) + }) + } + + fn query_next_sequence_receive( + &self, + request: QueryNextSequenceReceiveRequest, + include_proof: IncludeProof, + ) -> Result<(Sequence, Option), Error> { + tracing::info!( + "Querying next sequence receive: port={}, channel={}", + request.port_id, + request.channel_id + ); + + // Block on async operation + self.rt.block_on(async { + // Query next sequence receive from Gateway + let response_bytes = self + .gateway_client + .query_next_sequence_receive(request.port_id.as_ref(), request.channel_id.as_ref()) + .await + .map_err(|e| { + Error::query(format!("Failed to query next sequence receive: {}", e)) + })?; + + // Decode the response + use ibc_proto::ibc::core::channel::v1::QueryNextSequenceReceiveResponse; + use prost::Message; + + let response = + QueryNextSequenceReceiveResponse::decode(&response_bytes[..]).map_err(|e| { + Error::query(format!( + "Failed to decode next sequence receive response: {}", + e + )) + })?; + + let sequence = Sequence::from(response.next_sequence_receive); + + // Parse proof if requested + let proof = if matches!(include_proof, IncludeProof::Yes) { + if !response.proof.is_empty() { + use ibc_proto::ibc::core::commitment::v1::MerkleProof as RawMerkleProof; + let raw_proof = RawMerkleProof::decode(&response.proof[..]) + .map_err(|e| Error::query(format!("Failed to decode proof: {}", e)))?; + Some(MerkleProof::from(raw_proof)) + } else { + None + } + } else { + None + }; + + Ok((sequence, proof)) + }) + } + + fn query_txs(&self, _request: QueryTxRequest) -> Result, Error> { + use crate::chain::requests::{QueryHeight, QueryTxRequest}; + use ibc_relayer_types::events::WithBlockDataType; + + match _request { + QueryTxRequest::Transaction(tx) => { + self.rt.block_on(async { + let response = self + .gateway_client + 
.query_transaction_by_hash(tx.0.to_string()) + .await + .map_err(|e| Error::query(format!("Failed to query transaction by hash: {e}")))?; + + let height = ICSHeight::new(0, response.height) + .map_err(|e| Error::query(format!("Invalid tx height {}: {e}", response.height)))?; + + let proto_events: Vec = response + .events + .into_iter() + .map(|e| super::generated::ibc::cardano::v1::Event { + r#type: e.r#type, + attributes: e + .event_attribute + .into_iter() + .map(|a| super::generated::ibc::cardano::v1::EventAttribute { + key: a.key, + value: a.value, + }) + .collect(), + }) + .collect(); + + let parsed_events = super::event_parser::parse_events(proto_events, height) + .map_err(|e| Error::query(format!("Failed to parse tx events: {e}")))?; + + Ok(parsed_events + .into_iter() + .map(|ev| IbcEventWithHeight::new(ev, height)) + .collect()) + }) + } + + QueryTxRequest::Client(request) => { + self.rt.block_on(async { + const LOOKBACK_WINDOW: u64 = 50; + + let filter_events = |height: ICSHeight, + proto_events: Vec| + -> Result, Error> { + let parsed_events = super::event_parser::parse_events(proto_events, height) + .map_err(|e| Error::query(format!("Failed to parse tx events: {e}")))?; + + Ok(parsed_events + .into_iter() + .filter(|ev| match (&request.event_id, ev) { + ( + WithBlockDataType::CreateClient, + ibc_relayer_types::events::IbcEvent::CreateClient(e), + ) => e.client_id() == &request.client_id + && e.0.consensus_height == request.consensus_height, + ( + WithBlockDataType::UpdateClient, + ibc_relayer_types::events::IbcEvent::UpdateClient(e), + ) => e.common.client_id == request.client_id + && e.common.consensus_height == request.consensus_height, + _ => false, + }) + .map(|ev| IbcEventWithHeight::new(ev, height)) + .collect()) + }; + + match request.query_height { + QueryHeight::Specific(h) => { + let target_height_u64 = h.revision_height(); + + let response = self + .gateway_client + .query_block_results(target_height_u64) + .await + .map_err(|e| 
Error::query(format!("Failed to query block results: {e}")))?; + + let block_results = response + .block_results + .ok_or_else(|| Error::query("No block_results in response".to_string()))?; + + let height = match block_results.height { + Some(h) => ICSHeight::new(h.revision_number, h.revision_height) + .map_err(|e| { + Error::query(format!("Invalid height in block results: {e}")) + })?, + None => ICSHeight::new(0, target_height_u64).map_err(|e| { + Error::query(format!( + "Invalid fallback height {target_height_u64} in block results: {e}" + )) + })?, + }; + + let proto_events: Vec = block_results + .txs_results + .into_iter() + .flat_map(|tx| tx.events) + .map(|e| super::generated::ibc::cardano::v1::Event { + r#type: e.r#type, + attributes: e + .event_attribute + .into_iter() + .map(|a| super::generated::ibc::cardano::v1::EventAttribute { + key: a.key, + value: a.value, + }) + .collect(), + }) + .collect(); + + filter_events(height, proto_events) + } + QueryHeight::Latest => { + let latest = self + .gateway_client + .query_latest_height() + .await + .map_err(|e| Error::query(format!("Failed to query latest height: {e}")))?; + + let latest_h = latest.revision_height(); + let since_h = latest_h.saturating_sub(LOOKBACK_WINDOW); + let since_h = since_h.max(1); + let since_height = ICSHeight::new(0, since_h) + .map_err(|e| Error::query(format!("Invalid since height {since_h}: {e}")))?; + + let response = self + .gateway_client + .query_events(since_height) + .await + .map_err(|e| Error::query(format!("Failed to query events: {e}")))?; + + let mut out = Vec::new(); + for block in response.events { + let height = ICSHeight::new(0, block.height) + .map_err(|e| Error::query(format!("Invalid block height {}: {e}", block.height)))?; + + let proto_events: Vec = block + .events + .into_iter() + .flat_map(|tx| tx.events) + .map(|e| super::generated::ibc::cardano::v1::Event { + r#type: e.r#type, + attributes: e + .event_attribute + .into_iter() + .map(|a| 
super::generated::ibc::cardano::v1::EventAttribute { + key: a.key, + value: a.value, + }) + .collect(), + }) + .collect(); + + out.extend(filter_events(height, proto_events)?); + } + + Ok(out) + } + } + }) + } + } + } + + fn query_packet_events( + &self, + request: QueryPacketEventDataRequest, + ) -> Result, Error> { + use crate::chain::requests::{Qualified, QueryHeight}; + + let max_height: Option = match request.height { + Qualified::SmallerEqual(QueryHeight::Specific(h)) => Some(h.revision_height()), + Qualified::Equal(QueryHeight::Specific(h)) => Some(h.revision_height()), + _ => None, + }; + + let must_equal_height: Option = match request.height { + Qualified::Equal(QueryHeight::Specific(h)) => Some(h.revision_height()), + _ => None, + }; + + self.rt.block_on(async { + let mut out = Vec::new(); + + // If the request targets a single height, avoid block search and inspect that block only. + if let Some(h) = must_equal_height { + let response = self + .gateway_client + .query_block_results(h) + .await + .map_err(|e| Error::query(format!("Failed to query block results: {e}")))?; + + out.extend(filter_packet_events_from_block_results( + &request, + response.block_results, + h, + )?); + return Ok(out); + } + + for seq in &request.sequences { + let search = self + .gateway_client + .query_block_search_all( + request.source_channel_id.to_string(), + request.destination_channel_id.to_string(), + seq.to_string(), + 50, + ) + .await + .map_err(|e| Error::query(format!("Failed to search blocks: {e}")))?; + + let mut heights: Vec = search + .blocks + .into_iter() + .filter_map(|b| b.block.map(|bi| bi.height)) + .filter_map(|h| u64::try_from(h).ok()) + .collect(); + + heights.sort_unstable(); + heights.dedup(); + + if let Some(max_h) = max_height { + heights.retain(|h| *h <= max_h); + } + + for h in heights { + let response = self + .gateway_client + .query_block_results(h) + .await + .map_err(|e| Error::query(format!("Failed to query block results: {e}")))?; + + 
out.extend(filter_packet_events_from_block_results( + &request, + response.block_results, + h, + )?); + } + } + + Ok(out) + }) + } + + fn query_host_consensus_state( + &self, + _request: QueryHostConsensusStateRequest, + ) -> Result { + Err(Error::query( + "Cardano host consensus state query is not implemented".to_string(), + )) + } + + fn build_client_state( + &self, + height: ICSHeight, + _settings: ClientSettings, + ) -> Result { + tracing::info!( + "Building Mithril client state for Cardano at height {:?}", + height + ); + + let response = self + .rt + .block_on( + self.gateway_client + .query_new_client(height.revision_height()), + ) + .map_err(|e| Error::query(format!("Gateway query_new_client failed: {e}")))?; + + let raw_any = response + .client_state + .ok_or_else(|| Error::query("No client_state in NewClient response".to_string()))?; + + let any = ibc_proto::google::protobuf::Any { + type_url: raw_any.type_url, + value: raw_any.value, + }; + + any.try_into() + .map_err(|e: ibc_relayer_types::core::ics02_client::error::Error| { + Error::query(format!("Failed to decode Mithril client state: {e}")) + }) + } + + fn build_consensus_state( + &self, + light_block: Self::LightBlock, + ) -> Result { + let ibc_state_root = extract_ibc_state_root_from_host_state_tx( + &light_block.header, + &light_block.host_state_nft_policy_id, + &light_block.host_state_nft_token_name, + )?; + + let header = light_block.header; + + Ok(MithrilConsensusState::new( + CommitmentRoot::from_bytes(&ibc_state_root), + header.timestamp.nanoseconds(), + header.mithril_stake_distribution_certificate, + header.transaction_snapshot_certificate.hash, + )) + } + + fn build_header( + &mut self, + _trusted_height: ICSHeight, + target_height: ICSHeight, + _client_state: &AnyClientState, + ) -> Result<(Self::Header, Vec), Error> { + // NOTE: Hermes core logic often requests a client update at `proofs_height + 1`. 
+ // + // On Tendermint chains this is fine because heights are contiguous and the Tendermint + // header builder can return intermediate "support" headers (including the proof height). + // + // For Cardano/Mithril, however, headers only exist at Mithril-certified transaction snapshot + // heights (e.g. every ~15 blocks in our devnet setup). That means a height like `H + 1` + // may not exist at all even if the chain has advanced well beyond it. + // + // If the exact `target_height` is not available, we still want to: + // - install a consensus state at `target_height - 1` (the proof height), so proofs verify, and + // - also advance the client to the latest available snapshot height. + // + // We do this by returning: + // - `support` header at `target_height - 1`, and + // - a final header at the latest snapshot height. + match self + .rt + .block_on(self.gateway_client.query_header(target_height)) + { + Ok(header) => Ok((header, vec![])), + Err(e) => { + let err_str = e.to_string(); + if !err_str.contains("Not found") || !err_str.contains("height") { + return Err(Error::query(format!("Gateway query_header failed: {e}"))); + } + + let proof_height = target_height + .decrement() + .map_err(|_| Error::query(format!("invalid target height {target_height}")))?; + + let proof_header = self + .rt + .block_on(self.gateway_client.query_header(proof_height)) + .map_err(|e| { + Error::query(format!( + "Gateway query_header failed at proof height {proof_height}: {e}" + )) + })?; + + let latest_height = self + .rt + .block_on(self.gateway_client.query_latest_height()) + .map_err(|e| { + Error::query(format!("Gateway query_latest_height failed: {e}")) + })?; + + let latest_header = self + .rt + .block_on(self.gateway_client.query_header(latest_height)) + .map_err(|e| { + Error::query(format!( + "Gateway query_header failed at latest height {latest_height}: {e}" + )) + })?; + + Ok((latest_header, vec![proof_header])) + } + } + } + + fn maybe_register_counterparty_payee( + 
&mut self, + _channel_id: &ChannelId, + _port_id: &PortId, + _counterparty_payee: &Signer, + ) -> Result<(), Error> { + // ICS-29 fee middleware - not implemented for Cardano yet + tracing::warn!("maybe_register_counterparty_payee: not implemented for Cardano"); + Ok(()) + } + + fn cross_chain_query( + &self, + _requests: Vec, + ) -> Result< + Vec, + Error, + > { + Err(Error::query( + "ICS-31 cross-chain queries are not supported for Cardano".to_string(), + )) + } + + fn query_incentivized_packet( + &self, + _request: ibc_proto::ibc::apps::fee::v1::QueryIncentivizedPacketRequest, + ) -> Result { + Err(Error::query( + "ICS-29 fee middleware is not supported for Cardano".to_string(), + )) + } + + fn query_consumer_chains( + &self, + ) -> Result, Error> { + Err(Error::query( + "ICS-28 CCV (Cross-Chain Validation) is not applicable to Cardano".to_string(), + )) + } + + fn query_upgrade( + &self, + _request: ibc_proto::ibc::core::channel::v1::QueryUpgradeRequest, + _height: ibc_relayer_types::Height, + _include_proof: IncludeProof, + ) -> Result< + ( + ibc_relayer_types::core::ics04_channel::upgrade::Upgrade, + Option, + ), + Error, + > { + Err(Error::query( + "IBC channel upgrades are not implemented for Cardano".to_string(), + )) + } + + fn query_upgrade_error( + &self, + _request: ibc_proto::ibc::core::channel::v1::QueryUpgradeErrorRequest, + _height: ibc_relayer_types::Height, + _include_proof: IncludeProof, + ) -> Result< + ( + ibc_relayer_types::core::ics04_channel::upgrade::ErrorReceipt, + Option, + ), + Error, + > { + Err(Error::query( + "IBC channel upgrades are not implemented for Cardano".to_string(), + )) + } + + fn query_ccv_consumer_id( + &self, + _client_id: ClientId, + ) -> Result { + Err(Error::query( + "ICS-28 CCV (Cross-Chain Validation) is not applicable to Cardano".to_string(), + )) + } +} + +fn filter_packet_events_from_block_results( + request: &QueryPacketEventDataRequest, + block_results: Option, + fallback_height: u64, +) -> Result, Error> { + 
use ibc_relayer_types::events::{IbcEvent as RelayerIbcEvent, WithBlockDataType}; + + let block_results = match block_results { + Some(br) => br, + None => return Ok(vec![]), + }; + + let height = match block_results.height { + Some(h) => ICSHeight::new(h.revision_number, h.revision_height) + .map_err(|e| Error::query(format!("Invalid height in block results: {e}")))?, + None => ICSHeight::new(0, fallback_height) + .map_err(|e| Error::query(format!("Invalid fallback height {fallback_height}: {e}")))?, + }; + + let proto_events: Vec = block_results + .txs_results + .into_iter() + .flat_map(|tx| tx.events) + .map(|e| super::generated::ibc::cardano::v1::Event { + r#type: e.r#type, + attributes: e + .event_attribute + .into_iter() + .map(|a| super::generated::ibc::cardano::v1::EventAttribute { + key: a.key, + value: a.value, + }) + .collect(), + }) + .collect(); + + let parsed_events = super::event_parser::parse_events(proto_events, height) + .map_err(|e| Error::query(format!("Failed to parse block events: {e}")))?; + + let filtered: Vec = parsed_events + .into_iter() + .filter(|ev| match (&request.event_id, ev) { + (WithBlockDataType::SendPacket, RelayerIbcEvent::SendPacket(e)) => { + request.sequences.contains(&e.packet.sequence) + && e.src_port_id() == &request.source_port_id + && e.src_channel_id() == &request.source_channel_id + && e.dst_port_id() == &request.destination_port_id + && e.dst_channel_id() == &request.destination_channel_id + } + (WithBlockDataType::WriteAck, RelayerIbcEvent::WriteAcknowledgement(e)) => { + request.sequences.contains(&e.packet.sequence) + && e.src_port_id() == &request.source_port_id + && e.src_channel_id() == &request.source_channel_id + && e.dst_port_id() == &request.destination_port_id + && e.dst_channel_id() == &request.destination_channel_id + } + _ => false, + }) + .map(|ev| IbcEventWithHeight::new(ev, height)) + .collect(); + + Ok(filtered) +} + +// Mithril header is decoded from the Gateway as `google.protobuf.Any`. 
+// See `ibc-relayer-types/src/clients/ics08_cardano/header.rs` and +// `ibc-relayer-types/src/core/ics02_client/header.rs`. + +fn extract_ibc_state_root_from_host_state_tx( + header: &MithrilHeader, + host_state_nft_policy_id: &[u8], + host_state_nft_token_name: &[u8], +) -> Result, Error> { + let tx_hash = header.host_state_tx_hash.trim(); + if tx_hash.is_empty() { + return Err(Error::query( + "missing host_state_tx_hash in Mithril header".to_string(), + )); + } + + if host_state_nft_policy_id.len() != 28 { + return Err(Error::query(format!( + "invalid host_state_nft_policy_id length: expected 28 bytes, got {}", + host_state_nft_policy_id.len() + ))); + } + + let tx_body_cbor = header.host_state_tx_body_cbor.as_slice(); + if tx_body_cbor.is_empty() { + return Err(Error::query( + "missing host_state_tx_body_cbor in Mithril header".to_string(), + )); + } + + let computed = blake2b_256(tx_body_cbor); + let computed_hex = hex::encode(computed); + if !computed_hex.eq_ignore_ascii_case(tx_hash) { + return Err(Error::query(format!( + "HostState tx body hash mismatch: expected {tx_hash}, got {computed_hex}" + ))); + } + + use pallas_codec::minicbor; + use pallas_codec::utils::KeepRaw; + use pallas_primitives::{babbage, conway}; + + let conway_body: Result>, _> = + minicbor::decode(tx_body_cbor); + if let Ok(body) = conway_body { + return extract_root_from_conway_tx_body( + &body, + header.host_state_tx_output_index, + host_state_nft_policy_id, + host_state_nft_token_name, + ); + } + + let babbage_body: Result>, _> = + minicbor::decode(tx_body_cbor); + if let Ok(body) = babbage_body { + return extract_root_from_babbage_tx_body( + &body, + header.host_state_tx_output_index, + host_state_nft_policy_id, + host_state_nft_token_name, + ); + } + + Err(Error::query( + "unsupported HostState transaction body CBOR".to_string(), + )) +} + +fn blake2b_256(data: &[u8]) -> [u8; 32] { + use blake2::digest::consts::U32; + use blake2::{Blake2b, Digest}; + + let mut hasher = 
Blake2b::::new(); + hasher.update(data); + let digest = hasher.finalize(); + + let mut out = [0u8; 32]; + out.copy_from_slice(&digest); + out +} + +fn extract_root_from_conway_tx_body<'a>( + body: &pallas_codec::utils::KeepRaw<'a, pallas_primitives::conway::MintedTransactionBody<'a>>, + output_index: u32, + host_state_nft_policy_id: &[u8], + host_state_nft_token_name: &[u8], +) -> Result, Error> { + use pallas_primitives::conway::{MintedTransactionOutput, PseudoTransactionOutput}; + + let idx: usize = output_index + .try_into() + .map_err(|_| Error::query("host_state_tx_output_index out of range".to_string()))?; + + let output: &MintedTransactionOutput<'a> = body + .outputs + .get(idx) + .ok_or_else(|| Error::query("host_state_tx_output_index out of range".to_string()))?; + + let out = match output { + PseudoTransactionOutput::PostAlonzo(out) => out, + _ => { + return Err(Error::query( + "HostState output is not a post-Alonzo output".to_string(), + )) + } + }; + + ensure_value_contains_host_state_nft_conway( + &out.value, + host_state_nft_policy_id, + host_state_nft_token_name, + )?; + + let datum_option = out.datum_option.as_ref().ok_or_else(|| { + Error::query("HostState output has no datum option (expected inline datum)".to_string()) + })?; + + let plutus_data = match datum_option { + pallas_primitives::babbage::PseudoDatumOption::Data(cbor_wrap) => { + std::ops::Deref::deref(std::ops::Deref::deref(cbor_wrap)) + } + _ => { + return Err(Error::query( + "HostState output does not contain an inline datum".to_string(), + )) + } + }; + + extract_ibc_state_root_from_host_state_datum(plutus_data, host_state_nft_policy_id) +} + +fn extract_root_from_babbage_tx_body<'a>( + body: &pallas_codec::utils::KeepRaw<'a, pallas_primitives::babbage::MintedTransactionBody<'a>>, + output_index: u32, + host_state_nft_policy_id: &[u8], + host_state_nft_token_name: &[u8], +) -> Result, Error> { + use pallas_primitives::babbage::{MintedTransactionOutput, PseudoTransactionOutput}; + + 
let idx: usize = output_index + .try_into() + .map_err(|_| Error::query("host_state_tx_output_index out of range".to_string()))?; + + let output: &MintedTransactionOutput<'a> = body + .outputs + .get(idx) + .ok_or_else(|| Error::query("host_state_tx_output_index out of range".to_string()))?; + + let out = match output { + PseudoTransactionOutput::PostAlonzo(out) => out, + _ => { + return Err(Error::query( + "HostState output is not a post-Alonzo output".to_string(), + )) + } + }; + + ensure_value_contains_host_state_nft_alonzo( + &out.value, + host_state_nft_policy_id, + host_state_nft_token_name, + )?; + + let datum_option = out.datum_option.as_ref().ok_or_else(|| { + Error::query("HostState output has no datum option (expected inline datum)".to_string()) + })?; + + let plutus_data = match datum_option { + pallas_primitives::babbage::PseudoDatumOption::Data(cbor_wrap) => { + std::ops::Deref::deref(std::ops::Deref::deref(cbor_wrap)) + } + _ => { + return Err(Error::query( + "HostState output does not contain an inline datum".to_string(), + )) + } + }; + + extract_ibc_state_root_from_host_state_datum(plutus_data, host_state_nft_policy_id) +} + +fn ensure_value_contains_host_state_nft_conway( + value: &pallas_primitives::conway::Value, + host_state_nft_policy_id: &[u8], + host_state_nft_token_name: &[u8], +) -> Result<(), Error> { + match value { + pallas_primitives::conway::Value::Multiasset(_, multiasset) => { + for (policy, assets) in multiasset.iter() { + if policy.as_ref() != host_state_nft_policy_id { + continue; + } + + for (asset, amount) in assets.iter() { + if asset.as_slice() == host_state_nft_token_name { + let amount_u64: u64 = amount.into(); + if amount_u64 == 1 { + return Ok(()); + } + } + } + } + + Err(Error::query( + "HostState output does not contain the expected HostState NFT".to_string(), + )) + } + _ => Err(Error::query( + "HostState output has no multi-assets (expected HostState NFT)".to_string(), + )), + } +} + +fn 
ensure_value_contains_host_state_nft_alonzo( + value: &pallas_primitives::alonzo::Value, + host_state_nft_policy_id: &[u8], + host_state_nft_token_name: &[u8], +) -> Result<(), Error> { + match value { + pallas_primitives::alonzo::Value::Multiasset(_, multiasset) => { + for (policy, assets) in multiasset.iter() { + if policy.as_ref() != host_state_nft_policy_id { + continue; + } + + for (asset, amount) in assets.iter() { + if asset.as_slice() == host_state_nft_token_name && *amount == 1 { + return Ok(()); + } + } + } + + Err(Error::query( + "HostState output does not contain the expected HostState NFT".to_string(), + )) + } + _ => Err(Error::query( + "HostState output has no multi-assets (expected HostState NFT)".to_string(), + )), + } +} + +fn extract_ibc_state_root_from_host_state_datum( + datum: &pallas_primitives::alonzo::PlutusData, + expected_nft_policy_id: &[u8], +) -> Result, Error> { + use pallas_primitives::alonzo::PlutusData; + + let outer = match datum { + PlutusData::Constr(c) => c, + _ => { + return Err(Error::query( + "HostState datum is not a constructor PlutusData".to_string(), + )) + } + }; + + if plutus_constructor_index(outer) != Some(0) || outer.fields.len() < 2 { + return Err(Error::query( + "HostState datum does not match expected constructor shape".to_string(), + )); + } + + let state = &outer.fields[0]; + let nft_policy = &outer.fields[1]; + + if !expected_nft_policy_id.is_empty() { + let nft_policy_bytes: &[u8] = match nft_policy { + PlutusData::BoundedBytes(bytes) => bytes.as_slice(), + _ => { + return Err(Error::query( + "HostState datum nft_policy is not a byte string".to_string(), + )) + } + }; + + if nft_policy_bytes != expected_nft_policy_id { + return Err(Error::query( + "unexpected HostState nft_policy in datum".to_string(), + )); + } + } + + let state = match state { + PlutusData::Constr(c) => c, + _ => { + return Err(Error::query( + "HostState state is not a constructor".to_string(), + )) + } + }; + + if 
plutus_constructor_index(state) != Some(0) || state.fields.len() < 2 { + return Err(Error::query( + "HostState state does not match expected constructor shape".to_string(), + )); + } + + let root: &[u8] = match &state.fields[1] { + PlutusData::BoundedBytes(bytes) => bytes.as_slice(), + _ => { + return Err(Error::query( + "ibc_state_root is not a byte string".to_string(), + )) + } + }; + + if root.len() != 32 { + return Err(Error::query(format!( + "invalid ibc_state_root length: expected 32 bytes, got {}", + root.len() + ))); + } + + Ok(root.to_vec()) +} + +fn plutus_constructor_index( + constr: &pallas_primitives::alonzo::Constr, +) -> Option { + match constr.tag { + 102 => constr.any_constructor, + 121..=127 => Some(constr.tag - 121), + 1280..=1400 => Some(constr.tag - 1280 + 7), + _ => None, + } +} diff --git a/crates/relayer/src/chain/cardano/error.rs b/crates/relayer/src/chain/cardano/error.rs new file mode 100644 index 0000000000..3d1239c28f --- /dev/null +++ b/crates/relayer/src/chain/cardano/error.rs @@ -0,0 +1,61 @@ +//! 
Error types for Cardano chain implementation + +use thiserror::Error; + +#[derive(Debug, Error)] +pub enum Error { + #[error("Gateway client error: {0}")] + GatewayClient(String), + + #[error("Configuration error: {0}")] + Config(String), + + #[error("Keyring error: {0}")] + Keyring(String), + + #[error("Signer error: {0}")] + Signer(String), + + #[error("CBOR decode error: {0}")] + CborDecode(String), + + #[error("Transaction error: {0}")] + Transaction(String), + + #[error("Query error: {0}")] + Query(String), + + #[error("IBC error: {0}")] + Ibc(String), + + #[error("Event attribute error: {0}")] + EventAttribute(String), + + #[error("Generic error: {0}")] + Generic(String), +} + +// Conversion from other error types +impl From for Error { + fn from(err: tonic::Status) -> Self { + Error::GatewayClient(err.message().to_string()) + } +} + +impl From for Error { + fn from(err: tonic::transport::Error) -> Self { + Error::GatewayClient(err.to_string()) + } +} + +impl From for Error { + fn from(err: std::io::Error) -> Self { + Error::Generic(err.to_string()) + } +} + +impl From for Error { + fn from(err: serde_json::Error) -> Self { + Error::Generic(err.to_string()) + } +} diff --git a/crates/relayer/src/chain/cardano/event_parser.rs b/crates/relayer/src/chain/cardano/event_parser.rs new file mode 100644 index 0000000000..952aa77067 --- /dev/null +++ b/crates/relayer/src/chain/cardano/event_parser.rs @@ -0,0 +1,679 @@ +//! Event parsing for Cardano Gateway events -> Hermes `IbcEvent` conversion. +//! +//! The Gateway returns events in the format: +//! `Event { type: "create_client", attributes: [{ key: "client_id", value: "08-cardano-0" }, ...] }` +//! +//! This module converts them into Hermes' `IbcEvent` enum variants. 
+ +use ibc_relayer_types::{ + core::{ + ics02_client::{ + events as ClientEvents, + height::{Height, HeightErrorDetail}, + }, + ics03_connection::events as ConnectionEvents, + ics04_channel::{events as ChannelEvents, packet::Packet, timeout::TimeoutHeight}, + ics24_host::identifier::{ChannelId, ClientId, ConnectionId, PortId}, + }, + events::IbcEvent, + timestamp::Timestamp, +}; +use std::collections::HashMap; +use std::str::FromStr; + +use super::error::Error; +use super::generated::ibc::cardano::v1::{Event, EventAttribute}; + +/// Parse a list of Gateway events into Hermes IbcEvent types +pub fn parse_events(gateway_events: Vec, _height: Height) -> Result, Error> { + let mut ibc_events = Vec::new(); + + for event in gateway_events { + tracing::debug!("Parsing event type: {}", event.r#type); + + // Convert attributes to a HashMap for easier lookup + let attrs = attributes_to_map(event.attributes); + + // Parse event based on type + let ibc_event = match event.r#type.as_str() { + // Client events + "create_client" => parse_create_client_event(attrs)?, + "update_client" => parse_update_client_event(attrs)?, + "upgrade_client" => parse_upgrade_client_event(attrs)?, + "client_misbehaviour" => parse_client_misbehaviour_event(attrs)?, + + // Connection events + "connection_open_init" => parse_connection_open_init_event(attrs)?, + "connection_open_try" => parse_connection_open_try_event(attrs)?, + "connection_open_ack" => parse_connection_open_ack_event(attrs)?, + "connection_open_confirm" => parse_connection_open_confirm_event(attrs)?, + + // Channel events + "channel_open_init" => parse_channel_open_init_event(attrs)?, + "channel_open_try" => parse_channel_open_try_event(attrs)?, + "channel_open_ack" => parse_channel_open_ack_event(attrs)?, + "channel_open_confirm" => parse_channel_open_confirm_event(attrs)?, + "channel_close_init" => parse_channel_close_init_event(attrs)?, + "channel_close_confirm" => parse_channel_close_confirm_event(attrs)?, + + // Packet events + 
"send_packet" => parse_send_packet_event(attrs)?, + "recv_packet" => parse_recv_packet_event(attrs)?, + "write_acknowledgement" => parse_write_acknowledgement_event(attrs)?, + "acknowledge_packet" => parse_acknowledge_packet_event(attrs)?, + "timeout_packet" => parse_timeout_packet_event(attrs)?, + "timeout_on_close_packet" => parse_timeout_on_close_packet_event(attrs)?, + + // Unknown event type - log warning and skip + _ => { + tracing::warn!("Unknown event type: {}", event.r#type); + continue; + } + }; + + ibc_events.push(ibc_event); + } + + Ok(ibc_events) +} + +/// Convert event attributes to a HashMap for easier lookup +fn attributes_to_map(attributes: Vec) -> HashMap { + attributes + .into_iter() + .map(|attr| (attr.key, attr.value)) + .collect() +} + +// +// Client event parsers +// + +fn parse_create_client_event(attrs: HashMap) -> Result { + let client_id = parse_client_id(&attrs, "client_id")?; + let client_type = parse_client_type(&attrs, "client_type")?; + let consensus_height = parse_height(&attrs, "consensus_height")?; + + let attributes = ClientEvents::Attributes { + client_id, + client_type, + consensus_height, + }; + + Ok(IbcEvent::CreateClient(ClientEvents::CreateClient( + attributes, + ))) +} + +fn parse_update_client_event(attrs: HashMap) -> Result { + let client_id = parse_client_id(&attrs, "client_id")?; + let client_type = parse_client_type(&attrs, "client_type")?; + let consensus_height = parse_height(&attrs, "consensus_height")?; + + let common = ClientEvents::Attributes { + client_id, + client_type, + consensus_height, + }; + + Ok(IbcEvent::UpdateClient(ClientEvents::UpdateClient { + common, + header: None, // Header is not included in Gateway events + })) +} + +fn parse_upgrade_client_event(attrs: HashMap) -> Result { + let client_id = parse_client_id(&attrs, "client_id")?; + let client_type = parse_client_type(&attrs, "client_type")?; + let consensus_height = parse_height(&attrs, "consensus_height")?; + + let attributes = 
ClientEvents::Attributes { + client_id, + client_type, + consensus_height, + }; + + Ok(IbcEvent::UpgradeClient(ClientEvents::UpgradeClient( + attributes, + ))) +} + +fn parse_client_misbehaviour_event(attrs: HashMap) -> Result { + let client_id = parse_client_id(&attrs, "client_id")?; + let client_type = parse_client_type(&attrs, "client_type")?; + let consensus_height = parse_height(&attrs, "consensus_height")?; + + let attributes = ClientEvents::Attributes { + client_id, + client_type, + consensus_height, + }; + + Ok(IbcEvent::ClientMisbehaviour( + ClientEvents::ClientMisbehaviour(attributes), + )) +} + +// +// Connection event parsers +// + +fn parse_connection_open_init_event(attrs: HashMap) -> Result { + let connection_id = parse_optional_connection_id(&attrs, "connection_id"); + let client_id = parse_client_id(&attrs, "client_id")?; + let counterparty_connection_id = + parse_optional_connection_id(&attrs, "counterparty_connection_id"); + let counterparty_client_id = parse_client_id(&attrs, "counterparty_client_id")?; + + let attributes = ConnectionEvents::Attributes { + connection_id, + client_id, + counterparty_connection_id, + counterparty_client_id, + }; + + Ok(IbcEvent::OpenInitConnection(ConnectionEvents::OpenInit( + attributes, + ))) +} + +fn parse_connection_open_try_event(attrs: HashMap) -> Result { + let connection_id = parse_optional_connection_id(&attrs, "connection_id"); + let client_id = parse_client_id(&attrs, "client_id")?; + let counterparty_connection_id = + parse_optional_connection_id(&attrs, "counterparty_connection_id"); + let counterparty_client_id = parse_client_id(&attrs, "counterparty_client_id")?; + + let attributes = ConnectionEvents::Attributes { + connection_id, + client_id, + counterparty_connection_id, + counterparty_client_id, + }; + + Ok(IbcEvent::OpenTryConnection(ConnectionEvents::OpenTry( + attributes, + ))) +} + +fn parse_connection_open_ack_event(attrs: HashMap) -> Result { + let connection_id = 
parse_optional_connection_id(&attrs, "connection_id"); + let client_id = parse_client_id(&attrs, "client_id")?; + let counterparty_connection_id = + parse_optional_connection_id(&attrs, "counterparty_connection_id"); + let counterparty_client_id = parse_client_id(&attrs, "counterparty_client_id")?; + + let attributes = ConnectionEvents::Attributes { + connection_id, + client_id, + counterparty_connection_id, + counterparty_client_id, + }; + + Ok(IbcEvent::OpenAckConnection(ConnectionEvents::OpenAck( + attributes, + ))) +} + +fn parse_connection_open_confirm_event(attrs: HashMap) -> Result { + let connection_id = parse_optional_connection_id(&attrs, "connection_id"); + let client_id = parse_client_id(&attrs, "client_id")?; + let counterparty_connection_id = + parse_optional_connection_id(&attrs, "counterparty_connection_id"); + let counterparty_client_id = parse_client_id(&attrs, "counterparty_client_id")?; + + let attributes = ConnectionEvents::Attributes { + connection_id, + client_id, + counterparty_connection_id, + counterparty_client_id, + }; + + Ok(IbcEvent::OpenConfirmConnection( + ConnectionEvents::OpenConfirm(attributes), + )) +} + +// +// Channel event parsers +// + +fn parse_channel_open_init_event(attrs: HashMap) -> Result { + let port_id = parse_port_id(&attrs, "port_id")?; + let channel_id = parse_optional_channel_id(&attrs, "channel_id"); + let connection_id = parse_connection_id(&attrs, "connection_id")?; + let counterparty_port_id = parse_port_id(&attrs, "counterparty_port_id")?; + let counterparty_channel_id = parse_optional_channel_id(&attrs, "counterparty_channel_id"); + + Ok(IbcEvent::OpenInitChannel(ChannelEvents::OpenInit { + port_id, + channel_id, + connection_id, + counterparty_port_id, + counterparty_channel_id, + })) +} + +fn parse_channel_open_try_event(attrs: HashMap) -> Result { + let port_id = parse_port_id(&attrs, "port_id")?; + let channel_id = parse_optional_channel_id(&attrs, "channel_id"); + let connection_id = 
parse_connection_id(&attrs, "connection_id")?; + let counterparty_port_id = parse_port_id(&attrs, "counterparty_port_id")?; + let counterparty_channel_id = parse_optional_channel_id(&attrs, "counterparty_channel_id"); + + Ok(IbcEvent::OpenTryChannel(ChannelEvents::OpenTry { + port_id, + channel_id, + connection_id, + counterparty_port_id, + counterparty_channel_id, + })) +} + +fn parse_channel_open_ack_event(attrs: HashMap) -> Result { + let port_id = parse_port_id(&attrs, "port_id")?; + let channel_id = parse_optional_channel_id(&attrs, "channel_id"); + let connection_id = parse_connection_id(&attrs, "connection_id")?; + let counterparty_port_id = parse_port_id(&attrs, "counterparty_port_id")?; + let counterparty_channel_id = parse_optional_channel_id(&attrs, "counterparty_channel_id"); + + Ok(IbcEvent::OpenAckChannel(ChannelEvents::OpenAck { + port_id, + channel_id, + connection_id, + counterparty_port_id, + counterparty_channel_id, + })) +} + +fn parse_channel_open_confirm_event(attrs: HashMap) -> Result { + let port_id = parse_port_id(&attrs, "port_id")?; + let channel_id = parse_optional_channel_id(&attrs, "channel_id"); + let connection_id = parse_connection_id(&attrs, "connection_id")?; + let counterparty_port_id = parse_port_id(&attrs, "counterparty_port_id")?; + let counterparty_channel_id = parse_optional_channel_id(&attrs, "counterparty_channel_id"); + + Ok(IbcEvent::OpenConfirmChannel(ChannelEvents::OpenConfirm { + port_id, + channel_id, + connection_id, + counterparty_port_id, + counterparty_channel_id, + })) +} + +fn parse_channel_close_init_event(attrs: HashMap) -> Result { + let port_id = parse_port_id(&attrs, "port_id")?; + let channel_id = parse_channel_id(&attrs, "channel_id")?; + let connection_id = parse_connection_id(&attrs, "connection_id")?; + let counterparty_port_id = parse_port_id(&attrs, "counterparty_port_id")?; + let counterparty_channel_id = parse_optional_channel_id(&attrs, "counterparty_channel_id"); + + 
Ok(IbcEvent::CloseInitChannel(ChannelEvents::CloseInit { + port_id, + channel_id, + connection_id, + counterparty_port_id, + counterparty_channel_id, + })) +} + +fn parse_channel_close_confirm_event(attrs: HashMap) -> Result { + let port_id = parse_port_id(&attrs, "port_id")?; + let channel_id = parse_optional_channel_id(&attrs, "channel_id"); + let connection_id = parse_connection_id(&attrs, "connection_id")?; + let counterparty_port_id = parse_port_id(&attrs, "counterparty_port_id")?; + let counterparty_channel_id = parse_optional_channel_id(&attrs, "counterparty_channel_id"); + + Ok(IbcEvent::CloseConfirmChannel(ChannelEvents::CloseConfirm { + channel_id, + port_id, + connection_id, + counterparty_port_id, + counterparty_channel_id, + })) +} + +// +// Packet event parsers +// + +fn parse_send_packet_event(attrs: HashMap) -> Result { + let packet = parse_packet(&attrs)?; + Ok(IbcEvent::SendPacket(ChannelEvents::SendPacket { packet })) +} + +fn parse_recv_packet_event(attrs: HashMap) -> Result { + let packet = parse_packet(&attrs)?; + Ok(IbcEvent::ReceivePacket(ChannelEvents::ReceivePacket { + packet, + })) +} + +fn parse_write_acknowledgement_event(attrs: HashMap) -> Result { + let packet = parse_packet(&attrs)?; + let ack = parse_bytes(&attrs, "packet_ack")?; + Ok(IbcEvent::WriteAcknowledgement( + ChannelEvents::WriteAcknowledgement { packet, ack }, + )) +} + +fn parse_acknowledge_packet_event(attrs: HashMap) -> Result { + let packet = parse_packet(&attrs)?; + Ok(IbcEvent::AcknowledgePacket( + ChannelEvents::AcknowledgePacket { packet }, + )) +} + +fn parse_timeout_packet_event(attrs: HashMap) -> Result { + let packet = parse_packet(&attrs)?; + Ok(IbcEvent::TimeoutPacket(ChannelEvents::TimeoutPacket { + packet, + })) +} + +fn parse_timeout_on_close_packet_event(attrs: HashMap) -> Result { + let packet = parse_packet(&attrs)?; + Ok(IbcEvent::TimeoutOnClosePacket( + ChannelEvents::TimeoutOnClosePacket { packet }, + )) +} + +// +// Helper functions for parsing 
attribute values +// + +fn parse_client_id(attrs: &HashMap, key: &str) -> Result { + let value = attrs + .get(key) + .ok_or_else(|| Error::EventAttribute(format!("Missing attribute: {}", key)))?; + + ClientId::from_str(value) + .map_err(|e| Error::EventAttribute(format!("Invalid client_id '{}': {}", value, e))) +} + +fn parse_client_type( + attrs: &HashMap, + key: &str, +) -> Result { + let value = attrs + .get(key) + .ok_or_else(|| Error::EventAttribute(format!("Missing attribute: {}", key)))?; + + match value.as_str() { + "cardano" | "08-cardano" => { + Ok(ibc_relayer_types::core::ics02_client::client_type::ClientType::Cardano) + } + "tendermint" | "07-tendermint" => { + Ok(ibc_relayer_types::core::ics02_client::client_type::ClientType::Tendermint) + } + _ => Err(Error::EventAttribute(format!( + "Unknown client type: {}", + value + ))), + } +} + +fn parse_height(attrs: &HashMap, key: &str) -> Result { + let value = attrs + .get(key) + .ok_or_else(|| Error::EventAttribute(format!("Missing attribute: {}", key)))?; + + // Height format: "revision_number-revision_height" (e.g., "0-100") + let parts: Vec<&str> = value.split('-').collect(); + if parts.len() != 2 { + return Err(Error::EventAttribute(format!( + "Invalid height format '{}', expected 'revision-height'", + value + ))); + } + + let revision_number = parts[0].parse::().map_err(|e| { + Error::EventAttribute(format!("Invalid revision number '{}': {}", parts[0], e)) + })?; + let revision_height = parts[1].parse::().map_err(|e| { + Error::EventAttribute(format!("Invalid revision height '{}': {}", parts[1], e)) + })?; + + Height::new(revision_number, revision_height) + .map_err(|e| Error::EventAttribute(format!("Invalid height: {}", e))) +} + +fn parse_timeout_height( + attrs: &HashMap, + key: &str, +) -> Result { + let value = attrs + .get(key) + .ok_or_else(|| Error::EventAttribute(format!("Missing attribute: {}", key)))?; + + match Height::from_str(value) { + Ok(height) => Ok(TimeoutHeight::from(height)), + 
Err(e) => { + let error_message = e.to_string(); + match e.into_detail() { + HeightErrorDetail::ZeroHeight(_) => Ok(TimeoutHeight::no_timeout()), + _ => Err(Error::EventAttribute(format!( + "Invalid height: {}", + error_message + ))), + } + } + } +} + +fn parse_connection_id(attrs: &HashMap, key: &str) -> Result { + let value = attrs + .get(key) + .ok_or_else(|| Error::EventAttribute(format!("Missing attribute: {}", key)))?; + + ConnectionId::from_str(value) + .map_err(|e| Error::EventAttribute(format!("Invalid connection_id '{}': {}", value, e))) +} + +fn parse_optional_connection_id( + attrs: &HashMap, + key: &str, +) -> Option { + attrs.get(key).and_then(|v| ConnectionId::from_str(v).ok()) +} + +fn parse_port_id(attrs: &HashMap, key: &str) -> Result { + let value = attrs + .get(key) + .ok_or_else(|| Error::EventAttribute(format!("Missing attribute: {}", key)))?; + + PortId::from_str(value) + .map_err(|e| Error::EventAttribute(format!("Invalid port_id '{}': {}", value, e))) +} + +fn parse_channel_id(attrs: &HashMap, key: &str) -> Result { + let value = attrs + .get(key) + .ok_or_else(|| Error::EventAttribute(format!("Missing attribute: {}", key)))?; + + ChannelId::from_str(value) + .map_err(|e| Error::EventAttribute(format!("Invalid channel_id '{}': {}", value, e))) +} + +fn parse_optional_channel_id(attrs: &HashMap, key: &str) -> Option { + attrs.get(key).and_then(|v| ChannelId::from_str(v).ok()) +} + +fn parse_u64(attrs: &HashMap, key: &str) -> Result { + let value = attrs + .get(key) + .ok_or_else(|| Error::EventAttribute(format!("Missing attribute: {}", key)))?; + + value + .parse::() + .map_err(|e| Error::EventAttribute(format!("Invalid u64 '{}': {}", value, e))) +} + +fn parse_bytes(attrs: &HashMap, key: &str) -> Result, Error> { + let value = attrs + .get(key) + .ok_or_else(|| Error::EventAttribute(format!("Missing attribute: {}", key)))?; + + let value_trimmed = value.strip_prefix("0x").unwrap_or(value); + match hex::decode(value_trimmed) { + Ok(bytes) => 
Ok(bytes), + Err(_) => Ok(value.as_bytes().to_vec()), + } +} + +fn parse_packet(attrs: &HashMap) -> Result { + let sequence = parse_u64(attrs, "packet_sequence")?; + let source_port = parse_port_id(attrs, "packet_src_port")?; + let source_channel = parse_channel_id(attrs, "packet_src_channel")?; + let destination_port = parse_port_id(attrs, "packet_dst_port")?; + let destination_channel = parse_channel_id(attrs, "packet_dst_channel")?; + let data = parse_bytes(attrs, "packet_data")?; + let timeout_height = parse_timeout_height(attrs, "packet_timeout_height")?; + let timeout_timestamp_nanos = parse_u64(attrs, "packet_timeout_timestamp")?; + let timeout_timestamp = Timestamp::from_nanoseconds(timeout_timestamp_nanos) + .map_err(|e| Error::EventAttribute(format!("Invalid timestamp: {}", e)))?; + + Ok(Packet { + sequence: sequence.into(), + source_port, + source_channel, + destination_port, + destination_channel, + data, + timeout_height, + timeout_timestamp, + }) +} + +#[cfg(test)] +mod tests { + use super::*; + use ibc_relayer_types::core::ics02_client::height::Height; + use ibc_relayer_types::core::ics04_channel::timeout::TimeoutHeight; + use ibc_relayer_types::events::IbcEvent as RelayerIbcEvent; + + fn attrs(kvs: &[(&str, &str)]) -> Vec { + kvs.iter() + .map(|(k, v)| EventAttribute { + key: (*k).to_string(), + value: (*v).to_string(), + }) + .collect() + } + + #[test] + fn parse_timeout_on_close_packet_event_ok() { + let gateway_event = Event { + r#type: "timeout_on_close_packet".to_string(), + attributes: attrs(&[ + ("packet_sequence", "7"), + ("packet_src_port", "transfer"), + ("packet_src_channel", "channel-0"), + ("packet_dst_port", "transfer"), + ("packet_dst_channel", "channel-1"), + ("packet_data", "deadbeef"), + ("packet_timeout_height", "0-10"), + ("packet_timeout_timestamp", "1000"), + ]), + }; + + let height = Height::new(0, 1).unwrap(); + let events = parse_events(vec![gateway_event], height).unwrap(); + + assert_eq!(events.len(), 1); + match 
&events[0] { + RelayerIbcEvent::TimeoutOnClosePacket(ev) => { + assert_eq!(ev.packet.sequence, 7.into()); + assert_eq!(ev.packet.source_port.as_str(), "transfer"); + assert_eq!(ev.packet.source_channel.as_str(), "channel-0"); + assert_eq!(ev.packet.destination_port.as_str(), "transfer"); + assert_eq!(ev.packet.destination_channel.as_str(), "channel-1"); + assert_eq!(ev.packet.data, hex::decode("deadbeef").unwrap()); + } + other => panic!("unexpected event: {other:?}"), + } + } + + #[test] + fn parse_timeout_on_close_packet_event_missing_attr_fails() { + let gateway_event = Event { + r#type: "timeout_on_close_packet".to_string(), + attributes: attrs(&[ + ("packet_sequence", "7"), + ("packet_src_port", "transfer"), + ("packet_src_channel", "channel-0"), + ("packet_dst_port", "transfer"), + ("packet_dst_channel", "channel-1"), + // packet_data missing + ("packet_timeout_height", "0-10"), + ("packet_timeout_timestamp", "1000"), + ]), + }; + + let height = Height::new(0, 1).unwrap(); + let err = parse_events(vec![gateway_event], height).unwrap_err(); + + match err { + Error::EventAttribute(msg) => assert!(msg.contains("Missing attribute: packet_data")), + other => panic!("unexpected error: {other:?}"), + } + } + + #[test] + fn parse_timeout_on_close_packet_event_raw_json_data_ok() { + let payload = r#"{"amount":"1000000","denom":"stake","receiver":"abc","sender":"def"}"#; + let gateway_event = Event { + r#type: "timeout_on_close_packet".to_string(), + attributes: attrs(&[ + ("packet_sequence", "7"), + ("packet_src_port", "transfer"), + ("packet_src_channel", "channel-0"), + ("packet_dst_port", "transfer"), + ("packet_dst_channel", "channel-1"), + ("packet_data", payload), + ("packet_timeout_height", "0-10"), + ("packet_timeout_timestamp", "1000"), + ]), + }; + + let height = Height::new(0, 1).unwrap(); + let events = parse_events(vec![gateway_event], height).unwrap(); + + assert_eq!(events.len(), 1); + match &events[0] { + RelayerIbcEvent::TimeoutOnClosePacket(ev) => { 
+ assert_eq!(ev.packet.data, payload.as_bytes().to_vec()); + } + other => panic!("unexpected event: {other:?}"), + } + } + + #[test] + fn parse_send_packet_event_zero_timeout_height_maps_to_no_timeout() { + let gateway_event = Event { + r#type: "send_packet".to_string(), + attributes: attrs(&[ + ("packet_sequence", "9"), + ("packet_src_port", "transfer"), + ("packet_src_channel", "channel-0"), + ("packet_dst_port", "transfer"), + ("packet_dst_channel", "channel-1"), + ("packet_data", "deadbeef"), + ("packet_timeout_height", "0-0"), + ("packet_timeout_timestamp", "1000"), + ]), + }; + + let height = Height::new(0, 1).unwrap(); + let events = parse_events(vec![gateway_event], height).unwrap(); + + assert_eq!(events.len(), 1); + match &events[0] { + RelayerIbcEvent::SendPacket(ev) => { + assert_eq!(ev.packet.timeout_height, TimeoutHeight::no_timeout()); + } + other => panic!("unexpected event: {other:?}"), + } + } +} diff --git a/crates/relayer/src/chain/cardano/event_source.rs b/crates/relayer/src/chain/cardano/event_source.rs new file mode 100644 index 0000000000..4eda92f065 --- /dev/null +++ b/crates/relayer/src/chain/cardano/event_source.rs @@ -0,0 +1,296 @@ +//! Event source for Cardano chain +//! +//! Polls the Gateway for IBC events and broadcasts them to subscribers. 
+ +use std::sync::Arc; + +use crossbeam_channel as channel; +use tokio::{ + runtime::Runtime as TokioRuntime, + time::{sleep, Duration, Instant}, +}; +use tracing::{debug, error, error_span, trace}; + +use ibc_relayer_types::core::{ics02_client::height::Height, ics24_host::identifier::ChainId}; + +use crate::{ + chain::tracking::TrackingId, + event::{bus::EventBus, source::Error, IbcEventWithHeight}, + telemetry, +}; + +use super::{event_parser, gateway_client::GatewayClient}; + +use crate::event::source::{EventBatch, EventSourceCmd, TxEventSourceCmd}; + +pub type Result = core::result::Result; + +#[derive(Debug, Copy, Clone)] +enum Next { + Continue, + Abort, +} + +/// An event source that polls the Cardano Gateway for IBC events +pub struct CardanoEventSource { + /// Chain identifier + chain_id: ChainId, + + /// Gateway client for querying events + gateway_client: GatewayClient, + + /// Poll interval + poll_interval: Duration, + + /// Event bus for broadcasting events + event_bus: EventBus>>, + + /// Channel where to receive commands + rx_cmd: channel::Receiver, + + /// Tokio runtime + rt: Arc, + + /// Last fetched block height + last_fetched_height: Height, +} + +impl CardanoEventSource { + pub fn new( + chain_id: ChainId, + gateway_client: GatewayClient, + poll_interval: Duration, + rt: Arc, + ) -> Result<(Self, TxEventSourceCmd)> { + let event_bus = EventBus::new(); + let (tx_cmd, rx_cmd) = channel::unbounded(); + + let source = Self { + rt, + chain_id, + gateway_client, + poll_interval, + event_bus, + rx_cmd, + // Start at a valid (non-zero) height; `run()` will immediately reset this + // to the latest height if the gateway is reachable. 
+ last_fetched_height: Height::new(0, 1).map_err(|e| { + Error::collect_events_failed(format!("Failed to create initial height: {}", e)) + })?, + }; + + Ok((source, TxEventSourceCmd::new(tx_cmd))) + } + + pub fn run(mut self) { + let _span = error_span!("event_source.cardano", chain.id = %self.chain_id).entered(); + + debug!("starting Cardano event source"); + + let rt = self.rt.clone(); + + rt.block_on(async { + // Initialize the latest fetched height + if let Ok(latest_height) = self.fetch_latest_height().await { + self.last_fetched_height = latest_height; + debug!("initialized at height: {}", self.last_fetched_height); + } + + // Continuously run the event loop + loop { + let before_step = Instant::now(); + + match self.step().await { + Ok(Next::Abort) => break, + + Ok(Next::Continue) => { + // Check if we need to wait before the next iteration + let delay = self.poll_interval.checked_sub(before_step.elapsed()); + + if let Some(delay_remaining) = delay { + sleep(delay_remaining).await; + } + + continue; + } + + Err(e) => { + error!("event source encountered an error: {e}"); + // Wait before retrying + sleep(Duration::from_secs(5)).await; + } + } + } + }); + + debug!("shutting down Cardano event source"); + } + + async fn step(&mut self) -> Result { + // Process any shutdown or subscription commands before we start doing any work + if let Next::Abort = self.try_process_cmd() { + return Ok(Next::Abort); + } + + // Query Gateway for events since last height + let response = self + .gateway_client + .query_events(self.last_fetched_height) + .await + .map_err(|e| { + Error::collect_events_failed(format!("Failed to query Gateway events: {}", e)) + })?; + + let current_height = Height::new(0, response.current_height).map_err(|e| { + Error::collect_events_failed(format!("Invalid height from Gateway: {}", e)) + })?; + + // Process events if we have new blocks + if !response.events.is_empty() { + trace!( + "received {} block(s) of events from height {} to {}", + 
response.events.len(), + self.last_fetched_height, + current_height + ); + + for block_events in response.events { + let batch = self.process_block_events(block_events)?; + + // Check for commands before broadcasting + if let Next::Abort = self.try_process_cmd() { + return Ok(Next::Abort); + } + + if let Some(batch) = batch { + self.broadcast_batch(batch); + } + } + + // Update last fetched height + self.last_fetched_height = current_height; + } else { + trace!( + "no new events, current height: {}, last fetched: {}", + current_height, + self.last_fetched_height + ); + } + + Ok(Next::Continue) + } + + /// Process any pending commands, if any. + fn try_process_cmd(&mut self) -> Next { + if let Ok(cmd) = self.rx_cmd.try_recv() { + match cmd { + EventSourceCmd::Shutdown => return Next::Abort, + + EventSourceCmd::Subscribe(tx) => { + if let Err(e) = tx.send(self.event_bus.subscribe()) { + error!("failed to send back subscription: {e}"); + } + } + } + } + + Next::Continue + } + + /// Process events from a single block + fn process_block_events( + &self, + block_events: super::generated::ibc::cardano::v1::BlockEvents, + ) -> Result> { + let height = Height::new(0, block_events.height) + .map_err(|e| Error::collect_events_failed(format!("Invalid block height: {}", e)))?; + + if block_events.events.is_empty() { + return Ok(None); + } + + // Flatten all events from all ResponseDeliverTx items and convert to cardano Event type + let gateway_events: Vec<_> = block_events + .events + .into_iter() + .flat_map(|tx_result| { + tx_result.events.into_iter().map(|core_event| { + // Convert ibc.core.types.v1.Event to ibc.cardano.v1.Event + super::generated::ibc::cardano::v1::Event { + r#type: core_event.r#type, + attributes: core_event + .event_attribute + .into_iter() + .map(|attr| super::generated::ibc::cardano::v1::EventAttribute { + key: attr.key, + value: attr.value, + }) + .collect(), + } + }) + }) + .collect(); + + if gateway_events.is_empty() { + return Ok(None); + } + + // 
Parse Gateway events into IBC events + let ibc_events = event_parser::parse_events(gateway_events, height) + .map_err(|e| Error::collect_events_failed(format!("Failed to parse events: {}", e)))?; + + if ibc_events.is_empty() { + return Ok(None); + } + + // Convert to IbcEventWithHeight + let events_with_height: Vec = ibc_events + .into_iter() + .map(|event| IbcEventWithHeight::new(event, height)) + .collect(); + + debug!( + chain = %self.chain_id, + height = %height, + count = events_with_height.len(), + "parsed {} IBC events at height {}", + events_with_height.len(), + height + ); + + let batch = EventBatch { + chain_id: self.chain_id.clone(), + tracking_id: TrackingId::new_uuid(), + height, + events: events_with_height, + }; + + Ok(Some(batch)) + } + + /// Broadcast an event batch to all subscribers + fn broadcast_batch(&mut self, batch: EventBatch) { + telemetry!(ws_events, &batch.chain_id, batch.events.len() as u64); + + trace!( + chain = %batch.chain_id, + count = %batch.events.len(), + height = %batch.height, + "broadcasting batch of {} events at height {}", + batch.events.len(), + batch.height + ); + + self.event_bus.broadcast(Arc::new(Ok(batch))); + } + + /// Fetch the current chain height from Gateway + async fn fetch_latest_height(&self) -> Result { + self.gateway_client + .query_latest_height() + .await + .map_err(|e| { + Error::collect_events_failed(format!("Failed to fetch latest height: {}", e)) + }) + } +} diff --git a/crates/relayer/src/chain/cardano/gateway_client.rs b/crates/relayer/src/chain/cardano/gateway_client.rs new file mode 100644 index 0000000000..34235f28c8 --- /dev/null +++ b/crates/relayer/src/chain/cardano/gateway_client.rs @@ -0,0 +1,1452 @@ +//! gRPC client for Cardano Gateway +//! +//! This module provides a client for interacting with the Cardano Gateway service, +//! which handles Cardano blockchain queries, transaction building, and submission. 
+ +use super::error::Error; +use super::generated::ibc::cardano::v1::{ + cardano_msg_client::CardanoMsgClient, SubmitSignedTxRequest, SubmitSignedTxResponse, +}; +use super::generated::ibc::core::channel::v1::msg_client::MsgClient as GenChannelMsgClient; +use super::generated::ibc::core::client::v1::msg_client::MsgClient as GenClientMsgClient; +use super::generated::ibc::core::connection::v1::msg_client::MsgClient as GenConnectionMsgClient; +use ibc_proto::google::protobuf::Any as ProtoAny; +use ibc_proto::ibc::core::channel::v1::query_client::QueryClient as ChannelQueryClient; +use ibc_proto::ibc::core::channel::v1::{ + QueryChannelClientStateRequest, QueryChannelClientStateResponse, QueryChannelRequest, + QueryChannelsRequest, QueryConnectionChannelsRequest, QueryNextSequenceReceiveRequest, + QueryPacketAcknowledgementRequest, QueryPacketAcknowledgementsRequest, + QueryPacketCommitmentRequest, QueryPacketCommitmentsRequest, QueryPacketReceiptRequest, + QueryUnreceivedAcksRequest, QueryUnreceivedPacketsRequest, +}; +use ibc_proto::ibc::core::client::v1::query_client::QueryClient as ClientQueryClient; +use ibc_proto::ibc::core::client::v1::{ + QueryClientStateRequest, QueryClientStateResponse, QueryClientStatesRequest, + QueryConsensusStateHeightsRequest, QueryConsensusStateHeightsResponse, + QueryConsensusStateRequest, QueryConsensusStateResponse, QueryConsensusStatesRequest, + QueryConsensusStatesResponse, +}; +use ibc_proto::ibc::core::connection::v1::query_client::QueryClient as ConnectionQueryClient; +use ibc_proto::ibc::core::connection::v1::{ + QueryClientConnectionsRequest, QueryConnectionRequest, QueryConnectionsRequest, +}; +use ibc_relayer_types::clients::ics08_cardano::header::Header as MithrilHeader; +use ibc_relayer_types::Height; +use tonic::transport::Channel; + +/// Unsigned transaction response from Gateway +#[derive(Debug, Clone)] +pub struct UnsignedTx { + pub cbor_hex: String, + pub description: String, +} + +/// Transaction submission response 
from Gateway +#[derive(Debug, Clone)] +pub struct TxSubmitResponse { + pub tx_hash: String, + pub height: Option, + pub events: Vec, +} + +/// Simplified IBC event structure for Gateway responses +#[derive(Debug, Clone)] +pub struct IbcEvent { + pub event_type: String, + pub attributes: Vec<(String, String)>, +} + +/// Client for communicating with Cardano Gateway +#[derive(Clone)] +pub struct GatewayClient { + endpoint: String, + channel: Channel, +} + +impl GatewayClient { + /// Create a new Gateway client and establish a gRPC connection + pub async fn new(endpoint: String) -> Result { + tracing::info!("Connecting to Cardano Gateway at {}", endpoint); + + let channel = Channel::from_shared(endpoint.clone()) + .map_err(|e| Error::GatewayClient(e.to_string()))? + .connect() + .await?; + + Ok(Self { endpoint, channel }) + } + + /// Query the latest block height from the Gateway + pub async fn query_latest_height(&self) -> Result { + use super::generated::ibc::core::client::v1::{ + query_client::QueryClient, QueryLatestHeightRequest, + }; + + let mut client = QueryClient::new(self.channel.clone()); + + let request = tonic::Request::new(QueryLatestHeightRequest {}); + + let response = client.latest_height(request).await?.into_inner(); + + tracing::debug!("Queried latest height: {}", response.height); + + // Height format: revision_number-revision_height + // For Cardano, we use revision_number = 0 + Height::new(0, response.height) + .map_err(|e| Error::Query(format!("Invalid height {}: {}", response.height, e))) + } + + /// Query the canonical Mithril client state/consensus state for creating a new client. 
+ pub async fn query_new_client( + &self, + height: u64, + ) -> Result { + use super::generated::ibc::core::client::v1::{ + query_client::QueryClient, QueryNewClientRequest, + }; + + let mut client = QueryClient::new(self.channel.clone()); + let request = tonic::Request::new(QueryNewClientRequest { height }); + let response = client.new_client(request).await?.into_inner(); + + Ok(response) + } + + /// Query client state for a specific client ID + pub async fn query_client_state( + &self, + client_id: &str, + ) -> Result { + let mut client = ClientQueryClient::new(self.channel.clone()); + + let request = tonic::Request::new(QueryClientStateRequest { + client_id: client_id.to_string(), + }); + + let response = client.client_state(request).await?.into_inner(); + + Ok(response) + } + + /// Query consensus state for a specific client ID and height + pub async fn query_consensus_state( + &self, + client_id: &str, + height: Height, + ) -> Result { + let mut client = ClientQueryClient::new(self.channel.clone()); + + let request = tonic::Request::new(QueryConsensusStateRequest { + client_id: client_id.to_string(), + revision_number: height.revision_number(), + revision_height: height.revision_height(), + latest_height: false, + }); + + let response = client.consensus_state(request).await?.into_inner(); + + Ok(response) + } + + /// Query consensus state heights for a specific client ID. + pub async fn query_consensus_state_heights( + &self, + request: QueryConsensusStateHeightsRequest, + ) -> Result { + let mut client = ClientQueryClient::new(self.channel.clone()); + let request = tonic::Request::new(request); + let response = client.consensus_state_heights(request).await?.into_inner(); + Ok(response) + } + + /// Query all consensus states for a specific client ID. 
+ pub async fn query_consensus_states( + &self, + request: QueryConsensusStatesRequest, + ) -> Result { + let mut client = ClientQueryClient::new(self.channel.clone()); + let request = tonic::Request::new(request); + let response = client.consensus_states(request).await?.into_inner(); + Ok(response) + } + + /// Query header at a specific height + /// + /// This is required for building headers used in `MsgUpdateClient`. + pub async fn query_header(&self, height: Height) -> Result { + use super::generated::ibc::core::types::v1::query_client::QueryClient as TypesQueryClient; + use super::generated::ibc::core::types::v1::QueryIbcHeaderRequest; + + let mut client = TypesQueryClient::new(self.channel.clone()); + + let request = tonic::Request::new(QueryIbcHeaderRequest { + height: height.revision_height(), + }); + + let response = client.ibc_header(request).await?.into_inner(); + + let header_any = response + .header + .ok_or_else(|| Error::Query("No header in response".to_string()))?; + + let header_any = ProtoAny { + type_url: header_any.type_url, + value: header_any.value, + }; + + header_any + .try_into() + .map_err(|e: ibc_relayer_types::core::ics02_client::error::Error| { + Error::Ibc(e.to_string()) + }) + } + + /// Query block results at a specific height. + pub async fn query_block_results( + &self, + height: u64, + ) -> Result { + use super::generated::ibc::core::types::v1::query_client::QueryClient as TypesQueryClient; + use super::generated::ibc::core::types::v1::QueryBlockResultsRequest; + + let mut client = TypesQueryClient::new(self.channel.clone()); + + let request = tonic::Request::new(QueryBlockResultsRequest { height }); + let response = client.block_results(request).await?.into_inner(); + + Ok(response) + } + + /// Search for blocks containing packet-related events. 
+ pub async fn query_block_search( + &self, + packet_src_channel: String, + packet_dst_channel: String, + packet_sequence: String, + limit: u64, + ) -> Result { + self.query_block_search_page( + packet_src_channel, + packet_dst_channel, + packet_sequence, + limit, + 1, + ) + .await + } + + /// Search for blocks containing packet-related events, returning all pages. + pub async fn query_block_search_all( + &self, + packet_src_channel: String, + packet_dst_channel: String, + packet_sequence: String, + limit: u64, + ) -> Result { + let mut page = 1u64; + let mut blocks = Vec::new(); + let mut total_count = None; + + loop { + let response = self + .query_block_search_page( + packet_src_channel.clone(), + packet_dst_channel.clone(), + packet_sequence.clone(), + limit, + page, + ) + .await?; + + if total_count.is_none() { + total_count = Some(response.total_count); + } + + let page_is_empty = response.blocks.is_empty(); + blocks.extend(response.blocks); + + let total = total_count.unwrap_or(0); + if total == 0 || blocks.len() as u64 >= total { + break; + } + + // Defensive: avoid infinite pagination if server returns empty pages. 
+ if page > 1 && page_is_empty { + break; + } + + page = page.saturating_add(1); + } + + Ok( + super::generated::ibc::core::types::v1::QueryBlockSearchResponse { + total_count: total_count.unwrap_or(blocks.len() as u64), + blocks, + }, + ) + } + + async fn query_block_search_page( + &self, + packet_src_channel: String, + packet_dst_channel: String, + packet_sequence: String, + limit: u64, + page: u64, + ) -> Result { + use super::generated::ibc::core::types::v1::query_client::QueryClient as TypesQueryClient; + use super::generated::ibc::core::types::v1::QueryBlockSearchRequest; + + let mut client = TypesQueryClient::new(self.channel.clone()); + + let request = tonic::Request::new(QueryBlockSearchRequest { + packet_src_channel, + packet_dst_channel, + packet_sequence, + limit, + page, + }); + + let response = client.block_search(request).await?.into_inner(); + Ok(response) + } + + /// Query a transaction by hash. + pub async fn query_transaction_by_hash( + &self, + hash: String, + ) -> Result { + use super::generated::ibc::core::types::v1::query_client::QueryClient as TypesQueryClient; + use super::generated::ibc::core::types::v1::QueryTransactionByHashRequest; + + let mut client = TypesQueryClient::new(self.channel.clone()); + + let request = tonic::Request::new(QueryTransactionByHashRequest { hash }); + let response = client.transaction_by_hash(request).await?.into_inner(); + Ok(response) + } + + /// Query the client state associated with a channel. 
+ pub async fn query_channel_client_state( + &self, + port_id: &str, + channel_id: &str, + ) -> Result, Error> { + let mut client = ChannelQueryClient::new(self.channel.clone()); + + let request = tonic::Request::new(QueryChannelClientStateRequest { + port_id: port_id.to_string(), + channel_id: channel_id.to_string(), + }); + + let response: QueryChannelClientStateResponse = + client.channel_client_state(request).await?.into_inner(); + + Ok(prost::Message::encode_to_vec(&response)) + } + + /// Query connection state + pub async fn query_connection(&self, connection_id: &str) -> Result, Error> { + let mut client = ConnectionQueryClient::new(self.channel.clone()); + + let request = tonic::Request::new(QueryConnectionRequest { + connection_id: connection_id.to_string(), + }); + + let response = client.connection(request).await?.into_inner(); + + // Return serialized connection + Ok(prost::Message::encode_to_vec(&response)) + } + + /// Query all connections + pub async fn query_connections(&self) -> Result, Error> { + let mut client = ConnectionQueryClient::new(self.channel.clone()); + + let request = tonic::Request::new(QueryConnectionsRequest { pagination: None }); + + let response = client.connections(request).await?.into_inner(); + + Ok(prost::Message::encode_to_vec(&response)) + } + + /// Query channel state + pub async fn query_channel(&self, port_id: &str, channel_id: &str) -> Result, Error> { + let mut client = ChannelQueryClient::new(self.channel.clone()); + + let request = tonic::Request::new(QueryChannelRequest { + port_id: port_id.to_string(), + channel_id: channel_id.to_string(), + }); + + let response = client.channel(request).await?.into_inner(); + + Ok(prost::Message::encode_to_vec(&response)) + } + + /// Query all channels + pub async fn query_channels(&self) -> Result, Error> { + let mut client = ChannelQueryClient::new(self.channel.clone()); + + let request = tonic::Request::new(QueryChannelsRequest { pagination: None }); + + let response = 
client.channels(request).await?.into_inner(); + + Ok(prost::Message::encode_to_vec(&response)) + } + + /// Query all clients + pub async fn query_clients(&self) -> Result, Error> { + let mut client = ClientQueryClient::new(self.channel.clone()); + + let request = tonic::Request::new(QueryClientStatesRequest { pagination: None }); + + let response = client.client_states(request).await?.into_inner(); + + Ok(prost::Message::encode_to_vec(&response)) + } + + /// Query connections associated with a client + pub async fn query_client_connections(&self, client_id: &str) -> Result, Error> { + let mut client = ConnectionQueryClient::new(self.channel.clone()); + + let request = tonic::Request::new(QueryClientConnectionsRequest { + client_id: client_id.to_string(), + }); + + let response = client.client_connections(request).await?.into_inner(); + + Ok(prost::Message::encode_to_vec(&response)) + } + + /// Query channels associated with a connection + pub async fn query_connection_channels(&self, connection_id: &str) -> Result, Error> { + let mut client = ChannelQueryClient::new(self.channel.clone()); + + let request = tonic::Request::new(QueryConnectionChannelsRequest { + connection: connection_id.to_string(), + pagination: None, + }); + + let response = client.connection_channels(request).await?.into_inner(); + + Ok(prost::Message::encode_to_vec(&response)) + } + + /// Query packet commitment + pub async fn query_packet_commitment( + &self, + port_id: &str, + channel_id: &str, + sequence: u64, + ) -> Result, Error> { + let mut client = ChannelQueryClient::new(self.channel.clone()); + + let request = tonic::Request::new(QueryPacketCommitmentRequest { + port_id: port_id.to_string(), + channel_id: channel_id.to_string(), + sequence, + }); + + let response = client.packet_commitment(request).await?.into_inner(); + + Ok(prost::Message::encode_to_vec(&response)) + } + + /// Query all packet commitments for a channel + pub async fn query_packet_commitments( + &self, + port_id: &str, 
+ channel_id: &str, + ) -> Result, Error> { + let mut client = ChannelQueryClient::new(self.channel.clone()); + + let request = tonic::Request::new(QueryPacketCommitmentsRequest { + port_id: port_id.to_string(), + channel_id: channel_id.to_string(), + pagination: None, + }); + + let response = client.packet_commitments(request).await?.into_inner(); + + Ok(prost::Message::encode_to_vec(&response)) + } + + /// Query packet receipt + pub async fn query_packet_receipt( + &self, + port_id: &str, + channel_id: &str, + sequence: u64, + ) -> Result, Error> { + let mut client = ChannelQueryClient::new(self.channel.clone()); + + let request = tonic::Request::new(QueryPacketReceiptRequest { + port_id: port_id.to_string(), + channel_id: channel_id.to_string(), + sequence, + }); + + let response = client.packet_receipt(request).await?.into_inner(); + + Ok(prost::Message::encode_to_vec(&response)) + } + + /// Query packet acknowledgement + pub async fn query_packet_acknowledgement( + &self, + port_id: &str, + channel_id: &str, + sequence: u64, + ) -> Result, Error> { + let mut client = ChannelQueryClient::new(self.channel.clone()); + + let request = tonic::Request::new(QueryPacketAcknowledgementRequest { + port_id: port_id.to_string(), + channel_id: channel_id.to_string(), + sequence, + }); + + let response = client.packet_acknowledgement(request).await?.into_inner(); + + Ok(prost::Message::encode_to_vec(&response)) + } + + /// Query all packet acknowledgements for a channel + pub async fn query_packet_acknowledgements( + &self, + port_id: &str, + channel_id: &str, + ) -> Result, Error> { + let mut client = ChannelQueryClient::new(self.channel.clone()); + + let request = tonic::Request::new(QueryPacketAcknowledgementsRequest { + port_id: port_id.to_string(), + channel_id: channel_id.to_string(), + pagination: None, + packet_commitment_sequences: vec![], + }); + + let response = client.packet_acknowledgements(request).await?.into_inner(); + + 
Ok(prost::Message::encode_to_vec(&response)) + } + + /// Query unreceived packets + pub async fn query_unreceived_packets( + &self, + port_id: &str, + channel_id: &str, + sequences: Vec, + ) -> Result, Error> { + let mut client = ChannelQueryClient::new(self.channel.clone()); + + let request = tonic::Request::new(QueryUnreceivedPacketsRequest { + port_id: port_id.to_string(), + channel_id: channel_id.to_string(), + packet_commitment_sequences: sequences, + }); + + let response = client.unreceived_packets(request).await?.into_inner(); + + Ok(prost::Message::encode_to_vec(&response)) + } + + /// Query unreceived acknowledgements + pub async fn query_unreceived_acknowledgements( + &self, + port_id: &str, + channel_id: &str, + sequences: Vec, + ) -> Result, Error> { + let mut client = ChannelQueryClient::new(self.channel.clone()); + + let request = tonic::Request::new(QueryUnreceivedAcksRequest { + port_id: port_id.to_string(), + channel_id: channel_id.to_string(), + packet_ack_sequences: sequences, + }); + + let response = client.unreceived_acks(request).await?.into_inner(); + + Ok(prost::Message::encode_to_vec(&response)) + } + + /// Query next sequence receive for a channel + pub async fn query_next_sequence_receive( + &self, + port_id: &str, + channel_id: &str, + ) -> Result, Error> { + let mut client = ChannelQueryClient::new(self.channel.clone()); + + let request = tonic::Request::new(QueryNextSequenceReceiveRequest { + port_id: port_id.to_string(), + channel_id: channel_id.to_string(), + }); + + let response = client.next_sequence_receive(request).await?.into_inner(); + + Ok(prost::Message::encode_to_vec(&response)) + } + + /// Build unsigned transaction for IBC message via Gateway + /// Gateway returns CBOR hex that Hermes will sign + /// + /// This method routes IBC messages to the appropriate Gateway Msg service based on the type_url. 
+ /// The type_url format is: "/ibc.core.{module}.v1.Msg{Operation}" + pub async fn build_ibc_tx( + &self, + type_url: &str, + message_data: Vec, + ) -> Result { + tracing::info!( + "Building unsigned transaction for message type: {}", + type_url + ); + + // Route based on type_url + match type_url { + // IBC Client messages + "/ibc.core.client.v1.MsgCreateClient" => { + self.build_create_client_tx(message_data).await + } + "/ibc.core.client.v1.MsgUpdateClient" => { + self.build_update_client_tx(message_data).await + } + + // IBC Connection messages + "/ibc.core.connection.v1.MsgConnectionOpenInit" => { + self.build_connection_open_init_tx(message_data).await + } + "/ibc.core.connection.v1.MsgConnectionOpenTry" => { + self.build_connection_open_try_tx(message_data).await + } + "/ibc.core.connection.v1.MsgConnectionOpenAck" => { + self.build_connection_open_ack_tx(message_data).await + } + "/ibc.core.connection.v1.MsgConnectionOpenConfirm" => { + self.build_connection_open_confirm_tx(message_data).await + } + + // IBC Channel messages + "/ibc.core.channel.v1.MsgChannelOpenInit" => { + self.build_channel_open_init_tx(message_data).await + } + "/ibc.core.channel.v1.MsgChannelOpenTry" => { + self.build_channel_open_try_tx(message_data).await + } + "/ibc.core.channel.v1.MsgChannelOpenAck" => { + self.build_channel_open_ack_tx(message_data).await + } + "/ibc.core.channel.v1.MsgChannelOpenConfirm" => { + self.build_channel_open_confirm_tx(message_data).await + } + "/ibc.core.channel.v1.MsgChannelCloseInit" => { + self.build_channel_close_init_tx(message_data).await + } + "/ibc.core.channel.v1.MsgChannelCloseConfirm" => { + self.build_channel_close_confirm_tx(message_data).await + } + + // IBC Packet messages + "/ibc.core.channel.v1.MsgRecvPacket" => self.build_recv_packet_tx(message_data).await, + "/ibc.core.channel.v1.MsgAcknowledgement" => { + self.build_acknowledgement_tx(message_data).await + } + "/ibc.core.channel.v1.MsgTimeout" => 
self.build_timeout_tx(message_data).await, + "/ibc.core.channel.v1.MsgTimeoutOnClose" => { + self.build_timeout_on_close_tx(message_data).await + } + + // IBC Transfer messages + "/ibc.applications.transfer.v1.MsgTransfer" => { + self.build_transfer_tx(message_data).await + } + + // Unknown message type + _ => { + tracing::error!("Unsupported message type: {}", type_url); + Err(Error::Transaction(format!( + "Unsupported message type: {}", + type_url + ))) + } + } + } + + // + // Helper methods for building each message type + // + + async fn build_create_client_tx(&self, message_data: Vec) -> Result { + use super::generated::ibc::core::client::v1::MsgCreateClient; + use prost::Message; + + let msg = MsgCreateClient::decode(&message_data[..]) + .map_err(|e| Error::Transaction(format!("Failed to decode MsgCreateClient: {}", e)))?; + + let mut client = GenClientMsgClient::new(self.channel.clone()); + let request = tonic::Request::new(msg); + + let response = client.create_client(request).await?.into_inner(); + + // Extract unsigned CBOR from response + // Gateway returns unsigned_tx as google.protobuf.Any with CBOR hex in the value field + let unsigned_tx_any = response.unsigned_tx.ok_or_else(|| { + Error::Transaction("No unsigned_tx in CreateClient response".to_string()) + })?; + + // The value field contains the CBOR hex string + let cbor_hex = String::from_utf8(unsigned_tx_any.value) + .map_err(|e| Error::Transaction(format!("Invalid UTF-8 in unsigned_tx: {}", e)))?; + + tracing::info!( + "CreateClient: received unsigned CBOR (length: {}), client_id: {}", + cbor_hex.len(), + response.client_id + ); + + Ok(UnsignedTx { + cbor_hex, + description: format!("MsgCreateClient (client_id: {})", response.client_id), + }) + } + + async fn build_update_client_tx(&self, message_data: Vec) -> Result { + use super::generated::ibc::core::client::v1::MsgUpdateClient; + use prost::Message; + + let msg = MsgUpdateClient::decode(&message_data[..]) + .map_err(|e| 
Error::Transaction(format!("Failed to decode MsgUpdateClient: {}", e)))?; + + let client_id = msg.client_id.clone(); + + let mut client = GenClientMsgClient::new(self.channel.clone()); + let request = tonic::Request::new(msg); + + let response = client.update_client(request).await?.into_inner(); + + let unsigned_tx_any = response.unsigned_tx.ok_or_else(|| { + Error::Transaction("No unsigned_tx in UpdateClient response".to_string()) + })?; + + let cbor_hex = String::from_utf8(unsigned_tx_any.value) + .map_err(|e| Error::Transaction(format!("Invalid UTF-8 in unsigned_tx: {}", e)))?; + + tracing::info!( + "UpdateClient: received unsigned CBOR (length: {}), client_id: {}", + cbor_hex.len(), + client_id + ); + + Ok(UnsignedTx { + cbor_hex, + description: format!("MsgUpdateClient (client_id: {})", client_id), + }) + } + + async fn build_connection_open_init_tx( + &self, + message_data: Vec, + ) -> Result { + use super::generated::ibc::core::connection::v1::MsgConnectionOpenInit; + use prost::Message; + + let msg = MsgConnectionOpenInit::decode(&message_data[..]).map_err(|e| { + Error::Transaction(format!("Failed to decode MsgConnectionOpenInit: {}", e)) + })?; + + let client_id = msg.client_id.clone(); + + let mut client = GenConnectionMsgClient::new(self.channel.clone()); + let request = tonic::Request::new(msg); + + let response = client.connection_open_init(request).await?.into_inner(); + + let unsigned_tx_any = response.unsigned_tx.ok_or_else(|| { + Error::Transaction("No unsigned_tx in ConnectionOpenInit response".to_string()) + })?; + + let cbor_hex = String::from_utf8(unsigned_tx_any.value) + .map_err(|e| Error::Transaction(format!("Invalid UTF-8 in unsigned_tx: {}", e)))?; + + tracing::info!( + "ConnectionOpenInit: received unsigned CBOR (length: {}), client_id: {}", + cbor_hex.len(), + client_id + ); + + Ok(UnsignedTx { + cbor_hex, + description: format!("MsgConnectionOpenInit (client_id: {})", client_id), + }) + } + + async fn build_connection_open_try_tx( + 
&self, + message_data: Vec, + ) -> Result { + use super::generated::ibc::core::connection::v1::MsgConnectionOpenTry; + use prost::Message; + + let msg = MsgConnectionOpenTry::decode(&message_data[..]).map_err(|e| { + Error::Transaction(format!("Failed to decode MsgConnectionOpenTry: {}", e)) + })?; + + let client_id = msg.client_id.clone(); + + let mut client = GenConnectionMsgClient::new(self.channel.clone()); + let request = tonic::Request::new(msg); + + let response = client.connection_open_try(request).await?.into_inner(); + + let unsigned_tx_any = response.unsigned_tx.ok_or_else(|| { + Error::Transaction("No unsigned_tx in ConnectionOpenTry response".to_string()) + })?; + + let cbor_hex = String::from_utf8(unsigned_tx_any.value) + .map_err(|e| Error::Transaction(format!("Invalid UTF-8 in unsigned_tx: {}", e)))?; + + tracing::info!( + "ConnectionOpenTry: received unsigned CBOR (length: {}), client_id: {}", + cbor_hex.len(), + client_id + ); + + Ok(UnsignedTx { + cbor_hex, + description: format!("MsgConnectionOpenTry (client_id: {})", client_id), + }) + } + + async fn build_connection_open_ack_tx( + &self, + message_data: Vec, + ) -> Result { + use super::generated::ibc::core::connection::v1::MsgConnectionOpenAck; + use prost::Message; + + let msg = MsgConnectionOpenAck::decode(&message_data[..]).map_err(|e| { + Error::Transaction(format!("Failed to decode MsgConnectionOpenAck: {}", e)) + })?; + + let connection_id = msg.connection_id.clone(); + + let mut client = GenConnectionMsgClient::new(self.channel.clone()); + let request = tonic::Request::new(msg); + + let response = client.connection_open_ack(request).await?.into_inner(); + + let unsigned_tx_any = response.unsigned_tx.ok_or_else(|| { + Error::Transaction("No unsigned_tx in ConnectionOpenAck response".to_string()) + })?; + + let cbor_hex = String::from_utf8(unsigned_tx_any.value) + .map_err(|e| Error::Transaction(format!("Invalid UTF-8 in unsigned_tx: {}", e)))?; + + tracing::info!( + "ConnectionOpenAck: 
received unsigned CBOR (length: {}), connection_id: {}", + cbor_hex.len(), + connection_id + ); + + Ok(UnsignedTx { + cbor_hex, + description: format!("MsgConnectionOpenAck (connection_id: {})", connection_id), + }) + } + + async fn build_connection_open_confirm_tx( + &self, + message_data: Vec, + ) -> Result { + use super::generated::ibc::core::connection::v1::MsgConnectionOpenConfirm; + use prost::Message; + + let msg = MsgConnectionOpenConfirm::decode(&message_data[..]).map_err(|e| { + Error::Transaction(format!("Failed to decode MsgConnectionOpenConfirm: {}", e)) + })?; + + let connection_id = msg.connection_id.clone(); + + let mut client = GenConnectionMsgClient::new(self.channel.clone()); + let request = tonic::Request::new(msg); + + let response = client.connection_open_confirm(request).await?.into_inner(); + + let unsigned_tx_any = response.unsigned_tx.ok_or_else(|| { + Error::Transaction("No unsigned_tx in ConnectionOpenConfirm response".to_string()) + })?; + + let cbor_hex = String::from_utf8(unsigned_tx_any.value) + .map_err(|e| Error::Transaction(format!("Invalid UTF-8 in unsigned_tx: {}", e)))?; + + tracing::info!( + "ConnectionOpenConfirm: received unsigned CBOR (length: {}), connection_id: {}", + cbor_hex.len(), + connection_id + ); + + Ok(UnsignedTx { + cbor_hex, + description: format!( + "MsgConnectionOpenConfirm (connection_id: {})", + connection_id + ), + }) + } + + async fn build_channel_open_init_tx(&self, message_data: Vec) -> Result { + use super::generated::ibc::core::channel::v1::MsgChannelOpenInit; + use prost::Message; + + let msg = MsgChannelOpenInit::decode(&message_data[..]).map_err(|e| { + Error::Transaction(format!("Failed to decode MsgChannelOpenInit: {}", e)) + })?; + + let port_id = msg.port_id.clone(); + + let mut client = GenChannelMsgClient::new(self.channel.clone()); + let request = tonic::Request::new(msg); + + let response = client.channel_open_init(request).await?.into_inner(); + + let unsigned_tx_any = 
response.unsigned_tx.ok_or_else(|| { + Error::Transaction("No unsigned_tx in ChannelOpenInit response".to_string()) + })?; + + let cbor_hex = String::from_utf8(unsigned_tx_any.value) + .map_err(|e| Error::Transaction(format!("Invalid UTF-8 in unsigned_tx: {}", e)))?; + + tracing::info!( + "ChannelOpenInit: received unsigned CBOR (length: {}), port_id: {}, channel_id: {}", + cbor_hex.len(), + port_id, + response.channel_id + ); + + Ok(UnsignedTx { + cbor_hex, + description: format!( + "MsgChannelOpenInit (port: {}, channel: {})", + port_id, response.channel_id + ), + }) + } + + async fn build_channel_open_try_tx(&self, message_data: Vec) -> Result { + use super::generated::ibc::core::channel::v1::MsgChannelOpenTry; + use prost::Message; + + let msg = MsgChannelOpenTry::decode(&message_data[..]).map_err(|e| { + Error::Transaction(format!("Failed to decode MsgChannelOpenTry: {}", e)) + })?; + + let port_id = msg.port_id.clone(); + + let mut client = GenChannelMsgClient::new(self.channel.clone()); + let request = tonic::Request::new(msg); + + let response = client.channel_open_try(request).await?.into_inner(); + + let unsigned_tx_any = response.unsigned_tx.ok_or_else(|| { + Error::Transaction("No unsigned_tx in ChannelOpenTry response".to_string()) + })?; + + let cbor_hex = String::from_utf8(unsigned_tx_any.value) + .map_err(|e| Error::Transaction(format!("Invalid UTF-8 in unsigned_tx: {}", e)))?; + + tracing::info!( + "ChannelOpenTry: received unsigned CBOR (length: {}), port_id: {}", + cbor_hex.len(), + port_id + ); + + Ok(UnsignedTx { + cbor_hex, + description: format!("MsgChannelOpenTry (port: {})", port_id), + }) + } + + async fn build_channel_open_ack_tx(&self, message_data: Vec) -> Result { + use super::generated::ibc::core::channel::v1::MsgChannelOpenAck; + use prost::Message; + + let msg = MsgChannelOpenAck::decode(&message_data[..]).map_err(|e| { + Error::Transaction(format!("Failed to decode MsgChannelOpenAck: {}", e)) + })?; + + let port_id = 
msg.port_id.clone(); + let channel_id = msg.channel_id.clone(); + + let mut client = GenChannelMsgClient::new(self.channel.clone()); + let request = tonic::Request::new(msg); + + let response = client.channel_open_ack(request).await?.into_inner(); + + let unsigned_tx_any = response.unsigned_tx.ok_or_else(|| { + Error::Transaction("No unsigned_tx in ChannelOpenAck response".to_string()) + })?; + + let cbor_hex = String::from_utf8(unsigned_tx_any.value) + .map_err(|e| Error::Transaction(format!("Invalid UTF-8 in unsigned_tx: {}", e)))?; + + tracing::info!( + "ChannelOpenAck: received unsigned CBOR (length: {}), port_id: {}, channel_id: {}", + cbor_hex.len(), + port_id, + channel_id + ); + + Ok(UnsignedTx { + cbor_hex, + description: format!( + "MsgChannelOpenAck (port: {}, channel: {})", + port_id, channel_id + ), + }) + } + + async fn build_channel_open_confirm_tx( + &self, + message_data: Vec, + ) -> Result { + use super::generated::ibc::core::channel::v1::MsgChannelOpenConfirm; + use prost::Message; + + let msg = MsgChannelOpenConfirm::decode(&message_data[..]).map_err(|e| { + Error::Transaction(format!("Failed to decode MsgChannelOpenConfirm: {}", e)) + })?; + + let port_id = msg.port_id.clone(); + let channel_id = msg.channel_id.clone(); + + let mut client = GenChannelMsgClient::new(self.channel.clone()); + let request = tonic::Request::new(msg); + + let response = client.channel_open_confirm(request).await?.into_inner(); + + let unsigned_tx_any = response.unsigned_tx.ok_or_else(|| { + Error::Transaction("No unsigned_tx in ChannelOpenConfirm response".to_string()) + })?; + + let cbor_hex = String::from_utf8(unsigned_tx_any.value) + .map_err(|e| Error::Transaction(format!("Invalid UTF-8 in unsigned_tx: {}", e)))?; + + tracing::info!( + "ChannelOpenConfirm: received unsigned CBOR (length: {}), port_id: {}, channel_id: {}", + cbor_hex.len(), + port_id, + channel_id + ); + + Ok(UnsignedTx { + cbor_hex, + description: format!( + "MsgChannelOpenConfirm (port: {}, 
channel: {})", + port_id, channel_id + ), + }) + } + + async fn build_channel_close_init_tx( + &self, + message_data: Vec, + ) -> Result { + use super::generated::ibc::core::channel::v1::MsgChannelCloseInit; + use prost::Message; + + let msg = MsgChannelCloseInit::decode(&message_data[..]).map_err(|e| { + Error::Transaction(format!("Failed to decode MsgChannelCloseInit: {}", e)) + })?; + + let port_id = msg.port_id.clone(); + let channel_id = msg.channel_id.clone(); + + let mut client = GenChannelMsgClient::new(self.channel.clone()); + let request = tonic::Request::new(msg); + + let response = client.channel_close_init(request).await?.into_inner(); + + let unsigned_tx_any = response.unsigned_tx.ok_or_else(|| { + Error::Transaction("No unsigned_tx in ChannelCloseInit response".to_string()) + })?; + + let cbor_hex = String::from_utf8(unsigned_tx_any.value) + .map_err(|e| Error::Transaction(format!("Invalid UTF-8 in unsigned_tx: {}", e)))?; + + tracing::info!( + "ChannelCloseInit: received unsigned CBOR (length: {}), port_id: {}, channel_id: {}", + cbor_hex.len(), + port_id, + channel_id + ); + + Ok(UnsignedTx { + cbor_hex, + description: format!( + "MsgChannelCloseInit (port: {}, channel: {})", + port_id, channel_id + ), + }) + } + + async fn build_channel_close_confirm_tx( + &self, + message_data: Vec, + ) -> Result { + use super::generated::ibc::core::channel::v1::MsgChannelCloseConfirm; + use prost::Message; + + let msg = MsgChannelCloseConfirm::decode(&message_data[..]).map_err(|e| { + Error::Transaction(format!("Failed to decode MsgChannelCloseConfirm: {}", e)) + })?; + + let port_id = msg.port_id.clone(); + let channel_id = msg.channel_id.clone(); + + let mut client = GenChannelMsgClient::new(self.channel.clone()); + let request = tonic::Request::new(msg); + + let response = client.channel_close_confirm(request).await?.into_inner(); + + let unsigned_tx_any = response.unsigned_tx.ok_or_else(|| { + Error::Transaction("No unsigned_tx in ChannelCloseConfirm 
response".to_string()) + })?; + + let cbor_hex = String::from_utf8(unsigned_tx_any.value) + .map_err(|e| Error::Transaction(format!("Invalid UTF-8 in unsigned_tx: {}", e)))?; + + tracing::info!( + "ChannelCloseConfirm: received unsigned CBOR (length: {}), port_id: {}, channel_id: {}", + cbor_hex.len(), + port_id, + channel_id + ); + + Ok(UnsignedTx { + cbor_hex, + description: format!( + "MsgChannelCloseConfirm (port: {}, channel: {})", + port_id, channel_id + ), + }) + } + + async fn build_recv_packet_tx(&self, message_data: Vec) -> Result { + use super::generated::ibc::core::channel::v1::MsgRecvPacket; + use prost::Message; + + let msg = MsgRecvPacket::decode(&message_data[..]) + .map_err(|e| Error::Transaction(format!("Failed to decode MsgRecvPacket: {}", e)))?; + + let sequence = msg.packet.as_ref().map(|p| p.sequence).unwrap_or(0); + + let mut client = GenChannelMsgClient::new(self.channel.clone()); + let request = tonic::Request::new(msg); + + let response = client.recv_packet(request).await?.into_inner(); + + let unsigned_tx_any = response.unsigned_tx.ok_or_else(|| { + Error::Transaction("No unsigned_tx in RecvPacket response".to_string()) + })?; + + let cbor_hex = String::from_utf8(unsigned_tx_any.value) + .map_err(|e| Error::Transaction(format!("Invalid UTF-8 in unsigned_tx: {}", e)))?; + + tracing::info!( + "RecvPacket: received unsigned CBOR (length: {}), sequence: {}", + cbor_hex.len(), + sequence + ); + + Ok(UnsignedTx { + cbor_hex, + description: format!("MsgRecvPacket (sequence: {})", sequence), + }) + } + + async fn build_acknowledgement_tx(&self, message_data: Vec) -> Result { + use super::generated::ibc::core::channel::v1::MsgAcknowledgement; + use prost::Message; + + let msg = MsgAcknowledgement::decode(&message_data[..]).map_err(|e| { + Error::Transaction(format!("Failed to decode MsgAcknowledgement: {}", e)) + })?; + + let sequence = msg.packet.as_ref().map(|p| p.sequence).unwrap_or(0); + + let mut client = 
GenChannelMsgClient::new(self.channel.clone()); + let request = tonic::Request::new(msg); + + let response = client.acknowledgement(request).await?.into_inner(); + + let unsigned_tx_any = response.unsigned_tx.ok_or_else(|| { + Error::Transaction("No unsigned_tx in Acknowledgement response".to_string()) + })?; + + let cbor_hex = String::from_utf8(unsigned_tx_any.value) + .map_err(|e| Error::Transaction(format!("Invalid UTF-8 in unsigned_tx: {}", e)))?; + + tracing::info!( + "Acknowledgement: received unsigned CBOR (length: {}), sequence: {}", + cbor_hex.len(), + sequence + ); + + Ok(UnsignedTx { + cbor_hex, + description: format!("MsgAcknowledgement (sequence: {})", sequence), + }) + } + + async fn build_transfer_tx(&self, message_data: Vec) -> Result { + use ibc_proto::ibc::applications::transfer::v1::MsgTransfer; + use prost::Message; + + let msg = MsgTransfer::decode(&message_data[..]) + .map_err(|e| Error::Transaction(format!("Failed to decode MsgTransfer: {}", e)))?; + + let token = match msg.token { + Some(coin) => { + let amount: u64 = coin.amount.parse().map_err(|e| { + Error::Transaction(format!( + "Invalid token amount in MsgTransfer (expected u64): {}", + e + )) + })?; + Some(super::generated::ibc::core::channel::v1::Coin { + denom: coin.denom, + amount, + }) + } + None => None, + }; + + let timeout_height = msg.timeout_height.map(|height| { + super::generated::ibc::core::client::v1::Height { + revision_number: height.revision_number, + revision_height: height.revision_height, + } + }); + + // The Gateway expects MsgTransfer under `ibc.core.channel.v1` and includes a `signer` + // field. In canonical IBC, the sender is the signer for MsgTransfer. 
+ let sender = msg.sender; + + let gateway_msg = super::generated::ibc::core::channel::v1::MsgTransfer { + source_port: msg.source_port, + source_channel: msg.source_channel.clone(), + token, + sender: sender.clone(), + receiver: msg.receiver, + timeout_height, + timeout_timestamp: msg.timeout_timestamp, + memo: msg.memo, + signer: sender, + }; + + let mut client = GenChannelMsgClient::new(self.channel.clone()); + let request = tonic::Request::new(gateway_msg); + + let response = client.transfer(request).await?.into_inner(); + + let unsigned_tx_any = response.unsigned_tx.ok_or_else(|| { + Error::Transaction("No unsigned_tx in Transfer response".to_string()) + })?; + + let cbor_hex = String::from_utf8(unsigned_tx_any.value) + .map_err(|e| Error::Transaction(format!("Invalid UTF-8 in unsigned_tx: {}", e)))?; + + tracing::info!( + "Transfer: received unsigned CBOR (length: {}), source_channel: {}", + cbor_hex.len(), + msg.source_channel + ); + + Ok(UnsignedTx { + cbor_hex, + description: format!("MsgTransfer (channel: {})", msg.source_channel), + }) + } + + async fn build_timeout_tx(&self, message_data: Vec) -> Result { + use super::generated::ibc::core::channel::v1::MsgTimeout; + use prost::Message; + + let msg = MsgTimeout::decode(&message_data[..]) + .map_err(|e| Error::Transaction(format!("Failed to decode MsgTimeout: {}", e)))?; + + let sequence = msg.packet.as_ref().map(|p| p.sequence).unwrap_or(0); + + let mut client = GenChannelMsgClient::new(self.channel.clone()); + let request = tonic::Request::new(msg); + + let response = client.timeout(request).await?.into_inner(); + + let unsigned_tx_any = response + .unsigned_tx + .ok_or_else(|| Error::Transaction("No unsigned_tx in Timeout response".to_string()))?; + + let cbor_hex = String::from_utf8(unsigned_tx_any.value) + .map_err(|e| Error::Transaction(format!("Invalid UTF-8 in unsigned_tx: {}", e)))?; + + tracing::info!( + "Timeout: received unsigned CBOR (length: {}), sequence: {}", + cbor_hex.len(), + sequence + 
); + + Ok(UnsignedTx { + cbor_hex, + description: format!("MsgTimeout (sequence: {})", sequence), + }) + } + + async fn build_timeout_on_close_tx(&self, message_data: Vec) -> Result { + use super::generated::ibc::core::channel::v1::MsgTimeoutOnClose; + use prost::Message; + + let msg = MsgTimeoutOnClose::decode(&message_data[..]).map_err(|e| { + Error::Transaction(format!("Failed to decode MsgTimeoutOnClose: {}", e)) + })?; + + let sequence = msg.packet.as_ref().map(|p| p.sequence).unwrap_or(0); + + let mut client = GenChannelMsgClient::new(self.channel.clone()); + let request = tonic::Request::new(msg); + + let response = client.timeout_on_close(request).await?.into_inner(); + + let unsigned_tx_any = response.unsigned_tx.ok_or_else(|| { + Error::Transaction("No unsigned_tx in TimeoutOnClose response".to_string()) + })?; + + let cbor_hex = String::from_utf8(unsigned_tx_any.value) + .map_err(|e| Error::Transaction(format!("Invalid UTF-8 in unsigned_tx: {}", e)))?; + + tracing::info!( + "TimeoutOnClose: received unsigned CBOR (length: {}), sequence: {}", + cbor_hex.len(), + sequence + ); + + Ok(UnsignedTx { + cbor_hex, + description: format!("MsgTimeoutOnClose (sequence: {})", sequence), + }) + } + + /// Submit a signed transaction to the Cardano blockchain via Gateway + pub async fn submit_signed_tx(&self, signed_tx_cbor: &str) -> Result { + tracing::info!( + "Submitting signed transaction (CBOR length: {})", + signed_tx_cbor.len() + ); + + let mut client = CardanoMsgClient::new(self.channel.clone()); + + let request = tonic::Request::new(SubmitSignedTxRequest { + signed_tx_cbor: signed_tx_cbor.to_string(), + description: "Hermes IBC transaction".to_string(), + }); + + let response: SubmitSignedTxResponse = client.submit_signed_tx(request).await?.into_inner(); + + // Parse height if present + let height = if !response.height.is_empty() { + let parts: Vec<&str> = response.height.split('-').collect(); + if parts.len() == 2 { + let revision_number: u64 = 
parts[0].parse().unwrap_or(0); + let revision_height: u64 = parts[1].parse().unwrap_or(0); + Height::new(revision_number, revision_height).ok() + } else { + None + } + } else { + None + }; + + // Convert proto events to IbcEvent + let events = response + .events + .into_iter() + .map(|e| IbcEvent { + event_type: e.r#type, + attributes: e.attributes.into_iter().map(|a| (a.key, a.value)).collect(), + }) + .collect(); + + Ok(TxSubmitResponse { + tx_hash: response.tx_hash, + height, + events, + }) + } + + /// Get the Gateway endpoint URL + pub fn endpoint(&self) -> &str { + &self.endpoint + } + + /// Fetch a Mithril certificate for a specific chain point + /// + /// This should query the Gateway's Mithril aggregator endpoint to get: + /// 1. The latest Mithril certificate covering the requested slot/epoch + /// 2. The certificate chain back to genesis (if needed) + /// 3. The multi-signature proof + /// + /// The certificate is used by the light client to verify Cardano block headers + /// without needing to sync the full chain. 
+ /// + /// TODO: Add custom proto for Mithril certificate query + /// TODO: Implement certificate chain verification + /// TODO: Cache certificates to avoid redundant queries + pub async fn fetch_mithril_certificate(&self, slot: u64, epoch: u64) -> Result, Error> { + tracing::info!( + "Fetching Mithril certificate for slot={}, epoch={}", + slot, + epoch + ); + + // Stub implementation - requires custom Mithril proto + tracing::warn!( + "fetch_mithril_certificate: requires custom proto for Mithril aggregator endpoint" + ); + Ok(vec![]) + } + + /// Query block header at a specific height + pub async fn query_block_header(&self, _height: Height) -> Result, Error> { + // TODO: Implement block header query + tracing::warn!("query_block_header: stub implementation"); + Ok(vec![]) + } + + /// Query IBC events since a given height + /// Returns events grouped by block height + pub async fn query_events( + &self, + since_height: Height, + ) -> Result { + use super::generated::ibc::cardano::v1::{query_client::QueryClient, QueryEventsRequest}; + + tracing::debug!("Querying events since height: {}", since_height); + + let mut client = QueryClient::new(self.channel.clone()); + let request = tonic::Request::new(QueryEventsRequest { + since_height: since_height.revision_height(), + }); + + let response = client.events(request).await?.into_inner(); + + tracing::debug!( + "Received {} block events, current height: {}", + response.events.len(), + response.current_height + ); + + Ok(response) + } +} diff --git a/crates/relayer/src/chain/cardano/generated/cosmos.base.query.v1beta1.rs b/crates/relayer/src/chain/cardano/generated/cosmos.base.query.v1beta1.rs new file mode 100644 index 0000000000..22ce08b530 --- /dev/null +++ b/crates/relayer/src/chain/cardano/generated/cosmos.base.query.v1beta1.rs @@ -0,0 +1,55 @@ +// This file is @generated by prost-build. +/// PageRequest is to be embedded in gRPC request messages for efficient +/// pagination. 
Ex:
///
/// message SomeRequest {
/// Foo some_parameter = 1;
/// PageRequest pagination = 2;
/// }
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct PageRequest {
    /// key is a value returned in PageResponse.next_key to begin
    /// querying the next page most efficiently. Only one of offset or key
    /// should be set.
    #[prost(bytes = "vec", tag = "1")]
    pub key: ::prost::alloc::vec::Vec<u8>,
    /// offset is a numeric offset that can be used when key is unavailable.
    /// It is less efficient than using key. Only one of offset or key should
    /// be set.
    #[prost(uint64, tag = "2")]
    pub offset: u64,
    /// limit is the total number of results to be returned in the result page.
    /// If left empty it will default to a value to be set by each app.
    #[prost(uint64, tag = "3")]
    pub limit: u64,
    /// count_total is set to true to indicate that the result set should include
    /// a count of the total number of items available for pagination in UIs.
    /// count_total is only respected when offset is used. It is ignored when key
    /// is set.
    #[prost(bool, tag = "4")]
    pub count_total: bool,
    /// reverse is set to true if results are to be returned in the descending order.
    ///
    /// Since: cosmos-sdk 0.43
    #[prost(bool, tag = "5")]
    pub reverse: bool,
}
/// PageResponse is to be embedded in gRPC response messages where the
/// corresponding request message has used PageRequest.
///
/// message SomeResponse {
/// repeated Bar results = 1;
/// PageResponse page = 2;
/// }
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct PageResponse {
    /// next_key is the key to be passed to PageRequest.key to
    /// query the next page most efficiently. It will be empty if
    /// there are no more results.
    #[prost(bytes = "vec", tag = "1")]
    pub next_key: ::prost::alloc::vec::Vec<u8>,
    /// total is total number of results available if PageRequest.count_total
    /// was set, its value is undefined otherwise
    #[prost(uint64, tag = "2")]
    pub total: u64,
}
diff --git a/crates/relayer/src/chain/cardano/generated/cosmos.ics23.v1.rs b/crates/relayer/src/chain/cardano/generated/cosmos.ics23.v1.rs
new file mode 100644
index 0000000000..ce56951571
--- /dev/null
+++ b/crates/relayer/src/chain/cardano/generated/cosmos.ics23.v1.rs
@@ -0,0 +1,349 @@
// This file is @generated by prost-build.
/// *
/// ExistenceProof takes a key and a value and a set of steps to perform on it.
/// The result of peforming all these steps will provide a "root hash", which can
/// be compared to the value in a header.
///
/// Since it is computationally infeasible to produce a hash collission for any of the used
/// cryptographic hash functions, if someone can provide a series of operations to transform
/// a given key and value into a root hash that matches some trusted root, these key and values
/// must be in the referenced merkle tree.
///
/// The only possible issue is maliablity in LeafOp, such as providing extra prefix data,
/// which should be controlled by a spec. Eg. with lengthOp as NONE,
/// prefix = FOO, key = BAR, value = CHOICE
/// and
/// prefix = F, key = OOBAR, value = CHOICE
/// would produce the same value.
///
/// With LengthOp this is tricker but not impossible. Which is why the "leafPrefixEqual" field
/// in the ProofSpec is valuable to prevent this mutability. And why all trees should
/// length-prefix the data before hashing it.
+#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ExistenceProof { + #[prost(bytes = "vec", tag = "1")] + pub key: ::prost::alloc::vec::Vec, + #[prost(bytes = "vec", tag = "2")] + pub value: ::prost::alloc::vec::Vec, + #[prost(message, optional, tag = "3")] + pub leaf: ::core::option::Option, + #[prost(message, repeated, tag = "4")] + pub path: ::prost::alloc::vec::Vec, +} +/// +/// NonExistenceProof takes a proof of two neighbors, one left of the desired key, +/// one right of the desired key. If both proofs are valid AND they are neighbors, +/// then there is no valid proof for the given key. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct NonExistenceProof { + /// TODO: remove this as unnecessary??? we prove a range + #[prost(bytes = "vec", tag = "1")] + pub key: ::prost::alloc::vec::Vec, + #[prost(message, optional, tag = "2")] + pub left: ::core::option::Option, + #[prost(message, optional, tag = "3")] + pub right: ::core::option::Option, +} +/// +/// CommitmentProof is either an ExistenceProof or a NonExistenceProof, or a Batch of such messages +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct CommitmentProof { + #[prost(oneof = "commitment_proof::Proof", tags = "1, 2, 3, 4")] + pub proof: ::core::option::Option, +} +/// Nested message and enum types in `CommitmentProof`. +pub mod commitment_proof { + #[derive(Clone, PartialEq, ::prost::Oneof)] + pub enum Proof { + #[prost(message, tag = "1")] + Exist(super::ExistenceProof), + #[prost(message, tag = "2")] + Nonexist(super::NonExistenceProof), + #[prost(message, tag = "3")] + Batch(super::BatchProof), + #[prost(message, tag = "4")] + Compressed(super::CompressedBatchProof), + } +} +/// * +/// LeafOp represents the raw key-value data we wish to prove, and +/// must be flexible to represent the internal transformation from +/// the original key-value pairs into the basis hash, for many existing +/// merkle trees. +/// +/// key and value are passed in. 
So that the signature of this operation is: +/// leafOp(key, value) -> output +/// +/// To process this, first prehash the keys and values if needed (ANY means no hash in this case): +/// hkey = prehashKey(key) +/// hvalue = prehashValue(value) +/// +/// Then combine the bytes, and hash it +/// output = hash(prefix || length(hkey) || hkey || length(hvalue) || hvalue) +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct LeafOp { + #[prost(enumeration = "HashOp", tag = "1")] + pub hash: i32, + #[prost(enumeration = "HashOp", tag = "2")] + pub prehash_key: i32, + #[prost(enumeration = "HashOp", tag = "3")] + pub prehash_value: i32, + #[prost(enumeration = "LengthOp", tag = "4")] + pub length: i32, + /// prefix is a fixed bytes that may optionally be included at the beginning to differentiate + /// a leaf node from an inner node. + #[prost(bytes = "vec", tag = "5")] + pub prefix: ::prost::alloc::vec::Vec, +} +/// * +/// InnerOp represents a merkle-proof step that is not a leaf. +/// It represents concatenating two children and hashing them to provide the next result. +/// +/// The result of the previous step is passed in, so the signature of this op is: +/// innerOp(child) -> output +/// +/// The result of applying InnerOp should be: +/// output = op.hash(op.prefix || child || op.suffix) +/// +/// where the || operator is concatenation of binary data, +/// and child is the result of hashing all the tree below this step. +/// +/// Any special data, like prepending child with the length, or prepending the entire operation with +/// some value to differentiate from leaf nodes, should be included in prefix and suffix. 
+/// If either of prefix or suffix is empty, we just treat it as an empty string +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct InnerOp { + #[prost(enumeration = "HashOp", tag = "1")] + pub hash: i32, + #[prost(bytes = "vec", tag = "2")] + pub prefix: ::prost::alloc::vec::Vec, + #[prost(bytes = "vec", tag = "3")] + pub suffix: ::prost::alloc::vec::Vec, +} +/// * +/// ProofSpec defines what the expected parameters are for a given proof type. +/// This can be stored in the client and used to validate any incoming proofs. +/// +/// verify(ProofSpec, Proof) -> Proof | Error +/// +/// As demonstrated in tests, if we don't fix the algorithm used to calculate the +/// LeafHash for a given tree, there are many possible key-value pairs that can +/// generate a given hash (by interpretting the preimage differently). +/// We need this for proper security, requires client knows a priori what +/// tree format server uses. But not in code, rather a configuration object. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ProofSpec { + /// any field in the ExistenceProof must be the same as in this spec. + /// except Prefix, which is just the first bytes of prefix (spec can be longer) + #[prost(message, optional, tag = "1")] + pub leaf_spec: ::core::option::Option, + #[prost(message, optional, tag = "2")] + pub inner_spec: ::core::option::Option, + /// max_depth (if > 0) is the maximum number of InnerOps allowed (mainly for fixed-depth tries) + #[prost(int32, tag = "3")] + pub max_depth: i32, + /// min_depth (if > 0) is the minimum number of InnerOps allowed (mainly for fixed-depth tries) + #[prost(int32, tag = "4")] + pub min_depth: i32, +} +/// +/// InnerSpec contains all store-specific structure info to determine if two proofs from a +/// given store are neighbors. 
+/// +/// This enables: +/// +/// isLeftMost(spec: InnerSpec, op: InnerOp) +/// isRightMost(spec: InnerSpec, op: InnerOp) +/// isLeftNeighbor(spec: InnerSpec, left: InnerOp, right: InnerOp) +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct InnerSpec { + /// Child order is the ordering of the children node, must count from 0 + /// iavl tree is \[0, 1\] (left then right) + /// merk is \[0, 2, 1\] (left, right, here) + #[prost(int32, repeated, tag = "1")] + pub child_order: ::prost::alloc::vec::Vec, + #[prost(int32, tag = "2")] + pub child_size: i32, + #[prost(int32, tag = "3")] + pub min_prefix_length: i32, + #[prost(int32, tag = "4")] + pub max_prefix_length: i32, + /// empty child is the prehash image that is used when one child is nil (eg. 20 bytes of 0) + #[prost(bytes = "vec", tag = "5")] + pub empty_child: ::prost::alloc::vec::Vec, + /// hash is the algorithm that must be used for each InnerOp + #[prost(enumeration = "HashOp", tag = "6")] + pub hash: i32, +} +/// +/// BatchProof is a group of multiple proof types than can be compressed +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct BatchProof { + #[prost(message, repeated, tag = "1")] + pub entries: ::prost::alloc::vec::Vec, +} +/// Use BatchEntry not CommitmentProof, to avoid recursion +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct BatchEntry { + #[prost(oneof = "batch_entry::Proof", tags = "1, 2")] + pub proof: ::core::option::Option, +} +/// Nested message and enum types in `BatchEntry`. 
+pub mod batch_entry { + #[derive(Clone, PartialEq, ::prost::Oneof)] + pub enum Proof { + #[prost(message, tag = "1")] + Exist(super::ExistenceProof), + #[prost(message, tag = "2")] + Nonexist(super::NonExistenceProof), + } +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct CompressedBatchProof { + #[prost(message, repeated, tag = "1")] + pub entries: ::prost::alloc::vec::Vec, + #[prost(message, repeated, tag = "2")] + pub lookup_inners: ::prost::alloc::vec::Vec, +} +/// Use BatchEntry not CommitmentProof, to avoid recursion +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct CompressedBatchEntry { + #[prost(oneof = "compressed_batch_entry::Proof", tags = "1, 2")] + pub proof: ::core::option::Option, +} +/// Nested message and enum types in `CompressedBatchEntry`. +pub mod compressed_batch_entry { + #[derive(Clone, PartialEq, ::prost::Oneof)] + pub enum Proof { + #[prost(message, tag = "1")] + Exist(super::CompressedExistenceProof), + #[prost(message, tag = "2")] + Nonexist(super::CompressedNonExistenceProof), + } +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct CompressedExistenceProof { + #[prost(bytes = "vec", tag = "1")] + pub key: ::prost::alloc::vec::Vec, + #[prost(bytes = "vec", tag = "2")] + pub value: ::prost::alloc::vec::Vec, + #[prost(message, optional, tag = "3")] + pub leaf: ::core::option::Option, + /// these are indexes into the lookup_inners table in CompressedBatchProof + #[prost(int32, repeated, tag = "4")] + pub path: ::prost::alloc::vec::Vec, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct CompressedNonExistenceProof { + /// TODO: remove this as unnecessary??? 
we prove a range + #[prost(bytes = "vec", tag = "1")] + pub key: ::prost::alloc::vec::Vec, + #[prost(message, optional, tag = "2")] + pub left: ::core::option::Option, + #[prost(message, optional, tag = "3")] + pub right: ::core::option::Option, +} +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] +#[repr(i32)] +pub enum HashOp { + /// NO_HASH is the default if no data passed. Note this is an illegal argument some places. + NoHash = 0, + Sha256 = 1, + Sha512 = 2, + Keccak = 3, + Ripemd160 = 4, + /// ripemd160(sha256(x)) + Bitcoin = 5, + Sha512256 = 6, +} +impl HashOp { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + Self::NoHash => "NO_HASH", + Self::Sha256 => "SHA256", + Self::Sha512 => "SHA512", + Self::Keccak => "KECCAK", + Self::Ripemd160 => "RIPEMD160", + Self::Bitcoin => "BITCOIN", + Self::Sha512256 => "SHA512_256", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "NO_HASH" => Some(Self::NoHash), + "SHA256" => Some(Self::Sha256), + "SHA512" => Some(Self::Sha512), + "KECCAK" => Some(Self::Keccak), + "RIPEMD160" => Some(Self::Ripemd160), + "BITCOIN" => Some(Self::Bitcoin), + "SHA512_256" => Some(Self::Sha512256), + _ => None, + } + } +} +/// * +/// LengthOp defines how to process the key and value of the LeafOp +/// to include length information. After encoding the length with the given +/// algorithm, the length will be prepended to the key and value bytes. 
+/// (Each one with it's own encoded length) +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] +#[repr(i32)] +pub enum LengthOp { + /// NO_PREFIX don't include any length info + NoPrefix = 0, + /// VAR_PROTO uses protobuf (and go-amino) varint encoding of the length + VarProto = 1, + /// VAR_RLP uses rlp int encoding of the length + VarRlp = 2, + /// FIXED32_BIG uses big-endian encoding of the length as a 32 bit integer + Fixed32Big = 3, + /// FIXED32_LITTLE uses little-endian encoding of the length as a 32 bit integer + Fixed32Little = 4, + /// FIXED64_BIG uses big-endian encoding of the length as a 64 bit integer + Fixed64Big = 5, + /// FIXED64_LITTLE uses little-endian encoding of the length as a 64 bit integer + Fixed64Little = 6, + /// REQUIRE_32_BYTES is like NONE, but will fail if the input is not exactly 32 bytes (sha256 output) + Require32Bytes = 7, + /// REQUIRE_64_BYTES is like NONE, but will fail if the input is not exactly 64 bytes (sha512 output) + Require64Bytes = 8, +} +impl LengthOp { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + Self::NoPrefix => "NO_PREFIX", + Self::VarProto => "VAR_PROTO", + Self::VarRlp => "VAR_RLP", + Self::Fixed32Big => "FIXED32_BIG", + Self::Fixed32Little => "FIXED32_LITTLE", + Self::Fixed64Big => "FIXED64_BIG", + Self::Fixed64Little => "FIXED64_LITTLE", + Self::Require32Bytes => "REQUIRE_32_BYTES", + Self::Require64Bytes => "REQUIRE_64_BYTES", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. 
+ pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "NO_PREFIX" => Some(Self::NoPrefix), + "VAR_PROTO" => Some(Self::VarProto), + "VAR_RLP" => Some(Self::VarRlp), + "FIXED32_BIG" => Some(Self::Fixed32Big), + "FIXED32_LITTLE" => Some(Self::Fixed32Little), + "FIXED64_BIG" => Some(Self::Fixed64Big), + "FIXED64_LITTLE" => Some(Self::Fixed64Little), + "REQUIRE_32_BYTES" => Some(Self::Require32Bytes), + "REQUIRE_64_BYTES" => Some(Self::Require64Bytes), + _ => None, + } + } +} diff --git a/crates/relayer/src/chain/cardano/generated/cosmos.upgrade.v1beta1.rs b/crates/relayer/src/chain/cardano/generated/cosmos.upgrade.v1beta1.rs new file mode 100644 index 0000000000..a32d15f28f --- /dev/null +++ b/crates/relayer/src/chain/cardano/generated/cosmos.upgrade.v1beta1.rs @@ -0,0 +1,74 @@ +// This file is @generated by prost-build. +/// Plan specifies information about a planned upgrade and when it should occur. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Plan { + /// Sets the name for the upgrade. This name will be used by the upgraded + /// version of the software to apply any special "on-upgrade" commands during + /// the first BeginBlock method after the upgrade is applied. It is also used + /// to detect whether a software version can handle a given upgrade. If no + /// upgrade handler with this name has been set in the software, it will be + /// assumed that the software is out-of-date when the upgrade Time or Height is + /// reached and the software will exit. + #[prost(string, tag = "1")] + pub name: ::prost::alloc::string::String, + /// Deprecated: Time based upgrades have been deprecated. Time based upgrade logic + /// has been removed from the SDK. + /// If this field is not empty, an error will be thrown. + #[deprecated] + #[prost(message, optional, tag = "2")] + pub time: ::core::option::Option<::prost_types::Timestamp>, + /// The height at which the upgrade must be performed. 
+ #[prost(int64, tag = "3")] + pub height: i64, + /// Any application specific upgrade info to be included on-chain + /// such as a git commit that validators could automatically upgrade to + #[prost(string, tag = "4")] + pub info: ::prost::alloc::string::String, + /// Deprecated: UpgradedClientState field has been deprecated. IBC upgrade logic has been + /// moved to the IBC module in the sub module 02-client. + /// If this field is not empty, an error will be thrown. + #[deprecated] + #[prost(message, optional, tag = "5")] + pub upgraded_client_state: ::core::option::Option<::prost_types::Any>, +} +/// SoftwareUpgradeProposal is a gov Content type for initiating a software +/// upgrade. +/// Deprecated: This legacy proposal is deprecated in favor of Msg-based gov +/// proposals, see MsgSoftwareUpgrade. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct SoftwareUpgradeProposal { + /// title of the proposal + #[prost(string, tag = "1")] + pub title: ::prost::alloc::string::String, + /// description of the proposal + #[prost(string, tag = "2")] + pub description: ::prost::alloc::string::String, + /// plan of the proposal + #[prost(message, optional, tag = "3")] + pub plan: ::core::option::Option, +} +/// CancelSoftwareUpgradeProposal is a gov Content type for cancelling a software +/// upgrade. +/// Deprecated: This legacy proposal is deprecated in favor of Msg-based gov +/// proposals, see MsgCancelUpgrade. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct CancelSoftwareUpgradeProposal { + /// title of the proposal + #[prost(string, tag = "1")] + pub title: ::prost::alloc::string::String, + /// description of the proposal + #[prost(string, tag = "2")] + pub description: ::prost::alloc::string::String, +} +/// ModuleVersion specifies a module and its consensus version. 
+/// +/// Since: cosmos-sdk 0.43 +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ModuleVersion { + /// name of the app module + #[prost(string, tag = "1")] + pub name: ::prost::alloc::string::String, + /// consensus version of the app module + #[prost(uint64, tag = "2")] + pub version: u64, +} diff --git a/crates/relayer/src/chain/cardano/generated/cosmos_proto.rs b/crates/relayer/src/chain/cardano/generated/cosmos_proto.rs new file mode 100644 index 0000000000..0bcaba3524 --- /dev/null +++ b/crates/relayer/src/chain/cardano/generated/cosmos_proto.rs @@ -0,0 +1,72 @@ +// This file is @generated by prost-build. +/// InterfaceDescriptor describes an interface type to be used with +/// accepts_interface and implements_interface and declared by declare_interface. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct InterfaceDescriptor { + /// name is the name of the interface. It should be a short-name (without + /// a period) such that the fully qualified name of the interface will be + /// package.name, ex. for the package a.b and interface named C, the + /// fully-qualified name will be a.b.C. + #[prost(string, tag = "1")] + pub name: ::prost::alloc::string::String, + /// description is a human-readable description of the interface and its + /// purpose. + #[prost(string, tag = "2")] + pub description: ::prost::alloc::string::String, +} +/// ScalarDescriptor describes an scalar type to be used with +/// the scalar field option and declared by declare_scalar. +/// Scalars extend simple protobuf built-in types with additional +/// syntax and semantics, for instance to represent big integers. +/// Scalars should ideally define an encoding such that there is only one +/// valid syntactical representation for a given semantic meaning, +/// i.e. the encoding should be deterministic. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ScalarDescriptor { + /// name is the name of the scalar. 
It should be a short-name (without + /// a period) such that the fully qualified name of the scalar will be + /// package.name, ex. for the package a.b and scalar named C, the + /// fully-qualified name will be a.b.C. + #[prost(string, tag = "1")] + pub name: ::prost::alloc::string::String, + /// description is a human-readable description of the scalar and its + /// encoding format. For instance a big integer or decimal scalar should + /// specify precisely the expected encoding format. + #[prost(string, tag = "2")] + pub description: ::prost::alloc::string::String, + /// field_type is the type of field with which this scalar can be used. + /// Scalars can be used with one and only one type of field so that + /// encoding standards and simple and clear. Currently only string and + /// bytes fields are supported for scalars. + #[prost(enumeration = "ScalarType", repeated, tag = "3")] + pub field_type: ::prost::alloc::vec::Vec, +} +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] +#[repr(i32)] +pub enum ScalarType { + Unspecified = 0, + String = 1, + Bytes = 2, +} +impl ScalarType { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + Self::Unspecified => "SCALAR_TYPE_UNSPECIFIED", + Self::String => "SCALAR_TYPE_STRING", + Self::Bytes => "SCALAR_TYPE_BYTES", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. 
+ pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "SCALAR_TYPE_UNSPECIFIED" => Some(Self::Unspecified), + "SCALAR_TYPE_STRING" => Some(Self::String), + "SCALAR_TYPE_BYTES" => Some(Self::Bytes), + _ => None, + } + } +} diff --git a/crates/relayer/src/chain/cardano/generated/google.api.rs b/crates/relayer/src/chain/cardano/generated/google.api.rs new file mode 100644 index 0000000000..2981c31a81 --- /dev/null +++ b/crates/relayer/src/chain/cardano/generated/google.api.rs @@ -0,0 +1,364 @@ +// This file is @generated by prost-build. +/// Defines the HTTP configuration for an API service. It contains a list of +/// [HttpRule][google.api.HttpRule], each specifying the mapping of an RPC method +/// to one or more HTTP REST API methods. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Http { + /// A list of HTTP configuration rules that apply to individual API methods. + /// + /// **NOTE:** All service configuration rules follow "last one wins" order. + #[prost(message, repeated, tag = "1")] + pub rules: ::prost::alloc::vec::Vec, + /// When set to true, URL path parameters will be fully URI-decoded except in + /// cases of single segment matches in reserved expansion, where "%2F" will be + /// left encoded. + /// + /// The default behavior is to not decode RFC 6570 reserved characters in multi + /// segment matches. + #[prost(bool, tag = "2")] + pub fully_decode_reserved_expansion: bool, +} +/// # gRPC Transcoding +/// +/// gRPC Transcoding is a feature for mapping between a gRPC method and one or +/// more HTTP REST endpoints. It allows developers to build a single API service +/// that supports both gRPC APIs and REST APIs. Many systems, including [Google +/// APIs](), +/// [Cloud Endpoints](), [gRPC +/// Gateway](), +/// and [Envoy]() proxy support this feature +/// and use it for large scale production services. +/// +/// `HttpRule` defines the schema of the gRPC/REST mapping. 
The mapping specifies +/// how different portions of the gRPC request message are mapped to the URL +/// path, URL query parameters, and HTTP request body. It also controls how the +/// gRPC response message is mapped to the HTTP response body. `HttpRule` is +/// typically specified as an `google.api.http` annotation on the gRPC method. +/// +/// Each mapping specifies a URL path template and an HTTP method. The path +/// template may refer to one or more fields in the gRPC request message, as long +/// as each field is a non-repeated field with a primitive (non-message) type. +/// The path template controls how fields of the request message are mapped to +/// the URL path. +/// +/// Example: +/// +/// service Messaging { +/// rpc GetMessage(GetMessageRequest) returns (Message) { +/// option (google.api.http) = { +/// get: "/v1/{name=messages/*}" +/// }; +/// } +/// } +/// message GetMessageRequest { +/// string name = 1; // Mapped to URL path. +/// } +/// message Message { +/// string text = 1; // The resource content. +/// } +/// +/// This enables an HTTP REST to gRPC mapping as below: +/// +/// HTTP | gRPC +/// -----|----- +/// `GET /v1/messages/123456` | `GetMessage(name: "messages/123456")` +/// +/// Any fields in the request message which are not bound by the path template +/// automatically become HTTP query parameters if there is no HTTP request body. +/// For example: +/// +/// service Messaging { +/// rpc GetMessage(GetMessageRequest) returns (Message) { +/// option (google.api.http) = { +/// get:"/v1/messages/{message_id}" +/// }; +/// } +/// } +/// message GetMessageRequest { +/// message SubMessage { +/// string subfield = 1; +/// } +/// string message_id = 1; // Mapped to URL path. +/// int64 revision = 2; // Mapped to URL query parameter `revision`. +/// SubMessage sub = 3; // Mapped to URL query parameter `sub.subfield`. 
+/// } +/// +/// This enables a HTTP JSON to RPC mapping as below: +/// +/// HTTP | gRPC +/// -----|----- +/// `GET /v1/messages/123456?revision=2&sub.subfield=foo` | +/// `GetMessage(message_id: "123456" revision: 2 sub: SubMessage(subfield: +/// "foo"))` +/// +/// Note that fields which are mapped to URL query parameters must have a +/// primitive type or a repeated primitive type or a non-repeated message type. +/// In the case of a repeated type, the parameter can be repeated in the URL +/// as `...?param=A¶m=B`. In the case of a message type, each field of the +/// message is mapped to a separate parameter, such as +/// `...?foo.a=A&foo.b=B&foo.c=C`. +/// +/// For HTTP methods that allow a request body, the `body` field +/// specifies the mapping. Consider a REST update method on the +/// message resource collection: +/// +/// service Messaging { +/// rpc UpdateMessage(UpdateMessageRequest) returns (Message) { +/// option (google.api.http) = { +/// patch: "/v1/messages/{message_id}" +/// body: "message" +/// }; +/// } +/// } +/// message UpdateMessageRequest { +/// string message_id = 1; // mapped to the URL +/// Message message = 2; // mapped to the body +/// } +/// +/// The following HTTP JSON to RPC mapping is enabled, where the +/// representation of the JSON in the request body is determined by +/// protos JSON encoding: +/// +/// HTTP | gRPC +/// -----|----- +/// `PATCH /v1/messages/123456 { "text": "Hi!" }` | `UpdateMessage(message_id: +/// "123456" message { text: "Hi!" })` +/// +/// The special name `*` can be used in the body mapping to define that +/// every field not bound by the path template should be mapped to the +/// request body. 
This enables the following alternative definition of +/// the update method: +/// +/// service Messaging { +/// rpc UpdateMessage(Message) returns (Message) { +/// option (google.api.http) = { +/// patch: "/v1/messages/{message_id}" +/// body: "*" +/// }; +/// } +/// } +/// message Message { +/// string message_id = 1; +/// string text = 2; +/// } +/// +/// +/// The following HTTP JSON to RPC mapping is enabled: +/// +/// HTTP | gRPC +/// -----|----- +/// `PATCH /v1/messages/123456 { "text": "Hi!" }` | `UpdateMessage(message_id: +/// "123456" text: "Hi!")` +/// +/// Note that when using `*` in the body mapping, it is not possible to +/// have HTTP parameters, as all fields not bound by the path end in +/// the body. This makes this option more rarely used in practice when +/// defining REST APIs. The common usage of `*` is in custom methods +/// which don't use the URL at all for transferring data. +/// +/// It is possible to define multiple HTTP methods for one RPC by using +/// the `additional_bindings` option. Example: +/// +/// service Messaging { +/// rpc GetMessage(GetMessageRequest) returns (Message) { +/// option (google.api.http) = { +/// get: "/v1/messages/{message_id}" +/// additional_bindings { +/// get: "/v1/users/{user_id}/messages/{message_id}" +/// } +/// }; +/// } +/// } +/// message GetMessageRequest { +/// string message_id = 1; +/// string user_id = 2; +/// } +/// +/// This enables the following two alternative HTTP JSON to RPC mappings: +/// +/// HTTP | gRPC +/// -----|----- +/// `GET /v1/messages/123456` | `GetMessage(message_id: "123456")` +/// `GET /v1/users/me/messages/123456` | `GetMessage(user_id: "me" message_id: +/// "123456")` +/// +/// ## Rules for HTTP mapping +/// +/// 1. Leaf request fields (recursive expansion nested messages in the request +/// message) are classified into three categories: +/// - Fields referred by the path template. They are passed via the URL path. 
+/// - Fields referred by the [HttpRule.body][google.api.HttpRule.body]. They are passed via the HTTP +/// request body. +/// - All other fields are passed via the URL query parameters, and the +/// parameter name is the field path in the request message. A repeated +/// field can be represented as multiple query parameters under the same +/// name. +/// 2. If [HttpRule.body][google.api.HttpRule.body] is "*", there is no URL query parameter, all fields +/// are passed via URL path and HTTP request body. +/// 3. If [HttpRule.body][google.api.HttpRule.body] is omitted, there is no HTTP request body, all +/// fields are passed via URL path and URL query parameters. +/// +/// ### Path template syntax +/// +/// Template = "/" Segments \[ Verb \] ; +/// Segments = Segment { "/" Segment } ; +/// Segment = "*" | "**" | LITERAL | Variable ; +/// Variable = "{" FieldPath \[ "=" Segments \] "}" ; +/// FieldPath = IDENT { "." IDENT } ; +/// Verb = ":" LITERAL ; +/// +/// The syntax `*` matches a single URL path segment. The syntax `**` matches +/// zero or more URL path segments, which must be the last part of the URL path +/// except the `Verb`. +/// +/// The syntax `Variable` matches part of the URL path as specified by its +/// template. A variable template must not contain other variables. If a variable +/// matches a single path segment, its template may be omitted, e.g. `{var}` +/// is equivalent to `{var=*}`. +/// +/// The syntax `LITERAL` matches literal text in the URL path. If the `LITERAL` +/// contains any reserved character, such characters should be percent-encoded +/// before the matching. +/// +/// If a variable contains exactly one path segment, such as `"{var}"` or +/// `"{var=*}"`, when such a variable is expanded into a URL path on the client +/// side, all characters except `\[-_.~0-9a-zA-Z\]` are percent-encoded. The +/// server side does the reverse decoding. Such variables show up in the +/// [Discovery +/// Document]() as +/// `{var}`. 
+/// +/// If a variable contains multiple path segments, such as `"{var=foo/*}"` +/// or `"{var=**}"`, when such a variable is expanded into a URL path on the +/// client side, all characters except `\[-_.~/0-9a-zA-Z\]` are percent-encoded. +/// The server side does the reverse decoding, except "%2F" and "%2f" are left +/// unchanged. Such variables show up in the +/// [Discovery +/// Document]() as +/// `{+var}`. +/// +/// ## Using gRPC API Service Configuration +/// +/// gRPC API Service Configuration (service config) is a configuration language +/// for configuring a gRPC service to become a user-facing product. The +/// service config is simply the YAML representation of the `google.api.Service` +/// proto message. +/// +/// As an alternative to annotating your proto file, you can configure gRPC +/// transcoding in your service config YAML files. You do this by specifying a +/// `HttpRule` that maps the gRPC method to a REST endpoint, achieving the same +/// effect as the proto annotation. This can be particularly useful if you +/// have a proto that is reused in multiple services. Note that any transcoding +/// specified in the service config will override any matching transcoding +/// configuration in the proto. +/// +/// Example: +/// +/// http: +/// rules: +/// # Selects a gRPC method and applies HttpRule to it. +/// - selector: example.v1.Messaging.GetMessage +/// get: /v1/messages/{message_id}/{sub.subfield} +/// +/// ## Special notes +/// +/// When gRPC Transcoding is used to map a gRPC to JSON REST endpoints, the +/// proto to JSON conversion must follow the [proto3 +/// specification](). +/// +/// While the single segment variable follows the semantics of +/// [RFC 6570]() Section 3.2.2 Simple String +/// Expansion, the multi segment variable **does not** follow RFC 6570 Section +/// 3.2.3 Reserved Expansion. The reason is that the Reserved Expansion +/// does not expand special characters like `?` and `#`, which would lead +/// to invalid URLs. 
As the result, gRPC Transcoding uses a custom encoding +/// for multi segment variables. +/// +/// The path variables **must not** refer to any repeated or mapped field, +/// because client libraries are not capable of handling such variable expansion. +/// +/// The path variables **must not** capture the leading "/" character. The reason +/// is that the most common use case "{var}" does not capture the leading "/" +/// character. For consistency, all path variables must share the same behavior. +/// +/// Repeated message fields must not be mapped to URL query parameters, because +/// no client library can support such complicated mapping. +/// +/// If an API needs to use a JSON array for request or response body, it can map +/// the request or response body to a repeated field. However, some gRPC +/// Transcoding implementations may not support this feature. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct HttpRule { + /// Selects a method to which this rule applies. + /// + /// Refer to [selector][google.api.DocumentationRule.selector] for syntax details. + #[prost(string, tag = "1")] + pub selector: ::prost::alloc::string::String, + /// The name of the request field whose value is mapped to the HTTP request + /// body, or `*` for mapping all request fields not captured by the path + /// pattern to the HTTP body, or omitted for not having any HTTP request body. + /// + /// NOTE: the referred field must be present at the top-level of the request + /// message type. + #[prost(string, tag = "7")] + pub body: ::prost::alloc::string::String, + /// Optional. The name of the response field whose value is mapped to the HTTP + /// response body. When omitted, the entire response message will be used + /// as the HTTP response body. + /// + /// NOTE: The referred field must be present at the top-level of the response + /// message type. 
+ #[prost(string, tag = "12")] + pub response_body: ::prost::alloc::string::String, + /// Additional HTTP bindings for the selector. Nested bindings must + /// not contain an `additional_bindings` field themselves (that is, + /// the nesting may only be one level deep). + #[prost(message, repeated, tag = "11")] + pub additional_bindings: ::prost::alloc::vec::Vec, + /// Determines the URL pattern is matched by this rules. This pattern can be + /// used with any of the {get|put|post|delete|patch} methods. A custom method + /// can be defined using the 'custom' field. + #[prost(oneof = "http_rule::Pattern", tags = "2, 3, 4, 5, 6, 8")] + pub pattern: ::core::option::Option, +} +/// Nested message and enum types in `HttpRule`. +pub mod http_rule { + /// Determines the URL pattern is matched by this rules. This pattern can be + /// used with any of the {get|put|post|delete|patch} methods. A custom method + /// can be defined using the 'custom' field. + #[derive(Clone, PartialEq, ::prost::Oneof)] + pub enum Pattern { + /// Maps to HTTP GET. Used for listing and getting information about + /// resources. + #[prost(string, tag = "2")] + Get(::prost::alloc::string::String), + /// Maps to HTTP PUT. Used for replacing a resource. + #[prost(string, tag = "3")] + Put(::prost::alloc::string::String), + /// Maps to HTTP POST. Used for creating a resource or performing an action. + #[prost(string, tag = "4")] + Post(::prost::alloc::string::String), + /// Maps to HTTP DELETE. Used for deleting a resource. + #[prost(string, tag = "5")] + Delete(::prost::alloc::string::String), + /// Maps to HTTP PATCH. Used for updating a resource. + #[prost(string, tag = "6")] + Patch(::prost::alloc::string::String), + /// The custom pattern is used for specifying an HTTP method that is not + /// included in the `pattern` field, such as HEAD, or "*" to leave the + /// HTTP method unspecified for this rule. The wild-card rule is useful + /// for services that provide content to Web (HTML) clients. 
+ #[prost(message, tag = "8")] + Custom(super::CustomHttpPattern), + } +} +/// A custom pattern is used for defining custom HTTP verb. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct CustomHttpPattern { + /// The name of this custom HTTP verb. + #[prost(string, tag = "1")] + pub kind: ::prost::alloc::string::String, + /// The path matched by this custom verb. + #[prost(string, tag = "2")] + pub path: ::prost::alloc::string::String, +} diff --git a/crates/relayer/src/chain/cardano/generated/ibc.cardano.v1.rs b/crates/relayer/src/chain/cardano/generated/ibc.cardano.v1.rs new file mode 100644 index 0000000000..6031164a4b --- /dev/null +++ b/crates/relayer/src/chain/cardano/generated/ibc.cardano.v1.rs @@ -0,0 +1,309 @@ +// This file is @generated by prost-build. +/// SubmitSignedTxRequest contains a signed Cardano transaction in CBOR format. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct SubmitSignedTxRequest { + /// Signed transaction in CBOR hex format. + /// This is the completed, signed Cardano transaction ready for submission. + #[prost(string, tag = "1")] + pub signed_tx_cbor: ::prost::alloc::string::String, + /// Optional description for logging/debugging. + #[prost(string, tag = "2")] + pub description: ::prost::alloc::string::String, +} +/// SubmitSignedTxResponse contains the result of submitting a signed transaction. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct SubmitSignedTxResponse { + /// Transaction hash (Blake2b-256 hash of the signed transaction). + #[prost(string, tag = "1")] + pub tx_hash: ::prost::alloc::string::String, + /// Block height at which the transaction was confirmed (if available). + #[prost(string, tag = "2")] + pub height: ::prost::alloc::string::String, + /// Raw transaction events (for IBC event parsing). + #[prost(message, repeated, tag = "3")] + pub events: ::prost::alloc::vec::Vec, +} +/// Event represents a transaction event with type and attributes. 
+#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Event { + #[prost(string, tag = "1")] + pub r#type: ::prost::alloc::string::String, + #[prost(message, repeated, tag = "2")] + pub attributes: ::prost::alloc::vec::Vec, +} +/// EventAttribute represents a key-value pair in an event. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct EventAttribute { + #[prost(string, tag = "1")] + pub key: ::prost::alloc::string::String, + #[prost(string, tag = "2")] + pub value: ::prost::alloc::string::String, +} +/// Generated client implementations. +pub mod cardano_msg_client { + #![allow( + unused_variables, + dead_code, + missing_docs, + clippy::wildcard_imports, + clippy::let_unit_value, + )] + use tonic::codegen::*; + use tonic::codegen::http::Uri; + /// CardanoMsg defines the Cardano-specific transaction submission service. + /// This service is used by the Hermes relayer to submit signed Cardano transactions. + #[derive(Debug, Clone)] + pub struct CardanoMsgClient { + inner: tonic::client::Grpc, + } + impl CardanoMsgClient { + /// Attempt to create a new client by connecting to a given endpoint. 
+ pub async fn connect(dst: D) -> Result + where + D: TryInto, + D::Error: Into, + { + let conn = tonic::transport::Endpoint::new(dst)?.connect().await?; + Ok(Self::new(conn)) + } + } + impl CardanoMsgClient + where + T: tonic::client::GrpcService, + T::Error: Into, + T::ResponseBody: Body + std::marker::Send + 'static, + ::Error: Into + std::marker::Send, + { + pub fn new(inner: T) -> Self { + let inner = tonic::client::Grpc::new(inner); + Self { inner } + } + pub fn with_origin(inner: T, origin: Uri) -> Self { + let inner = tonic::client::Grpc::with_origin(inner, origin); + Self { inner } + } + pub fn with_interceptor( + inner: T, + interceptor: F, + ) -> CardanoMsgClient> + where + F: tonic::service::Interceptor, + T::ResponseBody: Default, + T: tonic::codegen::Service< + http::Request, + Response = http::Response< + >::ResponseBody, + >, + >, + , + >>::Error: Into + std::marker::Send + std::marker::Sync, + { + CardanoMsgClient::new(InterceptedService::new(inner, interceptor)) + } + /// Compress requests with the given encoding. + /// + /// This requires the server to support it otherwise it might respond with an + /// error. + #[must_use] + pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.send_compressed(encoding); + self + } + /// Enable decompressing responses. + #[must_use] + pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.accept_compressed(encoding); + self + } + /// Limits the maximum size of a decoded message. + /// + /// Default: `4MB` + #[must_use] + pub fn max_decoding_message_size(mut self, limit: usize) -> Self { + self.inner = self.inner.max_decoding_message_size(limit); + self + } + /// Limits the maximum size of an encoded message. 
+ /// + /// Default: `usize::MAX` + #[must_use] + pub fn max_encoding_message_size(mut self, limit: usize) -> Self { + self.inner = self.inner.max_encoding_message_size(limit); + self + } + /// SubmitSignedTx submits a signed Cardano transaction. + pub async fn submit_signed_tx( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/ibc.cardano.v1.CardanoMsg/SubmitSignedTx", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("ibc.cardano.v1.CardanoMsg", "SubmitSignedTx")); + self.inner.unary(req, path, codec).await + } + } +} +/// QueryEventsRequest is the request type for the Query/Events RPC method. +#[derive(Clone, Copy, PartialEq, ::prost::Message)] +pub struct QueryEventsRequest { + /// Height from which to query events (exclusive - returns events after this height) + #[prost(uint64, tag = "1")] + pub since_height: u64, +} +/// QueryEventsResponse is the response type for the Query/Events RPC method. 
+#[derive(Clone, PartialEq, ::prost::Message)] +pub struct QueryEventsResponse { + /// Current chain height at the time of the query + #[prost(uint64, tag = "1")] + pub current_height: u64, + /// Events grouped by block height + #[prost(message, repeated, tag = "2")] + pub events: ::prost::alloc::vec::Vec, +} +/// BlockEvents contains all IBC events for a specific block +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct BlockEvents { + /// Block height + #[prost(uint64, tag = "1")] + pub height: u64, + /// IBC events that occurred in this block + #[prost(message, repeated, tag = "2")] + pub events: ::prost::alloc::vec::Vec< + super::super::core::types::v1::ResponseDeliverTx, + >, +} +/// Generated client implementations. +pub mod query_client { + #![allow( + unused_variables, + dead_code, + missing_docs, + clippy::wildcard_imports, + clippy::let_unit_value, + )] + use tonic::codegen::*; + use tonic::codegen::http::Uri; + /// Query provides defines the gRPC querier service for Cardano-specific queries + #[derive(Debug, Clone)] + pub struct QueryClient { + inner: tonic::client::Grpc, + } + impl QueryClient { + /// Attempt to create a new client by connecting to a given endpoint. 
+ pub async fn connect(dst: D) -> Result + where + D: TryInto, + D::Error: Into, + { + let conn = tonic::transport::Endpoint::new(dst)?.connect().await?; + Ok(Self::new(conn)) + } + } + impl QueryClient + where + T: tonic::client::GrpcService, + T::Error: Into, + T::ResponseBody: Body + std::marker::Send + 'static, + ::Error: Into + std::marker::Send, + { + pub fn new(inner: T) -> Self { + let inner = tonic::client::Grpc::new(inner); + Self { inner } + } + pub fn with_origin(inner: T, origin: Uri) -> Self { + let inner = tonic::client::Grpc::with_origin(inner, origin); + Self { inner } + } + pub fn with_interceptor( + inner: T, + interceptor: F, + ) -> QueryClient> + where + F: tonic::service::Interceptor, + T::ResponseBody: Default, + T: tonic::codegen::Service< + http::Request, + Response = http::Response< + >::ResponseBody, + >, + >, + , + >>::Error: Into + std::marker::Send + std::marker::Sync, + { + QueryClient::new(InterceptedService::new(inner, interceptor)) + } + /// Compress requests with the given encoding. + /// + /// This requires the server to support it otherwise it might respond with an + /// error. + #[must_use] + pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.send_compressed(encoding); + self + } + /// Enable decompressing responses. + #[must_use] + pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.accept_compressed(encoding); + self + } + /// Limits the maximum size of a decoded message. + /// + /// Default: `4MB` + #[must_use] + pub fn max_decoding_message_size(mut self, limit: usize) -> Self { + self.inner = self.inner.max_decoding_message_size(limit); + self + } + /// Limits the maximum size of an encoded message. 
+ /// + /// Default: `usize::MAX` + #[must_use] + pub fn max_encoding_message_size(mut self, limit: usize) -> Self { + self.inner = self.inner.max_encoding_message_size(limit); + self + } + /// Events queries IBC events from Cardano blocks since a given height + pub async fn events( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/ibc.cardano.v1.Query/Events", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("ibc.cardano.v1.Query", "Events")); + self.inner.unary(req, path, codec).await + } + } +} diff --git a/crates/relayer/src/chain/cardano/generated/ibc.core.channel.v1.rs b/crates/relayer/src/chain/cardano/generated/ibc.core.channel.v1.rs new file mode 100644 index 0000000000..511768b843 --- /dev/null +++ b/crates/relayer/src/chain/cardano/generated/ibc.core.channel.v1.rs @@ -0,0 +1,937 @@ +// This file is @generated by prost-build. +/// Channel defines pipeline for exactly-once packet delivery between specific +/// modules on separate blockchains, which has at least one end capable of +/// sending packets and one end capable of receiving packets. 
+#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Channel { + /// current state of the channel end + #[prost(enumeration = "State", tag = "1")] + pub state: i32, + /// whether the channel is ordered or unordered + #[prost(enumeration = "Order", tag = "2")] + pub ordering: i32, + /// counterparty channel end + #[prost(message, optional, tag = "3")] + pub counterparty: ::core::option::Option, + /// list of connection identifiers, in order, along which packets sent on + /// this channel will travel + #[prost(string, repeated, tag = "4")] + pub connection_hops: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, + /// opaque channel version, which is agreed upon during the handshake + #[prost(string, tag = "5")] + pub version: ::prost::alloc::string::String, +} +/// IdentifiedChannel defines a channel with additional port and channel +/// identifier fields. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct IdentifiedChannel { + /// current state of the channel end + #[prost(enumeration = "State", tag = "1")] + pub state: i32, + /// whether the channel is ordered or unordered + #[prost(enumeration = "Order", tag = "2")] + pub ordering: i32, + /// counterparty channel end + #[prost(message, optional, tag = "3")] + pub counterparty: ::core::option::Option, + /// list of connection identifiers, in order, along which packets sent on + /// this channel will travel + #[prost(string, repeated, tag = "4")] + pub connection_hops: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, + /// opaque channel version, which is agreed upon during the handshake + #[prost(string, tag = "5")] + pub version: ::prost::alloc::string::String, + /// port identifier + #[prost(string, tag = "6")] + pub port_id: ::prost::alloc::string::String, + /// channel identifier + #[prost(string, tag = "7")] + pub channel_id: ::prost::alloc::string::String, +} +/// Counterparty defines a channel end counterparty +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct 
Counterparty { + /// port on the counterparty chain which owns the other end of the channel. + #[prost(string, tag = "1")] + pub port_id: ::prost::alloc::string::String, + /// channel end on the counterparty chain + #[prost(string, tag = "2")] + pub channel_id: ::prost::alloc::string::String, +} +/// Packet defines a type that carries data across different chains through IBC +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Packet { + /// number corresponds to the order of sends and receives, where a Packet + /// with an earlier sequence number must be sent and received before a Packet + /// with a later sequence number. + #[prost(uint64, tag = "1")] + pub sequence: u64, + /// identifies the port on the sending chain. + #[prost(string, tag = "2")] + pub source_port: ::prost::alloc::string::String, + /// identifies the channel end on the sending chain. + #[prost(string, tag = "3")] + pub source_channel: ::prost::alloc::string::String, + /// identifies the port on the receiving chain. + #[prost(string, tag = "4")] + pub destination_port: ::prost::alloc::string::String, + /// identifies the channel end on the receiving chain. + #[prost(string, tag = "5")] + pub destination_channel: ::prost::alloc::string::String, + /// actual opaque bytes transferred directly to the application module + #[prost(bytes = "vec", tag = "6")] + pub data: ::prost::alloc::vec::Vec, + /// block height after which the packet times out + #[prost(message, optional, tag = "7")] + pub timeout_height: ::core::option::Option, + /// block timestamp (in nanoseconds) after which the packet times out + #[prost(uint64, tag = "8")] + pub timeout_timestamp: u64, +} +/// PacketState defines the generic type necessary to retrieve and store +/// packet commitments, acknowledgements, and receipts. +/// Caller is responsible for knowing the context necessary to interpret this +/// state as a commitment, acknowledgement, or a receipt. 
+#[derive(Clone, PartialEq, ::prost::Message)] +pub struct PacketState { + /// channel port identifier. + #[prost(string, tag = "1")] + pub port_id: ::prost::alloc::string::String, + /// channel unique identifier. + #[prost(string, tag = "2")] + pub channel_id: ::prost::alloc::string::String, + /// packet sequence. + #[prost(uint64, tag = "3")] + pub sequence: u64, + /// embedded data that represents packet state. + #[prost(bytes = "vec", tag = "4")] + pub data: ::prost::alloc::vec::Vec, +} +/// PacketId is an identifer for a unique Packet +/// Source chains refer to packets by source port/channel +/// Destination chains refer to packets by destination port/channel +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct PacketId { + /// channel port identifier + #[prost(string, tag = "1")] + pub port_id: ::prost::alloc::string::String, + /// channel unique identifier + #[prost(string, tag = "2")] + pub channel_id: ::prost::alloc::string::String, + /// packet sequence + #[prost(uint64, tag = "3")] + pub sequence: u64, +} +/// Acknowledgement is the recommended acknowledgement format to be used by +/// app-specific protocols. +/// NOTE: The field numbers 21 and 22 were explicitly chosen to avoid accidental +/// conflicts with other protobuf message formats used for acknowledgements. +/// The first byte of any message with this format will be the non-ASCII values +/// `0xaa` (result) or `0xb2` (error). Implemented as defined by ICS: +/// +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Acknowledgement { + /// response contains either a result or an error and must be non-empty + #[prost(oneof = "acknowledgement::Response", tags = "21, 22")] + pub response: ::core::option::Option, +} +/// Nested message and enum types in `Acknowledgement`. 
+pub mod acknowledgement { + /// response contains either a result or an error and must be non-empty + #[derive(Clone, PartialEq, ::prost::Oneof)] + pub enum Response { + #[prost(bytes, tag = "21")] + Result(::prost::alloc::vec::Vec), + #[prost(string, tag = "22")] + Error(::prost::alloc::string::String), + } +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Coin { + #[prost(string, tag = "1")] + pub denom: ::prost::alloc::string::String, + #[prost(uint64, tag = "2")] + pub amount: u64, +} +/// State defines if a channel is in one of the following states: +/// CLOSED, INIT, TRYOPEN, OPEN or UNINITIALIZED. +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] +#[repr(i32)] +pub enum State { + /// Default State + UninitializedUnspecified = 0, + /// A channel has just started the opening handshake. + Init = 1, + /// A channel has acknowledged the handshake step on the counterparty chain. + Tryopen = 2, + /// A channel has completed the handshake. Open channels are + /// ready to send and receive packets. + Open = 3, + /// A channel has been closed and can no longer be used to send or receive + /// packets. + Closed = 4, +} +impl State { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + Self::UninitializedUnspecified => "STATE_UNINITIALIZED_UNSPECIFIED", + Self::Init => "STATE_INIT", + Self::Tryopen => "STATE_TRYOPEN", + Self::Open => "STATE_OPEN", + Self::Closed => "STATE_CLOSED", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. 
+ pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "STATE_UNINITIALIZED_UNSPECIFIED" => Some(Self::UninitializedUnspecified), + "STATE_INIT" => Some(Self::Init), + "STATE_TRYOPEN" => Some(Self::Tryopen), + "STATE_OPEN" => Some(Self::Open), + "STATE_CLOSED" => Some(Self::Closed), + _ => None, + } + } +} +/// Order defines if a channel is ORDERED or UNORDERED +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] +#[repr(i32)] +pub enum Order { + /// zero-value for channel ordering + NoneUnspecified = 0, + /// packets can be delivered in any order, which may differ from the order in + /// which they were sent. + Unordered = 1, + /// packets are delivered exactly in the order which they were sent + Ordered = 2, +} +impl Order { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + Self::NoneUnspecified => "ORDER_NONE_UNSPECIFIED", + Self::Unordered => "ORDER_UNORDERED", + Self::Ordered => "ORDER_ORDERED", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "ORDER_NONE_UNSPECIFIED" => Some(Self::NoneUnspecified), + "ORDER_UNORDERED" => Some(Self::Unordered), + "ORDER_ORDERED" => Some(Self::Ordered), + _ => None, + } + } +} +/// MsgChannelOpenInit defines an sdk.Msg to initialize a channel handshake. It +/// is called by a relayer on Chain A. 
+#[derive(Clone, PartialEq, ::prost::Message)] +pub struct MsgChannelOpenInit { + #[prost(string, tag = "1")] + pub port_id: ::prost::alloc::string::String, + #[prost(message, optional, tag = "2")] + pub channel: ::core::option::Option, + #[prost(string, tag = "3")] + pub signer: ::prost::alloc::string::String, +} +/// MsgChannelOpenInitResponse defines the Msg/ChannelOpenInit response type. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct MsgChannelOpenInitResponse { + #[prost(string, tag = "1")] + pub channel_id: ::prost::alloc::string::String, + #[prost(string, tag = "2")] + pub version: ::prost::alloc::string::String, + #[prost(message, optional, tag = "3")] + pub unsigned_tx: ::core::option::Option<::prost_types::Any>, +} +/// MsgChannelOpenInit defines a msg sent by a Relayer to try to open a channel +/// on Chain B. The version field within the Channel field has been deprecated. Its +/// value will be ignored by core IBC. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct MsgChannelOpenTry { + #[prost(string, tag = "1")] + pub port_id: ::prost::alloc::string::String, + /// Deprecated: this field is unused. Crossing hello's are no longer supported in core IBC. + #[deprecated] + #[prost(string, tag = "2")] + pub previous_channel_id: ::prost::alloc::string::String, + /// NOTE: the version field within the channel has been deprecated. Its value will be ignored by core IBC. + #[prost(message, optional, tag = "3")] + pub channel: ::core::option::Option, + #[prost(string, tag = "4")] + pub counterparty_version: ::prost::alloc::string::String, + #[prost(bytes = "vec", tag = "5")] + pub proof_init: ::prost::alloc::vec::Vec, + #[prost(message, optional, tag = "6")] + pub proof_height: ::core::option::Option, + #[prost(string, tag = "7")] + pub signer: ::prost::alloc::string::String, +} +/// MsgChannelOpenTryResponse defines the Msg/ChannelOpenTry response type. 
+#[derive(Clone, PartialEq, ::prost::Message)] +pub struct MsgChannelOpenTryResponse { + #[prost(string, tag = "1")] + pub version: ::prost::alloc::string::String, + #[prost(message, optional, tag = "2")] + pub unsigned_tx: ::core::option::Option<::prost_types::Any>, +} +/// MsgChannelOpenAck defines a msg sent by a Relayer to Chain A to acknowledge +/// the change of channel state to TRYOPEN on Chain B. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct MsgChannelOpenAck { + #[prost(string, tag = "1")] + pub port_id: ::prost::alloc::string::String, + #[prost(string, tag = "2")] + pub channel_id: ::prost::alloc::string::String, + #[prost(string, tag = "3")] + pub counterparty_channel_id: ::prost::alloc::string::String, + #[prost(string, tag = "4")] + pub counterparty_version: ::prost::alloc::string::String, + #[prost(bytes = "vec", tag = "5")] + pub proof_try: ::prost::alloc::vec::Vec, + #[prost(message, optional, tag = "6")] + pub proof_height: ::core::option::Option, + #[prost(string, tag = "7")] + pub signer: ::prost::alloc::string::String, +} +/// MsgChannelOpenAckResponse defines the Msg/ChannelOpenAck response type. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct MsgChannelOpenAckResponse { + #[prost(message, optional, tag = "1")] + pub unsigned_tx: ::core::option::Option<::prost_types::Any>, +} +/// MsgChannelOpenConfirm defines a msg sent by a Relayer to Chain B to +/// acknowledge the change of channel state to OPEN on Chain A. 
+#[derive(Clone, PartialEq, ::prost::Message)] +pub struct MsgChannelOpenConfirm { + #[prost(string, tag = "1")] + pub port_id: ::prost::alloc::string::String, + #[prost(string, tag = "2")] + pub channel_id: ::prost::alloc::string::String, + #[prost(bytes = "vec", tag = "3")] + pub proof_ack: ::prost::alloc::vec::Vec, + #[prost(message, optional, tag = "4")] + pub proof_height: ::core::option::Option, + #[prost(string, tag = "5")] + pub signer: ::prost::alloc::string::String, +} +/// MsgChannelOpenConfirmResponse defines the Msg/ChannelOpenConfirm response +/// type. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct MsgChannelOpenConfirmResponse { + #[prost(message, optional, tag = "1")] + pub unsigned_tx: ::core::option::Option<::prost_types::Any>, +} +/// MsgChannelCloseInit defines a msg sent by a Relayer to Chain A +/// to close a channel with Chain B. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct MsgChannelCloseInit { + #[prost(string, tag = "1")] + pub port_id: ::prost::alloc::string::String, + #[prost(string, tag = "2")] + pub channel_id: ::prost::alloc::string::String, + #[prost(string, tag = "3")] + pub signer: ::prost::alloc::string::String, +} +/// MsgChannelCloseInitResponse defines the Msg/ChannelCloseInit response type. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct MsgChannelCloseInitResponse { + #[prost(message, optional, tag = "1")] + pub unsigned_tx: ::core::option::Option<::prost_types::Any>, +} +/// MsgChannelCloseConfirm defines a msg sent by a Relayer to Chain B +/// to acknowledge the change of channel state to CLOSED on Chain A. 
+#[derive(Clone, PartialEq, ::prost::Message)] +pub struct MsgChannelCloseConfirm { + #[prost(string, tag = "1")] + pub port_id: ::prost::alloc::string::String, + #[prost(string, tag = "2")] + pub channel_id: ::prost::alloc::string::String, + #[prost(bytes = "vec", tag = "3")] + pub proof_init: ::prost::alloc::vec::Vec, + #[prost(message, optional, tag = "4")] + pub proof_height: ::core::option::Option, + #[prost(string, tag = "5")] + pub signer: ::prost::alloc::string::String, +} +/// MsgChannelCloseConfirmResponse defines the Msg/ChannelCloseConfirm response +/// type. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct MsgChannelCloseConfirmResponse { + #[prost(message, optional, tag = "1")] + pub unsigned_tx: ::core::option::Option<::prost_types::Any>, +} +/// MsgRecvPacket receives incoming IBC packet +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct MsgRecvPacket { + #[prost(message, optional, tag = "1")] + pub packet: ::core::option::Option, + #[prost(bytes = "vec", tag = "2")] + pub proof_commitment: ::prost::alloc::vec::Vec, + #[prost(message, optional, tag = "3")] + pub proof_height: ::core::option::Option, + #[prost(string, tag = "4")] + pub signer: ::prost::alloc::string::String, +} +/// MsgRecvPacketResponse defines the Msg/RecvPacket response type. 
+#[derive(Clone, PartialEq, ::prost::Message)] +pub struct MsgRecvPacketResponse { + #[prost(enumeration = "ResponseResultType", tag = "1")] + pub result: i32, + #[prost(message, optional, tag = "2")] + pub unsigned_tx: ::core::option::Option<::prost_types::Any>, +} +/// MsgTimeout receives timed-out packet +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct MsgTimeout { + #[prost(message, optional, tag = "1")] + pub packet: ::core::option::Option, + #[prost(bytes = "vec", tag = "2")] + pub proof_unreceived: ::prost::alloc::vec::Vec, + #[prost(message, optional, tag = "3")] + pub proof_height: ::core::option::Option, + #[prost(uint64, tag = "4")] + pub next_sequence_recv: u64, + #[prost(string, tag = "5")] + pub signer: ::prost::alloc::string::String, +} +/// MsgTimeoutResponse defines the Msg/Timeout response type. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct MsgTimeoutResponse { + #[prost(enumeration = "ResponseResultType", tag = "1")] + pub result: i32, + #[prost(message, optional, tag = "2")] + pub unsigned_tx: ::core::option::Option<::prost_types::Any>, +} +/// MsgTimeoutOnClose timed-out packet upon counterparty channel closure. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct MsgTimeoutOnClose { + #[prost(message, optional, tag = "1")] + pub packet: ::core::option::Option, + #[prost(bytes = "vec", tag = "2")] + pub proof_unreceived: ::prost::alloc::vec::Vec, + #[prost(bytes = "vec", tag = "3")] + pub proof_close: ::prost::alloc::vec::Vec, + #[prost(message, optional, tag = "4")] + pub proof_height: ::core::option::Option, + #[prost(uint64, tag = "5")] + pub next_sequence_recv: u64, + #[prost(string, tag = "6")] + pub signer: ::prost::alloc::string::String, +} +/// MsgTimeoutOnCloseResponse defines the Msg/TimeoutOnClose response type. 
+#[derive(Clone, PartialEq, ::prost::Message)] +pub struct MsgTimeoutOnCloseResponse { + #[prost(enumeration = "ResponseResultType", tag = "1")] + pub result: i32, + #[prost(message, optional, tag = "2")] + pub unsigned_tx: ::core::option::Option<::prost_types::Any>, +} +/// MsgAcknowledgement receives incoming IBC acknowledgement +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct MsgAcknowledgement { + #[prost(message, optional, tag = "1")] + pub packet: ::core::option::Option, + #[prost(bytes = "vec", tag = "2")] + pub acknowledgement: ::prost::alloc::vec::Vec, + #[prost(bytes = "vec", tag = "3")] + pub proof_acked: ::prost::alloc::vec::Vec, + #[prost(message, optional, tag = "4")] + pub proof_height: ::core::option::Option, + #[prost(string, tag = "5")] + pub signer: ::prost::alloc::string::String, +} +/// MsgAcknowledgementResponse defines the Msg/Acknowledgement response type. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct MsgAcknowledgementResponse { + #[prost(enumeration = "ResponseResultType", tag = "1")] + pub result: i32, + #[prost(message, optional, tag = "2")] + pub unsigned_tx: ::core::option::Option<::prost_types::Any>, +} +/// MsgTransfer send packet +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct MsgTransfer { + #[prost(string, tag = "1")] + pub source_port: ::prost::alloc::string::String, + #[prost(string, tag = "2")] + pub source_channel: ::prost::alloc::string::String, + #[prost(message, optional, tag = "3")] + pub token: ::core::option::Option, + #[prost(string, tag = "4")] + pub sender: ::prost::alloc::string::String, + #[prost(string, tag = "5")] + pub receiver: ::prost::alloc::string::String, + #[prost(message, optional, tag = "6")] + pub timeout_height: ::core::option::Option, + #[prost(uint64, tag = "7")] + pub timeout_timestamp: u64, + #[prost(string, tag = "8")] + pub memo: ::prost::alloc::string::String, + #[prost(string, tag = "9")] + pub signer: ::prost::alloc::string::String, +} +/// MsgTransferResponse 
defines the Msg/Transfer response type. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct MsgTransferResponse { + #[prost(enumeration = "ResponseResultType", tag = "1")] + pub result: i32, + #[prost(message, optional, tag = "2")] + pub unsigned_tx: ::core::option::Option<::prost_types::Any>, +} +/// MsgChannelOpenConfirm defines a msg sent by a Relayer to Chain B to +/// acknowledge the change of channel state to OPEN on Chain A. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct MsgTimeoutRefresh { + #[prost(string, tag = "1")] + pub channel_id: ::prost::alloc::string::String, + #[prost(string, tag = "2")] + pub signer: ::prost::alloc::string::String, +} +/// MsgChannelOpenConfirmResponse defines the Msg/ChannelOpenConfirm response +/// type. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct MsgTimeoutRefreshResponse { + #[prost(message, optional, tag = "1")] + pub unsigned_tx: ::core::option::Option<::prost_types::Any>, +} +/// ResponseResultType defines the possible outcomes of the execution of a message +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] +#[repr(i32)] +pub enum ResponseResultType { + /// Default zero value enumeration + Unspecified = 0, + /// The message did not call the IBC application callbacks (because, for example, the packet had already been relayed) + Noop = 1, + /// The message was executed successfully + Success = 2, +} +impl ResponseResultType { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. 
+ pub fn as_str_name(&self) -> &'static str { + match self { + Self::Unspecified => "RESPONSE_RESULT_TYPE_UNSPECIFIED", + Self::Noop => "RESPONSE_RESULT_TYPE_NOOP", + Self::Success => "RESPONSE_RESULT_TYPE_SUCCESS", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "RESPONSE_RESULT_TYPE_UNSPECIFIED" => Some(Self::Unspecified), + "RESPONSE_RESULT_TYPE_NOOP" => Some(Self::Noop), + "RESPONSE_RESULT_TYPE_SUCCESS" => Some(Self::Success), + _ => None, + } + } +} +/// Generated client implementations. +pub mod msg_client { + #![allow( + unused_variables, + dead_code, + missing_docs, + clippy::wildcard_imports, + clippy::let_unit_value, + )] + use tonic::codegen::*; + use tonic::codegen::http::Uri; + /// Msg defines the ibc/channel Msg service. + #[derive(Debug, Clone)] + pub struct MsgClient { + inner: tonic::client::Grpc, + } + impl MsgClient { + /// Attempt to create a new client by connecting to a given endpoint. 
+ pub async fn connect(dst: D) -> Result + where + D: TryInto, + D::Error: Into, + { + let conn = tonic::transport::Endpoint::new(dst)?.connect().await?; + Ok(Self::new(conn)) + } + } + impl MsgClient + where + T: tonic::client::GrpcService, + T::Error: Into, + T::ResponseBody: Body + std::marker::Send + 'static, + ::Error: Into + std::marker::Send, + { + pub fn new(inner: T) -> Self { + let inner = tonic::client::Grpc::new(inner); + Self { inner } + } + pub fn with_origin(inner: T, origin: Uri) -> Self { + let inner = tonic::client::Grpc::with_origin(inner, origin); + Self { inner } + } + pub fn with_interceptor( + inner: T, + interceptor: F, + ) -> MsgClient> + where + F: tonic::service::Interceptor, + T::ResponseBody: Default, + T: tonic::codegen::Service< + http::Request, + Response = http::Response< + >::ResponseBody, + >, + >, + , + >>::Error: Into + std::marker::Send + std::marker::Sync, + { + MsgClient::new(InterceptedService::new(inner, interceptor)) + } + /// Compress requests with the given encoding. + /// + /// This requires the server to support it otherwise it might respond with an + /// error. + #[must_use] + pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.send_compressed(encoding); + self + } + /// Enable decompressing responses. + #[must_use] + pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.accept_compressed(encoding); + self + } + /// Limits the maximum size of a decoded message. + /// + /// Default: `4MB` + #[must_use] + pub fn max_decoding_message_size(mut self, limit: usize) -> Self { + self.inner = self.inner.max_decoding_message_size(limit); + self + } + /// Limits the maximum size of an encoded message. 
+ /// + /// Default: `usize::MAX` + #[must_use] + pub fn max_encoding_message_size(mut self, limit: usize) -> Self { + self.inner = self.inner.max_encoding_message_size(limit); + self + } + /// ChannelOpenInit defines a rpc handler method for MsgChannelOpenInit. + pub async fn channel_open_init( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/ibc.core.channel.v1.Msg/ChannelOpenInit", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("ibc.core.channel.v1.Msg", "ChannelOpenInit")); + self.inner.unary(req, path, codec).await + } + /// ChannelOpenTry defines a rpc handler method for MsgChannelOpenTry. + pub async fn channel_open_try( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/ibc.core.channel.v1.Msg/ChannelOpenTry", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("ibc.core.channel.v1.Msg", "ChannelOpenTry")); + self.inner.unary(req, path, codec).await + } + /// ChannelOpenAck defines a rpc handler method for MsgChannelOpenAck. 
+ pub async fn channel_open_ack( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/ibc.core.channel.v1.Msg/ChannelOpenAck", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("ibc.core.channel.v1.Msg", "ChannelOpenAck")); + self.inner.unary(req, path, codec).await + } + /// ChannelOpenConfirm defines a rpc handler method for MsgChannelOpenConfirm. + pub async fn channel_open_confirm( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/ibc.core.channel.v1.Msg/ChannelOpenConfirm", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new("ibc.core.channel.v1.Msg", "ChannelOpenConfirm"), + ); + self.inner.unary(req, path, codec).await + } + /// ChannelCloseInit defines a rpc handler method for MsgChannelCloseInit. 
+ pub async fn channel_close_init( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/ibc.core.channel.v1.Msg/ChannelCloseInit", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("ibc.core.channel.v1.Msg", "ChannelCloseInit")); + self.inner.unary(req, path, codec).await + } + /// ChannelCloseConfirm defines a rpc handler method for + /// MsgChannelCloseConfirm. + pub async fn channel_close_confirm( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/ibc.core.channel.v1.Msg/ChannelCloseConfirm", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new("ibc.core.channel.v1.Msg", "ChannelCloseConfirm"), + ); + self.inner.unary(req, path, codec).await + } + /// RecvPacket defines a rpc handler method for MsgRecvPacket. 
+ pub async fn recv_packet( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/ibc.core.channel.v1.Msg/RecvPacket", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("ibc.core.channel.v1.Msg", "RecvPacket")); + self.inner.unary(req, path, codec).await + } + /// Timeout defines a rpc handler method for MsgTimeout. + pub async fn timeout( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/ibc.core.channel.v1.Msg/Timeout", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("ibc.core.channel.v1.Msg", "Timeout")); + self.inner.unary(req, path, codec).await + } + /// TimeoutOnClose defines a rpc handler method for MsgTimeoutOnClose. 
+ pub async fn timeout_on_close( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/ibc.core.channel.v1.Msg/TimeoutOnClose", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("ibc.core.channel.v1.Msg", "TimeoutOnClose")); + self.inner.unary(req, path, codec).await + } + /// Acknowledgement defines a rpc handler method for MsgAcknowledgement. + pub async fn acknowledgement( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/ibc.core.channel.v1.Msg/Acknowledgement", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("ibc.core.channel.v1.Msg", "Acknowledgement")); + self.inner.unary(req, path, codec).await + } + /// Transfer defines a rpc handler method for MsgTransfer. 
+ pub async fn transfer( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/ibc.core.channel.v1.Msg/Transfer", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("ibc.core.channel.v1.Msg", "Transfer")); + self.inner.unary(req, path, codec).await + } + /// TimeoutRefresh defines a rpc handler method for MsgTimeoutRefresh. + pub async fn timeout_refresh( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/ibc.core.channel.v1.Msg/TimeoutRefresh", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("ibc.core.channel.v1.Msg", "TimeoutRefresh")); + self.inner.unary(req, path, codec).await + } + } +} diff --git a/crates/relayer/src/chain/cardano/generated/ibc.core.client.v1.rs b/crates/relayer/src/chain/cardano/generated/ibc.core.client.v1.rs new file mode 100644 index 0000000000..585084ce49 --- /dev/null +++ b/crates/relayer/src/chain/cardano/generated/ibc.core.client.v1.rs @@ -0,0 +1,1000 @@ +// This file is @generated by prost-build. +/// IdentifiedClientState defines a client state with an additional client +/// identifier field. 
+#[derive(Clone, PartialEq, ::prost::Message)] +pub struct IdentifiedClientState { + /// client identifier + #[prost(string, tag = "1")] + pub client_id: ::prost::alloc::string::String, + /// client state + #[prost(message, optional, tag = "2")] + pub client_state: ::core::option::Option<::prost_types::Any>, +} +/// ConsensusStateWithHeight defines a consensus state with an additional height +/// field. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ConsensusStateWithHeight { + /// consensus state height + #[prost(message, optional, tag = "1")] + pub height: ::core::option::Option, + /// consensus state + #[prost(message, optional, tag = "2")] + pub consensus_state: ::core::option::Option<::prost_types::Any>, +} +/// ClientConsensusStates defines all the stored consensus states for a given +/// client. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ClientConsensusStates { + /// client identifier + #[prost(string, tag = "1")] + pub client_id: ::prost::alloc::string::String, + /// consensus states and their heights associated with the client + #[prost(message, repeated, tag = "2")] + pub consensus_states: ::prost::alloc::vec::Vec, +} +/// ClientUpdateProposal is a governance proposal. If it passes, the substitute +/// client's latest consensus state is copied over to the subject client. The proposal +/// handler may fail if the subject and the substitute do not match in client and +/// chain parameters (with exception to latest height, frozen height, and chain-id). 
+#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ClientUpdateProposal { + /// the title of the update proposal + #[prost(string, tag = "1")] + pub title: ::prost::alloc::string::String, + /// the description of the proposal + #[prost(string, tag = "2")] + pub description: ::prost::alloc::string::String, + /// the client identifier for the client to be updated if the proposal passes + #[prost(string, tag = "3")] + pub subject_client_id: ::prost::alloc::string::String, + /// the substitute client identifier for the client standing in for the subject + /// client + #[prost(string, tag = "4")] + pub substitute_client_id: ::prost::alloc::string::String, +} +/// UpgradeProposal is a gov Content type for initiating an IBC breaking +/// upgrade. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct UpgradeProposal { + #[prost(string, tag = "1")] + pub title: ::prost::alloc::string::String, + #[prost(string, tag = "2")] + pub description: ::prost::alloc::string::String, + #[prost(message, optional, tag = "3")] + pub plan: ::core::option::Option< + super::super::super::super::cosmos::upgrade::v1beta1::Plan, + >, + /// An UpgradedClientState must be provided to perform an IBC breaking upgrade. + /// This will make the chain commit to the correct upgraded (self) client state + /// before the upgrade occurs, so that connecting chains can verify that the + /// new upgraded client is valid by verifying a proof on the previous version + /// of the chain. This will allow IBC connections to persist smoothly across + /// planned chain upgrades + #[prost(message, optional, tag = "4")] + pub upgraded_client_state: ::core::option::Option<::prost_types::Any>, +} +/// Height is a monotonically increasing data type +/// that can be compared against another Height for the purposes of updating and +/// freezing clients +/// +/// Normally the RevisionHeight is incremented at each height while keeping +/// RevisionNumber the same. 
However some consensus algorithms may choose to +/// reset the height in certain conditions e.g. hard forks, state-machine +/// breaking changes In these cases, the RevisionNumber is incremented so that +/// height continues to be monitonically increasing even as the RevisionHeight +/// gets reset +#[derive(Clone, Copy, PartialEq, ::prost::Message)] +pub struct Height { + /// the revision that the client is currently on + #[prost(uint64, tag = "1")] + pub revision_number: u64, + /// the height within the given revision + #[prost(uint64, tag = "2")] + pub revision_height: u64, +} +/// Params defines the set of IBC light client parameters. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Params { + /// allowed_clients defines the list of allowed client state types. + #[prost(string, repeated, tag = "1")] + pub allowed_clients: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, +} +#[derive(Clone, Copy, PartialEq, ::prost::Message)] +pub struct QueryLatestHeightRequest {} +#[derive(Clone, Copy, PartialEq, ::prost::Message)] +pub struct QueryLatestHeightResponse { + #[prost(uint64, tag = "1")] + pub height: u64, +} +/// QueryClientStateRequest is the request type for the Query/ClientState RPC +/// method +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct QueryClientStateRequest { + /// client state unique identifier + #[prost(string, tag = "1")] + pub client_id: ::prost::alloc::string::String, +} +/// QueryClientStateResponse is the response type for the Query/ClientState RPC +/// method. Besides the client state, it includes a proof and the height from +/// which the proof was retrieved. 
+#[derive(Clone, PartialEq, ::prost::Message)] +pub struct QueryClientStateResponse { + /// client state associated with the request identifier + #[prost(message, optional, tag = "1")] + pub client_state: ::core::option::Option<::prost_types::Any>, + /// merkle proof of existence + #[prost(bytes = "vec", tag = "2")] + pub proof: ::prost::alloc::vec::Vec, + /// height at which the proof was retrieved + #[prost(message, optional, tag = "3")] + pub proof_height: ::core::option::Option, +} +/// QueryClientStatesRequest is the request type for the Query/ClientStates RPC +/// method +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct QueryClientStatesRequest { + /// pagination request + #[prost(message, optional, tag = "1")] + pub pagination: ::core::option::Option< + super::super::super::super::cosmos::base::query::v1beta1::PageRequest, + >, +} +/// QueryClientStatesResponse is the response type for the Query/ClientStates RPC +/// method. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct QueryClientStatesResponse { + /// list of stored ClientStates of the chain. + #[prost(message, repeated, tag = "1")] + pub client_states: ::prost::alloc::vec::Vec, + /// pagination response + #[prost(message, optional, tag = "2")] + pub pagination: ::core::option::Option< + super::super::super::super::cosmos::base::query::v1beta1::PageResponse, + >, +} +/// QueryConsensusStateRequest is the request type for the Query/ConsensusState +/// RPC method. Besides the consensus state, it includes a proof and the height +/// from which the proof was retrieved. 
+#[derive(Clone, PartialEq, ::prost::Message)] +pub struct QueryConsensusStateRequest { + /// client identifier + #[prost(string, tag = "1")] + pub client_id: ::prost::alloc::string::String, + /// consensus state revision number + #[prost(uint64, tag = "2")] + pub revision_number: u64, + /// consensus state revision height + #[prost(uint64, tag = "3")] + pub revision_height: u64, + /// latest_height overrides the height fields and queries the latest stored ConsensusState + #[prost(bool, tag = "4")] + pub latest_height: bool, +} +/// QueryConsensusStateResponse is the response type for the Query/ConsensusState +/// RPC method +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct QueryConsensusStateResponse { + /// consensus state associated with the client identifier at the given height + #[prost(message, optional, tag = "1")] + pub consensus_state: ::core::option::Option<::prost_types::Any>, + /// merkle proof of existence + #[prost(bytes = "vec", tag = "2")] + pub proof: ::prost::alloc::vec::Vec, + /// height at which the proof was retrieved + #[prost(message, optional, tag = "3")] + pub proof_height: ::core::option::Option, +} +/// QueryConsensusStatesRequest is the request type for the Query/ConsensusStates +/// RPC method. 
+#[derive(Clone, PartialEq, ::prost::Message)] +pub struct QueryConsensusStatesRequest { + /// client identifier + #[prost(string, tag = "1")] + pub client_id: ::prost::alloc::string::String, + /// pagination request + #[prost(message, optional, tag = "2")] + pub pagination: ::core::option::Option< + super::super::super::super::cosmos::base::query::v1beta1::PageRequest, + >, +} +/// QueryConsensusStatesResponse is the response type for the +/// Query/ConsensusStates RPC method +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct QueryConsensusStatesResponse { + /// consensus states associated with the identifier + #[prost(message, repeated, tag = "1")] + pub consensus_states: ::prost::alloc::vec::Vec, + /// pagination response + #[prost(message, optional, tag = "2")] + pub pagination: ::core::option::Option< + super::super::super::super::cosmos::base::query::v1beta1::PageResponse, + >, +} +/// QueryConsensusStateHeightsRequest is the request type for Query/ConsensusStateHeights +/// RPC method. 
+#[derive(Clone, PartialEq, ::prost::Message)] +pub struct QueryConsensusStateHeightsRequest { + /// client identifier + #[prost(string, tag = "1")] + pub client_id: ::prost::alloc::string::String, + /// pagination request + #[prost(message, optional, tag = "2")] + pub pagination: ::core::option::Option< + super::super::super::super::cosmos::base::query::v1beta1::PageRequest, + >, +} +/// QueryConsensusStateHeightsResponse is the response type for the +/// Query/ConsensusStateHeights RPC method +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct QueryConsensusStateHeightsResponse { + /// consensus state heights + #[prost(message, repeated, tag = "1")] + pub consensus_state_heights: ::prost::alloc::vec::Vec, + /// pagination response + #[prost(message, optional, tag = "2")] + pub pagination: ::core::option::Option< + super::super::super::super::cosmos::base::query::v1beta1::PageResponse, + >, +} +/// QueryClientStatusRequest is the request type for the Query/ClientStatus RPC +/// method +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct QueryClientStatusRequest { + /// client unique identifier + #[prost(string, tag = "1")] + pub client_id: ::prost::alloc::string::String, +} +/// QueryClientStatusResponse is the response type for the Query/ClientStatus RPC +/// method. It returns the current status of the IBC client. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct QueryClientStatusResponse { + #[prost(string, tag = "1")] + pub status: ::prost::alloc::string::String, +} +/// QueryClientParamsRequest is the request type for the Query/ClientParams RPC +/// method. +#[derive(Clone, Copy, PartialEq, ::prost::Message)] +pub struct QueryClientParamsRequest {} +/// QueryClientParamsResponse is the response type for the Query/ClientParams RPC +/// method. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct QueryClientParamsResponse { + /// params defines the parameters of the module. 
+ #[prost(message, optional, tag = "1")] + pub params: ::core::option::Option, +} +/// QueryUpgradedClientStateRequest is the request type for the +/// Query/UpgradedClientState RPC method +#[derive(Clone, Copy, PartialEq, ::prost::Message)] +pub struct QueryUpgradedClientStateRequest {} +/// QueryUpgradedClientStateResponse is the response type for the +/// Query/UpgradedClientState RPC method. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct QueryUpgradedClientStateResponse { + /// client state associated with the request identifier + #[prost(message, optional, tag = "1")] + pub upgraded_client_state: ::core::option::Option<::prost_types::Any>, +} +/// QueryUpgradedConsensusStateRequest is the request type for the +/// Query/UpgradedConsensusState RPC method +#[derive(Clone, Copy, PartialEq, ::prost::Message)] +pub struct QueryUpgradedConsensusStateRequest {} +/// QueryUpgradedConsensusStateResponse is the response type for the +/// Query/UpgradedConsensusState RPC method. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct QueryUpgradedConsensusStateResponse { + /// Consensus state associated with the request identifier + #[prost(message, optional, tag = "1")] + pub upgraded_consensus_state: ::core::option::Option<::prost_types::Any>, +} +#[derive(Clone, Copy, PartialEq, ::prost::Message)] +pub struct QueryNewClientRequest { + /// Block number to query + #[prost(uint64, tag = "1")] + pub height: u64, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct QueryNewClientResponse { + /// client state associated with the request identifier + #[prost(message, optional, tag = "1")] + pub client_state: ::core::option::Option<::prost_types::Any>, + /// consensus state associated with the request identifier + #[prost(message, optional, tag = "2")] + pub consensus_state: ::core::option::Option<::prost_types::Any>, +} +#[derive(Clone, Copy, PartialEq, ::prost::Message)] +pub struct QueryBlockDataRequest { + /// Block number to query + 
#[prost(uint64, tag = "1")] + pub height: u64, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct QueryBlockDataResponse { + /// block data associated with the request identifier + #[prost(message, optional, tag = "1")] + pub block_data: ::core::option::Option<::prost_types::Any>, +} +/// Generated client implementations. +pub mod query_client { + #![allow( + unused_variables, + dead_code, + missing_docs, + clippy::wildcard_imports, + clippy::let_unit_value, + )] + use tonic::codegen::*; + use tonic::codegen::http::Uri; + /// Query provides defines the gRPC querier service + #[derive(Debug, Clone)] + pub struct QueryClient { + inner: tonic::client::Grpc, + } + impl QueryClient { + /// Attempt to create a new client by connecting to a given endpoint. + pub async fn connect(dst: D) -> Result + where + D: TryInto, + D::Error: Into, + { + let conn = tonic::transport::Endpoint::new(dst)?.connect().await?; + Ok(Self::new(conn)) + } + } + impl QueryClient + where + T: tonic::client::GrpcService, + T::Error: Into, + T::ResponseBody: Body + std::marker::Send + 'static, + ::Error: Into + std::marker::Send, + { + pub fn new(inner: T) -> Self { + let inner = tonic::client::Grpc::new(inner); + Self { inner } + } + pub fn with_origin(inner: T, origin: Uri) -> Self { + let inner = tonic::client::Grpc::with_origin(inner, origin); + Self { inner } + } + pub fn with_interceptor( + inner: T, + interceptor: F, + ) -> QueryClient> + where + F: tonic::service::Interceptor, + T::ResponseBody: Default, + T: tonic::codegen::Service< + http::Request, + Response = http::Response< + >::ResponseBody, + >, + >, + , + >>::Error: Into + std::marker::Send + std::marker::Sync, + { + QueryClient::new(InterceptedService::new(inner, interceptor)) + } + /// Compress requests with the given encoding. + /// + /// This requires the server to support it otherwise it might respond with an + /// error. 
+ #[must_use] + pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.send_compressed(encoding); + self + } + /// Enable decompressing responses. + #[must_use] + pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.accept_compressed(encoding); + self + } + /// Limits the maximum size of a decoded message. + /// + /// Default: `4MB` + #[must_use] + pub fn max_decoding_message_size(mut self, limit: usize) -> Self { + self.inner = self.inner.max_decoding_message_size(limit); + self + } + /// Limits the maximum size of an encoded message. + /// + /// Default: `usize::MAX` + #[must_use] + pub fn max_encoding_message_size(mut self, limit: usize) -> Self { + self.inner = self.inner.max_encoding_message_size(limit); + self + } + /// ClientState queries an IBC light client. + pub async fn client_state( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/ibc.core.client.v1.Query/ClientState", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("ibc.core.client.v1.Query", "ClientState")); + self.inner.unary(req, path, codec).await + } + /// ClientStates queries all the IBC light clients of a chain. 
+ pub async fn client_states( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/ibc.core.client.v1.Query/ClientStates", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("ibc.core.client.v1.Query", "ClientStates")); + self.inner.unary(req, path, codec).await + } + /// ConsensusState queries a consensus state associated with a client state at + /// a given height. + pub async fn consensus_state( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/ibc.core.client.v1.Query/ConsensusState", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("ibc.core.client.v1.Query", "ConsensusState")); + self.inner.unary(req, path, codec).await + } + /// ConsensusStates queries all the consensus state associated with a given + /// client. 
+ pub async fn consensus_states( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/ibc.core.client.v1.Query/ConsensusStates", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("ibc.core.client.v1.Query", "ConsensusStates")); + self.inner.unary(req, path, codec).await + } + /// ConsensusStateHeights queries the height of every consensus states associated with a given client. + pub async fn consensus_state_heights( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/ibc.core.client.v1.Query/ConsensusStateHeights", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new("ibc.core.client.v1.Query", "ConsensusStateHeights"), + ); + self.inner.unary(req, path, codec).await + } + /// Status queries the status of an IBC client. 
+ pub async fn client_status( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/ibc.core.client.v1.Query/ClientStatus", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("ibc.core.client.v1.Query", "ClientStatus")); + self.inner.unary(req, path, codec).await + } + /// ClientParams queries all parameters of the ibc client submodule. + pub async fn client_params( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/ibc.core.client.v1.Query/ClientParams", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("ibc.core.client.v1.Query", "ClientParams")); + self.inner.unary(req, path, codec).await + } + /// UpgradedClientState queries an Upgraded IBC light client. 
+ pub async fn upgraded_client_state( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/ibc.core.client.v1.Query/UpgradedClientState", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new("ibc.core.client.v1.Query", "UpgradedClientState"), + ); + self.inner.unary(req, path, codec).await + } + /// UpgradedConsensusState queries an Upgraded IBC consensus state. + pub async fn upgraded_consensus_state( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/ibc.core.client.v1.Query/UpgradedConsensusState", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new("ibc.core.client.v1.Query", "UpgradedConsensusState"), + ); + self.inner.unary(req, path, codec).await + } + pub async fn latest_height( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/ibc.core.client.v1.Query/LatestHeight", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("ibc.core.client.v1.Query", "LatestHeight")); + self.inner.unary(req, path, codec).await + } + pub 
async fn new_client( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/ibc.core.client.v1.Query/NewClient", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("ibc.core.client.v1.Query", "NewClient")); + self.inner.unary(req, path, codec).await + } + pub async fn block_data( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/ibc.core.client.v1.Query/BlockData", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("ibc.core.client.v1.Query", "BlockData")); + self.inner.unary(req, path, codec).await + } + } +} +/// MsgCreateClient defines a message to create an IBC client +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct MsgCreateClient { + /// light client state + #[prost(message, optional, tag = "1")] + pub client_state: ::core::option::Option<::prost_types::Any>, + /// consensus state associated with the client that corresponds to a given + /// height. + #[prost(message, optional, tag = "2")] + pub consensus_state: ::core::option::Option<::prost_types::Any>, + /// signer address + #[prost(string, tag = "3")] + pub signer: ::prost::alloc::string::String, +} +/// MsgCreateClientResponse defines the Msg/CreateClient response type. 
+#[derive(Clone, PartialEq, ::prost::Message)] +pub struct MsgCreateClientResponse { + #[prost(message, optional, tag = "1")] + pub unsigned_tx: ::core::option::Option<::prost_types::Any>, + #[prost(string, tag = "2")] + pub client_id: ::prost::alloc::string::String, +} +/// MsgUpdateClient defines an sdk.Msg to update a IBC client state using +/// the given client message. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct MsgUpdateClient { + /// client unique identifier + #[prost(string, tag = "1")] + pub client_id: ::prost::alloc::string::String, + /// client message to update the light client + #[prost(message, optional, tag = "2")] + pub client_message: ::core::option::Option<::prost_types::Any>, + /// signer address + #[prost(string, tag = "3")] + pub signer: ::prost::alloc::string::String, +} +/// MsgUpdateClientResponse defines the Msg/UpdateClient response type. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct MsgUpdateClientResponse { + #[prost(message, optional, tag = "1")] + pub unsigned_tx: ::core::option::Option<::prost_types::Any>, +} +/// MsgUpgradeClient defines an sdk.Msg to upgrade an IBC client to a new client +/// state +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct MsgUpgradeClient { + /// client unique identifier + #[prost(string, tag = "1")] + pub client_id: ::prost::alloc::string::String, + /// upgraded client state + #[prost(message, optional, tag = "2")] + pub client_state: ::core::option::Option<::prost_types::Any>, + /// upgraded consensus state, only contains enough information to serve as a + /// basis of trust in update logic + #[prost(message, optional, tag = "3")] + pub consensus_state: ::core::option::Option<::prost_types::Any>, + /// proof that old chain committed to new client + #[prost(bytes = "vec", tag = "4")] + pub proof_upgrade_client: ::prost::alloc::vec::Vec, + /// proof that old chain committed to new consensus state + #[prost(bytes = "vec", tag = "5")] + pub proof_upgrade_consensus_state: 
::prost::alloc::vec::Vec, + /// signer address + #[prost(string, tag = "6")] + pub signer: ::prost::alloc::string::String, +} +/// MsgUpgradeClientResponse defines the Msg/UpgradeClient response type. +#[derive(Clone, Copy, PartialEq, ::prost::Message)] +pub struct MsgUpgradeClientResponse {} +/// MsgSubmitMisbehaviour defines an sdk.Msg type that submits Evidence for +/// light client misbehaviour. +/// Warning: DEPRECATED +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct MsgSubmitMisbehaviour { + /// client unique identifier + #[deprecated] + #[prost(string, tag = "1")] + pub client_id: ::prost::alloc::string::String, + /// misbehaviour used for freezing the light client + #[deprecated] + #[prost(message, optional, tag = "2")] + pub misbehaviour: ::core::option::Option<::prost_types::Any>, + /// signer address + #[deprecated] + #[prost(string, tag = "3")] + pub signer: ::prost::alloc::string::String, +} +/// MsgSubmitMisbehaviourResponse defines the Msg/SubmitMisbehaviour response +/// type. +#[derive(Clone, Copy, PartialEq, ::prost::Message)] +pub struct MsgSubmitMisbehaviourResponse {} +/// Generated client implementations. +pub mod msg_client { + #![allow( + unused_variables, + dead_code, + missing_docs, + clippy::wildcard_imports, + clippy::let_unit_value, + )] + use tonic::codegen::*; + use tonic::codegen::http::Uri; + /// Msg defines the ibc/client Msg service. + #[derive(Debug, Clone)] + pub struct MsgClient { + inner: tonic::client::Grpc, + } + impl MsgClient { + /// Attempt to create a new client by connecting to a given endpoint. 
+ pub async fn connect(dst: D) -> Result + where + D: TryInto, + D::Error: Into, + { + let conn = tonic::transport::Endpoint::new(dst)?.connect().await?; + Ok(Self::new(conn)) + } + } + impl MsgClient + where + T: tonic::client::GrpcService, + T::Error: Into, + T::ResponseBody: Body + std::marker::Send + 'static, + ::Error: Into + std::marker::Send, + { + pub fn new(inner: T) -> Self { + let inner = tonic::client::Grpc::new(inner); + Self { inner } + } + pub fn with_origin(inner: T, origin: Uri) -> Self { + let inner = tonic::client::Grpc::with_origin(inner, origin); + Self { inner } + } + pub fn with_interceptor( + inner: T, + interceptor: F, + ) -> MsgClient> + where + F: tonic::service::Interceptor, + T::ResponseBody: Default, + T: tonic::codegen::Service< + http::Request, + Response = http::Response< + >::ResponseBody, + >, + >, + , + >>::Error: Into + std::marker::Send + std::marker::Sync, + { + MsgClient::new(InterceptedService::new(inner, interceptor)) + } + /// Compress requests with the given encoding. + /// + /// This requires the server to support it otherwise it might respond with an + /// error. + #[must_use] + pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.send_compressed(encoding); + self + } + /// Enable decompressing responses. + #[must_use] + pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.accept_compressed(encoding); + self + } + /// Limits the maximum size of a decoded message. + /// + /// Default: `4MB` + #[must_use] + pub fn max_decoding_message_size(mut self, limit: usize) -> Self { + self.inner = self.inner.max_decoding_message_size(limit); + self + } + /// Limits the maximum size of an encoded message. 
+ /// + /// Default: `usize::MAX` + #[must_use] + pub fn max_encoding_message_size(mut self, limit: usize) -> Self { + self.inner = self.inner.max_encoding_message_size(limit); + self + } + /// CreateClient defines a rpc handler method for MsgCreateClient. + pub async fn create_client( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/ibc.core.client.v1.Msg/CreateClient", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("ibc.core.client.v1.Msg", "CreateClient")); + self.inner.unary(req, path, codec).await + } + /// UpdateClient defines a rpc handler method for MsgUpdateClient. + pub async fn update_client( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/ibc.core.client.v1.Msg/UpdateClient", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("ibc.core.client.v1.Msg", "UpdateClient")); + self.inner.unary(req, path, codec).await + } + /// UpgradeClient defines a rpc handler method for MsgUpgradeClient. 
+ pub async fn upgrade_client( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/ibc.core.client.v1.Msg/UpgradeClient", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("ibc.core.client.v1.Msg", "UpgradeClient")); + self.inner.unary(req, path, codec).await + } + /// SubmitMisbehaviour defines a rpc handler method for MsgSubmitMisbehaviour. + pub async fn submit_misbehaviour( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/ibc.core.client.v1.Msg/SubmitMisbehaviour", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("ibc.core.client.v1.Msg", "SubmitMisbehaviour")); + self.inner.unary(req, path, codec).await + } + } +} diff --git a/crates/relayer/src/chain/cardano/generated/ibc.core.commitment.v1.rs b/crates/relayer/src/chain/cardano/generated/ibc.core.commitment.v1.rs new file mode 100644 index 0000000000..a430513750 --- /dev/null +++ b/crates/relayer/src/chain/cardano/generated/ibc.core.commitment.v1.rs @@ -0,0 +1,36 @@ +// This file is @generated by prost-build. +/// MerkleRoot defines a merkle root hash. +/// In the Cosmos SDK, the AppHash of a block header becomes the root. 
+#[derive(Clone, PartialEq, ::prost::Message)] +pub struct MerkleRoot { + #[prost(bytes = "vec", tag = "1")] + pub hash: ::prost::alloc::vec::Vec, +} +/// MerklePrefix is merkle path prefixed to the key. +/// The constructed key from the Path and the key will be append(Path.KeyPath, +/// append(Path.KeyPrefix, key...)) +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct MerklePrefix { + #[prost(bytes = "vec", tag = "1")] + pub key_prefix: ::prost::alloc::vec::Vec, +} +/// MerklePath is the path used to verify commitment proofs, which can be an +/// arbitrary structured object (defined by a commitment type). +/// MerklePath is represented from root-to-leaf +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct MerklePath { + #[prost(string, repeated, tag = "1")] + pub key_path: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, +} +/// MerkleProof is a wrapper type over a chain of CommitmentProofs. +/// It demonstrates membership or non-membership for an element or set of +/// elements, verifiable in conjunction with a known commitment root. Proofs +/// should be succinct. +/// MerkleProofs are ordered from leaf-to-root +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct MerkleProof { + #[prost(message, repeated, tag = "1")] + pub proofs: ::prost::alloc::vec::Vec< + super::super::super::super::cosmos::ics23::v1::CommitmentProof, + >, +} diff --git a/crates/relayer/src/chain/cardano/generated/ibc.core.connection.v1.rs b/crates/relayer/src/chain/cardano/generated/ibc.core.connection.v1.rs new file mode 100644 index 0000000000..4f10bc7e16 --- /dev/null +++ b/crates/relayer/src/chain/cardano/generated/ibc.core.connection.v1.rs @@ -0,0 +1,472 @@ +// This file is @generated by prost-build. +/// ConnectionEnd defines a stateful object on a chain connected to another +/// separate one. +/// NOTE: there must only be 2 defined ConnectionEnds to establish +/// a connection between two chains. 
+#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ConnectionEnd { + /// client associated with this connection. + #[prost(string, tag = "1")] + pub client_id: ::prost::alloc::string::String, + /// IBC version which can be utilised to determine encodings or protocols for + /// channels or packets utilising this connection. + #[prost(message, repeated, tag = "2")] + pub versions: ::prost::alloc::vec::Vec, + /// current state of the connection end. + #[prost(enumeration = "State", tag = "3")] + pub state: i32, + /// counterparty chain associated with this connection. + #[prost(message, optional, tag = "4")] + pub counterparty: ::core::option::Option, + /// delay period that must pass before a consensus state can be used for + /// packet-verification NOTE: delay period logic is only implemented by some + /// clients. + #[prost(uint64, tag = "5")] + pub delay_period: u64, +} +/// IdentifiedConnection defines a connection with additional connection +/// identifier field. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct IdentifiedConnection { + /// connection identifier. + #[prost(string, tag = "1")] + pub id: ::prost::alloc::string::String, + /// client associated with this connection. + #[prost(string, tag = "2")] + pub client_id: ::prost::alloc::string::String, + /// IBC version which can be utilised to determine encodings or protocols for + /// channels or packets utilising this connection + #[prost(message, repeated, tag = "3")] + pub versions: ::prost::alloc::vec::Vec, + /// current state of the connection end. + #[prost(enumeration = "State", tag = "4")] + pub state: i32, + /// counterparty chain associated with this connection. + #[prost(message, optional, tag = "5")] + pub counterparty: ::core::option::Option, + /// delay period associated with this connection. + #[prost(uint64, tag = "6")] + pub delay_period: u64, +} +/// Counterparty defines the counterparty chain associated with a connection end. 
+#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Counterparty { + /// identifies the client on the counterparty chain associated with a given + /// connection. + #[prost(string, tag = "1")] + pub client_id: ::prost::alloc::string::String, + /// identifies the connection end on the counterparty chain associated with a + /// given connection. + #[prost(string, tag = "2")] + pub connection_id: ::prost::alloc::string::String, + /// commitment merkle prefix of the counterparty chain. + #[prost(message, optional, tag = "3")] + pub prefix: ::core::option::Option, +} +/// ClientPaths define all the connection paths for a client state. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ClientPaths { + /// list of connection paths + #[prost(string, repeated, tag = "1")] + pub paths: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, +} +/// ConnectionPaths define all the connection paths for a given client state. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ConnectionPaths { + /// client state unique identifier + #[prost(string, tag = "1")] + pub client_id: ::prost::alloc::string::String, + /// list of connection paths + #[prost(string, repeated, tag = "2")] + pub paths: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, +} +/// Version defines the versioning scheme used to negotiate the IBC verison in +/// the connection handshake. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Version { + /// unique version identifier + #[prost(string, tag = "1")] + pub identifier: ::prost::alloc::string::String, + /// list of features compatible with the specified identifier + #[prost(string, repeated, tag = "2")] + pub features: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, +} +/// Params defines the set of Connection parameters. +#[derive(Clone, Copy, PartialEq, ::prost::Message)] +pub struct Params { + /// maximum expected time per block (in nanoseconds), used to enforce block delay. 
This parameter should reflect the + /// largest amount of time that the chain might reasonably take to produce the next block under normal operating + /// conditions. A safe choice is 3-5x the expected time per block. + #[prost(uint64, tag = "1")] + pub max_expected_time_per_block: u64, +} +/// State defines if a connection is in one of the following states: +/// INIT, TRYOPEN, OPEN or UNINITIALIZED. +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] +#[repr(i32)] +pub enum State { + /// Default State + UninitializedUnspecified = 0, + /// A connection end has just started the opening handshake. + Init = 1, + /// A connection end has acknowledged the handshake step on the counterparty + /// chain. + Tryopen = 2, + /// A connection end has completed the handshake. + Open = 3, +} +impl State { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + Self::UninitializedUnspecified => "STATE_UNINITIALIZED_UNSPECIFIED", + Self::Init => "STATE_INIT", + Self::Tryopen => "STATE_TRYOPEN", + Self::Open => "STATE_OPEN", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "STATE_UNINITIALIZED_UNSPECIFIED" => Some(Self::UninitializedUnspecified), + "STATE_INIT" => Some(Self::Init), + "STATE_TRYOPEN" => Some(Self::Tryopen), + "STATE_OPEN" => Some(Self::Open), + _ => None, + } + } +} +/// MsgConnectionOpenInit defines the msg sent by an account on Chain A to +/// initialize a connection with Chain B. 
+#[derive(Clone, PartialEq, ::prost::Message)] +pub struct MsgConnectionOpenInit { + #[prost(string, tag = "1")] + pub client_id: ::prost::alloc::string::String, + #[prost(message, optional, tag = "2")] + pub counterparty: ::core::option::Option, + #[prost(message, optional, tag = "3")] + pub version: ::core::option::Option, + #[prost(uint64, tag = "4")] + pub delay_period: u64, + #[prost(string, tag = "5")] + pub signer: ::prost::alloc::string::String, +} +/// MsgConnectionOpenInitResponse defines the Msg/ConnectionOpenInit response +/// type. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct MsgConnectionOpenInitResponse { + #[prost(message, optional, tag = "1")] + pub unsigned_tx: ::core::option::Option<::prost_types::Any>, +} +/// MsgConnectionOpenTry defines a msg sent by a Relayer to try to open a +/// connection on Chain B. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct MsgConnectionOpenTry { + #[prost(string, tag = "1")] + pub client_id: ::prost::alloc::string::String, + /// Deprecated: this field is unused. Crossing hellos are no longer supported in core IBC. 
+ #[deprecated] + #[prost(string, tag = "2")] + pub previous_connection_id: ::prost::alloc::string::String, + #[prost(message, optional, tag = "3")] + pub client_state: ::core::option::Option<::prost_types::Any>, + #[prost(message, optional, tag = "4")] + pub counterparty: ::core::option::Option, + #[prost(uint64, tag = "5")] + pub delay_period: u64, + #[prost(message, repeated, tag = "6")] + pub counterparty_versions: ::prost::alloc::vec::Vec, + #[prost(message, optional, tag = "7")] + pub proof_height: ::core::option::Option, + /// proof of the initialization the connection on Chain A: `UNITIALIZED -> + /// INIT` + #[prost(bytes = "vec", tag = "8")] + pub proof_init: ::prost::alloc::vec::Vec, + /// proof of client state included in message + #[prost(bytes = "vec", tag = "9")] + pub proof_client: ::prost::alloc::vec::Vec, + /// proof of client consensus state + #[prost(bytes = "vec", tag = "10")] + pub proof_consensus: ::prost::alloc::vec::Vec, + #[prost(message, optional, tag = "11")] + pub consensus_height: ::core::option::Option, + #[prost(string, tag = "12")] + pub signer: ::prost::alloc::string::String, + /// optional proof data for host state machines that are unable to introspect their own consensus state + #[prost(bytes = "vec", tag = "13")] + pub host_consensus_state_proof: ::prost::alloc::vec::Vec, +} +/// MsgConnectionOpenTryResponse defines the Msg/ConnectionOpenTry response type. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct MsgConnectionOpenTryResponse { + #[prost(message, optional, tag = "1")] + pub unsigned_tx: ::core::option::Option<::prost_types::Any>, +} +/// MsgConnectionOpenAck defines a msg sent by a Relayer to Chain A to +/// acknowledge the change of connection state to TRYOPEN on Chain B. 
+#[derive(Clone, PartialEq, ::prost::Message)] +pub struct MsgConnectionOpenAck { + #[prost(string, tag = "1")] + pub connection_id: ::prost::alloc::string::String, + #[prost(string, tag = "2")] + pub counterparty_connection_id: ::prost::alloc::string::String, + #[prost(message, optional, tag = "3")] + pub version: ::core::option::Option, + #[prost(message, optional, tag = "4")] + pub client_state: ::core::option::Option<::prost_types::Any>, + #[prost(message, optional, tag = "5")] + pub proof_height: ::core::option::Option, + /// proof of the initialization the connection on Chain B: `UNITIALIZED -> + /// TRYOPEN` + #[prost(bytes = "vec", tag = "6")] + pub proof_try: ::prost::alloc::vec::Vec, + /// proof of client state included in message + #[prost(bytes = "vec", tag = "7")] + pub proof_client: ::prost::alloc::vec::Vec, + /// proof of client consensus state + #[prost(bytes = "vec", tag = "8")] + pub proof_consensus: ::prost::alloc::vec::Vec, + #[prost(message, optional, tag = "9")] + pub consensus_height: ::core::option::Option, + #[prost(string, tag = "10")] + pub signer: ::prost::alloc::string::String, + /// optional proof data for host state machines that are unable to introspect their own consensus state + #[prost(bytes = "vec", tag = "11")] + pub host_consensus_state_proof: ::prost::alloc::vec::Vec, +} +/// MsgConnectionOpenAckResponse defines the Msg/ConnectionOpenAck response type. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct MsgConnectionOpenAckResponse { + #[prost(message, optional, tag = "1")] + pub unsigned_tx: ::core::option::Option<::prost_types::Any>, +} +/// MsgConnectionOpenConfirm defines a msg sent by a Relayer to Chain B to +/// acknowledge the change of connection state to OPEN on Chain A. 
+#[derive(Clone, PartialEq, ::prost::Message)] +pub struct MsgConnectionOpenConfirm { + #[prost(string, tag = "1")] + pub connection_id: ::prost::alloc::string::String, + /// proof for the change of the connection state on Chain A: `INIT -> OPEN` + #[prost(bytes = "vec", tag = "2")] + pub proof_ack: ::prost::alloc::vec::Vec, + #[prost(message, optional, tag = "3")] + pub proof_height: ::core::option::Option, + #[prost(string, tag = "4")] + pub signer: ::prost::alloc::string::String, +} +/// MsgConnectionOpenConfirmResponse defines the Msg/ConnectionOpenConfirm +/// response type. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct MsgConnectionOpenConfirmResponse { + #[prost(message, optional, tag = "1")] + pub unsigned_tx: ::core::option::Option<::prost_types::Any>, +} +/// Generated client implementations. +pub mod msg_client { + #![allow( + unused_variables, + dead_code, + missing_docs, + clippy::wildcard_imports, + clippy::let_unit_value, + )] + use tonic::codegen::*; + use tonic::codegen::http::Uri; + /// Msg defines the ibc/connection Msg service. + #[derive(Debug, Clone)] + pub struct MsgClient { + inner: tonic::client::Grpc, + } + impl MsgClient { + /// Attempt to create a new client by connecting to a given endpoint. 
+ pub async fn connect(dst: D) -> Result + where + D: TryInto, + D::Error: Into, + { + let conn = tonic::transport::Endpoint::new(dst)?.connect().await?; + Ok(Self::new(conn)) + } + } + impl MsgClient + where + T: tonic::client::GrpcService, + T::Error: Into, + T::ResponseBody: Body + std::marker::Send + 'static, + ::Error: Into + std::marker::Send, + { + pub fn new(inner: T) -> Self { + let inner = tonic::client::Grpc::new(inner); + Self { inner } + } + pub fn with_origin(inner: T, origin: Uri) -> Self { + let inner = tonic::client::Grpc::with_origin(inner, origin); + Self { inner } + } + pub fn with_interceptor( + inner: T, + interceptor: F, + ) -> MsgClient> + where + F: tonic::service::Interceptor, + T::ResponseBody: Default, + T: tonic::codegen::Service< + http::Request, + Response = http::Response< + >::ResponseBody, + >, + >, + , + >>::Error: Into + std::marker::Send + std::marker::Sync, + { + MsgClient::new(InterceptedService::new(inner, interceptor)) + } + /// Compress requests with the given encoding. + /// + /// This requires the server to support it otherwise it might respond with an + /// error. + #[must_use] + pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.send_compressed(encoding); + self + } + /// Enable decompressing responses. + #[must_use] + pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.accept_compressed(encoding); + self + } + /// Limits the maximum size of a decoded message. + /// + /// Default: `4MB` + #[must_use] + pub fn max_decoding_message_size(mut self, limit: usize) -> Self { + self.inner = self.inner.max_decoding_message_size(limit); + self + } + /// Limits the maximum size of an encoded message. 
+ /// + /// Default: `usize::MAX` + #[must_use] + pub fn max_encoding_message_size(mut self, limit: usize) -> Self { + self.inner = self.inner.max_encoding_message_size(limit); + self + } + /// ConnectionOpenInit defines a rpc handler method for MsgConnectionOpenInit. + pub async fn connection_open_init( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/ibc.core.connection.v1.Msg/ConnectionOpenInit", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new("ibc.core.connection.v1.Msg", "ConnectionOpenInit"), + ); + self.inner.unary(req, path, codec).await + } + /// ConnectionOpenTry defines a rpc handler method for MsgConnectionOpenTry. + pub async fn connection_open_try( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/ibc.core.connection.v1.Msg/ConnectionOpenTry", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new("ibc.core.connection.v1.Msg", "ConnectionOpenTry"), + ); + self.inner.unary(req, path, codec).await + } + /// ConnectionOpenAck defines a rpc handler method for MsgConnectionOpenAck. 
+ pub async fn connection_open_ack( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/ibc.core.connection.v1.Msg/ConnectionOpenAck", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new("ibc.core.connection.v1.Msg", "ConnectionOpenAck"), + ); + self.inner.unary(req, path, codec).await + } + /// ConnectionOpenConfirm defines a rpc handler method for + /// MsgConnectionOpenConfirm. + pub async fn connection_open_confirm( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/ibc.core.connection.v1.Msg/ConnectionOpenConfirm", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new( + "ibc.core.connection.v1.Msg", + "ConnectionOpenConfirm", + ), + ); + self.inner.unary(req, path, codec).await + } + } +} diff --git a/crates/relayer/src/chain/cardano/generated/ibc.core.types.v1.rs b/crates/relayer/src/chain/cardano/generated/ibc.core.types.v1.rs new file mode 100644 index 0000000000..7b81ac8d86 --- /dev/null +++ b/crates/relayer/src/chain/cardano/generated/ibc.core.types.v1.rs @@ -0,0 +1,302 @@ +// This file is @generated by prost-build. 
+#[derive(Clone, PartialEq, ::prost::Message)] +pub struct EventAttribute { + #[prost(string, tag = "1")] + pub key: ::prost::alloc::string::String, + #[prost(string, tag = "2")] + pub value: ::prost::alloc::string::String, + #[prost(bool, tag = "3")] + pub index: bool, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Event { + #[prost(string, tag = "1")] + pub r#type: ::prost::alloc::string::String, + #[prost(message, repeated, tag = "2")] + pub event_attribute: ::prost::alloc::vec::Vec, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ResponseDeliverTx { + #[prost(uint32, tag = "1")] + pub code: u32, + #[prost(message, repeated, tag = "2")] + pub events: ::prost::alloc::vec::Vec, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ResultBlockResults { + /// height at which the proof was retrieved + #[prost(message, optional, tag = "1")] + pub height: ::core::option::Option, + /// txs result in blocks + #[prost(message, repeated, tag = "2")] + pub txs_results: ::prost::alloc::vec::Vec, +} +#[derive(Clone, Copy, PartialEq, ::prost::Message)] +pub struct BlockInfo { + #[prost(int64, tag = "1")] + pub height: i64, +} +#[derive(Clone, Copy, PartialEq, ::prost::Message)] +pub struct ResultBlockSearch { + #[prost(uint64, tag = "1")] + pub block_id: u64, + #[prost(message, optional, tag = "2")] + pub block: ::core::option::Option, +} +/// QueryBlockResultsRequest is the request type for the Query/BlockResults RPC method. +#[derive(Clone, Copy, PartialEq, ::prost::Message)] +pub struct QueryBlockResultsRequest { + #[prost(uint64, tag = "1")] + pub height: u64, +} +/// QueryBlockResultsResponse is the response type for the Query/BlockResults RPC method. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct QueryBlockResultsResponse { + /// params defines the parameters of the module. 
+ #[prost(message, optional, tag = "1")] + pub block_results: ::core::option::Option, +} +/// QueryBlockSearchRequest is the request type for the Query/BlockSearch RPC method. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct QueryBlockSearchRequest { + #[prost(string, tag = "1")] + pub packet_src_channel: ::prost::alloc::string::String, + #[prost(string, tag = "2")] + pub packet_dst_channel: ::prost::alloc::string::String, + #[prost(string, tag = "3")] + pub packet_sequence: ::prost::alloc::string::String, + #[prost(uint64, tag = "4")] + pub limit: u64, + #[prost(uint64, tag = "5")] + pub page: u64, +} +/// QueryBlockSearchResponse is the response type for the Query/BlockSearch RPC method. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct QueryBlockSearchResponse { + /// params defines the parameters of the module. + #[prost(message, repeated, tag = "1")] + pub blocks: ::prost::alloc::vec::Vec, + #[prost(uint64, tag = "2")] + pub total_count: u64, +} +/// QueryTransactionByHashRequest is the response type for the Query/BlockSearch RPC method. 
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct QueryTransactionByHashRequest {
+    /// Transaction hash in hex format
+    #[prost(string, tag = "1")]
+    pub hash: ::prost::alloc::string::String,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct QueryTransactionByHashResponse {
+    /// Whether the transaction existed on the blockchain
+    #[prost(string, tag = "1")]
+    pub hash: ::prost::alloc::string::String,
+    #[prost(uint64, tag = "2")]
+    pub height: u64,
+    #[prost(uint64, tag = "3")]
+    pub gas_fee: u64,
+    #[prost(uint64, tag = "4")]
+    pub tx_size: u64,
+    #[prost(message, repeated, tag = "5")]
+    pub events: ::prost::alloc::vec::Vec<Event>,
+}
+#[derive(Clone, Copy, PartialEq, ::prost::Message)]
+pub struct QueryIbcHeaderRequest {
+    #[prost(uint64, tag = "2")]
+    pub height: u64,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct QueryIbcHeaderResponse {
+    #[prost(message, optional, tag = "1")]
+    pub header: ::core::option::Option<::prost_types::Any>,
+}
+/// Generated client implementations.
+pub mod query_client {
+    #![allow(
+        unused_variables,
+        dead_code,
+        missing_docs,
+        clippy::wildcard_imports,
+        clippy::let_unit_value,
+    )]
+    use tonic::codegen::*;
+    use tonic::codegen::http::Uri;
+    /// Query provides defines the gRPC querier service
+    #[derive(Debug, Clone)]
+    pub struct QueryClient<T> {
+        inner: tonic::client::Grpc<T>,
+    }
+    impl QueryClient<tonic::transport::Channel> {
+        /// Attempt to create a new client by connecting to a given endpoint.
+ pub async fn connect(dst: D) -> Result + where + D: TryInto, + D::Error: Into, + { + let conn = tonic::transport::Endpoint::new(dst)?.connect().await?; + Ok(Self::new(conn)) + } + } + impl QueryClient + where + T: tonic::client::GrpcService, + T::Error: Into, + T::ResponseBody: Body + std::marker::Send + 'static, + ::Error: Into + std::marker::Send, + { + pub fn new(inner: T) -> Self { + let inner = tonic::client::Grpc::new(inner); + Self { inner } + } + pub fn with_origin(inner: T, origin: Uri) -> Self { + let inner = tonic::client::Grpc::with_origin(inner, origin); + Self { inner } + } + pub fn with_interceptor( + inner: T, + interceptor: F, + ) -> QueryClient> + where + F: tonic::service::Interceptor, + T::ResponseBody: Default, + T: tonic::codegen::Service< + http::Request, + Response = http::Response< + >::ResponseBody, + >, + >, + , + >>::Error: Into + std::marker::Send + std::marker::Sync, + { + QueryClient::new(InterceptedService::new(inner, interceptor)) + } + /// Compress requests with the given encoding. + /// + /// This requires the server to support it otherwise it might respond with an + /// error. + #[must_use] + pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.send_compressed(encoding); + self + } + /// Enable decompressing responses. + #[must_use] + pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.accept_compressed(encoding); + self + } + /// Limits the maximum size of a decoded message. + /// + /// Default: `4MB` + #[must_use] + pub fn max_decoding_message_size(mut self, limit: usize) -> Self { + self.inner = self.inner.max_decoding_message_size(limit); + self + } + /// Limits the maximum size of an encoded message. 
+ /// + /// Default: `usize::MAX` + #[must_use] + pub fn max_encoding_message_size(mut self, limit: usize) -> Self { + self.inner = self.inner.max_encoding_message_size(limit); + self + } + pub async fn block_results( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/ibc.core.types.v1.Query/BlockResults", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("ibc.core.types.v1.Query", "BlockResults")); + self.inner.unary(req, path, codec).await + } + pub async fn block_search( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/ibc.core.types.v1.Query/BlockSearch", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("ibc.core.types.v1.Query", "BlockSearch")); + self.inner.unary(req, path, codec).await + } + pub async fn transaction_by_hash( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/ibc.core.types.v1.Query/TransactionByHash", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("ibc.core.types.v1.Query", 
"TransactionByHash")); + self.inner.unary(req, path, codec).await + } + pub async fn ibc_header( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/ibc.core.types.v1.Query/IBCHeader", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("ibc.core.types.v1.Query", "IBCHeader")); + self.inner.unary(req, path, codec).await + } + } +} diff --git a/crates/relayer/src/chain/cardano/generated/mod.rs b/crates/relayer/src/chain/cardano/generated/mod.rs new file mode 100644 index 0000000000..74a2d7d5c2 --- /dev/null +++ b/crates/relayer/src/chain/cardano/generated/mod.rs @@ -0,0 +1,78 @@ +//! Generated protobuf code for Cardano-specific gRPC services +//! +//! These files are checked in and should not be edited by hand. + +// Allow clippy warnings for generated code +#![allow(clippy::all)] +#![allow(warnings)] + +// Cosmos dependencies +pub mod cosmos_proto { + include!("cosmos_proto.rs"); +} + +pub mod cosmos { + pub mod base { + pub mod query { + pub mod v1beta1 { + include!("cosmos.base.query.v1beta1.rs"); + } + } + } + pub mod ics23 { + pub mod v1 { + include!("cosmos.ics23.v1.rs"); + } + } + pub mod upgrade { + pub mod v1beta1 { + include!("cosmos.upgrade.v1beta1.rs"); + } + } +} + +// The `google.api` proto includes documentation snippets that are not valid Rust code. +// Exclude it from doctest builds to keep `cargo test` (which runs doctests by default) +// working without disabling doctests for the whole crate. 
+#[cfg(not(doctest))] +pub mod google { + pub mod api { + include!("google.api.rs"); + } +} + +// IBC core modules +pub mod ibc { + pub mod cardano { + pub mod v1 { + include!("ibc.cardano.v1.rs"); + } + } + pub mod core { + pub mod client { + pub mod v1 { + include!("ibc.core.client.v1.rs"); + } + } + pub mod connection { + pub mod v1 { + include!("ibc.core.connection.v1.rs"); + } + } + pub mod channel { + pub mod v1 { + include!("ibc.core.channel.v1.rs"); + } + } + pub mod commitment { + pub mod v1 { + include!("ibc.core.commitment.v1.rs"); + } + } + pub mod types { + pub mod v1 { + include!("ibc.core.types.v1.rs"); + } + } + } +} diff --git a/crates/relayer/src/chain/cardano/keyring.rs b/crates/relayer/src/chain/cardano/keyring.rs new file mode 100644 index 0000000000..532e6ac3c3 --- /dev/null +++ b/crates/relayer/src/chain/cardano/keyring.rs @@ -0,0 +1,179 @@ +//! Cardano keyring implementation with CIP-1852 derivation + +use super::error::Error; +use blake2::digest::{Update, VariableOutput}; +use blake2::Blake2bVar; +use ed25519_dalek::{Signature, Signer, SigningKey, VerifyingKey}; +use slip10::BIP32Path; +use std::str::FromStr; + +/// Cardano keyring for signing transactions +#[derive(Clone, Debug)] +pub struct CardanoKeyring { + signing_key: SigningKey, + verifying_key: VerifyingKey, +} + +impl CardanoKeyring { + /// Create a keyring from a bech32-encoded private key (ed25519_sk...) 
+    pub fn from_bech32_key(bech32_key: &str) -> Result<Self, Error> {
+        use bech32::FromBase32;
+
+        // Decode bech32 key
+        let (hrp, data, _variant) = bech32::decode(bech32_key)
+            .map_err(|e| Error::Keyring(format!("Invalid bech32 key: {:?}", e)))?;
+
+        if hrp != "ed25519_sk" {
+            return Err(Error::Keyring(format!(
+                "Expected ed25519_sk prefix, got: {}",
+                hrp
+            )));
+        }
+
+        // Convert from base32 (u5) to bytes
+        let bytes = Vec::<u8>::from_base32(&data)
+            .map_err(|e| Error::Keyring(format!("Failed to decode base32: {:?}", e)))?;
+
+        // Data should be 32 bytes for Ed25519 private key
+        if bytes.len() != 32 {
+            return Err(Error::Keyring(format!(
+                "Invalid key length: expected 32, got {}",
+                bytes.len()
+            )));
+        }
+
+        let mut key_bytes = [0u8; 32];
+        key_bytes.copy_from_slice(&bytes);
+
+        let signing_key = SigningKey::from_bytes(&key_bytes);
+        let verifying_key = signing_key.verifying_key();
+
+        Ok(Self {
+            signing_key,
+            verifying_key,
+        })
+    }
+
+    /// Create a new keyring from a mnemonic phrase
+    /// Uses CIP-1852 derivation: m/1852'/1815'/account'/2'/0'
+    pub fn from_mnemonic(mnemonic: &str, account: u32) -> Result<Self, Error> {
+        // Parse mnemonic using tiny-bip39 crate (hyphenated crate name, underscore in code)
+        let mnemonic = bip39::Mnemonic::from_phrase(mnemonic, bip39::Language::English)
+            .map_err(|e| Error::Keyring(format!("Invalid mnemonic: {:?}", e)))?;
+
+        // Generate seed
+        let seed = bip39::Seed::new(&mnemonic, "");
+        let seed_bytes = seed.as_bytes();
+
+        // CIP-1852 path: m/1852'/1815'/account'/2'/0'
+        // 1852' = purpose (CIP-1852), 1815' = coin type (Cardano), 2' = payment key role
+        let path = BIP32Path::from_str(&format!("m/1852'/1815'/{}'/2'/0'", account))
+            .map_err(|e| Error::Keyring(format!("Invalid derivation path: {:?}", e)))?;
+
+        // Derive key using SLIP-0010 Ed25519
+        let derived_key = slip10::derive_key_from_path(seed_bytes, slip10::Curve::Ed25519, &path)
+            .map_err(|e| Error::Keyring(format!("Key derivation failed: {:?}", e)))?;
+
+        // Create Ed25519 signing key
+        let signing_key = SigningKey::from_bytes(&derived_key.key);
+        let verifying_key = signing_key.verifying_key();
+
+        Ok(Self {
+            signing_key,
+            verifying_key,
+        })
+    }
+
+    /// Get the public key (verifying key)
+    pub fn verifying_key(&self) -> &VerifyingKey {
+        &self.verifying_key
+    }
+
+    /// Sign a message
+    pub fn sign(&self, message: &[u8]) -> Signature {
+        self.signing_key.sign(message)
+    }
+
+    /// Get the Cardano payment address (enterprise address for simplicity)
+    /// Enterprise address = (0x60 | network_id) | Blake2b-224(verifying_key)
+    pub fn address(&self, network_id: u8) -> String {
+        let vkey_bytes = self.verifying_key.as_bytes();
+
+        // Hash the public key with Blake2b-224 (28 bytes)
+        let mut hasher = Blake2bVar::new(28).expect("Blake2b-224 initialization must succeed");
+        hasher.update(vkey_bytes);
+        let mut payment_hash = [0u8; 28];
+        hasher
+            .finalize_variable(&mut payment_hash)
+            .expect("Blake2b-224 finalize must succeed");
+
+        // Construct enterprise address: header | payment_hash
+        //
+        // Address header encoding:
+        // - High nibble = address type (enterprise keyhash = 0b0110 = 6)
+        // - Low nibble = network id (testnet = 0, mainnet = 1)
+        let header = 0x60 | (network_id & 0x0f);
+
+        let mut address_bytes = vec![header];
+        address_bytes.extend_from_slice(&payment_hash);
+
+        // Encode as hex
+        hex::encode(address_bytes)
+    }
+
+    /// Create a test keyring with deterministic keys
+    pub fn new_for_testing() -> Result<Self, Error> {
+        // Standard test mnemonic (DO NOT USE IN PRODUCTION)
+        let mnemonic = "test walk nut penalty hip pave soap entry language right filter choice";
+        Self::from_mnemonic(mnemonic, 0)
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn test_keyring_derivation() {
+        let mnemonic = "test walk nut penalty hip pave soap entry language right filter choice";
+        let keyring = CardanoKeyring::from_mnemonic(mnemonic, 0).unwrap();
+
+        // Should generate consistent keys
+        let address = keyring.address(0);
+        assert!(!address.is_empty());
+        assert!(address.starts_with("60")); // Enterprise testnet address
+    }
+
+    #[test]
+    fn test_signing() {
+        let keyring = CardanoKeyring::new_for_testing().unwrap();
+        let message = b"test message";
+
+        let signature = keyring.sign(message);
+
+        // Verify the signature
+        use ed25519_dalek::Verifier;
+        assert!(keyring.verifying_key.verify(message, &signature).is_ok());
+    }
+
+    #[test]
+    fn test_different_accounts() {
+        let mnemonic = "test walk nut penalty hip pave soap entry language right filter choice";
+        let keyring1 = CardanoKeyring::from_mnemonic(mnemonic, 0).unwrap();
+        let keyring2 = CardanoKeyring::from_mnemonic(mnemonic, 1).unwrap();
+
+        // Different accounts should produce different addresses
+        assert_ne!(keyring1.address(0), keyring2.address(0));
+    }
+
+    #[test]
+    fn test_from_bech32_key() {
+        let key = "ed25519_sk1rvgjxs8sddhl46uqtv862s53vu4jf6lnk63rcn7f0qwzyq85wnlqgrsx42";
+        let result = CardanoKeyring::from_bech32_key(key);
+        assert!(
+            result.is_ok(),
+            "Failed to load from bech32 key: {:?}",
+            result.err()
+        );
+    }
+}
diff --git a/crates/relayer/src/chain/cardano/mod.rs b/crates/relayer/src/chain/cardano/mod.rs
new file mode 100644
index 0000000000..88a4da6c98
--- /dev/null
+++ b/crates/relayer/src/chain/cardano/mod.rs
@@ -0,0 +1,27 @@
+//! Cardano chain implementation for Hermes IBC relayer
+//!
+//! This module provides complete Cardano integration following the same pattern
+//! as Cosmos and Penumbra implementations in Hermes.
+
+pub mod chain_handle;
+pub mod config;
+pub mod endpoint;
+pub mod error;
+pub mod event_parser;
+pub mod event_source;
+pub mod gateway_client;
+pub mod generated;
+pub mod keyring;
+pub mod signer;
+pub mod signing_key_pair;
+
+// Re-export key types for convenience
+pub use config::CardanoConfig;
+pub use endpoint::CardanoChainEndpoint;
+pub use error::Error as CardanoError;
+pub use gateway_client::GatewayClient;
+pub use keyring::CardanoKeyring;
+pub use signing_key_pair::CardanoSigningKeyPair;
+
+// Type alias matching Cosmos/Penumbra pattern
+pub type CardanoChain = CardanoChainEndpoint;
diff --git a/crates/relayer/src/chain/cardano/signer.rs b/crates/relayer/src/chain/cardano/signer.rs
new file mode 100644
index 0000000000..ba41ec518d
--- /dev/null
+++ b/crates/relayer/src/chain/cardano/signer.rs
@@ -0,0 +1,384 @@
+//! Cardano transaction signing using Pallas
+
+use super::error::Error;
+use super::keyring::CardanoKeyring;
+use blake2::Digest;
+use pallas_codec::minicbor;
+use pallas_primitives::conway::{MintedTx, VKeyWitness};
+
+/// Sign a Cardano transaction
+pub fn sign_transaction(
+    unsigned_tx_cbor: &[u8],
+    keyring: &CardanoKeyring,
+) -> Result<Vec<u8>, Error> {
+    // 1. Parse the unsigned transaction
+    let tx: MintedTx<'_> = minicbor::decode(unsigned_tx_cbor)
+        .map_err(|e| Error::CborDecode(format!("Failed to decode transaction: {:?}", e)))?;
+
+    // 2. Extract and hash the transaction body
+    // Use the original raw bytes preserved by KeepRaw, not re-encoded bytes
+    let tx_body_cbor = tx.transaction_body.raw_cbor();
+
+    // Cardano uses Blake2b-256 for transaction hashing
+    use blake2::digest::consts::U32;
+    use blake2::Blake2b;
+    let mut hasher = Blake2b::<U32>::new();
+    hasher.update(tx_body_cbor);
+    let tx_hash = hasher.finalize();
+
+    // 3. Sign the transaction hash
+    let signature = keyring.sign(tx_hash.as_slice());
+
+    // 4. Create VKeyWitness
+    let vkey = keyring.verifying_key().as_bytes().to_vec();
+    let sig = signature.to_bytes().to_vec();
+
+    let vkey_witness = VKeyWitness {
+        vkey: vkey.into(),
+        signature: sig.into(),
+    };
+
+    // 5. Reconstruct the transaction with the new witness
+    // We need to work around Pallas's KeepRaw immutability by manually building CBOR
+
+    // Get existing witnesses
+    let mut new_vkeywitnesses: Vec<VKeyWitness> = tx
+        .transaction_witness_set
+        .vkeywitness
+        .clone()
+        .map(|set| set.to_vec())
+        .unwrap_or_default();
+    new_vkeywitnesses.push(vkey_witness);
+
+    // Encode the new witness set manually
+    let mut witness_set_cbor = Vec::new();
+    {
+        let mut encoder = minicbor::Encoder::new(&mut witness_set_cbor);
+
+        // Count how many witness set fields we have
+        let ws = &tx.transaction_witness_set;
+        let mut map_size = 1u64; // Always have vkeywitness
+        if ws.native_script.is_some() {
+            map_size += 1;
+        }
+        if ws.bootstrap_witness.is_some() {
+            map_size += 1;
+        }
+        if ws.plutus_v1_script.is_some() {
+            map_size += 1;
+        }
+        if ws.plutus_data.is_some() {
+            map_size += 1;
+        }
+        if ws.redeemer.is_some() {
+            map_size += 1;
+        }
+        if ws.plutus_v2_script.is_some() {
+            map_size += 1;
+        }
+        if ws.plutus_v3_script.is_some() {
+            map_size += 1;
+        }
+
+        // Witness set is a CBOR map
+        encoder
+            .map(map_size)
+            .map_err(|e| Error::Signer(format!("Failed to encode witness map: {:?}", e)))?;
+
+        // Key 0: vkeywitness array
+        encoder
+            .u8(0)
+            .map_err(|e| Error::Signer(format!("Failed to encode key: {:?}", e)))?;
+        encoder
+            .array(new_vkeywitnesses.len() as u64)
+            .map_err(|e| Error::Signer(format!("Failed to encode array: {:?}", e)))?;
+        for witness in &new_vkeywitnesses {
+            encoder
+                .encode(witness)
+                .map_err(|e| Error::Signer(format!("Failed to encode witness: {:?}", e)))?;
+        }
+
+        // Copy other witness set fields if present
+        if let Some(ref native_scripts) = ws.native_script {
+            encoder
+                .u8(1)
+                .map_err(|e| Error::Signer(format!("Failed to encode key: {:?}", e)))?;
+            encoder
+                .encode(native_scripts)
+                .map_err(|e| Error::Signer(format!("Failed to encode native scripts: {:?}", e)))?;
+        }
+
+        if let Some(ref bootstrap) = ws.bootstrap_witness {
+            encoder
+                .u8(2)
+                .map_err(|e| Error::Signer(format!("Failed to encode key: {:?}", e)))?;
+            encoder
+                .encode(bootstrap)
+                .map_err(|e| Error::Signer(format!("Failed to encode bootstrap: {:?}", e)))?;
+        }
+
+        if let Some(ref plutus_v1) = ws.plutus_v1_script {
+            encoder
+                .u8(3)
+                .map_err(|e| Error::Signer(format!("Failed to encode key: {:?}", e)))?;
+            encoder
+                .encode(plutus_v1)
+                .map_err(|e| Error::Signer(format!("Failed to encode plutus v1: {:?}", e)))?;
+        }
+
+        if let Some(ref plutus_data) = ws.plutus_data {
+            encoder
+                .u8(4)
+                .map_err(|e| Error::Signer(format!("Failed to encode key: {:?}", e)))?;
+            encoder
+                .encode(plutus_data)
+                .map_err(|e| Error::Signer(format!("Failed to encode plutus data: {:?}", e)))?;
+        }
+
+        if let Some(ref redeemers) = ws.redeemer {
+            encoder
+                .u8(5)
+                .map_err(|e| Error::Signer(format!("Failed to encode key: {:?}", e)))?;
+            encoder
+                .encode(redeemers)
+                .map_err(|e| Error::Signer(format!("Failed to encode redeemers: {:?}", e)))?;
+        }
+
+        if let Some(ref plutus_v2) = ws.plutus_v2_script {
+            encoder
+                .u8(6)
+                .map_err(|e| Error::Signer(format!("Failed to encode key: {:?}", e)))?;
+            encoder
+                .encode(plutus_v2)
+                .map_err(|e| Error::Signer(format!("Failed to encode plutus v2: {:?}", e)))?;
+        }
+
+        if let Some(ref plutus_v3) = ws.plutus_v3_script {
+            encoder
+                .u8(7)
+                .map_err(|e| Error::Signer(format!("Failed to encode key: {:?}", e)))?;
+            encoder
+                .encode(plutus_v3)
+                .map_err(|e| Error::Signer(format!("Failed to encode plutus v3: {:?}", e)))?;
+        }
+    }
+
+    // Build the final signed transaction CBOR
+    // Conway transaction is an array: [transaction_body, transaction_witness_set, is_valid, auxiliary_data]
+    // where auxiliary_data can be null
+    let mut signed_tx_cbor = Vec::new();
+    {
+        let mut encoder = minicbor::Encoder::new(&mut signed_tx_cbor);
+ + // Conway transactions always have 4 elements + encoder + .array(4) + .map_err(|e| Error::Signer(format!("Failed to encode tx array: {:?}", e)))?; + + // Encode transaction body + encoder + .encode(&tx.transaction_body) + .map_err(|e| Error::Signer(format!("Failed to encode tx body: {:?}", e)))?; + + // Write the witness set CBOR directly (not as a byte string wrapper) + use std::io::Write; + encoder + .writer_mut() + .write_all(&witness_set_cbor) + .map_err(|e| Error::Signer(format!("Failed to write witness set: {:?}", e)))?; + + // Encode isValid flag + encoder + .bool(tx.success) + .map_err(|e| Error::Signer(format!("Failed to encode success: {:?}", e)))?; + + // Encode auxiliary data (using Nullable encoding) + encoder + .encode(&tx.auxiliary_data) + .map_err(|e| Error::Signer(format!("Failed to encode aux data: {:?}", e)))?; + } + + Ok(signed_tx_cbor) +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn sign_transaction_adds_vkey_witness_and_signature_verifies() { + fn unsigned_tx_fixture(existing_vkey_witnesses: usize) -> Vec { + let mut out = Vec::new(); + let mut enc = minicbor::Encoder::new(&mut out); + + // Conway transaction: [transaction_body, transaction_witness_set, is_valid, auxiliary_data] + enc.array(4).unwrap(); + + // transaction_body is a CBOR map with numeric keys. We include only the minimum set of + // fields required for decoding: inputs (0), outputs (1), fee (2). 
+ enc.map(3).unwrap(); + + // inputs: Set (we omit the optional tag and encode as a plain array) + enc.u8(0).unwrap(); + enc.array(1).unwrap(); + enc.array(2).unwrap(); + enc.bytes(&[0u8; 32]).unwrap(); // transaction_id hash bytes + enc.u64(0).unwrap(); // output index + + // outputs: Vec (we use the legacy/array form) + enc.u8(1).unwrap(); + enc.array(1).unwrap(); + enc.array(3).unwrap(); + enc.bytes(&[1u8; 32]).unwrap(); // address bytes (opaque for this test) + enc.u64(1).unwrap(); // amount (Value::Coin) + enc.null().unwrap(); // datum_hash = None + + // fee + enc.u8(2).unwrap(); + enc.u64(0).unwrap(); + + // transaction_witness_set: CBOR map with numeric keys. Start with either empty map or one + // containing dummy vkey witnesses. + if existing_vkey_witnesses == 0 { + enc.map(0).unwrap(); + } else { + enc.map(1).unwrap(); + enc.u8(0).unwrap(); + enc.array(existing_vkey_witnesses as u64).unwrap(); + for _ in 0..existing_vkey_witnesses { + enc.array(2).unwrap(); + enc.bytes(&[2u8; 32]).unwrap(); + enc.bytes(&[3u8; 64]).unwrap(); + } + } + + // is_valid + enc.bool(true).unwrap(); + + // auxiliary_data = null + enc.null().unwrap(); + + out + } + + let keyring = CardanoKeyring::new_for_testing().unwrap(); + + let unsigned = unsigned_tx_fixture(0); + let unsigned_tx: MintedTx<'_> = minicbor::decode(&unsigned).unwrap(); + + let signed = sign_transaction(&unsigned, &keyring).unwrap(); + let signed_tx: MintedTx<'_> = minicbor::decode(&signed).unwrap(); + + // Signing must not mutate the transaction body bytes (the hash is over the body). + assert_eq!( + signed_tx.transaction_body.raw_cbor(), + unsigned_tx.transaction_body.raw_cbor() + ); + + // The signing must preserve the success flag and auxiliary data field. + assert_eq!(signed_tx.success, unsigned_tx.success); + assert!(matches!( + signed_tx.auxiliary_data, + pallas_codec::utils::Nullable::Null + )); + + // Verify that a vkey witness was added and that it verifies against the tx hash. 
+ let witnesses = signed_tx + .transaction_witness_set + .vkeywitness + .clone() + .expect("expected vkey witness set") + .to_vec(); + + let added_witness = witnesses + .iter() + .find(|w| w.vkey.as_slice() == keyring.verifying_key().as_bytes()) + .expect("expected witness with the keyring verifying key"); + + assert_eq!(added_witness.signature.len(), 64); + + let tx_body_cbor = signed_tx.transaction_body.raw_cbor(); + + use blake2::digest::consts::U32; + use blake2::Blake2b; + let mut hasher = Blake2b::::new(); + hasher.update(tx_body_cbor); + let tx_hash = hasher.finalize(); + + use ed25519_dalek::Verifier; + let signature = { + let mut sig_bytes = [0u8; 64]; + sig_bytes.copy_from_slice(&added_witness.signature); + ed25519_dalek::Signature::from_bytes(&sig_bytes) + }; + + keyring + .verifying_key() + .verify(tx_hash.as_slice(), &signature) + .unwrap(); + } + + #[test] + fn sign_transaction_appends_to_existing_witnesses() { + fn unsigned_tx_fixture(existing_vkey_witnesses: usize) -> Vec { + let mut out = Vec::new(); + let mut enc = minicbor::Encoder::new(&mut out); + + enc.array(4).unwrap(); + enc.map(3).unwrap(); + + enc.u8(0).unwrap(); + enc.array(1).unwrap(); + enc.array(2).unwrap(); + enc.bytes(&[0u8; 32]).unwrap(); + enc.u64(0).unwrap(); + + enc.u8(1).unwrap(); + enc.array(1).unwrap(); + enc.array(3).unwrap(); + enc.bytes(&[1u8; 32]).unwrap(); + enc.u64(1).unwrap(); + enc.null().unwrap(); + + enc.u8(2).unwrap(); + enc.u64(0).unwrap(); + + enc.map(1).unwrap(); + enc.u8(0).unwrap(); + enc.array(existing_vkey_witnesses as u64).unwrap(); + for _ in 0..existing_vkey_witnesses { + enc.array(2).unwrap(); + enc.bytes(&[2u8; 32]).unwrap(); + enc.bytes(&[3u8; 64]).unwrap(); + } + + enc.bool(true).unwrap(); + enc.null().unwrap(); + + out + } + + let keyring = CardanoKeyring::new_for_testing().unwrap(); + + let unsigned = unsigned_tx_fixture(1); + let signed = sign_transaction(&unsigned, &keyring).unwrap(); + let signed_tx: MintedTx<'_> = 
minicbor::decode(&signed).unwrap(); + + let witnesses = signed_tx + .transaction_witness_set + .vkeywitness + .clone() + .expect("expected vkey witness set") + .to_vec(); + + assert_eq!(witnesses.len(), 2); + } + + #[test] + fn sign_transaction_rejects_invalid_cbor() { + let keyring = CardanoKeyring::new_for_testing().unwrap(); + + let err = sign_transaction(&[0xff], &keyring).unwrap_err(); + assert!(matches!(err, Error::CborDecode(_))); + } +} diff --git a/crates/relayer/src/chain/cardano/signing_key_pair.rs b/crates/relayer/src/chain/cardano/signing_key_pair.rs new file mode 100644 index 0000000000..478c72e8fa --- /dev/null +++ b/crates/relayer/src/chain/cardano/signing_key_pair.rs @@ -0,0 +1,206 @@ +//! Cardano SigningKeyPair implementation for Hermes keyring + +use super::keyring::CardanoKeyring; +use crate::config::AddressType; +use crate::keyring::{errors::Error as KeyringError, KeyType, SigningKeyPair}; +use hdpath::StandardHDPath; +use serde::{Deserialize, Serialize}; +use std::any::Any; + +/// Keyfile format for Cardano keys +#[derive(Clone, Debug, Deserialize, Serialize)] +pub struct CardanoKeyFile { + pub name: String, + pub r#type: String, + pub address: String, + pub pubkey: String, + pub mnemonic: String, +} + +/// Cardano signing key pair wrapper for Hermes +#[derive(Clone, Debug, Deserialize, Serialize)] +pub struct CardanoSigningKeyPair { + #[serde(skip)] + keyring: Option, + // Store serializable data + mnemonic: String, + account: u32, + network_id: u8, +} + +impl CardanoSigningKeyPair { + /// Create a new CardanoSigningKeyPair from components + /// Supports both mnemonic phrases and bech32-encoded private keys (ed25519_sk...) 
+    pub fn new(
+        mnemonic_or_key: String,
+        account: u32,
+        network_id: u8,
+    ) -> Result<Self, KeyringError> {
+        // Check if this is a bech32 private key instead of a mnemonic
+        let keyring = if mnemonic_or_key.starts_with("ed25519_sk") {
+            CardanoKeyring::from_bech32_key(&mnemonic_or_key).map_err(|_| {
+                KeyringError::invalid_mnemonic(anyhow::anyhow!(
+                    "Failed to load Cardano key from bech32"
+                ))
+            })?
+        } else {
+            CardanoKeyring::from_mnemonic(&mnemonic_or_key, account).map_err(|_| {
+                KeyringError::invalid_mnemonic(anyhow::anyhow!(
+                    "Failed to derive Cardano key from mnemonic"
+                ))
+            })?
+        };
+
+        Ok(Self {
+            keyring: Some(keyring),
+            mnemonic: mnemonic_or_key,
+            account,
+            network_id,
+        })
+    }
+
+    /// Ensure the keyring is initialized (for after deserialization)
+    fn ensure_keyring(&mut self) -> Result<(), KeyringError> {
+        if self.keyring.is_none() {
+            let keyring = if self.mnemonic.starts_with("ed25519_sk") {
+                CardanoKeyring::from_bech32_key(&self.mnemonic).map_err(|_| {
+                    KeyringError::invalid_mnemonic(anyhow::anyhow!(
+                        "Failed to reinitialize keyring from bech32"
+                    ))
+                })?
+            } else {
+                CardanoKeyring::from_mnemonic(&self.mnemonic, self.account).map_err(|_| {
+                    KeyringError::invalid_mnemonic(anyhow::anyhow!(
+                        "Failed to reinitialize keyring from mnemonic"
+                    ))
+                })?
+ }; + self.keyring = Some(keyring); + } + Ok(()) + } + + /// Get a reference to the keyring, initializing if needed + fn keyring(&mut self) -> Result<&CardanoKeyring, KeyringError> { + self.ensure_keyring()?; + self.keyring + .as_ref() + .ok_or_else(KeyringError::key_not_found) + } + + /// Get a mutable reference to the keyring, initializing if needed + fn keyring_mut(&mut self) -> Result<&mut CardanoKeyring, KeyringError> { + self.ensure_keyring()?; + self.keyring + .as_mut() + .ok_or_else(KeyringError::key_not_found) + } + + /// Get a clone of the CardanoKeyring (public method for external signing) + /// This clones self internally to handle lazy initialization + pub fn get_cardano_keyring(&self) -> Result { + let mut mutable_self = self.clone(); + mutable_self.ensure_keyring()?; + mutable_self.keyring.ok_or_else(KeyringError::key_not_found) + } +} + +impl SigningKeyPair for CardanoSigningKeyPair { + const KEY_TYPE: KeyType = KeyType::Ed25519; + type KeyFile = CardanoKeyFile; + + fn from_key_file( + key_file: Self::KeyFile, + hd_path: &StandardHDPath, + ) -> Result + where + Self: Sized, + { + // For Cardano, we use the account from the HD path + let account = hd_path.account(); + // Cardano testnet by default (can be overridden in config) + let network_id = 0; + + Self::new(key_file.mnemonic, account, network_id) + } + + fn from_mnemonic( + mnemonic: &str, + hd_path: &StandardHDPath, + _address_type: &AddressType, + _account_prefix: &str, + ) -> Result + where + Self: Sized, + { + let account = hd_path.account(); + // Cardano testnet by default + let network_id = 0; + + Self::new(mnemonic.to_string(), account, network_id) + } + + fn account(&self) -> String { + // Return cached address or generate it + // Clone self to make it mutable for ensure_keyring + let mut mutable_self = self.clone(); + match mutable_self.keyring() { + Ok(keyring) => keyring.address(self.network_id), + Err(_) => format!("cardano_address_error_account_{}", self.account), + } + } + + fn 
sign(&self, message: &[u8]) -> Result, KeyringError> { + let mut mutable_self = self.clone(); + let keyring = mutable_self.keyring_mut()?; + let signature = keyring.sign(message); + Ok(signature.to_bytes().to_vec()) + } + + fn as_any(&self) -> &dyn Any { + self + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_cardano_signing_key_pair_creation() { + let mnemonic = "test walk nut penalty hip pave soap entry language right filter choice"; + let key_pair = CardanoSigningKeyPair::new(mnemonic.to_string(), 0, 0).unwrap(); + + let account = key_pair.account(); + assert!(!account.is_empty()); + assert!(account.starts_with("60")); // Cardano enterprise testnet address + } + + #[test] + fn test_cardano_signing() { + let mnemonic = "test walk nut penalty hip pave soap entry language right filter choice"; + let key_pair = CardanoSigningKeyPair::new(mnemonic.to_string(), 0, 0).unwrap(); + + let message = b"test message"; + let signature = key_pair.sign(message).unwrap(); + + assert_eq!(signature.len(), 64); // Ed25519 signature is 64 bytes + } + + #[test] + fn test_serialization_roundtrip() { + let mnemonic = "test walk nut penalty hip pave soap entry language right filter choice"; + let key_pair = CardanoSigningKeyPair::new(mnemonic.to_string(), 0, 0).unwrap(); + + // Serialize + let json = serde_json::to_string(&key_pair).unwrap(); + + // Deserialize + let deserialized: CardanoSigningKeyPair = serde_json::from_str(&json).unwrap(); + + // Test that it still works + let message = b"test"; + let signature = deserialized.sign(message).unwrap(); + assert_eq!(signature.len(), 64); + } +} diff --git a/crates/relayer/src/chain/cosmos.rs b/crates/relayer/src/chain/cosmos.rs index b25ee8ddef..661cb5d3e8 100644 --- a/crates/relayer/src/chain/cosmos.rs +++ b/crates/relayer/src/chain/cosmos.rs @@ -30,7 +30,6 @@ use ibc_relayer_types::clients::ics07_tendermint::client_state::{ }; use ibc_relayer_types::clients::ics07_tendermint::consensus_state::ConsensusState as 
TmConsensusState; use ibc_relayer_types::clients::ics07_tendermint::header::Header as TmHeader; -use ibc_relayer_types::core::ics02_client::client_type::ClientType; use ibc_relayer_types::core::ics02_client::error::Error as ClientError; use ibc_relayer_types::core::ics02_client::events::UpdateClient; use ibc_relayer_types::core::ics03_connection::connection::{ @@ -1387,12 +1386,10 @@ impl ChainEndpoint for CosmosSdkChain { let consensus_state = AnyConsensusState::decode_vec(&res.value).map_err(Error::decode)?; - if !matches!(consensus_state, AnyConsensusState::Tendermint(_)) { - return Err(Error::consensus_state_type_mismatch( - ClientType::Tendermint, - consensus_state.client_type(), - )); - } + // Note: Upstream Hermes assumed Cosmos chains only ever store Tendermint consensus states. + // In this repo we also support non-Tendermint clients on Cosmos chains (e.g. the Mithril + // client used to track Cardano). Therefore we must accept whatever consensus state type is + // actually stored under the requested client ID and height. match include_proof { IncludeProof::Yes => { diff --git a/crates/relayer/src/chain/endpoint.rs b/crates/relayer/src/chain/endpoint.rs index ff33a4b815..7b9145867c 100644 --- a/crates/relayer/src/chain/endpoint.rs +++ b/crates/relayer/src/chain/endpoint.rs @@ -479,6 +479,17 @@ pub trait ChainEndpoint: Sized { _ => {} } + // Proof height semantics are chain-specific. + // + // Hermes historically uses `query_height + 1` as `proof_height` for Tendermint/Cosmos SDK + // chains. For Cardano (Mithril snapshot heights), the consensus state at height H commits to + // the IBC root at height H directly, so the proof height must be exactly `query_height`. 
+ let proof_height = if matches!(self.config(), ChainConfig::Cardano(_)) { + height + } else { + height.increment() + }; + Ok(( client_state, Proofs::new( @@ -487,7 +498,7 @@ pub trait ChainEndpoint: Sized { consensus_proof, None, // TODO: Retrieve host consensus proof when available None, - height.increment(), + proof_height, ) .map_err(Error::malformed_proof)?, )) @@ -517,15 +528,14 @@ pub trait ChainEndpoint: Sized { let channel_proof_bytes = CommitmentProofBytes::try_from(channel_proof).map_err(Error::malformed_proof)?; - Proofs::new( - channel_proof_bytes, - None, - None, - None, - None, - height.increment(), - ) - .map_err(Error::malformed_proof) + let proof_height = if matches!(self.config(), ChainConfig::Cardano(_)) { + height + } else { + height.increment() + }; + + Proofs::new(channel_proof_bytes, None, None, None, None, proof_height) + .map_err(Error::malformed_proof) } /// Builds the proof for packet messages. @@ -659,13 +669,19 @@ pub trait ChainEndpoint: Sized { return Err(Error::queried_proof_not_found()); }; + let proof_height = if matches!(self.config(), ChainConfig::Cardano(_)) { + height + } else { + height.increment() + }; + let proofs = Proofs::new( CommitmentProofBytes::try_from(packet_proof).map_err(Error::malformed_proof)?, None, None, None, channel_proof, - height.increment(), + proof_height, ) .map_err(Error::malformed_proof)?; diff --git a/crates/relayer/src/client_state.rs b/crates/relayer/src/client_state.rs index 85f5b83dc4..a4e2d14676 100644 --- a/crates/relayer/src/client_state.rs +++ b/crates/relayer/src/client_state.rs @@ -9,6 +9,10 @@ use ibc_proto::Protobuf; use ibc_relayer_types::clients::ics07_tendermint::client_state::{ ClientState as TmClientState, TENDERMINT_CLIENT_STATE_TYPE_URL, }; +use ibc_relayer_types::clients::ics08_cardano::client_state::{ + ClientState as MithrilClientState, MITHRIL_CLIENT_STATE_TYPE_URL, +}; + use ibc_relayer_types::core::ics02_client::client_state::ClientState; use 
ibc_relayer_types::core::ics02_client::client_type::ClientType; use ibc_relayer_types::core::ics02_client::error::Error; @@ -21,54 +25,64 @@ use ibc_relayer_types::Height; #[serde(tag = "type")] pub enum AnyClientState { Tendermint(TmClientState), + /// Cardano-tracking client state (`08-cardano`), encoded as `ibc.lightclients.mithril.v1.ClientState`. + Mithril(MithrilClientState), } impl AnyClientState { pub fn chain_id(&self) -> ChainId { match self { AnyClientState::Tendermint(tm_state) => tm_state.chain_id(), + AnyClientState::Mithril(mithril_state) => mithril_state.chain_id(), } } pub fn latest_height(&self) -> Height { match self { Self::Tendermint(tm_state) => tm_state.latest_height(), + Self::Mithril(mithril_state) => mithril_state.latest_height(), } } pub fn frozen_height(&self) -> Option { match self { Self::Tendermint(tm_state) => tm_state.frozen_height(), + Self::Mithril(mithril_state) => mithril_state.frozen_height(), } } pub fn trust_threshold(&self) -> Option { match self { AnyClientState::Tendermint(state) => Some(state.trust_threshold), + AnyClientState::Mithril(_) => None, // Mithril client doesn't use trust threshold } } pub fn trusting_period(&self) -> Duration { match self { AnyClientState::Tendermint(state) => state.trusting_period, + AnyClientState::Mithril(state) => state.trusting_period, } } pub fn max_clock_drift(&self) -> Duration { match self { AnyClientState::Tendermint(state) => state.max_clock_drift, + AnyClientState::Mithril(_) => Duration::from_secs(300), // 5 minutes default } } pub fn client_type(&self) -> ClientType { match self { Self::Tendermint(state) => state.client_type(), + Self::Mithril(state) => state.client_type(), } } pub fn expired(&self, elapsed: Duration) -> bool { match self { Self::Tendermint(state) => state.expired(elapsed), + Self::Mithril(state) => state.expired(elapsed), } } } @@ -87,6 +101,8 @@ impl TryFrom for AnyClientState { .map_err(Error::decode_raw_client_state)?, )), + MITHRIL_CLIENT_STATE_TYPE_URL => 
Ok(AnyClientState::Mithril(raw.try_into()?)), + _ => Err(Error::unknown_client_state_type(raw.type_url)), } } @@ -99,6 +115,7 @@ impl From for Any { type_url: TENDERMINT_CLIENT_STATE_TYPE_URL.to_string(), value: Protobuf::::encode_vec(value), }, + AnyClientState::Mithril(value) => value.into(), } } } @@ -131,6 +148,12 @@ impl From for AnyClientState { } } +impl From for AnyClientState { + fn from(cs: MithrilClientState) -> Self { + Self::Mithril(cs) + } +} + #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] #[serde(tag = "type")] pub struct IdentifiedAnyClientState { diff --git a/crates/relayer/src/config.rs b/crates/relayer/src/config.rs index a2ec4e8a97..c805a69bcf 100644 --- a/crates/relayer/src/config.rs +++ b/crates/relayer/src/config.rs @@ -329,6 +329,7 @@ impl Config { .map_err(Into::>::into)?; } ChainConfig::Penumbra { .. } => { /* no-op for now (erwan) */ } + ChainConfig::Cardano { .. } => { /* no-op for Cardano */ } } } @@ -664,6 +665,7 @@ pub enum ChainConfig { // Reuse CosmosSdkConfig for tendermint light clients Namada(CosmosSdkConfig), Penumbra(PenumbraConfig), + Cardano(crate::chain::cardano::CardanoConfig), } impl ChainConfig { @@ -672,6 +674,7 @@ impl ChainConfig { Self::CosmosSdk(config) => &config.id, Self::Namada(config) => &config.id, Self::Penumbra(config) => &config.id, + Self::Cardano(config) => &config.id, } } @@ -680,6 +683,7 @@ impl ChainConfig { Self::CosmosSdk(config) => &config.packet_filter, Self::Namada(config) => &config.packet_filter, Self::Penumbra(config) => &config.packet_filter, + Self::Cardano(config) => &config.packet_filter, } } @@ -688,6 +692,7 @@ impl ChainConfig { Self::CosmosSdk(config) => config.max_block_time, Self::Namada(config) => config.max_block_time, Self::Penumbra(config) => config.max_block_time, + Self::Cardano(config) => config.max_block_time, } } @@ -696,6 +701,7 @@ impl ChainConfig { Self::CosmosSdk(config) => &config.key_name, Self::Namada(config) => &config.key_name, Self::Penumbra(config) => 
&config.stub_key_name, + Self::Cardano(config) => &config.key_name, } } @@ -704,6 +710,7 @@ impl ChainConfig { Self::CosmosSdk(config) => config.key_name = key_name, Self::Namada(config) => config.key_name = key_name, Self::Penumbra(_) => { /* no-op */ } + Self::Cardano(config) => config.key_name = key_name, } } @@ -732,6 +739,20 @@ impl ChainConfig { .collect() } ChainConfig::Penumbra(_) => vec![], + ChainConfig::Cardano(config) => { + use crate::chain::cardano::signing_key_pair::CardanoSigningKeyPair; + let keyring: KeyRing = KeyRing::new( + config.key_store_type, + "cardano", + &config.id, + &config.key_store_folder, + )?; + keyring + .keys()? + .into_iter() + .map(|(key_name, keys)| (key_name, keys.into())) + .collect() + } }; Ok(keys) @@ -742,6 +763,7 @@ impl ChainConfig { Self::CosmosSdk(config) => config.trust_threshold, Self::Namada(config) => config.trust_threshold, Self::Penumbra(config) => config.trust_threshold, + Self::Cardano(config) => config.trust_threshold.unwrap_or_default(), } } @@ -749,6 +771,7 @@ impl ChainConfig { match self { Self::CosmosSdk(config) | Self::Namada(config) => config.clear_interval, Self::Penumbra(config) => config.clear_interval, + Self::Cardano(config) => config.clear_interval, } } @@ -756,6 +779,7 @@ impl ChainConfig { match self { Self::CosmosSdk(config) | Self::Namada(config) => config.query_packets_chunk_size, Self::Penumbra(config) => config.query_packets_chunk_size, + Self::Cardano(config) => config.query_packets_chunk_size, } } @@ -765,6 +789,7 @@ impl ChainConfig { config.query_packets_chunk_size = query_packets_chunk_size } Self::Penumbra(config) => config.query_packets_chunk_size = query_packets_chunk_size, + Self::Cardano(config) => config.query_packets_chunk_size = query_packets_chunk_size, } } @@ -777,6 +802,7 @@ impl ChainConfig { .map(|seqs| Cow::Borrowed(seqs.as_slice())) .unwrap_or_else(|| Cow::Owned(Vec::new())), Self::Penumbra(_config) => Cow::Owned(Vec::new()), + Self::Cardano(_config) => 
Cow::Owned(Vec::new()), } } @@ -784,6 +810,7 @@ impl ChainConfig { match self { Self::CosmosSdk(config) | Self::Namada(config) => config.allow_ccq, Self::Penumbra(_config) => false, + Self::Cardano(_config) => false, } } @@ -791,6 +818,7 @@ impl ChainConfig { match self { Self::CosmosSdk(config) | Self::Namada(config) => config.clock_drift, Self::Penumbra(config) => config.clock_drift, + Self::Cardano(config) => config.clock_drift, } } @@ -798,6 +826,7 @@ impl ChainConfig { match self { Self::Namada(_) | Self::CosmosSdk(_) => true, Self::Penumbra(_) => false, + Self::Cardano(_) => true, } } } @@ -834,6 +863,9 @@ impl<'de> Deserialize<'de> for ChainConfig { "Penumbra" => PenumbraConfig::deserialize(value) .map(Self::Penumbra) .map_err(|e| serde::de::Error::custom(format!("invalid Penumbra config: {e}"))), + "Cardano" => crate::chain::cardano::CardanoConfig::deserialize(value) + .map(Self::Cardano) + .map_err(|e| serde::de::Error::custom(format!("invalid Cardano config: {e}"))), // chain_type => Err(serde::de::Error::custom(format!( "unknown chain type: {chain_type}", @@ -1031,6 +1063,7 @@ mod tests { chain_config.excluded_sequences.clone() } ChainConfig::Penumbra(_) => panic!("expected cosmos chain config"), + ChainConfig::Cardano(_) => panic!("expected cosmos chain config"), }; assert_eq!(excluded_sequences1, excluded_sequences2); diff --git a/crates/relayer/src/connection.rs b/crates/relayer/src/connection.rs index 7f968bcdcf..3f61e6338c 100644 --- a/crates/relayer/src/connection.rs +++ b/crates/relayer/src/connection.rs @@ -25,6 +25,7 @@ use crate::chain::requests::{ IncludeProof, PageRequest, QueryConnectionRequest, QueryConnectionsRequest, QueryHeight, }; use crate::chain::tracking::TrackedMsgs; +use crate::config::ChainConfig; use crate::foreign_client::{ForeignClient, HasExpiredOrFrozenError}; use crate::object::Connection as WorkerConnectionObject; use crate::util::pretty::{PrettyDuration, PrettyOption}; @@ -937,6 +938,22 @@ impl Connection { "dst_chain": 
self.dst_chain().id(), } ); + let dst_chain_is_cardano = matches!( + self.dst_chain().config().map_err(ConnectionError::relayer)?, + ChainConfig::Cardano(_) + ); + + // Cardano query_latest_height is Mithril-certified snapshot height, not raw tip. + // A strict pre-wait on this value can deadlock the handshake before we submit the tx + // that would advance certified state. + if dst_chain_is_cardano { + debug!( + "skipping destination-height pre-wait for Cardano (required consensus proof height: {})", + consensus_height + ); + return Ok(()); + } + let dst_application_latest_height = || { self.dst_chain() .query_latest_height() diff --git a/crates/relayer/src/consensus_state.rs b/crates/relayer/src/consensus_state.rs index 500fb3bcef..07f27a7f54 100644 --- a/crates/relayer/src/consensus_state.rs +++ b/crates/relayer/src/consensus_state.rs @@ -7,6 +7,10 @@ use ibc_proto::Protobuf; use ibc_relayer_types::clients::ics07_tendermint::consensus_state::{ ConsensusState as TmConsensusState, TENDERMINT_CONSENSUS_STATE_TYPE_URL, }; +use ibc_relayer_types::clients::ics08_cardano::consensus_state::{ + ConsensusState as MithrilConsensusState, MITHRIL_CONSENSUS_STATE_TYPE_URL, +}; + use ibc_relayer_types::core::ics02_client::client_type::ClientType; use ibc_relayer_types::core::ics02_client::consensus_state::ConsensusState; use ibc_relayer_types::core::ics02_client::error::Error; @@ -16,20 +20,25 @@ use ibc_relayer_types::Height; #[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] #[serde(tag = "type")] +#[allow(clippy::large_enum_variant)] pub enum AnyConsensusState { Tendermint(TmConsensusState), + /// Cardano-tracking consensus state (`08-cardano`), encoded as `ibc.lightclients.mithril.v1.ConsensusState`. 
+ Mithril(MithrilConsensusState), } impl AnyConsensusState { pub fn timestamp(&self) -> Timestamp { match self { Self::Tendermint(cs_state) => cs_state.timestamp.into(), + Self::Mithril(cs_state) => ConsensusState::timestamp(cs_state), } } pub fn client_type(&self) -> ClientType { match self { AnyConsensusState::Tendermint(_cs) => ClientType::Tendermint, + AnyConsensusState::Mithril(_cs) => ClientType::Cardano, } } } @@ -48,6 +57,8 @@ impl TryFrom for AnyConsensusState { .map_err(Error::decode_raw_client_state)?, )), + MITHRIL_CONSENSUS_STATE_TYPE_URL => Ok(AnyConsensusState::Mithril(value.try_into()?)), + _ => Err(Error::unknown_consensus_state_type(value.type_url)), } } @@ -60,6 +71,7 @@ impl From for Any { type_url: TENDERMINT_CONSENSUS_STATE_TYPE_URL.to_string(), value: Protobuf::::encode_vec(value), }, + AnyConsensusState::Mithril(value) => value.into(), } } } @@ -70,6 +82,12 @@ impl From for AnyConsensusState { } } +impl From for AnyConsensusState { + fn from(cs: MithrilConsensusState) -> Self { + Self::Mithril(cs) + } +} + #[derive(Clone, Debug, PartialEq, Eq, Serialize)] pub struct AnyConsensusStateWithHeight { pub height: Height, @@ -115,6 +133,7 @@ impl ConsensusState for AnyConsensusState { fn root(&self) -> &CommitmentRoot { match self { Self::Tendermint(cs_state) => cs_state.root(), + Self::Mithril(cs_state) => ConsensusState::root(cs_state), } } diff --git a/crates/relayer/src/event/source.rs b/crates/relayer/src/event/source.rs index c114ac5e51..57d793bad7 100644 --- a/crates/relayer/src/event/source.rs +++ b/crates/relayer/src/event/source.rs @@ -82,6 +82,11 @@ pub type EventReceiver = channel::Receiver>; pub struct TxEventSourceCmd(channel::Sender); impl TxEventSourceCmd { + /// Create a new TxEventSourceCmd from a command sender channel + pub fn new(sender: channel::Sender) -> Self { + Self(sender) + } + pub fn shutdown(&self) -> Result<()> { self.0 .send(EventSourceCmd::Shutdown) diff --git a/crates/relayer/src/foreign_client.rs 
b/crates/relayer/src/foreign_client.rs index 027e825a43..ef0515dcc2 100644 --- a/crates/relayer/src/foreign_client.rs +++ b/crates/relayer/src/foreign_client.rs @@ -720,7 +720,7 @@ impl ForeignClient ForeignClient config.client_refresh_rate, + ChainConfig::Cardano(config) => config.client_refresh_rate, }; let refresh_period = client_state @@ -1765,6 +1766,7 @@ impl ForeignClient false, + ChainConfig::Cardano(_) => false, }; let mut msgs = vec![]; diff --git a/crates/relayer/src/keyring/any_signing_key_pair.rs b/crates/relayer/src/keyring/any_signing_key_pair.rs index 3bd57177a2..f1b0710bfe 100644 --- a/crates/relayer/src/keyring/any_signing_key_pair.rs +++ b/crates/relayer/src/keyring/any_signing_key_pair.rs @@ -1,13 +1,16 @@ use serde::Serialize; use super::{Ed25519KeyPair, KeyType, NamadaKeyPair, Secp256k1KeyPair, SigningKeyPair}; +use crate::chain::cardano::CardanoSigningKeyPair; #[derive(Clone, Debug, Serialize)] #[serde(untagged)] +#[allow(clippy::large_enum_variant)] pub enum AnySigningKeyPair { Secp256k1(Secp256k1KeyPair), Ed25519(Ed25519KeyPair), Namada(NamadaKeyPair), + Cardano(CardanoSigningKeyPair), } impl AnySigningKeyPair { @@ -16,6 +19,7 @@ impl AnySigningKeyPair { Self::Secp256k1(key_pair) => key_pair.account(), Self::Ed25519(key_pair) => key_pair.account(), Self::Namada(key_pair) => key_pair.account(), + Self::Cardano(key_pair) => key_pair.account(), } } @@ -24,6 +28,7 @@ impl AnySigningKeyPair { Self::Secp256k1(_) => Secp256k1KeyPair::KEY_TYPE, Self::Ed25519(_) => Ed25519KeyPair::KEY_TYPE, Self::Namada(_) => NamadaKeyPair::KEY_TYPE, + Self::Cardano(_) => CardanoSigningKeyPair::KEY_TYPE, } } @@ -32,6 +37,7 @@ impl AnySigningKeyPair { Self::Secp256k1(key_pair) => key_pair.as_any(), Self::Ed25519(key_pair) => key_pair.as_any(), Self::Namada(key_pair) => key_pair.as_any(), + Self::Cardano(key_pair) => key_pair.as_any(), } .downcast_ref::() .cloned() @@ -55,3 +61,9 @@ impl From for AnySigningKeyPair { Self::Namada(key_pair) } } + +impl From for 
AnySigningKeyPair { + fn from(key_pair: CardanoSigningKeyPair) -> Self { + Self::Cardano(key_pair) + } +} diff --git a/crates/relayer/src/light_client/tendermint.rs b/crates/relayer/src/light_client/tendermint.rs index a62ead4e5f..2d7e241345 100644 --- a/crates/relayer/src/light_client/tendermint.rs +++ b/crates/relayer/src/light_client/tendermint.rs @@ -156,10 +156,17 @@ impl super::LightClient for LightClient { let update_header = match any_header { AnyHeader::Tendermint(header) => Ok::<_, Error>(header), + AnyHeader::Mithril(_) => Err(Error::misbehaviour(format!( + "received Mithril header in Tendermint light client for chain {}", + self.chain_id + ))), }?; let client_state = match client_state { AnyClientState::Tendermint(client_state) => Ok::<_, Error>(client_state), + AnyClientState::Mithril(_) => Err(Error::client_state_type( + "received Mithril client state in Tendermint light client".to_string(), + )), }?; let next_validators = self @@ -358,6 +365,9 @@ impl LightClient { let client_state = match client_state { AnyClientState::Tendermint(client_state) => Ok::<_, Error>(client_state), + AnyClientState::Mithril(_) => Err(Error::client_state_type( + "received Mithril client state in Tendermint light client".to_string(), + )), }?; Ok(TmLightClient::new( diff --git a/crates/relayer/src/link/operational_data.rs b/crates/relayer/src/link/operational_data.rs index 38d8495119..f57d1afa24 100644 --- a/crates/relayer/src/link/operational_data.rs +++ b/crates/relayer/src/link/operational_data.rs @@ -14,6 +14,7 @@ use crate::chain::requests::QueryClientStateRequest; use crate::chain::requests::QueryHeight; use crate::chain::tracking::TrackedMsgs; use crate::chain::tracking::TrackingId; +use crate::config::ChainConfig; use crate::event::IbcEventWithHeight; use crate::link::error::LinkError; use crate::link::RelayPath; @@ -159,7 +160,39 @@ impl OperationalData { ) -> Result { // For zero delay we prepend the client update msgs. 
let client_update_msgs = if !self.conn_delay_needed() { - let update_height = self.proofs_height.increment(); + // Hermes normally updates the on-chain light client to `proof_height + 1` before + // sending proof-bearing messages (connection/channel/packet). + // + // For Cardano↔Cosmos (Mithril) in our system, proofs are verified against the + // consensus state stored at the exact `proof_height` returned by the Gateway. + // If we update to `proof_height + 1` first, the Mithril client will not have a + // consensus state stored at `proof_height`, and verification fails with: + // "consensus state not found". + // + // Therefore, when we are updating a Cardano-tracking client (i.e. the counterparty + // chain in this relay path is Cardano), we update to `proof_height` directly. + let src_chain_is_cardano = matches!( + relay_path + .src_chain() + .config() + .map_err(LinkError::relayer)?, + ChainConfig::Cardano(_) + ); + let dst_chain_is_cardano = matches!( + relay_path + .dst_chain() + .config() + .map_err(LinkError::relayer)?, + ChainConfig::Cardano(_) + ); + let update_height = if (matches!(self.target, OperationalDataTarget::Destination) + && src_chain_is_cardano) + || (matches!(self.target, OperationalDataTarget::Source) && dst_chain_is_cardano) + { + self.proofs_height + } else { + self.proofs_height.increment() + }; debug!( "prepending {} client update at height {}", diff --git a/crates/relayer/src/spawn.rs b/crates/relayer/src/spawn.rs index e0f3b69bf8..86422ebfc5 100644 --- a/crates/relayer/src/spawn.rs +++ b/crates/relayer/src/spawn.rs @@ -7,8 +7,8 @@ use ibc_relayer_types::core::ics24_host::identifier::ChainId; use crate::{ chain::{ - cosmos::CosmosSdkChain, handle::ChainHandle, namada::NamadaChain, penumbra::PenumbraChain, - runtime::ChainRuntime, + cardano::CardanoChain, cosmos::CosmosSdkChain, handle::ChainHandle, namada::NamadaChain, + penumbra::PenumbraChain, runtime::ChainRuntime, }, config::{ChainConfig, Config}, error::Error as RelayerError, @@ 
-87,6 +87,7 @@ pub fn spawn_chain_runtime_with_config( ChainConfig::CosmosSdk(_) => ChainRuntime::::spawn(config, rt), ChainConfig::Namada(_) => ChainRuntime::::spawn(config, rt), ChainConfig::Penumbra(_) => ChainRuntime::::spawn(config, rt), + ChainConfig::Cardano(_) => ChainRuntime::::spawn(config, rt), } .map_err(SpawnError::relayer)?; diff --git a/tools/integration-test/src/bin/test_setup_with_binary_channel.rs b/tools/integration-test/src/bin/test_setup_with_binary_channel.rs index be2ede13e7..842f8750c4 100644 --- a/tools/integration-test/src/bin/test_setup_with_binary_channel.rs +++ b/tools/integration-test/src/bin/test_setup_with_binary_channel.rs @@ -52,6 +52,7 @@ impl TestOverrides for Test { // with external relayer commands. chain_config.key_store_type = Store::Test; } + ChainConfig::Cardano(_) => { /* no-op */ } ChainConfig::Penumbra(_) => { /* no-op */ } } } diff --git a/tools/integration-test/src/bin/test_setup_with_fee_enabled_binary_channel.rs b/tools/integration-test/src/bin/test_setup_with_fee_enabled_binary_channel.rs index f58dc0c600..f2258ccfd5 100644 --- a/tools/integration-test/src/bin/test_setup_with_fee_enabled_binary_channel.rs +++ b/tools/integration-test/src/bin/test_setup_with_fee_enabled_binary_channel.rs @@ -53,6 +53,7 @@ impl TestOverrides for Test { // with external relayer commands. chain_config.key_store_type = Store::Test; } + ChainConfig::Cardano(_) => { /* no-op */ } ChainConfig::Penumbra(_) => { /* no-op */ } } } diff --git a/tools/integration-test/src/bin/test_setup_with_ternary_channel.rs b/tools/integration-test/src/bin/test_setup_with_ternary_channel.rs index f690eb9edf..949dd2c05f 100644 --- a/tools/integration-test/src/bin/test_setup_with_ternary_channel.rs +++ b/tools/integration-test/src/bin/test_setup_with_ternary_channel.rs @@ -52,6 +52,7 @@ impl TestOverrides for Test { // with external relayer commands. 
chain_config.key_store_type = Store::Test; } + ChainConfig::Cardano(_) => { /* no-op */ } ChainConfig::Penumbra(_) => { /* no-op */ } } } diff --git a/tools/integration-test/src/tests/async_icq/simple_query.rs b/tools/integration-test/src/tests/async_icq/simple_query.rs index 96eb5feb0f..4713784e9b 100644 --- a/tools/integration-test/src/tests/async_icq/simple_query.rs +++ b/tools/integration-test/src/tests/async_icq/simple_query.rs @@ -338,6 +338,7 @@ fn assert_eventual_async_icq_success( let rpc_addr = match relayer.config.chains.first().unwrap() { ChainConfig::CosmosSdk(c) | ChainConfig::Namada(c) => c.rpc_addr.clone(), ChainConfig::Penumbra(_) => panic!("running tests with Penumbra chain not supported"), + ChainConfig::Cardano(_) => panic!("running tests with Cardano chain not supported"), }; let mut rpc_client = HttpClient::new(rpc_addr).unwrap(); @@ -374,6 +375,7 @@ fn assert_eventual_async_icq_error( let rpc_addr = match relayer.config.chains.first().unwrap() { ChainConfig::CosmosSdk(c) | ChainConfig::Namada(c) => c.rpc_addr.clone(), ChainConfig::Penumbra(_) => panic!("running tests with Penumbra chain not supported"), + ChainConfig::Cardano(_) => panic!("running tests with Cardano chain not supported"), }; let mut rpc_client = HttpClient::new(rpc_addr).unwrap(); diff --git a/tools/integration-test/src/tests/channel_upgrade/ica.rs b/tools/integration-test/src/tests/channel_upgrade/ica.rs index 2c4293f7d3..ce75cd0f1a 100644 --- a/tools/integration-test/src/tests/channel_upgrade/ica.rs +++ b/tools/integration-test/src/tests/channel_upgrade/ica.rs @@ -321,6 +321,9 @@ impl TestOverrides for ChannelUpgradeICAUnordered { ChainConfig::Penumbra(_) => { panic!("running tests with Penumbra chain not supported") } + ChainConfig::Cardano(_) => { + panic!("running tests with Cardano chain not supported") + } } } } diff --git a/tools/integration-test/src/tests/clear_packet.rs b/tools/integration-test/src/tests/clear_packet.rs index cd383a7879..d4dc191ac7 100644 --- 
a/tools/integration-test/src/tests/clear_packet.rs +++ b/tools/integration-test/src/tests/clear_packet.rs @@ -346,6 +346,7 @@ impl TestOverrides for ClearPacketOverrideTest { chain_config.clear_interval = Some(10) } ChainConfig::Penumbra(_) => { /* no-op */ } + ChainConfig::Cardano(_) => { /* no-op */ } } } } diff --git a/tools/integration-test/src/tests/client_expiration.rs b/tools/integration-test/src/tests/client_expiration.rs index a5c70dfd3d..b504dda2c2 100644 --- a/tools/integration-test/src/tests/client_expiration.rs +++ b/tools/integration-test/src/tests/client_expiration.rs @@ -118,6 +118,7 @@ impl TestOverrides for ExpirationTestOverrides { chain_config.trusting_period = Some(CLIENT_EXPIRY); } ChainConfig::Penumbra(_) => { /* no-op */ } + ChainConfig::Cardano(_) => { /* no-op */ } } } } diff --git a/tools/integration-test/src/tests/client_refresh.rs b/tools/integration-test/src/tests/client_refresh.rs index 8a575e9e97..c9a56d4bc2 100644 --- a/tools/integration-test/src/tests/client_refresh.rs +++ b/tools/integration-test/src/tests/client_refresh.rs @@ -135,6 +135,7 @@ impl BinaryChainTest for ClientFailsTest { config_chain_a.gas_multiplier = Some(GasMultiplier::unsafe_new(0.8)); } ChainConfig::Penumbra(_) => { /* no-op */ } + ChainConfig::Cardano(_) => { /* no-op */ } } } @@ -146,6 +147,7 @@ impl BinaryChainTest for ClientFailsTest { config_chain_b.gas_multiplier = Some(GasMultiplier::unsafe_new(0.8)); } ChainConfig::Penumbra(_) => { /* no-op */ } + ChainConfig::Cardano(_) => { /* no-op */ } } }, config, diff --git a/tools/integration-test/src/tests/client_settings.rs b/tools/integration-test/src/tests/client_settings.rs index a30031fe4b..51c4e91bef 100644 --- a/tools/integration-test/src/tests/client_settings.rs +++ b/tools/integration-test/src/tests/client_settings.rs @@ -33,6 +33,7 @@ impl TestOverrides for ClientDefaultsTest { chain_config_a.trust_threshold = TrustThreshold::new(13, 23).unwrap(); } ChainConfig::Penumbra(_) => { /* no-op */ } + 
ChainConfig::Cardano(_) => { /* no-op */ } } match &mut config.chains[1] { @@ -43,6 +44,7 @@ impl TestOverrides for ClientDefaultsTest { chain_config_b.trust_threshold = TrustThreshold::TWO_THIRDS; } ChainConfig::Penumbra(_) => { /* no-op */ } + ChainConfig::Cardano(_) => { /* no-op */ } } } } diff --git a/tools/integration-test/src/tests/client_upgrade.rs b/tools/integration-test/src/tests/client_upgrade.rs index b420c481a5..5ecf9d706d 100644 --- a/tools/integration-test/src/tests/client_upgrade.rs +++ b/tools/integration-test/src/tests/client_upgrade.rs @@ -173,6 +173,7 @@ impl BinaryChainTest for ClientUpgradeTest { assert_eq!(client_state.chain_id, upgraded_chain_id); Ok(()) } + AnyClientState::Mithril(_) => unreachable!("unexpected client state type"), } } } @@ -226,6 +227,7 @@ impl BinaryChainTest for InvalidClientUpgradeTest { assert_eq!(client_state.chain_id, chains.handle_a().id()); Ok(()) } + AnyClientState::Mithril(_) => unreachable!("unexpected client state type"), } } } @@ -326,6 +328,7 @@ impl BinaryChainTest for HeightTooHighClientUpgradeTest { assert_eq!(client_state.chain_id, chains.handle_a().id()); Ok(()) } + AnyClientState::Mithril(_) => unreachable!("unexpected client state type"), } } } @@ -429,6 +432,7 @@ impl BinaryChainTest for HeightTooLowClientUpgradeTest { assert_eq!(client_state.chain_id, chains.handle_a().id()); Ok(()) } + AnyClientState::Mithril(_) => unreachable!("unexpected client state type"), } } } diff --git a/tools/integration-test/src/tests/dynamic_gas_fee.rs b/tools/integration-test/src/tests/dynamic_gas_fee.rs index aa091d2efc..2e449c33f9 100644 --- a/tools/integration-test/src/tests/dynamic_gas_fee.rs +++ b/tools/integration-test/src/tests/dynamic_gas_fee.rs @@ -54,6 +54,7 @@ impl TestOverrides for DynamicGasTest { } ChainConfig::Namada(_) => {} ChainConfig::Penumbra(_) => panic!("running tests with Penumbra chain not supported"), + ChainConfig::Cardano(_) => panic!("running tests with Cardano chain not supported"), } match 
&mut config.chains[1] { @@ -65,6 +66,7 @@ impl TestOverrides for DynamicGasTest { } ChainConfig::Namada(_) => {} ChainConfig::Penumbra(_) => panic!("running tests with Penumbra chain not supported"), + ChainConfig::Cardano(_) => panic!("running tests with Cardano chain not supported"), } } @@ -104,6 +106,7 @@ impl BinaryChannelTest for DynamicGasTest { chain_config.gas_price.denom.clone() } ChainConfig::Penumbra(_) => panic!("running tests with Penumbra chain not supported"), + ChainConfig::Cardano(_) => panic!("running tests with Cardano chain not supported"), }; let gas_denom_str_b: String = match relayer @@ -116,6 +119,7 @@ impl BinaryChannelTest for DynamicGasTest { chain_config.gas_price.denom.clone() } ChainConfig::Penumbra(_) => panic!("running tests with Penumbra chain not supported"), + ChainConfig::Cardano(_) => panic!("running tests with Cardano chain not supported"), }; let gas_denom_a: MonoTagged = diff --git a/tools/integration-test/src/tests/fee/filter_fees.rs b/tools/integration-test/src/tests/fee/filter_fees.rs index 5decfb8647..0eeea39e87 100644 --- a/tools/integration-test/src/tests/fee/filter_fees.rs +++ b/tools/integration-test/src/tests/fee/filter_fees.rs @@ -38,6 +38,9 @@ impl TestOverrides for FilterIncentivizedFeesRelayerTest { ChainConfig::Penumbra(_) => { panic!("running tests with Penumbra chain not supported") } + ChainConfig::Cardano(_) => { + panic!("running tests with Cardano chain not supported") + } } } } @@ -193,6 +196,9 @@ impl TestOverrides for FilterByChannelIncentivizedFeesRelayerTest { ChainConfig::Penumbra(_) => { panic!("running tests with Penumbra chain not supported") } + ChainConfig::Cardano(_) => { + panic!("running tests with Cardano chain not supported") + } } } } diff --git a/tools/integration-test/src/tests/fee_grant.rs b/tools/integration-test/src/tests/fee_grant.rs index 599d0ee7d5..4bd10d6065 100644 --- a/tools/integration-test/src/tests/fee_grant.rs +++ b/tools/integration-test/src/tests/fee_grant.rs @@ -89,6 
+89,7 @@ impl BinaryChannelTest for FeeGrantTest { chain_config.gas_price.denom.clone() } ChainConfig::Penumbra(_) => panic!("running tests with Penumbra chain not supported"), + ChainConfig::Cardano(_) => panic!("running tests with Cardano chain not supported"), }; let gas_denom: MonoTagged = @@ -118,6 +119,9 @@ impl BinaryChannelTest for FeeGrantTest { ChainConfig::Penumbra(_) => { panic!("running tests with Penumbra chain not supported") } + ChainConfig::Cardano(_) => { + panic!("running tests with Cardano chain not supported") + } } } }); @@ -243,6 +247,7 @@ impl BinaryChannelTest for NoFeeGrantTest { chain_config.gas_price.denom.clone() } ChainConfig::Penumbra(_) => panic!("running tests with Penumbra chain not supported"), + ChainConfig::Cardano(_) => panic!("running tests with Cardano chain not supported"), }; let gas_denom: MonoTagged = diff --git a/tools/integration-test/src/tests/ica.rs b/tools/integration-test/src/tests/ica.rs index f7214b08e2..197af7d3b5 100644 --- a/tools/integration-test/src/tests/ica.rs +++ b/tools/integration-test/src/tests/ica.rs @@ -77,6 +77,9 @@ impl TestOverrides for IcaFilterTestAllow { ChainConfig::Penumbra(_) => { panic!("running tests with Penumbra chain not supported") } + ChainConfig::Cardano(_) => { + panic!("running tests with Cardano chain not supported") + } } } } @@ -201,6 +204,9 @@ impl TestOverrides for IcaFilterTestDeny { ChainConfig::Penumbra(_) => { panic!("running tests with Penumbra chain not supported") } + ChainConfig::Cardano(_) => { + panic!("running tests with Cardano chain not supported") + } } } } diff --git a/tools/integration-test/src/tests/interchain_security/dynamic_gas_fee.rs b/tools/integration-test/src/tests/interchain_security/dynamic_gas_fee.rs index 36ff4f4855..0f1023ec0e 100644 --- a/tools/integration-test/src/tests/interchain_security/dynamic_gas_fee.rs +++ b/tools/integration-test/src/tests/interchain_security/dynamic_gas_fee.rs @@ -87,6 +87,7 @@ impl TestOverrides for DynamicGasTest { 
chain_config_a.dynamic_gas_price = DynamicGasPrice::unsafe_new(false, 1.1, 0.6); } ChainConfig::Penumbra(_) => panic!("running tests with Penumbra chain not supported"), + ChainConfig::Cardano(_) => panic!("running tests with Cardano chain not supported"), } match &mut config.chains[1] { @@ -100,6 +101,7 @@ impl TestOverrides for DynamicGasTest { DynamicGasPrice::unsafe_new(self.dynamic_gas_enabled, 1.1, 0.6); } ChainConfig::Penumbra(_) => panic!("running tests with Penumbra chain not supported"), + ChainConfig::Cardano(_) => panic!("running tests with Cardano chain not supported"), } } diff --git a/tools/integration-test/src/tests/interchain_security/icq.rs b/tools/integration-test/src/tests/interchain_security/icq.rs index 32297c0c20..408dcfadf3 100644 --- a/tools/integration-test/src/tests/interchain_security/icq.rs +++ b/tools/integration-test/src/tests/interchain_security/icq.rs @@ -98,6 +98,9 @@ impl TestOverrides for InterchainSecurityIcqTest { ChainConfig::Penumbra(_) => { panic!("running tests with Penumbra chain not supported") } + ChainConfig::Cardano(_) => { + panic!("running tests with Cardano chain not supported") + } } } } diff --git a/tools/integration-test/src/tests/manual/simulation.rs b/tools/integration-test/src/tests/manual/simulation.rs index 9b04535337..baa6c2e15d 100644 --- a/tools/integration-test/src/tests/manual/simulation.rs +++ b/tools/integration-test/src/tests/manual/simulation.rs @@ -35,6 +35,9 @@ impl TestOverrides for SimulationTest { ChainConfig::Penumbra(_) => { panic!("running tests with Penumbra chain not supported") } + ChainConfig::Cardano(_) => { + panic!("running tests with Cardano chain not supported") + } } } } diff --git a/tools/integration-test/src/tests/memo.rs b/tools/integration-test/src/tests/memo.rs index 7755e4d31e..1cdf574166 100644 --- a/tools/integration-test/src/tests/memo.rs +++ b/tools/integration-test/src/tests/memo.rs @@ -40,6 +40,7 @@ impl TestOverrides for MemoTest { chain_config.memo_prefix = 
self.memo.clone(); } ChainConfig::Penumbra(_) => { /* no-op */ } + ChainConfig::Cardano(_) => { /* no-op */ } } } } @@ -101,6 +102,7 @@ impl TestOverrides for MemoOverwriteTest { chain_config.memo_overwrite = Some(Memo::new(OVERWRITE_MEMO).unwrap()) } ChainConfig::Penumbra(_) => { /* no-op */ } + ChainConfig::Cardano(_) => { /* no-op */ } } } } diff --git a/tools/integration-test/src/tests/ordered_channel_clear.rs b/tools/integration-test/src/tests/ordered_channel_clear.rs index 472cb8cea8..214ca944f2 100644 --- a/tools/integration-test/src/tests/ordered_channel_clear.rs +++ b/tools/integration-test/src/tests/ordered_channel_clear.rs @@ -57,6 +57,9 @@ impl TestOverrides for OrderedChannelClearTest { ChainConfig::Penumbra(_) => { panic!("running tests with Penumbra chain not supported") } + ChainConfig::Cardano(_) => { + panic!("running tests with Cardano chain not supported") + } } } @@ -66,6 +69,7 @@ impl TestOverrides for OrderedChannelClearTest { chain_config.sequential_batch_tx = self.sequential_batch_tx; } ChainConfig::Penumbra(_) => panic!("running tests with Penumbra chain not supported"), + ChainConfig::Cardano(_) => panic!("running tests with Cardano chain not supported"), } } @@ -208,6 +212,9 @@ impl TestOverrides for OrderedChannelClearEqualCLITest { ChainConfig::Penumbra(_) => { panic!("running tests with Penumbra chain not supported") } + ChainConfig::Cardano(_) => { + panic!("running tests with Cardano chain not supported") + } } } @@ -218,6 +225,7 @@ impl TestOverrides for OrderedChannelClearEqualCLITest { chain_config.max_msg_num = MaxMsgNum::new(3).unwrap(); } ChainConfig::Penumbra(_) => panic!("running tests with Penumbra chain not supported"), + ChainConfig::Cardano(_) => panic!("running tests with Cardano chain not supported"), } } diff --git a/tools/integration-test/src/tests/python.rs b/tools/integration-test/src/tests/python.rs index f8ad604561..101fe3f9c5 100644 --- a/tools/integration-test/src/tests/python.rs +++ 
b/tools/integration-test/src/tests/python.rs @@ -17,6 +17,7 @@ impl TestOverrides for PythonTest { chain_config.key_store_type = Store::Test; } ChainConfig::Penumbra(_) => { /* no-op */ } + ChainConfig::Cardano(_) => { /* no-op */ } } } } diff --git a/tools/integration-test/src/tests/sequence_filter.rs b/tools/integration-test/src/tests/sequence_filter.rs index c691ee608c..50c1fa4729 100644 --- a/tools/integration-test/src/tests/sequence_filter.rs +++ b/tools/integration-test/src/tests/sequence_filter.rs @@ -56,6 +56,7 @@ impl TestOverrides for FilterClearOnStartTest { chain_config.excluded_sequences = ExcludedSequences::new(excluded_sequences); } ChainConfig::Penumbra(_) => { /* no-op */ } + ChainConfig::Cardano(_) => { /* no-op */ } } config.mode.channels.enabled = true; @@ -94,6 +95,7 @@ impl TestOverrides for FilterClearIntervalTest { chain_config.excluded_sequences = ExcludedSequences::new(excluded_sequences); } ChainConfig::Penumbra(_) => { /* no-op */ } + ChainConfig::Cardano(_) => { /* no-op */ } } config.mode.channels.enabled = true; @@ -254,6 +256,7 @@ impl TestOverrides for StandardRelayingNoFilterTest { chain_config.excluded_sequences = ExcludedSequences::new(excluded_sequences); } ChainConfig::Penumbra(_) => { /* no-op */ } + ChainConfig::Cardano(_) => { /* no-op */ } } config.mode.packets.clear_on_start = true; config.mode.packets.clear_interval = 0; diff --git a/tools/integration-test/src/tests/tendermint/sequential.rs b/tools/integration-test/src/tests/tendermint/sequential.rs index ac737f7c90..7027d88630 100644 --- a/tools/integration-test/src/tests/tendermint/sequential.rs +++ b/tools/integration-test/src/tests/tendermint/sequential.rs @@ -49,6 +49,7 @@ impl TestOverrides for SequentialCommitTest { chain_config_a.sequential_batch_tx = true; } ChainConfig::Penumbra(_) => { /* no-op */ } + ChainConfig::Cardano(_) => { /* no-op */ } }; match &mut config.chains[1] { @@ -57,6 +58,7 @@ impl TestOverrides for SequentialCommitTest { 
chain_config_b.sequential_batch_tx = false; } ChainConfig::Penumbra(_) => { /* no-op */ } + ChainConfig::Cardano(_) => { /* no-op */ } }; } diff --git a/tools/test-framework/src/util/interchain_security.rs b/tools/test-framework/src/util/interchain_security.rs index 3888e60fbd..0edd985a4f 100644 --- a/tools/test-framework/src/util/interchain_security.rs +++ b/tools/test-framework/src/util/interchain_security.rs @@ -37,6 +37,7 @@ pub fn update_relayer_config_for_consumer_chain(config: &mut Config) { } ChainConfig::CosmosSdk(_) | ChainConfig::Namada(_) => {} ChainConfig::Penumbra(_) => { /* no-op Penumbra does not support CCV */ } + ChainConfig::Cardano(_) => { /* no-op Cardano does not support CCV */ } } } }