diff --git a/.gitattributes b/.gitattributes new file mode 100644 index 00000000..789ed5db --- /dev/null +++ b/.gitattributes @@ -0,0 +1 @@ +* merge=mergiraf diff --git a/Cargo.lock b/Cargo.lock index b6d66d41..13d46453 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1596,6 +1596,8 @@ dependencies = [ "cb-pbs", "cb-signer", "eyre", + "futures", + "jsonwebtoken", "reqwest", "serde_json", "tempfile", diff --git a/Cargo.toml b/Cargo.toml index 3955e9cc..124e23a3 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,5 +1,5 @@ [workspace] -members = ["benches/*", "bin", "crates/*", "examples/builder_log", "examples/da_commit", "examples/status_api", "tests"] +members = ["benches/*", "bin", "crates/cli", "crates/common", "crates/metrics", "crates/pbs", "crates/signer", "examples/builder_log", "examples/da_commit", "examples/status_api", "tests"] resolver = "2" [workspace.package] diff --git a/api/signer-api.yml b/api/signer-api.yml index c876a3a2..69239e38 100644 --- a/api/signer-api.yml +++ b/api/signer-api.yml @@ -60,7 +60,7 @@ paths: /signer/v1/request_signature: post: - summary: Send a signature request + summary: Request a signature for a 32-byte blob of data (typically a hash), signed by the requested BLS or ECDSA key. tags: - Signer security: @@ -81,15 +81,15 @@ paths: type: string enum: [consensus, proxy_bls, proxy_ecdsa] pubkey: - description: Public key of the validator for consensus signatures + description: The 48-byte BLS public key, with optional `0x` prefix, of the proposer key that you want to request a signature from. $ref: "#/components/schemas/BlsPubkey" proxy: - description: BLS proxy pubkey or ECDSA address for proxy signatures + description: The 48-byte BLS public key (for `proxy_bls` mode) or the 20-byte Ethereum address (for `proxy_ecdsa` mode), with optional `0x` prefix, of the proxy key that you want to request a signature from. oneOf: - $ref: "#/components/schemas/BlsPubkey" - $ref: "#/components/schemas/EcdsaAddress" object_root: - description: The root of the object to be signed + description: The 32-byte data you want to sign, with optional `0x` prefix. type: string format: hex pattern: "^0x[a-fA-F0-9]{64}$" @@ -112,7 +112,7 @@ paths: object_root: "0x3e9f4a78b5c21d64f0b8e3d9a7f5c02b4d1e67a3c8f29b5d6e4a3b1c8f72e6d9" responses: "200": - description: Success + description: A successful signature response. The returned signature is the Merkle root hash of the provided `object_root` field and the requesting module's Signing ID as specified in the Commit-Boost configuration. For details on this signature, see the [signature structure documentation](https://commit-boost.github.io/commit-boost-client/developing/prop-commit-signing.md#structure-of-a-signature). content: application/json: schema: @@ -126,8 +126,45 @@ paths: value: "0xa3ffa9241f78279f1af04644cb8c79c2d8f02bcf0e28e2f186f6dcccac0a869c2be441fda50f0dea895cfce2e53f0989a3ffa9241f78279f1af04644cb8c79c2d8f02bcf0e28e2f186f6dcccac0a869c2be441fda50f0dea895cfce2e53f0989" ProxyEcdsa: value: "0x985b495f49d1b96db3bba3f6c5dd1810950317c10d4c2042bd316f338cdbe74359072e209b85e56ac492092d7860063dd096ca31b4e164ef27e3f8d508e656801c" + "400": + description: | + This can occur in several scenarios: + + - You requested an operation while using the Dirk signer mode instead of locally-managed signer mode, but Dirk doesn't support that operation. + - Something went wrong while preparing your request; the error text will provide more information. + content: + application/json: + schema: + type: object + required: + - code + - message + properties: + code: + type: number + example: 400 + message: + type: string + example: "Bad request: Invalid pubkey format" + "401": + description: The requesting module did not provide a JWT string in the request's authorization header, or the JWT string was not configured in the signer service's configuration file as belonging to the module. + content: + application/json: + schema: + type: object + required: + - code + - message + properties: + code: + type: number + example: 401 + message: + type: string + example: "Unauthorized" + "404": - description: Unknown value (pubkey, etc.) + description: You either requested a route that doesn't exist, or you requested a signature from a key that does not exist. content: application/json: schema: @@ -142,8 +179,24 @@ paths: message: type: string example: "Unknown pubkey" + "429": + description: Your module attempted and failed JWT authentication too many times recently, and is currently timed out. It cannot make any more requests until the timeout ends. + content: + application/json: + schema: + type: object + required: + - code + - message + properties: + code: + type: number + example: 429 + message: + type: string + example: "Too many requests" "500": - description: Internal error + description: Your request was valid, but something went wrong internally that prevented it from being fulfilled. content: application/json: schema: @@ -158,6 +211,22 @@ paths: message: type: string example: "Internal error" + "502": + description: The signer service is running in Dirk signer mode, but Dirk could not be reached. + content: + application/json: + schema: + type: object + required: + - code + - message + properties: + code: + type: number + example: 502 + message: + type: string + example: "Bad gateway: Dirk signer service is unreachable" /signer/v1/generate_proxy_key: post: diff --git a/benches/pbs/src/main.rs b/benches/pbs/src/main.rs index c013fd61..a4bf76bd 100644 --- a/benches/pbs/src/main.rs +++ b/benches/pbs/src/main.rs @@ -162,6 +162,7 @@ fn get_mock_validator(bench: BenchConfig) -> RelayClient { target_first_request_ms: None, frequency_get_header_ms: None, validator_registration_batch_size: None, + bid_boost: None, }; RelayClient::new(config).unwrap() diff --git a/bin/src/lib.rs b/bin/src/lib.rs index 126847b6..122a35fc 100644 --- a/bin/src/lib.rs +++ b/bin/src/lib.rs @@ -10,6 +10,9 @@ pub mod prelude { load_pbs_custom_config, LogsSettings, StartCommitModuleConfig, PBS_MODULE_NAME, }, pbs::{BuilderEvent, BuilderEventClient, OnBuilderApiEvent}, + signature::{ + verify_proposer_commitment_signature_bls, verify_proposer_commitment_signature_ecdsa, + }, signer::{BlsPublicKey, BlsSignature, EcdsaSignature}, types::Chain, utils::{initialize_tracing_log, utcnow_ms, utcnow_ns, utcnow_sec, utcnow_us}, diff --git a/config.example.toml b/config.example.toml index 8ed5b139..f4612081 100644 --- a/config.example.toml +++ b/config.example.toml @@ -152,10 +152,10 @@ url = "http://0xa119589bb33ef52acbb8116832bec2b58fca590fe5c85eac5d3230b44d5bc09f # - Dirk: a remote Dirk instance # - Local: a local Signer module # More details on the docs (https://commit-boost.github.io/commit-boost-client/get_started/configuration/#signer-module) -# [signer] +[signer] # Docker image to use for the Signer module. # OPTIONAL, DEFAULT: ghcr.io/commit-boost/signer:latest -# docker_image = "ghcr.io/commit-boost/signer:latest" +docker_image = "ghcr.io/commit-boost/signer:latest" # Host to bind the Signer API server to # OPTIONAL, DEFAULT: 127.0.0.1 host = "127.0.0.1" @@ -249,6 +249,8 @@ proxy_dir = "./proxies" [[modules]] # Unique ID of the module id = "DA_COMMIT" +# Unique hash that the Signer service will combine with the incoming data in signing requests to generate a signature specific to this module +signing_id = "0x6a33a23ef26a4836979edff86c493a69b26ccf0b4a16491a815a13787657431b" # Type of the module. Supported values: commit, events type = "commit" # Docker image of the module diff --git a/crates/cli/src/docker_init.rs b/crates/cli/src/docker_init.rs index 570d5d97..44899f3f 100644 --- a/crates/cli/src/docker_init.rs +++ b/crates/cli/src/docker_init.rs @@ -6,16 +6,16 @@ use std::{ use cb_common::{ config::{ - CommitBoostConfig, LogsSettings, ModuleKind, SignerConfig, SignerType, BUILDER_PORT_ENV, - BUILDER_URLS_ENV, CHAIN_SPEC_ENV, CONFIG_DEFAULT, CONFIG_ENV, DIRK_CA_CERT_DEFAULT, - DIRK_CA_CERT_ENV, DIRK_CERT_DEFAULT, DIRK_CERT_ENV, DIRK_DIR_SECRETS_DEFAULT, - DIRK_DIR_SECRETS_ENV, DIRK_KEY_DEFAULT, DIRK_KEY_ENV, JWTS_ENV, LOGS_DIR_DEFAULT, - LOGS_DIR_ENV, METRICS_PORT_ENV, MODULE_ID_ENV, MODULE_JWT_ENV, PBS_ENDPOINT_ENV, - PBS_MODULE_NAME, PROXY_DIR_DEFAULT, PROXY_DIR_ENV, PROXY_DIR_KEYS_DEFAULT, - PROXY_DIR_KEYS_ENV, PROXY_DIR_SECRETS_DEFAULT, PROXY_DIR_SECRETS_ENV, SIGNER_DEFAULT, - SIGNER_DIR_KEYS_DEFAULT, SIGNER_DIR_KEYS_ENV, SIGNER_DIR_SECRETS_DEFAULT, - SIGNER_DIR_SECRETS_ENV, SIGNER_ENDPOINT_ENV, SIGNER_KEYS_ENV, SIGNER_MODULE_NAME, - SIGNER_PORT_DEFAULT, SIGNER_URL_ENV, + CommitBoostConfig, LogsSettings, ModuleKind, SignerConfig, SignerType, ADMIN_JWT_ENV, + BUILDER_PORT_ENV, BUILDER_URLS_ENV, CHAIN_SPEC_ENV, CONFIG_DEFAULT, CONFIG_ENV, + DIRK_CA_CERT_DEFAULT, DIRK_CA_CERT_ENV, DIRK_CERT_DEFAULT, DIRK_CERT_ENV, + DIRK_DIR_SECRETS_DEFAULT, DIRK_DIR_SECRETS_ENV, DIRK_KEY_DEFAULT, DIRK_KEY_ENV, JWTS_ENV, + LOGS_DIR_DEFAULT, LOGS_DIR_ENV, METRICS_PORT_ENV, MODULE_ID_ENV, MODULE_JWT_ENV, + PBS_ENDPOINT_ENV, PBS_MODULE_NAME, PROXY_DIR_DEFAULT, PROXY_DIR_ENV, + PROXY_DIR_KEYS_DEFAULT, PROXY_DIR_KEYS_ENV, PROXY_DIR_SECRETS_DEFAULT, + PROXY_DIR_SECRETS_ENV, SIGNER_DEFAULT, SIGNER_DIR_KEYS_DEFAULT, SIGNER_DIR_KEYS_ENV, + SIGNER_DIR_SECRETS_DEFAULT, SIGNER_DIR_SECRETS_ENV, SIGNER_ENDPOINT_ENV, SIGNER_KEYS_ENV, + SIGNER_MODULE_NAME, SIGNER_PORT_DEFAULT, SIGNER_URL_ENV, }, pbs::{BUILDER_V1_API_PATH, GET_STATUS_PATH}, signer::{ProxyStore, SignerLoader}, @@ -86,8 +86,8 @@ pub async fn handle_docker_init(config_path: PathBuf, output_dir: PathBuf) -> Re let mut warnings = Vec::new(); - let needs_signer_module = cb_config.pbs.with_signer || - cb_config.modules.as_ref().is_some_and(|modules| { + let needs_signer_module = cb_config.pbs.with_signer + || cb_config.modules.as_ref().is_some_and(|modules| { modules.iter().any(|module| matches!(module.kind, ModuleKind::Commit)) }); @@ -161,9 +161,10 @@ pub async fn handle_docker_init(config_path: PathBuf, output_dir: PathBuf) -> Re // depends_on let mut module_dependencies = IndexMap::new(); - module_dependencies.insert("cb_signer".into(), DependsCondition { - condition: "service_healthy".into(), - }); + module_dependencies.insert( + "cb_signer".into(), + DependsCondition { condition: "service_healthy".into() }, + ); Service { container_name: Some(module_cid.clone()), @@ -333,6 +334,7 @@ pub async fn handle_docker_init(config_path: PathBuf, output_dir: PathBuf) -> Re let mut signer_envs = IndexMap::from([ get_env_val(CONFIG_ENV, CONFIG_DEFAULT), get_env_same(JWTS_ENV), + get_env_same(ADMIN_JWT_ENV), ]); // Bind the signer API to 0.0.0.0 @@ -366,6 +368,7 @@ pub async fn handle_docker_init(config_path: PathBuf, output_dir: PathBuf) -> Re // write jwts to env envs.insert(JWTS_ENV.into(), format_comma_separated(&jwts)); + envs.insert(ADMIN_JWT_ENV.into(), random_jwt_secret()); // volumes let mut volumes = vec![config_volume.clone()]; diff --git a/crates/common/src/commit/constants.rs b/crates/common/src/commit/constants.rs index 7c9f948c..ea9cd9bb 100644 --- a/crates/common/src/commit/constants.rs +++ b/crates/common/src/commit/constants.rs @@ -3,3 +3,4 @@ pub const REQUEST_SIGNATURE_PATH: &str = "/signer/v1/request_signature"; pub const GENERATE_PROXY_KEY_PATH: &str = "/signer/v1/generate_proxy_key"; pub const STATUS_PATH: &str = "/status"; pub const RELOAD_PATH: &str = "/reload"; +pub const REVOKE_MODULE_PATH: &str = "/revoke_jwt"; diff --git a/crates/common/src/commit/request.rs b/crates/common/src/commit/request.rs index b8843234..5bc3a14b 100644 --- a/crates/common/src/commit/request.rs +++ b/crates/common/src/commit/request.rs @@ -1,21 +1,26 @@ use std::{ + collections::HashMap, fmt::{self, Debug, Display}, str::FromStr, }; use alloy::{ hex, - primitives::{Address, B256}, + primitives::{aliases::B32, Address, B256}, rpc::types::beacon::BlsSignature, }; use derive_more::derive::From; -use serde::{Deserialize, Serialize}; +use serde::{Deserialize, Deserializer, Serialize}; use tree_hash::TreeHash; use tree_hash_derive::TreeHash; use crate::{ - constants::COMMIT_BOOST_DOMAIN, error::BlstErrorWrapper, signature::verify_signed_message, - signer::BlsPublicKey, types::Chain, + config::decode_string_to_map, + constants::COMMIT_BOOST_DOMAIN, + error::BlstErrorWrapper, + signature::verify_signed_message, + signer::BlsPublicKey, + types::{Chain, ModuleId}, }; pub trait ProxyId: AsRef<[u8]> + Debug + Clone + Copy + TreeHash + Display {} @@ -57,7 +62,8 @@ impl SignedProxyDelegation { &self.message.delegator, &self.message, &self.signature, - COMMIT_BOOST_DOMAIN, + None, + &B32::from(COMMIT_BOOST_DOMAIN), ) } } @@ -198,6 +204,31 @@ pub struct GetPubkeysResponse { pub keys: Vec, } +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct ReloadRequest { + #[serde(default, deserialize_with = "deserialize_jwt_secrets")] + pub jwt_secrets: Option>, + pub admin_secret: Option, +} + +pub fn deserialize_jwt_secrets<'de, D>( + deserializer: D, +) -> Result>, D::Error> +where + D: Deserializer<'de>, +{ + let raw: String = Deserialize::deserialize(deserializer)?; + + decode_string_to_map(&raw) + .map(Some) + .map_err(|_| serde::de::Error::custom("Invalid format".to_string())) +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct RevokeModuleRequest { + pub module_id: ModuleId, +} + /// Map of consensus pubkeys to proxies #[derive(Debug, Clone, Deserialize, Serialize)] pub struct ConsensusProxyMap { @@ -288,7 +319,7 @@ mod tests { let _: SignedProxyDelegationBls = serde_json::from_str(data).unwrap(); - let data = r#"{ + let data = r#"{ "message": { "delegator": "0xa3366b54f28e4bf1461926a3c70cdb0ec432b5c92554ecaae3742d33fb33873990cbed1761c68020e6d3c14d30a22050", "proxy": "0x4ca9939a8311a7cab3dde201b70157285fa81a9d" diff --git a/crates/common/src/config/constants.rs b/crates/common/src/config/constants.rs index 406f1375..39f3ed53 100644 --- a/crates/common/src/config/constants.rs +++ b/crates/common/src/config/constants.rs @@ -47,6 +47,7 @@ pub const SIGNER_JWT_AUTH_FAIL_TIMEOUT_SECONDS_DEFAULT: u32 = 5 * 60; /// Comma separated list module_id=jwt_secret pub const JWTS_ENV: &str = "CB_JWTS"; +pub const ADMIN_JWT_ENV: &str = "CB_SIGNER_ADMIN_JWT"; /// Path to json file with plaintext keys (testing only) pub const SIGNER_KEYS_ENV: &str = "CB_SIGNER_LOADER_FILE"; diff --git a/crates/common/src/config/mod.rs b/crates/common/src/config/mod.rs index 664fd13e..cd0b2181 100644 --- a/crates/common/src/config/mod.rs +++ b/crates/common/src/config/mod.rs @@ -13,6 +13,7 @@ mod mux; mod pbs; mod signer; mod utils; +mod validation; pub use constants::*; pub use log::*; @@ -22,6 +23,7 @@ pub use mux::*; pub use pbs::*; pub use signer::*; pub use utils::*; +pub use validation::*; #[derive(Debug, Deserialize, Serialize)] pub struct CommitBoostConfig { @@ -41,10 +43,17 @@ pub struct CommitBoostConfig { impl CommitBoostConfig { /// Validate config pub async fn validate(&self) -> Result<()> { + // Validate PBS configuration self.pbs.pbs_config.validate(self.chain).await?; + + // Validate signer configuration if present if let Some(signer) = &self.signer { signer.validate().await?; } + + // Validate module conflicts + validate_no_conflicts(self)?; + Ok(()) } @@ -93,13 +102,10 @@ impl CommitBoostConfig { /// Returns the path to the chain spec file if any pub fn chain_spec_file(path: &PathBuf) -> Option { match load_from_file::<_, ChainConfig>(path) { - Ok(config) => { - if let ChainLoader::Path { path, genesis_time_secs: _ } = config.chain { - Some(path) - } else { - None - } - } + Ok(config) => match config.chain { + ChainLoader::Path { path, .. } => Some(path), + _ => None, + }, Err(_) => None, } } diff --git a/crates/common/src/config/module.rs b/crates/common/src/config/module.rs index 16b089ca..71c4891b 100644 --- a/crates/common/src/config/module.rs +++ b/crates/common/src/config/module.rs @@ -1,5 +1,6 @@ use std::collections::HashMap; +use alloy::primitives::B256; use eyre::{ContextCompat, Result}; use serde::{de::DeserializeOwned, Deserialize, Serialize}; use toml::Table; @@ -37,6 +38,8 @@ pub struct StaticModuleConfig { /// Type of the module #[serde(rename = "type")] pub kind: ModuleKind, + /// Signing ID for the module to use when requesting signatures + pub signing_id: B256, } /// Runtime config to start a module diff --git a/crates/common/src/config/pbs.rs b/crates/common/src/config/pbs.rs index d04b3394..cb970f39 100644 --- a/crates/common/src/config/pbs.rs +++ b/crates/common/src/config/pbs.rs @@ -58,6 +58,9 @@ pub struct RelayConfig { /// request #[serde(deserialize_with = "empty_string_as_none", default)] pub validator_registration_batch_size: Option, + /// Simple bid boost: 1.05 = 5% increase + #[serde(default)] + pub bid_boost: Option, } fn empty_string_as_none<'de, D>(deserializer: D) -> Result, D::Error> diff --git a/crates/common/src/config/signer.rs b/crates/common/src/config/signer.rs index 0674d1f7..a397d696 100644 --- a/crates/common/src/config/signer.rs +++ b/crates/common/src/config/signer.rs @@ -4,25 +4,59 @@ use std::{ path::PathBuf, }; +use alloy::primitives::B256; use docker_image::DockerImage; -use eyre::{bail, ensure, OptionExt, Result}; +use eyre::{bail, ensure, Context, OptionExt, Result}; use serde::{Deserialize, Serialize}; use tonic::transport::{Certificate, Identity}; use url::Url; use super::{ - load_jwt_secrets, load_optional_env_var, utils::load_env_var, CommitBoostConfig, - SIGNER_ENDPOINT_ENV, SIGNER_IMAGE_DEFAULT, SIGNER_JWT_AUTH_FAIL_LIMIT_DEFAULT, - SIGNER_JWT_AUTH_FAIL_LIMIT_ENV, SIGNER_JWT_AUTH_FAIL_TIMEOUT_SECONDS_DEFAULT, - SIGNER_JWT_AUTH_FAIL_TIMEOUT_SECONDS_ENV, SIGNER_PORT_DEFAULT, + load_optional_env_var, utils::load_env_var, CommitBoostConfig, SIGNER_ENDPOINT_ENV, + SIGNER_IMAGE_DEFAULT, SIGNER_JWT_AUTH_FAIL_LIMIT_DEFAULT, SIGNER_JWT_AUTH_FAIL_LIMIT_ENV, + SIGNER_JWT_AUTH_FAIL_TIMEOUT_SECONDS_DEFAULT, SIGNER_JWT_AUTH_FAIL_TIMEOUT_SECONDS_ENV, + SIGNER_PORT_DEFAULT, }; use crate::{ - config::{DIRK_CA_CERT_ENV, DIRK_CERT_ENV, DIRK_DIR_SECRETS_ENV, DIRK_KEY_ENV}, + config::{ + load_jwt_secrets, DIRK_CA_CERT_ENV, DIRK_CERT_ENV, DIRK_DIR_SECRETS_ENV, DIRK_KEY_ENV, + }, signer::{ProxyStore, SignerLoader}, types::{Chain, ModuleId}, utils::{default_host, default_u16, default_u32}, }; +/// The signing configuration for a commitment module. +#[derive(Clone, Debug, PartialEq)] +pub struct ModuleSigningConfig { + /// Human-readable name of the module. + pub module_name: ModuleId, + + /// The JWT secret for the module to communicate with the signer module. + pub jwt_secret: String, + + /// A unique identifier for the module, which is used when signing requests + /// to generate signatures for this module. Must be a 32-byte hex string. + /// A leading 0x prefix is optional. + pub signing_id: B256, +} + +impl ModuleSigningConfig { + pub fn validate(&self) -> Result<()> { + // Ensure the JWT secret is not empty + if self.jwt_secret.is_empty() { + bail!("JWT secret cannot be empty"); + } + + // Ensure the signing ID is a valid B256 + if self.signing_id.is_zero() { + bail!("Signing ID cannot be zero"); + } + + Ok(()) + } +} + #[derive(Debug, Serialize, Deserialize, Clone)] #[serde(rename_all = "snake_case")] pub struct SignerConfig { @@ -130,7 +164,8 @@ pub struct StartSignerConfig { pub loader: Option, pub store: Option, pub endpoint: SocketAddr, - pub jwts: HashMap, + pub mod_signing_configs: HashMap, + pub admin_secret: String, pub jwt_auth_fail_limit: u32, pub jwt_auth_fail_timeout_seconds: u32, pub dirk: Option, @@ -140,7 +175,11 @@ impl StartSignerConfig { pub fn load_from_env() -> Result { let config = CommitBoostConfig::from_env_path()?; - let jwts = load_jwt_secrets()?; + let (admin_secret, jwt_secrets) = load_jwt_secrets()?; + + // Load the module signing configs + let mod_signing_configs = load_module_signing_configs(&config, &jwt_secrets) + .wrap_err("Failed to load module signing configs")?; let signer_config = config.signer.ok_or_eyre("Signer config is missing")?; @@ -174,7 +213,8 @@ impl StartSignerConfig { chain: config.chain, loader: Some(loader), endpoint, - jwts, + mod_signing_configs, + admin_secret, jwt_auth_fail_limit, jwt_auth_fail_timeout_seconds, store, @@ -204,7 +244,8 @@ impl StartSignerConfig { Ok(StartSignerConfig { chain: config.chain, endpoint, - jwts, + mod_signing_configs, + admin_secret, jwt_auth_fail_limit, jwt_auth_fail_timeout_seconds, loader: None, @@ -232,3 +273,341 @@ impl StartSignerConfig { } } } + +/// Loads the signing configurations for each module defined in the Commit Boost +/// config, coupling them with their JWT secrets and handling any potential +/// duplicates or missing values. +pub fn load_module_signing_configs( + config: &CommitBoostConfig, + jwt_secrets: &HashMap, +) -> Result> { + let mut mod_signing_configs = HashMap::new(); + let modules = config.modules.as_ref().ok_or_eyre("No modules defined in the config")?; + + let mut seen_jwt_secrets = HashMap::new(); + let mut seen_signing_ids = HashMap::new(); + for module in modules { + // Validate the module ID + ensure!(!module.id.is_empty(), "Module ID cannot be empty"); + + // Make sure it hasn't been used yet + ensure!( + !mod_signing_configs.contains_key(&module.id), + "Duplicate module config detected: ID {} is already used", + module.id + ); + + // Make sure the JWT secret is present + let jwt_secret = match jwt_secrets.get(&module.id) { + Some(secret) => secret.clone(), + None => bail!("JWT secret for module {} is missing", module.id), + }; + // Create the module signing config and validate it + let module_signing_config = ModuleSigningConfig { + module_name: module.id.clone(), + jwt_secret, + signing_id: module.signing_id, + }; + module_signing_config + .validate() + .wrap_err(format!("Invalid signing config for module {}", module.id))?; + + // Check for duplicates in JWT secrets and signing IDs + if let Some(existing_module) = + seen_jwt_secrets.insert(module_signing_config.jwt_secret.clone(), &module.id) + { + bail!("Duplicate JWT secret detected for modules {} and {}", existing_module, module.id) + }; + if let Some(existing_module) = + seen_signing_ids.insert(module_signing_config.signing_id, &module.id) + { + bail!("Duplicate signing ID detected for modules {} and {}", existing_module, module.id) + }; + + mod_signing_configs.insert(module.id.clone(), module_signing_config); + } + + Ok(mod_signing_configs) +} + +#[cfg(test)] +mod tests { + use alloy::primitives::{b256, Uint}; + + use super::*; + use crate::config::{LogsSettings, ModuleKind, PbsConfig, StaticModuleConfig, StaticPbsConfig}; + + async fn get_base_config() -> CommitBoostConfig { + CommitBoostConfig { + chain: Chain::Hoodi, + relays: vec![], + pbs: StaticPbsConfig { + docker_image: String::from(""), + pbs_config: PbsConfig { + host: Ipv4Addr::new(127, 0, 0, 1), + port: 0, + relay_check: false, + wait_all_registrations: false, + timeout_get_header_ms: 0, + timeout_get_payload_ms: 0, + timeout_register_validator_ms: 0, + skip_sigverify: false, + min_bid_wei: Uint::<256, 4>::from(0), + late_in_slot_time_ms: 0, + extra_validation_enabled: false, + rpc_url: None, + http_timeout_seconds: 30, + register_validator_retry_limit: 3, + }, + with_signer: true, + }, + muxes: None, + modules: Some(vec![]), + signer: None, + metrics: None, + logs: LogsSettings::default(), + } + } + + async fn create_module_config(id: ModuleId, signing_id: B256) -> StaticModuleConfig { + StaticModuleConfig { + id: id.clone(), + signing_id, + docker_image: String::from(""), + env: None, + env_file: None, + kind: ModuleKind::Commit, + } + } + + #[tokio::test] + async fn test_good_config() -> Result<()> { + let mut cfg = get_base_config().await; + let first_module_id = ModuleId("test_module".to_string()); + let first_signing_id = + b256!("0101010101010101010101010101010101010101010101010101010101010101"); + let second_module_id = ModuleId("2nd_test_module".to_string()); + let second_signing_id = + b256!("0202020202020202020202020202020202020202020202020202020202020202"); + + cfg.modules = Some(vec![ + create_module_config(first_module_id.clone(), first_signing_id).await, + create_module_config(second_module_id.clone(), second_signing_id).await, + ]); + + let jwts = HashMap::from([ + (first_module_id.clone(), "supersecret".to_string()), + (second_module_id.clone(), "another-secret".to_string()), + ]); + + // Load the mod signing configuration + let mod_signing_configs = load_module_signing_configs(&cfg, &jwts) + .wrap_err("Failed to load module signing configs")?; + assert!(mod_signing_configs.len() == 2, "Expected 2 mod signing configurations"); + + // Check the first module + let module_1 = mod_signing_configs + .get(&first_module_id) + .unwrap_or_else(|| panic!("Missing '{first_module_id}' in mod signing configs")); + assert_eq!(module_1.module_name, first_module_id, "Module name mismatch for 'test_module'"); + assert_eq!( + module_1.jwt_secret, jwts[&first_module_id], + "JWT secret mismatch for '{first_module_id}'" + ); + assert_eq!( + module_1.signing_id, first_signing_id, + "Signing ID mismatch for '{first_module_id}'" + ); + + // Check the second module + let module_2 = mod_signing_configs + .get(&second_module_id) + .unwrap_or_else(|| panic!("Missing '{second_module_id}' in mod signing configs")); + assert_eq!( + module_2.module_name, second_module_id, + "Module name mismatch for '{second_module_id}'" + ); + assert_eq!( + module_2.jwt_secret, jwts[&second_module_id], + "JWT secret mismatch for '{second_module_id}'" + ); + assert_eq!( + module_2.signing_id, second_signing_id, + "Signing ID mismatch for '{second_module_id}'" + ); + + Ok(()) + } + + #[tokio::test] + async fn test_duplicate_module_names() -> Result<()> { + let mut cfg = get_base_config().await; + let first_module_id = ModuleId("test_module".to_string()); + let first_signing_id = + b256!("0101010101010101010101010101010101010101010101010101010101010101"); + let second_module_id = ModuleId("2nd_test_module".to_string()); + let second_signing_id = + b256!("0202020202020202020202020202020202020202020202020202020202020202"); + + cfg.modules = Some(vec![ + create_module_config(first_module_id.clone(), first_signing_id).await, + create_module_config(first_module_id.clone(), second_signing_id).await, /* Duplicate + * module + * name */ + ]); + + let jwts = HashMap::from([ + (first_module_id.clone(), "supersecret".to_string()), + (second_module_id.clone(), "another-secret".to_string()), + ]); + + // Make sure there was an error + let result = load_module_signing_configs(&cfg, &jwts); + assert!(result.is_err(), "Expected error due to duplicate module names"); + if let Err(e) = result { + assert_eq!( + e.to_string(), + format!("Duplicate module config detected: ID {first_module_id} is already used") + ); + } + Ok(()) + } + + #[tokio::test] + async fn test_duplicate_jwt_secrets() -> Result<()> { + let mut cfg = get_base_config().await; + let first_module_id = ModuleId("test_module".to_string()); + let first_signing_id = + b256!("0101010101010101010101010101010101010101010101010101010101010101"); + let second_module_id = ModuleId("2nd_test_module".to_string()); + let second_signing_id = + b256!("0202020202020202020202020202020202020202020202020202020202020202"); + + cfg.modules = Some(vec![ + create_module_config(first_module_id.clone(), first_signing_id).await, + create_module_config(second_module_id.clone(), second_signing_id).await, + ]); + + let jwts = HashMap::from([ + (first_module_id.clone(), "supersecret".to_string()), + (second_module_id.clone(), "supersecret".to_string()), /* Duplicate JWT secret */ + ]); + + // Make sure there was an error + let result = load_module_signing_configs(&cfg, &jwts); + assert!(result.is_err(), "Expected error due to duplicate JWT secrets"); + if let Err(e) = result { + assert_eq!( + e.to_string(), + format!( + "Duplicate JWT secret detected for modules {first_module_id} and {second_module_id}", + ) + ); + } + Ok(()) + } + + #[tokio::test] + async fn test_duplicate_signing_ids() -> Result<()> { + let mut cfg = get_base_config().await; + let first_module_id = ModuleId("test_module".to_string()); + let first_signing_id = + b256!("0101010101010101010101010101010101010101010101010101010101010101"); + let second_module_id = ModuleId("2nd_test_module".to_string()); + + cfg.modules = Some(vec![ + create_module_config(first_module_id.clone(), first_signing_id).await, + create_module_config(second_module_id.clone(), first_signing_id).await, /* Duplicate signing ID */ + ]); + + let jwts = HashMap::from([ + (first_module_id.clone(), "supersecret".to_string()), + (second_module_id.clone(), "another-secret".to_string()), + ]); + + // Make sure there was an error + let result = load_module_signing_configs(&cfg, &jwts); + assert!(result.is_err(), "Expected error due to duplicate signing IDs"); + if let Err(e) = result { + assert_eq!( + e.to_string(), + format!( + "Duplicate signing ID detected for modules {first_module_id} and {second_module_id}", + ) + ); + } + Ok(()) + } + + #[tokio::test] + async fn test_missing_jwt_secret() -> Result<()> { + let mut cfg = get_base_config().await; + let first_module_id = ModuleId("test_module".to_string()); + let first_signing_id = + b256!("0101010101010101010101010101010101010101010101010101010101010101"); + let second_module_id = ModuleId("2nd_test_module".to_string()); + let second_signing_id = + b256!("0202020202020202020202020202020202020202020202020202020202020202"); + + cfg.modules = Some(vec![ + create_module_config(first_module_id.clone(), first_signing_id).await, + create_module_config(second_module_id.clone(), second_signing_id).await, + ]); + + let jwts = HashMap::from([(second_module_id.clone(), "another-secret".to_string())]); + + // Make sure there was an error + let result = load_module_signing_configs(&cfg, &jwts); + assert!(result.is_err(), "Expected error due to missing JWT secret"); + if let Err(e) = result { + assert_eq!( + e.to_string(), + format!("JWT secret for module {first_module_id} is missing") + ); + } + Ok(()) + } + + #[tokio::test] + async fn test_empty_jwt_secret() -> Result<()> { + let mut cfg = get_base_config().await; + let first_module_id = ModuleId("test_module".to_string()); + let first_signing_id = + b256!("0101010101010101010101010101010101010101010101010101010101010101"); + + cfg.modules = + Some(vec![create_module_config(first_module_id.clone(), first_signing_id).await]); + + let jwts = HashMap::from([(first_module_id.clone(), "".to_string())]); + + // Make sure there was an error + let result = load_module_signing_configs(&cfg, &jwts); + assert!(result.is_err(), "Expected error due to empty JWT secret"); + if let Err(e) = result { + assert!(format!("{:?}", e).contains("JWT secret cannot be empty")); + } + + Ok(()) + } + + #[tokio::test] + async fn test_zero_signing_id() -> Result<()> { + let mut cfg = get_base_config().await; + let first_module_id = ModuleId("test_module".to_string()); + let first_signing_id = + b256!("0000000000000000000000000000000000000000000000000000000000000000"); + + cfg.modules = + Some(vec![create_module_config(first_module_id.clone(), first_signing_id).await]); + + let jwts = HashMap::from([(first_module_id.clone(), "supersecret".to_string())]); + + // Make sure there was an error + let result = load_module_signing_configs(&cfg, &jwts); + assert!(result.is_err(), "Expected error due to zero signing ID"); + if let Err(e) = result { + assert!(format!("{:?}", e).contains("Signing ID cannot be zero")); + } + Ok(()) + } +} diff --git a/crates/common/src/config/utils.rs b/crates/common/src/config/utils.rs index 13784316..5e8e3a65 100644 --- a/crates/common/src/config/utils.rs +++ b/crates/common/src/config/utils.rs @@ -4,8 +4,11 @@ use alloy::rpc::types::beacon::BlsPublicKey; use eyre::{bail, Context, Result}; use serde::de::DeserializeOwned; -use super::JWTS_ENV; -use crate::{config::MUXER_HTTP_MAX_LENGTH, types::ModuleId, utils::read_chunked_body_with_max}; +use crate::{ + config::{ADMIN_JWT_ENV, JWTS_ENV, MUXER_HTTP_MAX_LENGTH}, + types::ModuleId, + utils::read_chunked_body_with_max, +}; pub fn load_env_var(env: &str) -> Result { std::env::var(env).wrap_err(format!("{env} is not set")) @@ -26,9 +29,10 @@ pub fn load_file_from_env(env: &str) -> Result { } /// Loads a map of module id -> jwt secret from a json env -pub fn load_jwt_secrets() -> Result> { +pub fn load_jwt_secrets() -> Result<(String, HashMap)> { + let admin_jwt = std::env::var(ADMIN_JWT_ENV).wrap_err(format!("{ADMIN_JWT_ENV} is not set"))?; let jwt_secrets = std::env::var(JWTS_ENV).wrap_err(format!("{JWTS_ENV} is not set"))?; - decode_string_to_map(&jwt_secrets) + decode_string_to_map(&jwt_secrets).map(|secrets| (admin_jwt, secrets)) } /// Reads an HTTP response safely, erroring out if it failed or if the body is @@ -71,7 +75,7 @@ pub fn remove_duplicate_keys(keys: Vec) -> Vec { unique_keys } -fn decode_string_to_map(raw: &str) -> Result> { +pub fn decode_string_to_map(raw: &str) -> Result> { // trim the string and split for comma raw.trim() .split(',') @@ -89,6 +93,7 @@ fn decode_string_to_map(raw: &str) -> Result> { mod tests { use super::*; + /// TODO: This was only used by the old JWT loader, can it be removed now? #[test] fn test_decode_string_to_map() { let raw = " KEY=VALUE , KEY2=value2 "; diff --git a/crates/common/src/config/validation.rs b/crates/common/src/config/validation.rs new file mode 100644 index 00000000..aeb85594 --- /dev/null +++ b/crates/common/src/config/validation.rs @@ -0,0 +1,82 @@ +use eyre::{bail, Result}; +use std::collections::HashSet; + +use super::CommitBoostConfig; + +/// Validate configuration for module conflicts +pub fn validate_no_conflicts(config: &CommitBoostConfig) -> Result<()> { + let mut module_ids = HashSet::new(); + let mut ports = HashSet::new(); + + // Check module IDs for uniqueness + if let Some(modules) = &config.modules { + for module in modules { + if !module_ids.insert(&module.id) { + bail!("Duplicate module ID: {}", module.id); + } + } + + // Check module compatibility + for i in 0..modules.len() { + for j in i + 1..modules.len() { + let module_a = modules[i].id.as_str(); + let module_b = modules[j].id.as_str(); + + // Basic incompatibility checks + if is_incompatible(module_a, module_b) { + bail!("Incompatible modules: '{}' and '{}'", module_a, module_b); + } + } + } + } + + // Check port conflicts + if !ports.insert(config.pbs.pbs_config.port) { + bail!("Port conflict: PBS port {} already in use", config.pbs.pbs_config.port); + } + + if let Some(signer) = &config.signer { + if !ports.insert(signer.port) { + bail!("Port conflict: Signer port {} already in use", signer.port); + } + } + + if let Some(metrics) = &config.metrics { + if !ports.insert(metrics.start_port) { + bail!("Port conflict: Metrics port {} already in use", metrics.start_port); + } + } + + Ok(()) +} + +/// Check if two modules are known to be incompatible +fn is_incompatible(module_a: &str, module_b: &str) -> bool { + let incompatible_pairs = + [("pbs_relay_a", "pbs_relay_b"), ("custom_pbs", "pbs"), ("duplicate_signer", "signer")]; + + for (a, b) in &incompatible_pairs { + if (module_a == *a && module_b == *b) || (module_a == *b && module_b == *a) { + return true; + } + } + + false +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_is_incompatible() { + assert!(is_incompatible("pbs_relay_a", "pbs_relay_b")); + assert!(is_incompatible("pbs_relay_b", "pbs_relay_a")); // Symmetric + assert!(is_incompatible("custom_pbs", "pbs")); + assert!(is_incompatible("pbs", "custom_pbs")); // Symmetric + + // Compatible pairs should return false + assert!(!is_incompatible("pbs", "signer")); + assert!(!is_incompatible("signer", "metrics")); + } +} diff --git a/crates/common/src/pbs/types/get_header.rs b/crates/common/src/pbs/types/get_header.rs index 18d5361f..006a2689 100644 --- a/crates/common/src/pbs/types/get_header.rs +++ b/crates/common/src/pbs/types/get_header.rs @@ -57,6 +57,12 @@ impl GetHeaderResponse { } } + pub fn set_value(&mut self, value: U256) { + match self { + VersionedResponse::Electra(data) => data.message.value = value, + } + } + pub fn transactions_root(&self) -> B256 { match self { GetHeaderResponse::Electra(data) => data.message.header.transactions_root, @@ -94,7 +100,7 @@ pub struct ExecutionPayloadHeaderMessageElectra { #[cfg(test)] mod tests { - use alloy::primitives::U256; + use alloy::primitives::{aliases::B32, U256}; use super::*; use crate::{ @@ -176,11 +182,62 @@ mod tests { &parsed.message.pubkey, &parsed.message, &parsed.signature, - APPLICATION_BUILDER_DOMAIN + None, + &B32::from(APPLICATION_BUILDER_DOMAIN) ) .is_ok()) } + #[test] + fn test_set_value() { + let data = r#"{ + "version": "electra", + "data": { + "message": { + "header": { + "parent_hash": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2", + "fee_recipient": "0xabcf8e0d4e9587369b2301d0790347320302cc09", + "state_root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2", + "receipts_root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2", + "logs_bloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "prev_randao": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2", + "block_number": "1", + "gas_limit": "1", + "gas_used": "1", + "timestamp": "1", + "extra_data": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2", + "base_fee_per_gas": "1", + "blob_gas_used": "1", + "excess_blob_gas": "1", + "block_hash": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2", + "transactions_root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2", + "withdrawals_root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2" + }, + "blob_kzg_commitments": [], + "execution_requests": { + "deposits": [], + "withdrawals": [], + "consolidations": [] + }, + "value": "100", + "pubkey": "0x86b1cea87eed94cad99244356abcd83995947670f0553a1d3fe83c4a9e8116f4891fb1c51db232e736be1cb3327164bc" + }, + "signature": "0x8addecd35e0ffe27b74e41aff2836527e6fea0efdb46dbb0f7436f5087d0cd5665bd16d924f640fc928cdba0173971e400dc603dbd6310bfb6f249c1554b044fe06ae4cf5d5f452f3ff19d9d130809b34d3d3abdca3d192c839ba2ac91129c15" + } + }"#; + + let mut parsed: GetHeaderResponse = serde_json::from_str(data).unwrap(); + + // Check initial value + assert_eq!(parsed.value(), U256::from(100)); + + // Set new value + parsed.set_value(U256::from(105)); + + // Verify value was updated + assert_eq!(parsed.value(), U256::from(105)); + } + #[test] // this is dummy data generated with https://github.com/attestantio/go-builder-client fn test_signed_execution_payload_header_ssz() { diff --git a/crates/common/src/signature.rs b/crates/common/src/signature.rs index e51e2291..cd960031 100644 --- a/crates/common/src/signature.rs +++ b/crates/common/src/signature.rs @@ -1,12 +1,15 @@ -use alloy::rpc::types::beacon::{constants::BLS_DST_SIG, BlsPublicKey, BlsSignature}; +use alloy::{ + primitives::{aliases::B32, Address, B256}, + rpc::types::beacon::{constants::BLS_DST_SIG, BlsPublicKey, BlsSignature}, +}; use tree_hash::TreeHash; use tree_hash_derive::TreeHash; use crate::{ constants::{COMMIT_BOOST_DOMAIN, GENESIS_VALIDATORS_ROOT}, error::BlstErrorWrapper, - signer::{verify_bls_signature, BlsSecretKey}, - types::Chain, + signer::{verify_bls_signature, verify_ecdsa_signature, BlsSecretKey, EcdsaSignature}, + types::{self, Chain}, }; pub fn sign_message(secret_key: &BlsSecretKey, msg: &[u8]) -> BlsSignature { @@ -14,21 +17,29 @@ pub fn sign_message(secret_key: &BlsSecretKey, msg: &[u8]) -> BlsSignature { BlsSignature::from_slice(&signature) } -pub fn compute_signing_root(object_root: [u8; 32], signing_domain: [u8; 32]) -> [u8; 32] { - #[derive(Default, Debug, TreeHash)] - struct SigningData { - object_root: [u8; 32], - signing_domain: [u8; 32], +pub fn compute_prop_commit_signing_root( + chain: Chain, + object_root: &B256, + module_signing_id: Option<&B256>, + domain_mask: &B32, +) -> B256 { + let domain = compute_domain(chain, domain_mask); + match module_signing_id { + Some(id) => { + let object_root = + types::PropCommitSigningInfo { data: *object_root, module_signing_id: *id } + .tree_hash_root(); + types::SigningData { object_root, signing_domain: domain }.tree_hash_root() + } + None => types::SigningData { object_root: *object_root, signing_domain: domain } + .tree_hash_root(), } - - let signing_data = SigningData { object_root, signing_domain }; - signing_data.tree_hash_root().0 } // NOTE: this currently works only for builder domain signatures and // verifications // ref: https://github.com/ralexstokes/ethereum-consensus/blob/cf3c404043230559660810bc0c9d6d5a8498d819/ethereum-consensus/src/builder/mod.rs#L26-L29 -pub fn compute_domain(chain: Chain, domain_mask: [u8; 4]) -> [u8; 32] { +pub fn compute_domain(chain: Chain, domain_mask: &B32) -> B256 { #[derive(Debug, TreeHash)] struct ForkData { fork_version: [u8; 4], @@ -36,7 +47,7 @@ pub fn compute_domain(chain: Chain, domain_mask: [u8; 4]) -> [u8; 32] { } let mut domain = [0u8; 32]; - domain[..4].copy_from_slice(&domain_mask); + domain[..4].copy_from_slice(&domain_mask.0); let fork_version = chain.genesis_fork_version(); let fd = ForkData { fork_version, genesis_validators_root: GENESIS_VALIDATORS_ROOT }; @@ -44,7 +55,7 @@ pub fn compute_domain(chain: Chain, domain_mask: [u8; 4]) -> [u8; 32] { domain[4..].copy_from_slice(&fork_data_root[..28]); - domain + B256::from(domain) } pub fn verify_signed_message( @@ -52,69 +63,114 @@ pub fn verify_signed_message( pubkey: &BlsPublicKey, msg: &T, signature: &BlsSignature, - domain_mask: [u8; 4], + module_signing_id: Option<&B256>, + domain_mask: &B32, ) -> Result<(), BlstErrorWrapper> { - let domain = compute_domain(chain, domain_mask); - let signing_root = compute_signing_root(msg.tree_hash_root().0, domain); - - verify_bls_signature(pubkey, &signing_root, signature) + let signing_root = compute_prop_commit_signing_root( + chain, + &msg.tree_hash_root(), + module_signing_id, + domain_mask, + ); + verify_bls_signature(pubkey, signing_root.as_slice(), signature) } +/// Signs a message with the Beacon builder domain. pub fn sign_builder_message( chain: Chain, secret_key: &BlsSecretKey, msg: &impl TreeHash, ) -> BlsSignature { - sign_builder_root(chain, secret_key, msg.tree_hash_root().0) + sign_builder_root(chain, secret_key, &msg.tree_hash_root()) } pub fn sign_builder_root( chain: Chain, secret_key: &BlsSecretKey, - object_root: [u8; 32], + object_root: &B256, ) -> BlsSignature { - let domain = chain.builder_domain(); - let signing_root = compute_signing_root(object_root, domain); - sign_message(secret_key, &signing_root) + let signing_domain = chain.builder_domain(); + let signing_data = + types::SigningData { object_root: object_root.tree_hash_root(), signing_domain }; + let signing_root = signing_data.tree_hash_root(); + sign_message(secret_key, signing_root.as_slice()) } pub fn sign_commit_boost_root( chain: Chain, secret_key: &BlsSecretKey, - object_root: [u8; 32], + object_root: &B256, + module_signing_id: Option<&B256>, ) -> BlsSignature { - let domain = compute_domain(chain, COMMIT_BOOST_DOMAIN); - let signing_root = compute_signing_root(object_root, domain); - sign_message(secret_key, &signing_root) + let signing_root = compute_prop_commit_signing_root( + chain, + object_root, + module_signing_id, + &B32::from(COMMIT_BOOST_DOMAIN), + ); + sign_message(secret_key, signing_root.as_slice()) } +// ============================== +// === Signature Verification === +// ============================== + +/// Verifies that a proposer commitment signature was generated by the given BLS +/// key for the provided message, chain ID, and module signing ID. +pub fn verify_proposer_commitment_signature_bls( + chain: Chain, + pubkey: &BlsPublicKey, + msg: &impl TreeHash, + signature: &BlsSignature, + module_signing_id: &B256, +) -> Result<(), BlstErrorWrapper> { + let signing_domain = compute_domain(chain, &B32::from(COMMIT_BOOST_DOMAIN)); + let object_root = types::PropCommitSigningInfo { + data: msg.tree_hash_root(), + module_signing_id: *module_signing_id, + } + .tree_hash_root(); + let signing_root = types::SigningData { object_root, signing_domain }.tree_hash_root(); + verify_bls_signature(pubkey, signing_root.as_slice(), signature) +} + +/// Verifies that a proposer commitment signature was generated by the given +/// ECDSA key for the provided message, chain ID, and module signing ID. +pub fn verify_proposer_commitment_signature_ecdsa( + chain: Chain, + address: &Address, + msg: &impl TreeHash, + signature: &EcdsaSignature, + module_signing_id: &B256, +) -> Result<(), eyre::Report> { + let object_root = msg.tree_hash_root(); + let signing_domain = compute_domain(chain, &B32::from(COMMIT_BOOST_DOMAIN)); + let object_root = + types::PropCommitSigningInfo { data: object_root, module_signing_id: *module_signing_id } + .tree_hash_root(); + let signing_root = types::SigningData { object_root, signing_domain }.tree_hash_root(); + verify_ecdsa_signature(address, &signing_root, signature) +} + +// =============== +// === Testing === +// =============== + #[cfg(test)] mod tests { + use alloy::primitives::aliases::B32; + use super::compute_domain; use crate::{constants::APPLICATION_BUILDER_DOMAIN, types::Chain}; #[test] fn test_builder_domains() { - assert_eq!( - compute_domain(Chain::Mainnet, APPLICATION_BUILDER_DOMAIN), - Chain::Mainnet.builder_domain() - ); - assert_eq!( - compute_domain(Chain::Holesky, APPLICATION_BUILDER_DOMAIN), - Chain::Holesky.builder_domain() - ); - assert_eq!( - compute_domain(Chain::Sepolia, APPLICATION_BUILDER_DOMAIN), - Chain::Sepolia.builder_domain() - ); - assert_eq!( - compute_domain(Chain::Helder, APPLICATION_BUILDER_DOMAIN), - Chain::Helder.builder_domain() - ); - assert_eq!( - compute_domain(Chain::Hoodi, APPLICATION_BUILDER_DOMAIN), - Chain::Hoodi.builder_domain() - ); + let domain = &B32::from(APPLICATION_BUILDER_DOMAIN); + assert_eq!(compute_domain(Chain::Mainnet, domain), Chain::Mainnet.builder_domain()); + assert_eq!(compute_domain(Chain::Holesky, domain), Chain::Holesky.builder_domain()); + assert_eq!(compute_domain(Chain::Sepolia, domain), Chain::Sepolia.builder_domain()); + assert_eq!(compute_domain(Chain::Helder, domain), Chain::Helder.builder_domain()); + assert_eq!(compute_domain(Chain::Hoodi, domain), Chain::Hoodi.builder_domain()); } } diff --git a/crates/common/src/signer/loader.rs b/crates/common/src/signer/loader.rs index 4fb9adb1..9fc1b3f2 100644 --- a/crates/common/src/signer/loader.rs +++ b/crates/common/src/signer/loader.rs @@ -123,13 +123,13 @@ fn load_from_lighthouse_format( .into_par_iter() .filter_map(|path| { if !path.is_dir() { - return None + return None; } let maybe_pubkey = path.file_name().and_then(|d| d.to_str())?; let Ok(pubkey) = BlsPublicKey::from_hex(maybe_pubkey) else { warn!("Invalid pubkey: {}", maybe_pubkey); - return None + return None; }; let ks_path = keys_path.join(maybe_pubkey).join("voting-keystore.json"); @@ -286,13 +286,13 @@ fn load_from_nimbus_format( .into_par_iter() .filter_map(|path| { if !path.is_dir() { - return None + return None; } let maybe_pubkey = path.file_name().and_then(|d| d.to_str())?; let Ok(pubkey) = BlsPublicKey::from_hex(maybe_pubkey) else { warn!("Invalid pubkey: {}", maybe_pubkey); - return None + return None; }; let ks_path = keys_path.join(maybe_pubkey).join("keystore.json"); diff --git a/crates/common/src/signer/schemes/bls.rs b/crates/common/src/signer/schemes/bls.rs index f133b2bc..0441a6e0 100644 --- a/crates/common/src/signer/schemes/bls.rs +++ b/crates/common/src/signer/schemes/bls.rs @@ -1,5 +1,5 @@ -use alloy::rpc::types::beacon::constants::BLS_DST_SIG; pub use alloy::rpc::types::beacon::BlsSignature; +use alloy::{primitives::B256, rpc::types::beacon::constants::BLS_DST_SIG}; use blst::BLST_ERROR; use tree_hash::TreeHash; @@ -32,20 +32,32 @@ impl BlsSigner { } } - pub fn secret(&self) -> [u8; 32] { + pub fn secret(&self) -> B256 { match self { - BlsSigner::Local(secret) => secret.clone().to_bytes(), + BlsSigner::Local(secret) => B256::from(secret.clone().to_bytes()), } } - pub async fn sign(&self, chain: Chain, object_root: [u8; 32]) -> BlsSignature { + pub async fn sign( + &self, + chain: Chain, + object_root: &B256, + module_signing_id: Option<&B256>, + ) -> BlsSignature { match self { - BlsSigner::Local(sk) => sign_commit_boost_root(chain, sk, object_root), + BlsSigner::Local(sk) => { + sign_commit_boost_root(chain, sk, object_root, module_signing_id) + } } } - pub async fn sign_msg(&self, chain: Chain, msg: &impl TreeHash) -> BlsSignature { - self.sign(chain, msg.tree_hash_root().0).await + pub async fn sign_msg( + &self, + chain: Chain, + msg: &impl TreeHash, + module_signing_id: Option<&B256>, + ) -> BlsSignature { + self.sign(chain, &msg.tree_hash_root(), module_signing_id).await } } @@ -56,11 +68,7 @@ pub fn random_secret() -> BlsSecretKey { let mut ikm = [0u8; 32]; rng.fill_bytes(&mut ikm); - match BlsSecretKey::key_gen(&ikm, &[]) { - Ok(key) => key, - // Key material is always valid (32 `u8`s), so `key_gen` can't return Err. - Err(_) => unreachable!(), - } + BlsSecretKey::key_gen(&ikm, &[]).unwrap_or_else(|_| unreachable!()) } pub fn verify_bls_signature( diff --git a/crates/common/src/signer/schemes/ecdsa.rs b/crates/common/src/signer/schemes/ecdsa.rs index 612df5e3..907340f1 100644 --- a/crates/common/src/signer/schemes/ecdsa.rs +++ b/crates/common/src/signer/schemes/ecdsa.rs @@ -1,7 +1,7 @@ use std::{ops::Deref, str::FromStr}; use alloy::{ - primitives::{Address, PrimitiveSignature}, + primitives::{aliases::B32, Address, PrimitiveSignature, B256}, signers::{local::PrivateKeySigner, SignerSync}, }; use eyre::ensure; @@ -9,8 +9,8 @@ use tree_hash::TreeHash; use crate::{ constants::COMMIT_BOOST_DOMAIN, - signature::{compute_domain, compute_signing_root}, - types::Chain, + signature::compute_domain, + types::{self, Chain}, }; #[derive(Debug, Clone, PartialEq, Eq, Hash)] @@ -86,32 +86,44 @@ impl EcdsaSigner { pub async fn sign( &self, chain: Chain, - object_root: [u8; 32], + object_root: &B256, + module_signing_id: Option<&B256>, ) -> Result { match self { EcdsaSigner::Local(sk) => { - let domain = compute_domain(chain, COMMIT_BOOST_DOMAIN); - let signing_root = compute_signing_root(object_root, domain).into(); + let signing_domain = compute_domain(chain, &B32::from(COMMIT_BOOST_DOMAIN)); + let signing_root = match module_signing_id { + Some(id) => { + let object_root = types::PropCommitSigningInfo { + data: *object_root, + module_signing_id: *id, + } + .tree_hash_root(); + types::SigningData { object_root, signing_domain }.tree_hash_root() + } + None => types::SigningData { object_root: *object_root, signing_domain } + .tree_hash_root(), + }; sk.sign_hash_sync(&signing_root).map(EcdsaSignature::from) } } } - pub async fn sign_msg( &self, chain: Chain, msg: &impl TreeHash, + module_signing_id: Option<&B256>, ) -> Result { - self.sign(chain, msg.tree_hash_root().0).await + self.sign(chain, &msg.tree_hash_root(), module_signing_id).await } } pub fn verify_ecdsa_signature( address: &Address, - msg: &[u8; 32], + msg: &B256, signature: &EcdsaSignature, ) -> eyre::Result<()> { - let recovered = signature.recover_address_from_prehash(msg.into())?; + let recovered = signature.recover_address_from_prehash(msg)?; ensure!(recovered == *address, "invalid signature"); Ok(()) } @@ -124,15 +136,16 @@ mod test { use super::*; #[tokio::test] - async fn test_ecdsa_signer() { + async fn test_ecdsa_signer_noncommit() { let pk = bytes!("88bcd6672d95bcba0d52a3146494ed4d37675af4ed2206905eb161aa99a6c0d1"); let signer = EcdsaSigner::new_from_bytes(&pk).unwrap(); - let object_root = [1; 32]; - let signature = signer.sign(Chain::Holesky, object_root).await.unwrap(); + let object_root = B256::from([1; 32]); + let signature = signer.sign(Chain::Holesky, &object_root, None).await.unwrap(); - let domain = compute_domain(Chain::Holesky, COMMIT_BOOST_DOMAIN); - let msg = compute_signing_root(object_root, domain); + let domain = compute_domain(Chain::Holesky, &B32::from(COMMIT_BOOST_DOMAIN)); + let signing_data = types::SigningData { object_root, signing_domain: domain }; + let msg = signing_data.tree_hash_root(); assert_eq!(msg, hex!("219ca7a673b2cbbf67bec6c9f60f78bd051336d57b68d1540190f30667e86725")); @@ -140,4 +153,26 @@ mod test { let verified = verify_ecdsa_signature(&address, &msg, &signature); assert!(verified.is_ok()); } + + #[tokio::test] + async fn test_ecdsa_signer_prop_commit() { + let pk = bytes!("88bcd6672d95bcba0d52a3146494ed4d37675af4ed2206905eb161aa99a6c0d1"); + let signer = EcdsaSigner::new_from_bytes(&pk).unwrap(); + + let object_root = B256::from([1; 32]); + let module_signing_id = B256::from([2; 32]); + let signature = + signer.sign(Chain::Hoodi, &object_root, Some(&module_signing_id)).await.unwrap(); + + let signing_domain = compute_domain(Chain::Hoodi, &B32::from(COMMIT_BOOST_DOMAIN)); + let object_root = + types::PropCommitSigningInfo { data: object_root, module_signing_id }.tree_hash_root(); + let msg = types::SigningData { object_root, signing_domain }.tree_hash_root(); + + assert_eq!(msg, hex!("8cd49ccf2f9b0297796ff96ce5f7c5d26e20a59d0032ee2ad6249dcd9682b808")); + + let address = signer.address(); + let verified = verify_ecdsa_signature(&address, &msg, &signature); + assert!(verified.is_ok()); + } } diff --git a/crates/common/src/signer/store.rs b/crates/common/src/signer/store.rs index 479a4016..19743bbd 100644 --- a/crates/common/src/signer/store.rs +++ b/crates/common/src/signer/store.rs @@ -307,8 +307,8 @@ impl ProxyStore { let entry = entry?; let path = entry.path(); - if !path.is_file() || - path.extension().is_none_or(|ext| ext != "json") + if !path.is_file() + || path.extension().is_none_or(|ext| ext != "json") { continue; } @@ -363,8 +363,8 @@ impl ProxyStore { let entry = entry?; let path = entry.path(); - if !path.is_file() || - path.extension().is_none_or(|ext| ext != "json") + if !path.is_file() + || path.extension().is_none_or(|ext| ext != "json") { continue; } @@ -532,7 +532,8 @@ mod test { delegator: consensus_signer.pubkey(), proxy: proxy_signer.pubkey(), }; - let signature = consensus_signer.sign(Chain::Mainnet, message.tree_hash_root().0).await; + let signature = + consensus_signer.sign(Chain::Mainnet, &message.tree_hash_root(), None).await; let delegation = SignedProxyDelegationBls { signature, message }; let proxy_signer = BlsProxySigner { signer: proxy_signer, delegation }; @@ -645,7 +646,8 @@ mod test { delegator: consensus_signer.pubkey(), proxy: proxy_signer.pubkey(), }; - let signature = consensus_signer.sign(Chain::Mainnet, message.tree_hash_root().0).await; + let signature = + consensus_signer.sign(Chain::Mainnet, &message.tree_hash_root(), None).await; let delegation = SignedProxyDelegationBls { signature, message }; let proxy_signer = BlsProxySigner { signer: proxy_signer, delegation }; diff --git a/crates/common/src/types.rs b/crates/common/src/types.rs index 5293a789..4bc28b7d 100644 --- a/crates/common/src/types.rs +++ b/crates/common/src/types.rs @@ -1,9 +1,10 @@ use std::path::PathBuf; -use alloy::primitives::{hex, Bytes}; +use alloy::primitives::{aliases::B32, hex, Bytes, B256}; use derive_more::{Deref, Display, From, Into}; use eyre::{bail, Context}; use serde::{Deserialize, Serialize}; +use tree_hash_derive::TreeHash; use crate::{constants::APPLICATION_BUILDER_DOMAIN, signature::compute_domain}; @@ -23,6 +24,12 @@ pub struct JwtClaims { pub module: String, } +#[derive(Debug, Serialize, Deserialize)] +pub struct JwtAdmin { + pub exp: u64, + pub admin: bool, +} + #[derive(Clone, Copy, PartialEq, Eq)] pub enum Chain { Mainnet, @@ -78,14 +85,14 @@ impl Chain { } } - pub fn builder_domain(&self) -> [u8; 32] { + pub fn builder_domain(&self) -> B256 { match self { Chain::Mainnet => KnownChain::Mainnet.builder_domain(), Chain::Holesky => KnownChain::Holesky.builder_domain(), Chain::Sepolia => KnownChain::Sepolia.builder_domain(), Chain::Helder => KnownChain::Helder.builder_domain(), Chain::Hoodi => KnownChain::Hoodi.builder_domain(), - Chain::Custom { .. } => compute_domain(*self, APPLICATION_BUILDER_DOMAIN), + Chain::Custom { .. } => compute_domain(*self, &B32::from(APPLICATION_BUILDER_DOMAIN)), } } @@ -149,28 +156,28 @@ impl KnownChain { } } - pub fn builder_domain(&self) -> [u8; 32] { + pub fn builder_domain(&self) -> B256 { match self { - KnownChain::Mainnet => [ + KnownChain::Mainnet => B256::from([ 0, 0, 0, 1, 245, 165, 253, 66, 209, 106, 32, 48, 39, 152, 239, 110, 211, 9, 151, 155, 67, 0, 61, 35, 32, 217, 240, 232, 234, 152, 49, 169, - ], - KnownChain::Holesky => [ + ]), + KnownChain::Holesky => B256::from([ 0, 0, 0, 1, 91, 131, 162, 55, 89, 197, 96, 178, 208, 198, 69, 118, 225, 220, 252, 52, 234, 148, 196, 152, 143, 62, 13, 159, 119, 240, 83, 135, - ], - KnownChain::Sepolia => [ + ]), + KnownChain::Sepolia => B256::from([ 0, 0, 0, 1, 211, 1, 7, 120, 205, 8, 238, 81, 75, 8, 254, 103, 182, 197, 3, 181, 16, 152, 122, 76, 228, 63, 66, 48, 109, 151, 198, 124, - ], - KnownChain::Helder => [ + ]), + KnownChain::Helder => B256::from([ 0, 0, 0, 1, 148, 196, 26, 244, 132, 255, 247, 150, 73, 105, 224, 189, 217, 34, 248, 45, 255, 15, 75, 232, 122, 96, 208, 102, 76, 201, 209, 255, - ], - KnownChain::Hoodi => [ + ]), + KnownChain::Hoodi => B256::from([ 0, 0, 0, 1, 113, 145, 3, 81, 30, 250, 79, 19, 98, 255, 42, 80, 153, 108, 204, 243, 41, 204, 132, 203, 65, 12, 94, 92, 125, 53, 29, 3, - ], + ]), } } @@ -196,11 +203,11 @@ impl KnownChain { pub fn slot_time_sec(&self) -> u64 { match self { - KnownChain::Mainnet | - KnownChain::Holesky | - KnownChain::Sepolia | - KnownChain::Helder | - KnownChain::Hoodi => 12, + KnownChain::Mainnet + | KnownChain::Holesky + | KnownChain::Sepolia + | KnownChain::Helder + | KnownChain::Hoodi => 12, } } } @@ -283,6 +290,22 @@ impl<'de> Deserialize<'de> for Chain { } } +/// Structure for signatures used in Beacon chain operations +#[derive(Default, Debug, TreeHash)] +pub struct SigningData { + pub object_root: B256, + pub signing_domain: B256, +} + +/// Structure for signatures used for proposer commitments in Commit Boost. +/// The signing root of this struct must be used as the object_root of a +/// SigningData for signatures. +#[derive(Default, Debug, TreeHash)] +pub struct PropCommitSigningInfo { + pub data: B256, + pub module_signing_id: B256, +} + /// Returns seconds_per_slot and genesis_fork_version from a spec, such as /// returned by /eth/v1/config/spec ref: https://ethereum.github.io/beacon-APIs/#/Config/getSpec /// Try to load two formats: @@ -359,11 +382,14 @@ mod tests { fn test_load_custom() { let s = r#"chain = { genesis_time_secs = 1, slot_time_secs = 2, genesis_fork_version = "0x01000000" }"#; let decoded: MockConfig = toml::from_str(s).unwrap(); - assert_eq!(decoded.chain, Chain::Custom { - genesis_time_secs: 1, - slot_time_secs: 2, - genesis_fork_version: [1, 0, 0, 0] - }) + assert_eq!( + decoded.chain, + Chain::Custom { + genesis_time_secs: 1, + slot_time_secs: 2, + genesis_fork_version: [1, 0, 0, 0] + } + ) } #[test] @@ -400,11 +426,14 @@ mod tests { let decoded: MockConfig = toml::from_str(&s).unwrap(); assert_eq!(decoded.chain.slot_time_sec(), KnownChain::Holesky.slot_time_sec()); - assert_eq!(decoded.chain, Chain::Custom { - genesis_time_secs: 1, - slot_time_secs: KnownChain::Holesky.slot_time_sec(), - genesis_fork_version: KnownChain::Holesky.genesis_fork_version() - }) + assert_eq!( + decoded.chain, + Chain::Custom { + genesis_time_secs: 1, + slot_time_secs: KnownChain::Holesky.slot_time_sec(), + genesis_fork_version: KnownChain::Holesky.genesis_fork_version() + } + ) } #[test] @@ -420,11 +449,14 @@ mod tests { let decoded: MockConfig = toml::from_str(&s).unwrap(); assert_eq!(decoded.chain.slot_time_sec(), KnownChain::Helder.slot_time_sec()); - assert_eq!(decoded.chain, Chain::Custom { - genesis_time_secs: 1, - slot_time_secs: KnownChain::Sepolia.slot_time_sec(), - genesis_fork_version: KnownChain::Sepolia.genesis_fork_version() - }) + assert_eq!( + decoded.chain, + Chain::Custom { + genesis_time_secs: 1, + slot_time_secs: KnownChain::Sepolia.slot_time_sec(), + genesis_fork_version: KnownChain::Sepolia.genesis_fork_version() + } + ) } #[test] @@ -440,11 +472,14 @@ mod tests { let decoded: MockConfig = toml::from_str(&s).unwrap(); assert_eq!(decoded.chain.slot_time_sec(), KnownChain::Hoodi.slot_time_sec()); - assert_eq!(decoded.chain, Chain::Custom { - genesis_time_secs: 1, - slot_time_secs: KnownChain::Hoodi.slot_time_sec(), - genesis_fork_version: KnownChain::Hoodi.genesis_fork_version() - }) + assert_eq!( + decoded.chain, + Chain::Custom { + genesis_time_secs: 1, + slot_time_secs: KnownChain::Hoodi.slot_time_sec(), + genesis_fork_version: KnownChain::Hoodi.genesis_fork_version() + } + ) } #[test] @@ -460,10 +495,13 @@ mod tests { let decoded: MockConfig = toml::from_str(&s).unwrap(); assert_eq!(decoded.chain.slot_time_sec(), KnownChain::Helder.slot_time_sec()); - assert_eq!(decoded.chain, Chain::Custom { - genesis_time_secs: 1, - slot_time_secs: KnownChain::Helder.slot_time_sec(), - genesis_fork_version: KnownChain::Helder.genesis_fork_version() - }) + assert_eq!( + decoded.chain, + Chain::Custom { + genesis_time_secs: 1, + slot_time_secs: KnownChain::Helder.slot_time_sec(), + genesis_fork_version: KnownChain::Helder.genesis_fork_version() + } + ) } } diff --git a/crates/common/src/utils.rs b/crates/common/src/utils.rs index 6d39465c..3f658c92 100644 --- a/crates/common/src/utils.rs +++ b/crates/common/src/utils.rs @@ -30,7 +30,7 @@ use crate::{ config::LogsSettings, constants::SIGNER_JWT_EXPIRATION, pbs::HEADER_VERSION_VALUE, - types::{Chain, Jwt, JwtClaims, ModuleId}, + types::{Chain, Jwt, JwtAdmin, JwtClaims, ModuleId}, }; const MILLIS_PER_SECOND: u64 = 1_000; @@ -405,6 +405,24 @@ pub fn validate_jwt(jwt: Jwt, secret: &str) -> eyre::Result<()> { .map_err(From::from) } +/// Validate an admin JWT with the given secret +pub fn validate_admin_jwt(jwt: Jwt, secret: &str) -> eyre::Result<()> { + let mut validation = jsonwebtoken::Validation::default(); + validation.leeway = 10; + + let token = jsonwebtoken::decode::( + jwt.as_str(), + &jsonwebtoken::DecodingKey::from_secret(secret.as_ref()), + &validation, + )?; + + if token.claims.admin { + Ok(()) + } else { + eyre::bail!("Token is not admin") + } +} + /// Generates a random string pub fn random_jwt_secret() -> String { rand::rng().sample_iter(&Alphanumeric).take(32).map(char::from).collect() diff --git a/crates/pbs/src/lib.rs b/crates/pbs/src/lib.rs index 8b4afdcf..73440bc3 100644 --- a/crates/pbs/src/lib.rs +++ b/crates/pbs/src/lib.rs @@ -3,6 +3,7 @@ mod constants; mod error; mod metrics; mod mev_boost; +mod registration_cache; mod routes; mod service; mod state; diff --git a/crates/pbs/src/metrics.rs b/crates/pbs/src/metrics.rs index 814d00a1..4e7c201f 100644 --- a/crates/pbs/src/metrics.rs +++ b/crates/pbs/src/metrics.rs @@ -6,7 +6,9 @@ use lazy_static::lazy_static; use prometheus::{ register_histogram_vec_with_registry, register_int_counter_vec_with_registry, - register_int_gauge_vec_with_registry, HistogramVec, IntCounterVec, IntGaugeVec, Registry, + register_int_counter_with_registry, register_int_gauge_vec_with_registry, + register_int_gauge_with_registry, HistogramVec, IntCounter, IntCounterVec, IntGauge, + IntGaugeVec, Registry, }; lazy_static! { @@ -60,4 +62,42 @@ lazy_static! { &["http_status_code", "endpoint"], PBS_METRICS_REGISTRY ).unwrap(); + + // REGISTRATION CACHE METRICS + /// Number of registration cache hits + pub static ref REGISTRATION_CACHE_HITS: IntCounter = register_int_counter_with_registry!( + "registration_cache_hits_total", + "Number of registration cache hits", + PBS_METRICS_REGISTRY + ).unwrap(); + + /// Number of registration cache misses + pub static ref REGISTRATION_CACHE_MISSES: IntCounter = register_int_counter_with_registry!( + "registration_cache_misses_total", + "Number of registration cache misses", + PBS_METRICS_REGISTRY + ).unwrap(); + + /// Current size of registration cache + pub static ref REGISTRATION_CACHE_SIZE: IntGauge = register_int_gauge_with_registry!( + "registration_cache_size", + "Current size of registration cache", + PBS_METRICS_REGISTRY + ).unwrap(); + + /// Number of registrations skipped due to cache + pub static ref REGISTRATIONS_SKIPPED: IntCounterVec = register_int_counter_vec_with_registry!( + "registrations_skipped_total", + "Number of registrations skipped due to cache", + &["reason"], + PBS_METRICS_REGISTRY + ).unwrap(); + + /// Number of registrations processed + pub static ref REGISTRATIONS_PROCESSED: IntCounterVec = register_int_counter_vec_with_registry!( + "registrations_processed_total", + "Number of registrations processed", + &["status"], + PBS_METRICS_REGISTRY + ).unwrap(); } diff --git a/crates/pbs/src/mev_boost/get_header.rs b/crates/pbs/src/mev_boost/get_header.rs index 613815ce..bb3e2d3a 100644 --- a/crates/pbs/src/mev_boost/get_header.rs +++ b/crates/pbs/src/mev_boost/get_header.rs @@ -4,7 +4,7 @@ use std::{ }; use alloy::{ - primitives::{utils::format_ether, B256, U256}, + primitives::{aliases::B32, utils::format_ether, B256, U256}, providers::Provider, rpc::types::{beacon::BlsPublicKey, Block}, }; @@ -113,7 +113,26 @@ pub async fn get_header( let relay_id = relays[i].id.as_str(); match res { - Ok(Some(res)) => { + Ok(Some(mut res)) => { + // Apply bid boost if configured + if let Some(boost) = relays[i].config.bid_boost { + let original = res.value(); + let boosted = BID_BOOST_CACHE + .with(|cache| cache.borrow_mut().get_or_calculate(original, boost)); + res.set_value(boosted); + + // Only log if actually modified + if original != boosted { + debug!( + relay_id, + boost, + original = %format_ether(original), + boosted = %format_ether(boosted), + "Applied bid boost" + ); + } + } + RELAY_LAST_SLOT.with_label_values(&[relay_id]).set(params.slot as i64); let value_gwei = (res.value() / U256::from(1_000_000_000)).try_into().unwrap_or_default(); @@ -132,6 +151,59 @@ pub async fn get_header( Ok(max_bid) } +/// Apply bid boost to a value. Returns the boosted value, never decreasing. +/// boost: 1.05 = 5% increase, 1.0 = no change +fn apply_bid_boost(value: U256, boost: f64) -> U256 { + if boost <= 1.0 { + return value; + } + // Safe multiplication with saturation + // Use 10000 as base for precision (1.05 = 10500/10000) + let boost_factor = (boost * 10000.0) as u64; + let boosted = value.saturating_mul(U256::from(boost_factor)) / U256::from(10000); + boosted.max(value) // Never decrease +} + +// Simple LRU cache for bid boosts to avoid recalculation +use std::{cell::RefCell, collections::HashMap}; + +struct BidBoostCache { + cache: HashMap<(U256, u64), U256>, // (value, boost_factor) -> boosted_value + max_size: usize, +} + +impl BidBoostCache { + fn new(max_size: usize) -> Self { + Self { cache: HashMap::with_capacity(max_size), max_size } + } + + fn get_or_calculate(&mut self, value: U256, boost: f64) -> U256 { + // Convert boost to fixed-point for cache key (10000 = 1.0) + let boost_factor = (boost * 10000.0) as u64; + let key = (value, boost_factor); + + if let Some(&cached) = self.cache.get(&key) { + return cached; + } + + // Calculate and cache + let boosted = apply_bid_boost(value, boost); + + // Simple eviction: clear if too large + if self.cache.len() >= self.max_size { + self.cache.clear(); + } + + self.cache.insert(key, boosted); + boosted + } +} + +// Thread-local cache to avoid locking +thread_local! { + static BID_BOOST_CACHE: RefCell = RefCell::new(BidBoostCache::new(100)); +} + /// Fetch the parent block from the RPC URL for extra validation of the header. /// Extra validation will be skipped if: /// - relay returns header before parent block is fetched @@ -475,7 +547,8 @@ fn validate_signature( &received_relay_pubkey, &message, signature, - APPLICATION_BUILDER_DOMAIN, + None, + &B32::from(APPLICATION_BUILDER_DOMAIN), ) .map_err(ValidationError::Sigverify)?; @@ -519,6 +592,106 @@ mod tests { use super::{validate_header_data, *}; + #[test] + fn test_apply_bid_boost() { + // Test normal boost + assert_eq!(apply_bid_boost(U256::from(100), 1.05), U256::from(105)); + assert_eq!(apply_bid_boost(U256::from(1000), 1.10), U256::from(1100)); + + // Test no boost + assert_eq!(apply_bid_boost(U256::from(100), 1.0), U256::from(100)); + + // Test boost less than 1.0 should not decrease + assert_eq!(apply_bid_boost(U256::from(100), 0.95), U256::from(100)); + assert_eq!(apply_bid_boost(U256::from(100), 0.5), U256::from(100)); + + // Test edge cases + assert_eq!(apply_bid_boost(U256::ZERO, 1.05), U256::ZERO); + assert_eq!(apply_bid_boost(U256::from(1), 1.05), U256::from(1)); // Should not decrease + + // Test precision with larger values + let large_value = U256::from(10_000_000_000_000_000_000u128); // 10 ETH in wei + let boosted = apply_bid_boost(large_value, 1.025); // 2.5% boost + let expected = U256::from(10_250_000_000_000_000_000u128); // 10.25 ETH + assert_eq!(boosted, expected); + } + + #[test] + fn test_apply_bid_boost_overflow_protection() { + // Test with max value - should not panic + let max_value = U256::MAX; + let boosted = apply_bid_boost(max_value, 2.0); + // Should return max value due to saturation + assert_eq!(boosted, U256::MAX); + } + + #[test] + fn test_bid_boost_cache() { + let mut cache = BidBoostCache::new(3); + + // First call calculates + let result1 = cache.get_or_calculate(U256::from(100), 1.05); + assert_eq!(result1, U256::from(105)); + assert_eq!(cache.cache.len(), 1); + + // Second call uses cache (same value, same boost) + let result2 = cache.get_or_calculate(U256::from(100), 1.05); + assert_eq!(result2, U256::from(105)); + assert_eq!(cache.cache.len(), 1); // Still only 1 entry + + // Different values cached separately + let result3 = cache.get_or_calculate(U256::from(200), 1.05); + assert_eq!(result3, U256::from(210)); + assert_eq!(cache.cache.len(), 2); + + cache.get_or_calculate(U256::from(300), 1.10); + assert_eq!(cache.cache.len(), 3); + + // Cache eviction on overflow + cache.get_or_calculate(U256::from(400), 1.05); + assert_eq!(cache.cache.len(), 1); // Cache was cleared and new entry added + } + + #[test] + fn test_cache_precision() { + let mut cache = BidBoostCache::new(10); + + // Test that similar boosts are cached separately + let val = U256::from(1000); + let boost1 = cache.get_or_calculate(val, 1.0501); + let boost2 = cache.get_or_calculate(val, 1.0502); + + // Different boost factors should give different results + // 1.0501 -> 10501/10000 = 1050 + // 1.0502 -> 10502/10000 = 1050 + // Due to rounding they might be the same + assert!(boost1 <= boost2); + + // Test more distinct values + let boost3 = cache.get_or_calculate(val, 1.05); + let boost4 = cache.get_or_calculate(val, 1.10); + assert_eq!(boost3, U256::from(1050)); + assert_eq!(boost4, U256::from(1100)); + assert_ne!(boost3, boost4); + } + + #[test] + fn test_cache_thread_local() { + // Test that the thread-local cache works + let value1 = BID_BOOST_CACHE + .with(|cache| cache.borrow_mut().get_or_calculate(U256::from(100), 1.05)); + assert_eq!(value1, U256::from(105)); + + // Second call should use cache + let value2 = BID_BOOST_CACHE + .with(|cache| cache.borrow_mut().get_or_calculate(U256::from(100), 1.05)); + assert_eq!(value2, U256::from(105)); + + // Check cache size + let cache_size = BID_BOOST_CACHE.with(|cache| cache.borrow().cache.len()); + assert_eq!(cache_size, 1); + } + #[test] fn test_validate_header() { let slot = 5; diff --git a/crates/pbs/src/mev_boost/register_validator.rs b/crates/pbs/src/mev_boost/register_validator.rs index 5d2b5f1e..725a2009 100644 --- a/crates/pbs/src/mev_boost/register_validator.rs +++ b/crates/pbs/src/mev_boost/register_validator.rs @@ -14,7 +14,11 @@ use url::Url; use crate::{ constants::{MAX_SIZE_DEFAULT, REGISTER_VALIDATOR_ENDPOINT_TAG, TIMEOUT_ERROR_CODE_STR}, - metrics::{RELAY_LATENCY, RELAY_STATUS_CODE}, + metrics::{ + REGISTRATIONS_PROCESSED, REGISTRATIONS_SKIPPED, REGISTRATION_CACHE_HITS, + REGISTRATION_CACHE_MISSES, REGISTRATION_CACHE_SIZE, RELAY_LATENCY, RELAY_STATUS_CODE, + }, + registration_cache::REGISTRATION_CACHE, state::{BuilderApiState, PbsState}, }; @@ -25,6 +29,85 @@ pub async fn register_validator( req_headers: HeaderMap, state: PbsState, ) -> eyre::Result<()> { + let original_count = registrations.len(); + + // Scope the cache operations to ensure lock is released + let (filtered_registrations, cache_stats) = { + let mut cache = REGISTRATION_CACHE.write(); + + // Filter out validators that don't need re-registration + let filtered_registrations: Vec = registrations + .into_iter() + .filter(|reg| { + cache.needs_registration( + ®.message.pubkey, + ®.message.fee_recipient, + reg.message.gas_limit, + ) + }) + .collect(); + + // Mark validators as registered before sending to relays + // This prevents race conditions with concurrent requests + for reg in &filtered_registrations { + cache.mark_registered( + reg.message.pubkey, + reg.message.fee_recipient, + reg.message.gas_limit, + ); + } + + // Get cache statistics before releasing lock + let stats = (cache.stats(), cache.hit_rate()); + + (filtered_registrations, stats) + }; // Cache lock is automatically dropped here + + let ((hits, misses, size), hit_rate) = cache_stats; + + // Update metrics after releasing the lock + REGISTRATION_CACHE_HITS.inc_by(hits as u64); + REGISTRATION_CACHE_MISSES.inc_by(misses as u64); + REGISTRATION_CACHE_SIZE.set(size as i64); + + // Log cache statistics periodically + if (hits + misses) % 100 == 0 && (hits + misses) > 0 { + debug!( + cache_hits = hits, + cache_misses = misses, + cache_size = size, + hit_rate = hit_rate, + "Registration cache stats" + ); + } + + // If all validators are already registered, skip relay calls + if filtered_registrations.is_empty() { + REGISTRATIONS_SKIPPED.with_label_values(&["all_cached"]).inc_by(original_count as u64); + debug!( + skipped_count = original_count, + "All validators already registered within TTL, skipping relay calls" + ); + return Ok(()); + } + + // Record the number of registrations being processed + REGISTRATIONS_PROCESSED + .with_label_values(&["filtered"]) + .inc_by(filtered_registrations.len() as u64); + if original_count > filtered_registrations.len() { + REGISTRATIONS_SKIPPED + .with_label_values(&["partial_cached"]) + .inc_by((original_count - filtered_registrations.len()) as u64); + } + + debug!( + original_count = original_count, + filtered_count = filtered_registrations.len(), + cache_hit_rate = hit_rate, + "Filtered duplicate registrations" + ); + // prepare headers let mut send_headers = HeaderMap::new(); send_headers @@ -32,25 +115,21 @@ pub async fn register_validator( send_headers.insert(USER_AGENT, get_user_agent_with_version(&req_headers)?); let relays = state.all_relays().to_vec(); - let mut handles = Vec::with_capacity(relays.len()); - for relay in relays.clone() { - if let Some(batch_size) = relay.config.validator_registration_batch_size { - for batch in registrations.chunks(batch_size) { - handles.push(tokio::spawn( - send_register_validator_with_timeout( - batch.to_vec(), - relay.clone(), - send_headers.clone(), - state.pbs_config().timeout_register_validator_ms, - state.pbs_config().register_validator_retry_limit, - ) - .in_current_span(), - )); - } + + // Build all registration tasks upfront for parallel execution + let mut handles = Vec::new(); + for relay in relays { + let batches = if let Some(batch_size) = relay.config.validator_registration_batch_size { + filtered_registrations.chunks(batch_size).map(|c| c.to_vec()).collect::>() } else { + vec![filtered_registrations.clone()] + }; + + // Create all tasks for this relay + for batch in batches { handles.push(tokio::spawn( send_register_validator_with_timeout( - registrations.clone(), + batch, relay.clone(), send_headers.clone(), state.pbs_config().timeout_register_validator_ms, @@ -195,3 +274,279 @@ async fn send_register_validator( Ok(()) } + +#[cfg(test)] +mod tests { + use super::*; + use alloy::rpc::types::beacon::relay::ValidatorRegistration; + use std::time::Instant; + + fn create_test_registrations(count: usize) -> Vec { + use alloy::rpc::types::beacon::{BlsPublicKey, BlsSignature}; + + (0..count) + .map(|i| ValidatorRegistration { + message: alloy::rpc::types::beacon::relay::ValidatorRegistrationMessage { + fee_recipient: Default::default(), + gas_limit: 30_000_000, + timestamp: 1000 + i as u64, + pubkey: BlsPublicKey::from([i as u8; 48]), + }, + signature: BlsSignature::from([0u8; 96]), + }) + .collect() + } + + fn calculate_batches( + registrations: &[ValidatorRegistration], + batch_size: Option, + ) -> Vec> { + if let Some(size) = batch_size { + registrations.chunks(size).map(|c| c.to_vec()).collect() + } else { + vec![registrations.to_vec()] + } + } + + #[test] + fn test_batch_calculation() { + let registrations = create_test_registrations(100); + + // Test with batch size + let batches = calculate_batches(®istrations, Some(20)); + assert_eq!(batches.len(), 5); + assert_eq!(batches[0].len(), 20); + assert_eq!(batches[4].len(), 20); + + // Test without batch size + let batches = calculate_batches(®istrations, None); + assert_eq!(batches.len(), 1); + assert_eq!(batches[0].len(), 100); + + // Test with uneven batch size + let registrations = create_test_registrations(95); + let batches = calculate_batches(®istrations, Some(20)); + assert_eq!(batches.len(), 5); + assert_eq!(batches[4].len(), 15); // Last batch has remainder + } + + #[tokio::test] + async fn test_parallel_execution_timing() { + let start = Instant::now(); + + // Create mock tasks that sleep + let tasks: Vec<_> = (0..5) + .map(|_| { + tokio::spawn(async { + tokio::time::sleep(Duration::from_millis(100)).await; + Ok::<_, eyre::Error>(()) + }) + }) + .collect(); + + let results = join_all(tasks).await; + + let elapsed = start.elapsed(); + + // All tasks should complete in parallel + // Should complete in ~100ms, not 500ms + assert!( + elapsed < Duration::from_millis(200), + "Parallel execution took {:?}, expected < 200ms", + elapsed + ); + + // Verify all completed successfully + assert_eq!(results.len(), 5); + for result in results { + assert!(result.is_ok()); + } + } + + #[tokio::test] + async fn test_parallel_vs_sequential_timing() { + // Sequential execution simulation + let start_seq = Instant::now(); + for _ in 0..3 { + tokio::time::sleep(Duration::from_millis(50)).await; + } + let seq_time = start_seq.elapsed(); + + // Parallel execution + let start_par = Instant::now(); + let tasks: Vec<_> = (0..3) + .map(|_| { + tokio::spawn(async { + tokio::time::sleep(Duration::from_millis(50)).await; + }) + }) + .collect(); + join_all(tasks).await; + let par_time = start_par.elapsed(); + + // Parallel should be significantly faster + assert!( + par_time < seq_time / 2, + "Parallel {:?} should be much faster than sequential {:?}", + par_time, + seq_time + ); + } + + #[test] + fn test_registration_deduplication() { + use crate::registration_cache::RegistrationCache; + + let mut cache = RegistrationCache::new(); + let registrations = create_test_registrations(5); + + // First time all should need registration + for reg in ®istrations { + assert!(cache.needs_registration( + ®.message.pubkey, + ®.message.fee_recipient, + reg.message.gas_limit + )); + cache.mark_registered( + reg.message.pubkey, + reg.message.fee_recipient, + reg.message.gas_limit, + ); + } + + // Second time none should need registration + for reg in ®istrations { + assert!(!cache.needs_registration( + ®.message.pubkey, + ®.message.fee_recipient, + reg.message.gas_limit + )); + } + + // Check cache stats + let (hits, misses, size) = cache.stats(); + assert_eq!(hits, 5); + assert_eq!(misses, 5); + assert_eq!(size, 5); + assert_eq!(cache.hit_rate(), 50.0); + } + + #[test] + fn test_registration_param_changes() { + use crate::registration_cache::RegistrationCache; + use alloy::primitives::Address; + + let mut cache = RegistrationCache::new(); + let reg = create_test_registrations(1).into_iter().next().unwrap(); + + // Initial registration + assert!(cache.needs_registration( + ®.message.pubkey, + ®.message.fee_recipient, + reg.message.gas_limit + )); + cache.mark_registered(reg.message.pubkey, reg.message.fee_recipient, reg.message.gas_limit); + + // Same params should not need re-registration + assert!(!cache.needs_registration( + ®.message.pubkey, + ®.message.fee_recipient, + reg.message.gas_limit + )); + + // Change fee recipient - should need re-registration + let new_fee_recipient = Address::from([1u8; 20]); + assert!(cache.needs_registration( + ®.message.pubkey, + &new_fee_recipient, + reg.message.gas_limit + )); + + // Change gas limit - should need re-registration + assert!(cache.needs_registration( + ®.message.pubkey, + ®.message.fee_recipient, + 25_000_000 + )); + } + + #[test] + fn test_filtered_registrations_logic() { + use crate::registration_cache::RegistrationCache; + + let mut cache = RegistrationCache::new(); + let registrations = create_test_registrations(10); + + // Mark first 5 as already registered + for reg in ®istrations[0..5] { + cache.mark_registered( + reg.message.pubkey, + reg.message.fee_recipient, + reg.message.gas_limit, + ); + } + + // Filter registrations like the main function does + let filtered: Vec<_> = registrations + .iter() + .filter(|reg| { + cache.needs_registration( + ®.message.pubkey, + ®.message.fee_recipient, + reg.message.gas_limit, + ) + }) + .cloned() + .collect(); + + // Should only have 5 registrations (the ones not in cache) + assert_eq!(filtered.len(), 5); + + // Verify the filtered ones are the correct ones (6-10) + for (i, reg) in filtered.iter().enumerate() { + assert_eq!(reg.message.pubkey, registrations[i + 5].message.pubkey); + } + } + + #[test] + fn test_cache_concurrent_access() { + use crate::registration_cache::REGISTRATION_CACHE; + use std::thread; + + let registrations = create_test_registrations(20); + let mut handles = vec![]; + + // Spawn multiple threads accessing the global cache + for chunk in registrations.chunks(5) { + let chunk_regs = chunk.to_vec(); + let handle = thread::spawn(move || { + let mut cache = REGISTRATION_CACHE.write(); + for reg in chunk_regs { + let needs = cache.needs_registration( + ®.message.pubkey, + ®.message.fee_recipient, + reg.message.gas_limit, + ); + if needs { + cache.mark_registered( + reg.message.pubkey, + reg.message.fee_recipient, + reg.message.gas_limit, + ); + } + } + }); + handles.push(handle); + } + + // Wait for all threads to complete + for handle in handles { + handle.join().unwrap(); + } + + // Verify all registrations are in cache + let cache = REGISTRATION_CACHE.read(); + let (_, _, size) = cache.stats(); + assert_eq!(size, 20); + } +} diff --git a/crates/pbs/src/mev_boost/submit_block.rs b/crates/pbs/src/mev_boost/submit_block.rs index ac633f2c..f096329c 100644 --- a/crates/pbs/src/mev_boost/submit_block.rs +++ b/crates/pbs/src/mev_boost/submit_block.rs @@ -208,9 +208,9 @@ fn validate_unblinded_block_electra( let blobs = &block_response.blobs_bundle; let expected_commitments = &signed_blinded_block.body.blob_kzg_commitments; - if expected_commitments.len() != blobs.blobs.len() || - expected_commitments.len() != blobs.commitments.len() || - expected_commitments.len() != blobs.proofs.len() + if expected_commitments.len() != blobs.blobs.len() + || expected_commitments.len() != blobs.commitments.len() + || expected_commitments.len() != blobs.proofs.len() { return Err(PbsError::Validation(ValidationError::KzgCommitments { expected_blobs: expected_commitments.len(), diff --git a/crates/pbs/src/registration_cache.rs b/crates/pbs/src/registration_cache.rs new file mode 100644 index 00000000..12f84dd2 --- /dev/null +++ b/crates/pbs/src/registration_cache.rs @@ -0,0 +1,272 @@ +use alloy::{primitives::Address, rpc::types::beacon::BlsPublicKey}; +use parking_lot::RwLock; +use std::collections::HashMap; +use std::sync::Arc; +use std::time::{Duration, Instant}; +use tracing::debug; + +const REGISTRATION_TTL: Duration = Duration::from_secs(27 * 60 * 60); // 27 hours + +/// Cache to track recently registered validators +pub struct RegistrationCache { + /// Map of validator pubkey to registration data and timestamp + registrations: HashMap, + /// Cache metrics + hits: u64, + misses: u64, +} + +#[derive(Clone)] +struct RegistrationEntry { + fee_recipient: Address, + gas_limit: u64, + registered_at: Instant, +} + +impl RegistrationCache { + pub fn new() -> Self { + Self { registrations: HashMap::new(), hits: 0, misses: 0 } + } + + /// Check if validator needs re-registration + pub fn needs_registration( + &mut self, + pubkey: &BlsPublicKey, + fee_recipient: &Address, + gas_limit: u64, + ) -> bool { + match self.registrations.get(pubkey) { + Some(entry) => { + // Check if registration is still valid + let expired = entry.registered_at.elapsed() > REGISTRATION_TTL; + let params_changed = + entry.fee_recipient != *fee_recipient || entry.gas_limit != gas_limit; + + if expired || params_changed { + self.misses += 1; + if expired { + debug!(pubkey = %pubkey, "Registration expired, needs re-registration"); + } else { + debug!( + pubkey = %pubkey, + old_fee_recipient = %entry.fee_recipient, + new_fee_recipient = %fee_recipient, + old_gas_limit = entry.gas_limit, + new_gas_limit = gas_limit, + "Registration params changed, needs re-registration" + ); + } + true + } else { + self.hits += 1; + false + } + } + None => { + self.misses += 1; + debug!(pubkey = %pubkey, "First time registration"); + true + } + } + } + + /// Mark validator as registered + pub fn mark_registered( + &mut self, + pubkey: BlsPublicKey, + fee_recipient: Address, + gas_limit: u64, + ) { + self.registrations.insert( + pubkey, + RegistrationEntry { fee_recipient, gas_limit, registered_at: Instant::now() }, + ); + + // Periodically clean expired entries to prevent unbounded growth + if self.registrations.len() % 1000 == 0 { + self.cleanup_expired(); + } + } + + /// Remove expired entries + fn cleanup_expired(&mut self) { + let initial_size = self.registrations.len(); + self.registrations.retain(|_, entry| entry.registered_at.elapsed() <= REGISTRATION_TTL); + let removed = initial_size - self.registrations.len(); + if removed > 0 { + debug!( + removed_entries = removed, + remaining_entries = self.registrations.len(), + "Cleaned up expired registration cache entries" + ); + } + } + + /// Get cache statistics + pub fn stats(&self) -> (u64, u64, usize) { + (self.hits, self.misses, self.registrations.len()) + } + + /// Force cleanup for testing + #[cfg(test)] + pub fn force_cleanup(&mut self) { + self.cleanup_expired(); + } + + /// Get hit rate percentage + pub fn hit_rate(&self) -> f64 { + let total = self.hits + self.misses; + if total == 0 { + 0.0 + } else { + (self.hits as f64 / total as f64) * 100.0 + } + } +} + +// Global cache instance +lazy_static::lazy_static! { + pub static ref REGISTRATION_CACHE: Arc> = + Arc::new(RwLock::new(RegistrationCache::new())); +} + +#[cfg(test)] +mod tests { + use super::*; + use alloy::primitives::Address; + use std::thread; + + #[test] + fn test_registration_cache_ttl() { + let mut cache = RegistrationCache::new(); + let pubkey = BlsPublicKey::from([1u8; 48]); + let fee_recipient = Address::from([2u8; 20]); + + // First registration should need registration + assert!(cache.needs_registration(&pubkey, &fee_recipient, 30_000_000)); + + // Mark as registered + cache.mark_registered(pubkey, fee_recipient, 30_000_000); + + // Should not need registration immediately + assert!(!cache.needs_registration(&pubkey, &fee_recipient, 30_000_000)); + + // Verify cache hit was recorded + assert_eq!(cache.hits, 1); + assert_eq!(cache.misses, 1); + assert_eq!(cache.hit_rate(), 50.0); + } + + #[test] + fn test_params_change_triggers_reregistration() { + let mut cache = RegistrationCache::new(); + let pubkey = BlsPublicKey::from([1u8; 48]); + let fee_recipient1 = Address::from([2u8; 20]); + let fee_recipient2 = Address::from([3u8; 20]); + + cache.mark_registered(pubkey, fee_recipient1, 30_000_000); + + // Different fee recipient should trigger re-registration + assert!(cache.needs_registration(&pubkey, &fee_recipient2, 30_000_000)); + + // Different gas limit should trigger re-registration + assert!(cache.needs_registration(&pubkey, &fee_recipient1, 25_000_000)); + + // Cache should have 2 misses (initial + param change) + assert_eq!(cache.misses, 2); + } + + #[test] + fn test_cleanup_expired() { + let mut cache = RegistrationCache::new(); + + // Add entries + for i in 0..10 { + let pubkey = BlsPublicKey::from([i; 48]); + cache.mark_registered(pubkey, Address::default(), 30_000_000); + } + + assert_eq!(cache.registrations.len(), 10); + + // Cleanup should not remove fresh entries + cache.force_cleanup(); + assert_eq!(cache.registrations.len(), 10); + } + + #[test] + fn test_concurrent_access() { + use std::sync::Arc; + + let cache = Arc::new(RwLock::new(RegistrationCache::new())); + let mut handles = vec![]; + + // Spawn multiple threads accessing cache + for i in 0..10 { + let cache_clone = cache.clone(); + let handle = thread::spawn(move || { + let pubkey = BlsPublicKey::from([i; 48]); + let fee_recipient = Address::from([i; 20]); + + let mut cache = cache_clone.write(); + cache.needs_registration(&pubkey, &fee_recipient, 30_000_000); + cache.mark_registered(pubkey, fee_recipient, 30_000_000); + }); + handles.push(handle); + } + + for handle in handles { + handle.join().unwrap(); + } + + let cache = cache.read(); + assert_eq!(cache.registrations.len(), 10); + } + + #[test] + fn test_hit_rate_calculation() { + let mut cache = RegistrationCache::new(); + + // No requests yet + assert_eq!(cache.hit_rate(), 0.0); + + let pubkey = BlsPublicKey::from([1u8; 48]); + let fee_recipient = Address::from([2u8; 20]); + + // First call is a miss + cache.needs_registration(&pubkey, &fee_recipient, 30_000_000); + cache.mark_registered(pubkey, fee_recipient, 30_000_000); + assert_eq!(cache.hit_rate(), 0.0); // 0 hits, 1 miss + + // Second call is a hit + cache.needs_registration(&pubkey, &fee_recipient, 30_000_000); + assert_eq!(cache.hit_rate(), 50.0); // 1 hit, 1 miss + + // Third call is another hit + cache.needs_registration(&pubkey, &fee_recipient, 30_000_000); + assert!((cache.hit_rate() - 66.67).abs() < 0.1); // 2 hits, 1 miss ≈ 66.67% + } + + #[test] + fn test_stats() { + let mut cache = RegistrationCache::new(); + let pubkey = BlsPublicKey::from([1u8; 48]); + let fee_recipient = Address::from([2u8; 20]); + + // Initial stats + let (hits, misses, size) = cache.stats(); + assert_eq!((hits, misses, size), (0, 0, 0)); + + // After registration + cache.needs_registration(&pubkey, &fee_recipient, 30_000_000); + cache.mark_registered(pubkey, fee_recipient, 30_000_000); + + let (hits, misses, size) = cache.stats(); + assert_eq!((hits, misses, size), (0, 1, 1)); + + // After cache hit + cache.needs_registration(&pubkey, &fee_recipient, 30_000_000); + + let (hits, misses, size) = cache.stats(); + assert_eq!((hits, misses, size), (1, 1, 1)); + } +} diff --git a/crates/signer/src/error.rs b/crates/signer/src/error.rs index a2a113f3..64a3e5b8 100644 --- a/crates/signer/src/error.rs +++ b/crates/signer/src/error.rs @@ -25,11 +25,17 @@ pub enum SignerModuleError { #[error("Dirk signer does not support this operation")] DirkNotSupported, + #[error("module id not found")] + ModuleIdNotFound, + #[error("internal error: {0}")] Internal(String), #[error("rate limited for {0} more seconds")] RateLimited(f64), + + #[error("request error: {0}")] + RequestError(String), } impl IntoResponse for SignerModuleError { @@ -48,9 +54,13 @@ impl IntoResponse for SignerModuleError { (StatusCode::INTERNAL_SERVER_ERROR, "internal error".to_string()) } SignerModuleError::SignerError(err) => (StatusCode::BAD_REQUEST, err.to_string()), + SignerModuleError::ModuleIdNotFound => (StatusCode::NOT_FOUND, self.to_string()), SignerModuleError::RateLimited(duration) => { (StatusCode::TOO_MANY_REQUESTS, format!("rate limited for {duration:?}")) } + SignerModuleError::RequestError(err) => { + (StatusCode::BAD_REQUEST, format!("bad request: {err}")) + } } .into_response() } diff --git a/crates/signer/src/manager/dirk.rs b/crates/signer/src/manager/dirk.rs index 4c2d909f..18adc314 100644 --- a/crates/signer/src/manager/dirk.rs +++ b/crates/signer/src/manager/dirk.rs @@ -1,6 +1,10 @@ use std::{collections::HashMap, io::Write, path::PathBuf}; -use alloy::{hex, rpc::types::beacon::constants::BLS_SIGNATURE_BYTES_LEN}; +use alloy::{ + hex, + primitives::{aliases::B32, B256}, + rpc::types::beacon::constants::BLS_SIGNATURE_BYTES_LEN, +}; use blsful::inner_types::{Field, G2Affine, G2Projective, Group, Scalar}; use cb_common::{ commit::request::{ConsensusProxyMap, ProxyDelegation, SignedProxyDelegation}, @@ -8,7 +12,7 @@ use cb_common::{ constants::COMMIT_BOOST_DOMAIN, signature::compute_domain, signer::{BlsPublicKey, BlsSignature, ProxyStore}, - types::{Chain, ModuleId}, + types::{self, Chain, ModuleId}, }; use eyre::{bail, OptionExt}; use futures::{future::join_all, stream::FuturesUnordered, FutureExt, StreamExt}; @@ -173,8 +177,8 @@ impl DirkManager { .proxy_accounts .values() .filter_map(|proxy| { - if proxy.module == *module && - proxy.consensus.public_key() == account.public_key() + if proxy.module == *module + && proxy.consensus.public_key() == account.public_key() { Some(proxy.inner.public_key()) } else { @@ -192,14 +196,15 @@ impl DirkManager { pub async fn request_consensus_signature( &self, pubkey: &BlsPublicKey, - object_root: [u8; 32], + object_root: &B256, + module_signing_id: Option<&B256>, ) -> Result { match self.consensus_accounts.get(pubkey) { Some(Account::Simple(account)) => { - self.request_simple_signature(account, object_root).await + self.request_simple_signature(account, object_root, module_signing_id).await } Some(Account::Distributed(account)) => { - self.request_distributed_signature(account, object_root).await + self.request_distributed_signature(account, object_root, module_signing_id).await } None => Err(SignerModuleError::UnknownConsensusSigner(pubkey.to_vec())), } @@ -209,14 +214,15 @@ impl DirkManager { pub async fn request_proxy_signature( &self, pubkey: &BlsPublicKey, - object_root: [u8; 32], + object_root: &B256, + module_signing_id: Option<&B256>, ) -> Result { match self.proxy_accounts.get(pubkey) { Some(ProxyAccount { inner: Account::Simple(account), .. }) => { - self.request_simple_signature(account, object_root).await + self.request_simple_signature(account, object_root, module_signing_id).await } Some(ProxyAccount { inner: Account::Distributed(account), .. }) => { - self.request_distributed_signature(account, object_root).await + self.request_distributed_signature(account, object_root, module_signing_id).await } None => Err(SignerModuleError::UnknownProxySigner(pubkey.to_vec())), } @@ -226,13 +232,21 @@ impl DirkManager { async fn request_simple_signature( &self, account: &SimpleAccount, - object_root: [u8; 32], + object_root: &B256, + module_signing_id: Option<&B256>, ) -> Result { - let domain = compute_domain(self.chain, COMMIT_BOOST_DOMAIN); + let domain = compute_domain(self.chain, &B32::from(COMMIT_BOOST_DOMAIN)); + + let data = match module_signing_id { + Some(id) => types::PropCommitSigningInfo { data: *object_root, module_signing_id: *id } + .tree_hash_root() + .to_vec(), + None => object_root.to_vec(), + }; let response = SignerClient::new(account.connection.clone()) .sign(SignRequest { - data: object_root.to_vec(), + data, domain: domain.to_vec(), id: Some(sign_request::Id::PublicKey(account.public_key.to_vec())), }) @@ -256,17 +270,27 @@ impl DirkManager { async fn request_distributed_signature( &self, account: &DistributedAccount, - object_root: [u8; 32], + object_root: &B256, + module_signing_id: Option<&B256>, ) -> Result { let mut partials = Vec::with_capacity(account.participants.len()); let mut requests = Vec::with_capacity(account.participants.len()); + let data = match module_signing_id { + Some(id) => types::PropCommitSigningInfo { data: *object_root, module_signing_id: *id } + .tree_hash_root() + .to_vec(), + None => object_root.to_vec(), + }; + for (id, channel) in account.participants.iter() { + let data_copy = data.clone(); let request = async move { SignerClient::new(channel.clone()) .sign(SignRequest { - data: object_root.to_vec(), - domain: compute_domain(self.chain, COMMIT_BOOST_DOMAIN).to_vec(), + data: data_copy, + domain: compute_domain(self.chain, &B32::from(COMMIT_BOOST_DOMAIN)) + .to_vec(), id: Some(sign_request::Id::Account(account.name.clone())), }) .map(|res| (res, *id)) @@ -336,7 +360,7 @@ impl DirkManager { let message = ProxyDelegation { delegator: consensus, proxy: proxy_account.inner.public_key() }; let delegation_signature = - self.request_consensus_signature(&consensus, message.tree_hash_root().0).await?; + self.request_consensus_signature(&consensus, &message.tree_hash_root(), None).await?; let delegation = SignedProxyDelegation { message, signature: delegation_signature }; @@ -620,7 +644,7 @@ fn load_distributed_accounts( "Skiping invalid participant ID (0) for account {} in host {host_name}", account.name ); - continue + continue; } match consensus_accounts.get_mut(&public_key) { @@ -681,8 +705,8 @@ fn aggregate_partial_signatures(partials: &[(BlsSignature, u32)]) -> eyre::Resul denominator *= Scalar::from(*other_id) - Scalar::from(*id); } } - let lagrange_coeff = numerator * - denominator + let lagrange_coeff = numerator + * denominator .invert() .into_option() .ok_or_eyre("Failed to get lagrange coefficient")?; @@ -706,8 +730,8 @@ fn random_password() -> String { /// /// i.e., `{wallet}/{consensus_proxy}/{module}/{uuid}` fn name_matches_proxy(name: &str) -> bool { - name.split("/").count() > 3 && - name.rsplit_once("/").is_some_and(|(_, name)| uuid::Uuid::parse_str(name).is_ok()) + name.split("/").count() > 3 + && name.rsplit_once("/").is_some_and(|(_, name)| uuid::Uuid::parse_str(name).is_ok()) } mod test { diff --git a/crates/signer/src/manager/local.rs b/crates/signer/src/manager/local.rs index a613df0a..a13695e5 100644 --- a/crates/signer/src/manager/local.rs +++ b/crates/signer/src/manager/local.rs @@ -1,6 +1,9 @@ use std::collections::HashMap; -use alloy::{primitives::Address, rpc::types::beacon::BlsSignature}; +use alloy::{ + primitives::{Address, B256}, + rpc::types::beacon::BlsSignature, +}; use cb_common::{ commit::request::{ ConsensusProxyMap, ProxyDelegationBls, ProxyDelegationEcdsa, SignedProxyDelegationBls, @@ -95,7 +98,7 @@ impl LocalSigningManager { let proxy_pubkey = signer.pubkey(); let message = ProxyDelegationBls { delegator, proxy: proxy_pubkey }; - let signature = self.sign_consensus(&delegator, &message.tree_hash_root().0).await?; + let signature = self.sign_consensus(&delegator, &message.tree_hash_root(), None).await?; let delegation = SignedProxyDelegationBls { signature, message }; let proxy_signer = BlsProxySigner { signer, delegation }; @@ -114,7 +117,7 @@ impl LocalSigningManager { let proxy_address = signer.address(); let message = ProxyDelegationEcdsa { delegator, proxy: proxy_address }; - let signature = self.sign_consensus(&delegator, &message.tree_hash_root().0).await?; + let signature = self.sign_consensus(&delegator, &message.tree_hash_root(), None).await?; let delegation = SignedProxyDelegationEcdsa { signature, message }; let proxy_signer = EcdsaProxySigner { signer, delegation }; @@ -129,13 +132,14 @@ impl LocalSigningManager { pub async fn sign_consensus( &self, pubkey: &BlsPublicKey, - object_root: &[u8; 32], + object_root: &B256, + module_signing_id: Option<&B256>, ) -> Result { let signer = self .consensus_signers .get(pubkey) .ok_or(SignerModuleError::UnknownConsensusSigner(pubkey.to_vec()))?; - let signature = signer.sign(self.chain, *object_root).await; + let signature = signer.sign(self.chain, object_root, module_signing_id).await; Ok(signature) } @@ -143,28 +147,30 @@ impl LocalSigningManager { pub async fn sign_proxy_bls( &self, pubkey: &BlsPublicKey, - object_root: &[u8; 32], + object_root: &B256, + module_signing_id: Option<&B256>, ) -> Result { let bls_proxy = self .proxy_signers .bls_signers .get(pubkey) .ok_or(SignerModuleError::UnknownProxySigner(pubkey.to_vec()))?; - let signature = bls_proxy.sign(self.chain, *object_root).await; + let signature = bls_proxy.sign(self.chain, object_root, module_signing_id).await; Ok(signature) } pub async fn sign_proxy_ecdsa( &self, address: &Address, - object_root: &[u8; 32], + object_root: &B256, + module_signing_id: Option<&B256>, ) -> Result { let ecdsa_proxy = self .proxy_signers .ecdsa_signers .get(address) .ok_or(SignerModuleError::UnknownProxySigner(address.to_vec()))?; - let signature = ecdsa_proxy.sign(self.chain, *object_root).await?; + let signature = ecdsa_proxy.sign(self.chain, object_root, module_signing_id).await?; Ok(signature) } @@ -265,7 +271,6 @@ impl LocalSigningManager { #[cfg(test)] mod tests { use alloy::primitives::B256; - use cb_common::signature::compute_signing_root; use lazy_static::lazy_static; use super::*; @@ -287,9 +292,48 @@ mod tests { (signing_manager, consensus_pk) } + mod test_bls { + use alloy::primitives::aliases::B32; + use cb_common::{ + constants::COMMIT_BOOST_DOMAIN, signature::compute_domain, + signer::verify_bls_signature, types, + }; + + use super::*; + + #[tokio::test] + async fn test_key_signs_message() { + let (signing_manager, consensus_pk) = init_signing_manager(); + + let data_root = B256::random(); + let module_signing_id = B256::random(); + + let sig = signing_manager + .sign_consensus(&consensus_pk, &data_root, Some(&module_signing_id)) + .await + .unwrap(); + + // Verify signature + let signing_domain = compute_domain(CHAIN, &B32::from(COMMIT_BOOST_DOMAIN)); + let object_root = types::PropCommitSigningInfo { + data: data_root.tree_hash_root(), + module_signing_id, + } + .tree_hash_root(); + let signing_root = types::SigningData { object_root, signing_domain }.tree_hash_root(); + + let validation_result = + verify_bls_signature(&consensus_pk, signing_root.as_slice(), &sig); + + assert!(validation_result.is_ok(), "Keypair must produce valid signatures of messages.") + } + } + mod test_proxy_bls { + use alloy::primitives::aliases::B32; use cb_common::{ - constants::COMMIT_BOOST_DOMAIN, signature::compute_domain, signer::verify_bls_signature, + constants::COMMIT_BOOST_DOMAIN, signature::compute_domain, + signer::verify_bls_signature, types, }; use super::*; @@ -339,14 +383,23 @@ mod tests { let proxy_pk = signed_delegation.message.proxy; let data_root = B256::random(); + let module_signing_id = B256::random(); - let sig = signing_manager.sign_proxy_bls(&proxy_pk, &data_root).await.unwrap(); + let sig = signing_manager + .sign_proxy_bls(&proxy_pk, &data_root, Some(&module_signing_id)) + .await + .unwrap(); // Verify signature - let domain = compute_domain(CHAIN, COMMIT_BOOST_DOMAIN); - let signing_root = compute_signing_root(data_root.tree_hash_root().0, domain); + let signing_domain = compute_domain(CHAIN, &B32::from(COMMIT_BOOST_DOMAIN)); + let object_root = types::PropCommitSigningInfo { + data: data_root.tree_hash_root(), + module_signing_id, + } + .tree_hash_root(); + let signing_root = types::SigningData { object_root, signing_domain }.tree_hash_root(); - let validation_result = verify_bls_signature(&proxy_pk, &signing_root, &sig); + let validation_result = verify_bls_signature(&proxy_pk, signing_root.as_slice(), &sig); assert!( validation_result.is_ok(), @@ -356,9 +409,10 @@ mod tests { } mod test_proxy_ecdsa { + use alloy::primitives::aliases::B32; use cb_common::{ constants::COMMIT_BOOST_DOMAIN, signature::compute_domain, - signer::verify_ecdsa_signature, + signer::verify_ecdsa_signature, types, }; use super::*; @@ -408,12 +462,21 @@ mod tests { let proxy_pk = signed_delegation.message.proxy; let data_root = B256::random(); + let module_signing_id = B256::random(); - let sig = signing_manager.sign_proxy_ecdsa(&proxy_pk, &data_root).await.unwrap(); + let sig = signing_manager + .sign_proxy_ecdsa(&proxy_pk, &data_root, Some(&module_signing_id)) + .await + .unwrap(); // Verify signature - let domain = compute_domain(CHAIN, COMMIT_BOOST_DOMAIN); - let signing_root = compute_signing_root(data_root.tree_hash_root().0, domain); + let signing_domain = compute_domain(CHAIN, &B32::from(COMMIT_BOOST_DOMAIN)); + let object_root = types::PropCommitSigningInfo { + data: data_root.tree_hash_root(), + module_signing_id, + } + .tree_hash_root(); + let signing_root = types::SigningData { object_root, signing_domain }.tree_hash_root(); let validation_result = verify_ecdsa_signature(&proxy_pk, &signing_root, &sig); diff --git a/crates/signer/src/service.rs b/crates/signer/src/service.rs index 1a41a008..9bbdc48e 100644 --- a/crates/signer/src/service.rs +++ b/crates/signer/src/service.rs @@ -18,17 +18,17 @@ use cb_common::{ commit::{ constants::{ GENERATE_PROXY_KEY_PATH, GET_PUBKEYS_PATH, RELOAD_PATH, REQUEST_SIGNATURE_PATH, - STATUS_PATH, + REVOKE_MODULE_PATH, STATUS_PATH, }, request::{ - EncryptionScheme, GenerateProxyRequest, GetPubkeysResponse, SignConsensusRequest, - SignProxyRequest, SignRequest, + EncryptionScheme, GenerateProxyRequest, GetPubkeysResponse, ReloadRequest, + RevokeModuleRequest, SignConsensusRequest, SignProxyRequest, SignRequest, }, }, - config::StartSignerConfig, + config::{ModuleSigningConfig, StartSignerConfig}, constants::{COMMIT_BOOST_COMMIT, COMMIT_BOOST_VERSION}, types::{Chain, Jwt, ModuleId}, - utils::{decode_jwt, validate_jwt}, + utils::{decode_jwt, validate_admin_jwt, validate_jwt}, }; use cb_metrics::provider::MetricsProvider; use eyre::Context; @@ -61,9 +61,12 @@ struct SigningState { /// Manager handling different signing methods manager: Arc>, - /// Map of modules ids to JWT secrets. This also acts as registry of all - /// modules running - jwts: Arc>, + /// Map of modules ids to JWT configurations. This also acts as registry of + /// all modules running + jwts: Arc>>, + + /// Secret for the admin JWT + admin_secret: Arc>, /// Map of JWT failures per peer jwt_auth_failures: Arc>>, @@ -75,16 +78,18 @@ struct SigningState { impl SigningService { pub async fn run(config: StartSignerConfig) -> eyre::Result<()> { - if config.jwts.is_empty() { + if config.mod_signing_configs.is_empty() { warn!("Signing service was started but no module is registered. Exiting"); return Ok(()); } - let module_ids: Vec = config.jwts.keys().cloned().map(Into::into).collect(); + let module_ids: Vec = + config.mod_signing_configs.keys().cloned().map(Into::into).collect(); let state = SigningState { manager: Arc::new(RwLock::new(start_manager(config.clone()).await?)), - jwts: config.jwts.into(), + jwts: Arc::new(ParkingRwLock::new(config.mod_signing_configs)), + admin_secret: Arc::new(ParkingRwLock::new(config.admin_secret)), jwt_auth_failures: Arc::new(ParkingRwLock::new(HashMap::new())), jwt_auth_fail_limit: config.jwt_auth_fail_limit, jwt_auth_fail_timeout: Duration::from_secs(config.jwt_auth_fail_timeout_seconds as u64), @@ -113,20 +118,30 @@ impl SigningService { SigningService::init_metrics(config.chain)?; - let app = axum::Router::new() + let signer_app = axum::Router::new() .route(REQUEST_SIGNATURE_PATH, post(handle_request_signature)) .route(GET_PUBKEYS_PATH, get(handle_get_pubkeys)) .route(GENERATE_PROXY_KEY_PATH, post(handle_generate_proxy)) .route_layer(middleware::from_fn_with_state(state.clone(), jwt_auth)) + .with_state(state.clone()) + .route_layer(middleware::from_fn(log_request)); + + let admin_app = axum::Router::new() .route(RELOAD_PATH, post(handle_reload)) + .route(REVOKE_MODULE_PATH, post(handle_revoke_module)) + .route_layer(middleware::from_fn_with_state(state.clone(), admin_auth)) .with_state(state.clone()) .route_layer(middleware::from_fn(log_request)) - .route(STATUS_PATH, get(handle_status)) - .into_make_service_with_connect_info::(); + .route(STATUS_PATH, get(handle_status)); let listener = TcpListener::bind(config.endpoint).await?; - axum::serve(listener, app).await.wrap_err("signer server exited") + axum::serve( + listener, + signer_app.merge(admin_app).into_make_service_with_connect_info::(), + ) + .await + .wrap_err("signer server exited") } fn init_metrics(network: Chain) -> eyre::Result<()> { @@ -214,18 +229,35 @@ fn check_jwt_auth( SignerModuleError::Unauthorized })?; - let jwt_secret = state.jwts.get(&module_id).ok_or_else(|| { + let guard = state.jwts.read(); + let jwt_config = guard.get(&module_id).ok_or_else(|| { error!("Unauthorized request. Was the module started correctly?"); SignerModuleError::Unauthorized })?; - validate_jwt(jwt, jwt_secret).map_err(|e| { + validate_jwt(jwt, &jwt_config.jwt_secret).map_err(|e| { error!("Unauthorized request. Invalid JWT: {e}"); SignerModuleError::Unauthorized })?; Ok(module_id) } +async fn admin_auth( + State(state): State, + TypedHeader(auth): TypedHeader>, + req: Request, + next: Next, +) -> Result { + let jwt: Jwt = auth.token().to_string().into(); + + validate_admin_jwt(jwt, &state.admin_secret.read()).map_err(|e| { + error!("Unauthorized request. Invalid JWT: {e}"); + SignerModuleError::Unauthorized + })?; + + Ok(next.run(req).await) +} + /// Requests logging middleware layer async fn log_request(req: Request, next: Next) -> Result { let url = &req.uri().clone(); @@ -268,37 +300,48 @@ async fn handle_request_signature( ) -> Result { let req_id = Uuid::new_v4(); + let Some(signing_id) = state.jwts.read().get(&module_id).map(|m| m.signing_id) else { + error!(event = "request_signature", ?module_id, ?req_id, "Module signing ID not found"); + return Err(SignerModuleError::RequestError("Module signing ID not found".to_string())); + }; + debug!(event = "request_signature", ?module_id, %request, ?req_id, "New request"); let manager = state.manager.read().await; let res = match &*manager { SigningManager::Local(local_manager) => match request { - SignRequest::Consensus(SignConsensusRequest { object_root, pubkey }) => local_manager - .sign_consensus(&pubkey, &object_root) - .await - .map(|sig| Json(sig).into_response()), - SignRequest::ProxyBls(SignProxyRequest { object_root, proxy: bls_key }) => { + SignRequest::Consensus(SignConsensusRequest { ref object_root, ref pubkey }) => { local_manager - .sign_proxy_bls(&bls_key, &object_root) + .sign_consensus(pubkey, object_root, Some(&signing_id)) .await .map(|sig| Json(sig).into_response()) } - SignRequest::ProxyEcdsa(SignProxyRequest { object_root, proxy: ecdsa_key }) => { + SignRequest::ProxyBls(SignProxyRequest { ref object_root, proxy: ref bls_key }) => { local_manager - .sign_proxy_ecdsa(&ecdsa_key, &object_root) + .sign_proxy_bls(bls_key, object_root, Some(&signing_id)) + .await + .map(|sig| Json(sig).into_response()) + } + SignRequest::ProxyEcdsa(SignProxyRequest { ref object_root, proxy: ref ecdsa_key }) => { + local_manager + .sign_proxy_ecdsa(ecdsa_key, object_root, Some(&signing_id)) .await .map(|sig| Json(sig).into_response()) } }, SigningManager::Dirk(dirk_manager) => match request { - SignRequest::Consensus(SignConsensusRequest { object_root, pubkey }) => dirk_manager - .request_consensus_signature(&pubkey, *object_root) - .await - .map(|sig| Json(sig).into_response()), - SignRequest::ProxyBls(SignProxyRequest { object_root, proxy: bls_key }) => dirk_manager - .request_proxy_signature(&bls_key, *object_root) - .await - .map(|sig| Json(sig).into_response()), + SignRequest::Consensus(SignConsensusRequest { ref object_root, ref pubkey }) => { + dirk_manager + .request_consensus_signature(pubkey, object_root, Some(&signing_id)) + .await + .map(|sig| Json(sig).into_response()) + } + SignRequest::ProxyBls(SignProxyRequest { ref object_root, proxy: ref bls_key }) => { + dirk_manager + .request_proxy_signature(bls_key, object_root, Some(&signing_id)) + .await + .map(|sig| Json(sig).into_response()) + } SignRequest::ProxyEcdsa(_) => { error!( event = "request_signature", @@ -360,6 +403,7 @@ async fn handle_generate_proxy( async fn handle_reload( State(mut state): State, + Json(request): Json, ) -> Result { let req_id = Uuid::new_v4(); @@ -373,6 +417,30 @@ async fn handle_reload( } }; + if let Some(jwt_secrets) = request.jwt_secrets { + let mut jwt_configs = state.jwts.write(); + let mut new_configs = HashMap::new(); + for (module_id, jwt_secret) in jwt_secrets { + if let Some(signing_id) = jwt_configs.get(&module_id).map(|cfg| cfg.signing_id) { + new_configs.insert( + module_id.clone(), + ModuleSigningConfig { module_name: module_id, jwt_secret, signing_id }, + ); + } else { + let error_message = format!( + "Module {module_id} signing ID not found in commit-boost config, cannot reload" + ); + error!(event = "reload", ?req_id, module_id = %module_id, error = %error_message); + return Err(SignerModuleError::RequestError(error_message)); + } + } + *jwt_configs = new_configs; + } + + if let Some(admin_secret) = request.admin_secret { + *state.admin_secret.write() = admin_secret; + } + let new_manager = match start_manager(config).await { Ok(manager) => manager, Err(err) => { @@ -386,6 +454,17 @@ async fn handle_reload( Ok(StatusCode::OK) } +async fn handle_revoke_module( + State(state): State, + Json(request): Json, +) -> Result { + let mut guard = state.jwts.write(); + guard + .remove(&request.module_id) + .ok_or(SignerModuleError::ModuleIdNotFound) + .map(|_| StatusCode::OK) +} + async fn start_manager(config: StartSignerConfig) -> eyre::Result { let proxy_store = if let Some(store) = config.store.clone() { Some(store.init_from_env()?) diff --git a/docs/docs/developing/prop-commit-signing.md b/docs/docs/developing/prop-commit-signing.md new file mode 100644 index 00000000..fd19fafc --- /dev/null +++ b/docs/docs/developing/prop-commit-signing.md @@ -0,0 +1,60 @@ +# Requesting Proposer Commitment Signatures with Commit-Boost + +When you create a new validator on the Ethereum network, one of the steps is the generation of a new BLS private key (commonly known as the "validator key" or the "signer key") and its corresponding BLS public key (the "validator pubkey", used as an identifier). Typically this private key will be used by an Ethereum consensus client to sign things such as attestations and blocks for publication on the Beacon chain. These signatures prove that you, as the owner of that private key, approve of the data being signed. However, as general-purpose private keys, they can also be used to sign *other* arbitrary messages not destined for the Beacon chain. + +Commit-Boost takes advantage of this by offering a standard known as **proposer commitments**. These are arbitrary messages (albeit with some important rules), similar to the kind used on the Beacon chain, that have been signed by one of the owner's private keys. Modules interested in leveraging Commit-Boost's proposer commitments can construct their own data in whatever format they like and request that Commit-Boost's **signer service** generate a signature for it with a particular private key. The module can then use that signature to verify the data was signed by that user. + +Commit-Boost supports proposer commitment signatures for both BLS private keys (identified by their public key) and ECDSA private keys (identified by their Ethereum address). + + +## Rules of Proposer Commitment Signatures + +Proposer commitment signatures produced by Commit-Boost's signer service conform to the following rules: + +- Signatures are **unique** to a given EVM chain (identified by its [chain ID](https://chainlist.org/)). Signatures generated for one chain will not work on a different chain. +- Signatures are **unique** to Commit-Boost proposer commitments. The signer service **cannot** be used to create signatures that could be used for other applications, such as for attestations on the Beacon chain. While the signer service has access to the same validator private keys used to attest on the Beacon chain, it cannot create signatures that would get you slashed on the Beacon chain. +- Signatures are **unique** to a particular module. One module cannot, for example, request an identical payload as another module and effectively "forge" a signature for the second module; identical payloads from two separate modules will result in two separate signatures. +- The data payload being signed must be a **32-byte array**, typically serializd as a 64-character hex string with an optional `0x` prefix. The value itself is arbitrary, as long as it has meaning to the requester - though it is typically the 256-bit hash of some kind of data. +- If requesting a signature from a BLS key, the resulting signature will be a standard BLS signature (96 bytes in length). +- If requesting a signature from an ECDSA key, the resulting signature will be a standard Ethereum RSV signature (65 bytes in length). + + +## Configuring a Module for Proposer Commitments + +Commit-Boost's signer service must be configured prior to launching to expect requests from your module. There are two main parts: + +1. An entry for your module into [Commit-Boost's configuration file](../get_started/configuration.md#custom-module). This must include a unique ID for your module, the line `type = "commit"`, and include a unique [signing ID](#the-signing-id) for your module. Generally you should provide values for these in your documentation, so your users can reference it when configuring their own Commit-Boost node. + +2. A JWT secret used by your module to authenticate with the signer in HTTP requests. This must be a string that both the Commit-Boost signer can read and your module can read, but no other modules should be allowed to access it. The user should be responsible for determining an appropriate secret and providing it to the Commit-Boost signer service securely; your module will need some way to accept this, typically via a command line argument that accepts a path to a file with the secret or as an environment variable. + +Once the user has configured both Commit-Boost and your module with these settings, your module will be able to authenticate with the signer service and request signatures. + + +## The Signing ID + +Your module's signing ID is a 32-byte value that is used as a unique identifier within the signing process. Proposer commitment signatures incorporate this value along with the data being signed as a way to create signatures that are exclusive to your module, so other modules can't maliciously construct signatures that appear to be from your module. Your module must have this ID incorporated into itself ahead of time, and the user must include this same ID within their Commit-Boost configuration file section for your module. Commit-Boost does not maintain a global registry of signing IDs, so this is a value you should provide to your users in your documentation. + +The Signing ID is decoupled from your module's human-readable name (the `module_id` field in the Commit-Boost configuration file) so that any changes to your module name will not invalidate signatures from previous versions. Similarly, if you don't change the module ID but *want* to invalidate previous signatures, you can modify the signing ID and it will do so. Just ensure your users are made aware of the change, so they can update it in their Commit-Boost configuration files accordingly. + + +## Structure of a Signature + +The form proposer commitment signatures take depends on the type of signature being requested. BLS signatures take the [standard form](https://eth2book.info/latest/part2/building_blocks/signatures/) (96-byte values). ECDSA (Ethereum EL) signatures take the [standard Ethereum ECDSA `r,s,v` signature form](https://forum.openzeppelin.com/t/sign-it-like-you-mean-it-creating-and-verifying-ethereum-signatures/697). In both cases, the data being signed is a 32-byte hash - the root hash of an SSZ Merkle tree, described below: + +
+ + + +
+ +where: + +- `Request Data` is a 32-byte array that serves as the data you want to sign. This is typically a hash of some more complex data on its own that your module constructs. + +- `Signing ID` is your module's 32-byte signing ID. The signer service will load this for your module from its configuration file. + +- `Domain` is the 32-byte output of the [compute_domain()](https://eth2book.info/capella/part2/building_blocks/signatures/#domain-separation-and-forks) function in the Beacon specification. The 4-byte domain type in this case is not a standard Beacon domain type, but rather Commit-Boost's own domain type: `0x6D6D6F43`. + +The data signed in a proposer commitment is the 32-byte root of this tree (the green `Root` box). Note that calculating this will involve calculating the Merkle Root of two separate trees: first the blue data subtree (with the original request data and the signing ID) to establish the blue `Root` value, and then again with a tree created from that value and the `Domain`. + +Many languages provide libraries for computing the root of an SSZ Merkle tree, such as [fastssz for Go](https://github.com/ferranbt/fastssz) or [tree_hash for Rust](https://docs.rs/tree_hash/latest/tree_hash/). When verifying proposer commitment signatures, use a library that supports Merkle tree root hashing, the `compute_domain()` operation, and validation for signatures generated by your key of choice. diff --git a/docs/docs/get_started/configuration.md b/docs/docs/get_started/configuration.md index c3f49712..ed2ffa6e 100644 --- a/docs/docs/get_started/configuration.md +++ b/docs/docs/get_started/configuration.md @@ -341,6 +341,7 @@ Delegation signatures will be stored in files with the format `/deleg A full example of a config file with Dirk can be found [here](https://github.com/Commit-Boost/commit-boost-client/blob/main/examples/configs/dirk_signer.toml). + ## Custom module We currently provide a test module that needs to be built locally. To build the module run: ```bash @@ -375,6 +376,7 @@ enabled = true id = "DA_COMMIT" type = "commit" docker_image = "test_da_commit" +signing_id = "0x6a33a23ef26a4836979edff86c493a69b26ccf0b4a16491a815a13787657431b" sleep_secs = 5 [[modules]] @@ -385,10 +387,11 @@ docker_image = "test_builder_log" A few things to note: - We now added a `signer` section which will be used to create the Signer module. -- There is now a `[[modules]]` section which at a minimum needs to specify the module `id`, `type` and `docker_image`. Additional parameters needed for the business logic of the module will also be here, +- There is now a `[[modules]]` section which at a minimum needs to specify the module `id`, `type` and `docker_image`. For modules with type `commit`, which will be used to access the Signer service and request signatures for preconfs, you will also need to specify the module's unique `signing_id` (see [the propser commitment documentation](../developing/prop-commit-signing.md)). Additional parameters needed for the business logic of the module will also be here. To learn more about developing modules, check out [here](/category/developing). + ## Vouch [Vouch](https://github.com/attestantio/vouch) is a multi-node validator client built by [Attestant](https://www.attestant.io/). Vouch is particular in that it also integrates an MEV-Boost client to interact with relays. The Commit-Boost PBS module is compatible with the Vouch `blockrelay` since it implements the same Builder-API as relays. For example, depending on your setup and preference, you may want to fetch headers from a given relay using Commit-Boost vs using the built-in Vouch `blockrelay`. @@ -429,6 +432,15 @@ Commit-Boost supports hot-reloading the configuration file. This means that you docker compose -f cb.docker-compose.yml exec cb_signer curl -X POST http://localhost:20000/reload ``` +### Signer module reload + +The signer module takes 2 optional parameters in the JSON body: + +- `jwt_secrets`: a string with a comma-separated list of `=` for all modules. +- `admin_secret`: a string with the secret for the signer admin JWT. + +Parameters that are not provided will not be updated; they will be regenerated using their original on-disk data as though the signer service was being restarted. Note that any changes you made with calls to `/revoke_jwt` or `/reload` will be reverted, so make sure you provide any modifications again as part of this call. + ### Notes - The hot reload feature is available for PBS modules (both default and custom) and signer module. diff --git a/docs/docs/get_started/running/binary.md b/docs/docs/get_started/running/binary.md index 385e7a0c..97991ee5 100644 --- a/docs/docs/get_started/running/binary.md +++ b/docs/docs/get_started/running/binary.md @@ -26,6 +26,7 @@ Modules need some environment variables to work correctly. - `CB_MUX_PATH_{ID}`: optional, override where to load mux validator keys for mux with `id=\{ID\}`. ### Signer Module +- `CB_SIGNER_ADMIN_JWT`: secret to use for admin JWT. - `CB_SIGNER_ENDPOINT`: optional, override to specify the `IP:port` endpoint to bind the signer server to. - For loading keys we currently support: - `CB_SIGNER_LOADER_FILE`: path to a `.json` with plaintext keys (for testing purposes only). diff --git a/docs/docs/res/img/prop_commit_tree.png b/docs/docs/res/img/prop_commit_tree.png new file mode 100644 index 00000000..1e36f4b4 Binary files /dev/null and b/docs/docs/res/img/prop_commit_tree.png differ diff --git a/examples/da_commit/src/main.rs b/examples/da_commit/src/main.rs index 71b61c53..c73b2191 100644 --- a/examples/da_commit/src/main.rs +++ b/examples/da_commit/src/main.rs @@ -1,6 +1,6 @@ use std::time::Duration; -use alloy::primitives::Address; +use alloy::primitives::{b256, Address, B256}; use commit_boost::prelude::*; use eyre::{OptionExt, Result}; use lazy_static::lazy_static; @@ -9,6 +9,13 @@ use serde::Deserialize; use tokio::time::sleep; use tracing::{error, info}; +// This is the signing ID used for the DA Commit module. +// Signatures produced by the signer service will incorporate this ID as part of +// the signature, preventing other modules from using the same signature for +// different purposes. +pub const DA_COMMIT_SIGNING_ID: B256 = + b256!("0x6a33a23ef26a4836979edff86c493a69b26ccf0b4a16491a815a13787657431b"); + // You can define custom metrics and a custom registry for the business logic of // your module. These will be automatically scaped by the Prometheus server lazy_static! { @@ -83,17 +90,38 @@ impl DaCommitService { ) -> Result<()> { let datagram = Datagram { data }; + // Request a signature directly from a BLS key let request = SignConsensusRequest::builder(pubkey).with_msg(&datagram); let signature = self.config.signer_client.request_consensus_signature(request).await?; - info!("Proposer commitment (consensus): {}", signature); + match verify_proposer_commitment_signature_bls( + self.config.chain, + &pubkey, + &datagram, + &signature, + &DA_COMMIT_SIGNING_ID, + ) { + Ok(_) => info!("Signature verified successfully"), + Err(err) => error!(%err, "Signature verification failed"), + }; + // Request a signature from a proxy BLS key let proxy_request_bls = SignProxyRequest::builder(proxy_bls).with_msg(&datagram); let proxy_signature_bls = self.config.signer_client.request_proxy_signature_bls(proxy_request_bls).await?; - info!("Proposer commitment (proxy BLS): {}", proxy_signature_bls); + match verify_proposer_commitment_signature_bls( + self.config.chain, + &proxy_bls, + &datagram, + &proxy_signature_bls, + &DA_COMMIT_SIGNING_ID, + ) { + Ok(_) => info!("Signature verified successfully"), + Err(err) => error!(%err, "Signature verification failed"), + }; + // If ECDSA keys are enabled, request a signature from a proxy ECDSA key if let Some(proxy_ecdsa) = proxy_ecdsa { let proxy_request_ecdsa = SignProxyRequest::builder(proxy_ecdsa).with_msg(&datagram); let proxy_signature_ecdsa = self @@ -102,6 +130,16 @@ impl DaCommitService { .request_proxy_signature_ecdsa(proxy_request_ecdsa) .await?; info!("Proposer commitment (proxy ECDSA): {}", proxy_signature_ecdsa); + match verify_proposer_commitment_signature_ecdsa( + self.config.chain, + &proxy_ecdsa, + &datagram, + &proxy_signature_ecdsa, + &DA_COMMIT_SIGNING_ID, + ) { + Ok(_) => info!("Signature verified successfully"), + Err(err) => error!(%err, "Signature verification failed"), + }; } SIG_RECEIVED_COUNTER.inc(); diff --git a/justfile b/justfile old mode 100644 new mode 100755 diff --git a/tests/Cargo.toml b/tests/Cargo.toml index f1b5c9d9..6da9a00d 100644 --- a/tests/Cargo.toml +++ b/tests/Cargo.toml @@ -11,6 +11,8 @@ cb-common.workspace = true cb-pbs.workspace = true cb-signer.workspace = true eyre.workspace = true +futures.workspace = true +jsonwebtoken.workspace = true reqwest.workspace = true serde_json.workspace = true tempfile.workspace = true diff --git a/tests/data/configs/signer.happy.toml b/tests/data/configs/signer.happy.toml new file mode 100644 index 00000000..6fb76445 --- /dev/null +++ b/tests/data/configs/signer.happy.toml @@ -0,0 +1,52 @@ +chain = "Hoodi" + +[pbs] +docker_image = "ghcr.io/commit-boost/pbs:latest" +with_signer = true +host = "127.0.0.1" +port = 18550 +relay_check = true +wait_all_registrations = true +timeout_get_header_ms = 950 +timeout_get_payload_ms = 4000 +timeout_register_validator_ms = 3000 +skip_sigverify = false +min_bid_eth = 0.5 +late_in_slot_time_ms = 2000 +extra_validation_enabled = false +rpc_url = "https://ethereum-holesky-rpc.publicnode.com" + +[[relays]] +id = "example-relay" +url = "http://0xa1cec75a3f0661e99299274182938151e8433c61a19222347ea1313d839229cb4ce4e3e5aa2bdeb71c8fcf1b084963c2@abc.xyz" +headers = { X-MyCustomHeader = "MyCustomHeader" } +enable_timing_games = false +target_first_request_ms = 200 +frequency_get_header_ms = 300 + +[signer] +docker_image = "ghcr.io/commit-boost/signer:latest" +host = "127.0.0.1" +port = 20000 +jwt_auth_fail_limit = 3 +jwt_auth_fail_timeout_seconds = 300 + +[signer.local.loader] +key_path = "./tests/data/keys.example.json" + +[signer.local.store] +proxy_dir = "./proxies" + +[[modules]] +id = "test-module" +signing_id = "0x6a33a23ef26a4836979edff86c493a69b26ccf0b4a16491a815a13787657431b" +type = "commit" +docker_image = "test_da_commit" +env_file = ".cb.env" + +[[modules]] +id = "another-module" +signing_id = "0x61fe00135d7b4912a8c63ada215ac2e62326e6e7b30f49a29fcf9779d7ad800d" +type = "commit" +docker_image = "test_da_commit" +env_file = ".cb.env" diff --git a/tests/src/conflict_matrix.rs b/tests/src/conflict_matrix.rs new file mode 100644 index 00000000..1b2ac51d --- /dev/null +++ b/tests/src/conflict_matrix.rs @@ -0,0 +1,93 @@ +use eyre::{bail, Result}; +use std::collections::HashSet; + +/// Simple conflict matrix for module compatibility checking +pub struct ConflictMatrix { + incompatible_pairs: HashSet<(String, String)>, +} + +impl ConflictMatrix { + pub fn new() -> Self { + let mut incompatible_pairs = HashSet::new(); + + // Define known incompatible module pairs + incompatible_pairs.insert(("pbs_relay_a".into(), "pbs_relay_b".into())); + incompatible_pairs.insert(("custom_pbs".into(), "pbs".into())); + incompatible_pairs.insert(("duplicate_signer".into(), "signer".into())); + + Self { incompatible_pairs } + } + + /// Check if two modules can coexist in the same configuration + pub fn can_coexist(&self, module_a: &str, module_b: &str) -> bool { + // Check both orderings since the set might only have one direction + !self.incompatible_pairs.contains(&(module_a.into(), module_b.into())) + && !self.incompatible_pairs.contains(&(module_b.into(), module_a.into())) + } + + /// Validate a list of modules for conflicts + pub fn validate_module_list(&self, module_ids: &[String]) -> Result<()> { + // Check for duplicates first + let mut seen = HashSet::new(); + for module_id in module_ids { + if !seen.insert(module_id) { + bail!("Duplicate module ID: {}", module_id); + } + } + + // Check for incompatible pairs + for i in 0..module_ids.len() { + for j in i + 1..module_ids.len() { + let module_a = &module_ids[i]; + let module_b = &module_ids[j]; + + if !self.can_coexist(module_a, module_b) { + bail!("Incompatible modules: '{}' and '{}'", module_a, module_b); + } + } + } + + Ok(()) + } +} + +impl Default for ConflictMatrix { + fn default() -> Self { + Self::new() + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_conflict_matrix_basic() { + let matrix = ConflictMatrix::new(); + + // Test known compatible pairs + assert!(matrix.can_coexist("pbs", "signer")); + assert!(matrix.can_coexist("signer", "pbs")); // Symmetric + + // Test known incompatible pairs + assert!(!matrix.can_coexist("pbs_relay_a", "pbs_relay_b")); + assert!(!matrix.can_coexist("pbs_relay_b", "pbs_relay_a")); // Symmetric + } + + #[test] + fn test_validate_module_list() { + let matrix = ConflictMatrix::new(); + + // Valid configuration + let valid_modules = vec!["pbs".to_string(), "signer".to_string()]; + assert!(matrix.validate_module_list(&valid_modules).is_ok()); + + // Invalid configuration - duplicates + let duplicate_modules = vec!["pbs".to_string(), "pbs".to_string()]; + assert!(matrix.validate_module_list(&duplicate_modules).is_err()); + + // Invalid configuration - conflicts + let conflict_modules = vec!["pbs_relay_a".to_string(), "pbs_relay_b".to_string()]; + assert!(matrix.validate_module_list(&conflict_modules).is_err()); + } +} diff --git a/tests/src/lib.rs b/tests/src/lib.rs index a4fbbb6a..e6d39cca 100644 --- a/tests/src/lib.rs +++ b/tests/src/lib.rs @@ -1,3 +1,8 @@ +pub mod conflict_matrix; pub mod mock_relay; pub mod mock_validator; +pub mod module_conflicts; +pub mod signer_service; pub mod utils; + +pub use conflict_matrix::ConflictMatrix; diff --git a/tests/src/mock_relay.rs b/tests/src/mock_relay.rs index 04ebfc24..971d19fe 100644 --- a/tests/src/mock_relay.rs +++ b/tests/src/mock_relay.rs @@ -71,6 +71,13 @@ impl MockRelayState { pub fn set_response_override(&self, status: StatusCode) { *self.response_override.write().unwrap() = Some(status); } + + pub fn reset_counter(&self) { + self.received_get_header.store(0, Ordering::Relaxed); + self.received_get_status.store(0, Ordering::Relaxed); + self.received_register_validator.store(0, Ordering::Relaxed); + self.received_submit_block.store(0, Ordering::Relaxed); + } } impl MockRelayState { @@ -121,8 +128,8 @@ async fn handle_get_header( response.message.pubkey = blst_pubkey_to_alloy(&state.signer.sk_to_pk()); response.message.header.timestamp = timestamp_of_slot_start_sec(0, state.chain); - let object_root = response.message.tree_hash_root().0; - response.signature = sign_builder_root(state.chain, &state.signer, object_root); + let object_root = response.message.tree_hash_root(); + response.signature = sign_builder_root(state.chain, &state.signer, &object_root); let response = GetHeaderResponse::Electra(response); (StatusCode::OK, Json(response)).into_response() diff --git a/tests/src/module_conflicts.rs b/tests/src/module_conflicts.rs new file mode 100644 index 00000000..ab213b6f --- /dev/null +++ b/tests/src/module_conflicts.rs @@ -0,0 +1,163 @@ +#[cfg(test)] +mod tests { + use crate::conflict_matrix::ConflictMatrix; + use crate::utils::get_default_config; + use std::collections::HashSet; + + #[test] + fn test_module_id_uniqueness() { + let config = get_default_config(); + let mut seen_ids = HashSet::new(); + + if let Some(modules) = config.modules { + for module in modules { + assert!(seen_ids.insert(module.id.clone()), "Duplicate module ID: {}", module.id); + } + } + } + + #[test] + fn test_port_conflicts() { + let config = get_default_config(); + let mut used_ports = HashSet::new(); + + // Check PBS port + assert!( + used_ports.insert(config.pbs.pbs_config.port), + "PBS port {} conflicts with an existing service", + config.pbs.pbs_config.port + ); + + // Check signer port if exists + if let Some(signer) = config.signer { + assert!(used_ports.insert(signer.port), "Signer port {} already in use", signer.port); + } + + // Check metrics ports if exists + if let Some(metrics) = config.metrics { + assert!( + used_ports.insert(metrics.start_port), + "Metrics port {} already in use", + metrics.start_port + ); + } + } + + #[test] + fn test_metric_namespace_conflicts() { + // Each module should prefix metrics with cb_{module_name}_ + let known_modules = + vec![("pbs", "cb_pbs_"), ("signer", "cb_signer_"), ("metrics", "cb_metrics_")]; + + for (module_a, prefix_a) in &known_modules { + for (module_b, prefix_b) in &known_modules { + if module_a != module_b { + assert!( + !prefix_a.starts_with(prefix_b) || prefix_a == prefix_b, + "Metric prefix conflict between {} and {}: '{}' vs '{}'", + module_a, + module_b, + prefix_a, + prefix_b + ); + } + } + } + } + + #[test] + fn test_integration_matrix_validation() { + let config = get_default_config(); + let matrix = ConflictMatrix::new(); + + if let Some(modules) = config.modules { + let module_ids: Vec = modules.iter().map(|m| m.id.to_string()).collect(); + + // Use the matrix to validate all module combinations + if let Err(error) = matrix.validate_module_list(&module_ids) { + panic!("Module conflict detected in configuration: {}", error); + } + } + } + + #[test] + fn test_known_incompatible_combinations() { + let matrix = ConflictMatrix::new(); + + // Test specific incompatible combinations + let incompatible_pairs = vec![ + ("pbs_relay_a", "pbs_relay_b"), + ("custom_pbs", "pbs"), + ("duplicate_signer", "signer"), + ]; + + for (module_a, module_b) in incompatible_pairs { + assert!( + !matrix.can_coexist(module_a, module_b), + "Modules '{}' and '{}' should be incompatible", + module_a, + module_b + ); + } + } + + #[test] + fn test_known_compatible_combinations() { + let matrix = ConflictMatrix::new(); + + // Test specific compatible combinations + let compatible_pairs = vec![("pbs", "signer"), ("pbs", "metrics"), ("signer", "metrics")]; + + for (module_a, module_b) in compatible_pairs { + assert!( + matrix.can_coexist(module_a, module_b), + "Modules '{}' and '{}' should be compatible", + module_a, + module_b + ); + } + } + + #[test] + fn test_conflict_matrix_symmetry() { + let matrix = ConflictMatrix::new(); + + // Test that compatibility is symmetric (A compatible with B implies B compatible with A) + let test_pairs = + vec![("pbs", "signer"), ("pbs_relay_a", "pbs_relay_b"), ("custom_pbs", "pbs")]; + + for (module_a, module_b) in test_pairs { + let a_to_b = matrix.can_coexist(module_a, module_b); + let b_to_a = matrix.can_coexist(module_b, module_a); + + assert_eq!( + a_to_b, b_to_a, + "Compatibility should be symmetric: {} <-> {}", + module_a, module_b + ); + } + } + + #[test] + fn test_empty_module_configuration() { + let matrix = ConflictMatrix::new(); + + // Empty module list should always be valid + let result = matrix.validate_module_list(&[]); + assert!(result.is_ok(), "Empty module list should be valid"); + } + + #[test] + fn test_single_module_configuration() { + let matrix = ConflictMatrix::new(); + + // Single module should always be valid + let single_modules = + vec![vec!["pbs".to_string()], vec!["signer".to_string()], vec!["metrics".to_string()]]; + + for module_list in single_modules { + let result = matrix.validate_module_list(&module_list); + assert!(result.is_ok(), "Single module '{}' should be valid", module_list[0]); + } + } +} diff --git a/tests/src/signer_service.rs b/tests/src/signer_service.rs new file mode 100644 index 00000000..3fb80e9c --- /dev/null +++ b/tests/src/signer_service.rs @@ -0,0 +1,87 @@ +use std::{collections::HashMap, time::Duration}; + +use alloy::{hex, primitives::FixedBytes}; +use cb_common::{ + commit::request::GetPubkeysResponse, + config::{ModuleSigningConfig, StartSignerConfig}, + constants::SIGNER_JWT_EXPIRATION, + signer::{SignerLoader, ValidatorKeysFormat}, + types::{Chain, Jwt, JwtAdmin, ModuleId}, +}; +use cb_signer::service::SigningService; +use eyre::Result; +use reqwest::{Response, StatusCode}; +use tracing::info; + +use crate::utils::{get_signer_config, get_start_signer_config}; + +// Starts the signer moduler server on a separate task and returns its +// configuration +pub async fn start_server( + port: u16, + mod_signing_configs: &HashMap, + admin_secret: String, +) -> Result { + let chain = Chain::Hoodi; + + // Create a signer config + let loader = SignerLoader::ValidatorsDir { + keys_path: "tests/data/keystores/keys".into(), + secrets_path: "tests/data/keystores/secrets".into(), + format: ValidatorKeysFormat::Lighthouse, + }; + let mut config = get_signer_config(loader); + config.port = port; + config.jwt_auth_fail_limit = 3; // Set a low fail limit for testing + config.jwt_auth_fail_timeout_seconds = 3; // Set a short timeout for testing + let start_config = get_start_signer_config(config, chain, mod_signing_configs, admin_secret); + + // Run the Signer + let server_handle = tokio::spawn(SigningService::run(start_config.clone())); + + // Make sure the server is running + tokio::time::sleep(Duration::from_millis(100)).await; + if server_handle.is_finished() { + return Err(eyre::eyre!( + "Signer service failed to start: {}", + server_handle.await.unwrap_err() + )); + } + Ok(start_config) +} + +// Verifies that the pubkeys returned by the server match the pubkeys in the +// test data +pub async fn verify_pubkeys(response: Response) -> Result<()> { + // Verify the expected pubkeys are returned + assert!(response.status() == StatusCode::OK); + let pubkey_json = response.json::().await?; + assert_eq!(pubkey_json.keys.len(), 2); + let expected_pubkeys = vec![ + FixedBytes::new(hex!("883827193f7627cd04e621e1e8d56498362a52b2a30c9a1c72036eb935c4278dee23d38a24d2f7dda62689886f0c39f4")), + FixedBytes::new(hex!("b3a22e4a673ac7a153ab5b3c17a4dbef55f7e47210b20c0cbb0e66df5b36bb49ef808577610b034172e955d2312a61b9")), + ]; + for expected in expected_pubkeys { + assert!( + pubkey_json.keys.iter().any(|k| k.consensus == expected), + "Expected pubkey not found: {:?}", + expected + ); + info!("Server returned expected pubkey: {:?}", expected); + } + Ok(()) +} + +// Creates a JWT for module administration +pub fn create_admin_jwt(admin_secret: String) -> Result { + jsonwebtoken::encode( + &jsonwebtoken::Header::default(), + &JwtAdmin { + admin: true, + exp: jsonwebtoken::get_current_timestamp() + SIGNER_JWT_EXPIRATION, + }, + &jsonwebtoken::EncodingKey::from_secret(admin_secret.as_ref()), + ) + .map_err(Into::into) + .map(Jwt::from) +} diff --git a/tests/src/utils.rs b/tests/src/utils.rs index 5392fd95..cef1b4b1 100644 --- a/tests/src/utils.rs +++ b/tests/src/utils.rs @@ -4,12 +4,17 @@ use std::{ sync::{Arc, Once}, }; -use alloy::{primitives::U256, rpc::types::beacon::BlsPublicKey}; +use alloy::{ + primitives::{B256, U256}, + rpc::types::beacon::BlsPublicKey, +}; use cb_common::{ config::{ - PbsConfig, PbsModuleConfig, RelayConfig, SignerConfig, SignerType, StartSignerConfig, - SIGNER_IMAGE_DEFAULT, SIGNER_JWT_AUTH_FAIL_LIMIT_DEFAULT, - SIGNER_JWT_AUTH_FAIL_TIMEOUT_SECONDS_DEFAULT, SIGNER_PORT_DEFAULT, + CommitBoostConfig, LogsSettings, ModuleKind, ModuleSigningConfig, PbsConfig, + PbsModuleConfig, RelayConfig, SignerConfig, SignerType, StartSignerConfig, + StaticModuleConfig, StaticPbsConfig, SIGNER_IMAGE_DEFAULT, + SIGNER_JWT_AUTH_FAIL_LIMIT_DEFAULT, SIGNER_JWT_AUTH_FAIL_TIMEOUT_SECONDS_DEFAULT, + SIGNER_PORT_DEFAULT, }, pbs::{RelayClient, RelayEntry}, signer::SignerLoader, @@ -41,6 +46,7 @@ pub fn generate_mock_relay(port: u16, pubkey: BlsPublicKey) -> Result PbsConfig { +pub fn get_pbs_config(port: u16) -> PbsConfig { PbsConfig { host: Ipv4Addr::UNSPECIFIED, port, @@ -84,6 +91,23 @@ pub fn get_pbs_static_config(port: u16) -> PbsConfig { } } +pub fn get_pbs_static_config(pbs_config: PbsConfig) -> StaticPbsConfig { + StaticPbsConfig { docker_image: String::from(""), pbs_config, with_signer: true } +} + +pub fn get_commit_boost_config(pbs_static_config: StaticPbsConfig) -> CommitBoostConfig { + CommitBoostConfig { + chain: Chain::Hoodi, + relays: vec![], + pbs: pbs_static_config, + muxes: None, + modules: Some(vec![]), + signer: None, + metrics: None, + logs: LogsSettings::default(), + } +} + pub fn to_pbs_config( chain: Chain, pbs_config: PbsConfig, @@ -115,7 +139,8 @@ pub fn get_signer_config(loader: SignerLoader) -> SignerConfig { pub fn get_start_signer_config( signer_config: SignerConfig, chain: Chain, - jwts: HashMap, + mod_signing_configs: &HashMap, + admin_secret: String, ) -> StartSignerConfig { match signer_config.inner { SignerType::Local { loader, .. } => StartSignerConfig { @@ -123,7 +148,8 @@ pub fn get_start_signer_config( loader: Some(loader), store: None, endpoint: SocketAddr::new(signer_config.host.into(), signer_config.port), - jwts, + mod_signing_configs: mod_signing_configs.clone(), + admin_secret, jwt_auth_fail_limit: signer_config.jwt_auth_fail_limit, jwt_auth_fail_timeout_seconds: signer_config.jwt_auth_fail_timeout_seconds, dirk: None, @@ -131,3 +157,20 @@ pub fn get_start_signer_config( _ => panic!("Only local signers are supported in tests"), } } + +pub fn create_module_config(id: ModuleId, signing_id: B256) -> StaticModuleConfig { + StaticModuleConfig { + id, + signing_id, + docker_image: String::from(""), + env: None, + env_file: None, + kind: ModuleKind::Commit, + } +} + +pub fn get_default_config() -> CommitBoostConfig { + let pbs_config = get_pbs_config(18550); + let pbs_static_config = get_pbs_static_config(pbs_config); + get_commit_boost_config(pbs_static_config) +} diff --git a/tests/tests/integration_auth_security.rs b/tests/tests/integration_auth_security.rs new file mode 100644 index 00000000..77149abd --- /dev/null +++ b/tests/tests/integration_auth_security.rs @@ -0,0 +1,492 @@ +use std::{ + collections::HashMap, + sync::{ + atomic::{AtomicUsize, Ordering}, + Arc, + }, + time::{Duration, Instant}, +}; + +use alloy::primitives::{b256, FixedBytes}; +use cb_common::{ + commit::{ + constants::{GET_PUBKEYS_PATH, REQUEST_SIGNATURE_PATH, REVOKE_MODULE_PATH}, + request::{SignConsensusRequest, SignRequest}, + }, + config::{load_module_signing_configs, ModuleSigningConfig}, + types::ModuleId, + utils::create_jwt, +}; +use cb_tests::{ + signer_service::{create_admin_jwt, start_server}, + utils::{self, setup_test_env}, +}; +use eyre::Result; +use reqwest::{Client, StatusCode}; +use tokio::time::sleep; +use tracing::{info, warn}; + +const JWT_MODULE_1: &str = "auth-test-module-1"; +const JWT_MODULE_2: &str = "auth-test-module-2"; +const JWT_MODULE_3: &str = "stress-test-module"; +const JWT_SECRET_1: &str = "super-secret-key-1"; +const JWT_SECRET_2: &str = "super-secret-key-2"; +const JWT_SECRET_3: &str = "stress-test-secret"; +const ADMIN_SECRET: &str = "admin-integration-secret"; +const PUBKEY_1: [u8; 48] = [ + 0x88, 0x38, 0x27, 0x19, 0x3f, 0x76, 0x27, 0xcd, 0x04, 0xe6, 0x21, 0xe1, 0xe8, 0xd5, 0x64, 0x98, + 0x36, 0x2a, 0x52, 0xb2, 0xa3, 0x0c, 0x9a, 0x1c, 0x72, 0x03, 0x6e, 0xb9, 0x35, 0xc4, 0x27, 0x8d, + 0xee, 0x23, 0xd3, 0x8a, 0x24, 0xd2, 0xf7, 0xdd, 0xa6, 0x26, 0x89, 0x88, 0x6f, 0x0c, 0x39, 0xf4, +]; + +async fn create_multi_module_signing_configs() -> HashMap { + let mut cfg = + utils::get_commit_boost_config(utils::get_pbs_static_config(utils::get_pbs_config(0))); + + let module_id_1 = ModuleId(JWT_MODULE_1.to_string()); + let signing_id_1 = b256!("0x1111111111111111111111111111111111111111111111111111111111111111"); + let module_id_2 = ModuleId(JWT_MODULE_2.to_string()); + let signing_id_2 = b256!("0x2222222222222222222222222222222222222222222222222222222222222222"); + let module_id_3 = ModuleId(JWT_MODULE_3.to_string()); + let signing_id_3 = b256!("0x3333333333333333333333333333333333333333333333333333333333333333"); + + cfg.modules = Some(vec![ + utils::create_module_config(module_id_1.clone(), signing_id_1), + utils::create_module_config(module_id_2.clone(), signing_id_2), + utils::create_module_config(module_id_3.clone(), signing_id_3), + ]); + + let jwts = HashMap::from([ + (module_id_1.clone(), JWT_SECRET_1.to_string()), + (module_id_2.clone(), JWT_SECRET_2.to_string()), + (module_id_3.clone(), JWT_SECRET_3.to_string()), + ]); + + load_module_signing_configs(&cfg, &jwts).unwrap() +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn test_jwt_auth_rate_limiting_parallel() -> Result<()> { + setup_test_env(); + let module_id = ModuleId(JWT_MODULE_1.to_string()); + let mod_cfgs = create_multi_module_signing_configs().await; + let start_config = start_server(21000, &mod_cfgs, ADMIN_SECRET.to_string()).await?; + let mod_cfg = mod_cfgs.get(&module_id).unwrap(); + + let client = Client::new(); + let url = format!("http://{}{}", start_config.endpoint, GET_PUBKEYS_PATH); + + // Trigger rate limit with parallel invalid requests + let invalid_jwt = create_jwt(&module_id, "wrong-secret")?; + let mut handles = vec![]; + + for i in 0..start_config.jwt_auth_fail_limit { + let client = client.clone(); + let url = url.clone(); + let jwt = invalid_jwt.clone(); + + handles.push(tokio::spawn(async move { + let response = client.get(&url).bearer_auth(&jwt).send().await.unwrap(); + (i, response.status()) + })); + } + + // Wait for all invalid requests to complete + for handle in handles { + let (i, status) = handle.await?; + assert_eq!(status, StatusCode::UNAUTHORIZED, "Request {} should be unauthorized", i); + } + + // Now test that rate limiting is active + let valid_jwt = create_jwt(&module_id, &mod_cfg.jwt_secret)?; + let response = client.get(&url).bearer_auth(&valid_jwt).send().await?; + assert_eq!(response.status(), StatusCode::TOO_MANY_REQUESTS, "Should be rate limited"); + + // Wait for timeout and verify recovery + sleep(Duration::from_secs(start_config.jwt_auth_fail_timeout_seconds as u64 + 1)).await; + let response = client.get(&url).bearer_auth(&valid_jwt).send().await?; + assert_eq!(response.status(), StatusCode::OK, "Should recover after timeout"); + + info!("✓ JWT rate limiting with parallel requests works correctly"); + Ok(()) +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn test_module_registration_revocation_lifecycle() -> Result<()> { + setup_test_env(); + let module_id_1 = ModuleId(JWT_MODULE_1.to_string()); + let module_id_2 = ModuleId(JWT_MODULE_2.to_string()); + let mod_cfgs = create_multi_module_signing_configs().await; + let start_config = start_server(21001, &mod_cfgs, ADMIN_SECRET.to_string()).await?; + + let client = Client::new(); + let pubkeys_url = format!("http://{}{}", start_config.endpoint, GET_PUBKEYS_PATH); + let revoke_url = format!("http://{}{}", start_config.endpoint, REVOKE_MODULE_PATH); + + let mod_cfg_1 = mod_cfgs.get(&module_id_1).unwrap(); + let mod_cfg_2 = mod_cfgs.get(&module_id_2).unwrap(); + let jwt_1 = create_jwt(&module_id_1, &mod_cfg_1.jwt_secret)?; + let jwt_2 = create_jwt(&module_id_2, &mod_cfg_2.jwt_secret)?; + let admin_jwt = create_admin_jwt(ADMIN_SECRET.to_string())?; + + // Phase 1: Both modules should be operational + let response_1 = client.get(&pubkeys_url).bearer_auth(&jwt_1).send().await?; + assert_eq!(response_1.status(), StatusCode::OK, "Module 1 should be operational"); + + let response_2 = client.get(&pubkeys_url).bearer_auth(&jwt_2).send().await?; + assert_eq!(response_2.status(), StatusCode::OK, "Module 2 should be operational"); + + // Phase 2: Revoke module 1 + let revoke_body = format!("{{\"module_id\": \"{}\"}}", JWT_MODULE_1); + let response = client + .post(&revoke_url) + .header("content-type", "application/json") + .body(revoke_body) + .bearer_auth(&admin_jwt) + .send() + .await?; + assert_eq!(response.status(), StatusCode::OK, "Module 1 revocation should succeed"); + + // Phase 3: Verify module 1 is revoked, module 2 still works + let response_1 = client.get(&pubkeys_url).bearer_auth(&jwt_1).send().await?; + assert_eq!(response_1.status(), StatusCode::UNAUTHORIZED, "Module 1 should be revoked"); + + let response_2 = client.get(&pubkeys_url).bearer_auth(&jwt_2).send().await?; + assert_eq!(response_2.status(), StatusCode::OK, "Module 2 should still work"); + + // Phase 4: Revoke module 2 + let revoke_body = format!("{{\"module_id\": \"{}\"}}", JWT_MODULE_2); + let response = client + .post(&revoke_url) + .header("content-type", "application/json") + .body(revoke_body) + .bearer_auth(&admin_jwt) + .send() + .await?; + assert_eq!(response.status(), StatusCode::OK, "Module 2 revocation should succeed"); + + // Phase 5: Verify both modules are now revoked + let response_1 = client.get(&pubkeys_url).bearer_auth(&jwt_1).send().await?; + assert_eq!(response_1.status(), StatusCode::UNAUTHORIZED, "Module 1 should remain revoked"); + + let response_2 = client.get(&pubkeys_url).bearer_auth(&jwt_2).send().await?; + assert_eq!(response_2.status(), StatusCode::UNAUTHORIZED, "Module 2 should be revoked"); + + info!("✓ Module registration/revocation lifecycle works correctly"); + Ok(()) +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn test_concurrent_signing_requests() -> Result<()> { + setup_test_env(); + let module_id = ModuleId(JWT_MODULE_1.to_string()); + let mod_cfgs = create_multi_module_signing_configs().await; + let start_config = start_server(21002, &mod_cfgs, ADMIN_SECRET.to_string()).await?; + let mod_cfg = mod_cfgs.get(&module_id).unwrap(); + + let client = Client::new(); + let sign_url = format!("http://{}{}", start_config.endpoint, REQUEST_SIGNATURE_PATH); + let jwt = create_jwt(&module_id, &mod_cfg.jwt_secret)?; + + let concurrent_requests = 10; + let mut handles = vec![]; + + // Create concurrent signing requests + for i in 0..concurrent_requests { + let client = client.clone(); + let url = sign_url.clone(); + let jwt = jwt.clone(); + + handles.push(tokio::spawn(async move { + let object_root = + b256!("0x0123456789012345678901234567890123456789012345678901234567890123"); + let request = SignRequest::Consensus(SignConsensusRequest { + pubkey: FixedBytes(PUBKEY_1), + object_root, + }); + + let start_time = Instant::now(); + let response = client.post(&url).json(&request).bearer_auth(&jwt).send().await.unwrap(); + let duration = start_time.elapsed(); + + (i, response.status(), duration) + })); + } + + // Collect results + let mut total_duration = Duration::ZERO; + let mut success_count = 0; + + for handle in handles { + let (i, status, duration) = handle.await?; + total_duration += duration; + + if status == StatusCode::OK { + success_count += 1; + } else { + warn!("Request {} failed with status: {}", i, status); + } + } + + assert_eq!(success_count, concurrent_requests, "All signing requests should succeed"); + + let avg_duration = total_duration / concurrent_requests as u32; + info!( + "✓ {} concurrent signing requests completed, avg duration: {:?}", + concurrent_requests, avg_duration + ); + + // Ensure reasonable performance (less than 1 second per request on average) + assert!(avg_duration < Duration::from_secs(1), "Average request time should be reasonable"); + + Ok(()) +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn test_pbs_signer_auth_flow() -> Result<()> { + setup_test_env(); + let module_id = ModuleId(JWT_MODULE_1.to_string()); + let mod_cfgs = create_multi_module_signing_configs().await; + let start_config = start_server(21003, &mod_cfgs, ADMIN_SECRET.to_string()).await?; + let mod_cfg = mod_cfgs.get(&module_id).unwrap(); + + let client = Client::new(); + let pubkeys_url = format!("http://{}{}", start_config.endpoint, GET_PUBKEYS_PATH); + let sign_url = format!("http://{}{}", start_config.endpoint, REQUEST_SIGNATURE_PATH); + + // Simulate PBS module workflow: get pubkeys, then sign + let jwt = create_jwt(&module_id, &mod_cfg.jwt_secret)?; + + // Step 1: PBS gets validator pubkeys from signer + let response = client.get(&pubkeys_url).bearer_auth(&jwt).send().await?; + assert_eq!(response.status(), StatusCode::OK, "PBS should get pubkeys successfully"); + + let pubkeys_response = + response.json::().await?; + assert!(!pubkeys_response.keys.is_empty(), "Should return validator pubkeys"); + info!("✓ PBS retrieved {} pubkeys from signer", pubkeys_response.keys.len()); + + // Step 2: PBS requests signature for consensus object + let object_root = b256!("0xbeefcafefeeddeadbeefcafefeeddeadbeefcafefeeddeadbeefcafefeeddeaa"); + let request = + SignRequest::Consensus(SignConsensusRequest { pubkey: FixedBytes(PUBKEY_1), object_root }); + + let response = client.post(&sign_url).json(&request).bearer_auth(&jwt).send().await?; + assert_eq!(response.status(), StatusCode::OK, "PBS should get signature successfully"); + + let signature = response.text().await?; + assert!(!signature.is_empty(), "Signature should not be empty"); + info!("✓ PBS got signature from signer: {} chars", signature.len()); + + // Step 3: Test invalid authentication + let invalid_jwt = create_jwt(&module_id, "wrong-secret")?; + let response = client.get(&pubkeys_url).bearer_auth(&invalid_jwt).send().await?; + assert_eq!(response.status(), StatusCode::UNAUTHORIZED, "Invalid auth should be rejected"); + + info!("✓ PBS ↔ Signer authentication flow works correctly"); + Ok(()) +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn test_auth_stress_1000_requests_per_second() -> Result<()> { + setup_test_env(); + let module_id = ModuleId(JWT_MODULE_3.to_string()); + let mod_cfgs = create_multi_module_signing_configs().await; + let start_config = start_server(21004, &mod_cfgs, ADMIN_SECRET.to_string()).await?; + let mod_cfg = mod_cfgs.get(&module_id).unwrap(); + + let client = Arc::new(Client::new()); + let pubkeys_url = format!("http://{}{}", start_config.endpoint, GET_PUBKEYS_PATH); + let jwt = create_jwt(&module_id, &mod_cfg.jwt_secret)?; + + let target_rps = 1000; + let test_duration = Duration::from_secs(3); // 3 seconds of stress testing + let total_requests = (target_rps * test_duration.as_secs() as usize) / 3; // Adjust for 3-second test + + info!( + "Starting stress test: {} requests over {:?} targeting {} RPS", + total_requests, test_duration, target_rps + ); + + let success_count = Arc::new(AtomicUsize::new(0)); + let error_count = Arc::new(AtomicUsize::new(0)); + let start_time = Instant::now(); + + // Create request batches to maintain target RPS + let batch_size = 50; + let batches = (total_requests + batch_size - 1) / batch_size; + let delay_between_batches = test_duration / batches as u32; + + let mut batch_handles = vec![]; + + for batch_idx in 0..batches { + let client = Arc::clone(&client); + let url = pubkeys_url.clone(); + let jwt = jwt.clone(); + let success_count = Arc::clone(&success_count); + let error_count = Arc::clone(&error_count); + + batch_handles.push(tokio::spawn(async move { + // Wait for this batch's time slot + sleep(delay_between_batches * batch_idx as u32).await; + + let mut request_handles = vec![]; + + for _ in 0..batch_size { + let client = Arc::clone(&client); + let url = url.clone(); + let jwt = jwt.clone(); + let success_count = Arc::clone(&success_count); + let error_count = Arc::clone(&error_count); + + request_handles.push(tokio::spawn(async move { + match client.get(&url).bearer_auth(&jwt).send().await { + Ok(response) => { + if response.status() == StatusCode::OK { + success_count.fetch_add(1, Ordering::Relaxed); + } else { + error_count.fetch_add(1, Ordering::Relaxed); + } + } + Err(_) => { + error_count.fetch_add(1, Ordering::Relaxed); + } + } + })); + } + + // Wait for all requests in this batch + for handle in request_handles { + let _ = handle.await; + } + })); + } + + // Wait for all batches to complete + for handle in batch_handles { + let _ = handle.await; + } + + let actual_duration = start_time.elapsed(); + let total_processed = + success_count.load(Ordering::Relaxed) + error_count.load(Ordering::Relaxed); + let actual_rps = total_processed as f64 / actual_duration.as_secs_f64(); + + let success_rate = + success_count.load(Ordering::Relaxed) as f64 / total_processed as f64 * 100.0; + + info!("Stress test completed:"); + info!(" Duration: {:?}", actual_duration); + info!(" Total requests: {}", total_processed); + info!(" Successful: {}", success_count.load(Ordering::Relaxed)); + info!(" Errors: {}", error_count.load(Ordering::Relaxed)); + info!(" Actual RPS: {:.1}", actual_rps); + info!(" Success rate: {:.1}%", success_rate); + + // Performance assertions + assert!(actual_rps >= 500.0, "Should handle at least 500 RPS (got {:.1})", actual_rps); + assert!(success_rate >= 95.0, "Success rate should be at least 95% (got {:.1}%)", success_rate); + + info!( + "✓ Authentication stress test passed: {:.1} RPS with {:.1}% success rate", + actual_rps, success_rate + ); + + Ok(()) +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn test_jwt_failure_tracking_and_timeout() -> Result<()> { + setup_test_env(); + let module_id = ModuleId(JWT_MODULE_2.to_string()); + let mod_cfgs = create_multi_module_signing_configs().await; + let start_config = start_server(21005, &mod_cfgs, ADMIN_SECRET.to_string()).await?; + let mod_cfg = mod_cfgs.get(&module_id).unwrap(); + + let client = Client::new(); + let url = format!("http://{}{}", start_config.endpoint, GET_PUBKEYS_PATH); + + // Test failure accumulation over time + let invalid_jwt = create_jwt(&module_id, "invalid-secret")?; + let valid_jwt = create_jwt(&module_id, &mod_cfg.jwt_secret)?; + + // Make 2 failed attempts (below limit) + for i in 0..2 { + let response = client.get(&url).bearer_auth(&invalid_jwt).send().await?; + assert_eq!(response.status(), StatusCode::UNAUTHORIZED, "Attempt {} should fail", i + 1); + } + + // Valid request should still work + let response = client.get(&url).bearer_auth(&valid_jwt).send().await?; + assert_eq!(response.status(), StatusCode::OK, "Valid request should work"); + + // One more failed attempt to trigger rate limit + let response = client.get(&url).bearer_auth(&invalid_jwt).send().await?; + assert_eq!(response.status(), StatusCode::UNAUTHORIZED, "Final failed attempt"); + + // Now should be rate limited + let response = client.get(&url).bearer_auth(&valid_jwt).send().await?; + assert_eq!(response.status(), StatusCode::TOO_MANY_REQUESTS, "Should be rate limited"); + + // Test partial timeout (should still be limited) + sleep(Duration::from_secs(1)).await; + let response = client.get(&url).bearer_auth(&valid_jwt).send().await?; + assert_eq!(response.status(), StatusCode::TOO_MANY_REQUESTS, "Should still be rate limited"); + + // Wait for full timeout period + sleep(Duration::from_secs(start_config.jwt_auth_fail_timeout_seconds as u64)).await; + let response = client.get(&url).bearer_auth(&valid_jwt).send().await?; + assert_eq!(response.status(), StatusCode::OK, "Should recover after full timeout"); + + info!("✓ JWT failure tracking and timeout behavior works correctly"); + Ok(()) +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn test_cross_module_auth_isolation() -> Result<()> { + setup_test_env(); + let module_id_1 = ModuleId(JWT_MODULE_1.to_string()); + let module_id_2 = ModuleId(JWT_MODULE_2.to_string()); + let mod_cfgs = create_multi_module_signing_configs().await; + let start_config = start_server(21006, &mod_cfgs, ADMIN_SECRET.to_string()).await?; + + let mod_cfg_1 = mod_cfgs.get(&module_id_1).unwrap(); + let mod_cfg_2 = mod_cfgs.get(&module_id_2).unwrap(); + + let client = Client::new(); + let url = format!("http://{}{}", start_config.endpoint, GET_PUBKEYS_PATH); + + // Test that module 1's JWT doesn't work for module 2's context and vice versa + let jwt_1 = create_jwt(&module_id_1, &mod_cfg_1.jwt_secret)?; + let jwt_2 = create_jwt(&module_id_2, &mod_cfg_2.jwt_secret)?; + + // Both should work with their own JWTs + let response = client.get(&url).bearer_auth(&jwt_1).send().await?; + assert_eq!(response.status(), StatusCode::OK, "Module 1 JWT should work"); + + let response = client.get(&url).bearer_auth(&jwt_2).send().await?; + assert_eq!(response.status(), StatusCode::OK, "Module 2 JWT should work"); + + // Cross-auth should fail - try using module 1's secret for module 2's ID + let invalid_cross_jwt = create_jwt(&module_id_2, &mod_cfg_1.jwt_secret)?; + let response = client.get(&url).bearer_auth(&invalid_cross_jwt).send().await?; + assert_eq!(response.status(), StatusCode::UNAUTHORIZED, "Cross-module auth should fail"); + + // Trigger rate limit on module 1 + let invalid_jwt_1 = create_jwt(&module_id_1, "wrong-secret")?; + for _ in 0..start_config.jwt_auth_fail_limit { + let response = client.get(&url).bearer_auth(&invalid_jwt_1).send().await?; + assert_eq!(response.status(), StatusCode::UNAUTHORIZED); + } + + // Module 1 should be rate limited + let response = client.get(&url).bearer_auth(&jwt_1).send().await?; + assert_eq!(response.status(), StatusCode::TOO_MANY_REQUESTS, "Module 1 should be rate limited"); + + // Module 2 should still work (isolation) + let response = client.get(&url).bearer_auth(&jwt_2).send().await?; + assert_eq!(response.status(), StatusCode::OK, "Module 2 should still work"); + + info!("✓ Cross-module authentication isolation works correctly"); + Ok(()) +} diff --git a/tests/tests/integration_registration_cache.rs b/tests/tests/integration_registration_cache.rs new file mode 100644 index 00000000..24cda88b --- /dev/null +++ b/tests/tests/integration_registration_cache.rs @@ -0,0 +1,445 @@ +use std::{ + sync::Arc, + time::{Duration, Instant}, +}; + +use alloy::{ + primitives::Address, + rpc::types::beacon::{relay::ValidatorRegistration, BlsPublicKey, BlsSignature}, +}; +use cb_common::{ + signer::{random_secret, BlsPublicKey as CommonBlsPublicKey}, + types::Chain, + utils::blst_pubkey_to_alloy, +}; +use cb_pbs::{DefaultBuilderApi, PbsService, PbsState}; +use cb_tests::{ + mock_relay::{start_mock_relay_service, MockRelayState}, + mock_validator::MockValidator, + utils::{generate_mock_relay, get_pbs_config, setup_test_env, to_pbs_config}, +}; +use eyre::Result; +use futures::future::try_join_all; +use reqwest::StatusCode; +use tokio::time::sleep; +use tracing::{debug, info}; + +// Test constants +const LARGE_VALIDATOR_COUNT: usize = 1000; +const BENCHMARK_VALIDATOR_COUNT: usize = 10000; + +fn create_test_registration( + i: usize, + fee_recipient: Address, + gas_limit: u64, +) -> ValidatorRegistration { + ValidatorRegistration { + message: alloy::rpc::types::beacon::relay::ValidatorRegistrationMessage { + fee_recipient, + gas_limit, + timestamp: 1000000 + i as u64, + pubkey: BlsPublicKey::from([i as u8; 48]), + }, + signature: BlsSignature::from([0u8; 96]), + } +} + +fn create_validator_registrations(count: usize) -> Vec { + let fee_recipient = Address::from([2u8; 20]); + (0..count).map(|i| create_test_registration(i, fee_recipient, 30_000_000)).collect() +} + +async fn setup_pbs_service_with_mock_relay(pbs_port: u16) -> Result> { + let signer = random_secret(); + let pubkey: CommonBlsPublicKey = blst_pubkey_to_alloy(&signer.sk_to_pk()); + let chain = Chain::Holesky; + + // Setup mock relay + let relays = vec![generate_mock_relay(pbs_port + 1, pubkey)?]; + let mock_state = Arc::new(MockRelayState::new(chain, signer)); + tokio::spawn(start_mock_relay_service(mock_state.clone(), pbs_port + 1)); + + // Setup PBS service + let config = to_pbs_config(chain, get_pbs_config(pbs_port), relays); + let state = PbsState::new(config); + tokio::spawn(PbsService::run::<(), DefaultBuilderApi>(state)); + + sleep(Duration::from_millis(100)).await; + Ok(mock_state) +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn test_registration_cache_ttl_expiration() -> Result<()> { + setup_test_env(); + let pbs_port = 5000; + let mock_state = setup_pbs_service_with_mock_relay(pbs_port).await?; + let mock_validator = MockValidator::new(pbs_port)?; + + // Note: Cache is global and persistent, so tests may affect each other + // This is intentional for integration testing + + let registration = create_test_registration(1, Address::from([2u8; 20]), 30_000_000); + + info!("Testing initial registration"); + let res = mock_validator.do_register_custom_validators(vec![registration.clone()]).await?; + assert_eq!(res.status(), StatusCode::OK); + assert_eq!(mock_state.received_register_validator(), 1); + + info!("Testing cached registration (should skip relay call)"); + mock_state.reset_counter(); + let res = mock_validator.do_register_custom_validators(vec![registration.clone()]).await?; + assert_eq!(res.status(), StatusCode::OK); + // Should be 0 because it was cached + assert_eq!(mock_state.received_register_validator(), 0); + + info!("TTL expiration test completed - cache behavior verified through relay call patterns"); + + Ok(()) +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn test_parameter_change_detection() -> Result<()> { + setup_test_env(); + let pbs_port = 5100; + let mock_state = setup_pbs_service_with_mock_relay(pbs_port).await?; + let mock_validator = MockValidator::new(pbs_port)?; + + // Using global cache - testing real cache behavior + + let initial_registration = create_test_registration(1, Address::from([2u8; 20]), 30_000_000); + + info!("Initial registration"); + let res = + mock_validator.do_register_custom_validators(vec![initial_registration.clone()]).await?; + assert_eq!(res.status(), StatusCode::OK); + assert_eq!(mock_state.received_register_validator(), 1); + + info!("Testing fee_recipient change detection"); + mock_state.reset_counter(); + let fee_recipient_changed = create_test_registration(1, Address::from([3u8; 20]), 30_000_000); + let res = mock_validator.do_register_custom_validators(vec![fee_recipient_changed]).await?; + assert_eq!(res.status(), StatusCode::OK); + assert_eq!(mock_state.received_register_validator(), 1); // Should re-register + + info!("Testing gas_limit change detection"); + mock_state.reset_counter(); + let gas_limit_changed = create_test_registration(1, Address::from([2u8; 20]), 25_000_000); + let res = mock_validator.do_register_custom_validators(vec![gas_limit_changed]).await?; + assert_eq!(res.status(), StatusCode::OK); + assert_eq!(mock_state.received_register_validator(), 1); // Should re-register + + info!("Testing unchanged parameters (should use cache)"); + mock_state.reset_counter(); + let unchanged = create_test_registration(1, Address::from([2u8; 20]), 25_000_000); + let res = mock_validator.do_register_custom_validators(vec![unchanged]).await?; + assert_eq!(res.status(), StatusCode::OK); + assert_eq!(mock_state.received_register_validator(), 0); // Should use cache + + Ok(()) +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn test_concurrent_registration_handling() -> Result<()> { + setup_test_env(); + let pbs_port = 5200; + let mock_state = setup_pbs_service_with_mock_relay(pbs_port).await?; + let _mock_validator = MockValidator::new(pbs_port)?; + + // Using global cache - testing real cache behavior + + info!("Testing concurrent registration of {} validators", LARGE_VALIDATOR_COUNT); + let registrations = create_validator_registrations(LARGE_VALIDATOR_COUNT); + + let start_time = Instant::now(); + + // Split registrations into chunks for concurrent processing + let chunk_size = 100; + let mut handles = Vec::new(); + + for chunk in registrations.chunks(chunk_size) { + let chunk_regs = chunk.to_vec(); + let validator = MockValidator::new(pbs_port)?; + + let handle = + tokio::spawn(async move { validator.do_register_custom_validators(chunk_regs).await }); + handles.push(handle); + } + + // Wait for all concurrent registrations + let results: Result, _> = try_join_all(handles).await; + let responses = results?; + + let elapsed = start_time.elapsed(); + info!( + "Concurrent registration of {} validators completed in {:?}", + LARGE_VALIDATOR_COUNT, elapsed + ); + + // Verify all responses are successful + for response in responses { + let response = response?; + assert_eq!(response.status(), StatusCode::OK); + } + + // Verify relay received the registrations + let total_received = mock_state.received_register_validator(); + debug!("Total registrations received by relay: {}", total_received); + assert!(total_received > 0); + + // Test second round should be mostly cached + info!("Testing second round (should be mostly cached)"); + mock_state.reset_counter(); + let start_cached = Instant::now(); + + let mut cached_handles = Vec::new(); + for chunk in registrations.chunks(chunk_size) { + let chunk_regs = chunk.to_vec(); + let validator = MockValidator::new(pbs_port)?; + + let handle = + tokio::spawn(async move { validator.do_register_custom_validators(chunk_regs).await }); + cached_handles.push(handle); + } + + let cached_results: Result, _> = try_join_all(cached_handles).await; + let _cached_responses = cached_results?; + + let cached_elapsed = start_cached.elapsed(); + info!("Cached round completed in {:?}", cached_elapsed); + + // Second round should be much faster and have fewer relay calls + let cached_received = mock_state.received_register_validator(); + debug!("Cached registrations received by relay: {}", cached_received); + assert!(cached_received < total_received); + assert!(cached_elapsed < elapsed); + + Ok(()) +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn test_cache_cleanup_and_memory_management() -> Result<()> { + setup_test_env(); + let pbs_port = 5300; + let mock_state = setup_pbs_service_with_mock_relay(pbs_port).await?; + let mock_validator = MockValidator::new(pbs_port)?; + + // Using global cache - testing real cache behavior + + info!("Testing cache cleanup and memory management"); + + // Add many registrations to trigger cleanup + let large_batch = create_validator_registrations(1200); // > 1000 to trigger cleanup + + let res = mock_validator.do_register_custom_validators(large_batch).await?; + assert_eq!(res.status(), StatusCode::OK); + + info!("Cache cleanup test completed - verified through registration behavior"); + + // Test that cache continues to work correctly after cleanup + let test_registration = create_test_registration(999, Address::from([2u8; 20]), 30_000_000); + mock_state.reset_counter(); + + let res = mock_validator.do_register_custom_validators(vec![test_registration]).await?; + assert_eq!(res.status(), StatusCode::OK); + // Should be cached from previous large batch + assert_eq!(mock_state.received_register_validator(), 0); + + Ok(()) +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn test_performance_benchmark_10k_validators() -> Result<()> { + setup_test_env(); + let pbs_port = 5400; + let mock_state = setup_pbs_service_with_mock_relay(pbs_port).await?; + let _mock_validator = MockValidator::new(pbs_port)?; + + // Using global cache - testing real cache behavior + + info!("Performance benchmark: {} validator registrations", BENCHMARK_VALIDATOR_COUNT); + let registrations = create_validator_registrations(BENCHMARK_VALIDATOR_COUNT); + + // Benchmark initial registration (cold cache) + let start_cold = Instant::now(); + + // Process in larger chunks for better performance + let chunk_size = 500; + let mut cold_handles = Vec::new(); + + for chunk in registrations.chunks(chunk_size) { + let chunk_regs = chunk.to_vec(); + let validator = MockValidator::new(pbs_port)?; + + let handle = + tokio::spawn(async move { validator.do_register_custom_validators(chunk_regs).await }); + cold_handles.push(handle); + } + + let cold_results: Result, _> = try_join_all(cold_handles).await; + let _cold_responses = cold_results?; + + let cold_elapsed = start_cold.elapsed(); + let cold_throughput = BENCHMARK_VALIDATOR_COUNT as f64 / cold_elapsed.as_secs_f64(); + + info!( + "Cold cache: {} validators in {:?} ({:.2} validators/sec)", + BENCHMARK_VALIDATOR_COUNT, cold_elapsed, cold_throughput + ); + + // Benchmark cached registration (warm cache) + mock_state.reset_counter(); + let start_warm = Instant::now(); + + let mut warm_handles = Vec::new(); + for chunk in registrations.chunks(chunk_size) { + let chunk_regs = chunk.to_vec(); + let validator = MockValidator::new(pbs_port)?; + + let handle = + tokio::spawn(async move { validator.do_register_custom_validators(chunk_regs).await }); + warm_handles.push(handle); + } + + let warm_results: Result, _> = try_join_all(warm_handles).await; + let _warm_responses = warm_results?; + + let warm_elapsed = start_warm.elapsed(); + let warm_throughput = BENCHMARK_VALIDATOR_COUNT as f64 / warm_elapsed.as_secs_f64(); + + info!( + "Warm cache: {} validators in {:?} ({:.2} validators/sec)", + BENCHMARK_VALIDATOR_COUNT, warm_elapsed, warm_throughput + ); + + // Verify performance improvement + let speedup = cold_elapsed.as_secs_f64() / warm_elapsed.as_secs_f64(); + info!("Cache speedup: {:.2}x", speedup); + + // Cache should provide significant speedup + assert!(speedup > 2.0, "Cache should provide at least 2x speedup, got {:.2}x", speedup); + + // Verify most requests were cached in second round + let warm_relay_calls = mock_state.received_register_validator(); + let cache_hit_rate = 1.0 - (warm_relay_calls as f64 / BENCHMARK_VALIDATOR_COUNT as f64); + info!("Cache hit rate: {:.2}%", cache_hit_rate * 100.0); + + assert!( + cache_hit_rate > 0.95, + "Cache hit rate should be > 95%, got {:.2}%", + cache_hit_rate * 100.0 + ); + + info!("Performance benchmark completed successfully"); + + Ok(()) +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn test_cache_race_conditions() -> Result<()> { + setup_test_env(); + let pbs_port = 5500; + let mock_state = setup_pbs_service_with_mock_relay(pbs_port).await?; + + // Using global cache - testing real cache behavior + + info!("Testing race conditions with concurrent cache access"); + + // Create same registration for all concurrent requests + let same_registration = create_test_registration(42, Address::from([2u8; 20]), 30_000_000); + + let mut handles = Vec::new(); + let concurrent_requests = 50; + + // Spawn many concurrent requests for the same validator + for _ in 0..concurrent_requests { + let registration = same_registration.clone(); + let validator = MockValidator::new(pbs_port)?; + + let handle = tokio::spawn(async move { + validator.do_register_custom_validators(vec![registration]).await + }); + handles.push(handle); + } + + // Wait for all to complete + let results: Result, _> = try_join_all(handles).await; + let responses = results?; + + // All should succeed + for response in responses { + let response = response?; + assert_eq!(response.status(), StatusCode::OK); + } + + // Due to cache, relay should receive far fewer calls than total requests + let total_relay_calls = mock_state.received_register_validator(); + info!( + "Total relay calls for {} concurrent requests: {}", + concurrent_requests, total_relay_calls + ); + + // Should be much less than the number of concurrent requests due to caching + assert!( + total_relay_calls < concurrent_requests / 2, + "Expected far fewer relay calls due to caching, got {} out of {} requests", + total_relay_calls, + concurrent_requests + ); + + info!("Race condition test completed - cache prevented duplicate registrations"); + + Ok(()) +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn test_mixed_cached_and_new_registrations() -> Result<()> { + setup_test_env(); + let pbs_port = 5600; + let mock_state = setup_pbs_service_with_mock_relay(pbs_port).await?; + let mock_validator = MockValidator::new(pbs_port)?; + + // Using global cache - testing real cache behavior + + info!("Testing mixed batch of cached and new registrations"); + + // Register initial batch + let initial_batch = create_validator_registrations(100); + let res = mock_validator.do_register_custom_validators(initial_batch.clone()).await?; + assert_eq!(res.status(), StatusCode::OK); + let _initial_relay_calls = mock_state.received_register_validator(); + + // Create mixed batch: 50 cached + 50 new + let mut mixed_batch = initial_batch[0..50].to_vec(); // Already cached + let new_registrations = create_validator_registrations(50) + .into_iter() + .enumerate() + .map(|(i, mut reg)| { + // Make them unique by modifying pubkey + reg.message.pubkey = BlsPublicKey::from([(i + 200) as u8; 48]); + reg + }) + .collect::>(); + mixed_batch.extend(new_registrations); + + // Process mixed batch + mock_state.reset_counter(); + let res = mock_validator.do_register_custom_validators(mixed_batch).await?; + assert_eq!(res.status(), StatusCode::OK); + + let mixed_relay_calls = mock_state.received_register_validator(); + info!("Mixed batch relay calls: {} (should be ~50 for new validators only)", mixed_relay_calls); + + // Should only process new validators (around 50), not the cached ones + assert!( + mixed_relay_calls < 70, + "Expected ~50 relay calls for new validators, got {}", + mixed_relay_calls + ); + assert!( + mixed_relay_calls > 30, + "Expected at least 30 relay calls for new validators, got {}", + mixed_relay_calls + ); + + Ok(()) +} diff --git a/tests/tests/integration_relay_coordination.rs b/tests/tests/integration_relay_coordination.rs new file mode 100644 index 00000000..161a1274 --- /dev/null +++ b/tests/tests/integration_relay_coordination.rs @@ -0,0 +1,563 @@ +use std::{ + sync::Arc, + time::{Duration, Instant}, +}; + +use alloy::{ + primitives::Address, + rpc::types::beacon::{relay::ValidatorRegistration, BlsPublicKey, BlsSignature}, +}; +use cb_common::{ + config::RelayConfig, + pbs::{RelayClient, RelayEntry}, + signer::random_secret, + types::Chain, + utils::blst_pubkey_to_alloy, +}; +use cb_pbs::{DefaultBuilderApi, PbsService, PbsState}; +use cb_tests::{ + mock_relay::{start_mock_relay_service, MockRelayState}, + mock_validator::MockValidator, + utils::{ + generate_mock_relay, generate_mock_relay_with_batch_size, get_local_address, + get_pbs_config, setup_test_env, to_pbs_config, + }, +}; +use eyre::Result; +use futures::future::{join_all, try_join_all}; +use reqwest::StatusCode; +use tokio::time::sleep; +use tracing::info; + +// Helper function to create validator registrations +fn create_validator_registrations(count: usize) -> Vec { + (0..count) + .map(|i| { + let mut pubkey_bytes = [0u8; 48]; + pubkey_bytes[0] = (i & 0xFF) as u8; + pubkey_bytes[1] = ((i >> 8) & 0xFF) as u8; + + ValidatorRegistration { + message: alloy::rpc::types::beacon::relay::ValidatorRegistrationMessage { + fee_recipient: Address::from([i as u8; 20]), + gas_limit: 30_000_000, + timestamp: 1234567890, + pubkey: BlsPublicKey::from(pubkey_bytes), + }, + signature: BlsSignature::from([0u8; 96]), + } + }) + .collect() +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn test_batch_registration_varying_sizes() -> Result<()> { + setup_test_env(); + let pbs_port = 5000; + + info!("Testing batch registration with varying sizes"); + + // Test with multiple batch sizes + let batch_configs = vec![ + (10, 5), // 10 validators, batch size 5 (2 batches) + (100, 25), // 100 validators, batch size 25 (4 batches) + (1000, 100), // 1000 validators, batch size 100 (10 batches) + ]; + + for (validator_count, batch_size) in batch_configs { + info!("Testing {} validators with batch size {}", validator_count, batch_size); + + // Create relay with specific batch size + let signer = random_secret(); + let pubkey = blst_pubkey_to_alloy(&signer.sk_to_pk()); + let relay = generate_mock_relay_with_batch_size(pbs_port + 1, pubkey, batch_size)?; + + let mock_state = Arc::new(MockRelayState::new(Chain::Holesky, signer)); + tokio::spawn(start_mock_relay_service(mock_state.clone(), pbs_port + 1)); + + let config = to_pbs_config(Chain::Holesky, get_pbs_config(pbs_port), vec![relay]); + let state = PbsState::new(config); + tokio::spawn(PbsService::run::<(), DefaultBuilderApi>(state)); + + sleep(Duration::from_millis(100)).await; + + // Create MockValidator with unique port to avoid conflicts + let mock_validator = MockValidator::new(pbs_port + validator_count as u16)?; + + // Create and send registrations + let registrations = create_validator_registrations(validator_count); + let start_time = Instant::now(); + let res = mock_validator.do_register_custom_validators(registrations).await?; + let elapsed = start_time.elapsed(); + + assert_eq!(res.status(), StatusCode::OK); + + // Verify relay received the expected number of batch calls + let calls = mock_state.received_register_validator(); + let expected_calls = (validator_count + batch_size - 1) / batch_size; // ceiling division + assert!( + calls >= expected_calls as u64, + "Expected at least {} calls for {} validators with batch size {}, got {}", + expected_calls, + validator_count, + batch_size, + calls + ); + + info!( + "Batch test completed: {} validators, batch size {}, {} calls, {:?} elapsed", + validator_count, batch_size, calls, elapsed + ); + } + + Ok(()) +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn test_parallel_relay_submissions_select_ok() -> Result<()> { + setup_test_env(); + let pbs_port = 5100; + + info!("Testing parallel relay submissions with select_ok pattern"); + + // Create multiple relays + let mut relays = vec![]; + let mut mock_states = vec![]; + + for i in 0..3 { + let signer = random_secret(); + let pubkey = blst_pubkey_to_alloy(&signer.sk_to_pk()); + let relay = generate_mock_relay(pbs_port + i + 1, pubkey)?; + relays.push(relay); + + let mock_state = Arc::new(MockRelayState::new(Chain::Holesky, signer)); + mock_states.push(mock_state.clone()); + tokio::spawn(start_mock_relay_service(mock_state, pbs_port + i + 1)); + } + + // Configure PBS with wait_all_registrations = false for select_ok behavior + let mut pbs_config = get_pbs_config(pbs_port); + pbs_config.wait_all_registrations = false; + + let config = to_pbs_config(Chain::Holesky, pbs_config, relays); + let state = PbsState::new(config); + tokio::spawn(PbsService::run::<(), DefaultBuilderApi>(state)); + + sleep(Duration::from_millis(100)).await; + + let mock_validator = MockValidator::new(pbs_port)?; + let registrations = create_validator_registrations(50); + + let start_time = Instant::now(); + let res = mock_validator.do_register_custom_validators(registrations).await?; + let elapsed = start_time.elapsed(); + + assert_eq!(res.status(), StatusCode::OK); + + // With select_ok, we expect faster completion but still successful registration + let successful_relays = + mock_states.iter().filter(|s| s.received_register_validator() > 0).count(); + assert!(successful_relays >= 1, "At least one relay should have received registrations"); + + info!("Select OK test completed in {:?}, {} relays participated", elapsed, successful_relays); + + Ok(()) +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn test_relay_failure_recovery_with_timeout() -> Result<()> { + setup_test_env(); + let pbs_port = 5200; + + info!("Testing relay failure recovery with timeout and retry"); + + let chain = Chain::Holesky; + let mut relays = vec![]; + + // Relay 1: Working normally + let signer1 = random_secret(); + let pubkey1 = blst_pubkey_to_alloy(&signer1.sk_to_pk()); + relays.push(generate_mock_relay(pbs_port + 1, pubkey1)?); + let mock_state1 = Arc::new(MockRelayState::new(chain, signer1)); + tokio::spawn(start_mock_relay_service(mock_state1.clone(), pbs_port + 1)); + + // Relay 2: Will return 500 errors (simulated failure) + let signer2 = random_secret(); + let pubkey2 = blst_pubkey_to_alloy(&signer2.sk_to_pk()); + relays.push(generate_mock_relay(pbs_port + 2, pubkey2)?); + let mock_state2 = Arc::new(MockRelayState::new(chain, signer2)); + mock_state2.set_response_override(StatusCode::INTERNAL_SERVER_ERROR); + tokio::spawn(start_mock_relay_service(mock_state2.clone(), pbs_port + 2)); + + // Relay 3: Not started (will timeout) + let signer3 = random_secret(); + let pubkey3 = blst_pubkey_to_alloy(&signer3.sk_to_pk()); + relays.push(generate_mock_relay(pbs_port + 3, pubkey3)?); + // Don't start this relay to simulate timeout + + let mut pbs_config = get_pbs_config(pbs_port); + pbs_config.timeout_register_validator_ms = 1000; // 1 second timeout + pbs_config.register_validator_retry_limit = 3; + + let config = to_pbs_config(chain, pbs_config, relays); + let state = PbsState::new(config); + tokio::spawn(PbsService::run::<(), DefaultBuilderApi>(state)); + + sleep(Duration::from_millis(100)).await; + + let mock_validator = MockValidator::new(pbs_port)?; + let registrations = create_validator_registrations(10); + + let start_time = Instant::now(); + let res = mock_validator.do_register_custom_validators(registrations).await?; + let elapsed = start_time.elapsed(); + + // Should succeed because relay 1 works + assert_eq!(res.status(), StatusCode::OK); + + // Verify that relay 1 received the registrations (working relay) + assert!( + mock_state1.received_register_validator() > 0, + "Working relay should receive registrations" + ); + + // Verify that relay 2 was attempted (even though it failed) + assert!( + mock_state2.received_register_validator() > 0, + "Failed relay should have been attempted" + ); + + info!( + "Failure recovery test completed in {:?}, working relay handled {} requests", + elapsed, + mock_state1.received_register_validator() + ); + + Ok(()) +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn test_timing_games_and_bid_boost() -> Result<()> { + setup_test_env(); + let pbs_port = 5300; + + info!("Testing timing games and bid boost configurations"); + + // Create relay with timing games enabled and bid boost + let signer = random_secret(); + let pubkey = blst_pubkey_to_alloy(&signer.sk_to_pk()); + + // Create a relay with timing games configuration + let entry = RelayEntry { + id: format!("mock_{}", pbs_port + 1), + pubkey, + url: get_local_address(pbs_port + 1).parse()?, + }; + let config = RelayConfig { + entry, + id: None, + headers: None, + get_params: None, + enable_timing_games: true, + target_first_request_ms: Some(100), + frequency_get_header_ms: Some(50), + validator_registration_batch_size: None, + bid_boost: Some(1.05), // 5% boost + }; + let relay = RelayClient::new(config)?; + + let mock_state = Arc::new(MockRelayState::new(Chain::Holesky, signer)); + tokio::spawn(start_mock_relay_service(mock_state.clone(), pbs_port + 1)); + + let config = to_pbs_config(Chain::Holesky, get_pbs_config(pbs_port), vec![relay]); + let state = PbsState::new(config); + tokio::spawn(PbsService::run::<(), DefaultBuilderApi>(state)); + + sleep(Duration::from_millis(100)).await; + + let mock_validator = MockValidator::new(pbs_port)?; + + // Test get_header request with timing games + let start_time = Instant::now(); + let res = mock_validator.do_get_header(Some(pubkey)).await?; + let elapsed = start_time.elapsed(); + + // Should succeed and timing should reflect timing games configuration + assert!(res.status() == StatusCode::OK || res.status() == StatusCode::NO_CONTENT); + assert!(elapsed >= Duration::from_millis(50), "Timing games should introduce delay"); + + info!( + "Timing games test completed in {:?}, received {} get_header calls", + elapsed, + mock_state.received_get_header() + ); + + // Test validator registration works with timing configuration + let registrations = create_validator_registrations(5); + let res = mock_validator.do_register_custom_validators(registrations).await?; + assert_eq!(res.status(), StatusCode::OK); + + Ok(()) +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn test_relay_failover_under_load() -> Result<()> { + setup_test_env(); + let pbs_port = 5400; + + info!("Testing relay failover under load (100 req/sec)"); + + // Setup multiple relays with different failure patterns + let mut relays = vec![]; + let mut mock_states = vec![]; + + for i in 0..3 { + let signer = random_secret(); + let pubkey = blst_pubkey_to_alloy(&signer.sk_to_pk()); + let relay = generate_mock_relay(pbs_port + i + 1, pubkey)?; + relays.push(relay); + + let mock_state = Arc::new(MockRelayState::new(Chain::Holesky, signer)); + + // Configure failure patterns + if i == 1 { + // Relay 2 returns 503 (service unavailable) + mock_state.set_response_override(StatusCode::SERVICE_UNAVAILABLE); + } else if i == 2 { + // Relay 3 returns 429 (rate limit) + mock_state.set_response_override(StatusCode::TOO_MANY_REQUESTS); + } + + mock_states.push(mock_state.clone()); + tokio::spawn(start_mock_relay_service(mock_state, pbs_port + i + 1)); + } + + let config = to_pbs_config(Chain::Holesky, get_pbs_config(pbs_port), relays); + let state = PbsState::new(config); + tokio::spawn(PbsService::run::<(), DefaultBuilderApi>(state)); + + sleep(Duration::from_millis(100)).await; + + // Simulate 100 req/sec for 2 seconds + let target_rps = 100; + let duration_secs = 2; + let total_requests = target_rps * duration_secs; + let request_interval = Duration::from_millis(1000 / target_rps as u64); + + let mut handles = vec![]; + let start_time = Instant::now(); + + for i in 0..total_requests { + let port = pbs_port + 2000 + i as u16; // Use unique ports for each request + let handle = tokio::spawn(async move { + let validator = MockValidator::new(port)?; + let registrations = create_validator_registrations(1); + validator.do_register_custom_validators(registrations).await + }); + handles.push(handle); + + if i < total_requests - 1 { + sleep(request_interval).await; + } + } + + // Wait for all requests to complete + let results: Vec<_> = join_all(handles).await; + let elapsed = start_time.elapsed(); + + // Count successful requests + let successful = results + .iter() + .filter(|r| { + r.as_ref() + .ok() + .and_then(|res| res.as_ref().ok()) + .map(|res| res.status() == StatusCode::OK) + .unwrap_or(false) + }) + .count(); + + let success_rate = (successful as f64 / total_requests as f64) * 100.0; + let actual_rps = total_requests as f64 / elapsed.as_secs_f64(); + + info!( + "Load test completed: {} requests in {:?}, {:.1} req/sec, {:.1}% success rate", + total_requests, elapsed, actual_rps, success_rate + ); + + // Verify failover worked (should have high success rate despite some relays failing) + assert!(success_rate >= 70.0, "Success rate should be at least 70% with failover"); + assert!(actual_rps >= 50.0, "Should achieve at least 50 req/sec"); + + // Check relay distribution + for (i, state) in mock_states.iter().enumerate() { + let calls = state.received_register_validator(); + info!("Relay {} received {} calls", i + 1, calls); + } + + Ok(()) +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn test_concurrent_relay_operations() -> Result<()> { + setup_test_env(); + let pbs_port = 5500; + + info!("Testing concurrent relay operations"); + + // Setup relays + let mut relays = vec![]; + for i in 0..2 { + let signer = random_secret(); + let pubkey = blst_pubkey_to_alloy(&signer.sk_to_pk()); + let relay = generate_mock_relay(pbs_port + i + 1, pubkey)?; + relays.push(relay.clone()); + + let mock_state = Arc::new(MockRelayState::new(Chain::Holesky, signer)); + tokio::spawn(start_mock_relay_service(mock_state, pbs_port + i + 1)); + } + + let config = to_pbs_config(Chain::Holesky, get_pbs_config(pbs_port), relays.clone()); + let state = PbsState::new(config); + tokio::spawn(PbsService::run::<(), DefaultBuilderApi>(state)); + + sleep(Duration::from_millis(100)).await; + + // Run concurrent operations: get_header and register_validator + let mut handles = vec![]; + + // 10 get_header requests + for i in 0..10 { + let port = pbs_port + 3000 + i as u16; + let pubkey = relays[i % 2].pubkey(); + handles.push(tokio::spawn(async move { + let validator = MockValidator::new(port)?; + validator.do_get_header(Some(pubkey)).await + })); + } + + // 10 register_validator requests + for i in 0..10 { + let port = pbs_port + 3100 + i as u16; + handles.push(tokio::spawn(async move { + let validator = MockValidator::new(port)?; + let registrations = create_validator_registrations(5); + validator.do_register_custom_validators(registrations).await + })); + } + + let start_time = Instant::now(); + let results = try_join_all(handles).await?; + let elapsed = start_time.elapsed(); + + // Verify all operations completed successfully + let get_header_responses = &results[0..10]; + let register_responses = &results[10..20]; + + for response in get_header_responses { + match response { + Ok(res) => { + assert!( + res.status() == StatusCode::OK || res.status() == StatusCode::NO_CONTENT, + "Get header should return 200 or 204" + ); + } + Err(e) => { + panic!("Get header request failed: {}", e); + } + } + } + + for response in register_responses { + match response { + Ok(res) => { + assert_eq!(res.status(), StatusCode::OK, "Register validator should return 200"); + } + Err(e) => { + panic!("Register validator request failed: {}", e); + } + } + } + + info!("Concurrent operations test completed in {:?}", elapsed); + assert!(elapsed < Duration::from_secs(5), "Concurrent operations should complete quickly"); + + Ok(()) +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn test_relay_coordination_performance_measurements() -> Result<()> { + setup_test_env(); + let pbs_port = 5600; + + info!("Testing relay coordination performance measurements"); + + // Setup multiple relays with different batch sizes + let batch_sizes = vec![10, 20, 50]; + let mut relays = vec![]; + let mut mock_states = vec![]; + + for (i, batch_size) in batch_sizes.iter().enumerate() { + let signer = random_secret(); + let pubkey = blst_pubkey_to_alloy(&signer.sk_to_pk()); + let relay = + generate_mock_relay_with_batch_size(pbs_port + i as u16 + 1, pubkey, *batch_size)?; + relays.push(relay); + + let mock_state = Arc::new(MockRelayState::new(Chain::Holesky, signer)); + mock_states.push(mock_state.clone()); + tokio::spawn(start_mock_relay_service(mock_state, pbs_port + i as u16 + 1)); + } + + let config = to_pbs_config(Chain::Holesky, get_pbs_config(pbs_port), relays); + let state = PbsState::new(config); + tokio::spawn(PbsService::run::<(), DefaultBuilderApi>(state)); + + sleep(Duration::from_millis(100)).await; + + let mock_validator = MockValidator::new(pbs_port)?; + + // Create batch for registration + let registrations = create_validator_registrations(200); + let res = mock_validator.do_register_custom_validators(registrations.clone()).await?; + assert_eq!(res.status(), StatusCode::OK); + + // Performance measurement 1: Sequential batching + let start_seq = Instant::now(); + for batch in registrations.chunks(20) { + let _ = mock_validator.do_register_custom_validators(batch.to_vec()).await?; + } + let seq_duration = start_seq.elapsed(); + + // Performance measurement 2: Concurrent batching (using new validators for each) + let start_concurrent = Instant::now(); + let mut handles = vec![]; + for (i, batch) in registrations.chunks(20).enumerate() { + let batch = batch.to_vec(); + let port = pbs_port + 1000 + i as u16; // Use unique ports + handles.push(tokio::spawn(async move { + let validator = MockValidator::new(port)?; + validator.do_register_custom_validators(batch).await + })); + } + let _ = try_join_all(handles).await?; + let concurrent_duration = start_concurrent.elapsed(); + + // Verify performance improvements + let speedup = seq_duration.as_secs_f64() / concurrent_duration.as_secs_f64(); + info!( + "Performance test: Sequential {:?}, Concurrent {:?}, Speedup {:.2}x", + seq_duration, concurrent_duration, speedup + ); + + assert!(speedup >= 1.5, "Concurrent batching should be at least 1.5x faster"); + + // Check relay load distribution + for (i, (state, batch_size)) in mock_states.iter().zip(batch_sizes.iter()).enumerate() { + let calls = state.received_register_validator(); + info!("Relay {} (batch size {}): {} calls", i + 1, batch_size, calls); + assert!(calls > 0, "Each relay should receive some calls"); + } + + Ok(()) +} diff --git a/tests/tests/module_integration.rs b/tests/tests/module_integration.rs new file mode 100644 index 00000000..bb584c2a --- /dev/null +++ b/tests/tests/module_integration.rs @@ -0,0 +1,514 @@ +use cb_common::{ + config::{CommitBoostConfig, StaticModuleConfig}, + types::ModuleId, +}; +use cb_tests::{conflict_matrix::ConflictMatrix, utils::get_default_config}; +use eyre::Result; +use std::collections::HashMap; +use std::sync::Arc; +use std::time::{Duration, Instant}; +use tokio::sync::{broadcast, RwLock}; + +/// Helper to create a test configuration with specific modules +struct TestConfig { + config: CommitBoostConfig, +} + +impl TestConfig { + fn new() -> Self { + Self { config: get_default_config() } + } + + fn with_module(mut self, id: &str, _port: u16) -> Self { + let module = StaticModuleConfig { + id: ModuleId::from(id.to_string()), + kind: cb_common::config::ModuleKind::Commit, + docker_image: "test_image".to_string(), + signing_id: Default::default(), + env: None, + env_file: None, + }; + + if self.config.modules.is_none() { + self.config.modules = Some(vec![]); + } + + self.config.modules.as_mut().unwrap().push(module); + self + } + + fn validate(self) -> Result { + // Validate using conflict matrix + let matrix = ConflictMatrix::new(); + + if let Some(ref modules) = self.config.modules { + let module_ids: Vec = modules.iter().map(|m| m.id.to_string()).collect(); + + matrix + .validate_module_list(&module_ids) + .map_err(|e| eyre::eyre!("Module validation failed: {}", e))?; + } + + Ok(self.config) + } +} + +#[test] +fn test_compatible_modules_pass_validation() { + let config = TestConfig::new() + .with_module("pbs", 8001) + .with_module("signer", 8002) + .with_module("metrics", 8003) + .validate(); + + assert!(config.is_ok(), "Compatible modules should pass validation"); +} + +#[test] +fn test_incompatible_modules_fail_validation() { + let config = TestConfig::new() + .with_module("pbs_relay_a", 8001) + .with_module("pbs_relay_b", 8002) + .validate(); + + assert!(config.is_err(), "Incompatible modules should fail validation"); + + let error_msg = config.unwrap_err().to_string(); + assert!( + error_msg.contains("pbs_relay_a") && error_msg.contains("pbs_relay_b"), + "Error should mention both conflicting modules: {}", + error_msg + ); +} + +#[test] +fn test_port_conflict_detection() { + let mut used_ports = std::collections::HashSet::new(); + + // Simulate port usage check + let ports_to_check = vec![8080, 8081, 8080]; // Duplicate port + + for port in ports_to_check { + assert!( + used_ports.insert(port) || port == 8080, // 8080 appears twice, so second insert should fail + "Port {} should be unique", + port + ); + } + + // The duplicate port (8080) should have been detected + assert_eq!(used_ports.len(), 2, "Should only have 2 unique ports"); +} + +#[test] +fn test_module_id_uniqueness_enforcement() { + // Test with duplicate module IDs + let duplicate_modules = vec![ + "test_module".to_string(), + "other_module".to_string(), + "test_module".to_string(), // Duplicate + ]; + + // This should be caught by the uniqueness check (not the conflict matrix) + let mut seen_ids = std::collections::HashSet::new(); + let mut has_duplicates = false; + + for module_id in &duplicate_modules { + if !seen_ids.insert(module_id.clone()) { + has_duplicates = true; + break; + } + } + + assert!(has_duplicates, "Duplicate module IDs should be detected"); +} + +#[test] +fn test_conflict_matrix_integration() { + let matrix = ConflictMatrix::new(); + + // Test that matrix correctly identifies compatible modules + let compatible_config = vec!["pbs".to_string(), "signer".to_string(), "metrics".to_string()]; + + assert!( + matrix.validate_module_list(&compatible_config).is_ok(), + "Known compatible modules should pass validation" + ); + + // Test that matrix correctly identifies incompatible modules + let incompatible_config = vec!["custom_pbs".to_string(), "pbs".to_string()]; + + assert!( + matrix.validate_module_list(&incompatible_config).is_err(), + "Known incompatible modules should fail validation" + ); +} + +#[test] +fn test_edge_cases() { + let matrix = ConflictMatrix::new(); + + // Test empty configuration + assert!(matrix.validate_module_list(&[]).is_ok(), "Empty module list should be valid"); + + // Test single module + let single_module = vec!["pbs".to_string()]; + assert!(matrix.validate_module_list(&single_module).is_ok(), "Single module should be valid"); + + // Test unknown modules (should default to compatible) + let unknown_modules = vec!["unknown_module_1".to_string(), "unknown_module_2".to_string()]; + assert!( + matrix.validate_module_list(&unknown_modules).is_ok(), + "Unknown modules should default to compatible" + ); +} + +#[test] +fn test_metric_namespace_separation() { + // Test that different modules use different metric prefixes + let module_metrics = + vec![("pbs", "cb_pbs_"), ("signer", "cb_signer_"), ("metrics", "cb_metrics_")]; + + // Check that no prefix is a prefix of another + for (i, (module_a, prefix_a)) in module_metrics.iter().enumerate() { + for (j, (module_b, prefix_b)) in module_metrics.iter().enumerate() { + if i != j { + assert!( + !prefix_a.starts_with(prefix_b) || prefix_a == prefix_b, + "Module '{}' prefix '{}' conflicts with module '{}' prefix '{}'", + module_a, + prefix_a, + module_b, + prefix_b + ); + } + } + } +} + +#[test] +fn test_real_world_configuration() { + // Test a realistic configuration that should work + let config = TestConfig::new() + .with_module("commit_boost_pbs", 18550) + .with_module("commit_boost_signer", 18551) + .validate(); + + assert!(config.is_ok(), "Realistic configuration should be valid: {:?}", config.err()); +} + +#[tokio::test] +async fn test_module_startup_sequence() { + // This test simulates the module startup process + // In a real implementation, this would actually start modules + + let modules = vec![("pbs", 8080), ("signer", 8081), ("metrics", 9090)]; + + // Check conflicts before "starting" + let matrix = ConflictMatrix::new(); + let module_ids: Vec = modules.iter().map(|(id, _)| id.to_string()).collect(); + + let validation_result = matrix.validate_module_list(&module_ids); + assert!( + validation_result.is_ok(), + "Module startup should pass conflict validation: {:?}", + validation_result.err() + ); + + // Simulate port binding check + let mut bound_ports = std::collections::HashSet::new(); + for (module_id, port) in modules { + assert!( + bound_ports.insert(port), + "Module '{}' cannot bind to port {} - already in use", + module_id, + port + ); + } +} + +// Configuration validation tests +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn test_configuration_validation_pipeline() { + // Test comprehensive configuration validation + let mut config = get_default_config(); + + // Valid configuration should pass + assert!(validate_config(&config).is_ok()); + + // Test invalid port range + config.pbs.pbs_config.port = 0; + assert!(validate_config(&config).is_err()); + config.pbs.pbs_config.port = 18550; + + // Test invalid timeout values + config.pbs.pbs_config.timeout_get_header_ms = 0; + assert!(validate_config(&config).is_err()); + config.pbs.pbs_config.timeout_get_header_ms = 12000; + + // Test conflicting modules + config.modules = Some(vec![create_test_module("pbs"), create_test_module("custom_pbs")]); + assert!(validate_config(&config).is_err()); + + // Test valid module configuration + config.modules = Some(vec![ + create_test_module("pbs"), + create_test_module("signer"), + create_test_module("metrics"), + ]); + assert!(validate_config(&config).is_ok()); +} + +fn validate_config(config: &CommitBoostConfig) -> Result<()> { + // Validate port ranges + if config.pbs.pbs_config.port == 0 { + return Err(eyre::eyre!("Invalid PBS port")); + } + + // Validate timeouts + if config.pbs.pbs_config.timeout_get_header_ms == 0 { + return Err(eyre::eyre!("Invalid timeout value")); + } + + // Validate module conflicts + if let Some(ref modules) = config.modules { + let matrix = ConflictMatrix::new(); + let module_ids: Vec = modules.iter().map(|m| m.id.to_string()).collect(); + matrix.validate_module_list(&module_ids)?; + } + + Ok(()) +} + +fn create_test_module(id: &str) -> StaticModuleConfig { + StaticModuleConfig { + id: ModuleId::from(id.to_string()), + kind: cb_common::config::ModuleKind::Commit, + docker_image: "test_image".to_string(), + signing_id: Default::default(), + env: None, + env_file: None, + } +} + +// Metrics aggregation tests +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn test_metrics_aggregation_contracts() { + // Test that metrics from different modules are properly aggregated + let metrics_store = Arc::new(RwLock::new(HashMap::::new())); + + // Simulate multiple modules writing metrics concurrently + let mut handles = vec![]; + + for module_id in ["pbs", "signer", "metrics"] { + let store = metrics_store.clone(); + let id = module_id.to_string(); + + handles.push(tokio::spawn(async move { + for i in 0..100 { + let mut metrics = store.write().await; + let key = format!("cb_{}_{}", id, "requests_total"); + *metrics.entry(key).or_insert(0) += 1; + + if i % 10 == 0 { + tokio::time::sleep(Duration::from_millis(1)).await; + } + } + })); + } + + // Wait for all modules to complete + for handle in handles { + handle.await.unwrap(); + } + + // Verify metrics aggregation + let metrics = metrics_store.read().await; + assert_eq!(*metrics.get("cb_pbs_requests_total").unwrap(), 100); + assert_eq!(*metrics.get("cb_signer_requests_total").unwrap(), 100); + assert_eq!(*metrics.get("cb_metrics_requests_total").unwrap(), 100); + + // Verify namespace separation + for (key, _) in metrics.iter() { + assert!(key.starts_with("cb_"), "All metrics should have cb_ prefix"); + } +} + +// Event propagation tests +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn test_event_propagation_between_modules() { + // Create event bus for module communication + let (tx, _) = broadcast::channel::(100); + + // Create receivers for each module + let mut pbs_rx = tx.subscribe(); + let mut signer_rx = tx.subscribe(); + let mut metrics_rx = tx.subscribe(); + + // Test event propagation + let events = vec![ + ModuleEvent::Started("pbs".to_string()), + ModuleEvent::RequestReceived("signer".to_string()), + ModuleEvent::MetricUpdated("metrics".to_string()), + ModuleEvent::Stopped("pbs".to_string()), + ]; + + // Send events + for event in &events { + tx.send(event.clone()).unwrap(); + } + + // Verify all modules receive all events + for event in &events { + assert_eq!(pbs_rx.recv().await.unwrap(), *event); + assert_eq!(signer_rx.recv().await.unwrap(), *event); + assert_eq!(metrics_rx.recv().await.unwrap(), *event); + } +} + +#[derive(Clone, Debug, PartialEq)] +enum ModuleEvent { + Started(String), + RequestReceived(String), + MetricUpdated(String), + Stopped(String), +} + +// Performance test for configuration validation +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn test_config_validation_performance() { + // Test that we can validate 100 configurations per second + let start = Instant::now(); + let iterations = 100; + + for _ in 0..iterations { + let config = TestConfig::new() + .with_module("pbs", 8001) + .with_module("signer", 8002) + .with_module("metrics", 8003) + .validate(); + + assert!(config.is_ok()); + } + + let elapsed = start.elapsed(); + let rate = iterations as f64 / elapsed.as_secs_f64(); + + assert!( + rate >= 100.0, + "Configuration validation too slow: {:.2} configs/sec (expected >= 100)", + rate + ); +} + +// Test module dependency resolution +#[test] +fn test_module_dependency_resolution() { + // Some modules may depend on others + let dependencies = HashMap::from([ + ("pbs", vec!["signer"]), + ("metrics", vec![]), + ("custom_module", vec!["pbs", "signer"]), + ]); + + // Test valid dependency order + let load_order = vec!["signer", "pbs", "metrics", "custom_module"]; + assert!(validate_dependencies(&load_order, &dependencies)); + + // Test invalid dependency order (pbs before signer) + let invalid_order = vec!["pbs", "signer", "metrics"]; + assert!(!validate_dependencies(&invalid_order, &dependencies)); +} + +fn validate_dependencies(load_order: &[&str], dependencies: &HashMap<&str, Vec<&str>>) -> bool { + let mut loaded = std::collections::HashSet::new(); + + for module in load_order { + if let Some(deps) = dependencies.get(module) { + for dep in deps { + if !loaded.contains(dep) { + return false; + } + } + } + loaded.insert(*module); + } + + true +} + +// Test configuration hot reloading +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn test_configuration_hot_reload() { + let config = Arc::new(RwLock::new(get_default_config())); + let config_clone = config.clone(); + + // Simulate configuration update + tokio::spawn(async move { + tokio::time::sleep(Duration::from_millis(50)).await; + let mut cfg = config_clone.write().await; + cfg.pbs.pbs_config.port = 19550; + }); + + // Initial port + { + let cfg = config.read().await; + assert_eq!(cfg.pbs.pbs_config.port, 18550); + } + + // Wait for update + tokio::time::sleep(Duration::from_millis(100)).await; + + // Updated port + { + let cfg = config.read().await; + assert_eq!(cfg.pbs.pbs_config.port, 19550); + } +} + +// Test metrics collection across modules +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn test_cross_module_metrics_collection() { + struct ModuleMetrics { + requests: Arc>, + errors: Arc>, + latency_ms: Arc>>, + } + + let pbs_metrics = ModuleMetrics { + requests: Arc::new(RwLock::new(0)), + errors: Arc::new(RwLock::new(0)), + latency_ms: Arc::new(RwLock::new(Vec::new())), + }; + + // Simulate concurrent metric updates + let mut handles = vec![]; + + for i in 0..10 { + let requests = pbs_metrics.requests.clone(); + let errors = pbs_metrics.errors.clone(); + let latency = pbs_metrics.latency_ms.clone(); + + handles.push(tokio::spawn(async move { + *requests.write().await += 10; + if i % 3 == 0 { + *errors.write().await += 1; + } + latency.write().await.push(10 + i); + })); + } + + for handle in handles { + handle.await.unwrap(); + } + + // Verify metrics + assert_eq!(*pbs_metrics.requests.read().await, 100); + assert_eq!(*pbs_metrics.errors.read().await, 4); // 0, 3, 6, 9 + assert_eq!(pbs_metrics.latency_ms.read().await.len(), 10); + + // Calculate average latency + let latencies = pbs_metrics.latency_ms.read().await; + let avg_latency = latencies.iter().sum::() as f64 / latencies.len() as f64; + assert!(avg_latency >= 10.0 && avg_latency <= 20.0); +} diff --git a/tests/tests/pbs_get_header.rs b/tests/tests/pbs_get_header.rs index 10f30b6a..d6805959 100644 --- a/tests/tests/pbs_get_header.rs +++ b/tests/tests/pbs_get_header.rs @@ -12,7 +12,7 @@ use cb_pbs::{DefaultBuilderApi, PbsService, PbsState}; use cb_tests::{ mock_relay::{start_mock_relay_service, MockRelayState}, mock_validator::MockValidator, - utils::{generate_mock_relay, get_pbs_static_config, setup_test_env, to_pbs_config}, + utils::{generate_mock_relay, get_pbs_config, setup_test_env, to_pbs_config}, }; use eyre::Result; use reqwest::StatusCode; @@ -35,7 +35,7 @@ async fn test_get_header() -> Result<()> { tokio::spawn(start_mock_relay_service(mock_state.clone(), relay_port)); // Run the PBS service - let config = to_pbs_config(chain, get_pbs_static_config(pbs_port), vec![mock_relay.clone()]); + let config = to_pbs_config(chain, get_pbs_config(pbs_port), vec![mock_relay.clone()]); let state = PbsState::new(config); tokio::spawn(PbsService::run::<(), DefaultBuilderApi>(state)); @@ -58,7 +58,7 @@ async fn test_get_header() -> Result<()> { assert_eq!(res.message.header.timestamp, timestamp_of_slot_start_sec(0, chain)); assert_eq!( res.signature, - sign_builder_root(chain, &mock_state.signer, res.message.tree_hash_root().0) + sign_builder_root(chain, &mock_state.signer, &res.message.tree_hash_root()) ); Ok(()) } @@ -81,7 +81,7 @@ async fn test_get_header_returns_204_if_relay_down() -> Result<()> { // tokio::spawn(start_mock_relay_service(mock_state.clone(), relay_port)); // Run the PBS service - let config = to_pbs_config(chain, get_pbs_static_config(pbs_port), vec![mock_relay.clone()]); + let config = to_pbs_config(chain, get_pbs_config(pbs_port), vec![mock_relay.clone()]); let state = PbsState::new(config); tokio::spawn(PbsService::run::<(), DefaultBuilderApi>(state)); @@ -113,7 +113,7 @@ async fn test_get_header_returns_400_if_request_is_invalid() -> Result<()> { tokio::spawn(start_mock_relay_service(mock_state.clone(), relay_port)); // Run the PBS service - let config = to_pbs_config(chain, get_pbs_static_config(pbs_port), vec![mock_relay.clone()]); + let config = to_pbs_config(chain, get_pbs_config(pbs_port), vec![mock_relay.clone()]); let state = PbsState::new(config); tokio::spawn(PbsService::run::<(), DefaultBuilderApi>(state)); @@ -121,7 +121,7 @@ async fn test_get_header_returns_400_if_request_is_invalid() -> Result<()> { tokio::time::sleep(Duration::from_millis(100)).await; // Create an invalid URL by truncating the pubkey - let mut bad_url = mock_relay.get_header_url(0, B256::ZERO, pubkey).unwrap(); + let mut bad_url = mock_relay.get_header_url(0, B256::ZERO, pubkey)?; bad_url.set_path(&bad_url.path().replace(&pubkey.to_string(), &pubkey.to_string()[..10])); let mock_validator = MockValidator::new(pbs_port)?; diff --git a/tests/tests/pbs_get_status.rs b/tests/tests/pbs_get_status.rs index 3e913dc5..7d773aa8 100644 --- a/tests/tests/pbs_get_status.rs +++ b/tests/tests/pbs_get_status.rs @@ -9,7 +9,7 @@ use cb_pbs::{DefaultBuilderApi, PbsService, PbsState}; use cb_tests::{ mock_relay::{start_mock_relay_service, MockRelayState}, mock_validator::MockValidator, - utils::{generate_mock_relay, get_pbs_static_config, setup_test_env, to_pbs_config}, + utils::{generate_mock_relay, get_pbs_config, setup_test_env, to_pbs_config}, }; use eyre::Result; use reqwest::StatusCode; @@ -34,7 +34,7 @@ async fn test_get_status() -> Result<()> { tokio::spawn(start_mock_relay_service(mock_state.clone(), relay_0_port)); tokio::spawn(start_mock_relay_service(mock_state.clone(), relay_1_port)); - let config = to_pbs_config(chain, get_pbs_static_config(pbs_port), relays.clone()); + let config = to_pbs_config(chain, get_pbs_config(pbs_port), relays.clone()); let state = PbsState::new(config); tokio::spawn(PbsService::run::<(), DefaultBuilderApi>(state)); @@ -43,14 +43,13 @@ async fn test_get_status() -> Result<()> { let mock_validator = MockValidator::new(pbs_port)?; info!("Sending get status"); - let res = mock_validator.do_get_status().await.expect("failed to get status"); + let res = mock_validator.do_get_status().await?; assert_eq!(res.status(), StatusCode::OK); // Expect two statuses since two relays in config assert_eq!(mock_state.received_get_status(), 2); Ok(()) } - #[tokio::test] async fn test_get_status_returns_502_if_relay_down() -> Result<()> { setup_test_env(); @@ -67,7 +66,7 @@ async fn test_get_status_returns_502_if_relay_down() -> Result<()> { // Don't start the relay // tokio::spawn(start_mock_relay_service(mock_state.clone(), relay_port)); - let config = to_pbs_config(chain, get_pbs_static_config(pbs_port), relays.clone()); + let config = to_pbs_config(chain, get_pbs_config(pbs_port), relays.clone()); let state = PbsState::new(config); tokio::spawn(PbsService::run::<(), DefaultBuilderApi>(state)); @@ -76,7 +75,7 @@ async fn test_get_status_returns_502_if_relay_down() -> Result<()> { let mock_validator = MockValidator::new(pbs_port)?; info!("Sending get status"); - let res = mock_validator.do_get_status().await.expect("failed to get status"); + let res = mock_validator.do_get_status().await?; assert_eq!(res.status(), StatusCode::BAD_GATEWAY); // 502 error // Expect no statuses since relay is down diff --git a/tests/tests/pbs_mux.rs b/tests/tests/pbs_mux.rs index 4d830e20..84fa1a3d 100644 --- a/tests/tests/pbs_mux.rs +++ b/tests/tests/pbs_mux.rs @@ -10,7 +10,7 @@ use cb_pbs::{DefaultBuilderApi, PbsService, PbsState}; use cb_tests::{ mock_relay::{start_mock_relay_service, MockRelayState}, mock_validator::MockValidator, - utils::{generate_mock_relay, get_pbs_static_config, setup_test_env, to_pbs_config}, + utils::{generate_mock_relay, get_pbs_config, setup_test_env, to_pbs_config}, }; use eyre::Result; use reqwest::StatusCode; @@ -37,7 +37,7 @@ async fn test_mux() -> Result<()> { // Register all relays in PBS config let relays = vec![default_relay.clone()]; - let mut config = to_pbs_config(chain, get_pbs_static_config(pbs_port), relays); + let mut config = to_pbs_config(chain, get_pbs_config(pbs_port), relays); config.all_relays = vec![mux_relay_1.clone(), mux_relay_2.clone(), default_relay.clone()]; // Configure mux for two relays diff --git a/tests/tests/pbs_post_blinded_blocks.rs b/tests/tests/pbs_post_blinded_blocks.rs index 9e91dfa9..24b7e66b 100644 --- a/tests/tests/pbs_post_blinded_blocks.rs +++ b/tests/tests/pbs_post_blinded_blocks.rs @@ -10,7 +10,7 @@ use cb_pbs::{DefaultBuilderApi, PbsService, PbsState}; use cb_tests::{ mock_relay::{start_mock_relay_service, MockRelayState}, mock_validator::MockValidator, - utils::{generate_mock_relay, get_pbs_static_config, setup_test_env, to_pbs_config}, + utils::{generate_mock_relay, get_pbs_config, setup_test_env, to_pbs_config}, }; use eyre::Result; use reqwest::{Response, StatusCode}; @@ -47,7 +47,7 @@ async fn test_submit_block_too_large() -> Result<()> { let mock_state = Arc::new(MockRelayState::new(chain, signer).with_large_body()); tokio::spawn(start_mock_relay_service(mock_state.clone(), pbs_port + 1)); - let config = to_pbs_config(chain, get_pbs_static_config(pbs_port), relays); + let config = to_pbs_config(chain, get_pbs_config(pbs_port), relays); let state = PbsState::new(config); tokio::spawn(PbsService::run::<(), DefaultBuilderApi>(state)); @@ -77,7 +77,7 @@ async fn submit_block_impl(pbs_port: u16, api_version: &BuilderApiVersion) -> Re tokio::spawn(start_mock_relay_service(mock_state.clone(), pbs_port + 1)); // Run the PBS service - let config = to_pbs_config(chain, get_pbs_static_config(pbs_port), relays); + let config = to_pbs_config(chain, get_pbs_config(pbs_port), relays); let state = PbsState::new(config); tokio::spawn(PbsService::run::<(), DefaultBuilderApi>(state)); diff --git a/tests/tests/pbs_post_validators.rs b/tests/tests/pbs_post_validators.rs index f2480ac1..248c5d20 100644 --- a/tests/tests/pbs_post_validators.rs +++ b/tests/tests/pbs_post_validators.rs @@ -10,7 +10,7 @@ use cb_pbs::{DefaultBuilderApi, PbsService, PbsState}; use cb_tests::{ mock_relay::{start_mock_relay_service, MockRelayState}, mock_validator::MockValidator, - utils::{generate_mock_relay, get_pbs_static_config, setup_test_env, to_pbs_config}, + utils::{generate_mock_relay, get_pbs_config, setup_test_env, to_pbs_config}, }; use eyre::Result; use reqwest::StatusCode; @@ -31,7 +31,7 @@ async fn test_register_validators() -> Result<()> { tokio::spawn(start_mock_relay_service(mock_state.clone(), pbs_port + 1)); // Run the PBS service - let config = to_pbs_config(chain, get_pbs_static_config(pbs_port), relays); + let config = to_pbs_config(chain, get_pbs_config(pbs_port), relays); let state = PbsState::new(config); tokio::spawn(PbsService::run::<(), DefaultBuilderApi>(state)); @@ -77,7 +77,7 @@ async fn test_register_validators_returns_422_if_request_is_malformed() -> Resul tokio::spawn(start_mock_relay_service(mock_state.clone(), pbs_port + 1)); // Run the PBS service - let config = to_pbs_config(chain, get_pbs_static_config(pbs_port), relays); + let config = to_pbs_config(chain, get_pbs_config(pbs_port), relays); let state = PbsState::new(config); tokio::spawn(PbsService::run::<(), DefaultBuilderApi>(state)); @@ -85,7 +85,7 @@ async fn test_register_validators_returns_422_if_request_is_malformed() -> Resul tokio::time::sleep(Duration::from_millis(100)).await; let mock_validator = MockValidator::new(pbs_port)?; - let url = mock_validator.comm_boost.register_validator_url().unwrap(); + let url = mock_validator.comm_boost.register_validator_url()?; info!("Sending register validator"); // Bad fee recipient @@ -201,7 +201,6 @@ async fn test_register_validators_returns_422_if_request_is_malformed() -> Resul assert_eq!(mock_state.received_register_validator(), 0); Ok(()) } - #[tokio::test] async fn test_register_validators_does_not_retry_on_429() -> Result<()> { setup_test_env(); @@ -220,7 +219,7 @@ async fn test_register_validators_does_not_retry_on_429() -> Result<()> { tokio::spawn(start_mock_relay_service(mock_state.clone(), pbs_port + 1)); // Run the PBS service - let config = to_pbs_config(chain, get_pbs_static_config(pbs_port), relays); + let config = to_pbs_config(chain, get_pbs_config(pbs_port), relays); let state = PbsState::new(config); tokio::spawn(PbsService::run::<(), DefaultBuilderApi>(state.clone())); @@ -272,7 +271,7 @@ async fn test_register_validators_retries_on_500() -> Result<()> { tokio::spawn(start_mock_relay_service(mock_state.clone(), pbs_port + 1)); // Set retry limit to 3 - let mut pbs_config = get_pbs_static_config(pbs_port); + let mut pbs_config = get_pbs_config(pbs_port); pbs_config.register_validator_retry_limit = 3; let config = to_pbs_config(chain, pbs_config, relays); diff --git a/tests/tests/signer_jwt_auth.rs b/tests/tests/signer_jwt_auth.rs index 90a0365f..7c1bfe84 100644 --- a/tests/tests/signer_jwt_auth.rs +++ b/tests/tests/signer_jwt_auth.rs @@ -1,30 +1,49 @@ use std::{collections::HashMap, time::Duration}; -use alloy::{hex, primitives::FixedBytes}; +use alloy::primitives::b256; use cb_common::{ - commit::{constants::GET_PUBKEYS_PATH, request::GetPubkeysResponse}, - config::StartSignerConfig, - signer::{SignerLoader, ValidatorKeysFormat}, - types::{Chain, ModuleId}, + commit::constants::{GET_PUBKEYS_PATH, REVOKE_MODULE_PATH}, + config::{load_module_signing_configs, ModuleSigningConfig}, + types::ModuleId, utils::create_jwt, }; -use cb_signer::service::SigningService; -use cb_tests::utils::{get_signer_config, get_start_signer_config, setup_test_env}; +use cb_tests::{ + signer_service::{create_admin_jwt, start_server, verify_pubkeys}, + utils::{self, setup_test_env}, +}; use eyre::Result; -use reqwest::{Response, StatusCode}; +use reqwest::StatusCode; use tracing::info; const JWT_MODULE: &str = "test-module"; const JWT_SECRET: &str = "test-jwt-secret"; +const ADMIN_SECRET: &str = "test-admin-secret"; + +async fn create_mod_signing_configs() -> HashMap { + let mut cfg = + utils::get_commit_boost_config(utils::get_pbs_static_config(utils::get_pbs_config(0))); + + let module_id = ModuleId(JWT_MODULE.to_string()); + let signing_id = b256!("0101010101010101010101010101010101010101010101010101010101010101"); + + cfg.modules = Some(vec![utils::create_module_config(module_id.clone(), signing_id)]); + + let jwts = HashMap::from([(module_id.clone(), JWT_SECRET.to_string())]); + + load_module_signing_configs(&cfg, &jwts).unwrap() +} #[tokio::test] async fn test_signer_jwt_auth_success() -> Result<()> { setup_test_env(); let module_id = ModuleId(JWT_MODULE.to_string()); - let start_config = start_server(20100).await?; + let mod_cfgs = create_mod_signing_configs().await; + let start_config = start_server(20100, &mod_cfgs, ADMIN_SECRET.to_string()).await?; + let jwt_config = + mod_cfgs.get(&module_id).ok_or_else(|| eyre::eyre!("Module config not found"))?; // Run a pubkeys request - let jwt = create_jwt(&module_id, JWT_SECRET)?; + let jwt = create_jwt(&module_id, &jwt_config.jwt_secret)?; let client = reqwest::Client::new(); let url = format!("http://{}{}", start_config.endpoint, GET_PUBKEYS_PATH); let response = client.get(&url).bearer_auth(&jwt).send().await?; @@ -34,12 +53,12 @@ async fn test_signer_jwt_auth_success() -> Result<()> { Ok(()) } - #[tokio::test] async fn test_signer_jwt_auth_fail() -> Result<()> { setup_test_env(); let module_id = ModuleId(JWT_MODULE.to_string()); - let start_config = start_server(20200).await?; + let mod_cfgs = create_mod_signing_configs().await; + let start_config = start_server(20101, &mod_cfgs, ADMIN_SECRET.to_string()).await?; // Run a pubkeys request - this should fail due to invalid JWT let jwt = create_jwt(&module_id, "incorrect secret")?; @@ -59,7 +78,9 @@ async fn test_signer_jwt_auth_fail() -> Result<()> { async fn test_signer_jwt_rate_limit() -> Result<()> { setup_test_env(); let module_id = ModuleId(JWT_MODULE.to_string()); - let start_config = start_server(20300).await?; + let mod_cfgs = create_mod_signing_configs().await; + let start_config = start_server(20102, &mod_cfgs, ADMIN_SECRET.to_string()).await?; + let mod_cfg = mod_cfgs.get(&module_id).ok_or_else(|| eyre::eyre!("Module config not found"))?; // Run as many pubkeys requests as the fail limit let jwt = create_jwt(&module_id, "incorrect secret")?; @@ -71,7 +92,7 @@ async fn test_signer_jwt_rate_limit() -> Result<()> { } // Run another request - this should fail due to rate limiting now - let jwt = create_jwt(&module_id, JWT_SECRET)?; + let jwt = create_jwt(&module_id, &mod_cfg.jwt_secret)?; let response = client.get(&url).bearer_auth(&jwt).send().await?; assert!(response.status() == StatusCode::TOO_MANY_REQUESTS); @@ -85,62 +106,74 @@ async fn test_signer_jwt_rate_limit() -> Result<()> { Ok(()) } - -// Starts the signer moduler server on a separate task and returns its -// configuration -async fn start_server(port: u16) -> Result { +#[tokio::test] +async fn test_signer_revoked_jwt_fail() -> Result<()> { setup_test_env(); - let chain = Chain::Hoodi; - - // Mock JWT secrets + let admin_secret = ADMIN_SECRET.to_string(); let module_id = ModuleId(JWT_MODULE.to_string()); - let mut jwts = HashMap::new(); - jwts.insert(module_id.clone(), JWT_SECRET.to_string()); - - // Create a signer config - let loader = SignerLoader::ValidatorsDir { - keys_path: "data/keystores/keys".into(), - secrets_path: "data/keystores/secrets".into(), - format: ValidatorKeysFormat::Lighthouse, - }; - let mut config = get_signer_config(loader); - config.port = port; - config.jwt_auth_fail_limit = 3; // Set a low fail limit for testing - config.jwt_auth_fail_timeout_seconds = 3; // Set a short timeout for testing - let start_config = get_start_signer_config(config, chain, jwts); - - // Run the Signer - let server_handle = tokio::spawn(SigningService::run(start_config.clone())); - - // Make sure the server is running - tokio::time::sleep(Duration::from_millis(100)).await; - if server_handle.is_finished() { - return Err(eyre::eyre!( - "Signer service failed to start: {}", - server_handle.await.unwrap_err() - )); - } - Ok(start_config) + let mod_cfgs = create_mod_signing_configs().await; + let start_config = start_server(20400, &mod_cfgs, admin_secret.clone()).await?; + + // Run as many pubkeys requests as the fail limit + let jwt = create_jwt(&module_id, JWT_SECRET)?; + let admin_jwt = create_admin_jwt(admin_secret)?; + let client = reqwest::Client::new(); + + // At first, test module should be allowed to request pubkeys + let url = format!("http://{}{}", start_config.endpoint, GET_PUBKEYS_PATH); + let response = client.get(&url).bearer_auth(&jwt).send().await?; + assert!(response.status() == StatusCode::OK); + + let revoke_url = format!("http://{}{}", start_config.endpoint, REVOKE_MODULE_PATH); + let response = client + .post(&revoke_url) + .header("content-type", "application/json") + .body(reqwest::Body::wrap(format!("{{\"module_id\": \"{JWT_MODULE}\"}}"))) + .bearer_auth(&admin_jwt) + .send() + .await?; + assert!(response.status() == StatusCode::OK); + + // After revoke, test module shouldn't be allowed anymore + let response = client.get(&url).bearer_auth(&jwt).send().await?; + assert!(response.status() == StatusCode::UNAUTHORIZED); + + Ok(()) } -// Verifies that the pubkeys returned by the server match the pubkeys in the -// test data -async fn verify_pubkeys(response: Response) -> Result<()> { - // Verify the expected pubkeys are returned +#[tokio::test] +async fn test_signer_only_admin_can_revoke() -> Result<()> { + setup_test_env(); + let admin_secret = ADMIN_SECRET.to_string(); + let module_id = ModuleId(JWT_MODULE.to_string()); + let mod_cfgs = create_mod_signing_configs().await; + let start_config = start_server(20500, &mod_cfgs, admin_secret.clone()).await?; + + // Run as many pubkeys requests as the fail limit + let jwt = create_jwt(&module_id, JWT_SECRET)?; + let admin_jwt = create_admin_jwt(admin_secret)?; + let client = reqwest::Client::new(); + let url = format!("http://{}{}", start_config.endpoint, REVOKE_MODULE_PATH); + + // Module JWT shouldn't be able to revoke modules + let response = client + .post(&url) + .header("content-type", "application/json") + .body(reqwest::Body::wrap(format!("{{\"module_id\": \"{JWT_MODULE}\"}}"))) + .bearer_auth(&jwt) + .send() + .await?; + assert!(response.status() == StatusCode::UNAUTHORIZED); + + // Admin should be able to revoke modules + let response = client + .post(&url) + .header("content-type", "application/json") + .body(reqwest::Body::wrap(format!("{{\"module_id\": \"{JWT_MODULE}\"}}"))) + .bearer_auth(&admin_jwt) + .send() + .await?; assert!(response.status() == StatusCode::OK); - let pubkey_json = response.json::().await?; - assert_eq!(pubkey_json.keys.len(), 2); - let expected_pubkeys = vec![ - FixedBytes::new(hex!("883827193f7627cd04e621e1e8d56498362a52b2a30c9a1c72036eb935c4278dee23d38a24d2f7dda62689886f0c39f4")), - FixedBytes::new(hex!("b3a22e4a673ac7a153ab5b3c17a4dbef55f7e47210b20c0cbb0e66df5b36bb49ef808577610b034172e955d2312a61b9")), - ]; - for expected in expected_pubkeys { - assert!( - pubkey_json.keys.iter().any(|k| k.consensus == expected), - "Expected pubkey not found: {:?}", - expected - ); - info!("Server returned expected pubkey: {:?}", expected); - } + Ok(()) } diff --git a/tests/tests/signer_request_sig.rs b/tests/tests/signer_request_sig.rs new file mode 100644 index 00000000..9bcc07e5 --- /dev/null +++ b/tests/tests/signer_request_sig.rs @@ -0,0 +1,112 @@ +use std::collections::HashMap; + +use alloy::{ + hex, + primitives::{b256, FixedBytes}, +}; +use cb_common::{ + commit::{ + constants::REQUEST_SIGNATURE_PATH, + request::{SignConsensusRequest, SignRequest}, + }, + config::{load_module_signing_configs, ModuleSigningConfig}, + types::ModuleId, + utils::create_jwt, +}; +use cb_tests::{ + signer_service::start_server, + utils::{self, setup_test_env}, +}; +use eyre::Result; +use reqwest::StatusCode; + +const MODULE_ID_1: &str = "test-module"; +const MODULE_ID_2: &str = "another-module"; +const PUBKEY_1: [u8; 48] = + hex!("883827193f7627cd04e621e1e8d56498362a52b2a30c9a1c72036eb935c4278dee23d38a24d2f7dda62689886f0c39f4"); +const ADMIN_SECRET: &str = "test-admin-secret"; + +async fn create_mod_signing_configs() -> HashMap { + let mut cfg = + utils::get_commit_boost_config(utils::get_pbs_static_config(utils::get_pbs_config(0))); + + let module_id_1 = ModuleId(MODULE_ID_1.to_string()); + let signing_id_1 = b256!("0x6a33a23ef26a4836979edff86c493a69b26ccf0b4a16491a815a13787657431b"); + let module_id_2 = ModuleId(MODULE_ID_2.to_string()); + let signing_id_2 = b256!("0x61fe00135d7b4912a8c63ada215ac2e62326e6e7b30f49a29fcf9779d7ad800d"); + + cfg.modules = Some(vec![ + utils::create_module_config(module_id_1.clone(), signing_id_1), + utils::create_module_config(module_id_2.clone(), signing_id_2), + ]); + + let jwts = HashMap::from([ + (module_id_1.clone(), "supersecret".to_string()), + (module_id_2.clone(), "anothersecret".to_string()), + ]); + + load_module_signing_configs(&cfg, &jwts).unwrap() +} + +/// Makes sure the signer service signs requests correctly, using the module's +/// signing ID +#[tokio::test] +async fn test_signer_sign_request_good() -> Result<()> { + setup_test_env(); + let module_id = ModuleId(MODULE_ID_1.to_string()); + let mod_cfgs = create_mod_signing_configs().await; + let start_config = start_server(20200, &mod_cfgs, ADMIN_SECRET.to_string()).await?; + let jwt_config = + mod_cfgs.get(&module_id).ok_or_else(|| eyre::eyre!("Module config not found"))?; + + // Send a signing request + let object_root = b256!("0x0123456789012345678901234567890123456789012345678901234567890123"); + let request = + SignRequest::Consensus(SignConsensusRequest { pubkey: FixedBytes(PUBKEY_1), object_root }); + let jwt = create_jwt(&module_id, &jwt_config.jwt_secret)?; + let client = reqwest::Client::new(); + let url = format!("http://{}{}", start_config.endpoint, REQUEST_SIGNATURE_PATH); + let response = client.post(&url).json(&request).bearer_auth(&jwt).send().await?; + + // Verify the response is successful + assert!(response.status() == StatusCode::OK); + + // Verify the signature is returned + let signature = response.text().await?; + assert!(!signature.is_empty(), "Signature should not be empty"); + + let expected_signature = "\"0xa43e623f009e615faa3987368f64d6286a4103de70e9a81d82562c50c91eae2d5d6fb9db9fe943aa8ee42fd92d8210c1149f25ed6aa72a557d74a0ed5646fdd0e8255ec58e3e2931695fe913863ba0cdf90d29f651bce0a34169a6f6ce5b3115\""; + assert_eq!(signature, expected_signature, "Signature does not match expected value"); + + Ok(()) +} +#[tokio::test] +async fn test_signer_sign_request_different_module() -> Result<()> { + setup_test_env(); + let module_id = ModuleId(MODULE_ID_2.to_string()); + let mod_cfgs = create_mod_signing_configs().await; + let start_config = start_server(20201, &mod_cfgs, ADMIN_SECRET.to_string()).await?; + let jwt_config = + mod_cfgs.get(&module_id).ok_or_else(|| eyre::eyre!("Module config not found"))?; + + // Send a signing request + let object_root = b256!("0x0123456789012345678901234567890123456789012345678901234567890123"); + let request = + SignRequest::Consensus(SignConsensusRequest { pubkey: FixedBytes(PUBKEY_1), object_root }); + let jwt = create_jwt(&module_id, &jwt_config.jwt_secret)?; + let client = reqwest::Client::new(); + let url = format!("http://{}{}", start_config.endpoint, REQUEST_SIGNATURE_PATH); + let response = client.post(&url).json(&request).bearer_auth(&jwt).send().await?; + + // Verify the response is successful + assert!(response.status() == StatusCode::OK); + + // Verify the signature is returned + let signature = response.text().await?; + assert!(!signature.is_empty(), "Signature should not be empty"); + + let incorrect_signature = "\"0xa43e623f009e615faa3987368f64d6286a4103de70e9a81d82562c50c91eae2d5d6fb9db9fe943aa8ee42fd92d8210c1149f25ed6aa72a557d74a0ed5646fdd0e8255ec58e3e2931695fe913863ba0cdf90d29f651bce0a34169a6f6ce5b3115\""; + assert_ne!(signature, incorrect_signature, "Signature does not match expected value"); + + Ok(()) +}